Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/pci_bind.c24
-rw-r--r--drivers/acpi/pci_irq.c5
-rw-r--r--drivers/acpi/processor_core.c2
-rw-r--r--drivers/acpi/processor_idle.c8
-rw-r--r--drivers/acpi/processor_perflib.c12
-rw-r--r--drivers/acpi/processor_throttling.c2
-rw-r--r--drivers/acpi/video.c18
-rw-r--r--drivers/ata/Kconfig9
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci.c159
-rw-r--r--drivers/ata/ata_piix.c20
-rw-r--r--drivers/ata/libata-acpi.c4
-rw-r--r--drivers/ata/libata-core.c11
-rw-r--r--drivers/ata/libata-eh.c2
-rw-r--r--drivers/ata/libata-scsi.c2
-rw-r--r--drivers/ata/libata-sff.c20
-rw-r--r--drivers/ata/pata_ali.c17
-rw-r--r--drivers/ata/pata_efar.c17
-rw-r--r--drivers/ata/pata_legacy.c2
-rw-r--r--drivers/ata/pata_netcell.c13
-rw-r--r--drivers/ata/pata_palmld.c150
-rw-r--r--drivers/ata/sata_nv.c131
-rw-r--r--drivers/ata/sata_sil.c2
-rw-r--r--drivers/ata/sata_sx4.c11
-rw-r--r--drivers/base/bus.c6
-rw-r--r--drivers/base/core.c5
-rw-r--r--drivers/base/driver.c4
-rw-r--r--drivers/base/firmware_class.c129
-rw-r--r--drivers/base/platform.c36
-rw-r--r--drivers/base/power/main.c98
-rw-r--r--drivers/base/sys.c16
-rw-r--r--drivers/block/DAC960.c10
-rw-r--r--drivers/block/Kconfig6
-rw-r--r--drivers/block/amiflop.c54
-rw-r--r--drivers/block/ataflop.c66
-rw-r--r--drivers/block/brd.c7
-rw-r--r--drivers/block/cciss.c927
-rw-r--r--drivers/block/cciss.h34
-rw-r--r--drivers/block/cciss_cmd.h2
-rw-r--r--drivers/block/cciss_scsi.c109
-rw-r--r--drivers/block/cpqarray.c20
-rw-r--r--drivers/block/floppy.c85
-rw-r--r--drivers/block/hd.c106
-rw-r--r--drivers/block/loop.c37
-rw-r--r--drivers/block/mg_disk.c537
-rw-r--r--drivers/block/nbd.c23
-rw-r--r--drivers/block/paride/pcd.c29
-rw-r--r--drivers/block/paride/pd.c22
-rw-r--r--drivers/block/paride/pf.c47
-rw-r--r--drivers/block/pktcdvd.c8
-rw-r--r--drivers/block/ps3disk.c24
-rw-r--r--drivers/block/sunvdc.c14
-rw-r--r--drivers/block/swim.c48
-rw-r--r--drivers/block/swim3.c107
-rw-r--r--drivers/block/sx8.c17
-rw-r--r--drivers/block/ub.c54
-rw-r--r--drivers/block/viodasd.c12
-rw-r--r--drivers/block/virtio_blk.c120
-rw-r--r--drivers/block/xd.c41
-rw-r--r--drivers/block/xen-blkfront.c44
-rw-r--r--drivers/block/xsysace.c46
-rw-r--r--drivers/block/z2ram.c19
-rw-r--r--drivers/bluetooth/hci_ldisc.c5
-rw-r--r--drivers/cdrom/cdrom.c4
-rw-r--r--drivers/cdrom/gdrom.c36
-rw-r--r--drivers/cdrom/viocd.c35
-rw-r--r--drivers/char/Kconfig17
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/agp/intel-agp.c16
-rw-r--r--drivers/char/amiserial.c2
-rw-r--r--drivers/char/bfin_jtag_comm.c365
-rw-r--r--drivers/char/cyclades.c290
-rw-r--r--drivers/char/epca.c17
-rw-r--r--drivers/char/hpet.c4
-rw-r--r--drivers/char/hw_random/Kconfig14
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/mxc-rnga.c247
-rw-r--r--drivers/char/hw_random/omap-rng.c2
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c26
-rw-r--r--drivers/char/hw_random/via-rng.c15
-rw-r--r--drivers/char/hw_random/virtio-rng.c30
-rw-r--r--drivers/char/ip2/i2lib.c4
-rw-r--r--drivers/char/ip2/ip2main.c4
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c13
-rw-r--r--drivers/char/isicom.c19
-rw-r--r--drivers/char/istallion.c8
-rw-r--r--drivers/char/mem.c2
-rw-r--r--drivers/char/moxa.c5
-rw-r--r--drivers/char/mxser.c14
-rw-r--r--drivers/char/n_hdlc.c4
-rw-r--r--drivers/char/n_tty.c29
-rw-r--r--drivers/char/pcmcia/synclink_cs.c11
-rw-r--r--drivers/char/pty.c11
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/char/raw.c2
-rw-r--r--drivers/char/rocket.c19
-rw-r--r--drivers/char/selection.c2
-rw-r--r--drivers/char/stallion.c6
-rw-r--r--drivers/char/synclink.c9
-rw-r--r--drivers/char/synclink_gt.c86
-rw-r--r--drivers/char/synclinkmp.c9
-rw-r--r--drivers/char/sysrq.c2
-rw-r--r--drivers/char/tpm/tpm_bios.c3
-rw-r--r--drivers/char/tty_audit.c10
-rw-r--r--drivers/char/tty_io.c122
-rw-r--r--drivers/char/tty_ioctl.c88
-rw-r--r--drivers/char/tty_ldisc.c549
-rw-r--r--drivers/char/tty_port.c47
-rw-r--r--drivers/char/virtio_console.c26
-rw-r--r--drivers/char/vt.c8
-rw-r--r--drivers/clocksource/Makefile2
-rw-r--r--drivers/clocksource/sh_cmt.c116
-rw-r--r--drivers/clocksource/sh_mtu2.c357
-rw-r--r--drivers/clocksource/sh_tmu.c461
-rw-r--r--drivers/connector/Kconfig2
-rw-r--r--drivers/cpufreq/cpufreq.c6
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c5
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c5
-rw-r--r--drivers/crypto/Kconfig8
-rw-r--r--drivers/crypto/hifn_795x.c8
-rw-r--r--drivers/crypto/ixp4xx_crypto.c33
-rw-r--r--drivers/crypto/padlock-aes.c15
-rw-r--r--drivers/crypto/talitos.c713
-rw-r--r--drivers/dma/fsldma.c71
-rw-r--r--drivers/dma/ioat_dma.c2
-rw-r--r--drivers/edac/Kconfig34
-rw-r--r--drivers/edac/Makefile9
-rw-r--r--drivers/edac/amd64_edac.c3354
-rw-r--r--drivers/edac/amd64_edac.h644
-rw-r--r--drivers/edac/amd64_edac_dbg.c255
-rw-r--r--drivers/edac/amd64_edac_err_types.c161
-rw-r--r--drivers/edac/amd64_edac_inj.c185
-rw-r--r--drivers/edac/amd8111_edac.c4
-rw-r--r--drivers/edac/amd8131_edac.c2
-rw-r--r--drivers/edac/e752x_edac.c2
-rw-r--r--drivers/edac/edac_core.h9
-rw-r--r--drivers/firmware/dmi_scan.c1
-rw-r--r--drivers/gpio/Kconfig2
-rw-r--r--drivers/gpu/drm/Kconfig14
-rw-r--r--drivers/gpu/drm/drm_bufs.c11
-rw-r--r--drivers/gpu/drm/drm_crtc.c7
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c109
-rw-r--r--drivers/gpu/drm/drm_drv.c2
-rw-r--r--drivers/gpu/drm/drm_edid.c79
-rw-r--r--drivers/gpu/drm/drm_gem.c2
-rw-r--r--drivers/gpu/drm/drm_hashtab.c4
-rw-r--r--drivers/gpu/drm/drm_irq.c8
-rw-r--r--drivers/gpu/drm/drm_mm.c165
-rw-r--r--drivers/gpu/drm/drm_modes.c18
-rw-r--r--drivers/gpu/drm/drm_stub.c17
-rw-r--r--drivers/gpu/drm/drm_sysfs.c7
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c79
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h51
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c221
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c166
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c190
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h636
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c20
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c188
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h118
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c231
-rw-r--r--drivers/gpu/drm/i915/intel_display.c671
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c1
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c26
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c34
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c163
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c248
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c42
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h6
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c6
-rw-r--r--drivers/hid/Kconfig92
-rw-r--r--drivers/hid/Makefile10
-rw-r--r--drivers/hid/hid-apple.c4
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-debug.c23
-rw-r--r--drivers/hid/hid-drff.c8
-rw-r--r--drivers/hid/hid-gaff.c8
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/hid/hid-lgff.c10
-rw-r--r--drivers/hid/hid-ntrig.c222
-rw-r--r--drivers/hid/hid-sjoy.c180
-rw-r--r--drivers/hid/hid-tmff.c17
-rw-r--r--drivers/hid/hid-wacom.c259
-rw-r--r--drivers/hid/hid-zpff.c7
-rw-r--r--drivers/hid/hidraw.c5
-rw-r--r--drivers/hid/usbhid/hid-core.c7
-rw-r--r--drivers/hwmon/lm78.c2
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c59
-rw-r--r--drivers/i2c/busses/i2c-ocores.c5
-rw-r--r--drivers/i2c/busses/i2c-omap.c39
-rw-r--r--drivers/i2c/busses/i2c-pxa.c22
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c48
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c2
-rw-r--r--drivers/ide/Kconfig2
-rw-r--r--drivers/ide/alim15x3.c10
-rw-r--r--drivers/ide/at91_ide.c7
-rw-r--r--drivers/ide/au1xxx-ide.c8
-rw-r--r--drivers/ide/buddha.c9
-rw-r--r--drivers/ide/cmd640.c7
-rw-r--r--drivers/ide/cs5520.c4
-rw-r--r--drivers/ide/delkin_cb.c6
-rw-r--r--drivers/ide/falconide.c9
-rw-r--r--drivers/ide/gayle.c9
-rw-r--r--drivers/ide/hpt366.c25
-rw-r--r--drivers/ide/icside.c77
-rw-r--r--drivers/ide/ide-4drives.c6
-rw-r--r--drivers/ide/ide-atapi.c187
-rw-r--r--drivers/ide/ide-cd.c152
-rw-r--r--drivers/ide/ide-cd.h4
-rw-r--r--drivers/ide/ide-cs.c6
-rw-r--r--drivers/ide/ide-disk.c86
-rw-r--r--drivers/ide/ide-dma.c23
-rw-r--r--drivers/ide/ide-eh.c14
-rw-r--r--drivers/ide/ide-floppy.c32
-rw-r--r--drivers/ide/ide-gd.c14
-rw-r--r--drivers/ide/ide-generic.c7
-rw-r--r--drivers/ide/ide-h8300.c10
-rw-r--r--drivers/ide/ide-io.c138
-rw-r--r--drivers/ide/ide-ioctls.c1
-rw-r--r--drivers/ide/ide-iops.c47
-rw-r--r--drivers/ide/ide-legacy.c7
-rw-r--r--drivers/ide/ide-lib.c29
-rw-r--r--drivers/ide/ide-park.c7
-rw-r--r--drivers/ide/ide-pci-generic.c11
-rw-r--r--drivers/ide/ide-pm.c38
-rw-r--r--drivers/ide/ide-pnp.c6
-rw-r--r--drivers/ide/ide-probe.c104
-rw-r--r--drivers/ide/ide-tape.c822
-rw-r--r--drivers/ide/ide-taskfile.c23
-rw-r--r--drivers/ide/ide.c10
-rw-r--r--drivers/ide/ide_platform.c9
-rw-r--r--drivers/ide/macide.c9
-rw-r--r--drivers/ide/palm_bk3710.c6
-rw-r--r--drivers/ide/pdc202xx_new.c26
-rw-r--r--drivers/ide/pdc202xx_old.c106
-rw-r--r--drivers/ide/pmac.c13
-rw-r--r--drivers/ide/q40ide.c11
-rw-r--r--drivers/ide/rapide.c8
-rw-r--r--drivers/ide/scc_pata.c6
-rw-r--r--drivers/ide/setup-pci.c85
-rw-r--r--drivers/ide/sgiioc4.c7
-rw-r--r--drivers/ide/siimage.c4
-rw-r--r--drivers/ide/sl82c105.c9
-rw-r--r--drivers/ide/tc86c001.c2
-rw-r--r--drivers/ide/tx4938ide.c5
-rw-r--r--drivers/ide/tx4939ide.c7
-rw-r--r--drivers/ide/via82cxxx.c2
-rw-r--r--drivers/idle/i7300_idle.c6
-rw-r--r--drivers/ieee1394/dv1394.c5
-rw-r--r--drivers/ieee1394/ieee1394_core.h6
-rw-r--r--drivers/infiniband/hw/amso1100/c2_cq.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c32
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes_pSeries.h28
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c9
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c112
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.c6
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.h2
-rw-r--r--drivers/infiniband/hw/ehca/hcp_phyp.c11
-rw-r--r--drivers/infiniband/hw/ehca/hcp_phyp.h2
-rw-r--r--drivers/infiniband/hw/ehca/ipz_pt_fn.c19
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c17
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c16
-rw-r--r--drivers/infiniband/hw/mthca/mthca_profile.c4
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c14
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c10
-rw-r--r--drivers/input/input.c1
-rw-r--r--drivers/input/misc/Kconfig2
-rw-r--r--drivers/input/serio/Kconfig2
-rw-r--r--drivers/input/serio/ambakmi.c4
-rw-r--r--drivers/input/serio/libps2.c2
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c2
-rw-r--r--drivers/isdn/divert/isdn_divert.c2
-rw-r--r--drivers/isdn/mISDN/dsp_core.c2
-rw-r--r--drivers/leds/leds-h1940.c2
-rw-r--r--drivers/leds/leds-s3c24xx.c1
-rw-r--r--drivers/lguest/Kconfig2
-rw-r--r--drivers/lguest/core.c30
-rw-r--r--drivers/lguest/hypercalls.c14
-rw-r--r--drivers/lguest/interrupts_and_traps.c57
-rw-r--r--drivers/lguest/lg.h28
-rw-r--r--drivers/lguest/lguest_device.c41
-rw-r--r--drivers/lguest/lguest_user.c127
-rw-r--r--drivers/lguest/page_tables.c396
-rw-r--r--drivers/lguest/segments.c2
-rw-r--r--drivers/lguest/x86/core.c19
-rw-r--r--drivers/md/bitmap.c17
-rw-r--r--drivers/md/dm-exception-store.c2
-rw-r--r--drivers/md/dm-log.c3
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-table.c38
-rw-r--r--drivers/md/dm.c8
-rw-r--r--drivers/md/linear.c2
-rw-r--r--drivers/md/md.c33
-rw-r--r--drivers/md/multipath.c4
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid1.c4
-rw-r--r--drivers/md/raid10.c8
-rw-r--r--drivers/md/raid5.c38
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig2
-rw-r--r--drivers/media/video/Kconfig4
-rw-r--r--drivers/media/video/hdpvr/hdpvr-video.c2
-rw-r--r--drivers/media/video/ivtv/ivtv-queue.c3
-rw-r--r--drivers/memstick/core/mspro_block.c19
-rw-r--r--drivers/message/fusion/lsi/mpi_history.txt6
-rw-r--r--drivers/message/fusion/mptbase.c1570
-rw-r--r--drivers/message/fusion/mptbase.h180
-rw-r--r--drivers/message/fusion/mptctl.c692
-rw-r--r--drivers/message/fusion/mptdebug.h3
-rw-r--r--drivers/message/fusion/mptfc.c15
-rw-r--r--drivers/message/fusion/mptsas.c3136
-rw-r--r--drivers/message/fusion/mptsas.h41
-rw-r--r--drivers/message/fusion/mptscsih.c1329
-rw-r--r--drivers/message/fusion/mptscsih.h7
-rw-r--r--drivers/message/fusion/mptspi.c71
-rw-r--r--drivers/message/i2o/i2o_block.c43
-rw-r--r--drivers/mfd/pcf50633-core.c2
-rw-r--r--drivers/mfd/t7l66xb.c5
-rw-r--r--drivers/mfd/tc6387xb.c5
-rw-r--r--drivers/mfd/tc6393xb.c5
-rw-r--r--drivers/mfd/wm8350-core.c8
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/cb710/Kconfig25
-rw-r--r--drivers/misc/cb710/Makefile8
-rw-r--r--drivers/misc/cb710/core.c357
-rw-r--r--drivers/misc/cb710/debug.c119
-rw-r--r--drivers/misc/cb710/sgbuf2.c150
-rw-r--r--drivers/misc/enclosure.c6
-rw-r--r--drivers/mmc/card/block.c28
-rw-r--r--drivers/mmc/card/queue.c11
-rw-r--r--drivers/mmc/core/core.c107
-rw-r--r--drivers/mmc/host/Kconfig30
-rw-r--r--drivers/mmc/host/Makefile5
-rw-r--r--drivers/mmc/host/atmel-mci-regs.h33
-rw-r--r--drivers/mmc/host/atmel-mci.c12
-rw-r--r--drivers/mmc/host/cb710-mmc.c804
-rw-r--r--drivers/mmc/host/cb710-mmc.h104
-rw-r--r--drivers/mmc/host/mmc_spi.c23
-rw-r--r--drivers/mmc/host/mmci.c4
-rw-r--r--drivers/mmc/host/mvsdio.c35
-rw-r--r--drivers/mmc/host/mxcmmc.c45
-rw-r--r--drivers/mmc/host/omap.c5
-rw-r--r--drivers/mmc/host/omap_hsmmc.c8
-rw-r--r--drivers/mmc/host/pxamci.c46
-rw-r--r--drivers/mmc/host/s3cmci.c5
-rw-r--r--drivers/mmc/host/sdhci-of.c9
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c168
-rw-r--r--drivers/mmc/host/sdhci.c58
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/tmio_mmc.c180
-rw-r--r--drivers/mmc/host/tmio_mmc.h77
-rw-r--r--drivers/mtd/Kconfig3
-rw-r--r--drivers/mtd/devices/Kconfig2
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c43
-rw-r--r--drivers/mtd/nand/Kconfig4
-rw-r--r--drivers/mtd/nand/davinci_nand.c7
-rw-r--r--drivers/mtd/nand/mxc_nand.c43
-rw-r--r--drivers/mtd/onenand/omap2.c1
-rw-r--r--drivers/net/Kconfig15
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/appletalk/ltpc.c2
-rw-r--r--drivers/net/arm/ixp4xx_eth.c26
-rw-r--r--drivers/net/b44.h2
-rw-r--r--drivers/net/bnx2.c193
-rw-r--r--drivers/net/bnx2.h18
-rw-r--r--drivers/net/cnic.c2717
-rw-r--r--drivers/net/cnic.h299
-rw-r--r--drivers/net/cnic_defs.h580
-rw-r--r--drivers/net/cnic_if.h299
-rw-r--r--drivers/net/e100.c2
-rw-r--r--drivers/net/e1000e/e1000.h2
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/igbvf/igbvf.h2
-rw-r--r--drivers/net/ipg.h2
-rw-r--r--drivers/net/mlx4/en_netdev.c2
-rw-r--r--drivers/net/mlx4/eq.c4
-rw-r--r--drivers/net/mlx4/main.c14
-rw-r--r--drivers/net/mlx4/mr.c6
-rw-r--r--drivers/net/mlx4/profile.c2
-rw-r--r--drivers/net/niu.h4
-rw-r--r--drivers/net/qlge/qlge_main.c2
-rw-r--r--drivers/net/qlge/qlge_mpi.c6
-rw-r--r--drivers/net/r8169.c11
-rw-r--r--drivers/net/skfp/h/smt.h2
-rw-r--r--drivers/net/smc91x.h5
-rw-r--r--drivers/net/tokenring/3c359.c2
-rw-r--r--drivers/net/tokenring/lanstreamer.c2
-rw-r--r--drivers/net/tokenring/olympic.c2
-rw-r--r--drivers/net/ucc_geth_ethtool.c2
-rw-r--r--drivers/net/usb/usbnet.c2
-rw-r--r--drivers/net/virtio_net.c45
-rw-r--r--drivers/net/wan/ixp4xx_hss.c11
-rw-r--r--drivers/net/wireless/Kconfig2
-rw-r--r--drivers/net/wireless/hostap/Kconfig8
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig4
-rw-r--r--drivers/net/wireless/rndis_wlan.c2
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig14
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h2
-rw-r--r--drivers/net/wireless/wavelan_cs.c2
-rw-r--r--drivers/of/Kconfig8
-rw-r--r--drivers/oprofile/cpu_buffer.c8
-rw-r--r--drivers/parisc/iosapic.c6
-rw-r--r--drivers/parport/parport_gsc.c4
-rw-r--r--drivers/parport/parport_pc.c1802
-rw-r--r--drivers/parport/share.c13
-rw-r--r--drivers/pci/hotplug/acpiphp.h1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c63
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c54
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c4
-rw-r--r--drivers/pci/htirq.c4
-rw-r--r--drivers/pci/intel-iommu.c9
-rw-r--r--drivers/pci/intr_remapping.c54
-rw-r--r--drivers/pci/pci.c3
-rw-r--r--drivers/pci/probe.c2
-rw-r--r--drivers/pcmcia/Kconfig2
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/pxa2xx_stargate2.c174
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c2
-rw-r--r--drivers/pnp/resource.c18
-rw-r--r--drivers/regulator/da903x.c2
-rw-r--r--drivers/rtc/Kconfig2
-rw-r--r--drivers/rtc/rtc-ep93xx.c149
-rw-r--r--drivers/rtc/rtc-pl030.c4
-rw-r--r--drivers/rtc/rtc-pl031.c5
-rw-r--r--drivers/s390/block/dasd.c71
-rw-r--r--drivers/s390/block/dasd_diag.c6
-rw-r--r--drivers/s390/block/dasd_eckd.c50
-rw-r--r--drivers/s390/block/dasd_fba.c29
-rw-r--r--drivers/s390/block/dasd_int.h3
-rw-r--r--drivers/s390/block/dcssblk.c6
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/con3270.c38
-rw-r--r--drivers/s390/char/tape_34xx.c2
-rw-r--r--drivers/s390/char/tape_3590.c2
-rw-r--r--drivers/s390/char/tape_block.c26
-rw-r--r--drivers/s390/char/tty3270.c57
-rw-r--r--drivers/s390/cio/cio.c6
-rw-r--r--drivers/s390/cio/device_ops.c24
-rw-r--r--drivers/s390/cio/qdio_main.c46
-rw-r--r--drivers/s390/cio/qdio_perf.c12
-rw-r--r--drivers/s390/cio/qdio_perf.h10
-rw-r--r--drivers/s390/kvm/kvm_virtio.c43
-rw-r--r--drivers/s390/net/Kconfig14
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c30
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c10
-rw-r--r--drivers/s390/scsi/zfcp_def.h7
-rw-r--r--drivers/s390/scsi/zfcp_erp.c8
-rw-r--r--drivers/s390/scsi/zfcp_ext.h1
-rw-r--r--drivers/s390/scsi/zfcp_fc.c9
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c29
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c13
-rw-r--r--drivers/sbus/char/jsflash.c26
-rw-r--r--drivers/scsi/3w-9xxx.c3
-rw-r--r--drivers/scsi/3w-xxxx.c5
-rw-r--r--drivers/scsi/3w-xxxx.h2
-rw-r--r--drivers/scsi/Kconfig42
-rw-r--r--drivers/scsi/Makefile4
-rw-r--r--drivers/scsi/NCR_D700.c2
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_constants.h155
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h1509
-rw-r--r--drivers/scsi/bnx2i/Kconfig7
-rw-r--r--drivers/scsi/bnx2i/Makefile3
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h771
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2405
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c438
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2064
-rw-r--r--drivers/scsi/bnx2i/bnx2i_sysfs.c142
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i.h1
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_iscsi.c26
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c23
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.h3
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c6
-rw-r--r--drivers/scsi/dpt/osd_util.h2
-rw-r--r--drivers/scsi/eata.c24
-rw-r--r--drivers/scsi/fcoe/fcoe.c95
-rw-r--r--drivers/scsi/fcoe/fcoe.h1
-rw-r--r--drivers/scsi/fcoe/libfcoe.c21
-rw-r--r--drivers/scsi/fnic/Makefile15
-rw-r--r--drivers/scsi/fnic/cq_desc.h78
-rw-r--r--drivers/scsi/fnic/cq_enet_desc.h167
-rw-r--r--drivers/scsi/fnic/cq_exch_desc.h182
-rw-r--r--drivers/scsi/fnic/fcpio.h780
-rw-r--r--drivers/scsi/fnic/fnic.h265
-rw-r--r--drivers/scsi/fnic/fnic_attrs.c56
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c742
-rw-r--r--drivers/scsi/fnic/fnic_io.h67
-rw-r--r--drivers/scsi/fnic/fnic_isr.c332
-rw-r--r--drivers/scsi/fnic/fnic_main.c943
-rw-r--r--drivers/scsi/fnic/fnic_res.c444
-rw-r--r--drivers/scsi/fnic/fnic_res.h197
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c1850
-rw-r--r--drivers/scsi/fnic/rq_enet_desc.h58
-rw-r--r--drivers/scsi/fnic/vnic_cq.c85
-rw-r--r--drivers/scsi/fnic/vnic_cq.h121
-rw-r--r--drivers/scsi/fnic/vnic_cq_copy.h62
-rw-r--r--drivers/scsi/fnic/vnic_dev.c690
-rw-r--r--drivers/scsi/fnic/vnic_dev.h161
-rw-r--r--drivers/scsi/fnic/vnic_devcmd.h281
-rw-r--r--drivers/scsi/fnic/vnic_intr.c60
-rw-r--r--drivers/scsi/fnic/vnic_intr.h118
-rw-r--r--drivers/scsi/fnic/vnic_nic.h69
-rw-r--r--drivers/scsi/fnic/vnic_resource.h61
-rw-r--r--drivers/scsi/fnic/vnic_rq.c196
-rw-r--r--drivers/scsi/fnic/vnic_rq.h235
-rw-r--r--drivers/scsi/fnic/vnic_scsi.h99
-rw-r--r--drivers/scsi/fnic/vnic_stats.h68
-rw-r--r--drivers/scsi/fnic/vnic_wq.c182
-rw-r--r--drivers/scsi/fnic/vnic_wq.h175
-rw-r--r--drivers/scsi/fnic/vnic_wq_copy.c117
-rw-r--r--drivers/scsi/fnic/vnic_wq_copy.h128
-rw-r--r--drivers/scsi/fnic/wq_enet_desc.h96
-rw-r--r--drivers/scsi/gdth_proc.c5
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c434
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h40
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c463
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.h4
-rw-r--r--drivers/scsi/ibmvscsi/viosrp.h68
-rw-r--r--drivers/scsi/ipr.c5
-rw-r--r--drivers/scsi/libfc/fc_exch.c4
-rw-r--r--drivers/scsi/libfc/fc_fcp.c2
-rw-r--r--drivers/scsi/libfc/fc_rport.c6
-rw-r--r--drivers/scsi/libiscsi.c468
-rw-r--r--drivers/scsi/libiscsi_tcp.c18
-rw-r--r--drivers/scsi/libsas/sas_expander.c16
-rw-r--r--drivers/scsi/libsas/sas_host_smp.c49
-rw-r--r--drivers/scsi/lpfc/lpfc.h123
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c250
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h63
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c15
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c21
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c275
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c1365
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h142
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h2141
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c5626
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h54
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c674
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c206
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c51
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c956
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c6683
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h29
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h467
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c62
-rw-r--r--drivers/scsi/megaraid.h2
-rw-r--r--drivers/scsi/megaraid/mbox_defs.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h7
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c32
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c363
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c59
-rw-r--r--drivers/scsi/mvsas.c3222
-rw-r--r--drivers/scsi/mvsas/Kconfig42
-rw-r--r--drivers/scsi/mvsas/Makefile32
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c793
-rw-r--r--drivers/scsi/mvsas/mv_64xx.h151
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c672
-rw-r--r--drivers/scsi/mvsas/mv_94xx.h222
-rw-r--r--drivers/scsi/mvsas/mv_chips.h280
-rw-r--r--drivers/scsi/mvsas/mv_defs.h502
-rw-r--r--drivers/scsi/mvsas/mv_init.c703
-rw-r--r--drivers/scsi/mvsas/mv_sas.c2154
-rw-r--r--drivers/scsi/mvsas/mv_sas.h406
-rw-r--r--drivers/scsi/osd/Kbuild25
-rwxr-xr-xdrivers/scsi/osd/Makefile37
-rw-r--r--drivers/scsi/osd/osd_initiator.c155
-rw-r--r--drivers/scsi/osd/osd_uld.c66
-rw-r--r--drivers/scsi/qla1280.c387
-rw-r--r--drivers/scsi/qla1280.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c227
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h45
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h6
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h43
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c206
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c55
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c240
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c244
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c118
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c294
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c47
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/scsi_error.c21
-rw-r--r--drivers/scsi/scsi_lib.c101
-rw-r--r--drivers/scsi/scsi_scan.c5
-rw-r--r--drivers/scsi/scsi_tgt_lib.c2
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c175
-rw-r--r--drivers/scsi/scsi_transport_sas.c4
-rw-r--r--drivers/scsi/sd.c71
-rw-r--r--drivers/scsi/sd_dif.c2
-rw-r--r--drivers/scsi/sg.c18
-rw-r--r--drivers/scsi/sr.c17
-rw-r--r--drivers/scsi/st.c8
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c66
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c49
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.h2
-rw-r--r--drivers/scsi/u14-34f.c22
-rw-r--r--drivers/serial/8250.c22
-rw-r--r--drivers/serial/8250_gsc.c4
-rw-r--r--drivers/serial/8250_pci.c3
-rw-r--r--drivers/serial/Kconfig10
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/amba-pl010.c4
-rw-r--r--drivers/serial/amba-pl011.c40
-rw-r--r--drivers/serial/bfin_5xx.c77
-rw-r--r--drivers/serial/bfin_sport_uart.c58
-rw-r--r--drivers/serial/icom.c22
-rw-r--r--drivers/serial/imx.c309
-rw-r--r--drivers/serial/jsm/jsm.h1
-rw-r--r--drivers/serial/jsm/jsm_tty.c14
-rw-r--r--drivers/serial/mpc52xx_uart.c2
-rw-r--r--drivers/serial/sh-sci.c388
-rw-r--r--drivers/serial/sh-sci.h42
-rw-r--r--drivers/serial/timbuart.c526
-rw-r--r--drivers/serial/timbuart.h58
-rw-r--r--drivers/sh/intc.c11
-rw-r--r--drivers/spi/Kconfig13
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/amba-pl022.c1866
-rw-r--r--drivers/spi/spi_s3c24xx_gpio.c1
-rw-r--r--drivers/ssb/embedded.c1
-rw-r--r--drivers/staging/go7007/go7007.txt4
-rw-r--r--drivers/staging/panel/lcd-panel-cgram.txt2
-rw-r--r--drivers/staging/rt2860/common/mlme.c2
-rw-r--r--drivers/staging/rt2870/common/mlme.c2
-rw-r--r--drivers/staging/rt3070/common/mlme.c2
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c2
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/class/cdc-acm.c445
-rw-r--r--drivers/usb/class/cdc-acm.h5
-rw-r--r--drivers/usb/core/inode.c5
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c5
-rw-r--r--drivers/usb/host/isp1760-hcd.c24
-rw-r--r--drivers/usb/host/ohci-ep93xx.c13
-rw-r--r--drivers/usb/serial/belkin_sa.c6
-rw-r--r--drivers/usb/serial/ch341.c46
-rw-r--r--drivers/usb/serial/console.c6
-rw-r--r--drivers/usb/serial/cp210x.c253
-rw-r--r--drivers/usb/serial/cyberjack.c6
-rw-r--r--drivers/usb/serial/cypress_m8.c81
-rw-r--r--drivers/usb/serial/digi_acceleport.c75
-rw-r--r--drivers/usb/serial/empeg.c6
-rw-r--r--drivers/usb/serial/ftdi_sio.c158
-rw-r--r--drivers/usb/serial/garmin_gps.c3
-rw-r--r--drivers/usb/serial/generic.c3
-rw-r--r--drivers/usb/serial/io_edgeport.c10
-rw-r--r--drivers/usb/serial/io_ti.c5
-rw-r--r--drivers/usb/serial/ipaq.c6
-rw-r--r--drivers/usb/serial/ipw.c18
-rw-r--r--drivers/usb/serial/ir-usb.c6
-rw-r--r--drivers/usb/serial/iuu_phoenix.c102
-rw-r--r--drivers/usb/serial/keyspan.c13
-rw-r--r--drivers/usb/serial/keyspan.h8
-rw-r--r--drivers/usb/serial/keyspan_pda.c48
-rw-r--r--drivers/usb/serial/kl5kusb105.c6
-rw-r--r--drivers/usb/serial/kobil_sct.c9
-rw-r--r--drivers/usb/serial/mct_u232.c37
-rw-r--r--drivers/usb/serial/mos7720.c3
-rw-r--r--drivers/usb/serial/mos7840.c48
-rw-r--r--drivers/usb/serial/navman.c3
-rw-r--r--drivers/usb/serial/omninet.c6
-rw-r--r--drivers/usb/serial/opticon.c3
-rw-r--r--drivers/usb/serial/option.c68
-rw-r--r--drivers/usb/serial/oti6858.c57
-rw-r--r--drivers/usb/serial/pl2303.c79
-rw-r--r--drivers/usb/serial/sierra.c351
-rw-r--r--drivers/usb/serial/spcp8x5.c85
-rw-r--r--drivers/usb/serial/symbolserial.c3
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c6
-rw-r--r--drivers/usb/serial/usb-serial.c145
-rw-r--r--drivers/usb/serial/visor.c6
-rw-r--r--drivers/usb/serial/whiteheat.c33
-rw-r--r--drivers/usb/storage/scsiglue.c4
-rw-r--r--drivers/video/Kconfig14
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/amba-clcd.c4
-rw-r--r--drivers/video/atmel_lcdfb.c10
-rw-r--r--drivers/video/aty/aty128fb.c2
-rw-r--r--drivers/video/console/vgacon.c5
-rw-r--r--drivers/video/cyber2000fb.c9
-rw-r--r--drivers/video/hitfb.c4
-rw-r--r--drivers/video/mx3fb.c4
-rw-r--r--drivers/video/omap/dispc.c14
-rw-r--r--drivers/video/omap/hwa742.c26
-rw-r--r--drivers/video/omap/rfbi.c8
-rw-r--r--drivers/video/pxa168fb.c803
-rw-r--r--drivers/video/pxa168fb.h558
-rw-r--r--drivers/video/s3c-fb.c12
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c7
-rw-r--r--drivers/video/uvesafb.c10
-rw-r--r--drivers/virtio/virtio.c29
-rw-r--r--drivers/virtio/virtio_balloon.c27
-rw-r--r--drivers/virtio/virtio_pci.c307
-rw-r--r--drivers/virtio/virtio_ring.c102
-rw-r--r--drivers/w1/Kconfig2
-rw-r--r--drivers/w1/masters/Kconfig6
-rw-r--r--drivers/watchdog/Kconfig12
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/iop_wdt.c2
-rw-r--r--drivers/watchdog/orion_wdt.c (renamed from drivers/watchdog/orion5x_wdt.c)120
-rw-r--r--drivers/xen/Kconfig20
-rw-r--r--drivers/xen/Makefile4
-rw-r--r--drivers/xen/events.c20
-rw-r--r--drivers/xen/evtchn.c507
-rw-r--r--drivers/xen/manage.c25
-rw-r--r--drivers/xen/sys-hypervisor.c445
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c61
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c2
-rw-r--r--drivers/xen/xenfs/super.c19
725 files changed, 79188 insertions, 17371 deletions
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index 95650f83ce2..bc46de3d967 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -116,9 +116,6 @@ int acpi_pci_bind(struct acpi_device *device)
 	struct acpi_pci_data *pdata;
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	acpi_handle handle;
-	struct pci_dev *dev;
-	struct pci_bus *bus;
-
 
 	if (!device || !device->parent)
 		return -EINVAL;
@@ -176,20 +173,9 @@ int acpi_pci_bind(struct acpi_device *device)
 	 * Locate matching device in PCI namespace.  If it doesn't exist
 	 * this typically means that the device isn't currently inserted
 	 * (e.g. docking station, port replicator, etc.).
-	 * We cannot simply search the global pci device list, since
-	 * PCI devices are added to the global pci list when the root
-	 * bridge start ops are run, which may not have happened yet.
 	 */
-	bus = pci_find_bus(data->id.segment, data->id.bus);
-	if (bus) {
-		list_for_each_entry(dev, &bus->devices, bus_list) {
-			if (dev->devfn == PCI_DEVFN(data->id.device,
-						    data->id.function)) {
-				data->dev = dev;
-				break;
-			}
-		}
-	}
+	data->dev = pci_get_slot(pdata->bus,
+				 PCI_DEVFN(data->id.device, data->id.function));
 	if (!data->dev) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 			"Device %04x:%02x:%02x.%d not present in PCI namespace\n",
@@ -259,9 +245,10 @@ int acpi_pci_bind(struct acpi_device *device)
 
       end:
 	kfree(buffer.pointer);
-	if (result)
+	if (result) {
+		pci_dev_put(data->dev);
 		kfree(data);
-
+	}
 	return result;
 }
 
@@ -303,6 +290,7 @@ static int acpi_pci_unbind(struct acpi_device *device)
 	if (data->dev->subordinate) {
 		acpi_pci_irq_del_prt(data->id.segment, data->bus->number);
 	}
+	pci_dev_put(data->dev);
 	kfree(data);
 
       end:
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 51b9f8280f8..2faa9e2ac89 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -401,7 +401,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
 		/* Interrupt Line values above 0xF are forbidden */
 		if (dev->irq > 0 && (dev->irq <= 0xF)) {
 			printk(" - using IRQ %d\n", dev->irq);
-			acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE,
+			acpi_register_gsi(&dev->dev, dev->irq,
+					  ACPI_LEVEL_SENSITIVE,
 					  ACPI_ACTIVE_LOW);
 			return 0;
 		} else {
@@ -410,7 +411,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
 		}
 	}
 
-	rc = acpi_register_gsi(gsi, triggering, polarity);
+	rc = acpi_register_gsi(&dev->dev, gsi, triggering, polarity);
 	if (rc < 0) {
 		dev_warn(&dev->dev, "PCI INT %c: failed to register GSI\n",
 			 pin_name(pin));
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 45ad3288c5f..23f0fb84f1c 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -844,7 +844,7 @@ static int acpi_processor_add(struct acpi_device *device)
 	if (!pr)
 		return -ENOMEM;
 
-	if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
+	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
 		kfree(pr);
 		return -ENOMEM;
 	}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 72069ba5f1e..10a2d913635 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -148,6 +148,9 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
 	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
 		return;
 
+	if (boot_cpu_has(X86_FEATURE_AMDC1E))
+		type = ACPI_STATE_C1;
+
 	/*
 	 * Check, if one of the previous states already marked the lapic
 	 * unstable
@@ -611,6 +614,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 		switch (cx->type) {
 		case ACPI_STATE_C1:
 			cx->valid = 1;
+			acpi_timer_check_state(i, pr, cx);
 			break;
 
 		case ACPI_STATE_C2:
@@ -830,11 +834,12 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 
 	/* Do not access any ACPI IO ports in suspend path */
 	if (acpi_idle_suspend) {
-		acpi_safe_halt();
 		local_irq_enable();
+		cpu_relax();
 		return 0;
 	}
 
+	acpi_state_timer_broadcast(pr, cx, 1);
 	kt1 = ktime_get_real();
 	acpi_idle_do_entry(cx);
 	kt2 = ktime_get_real();
@@ -842,6 +847,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 
 	local_irq_enable();
 	cx->usage++;
+	acpi_state_timer_broadcast(pr, cx, 0);
 
 	return idle_time;
 }
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index cafb41000f6..60e543d3234 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -309,9 +309,15 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
309 (u32) px->bus_master_latency, 309 (u32) px->bus_master_latency,
310 (u32) px->control, (u32) px->status)); 310 (u32) px->control, (u32) px->status));
311 311
312 if (!px->core_frequency) { 312 /*
313 printk(KERN_ERR PREFIX 313 * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
314 "Invalid _PSS data: freq is zero\n"); 314 */
315 if (!px->core_frequency ||
316 ((u32)(px->core_frequency * 1000) !=
317 (px->core_frequency * 1000))) {
318 printk(KERN_ERR FW_BUG PREFIX
319 "Invalid BIOS _PSS frequency: 0x%llx MHz\n",
320 px->core_frequency);
315 result = -EFAULT; 321 result = -EFAULT;
316 kfree(pr->performance->states); 322 kfree(pr->performance->states);
317 goto end; 323 goto end;
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 7f16f5f8e7d..227543789ba 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -840,7 +840,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 	state = acpi_get_throttling_state(pr, value);
 	if (state == -1) {
 		ACPI_WARNING((AE_INFO,
-			"Invalid throttling state, reset\n"));
+			"Invalid throttling state, reset"));
 		state = 0;
 		ret = acpi_processor_set_throttling(pr, state);
 		if (ret)
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 810cca90ca7..1bdfb37377e 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -570,6 +570,22 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710Z"),
 		},
 	},
+	{
+	 .callback = video_set_bqc_offset,
+	 .ident = "eMachines E510",
+	 .matches = {
+		DMI_MATCH(DMI_BOARD_VENDOR, "EMACHINES"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "eMachines E510"),
+		},
+	},
+	{
+	 .callback = video_set_bqc_offset,
+	 .ident = "Acer Aspire 5315",
+	 .matches = {
+		DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"),
+		},
+	},
 	{}
 };
 
@@ -2334,7 +2350,7 @@ static int __init acpi_video_init(void)
 	return acpi_video_register();
 }
 
-void __exit acpi_video_exit(void)
+void acpi_video_exit(void)
 {
 
 	acpi_bus_unregister_driver(&acpi_video_bus);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 9120717c070..2aa1908e5ce 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -535,6 +535,15 @@ config PATA_OPTIDMA
 
 	  If unsure, say N.
 
+config PATA_PALMLD
+	tristate "Palm LifeDrive PATA support"
+	depends on MACH_PALMLD
+	help
+	  This option enables support for Palm LifeDrive's internal ATA
+	  port via the new ATA layer.
+
+	  If unsure, say N.
+
 config PATA_PCMCIA
 	tristate "PCMCIA PATA support"
 	depends on PCMCIA
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 7f1ecf99528..1558059874f 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_PATA_MPC52xx)	+= pata_mpc52xx.o
 obj-$(CONFIG_PATA_MARVELL)	+= pata_marvell.o
 obj-$(CONFIG_PATA_MPIIX)	+= pata_mpiix.o
 obj-$(CONFIG_PATA_OLDPIIX)	+= pata_oldpiix.o
+obj-$(CONFIG_PATA_PALMLD)	+= pata_palmld.o
 obj-$(CONFIG_PATA_PCMCIA)	+= pata_pcmcia.o
 obj-$(CONFIG_PATA_PDC2027X)	+= pata_pdc2027x.o
 obj-$(CONFIG_PATA_PDC_OLD)	+= pata_pdc202xx_old.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 08186ecbaf8..15a23031833 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -77,8 +77,6 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
 			      size_t size);
 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
 					ssize_t size);
-#define MAX_SLOTS 8
-#define MAX_RETRY 15
 
 enum {
 	AHCI_PCI_BAR		= 5,
@@ -220,6 +218,7 @@ enum {
 	AHCI_HFLAG_NO_HOTPLUG	= (1 << 7), /* ignore PxSERR.DIAG.N */
 	AHCI_HFLAG_SECT255	= (1 << 8), /* max 255 sectors */
 	AHCI_HFLAG_YES_NCQ	= (1 << 9), /* force NCQ cap on */
+	AHCI_HFLAG_NO_SUSPEND	= (1 << 10), /* don't suspend */
 
 	/* ap->flags bits */
 
@@ -230,6 +229,10 @@ enum {
 
 	ICH_MAP			= 0x90, /* ICH MAP register */
 
+	/* em constants */
+	EM_MAX_SLOTS		= 8,
+	EM_MAX_RETRY		= 5,
+
 	/* em_ctl bits */
 	EM_CTL_RST		= (1 << 9), /* Reset */
 	EM_CTL_TM		= (1 << 8), /* Transmit Message */
@@ -281,8 +284,8 @@ struct ahci_port_priv {
 	unsigned int		ncq_saw_dmas:1;
 	unsigned int		ncq_saw_sdb:1;
 	u32			intr_mask;	/* interrupts to enable */
-	struct ahci_em_priv	em_priv[MAX_SLOTS];/* enclosure management info
-						    * per PM slot */
+	/* enclosure management info per PM slot */
+	struct ahci_em_priv	em_priv[EM_MAX_SLOTS];
 };
 
 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
@@ -312,7 +315,6 @@ static void ahci_error_handler(struct ata_port *ap);
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
 static int ahci_port_resume(struct ata_port *ap);
 static void ahci_dev_config(struct ata_device *dev);
-static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
 			       u32 opts);
 #ifdef CONFIG_PM
@@ -403,14 +405,14 @@ static struct ata_port_operations ahci_sb600_ops = {
 #define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)
 
 static const struct ata_port_info ahci_port_info[] = {
-	/* board_ahci */
+	[board_ahci] =
 	{
 		.flags		= AHCI_FLAG_COMMON,
 		.pio_mask	= ATA_PIO4,
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
 	},
-	/* board_ahci_vt8251 */
+	[board_ahci_vt8251] =
 	{
 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
 		.flags		= AHCI_FLAG_COMMON,
@@ -418,7 +420,7 @@ static const struct ata_port_info ahci_port_info[] = {
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_vt8251_ops,
 	},
-	/* board_ahci_ign_iferr */
+	[board_ahci_ign_iferr] =
 	{
 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_IRQ_IF_ERR),
 		.flags		= AHCI_FLAG_COMMON,
@@ -426,17 +428,16 @@ static const struct ata_port_info ahci_port_info[] = {
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
 	},
-	/* board_ahci_sb600 */
+	[board_ahci_sb600] =
 	{
 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL |
-				 AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
-				 AHCI_HFLAG_SECT255),
+				 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255),
 		.flags		= AHCI_FLAG_COMMON,
 		.pio_mask	= ATA_PIO4,
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_sb600_ops,
 	},
-	/* board_ahci_mv */
+	[board_ahci_mv] =
 	{
 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
 				 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
@@ -446,7 +447,7 @@ static const struct ata_port_info ahci_port_info[] = {
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
 	},
-	/* board_ahci_sb700, for SB700 and SB800 */
+	[board_ahci_sb700] =	/* for SB700 and SB800 */
 	{
 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL),
 		.flags		= AHCI_FLAG_COMMON,
@@ -454,7 +455,7 @@ static const struct ata_port_info ahci_port_info[] = {
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_sb600_ops,
 	},
-	/* board_ahci_mcp65 */
+	[board_ahci_mcp65] =
 	{
 		AHCI_HFLAGS	(AHCI_HFLAG_YES_NCQ),
 		.flags		= AHCI_FLAG_COMMON,
@@ -462,7 +463,7 @@ static const struct ata_port_info ahci_port_info[] = {
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
 	},
-	/* board_ahci_nopmp */
+	[board_ahci_nopmp] =
 	{
 		AHCI_HFLAGS	(AHCI_HFLAG_NO_PMP),
 		.flags		= AHCI_FLAG_COMMON,
@@ -1140,12 +1141,12 @@ static void ahci_start_port(struct ata_port *ap)
 			emp = &pp->em_priv[link->pmp];
 
 			/* EM Transmit bit maybe busy during init */
-			for (i = 0; i < MAX_RETRY; i++) {
+			for (i = 0; i < EM_MAX_RETRY; i++) {
 				rc = ahci_transmit_led_message(ap,
 							       emp->led_state,
 							       4);
 				if (rc == -EBUSY)
-					udelay(100);
+					msleep(1);
 				else
 					break;
 			}
@@ -1339,7 +1340,7 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
 
 	/* get the slot number from the message */
 	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
-	if (pmp < MAX_SLOTS)
+	if (pmp < EM_MAX_SLOTS)
 		emp = &pp->em_priv[pmp];
 	else
 		return -EINVAL;
@@ -1407,7 +1408,7 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
 
 	/* get the slot number from the message */
 	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
-	if (pmp < MAX_SLOTS)
+	if (pmp < EM_MAX_SLOTS)
 		emp = &pp->em_priv[pmp];
 	else
 		return -EINVAL;
@@ -2316,9 +2317,17 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	struct ahci_host_priv *hpriv = host->private_data;
 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
 	u32 ctl;
 
+	if (mesg.event & PM_EVENT_SUSPEND &&
+	    hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "BIOS update required for suspend/resume\n");
+		return -EIO;
+	}
+
 	if (mesg.event & PM_EVENT_SLEEP) {
 		/* AHCI spec rev1.1 section 8.3.3:
 		 * Software must disable interrupts prior to requesting a
@@ -2575,6 +2584,51 @@ static void ahci_p5wdh_workaround(struct ata_host *host)
 	}
 }
 
+/*
+ * SB600 ahci controller on ASUS M2A-VM can't do 64bit DMA with older
+ * BIOS.  The oldest version known to be broken is 0901 and working is
+ * 1501 which was released on 2007-10-26.  Force 32bit DMA on anything
+ * older than 1501.  Please read bko#9412 for more info.
+ */
+static bool ahci_asus_m2a_vm_32bit_only(struct pci_dev *pdev)
+{
+	static const struct dmi_system_id sysids[] = {
+		{
+			.ident = "ASUS M2A-VM",
+			.matches = {
+				DMI_MATCH(DMI_BOARD_VENDOR,
+					  "ASUSTeK Computer INC."),
+				DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
+			},
+		},
+		{ }
+	};
+	const char *cutoff_mmdd = "10/26";
+	const char *date;
+	int year;
+
+	if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
+	    !dmi_check_system(sysids))
+		return false;
+
+	/*
+	 * Argh.... both version and date are free form strings.
+	 * Let's hope they're using the same date format across
+	 * different versions.
+	 */
+	date = dmi_get_system_info(DMI_BIOS_DATE);
+	year = dmi_get_year(DMI_BIOS_DATE);
+	if (date && strlen(date) >= 10 && date[2] == '/' && date[5] == '/' &&
+	    (year > 2007 ||
+	     (year == 2007 && strncmp(date, cutoff_mmdd, 5) >= 0)))
+		return false;
+
+	dev_printk(KERN_WARNING, &pdev->dev, "ASUS M2A-VM: BIOS too old, "
+		   "forcing 32bit DMA, update BIOS\n");
+
+	return true;
+}
+
 static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
 {
 	static const struct dmi_system_id broken_systems[] = {
@@ -2610,6 +2664,63 @@ static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
 	return false;
 }
 
+static bool ahci_broken_suspend(struct pci_dev *pdev)
+{
+	static const struct dmi_system_id sysids[] = {
+		/*
+		 * On HP dv[4-6] and HDX18 with earlier BIOSen, link
+		 * to the harddisk doesn't become online after
+		 * resuming from STR.  Warn and fail suspend.
+		 */
+		{
+			.ident = "dv4",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME,
+					  "HP Pavilion dv4 Notebook PC"),
+			},
+			.driver_data = "F.30", /* cutoff BIOS version */
+		},
+		{
+			.ident = "dv5",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME,
+					  "HP Pavilion dv5 Notebook PC"),
+			},
+			.driver_data = "F.16", /* cutoff BIOS version */
+		},
+		{
+			.ident = "dv6",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME,
+					  "HP Pavilion dv6 Notebook PC"),
+			},
+			.driver_data = "F.21", /* cutoff BIOS version */
+		},
+		{
+			.ident = "HDX18",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME,
+					  "HP HDX18 Notebook PC"),
+			},
+			.driver_data = "F.23", /* cutoff BIOS version */
+		},
+		{ }	/* terminate list */
+	};
+	const struct dmi_system_id *dmi = dmi_first_match(sysids);
+	const char *ver;
+
+	if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
+		return false;
+
+	ver = dmi_get_system_info(DMI_BIOS_VERSION);
+
+	return !ver || strcmp(ver, dmi->driver_data) < 0;
+}
+
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
@@ -2678,6 +2789,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
 		hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
 
+	/* apply ASUS M2A_VM quirk */
+	if (ahci_asus_m2a_vm_32bit_only(pdev))
+		hpriv->flags |= AHCI_HFLAG_32BIT_ONLY;
+
 	if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
 		pci_enable_msi(pdev);
 
@@ -2715,6 +2830,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			   "quirky BIOS, skipping spindown on poweroff\n");
 	}
 
+	if (ahci_broken_suspend(pdev)) {
+		hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
+		dev_printk(KERN_WARNING, &pdev->dev,
+			   "BIOS update required for suspend/resume\n");
+	}
+
 	/* CAP.NP sometimes indicate the index of the last enabled
 	 * port, at other times, that of the last possible port, so
 	 * determining the maximum port number requires looking at
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index d51a17c0f59..d0a14cf2bd7 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -223,10 +223,8 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	/* ICH8 Mobile PATA Controller */
 	{ 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
 
-	/* NOTE: The following PCI ids must be kept in sync with the
-	 * list in drivers/pci/quirks.c.
-	 */
-
+	/* SATA ports */
+
 	/* 82801EB (ICH5) */
 	{ 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
 	/* 82801EB (ICH5) */
@@ -1455,6 +1453,15 @@ static bool piix_broken_system_poweroff(struct pci_dev *pdev)
 			/* PCI slot number of the controller */
 			.driver_data = (void *)0x1FUL,
 		},
+		{
+			.ident = "HP Compaq nc6000",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6000"),
+			},
+			/* PCI slot number of the controller */
+			.driver_data = (void *)0x1FUL,
+		},
 
 		{ }	/* terminate list */
 	};
@@ -1500,8 +1507,8 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
 	dev_printk(KERN_DEBUG, &pdev->dev,
 		   "version " DRV_VERSION "\n");
 
-	/* no hotplugging support (FIXME) */
-	if (!in_module_init)
+	/* no hotplugging support for later devices (FIXME) */
+	if (!in_module_init && ent->driver_data >= ich5_sata)
 		return -ENODEV;
 
 	if (piix_broken_system_poweroff(pdev)) {
@@ -1582,6 +1589,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
 		host->ports[1]->mwdma_mask = 0;
 		host->ports[1]->udma_mask = 0;
 	}
+	host->flags |= ATA_HOST_PARALLEL_SCAN;
 
 	pci_set_master(pdev);
 	return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht);
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 6273d98d00e..ac176da1f94 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -748,9 +748,9 @@ static int ata_acpi_run_tf(struct ata_device *dev,
748/** 748/**
749 * ata_acpi_exec_tfs - get then write drive taskfile settings 749 * ata_acpi_exec_tfs - get then write drive taskfile settings
750 * @dev: target ATA device 750 * @dev: target ATA device
751 * @nr_executed: out paramter for the number of executed commands 751 * @nr_executed: out parameter for the number of executed commands
752 * 752 *
753 * Evaluate _GTF and excute returned taskfiles. 753 * Evaluate _GTF and execute returned taskfiles.
754 * 754 *
755 * LOCKING: 755 * LOCKING:
756 * EH context. 756 * EH context.
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c9242301cfa..ca4d208ddf3 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5031,7 +5031,6 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5031{ 5031{
5032 int nr_done = 0; 5032 int nr_done = 0;
5033 u32 done_mask; 5033 u32 done_mask;
5034 int i;
5035 5034
5036 done_mask = ap->qc_active ^ qc_active; 5035 done_mask = ap->qc_active ^ qc_active;
5037 5036
@@ -5041,16 +5040,16 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5041 return -EINVAL; 5040 return -EINVAL;
5042 } 5041 }
5043 5042
5044 for (i = 0; i < ATA_MAX_QUEUE; i++) { 5043 while (done_mask) {
5045 struct ata_queued_cmd *qc; 5044 struct ata_queued_cmd *qc;
5045 unsigned int tag = __ffs(done_mask);
5046 5046
5047 if (!(done_mask & (1 << i))) 5047 qc = ata_qc_from_tag(ap, tag);
5048 continue; 5048 if (qc) {
5049
5050 if ((qc = ata_qc_from_tag(ap, i))) {
5051 ata_qc_complete(qc); 5049 ata_qc_complete(qc);
5052 nr_done++; 5050 nr_done++;
5053 } 5051 }
5052 done_mask &= ~(1 << tag);
5054 } 5053 }
5055 5054
5056 return nr_done; 5055 return nr_done;
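The rewritten completion loop visits only the tags whose bits are set in done_mask instead of scanning all ATA_MAX_QUEUE slots. A standalone sketch of the set-bit iteration idiom (the mask value is invented for illustration):

    unsigned long done_mask = 0x122; /* say tags 1, 5 and 8 completed */

    while (done_mask) {
        unsigned int tag = __ffs(done_mask); /* index of lowest set bit */

        /* complete the queued command for this tag here */
        done_mask &= ~(1UL << tag); /* clear the bit, keep going */
    }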
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 94919ad03df..fa22f94ca41 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2864,7 +2864,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
2864/** 2864/**
2865 * ata_set_mode - Program timings and issue SET FEATURES - XFER 2865 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2866 * @link: link on which timings will be programmed 2866 * @link: link on which timings will be programmed
2867 * @r_failed_dev: out paramter for failed device 2867 * @r_failed_dev: out parameter for failed device
2868 * 2868 *
2869 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 2869 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2870 * ata_set_mode() fails, pointer to the failing device is 2870 * ata_set_mode() fails, pointer to the failing device is
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 342316064e9..d0dfeef55db 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1084,7 +1084,7 @@ static int atapi_drain_needed(struct request *rq)
1084 if (likely(!blk_pc_request(rq))) 1084 if (likely(!blk_pc_request(rq)))
1085 return 0; 1085 return 0;
1086 1086
1087 if (!rq->data_len || (rq->cmd_flags & REQ_RW)) 1087 if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
1088 return 0; 1088 return 0;
1089 1089
1090 return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC; 1090 return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
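The data_len change is part of the tree-wide move to block-request accessors (the DAC960 hunks further down belong to the same cleanup). The mapping, as far as this patch shows it:

    /* rq->data_len    -> blk_rq_bytes(rq)    total data length
     * rq->sector      -> blk_rq_pos(rq)      starting sector
     * rq->nr_sectors  -> blk_rq_sectors(rq)  length in sectors
     */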
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index bb18415d3d6..bbbb1fab175 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -727,17 +727,23 @@ unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
727 else 727 else
728 iowrite16_rep(data_addr, buf, words); 728 iowrite16_rep(data_addr, buf, words);
729 729
730 /* Transfer trailing 1 byte, if any. */ 730 /* Transfer trailing byte, if any. */
731 if (unlikely(buflen & 0x01)) { 731 if (unlikely(buflen & 0x01)) {
732 __le16 align_buf[1] = { 0 }; 732 unsigned char pad[2];
733 unsigned char *trailing_buf = buf + buflen - 1;
734 733
734 /* Point buf to the tail of buffer */
735 buf += buflen - 1;
736
737 /*
738 * Use io*16_rep() accessors here as well to avoid pointlessly
739 * swapping bytes to and fro on the big endian machines...
740 */
735 if (rw == READ) { 741 if (rw == READ) {
736 align_buf[0] = cpu_to_le16(ioread16(data_addr)); 742 ioread16_rep(data_addr, pad, 1);
737 memcpy(trailing_buf, align_buf, 1); 743 *buf = pad[0];
738 } else { 744 } else {
739 memcpy(align_buf, trailing_buf, 1); 745 pad[0] = *buf;
740 iowrite16(le16_to_cpu(align_buf[0]), data_addr); 746 iowrite16_rep(data_addr, pad, 1);
741 } 747 }
742 words++; 748 words++;
743 } 749 }
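Routing the odd trailing byte through the same io*16_rep() accessors as the bulk loop avoids the swap that ioread16()/iowrite16() perform on big-endian hosts. The write half of the pad-buffer idiom, in isolation:

    unsigned char pad[2]; /* pad[1] is don't-care on write */

    pad[0] = *buf; /* the last, unpaired byte */
    iowrite16_rep(data_addr, pad, 1); /* one raw 16-bit transfer, no swapping */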
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 751b7ea4816..fc9c5d6d7d8 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -497,14 +497,16 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
497 }; 497 };
498 /* Revision 0x20 added DMA */ 498 /* Revision 0x20 added DMA */
499 static const struct ata_port_info info_20 = { 499 static const struct ata_port_info info_20 = {
500 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, 500 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
501 ATA_FLAG_IGN_SIMPLEX,
501 .pio_mask = ATA_PIO4, 502 .pio_mask = ATA_PIO4,
502 .mwdma_mask = ATA_MWDMA2, 503 .mwdma_mask = ATA_MWDMA2,
503 .port_ops = &ali_20_port_ops 504 .port_ops = &ali_20_port_ops
504 }; 505 };
505 /* Revision 0x20 with support logic added UDMA */ 506 /* Revision 0x20 with support logic added UDMA */
506 static const struct ata_port_info info_20_udma = { 507 static const struct ata_port_info info_20_udma = {
507 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, 508 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
509 ATA_FLAG_IGN_SIMPLEX,
508 .pio_mask = ATA_PIO4, 510 .pio_mask = ATA_PIO4,
509 .mwdma_mask = ATA_MWDMA2, 511 .mwdma_mask = ATA_MWDMA2,
510 .udma_mask = ATA_UDMA2, 512 .udma_mask = ATA_UDMA2,
@@ -512,7 +514,8 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
512 }; 514 };
513 /* Revision 0xC2 adds UDMA66 */ 515 /* Revision 0xC2 adds UDMA66 */
514 static const struct ata_port_info info_c2 = { 516 static const struct ata_port_info info_c2 = {
515 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, 517 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
518 ATA_FLAG_IGN_SIMPLEX,
516 .pio_mask = ATA_PIO4, 519 .pio_mask = ATA_PIO4,
517 .mwdma_mask = ATA_MWDMA2, 520 .mwdma_mask = ATA_MWDMA2,
518 .udma_mask = ATA_UDMA4, 521 .udma_mask = ATA_UDMA4,
@@ -520,7 +523,8 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
520 }; 523 };
521 /* Revision 0xC3 is UDMA66 for now */ 524 /* Revision 0xC3 is UDMA66 for now */
522 static const struct ata_port_info info_c3 = { 525 static const struct ata_port_info info_c3 = {
523 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, 526 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
527 ATA_FLAG_IGN_SIMPLEX,
524 .pio_mask = ATA_PIO4, 528 .pio_mask = ATA_PIO4,
525 .mwdma_mask = ATA_MWDMA2, 529 .mwdma_mask = ATA_MWDMA2,
526 .udma_mask = ATA_UDMA4, 530 .udma_mask = ATA_UDMA4,
@@ -528,7 +532,8 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
528 }; 532 };
529 /* Revision 0xC4 is UDMA100 */ 533 /* Revision 0xC4 is UDMA100 */
530 static const struct ata_port_info info_c4 = { 534 static const struct ata_port_info info_c4 = {
531 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48, 535 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 |
536 ATA_FLAG_IGN_SIMPLEX,
532 .pio_mask = ATA_PIO4, 537 .pio_mask = ATA_PIO4,
533 .mwdma_mask = ATA_MWDMA2, 538 .mwdma_mask = ATA_MWDMA2,
534 .udma_mask = ATA_UDMA5, 539 .udma_mask = ATA_UDMA5,
@@ -536,7 +541,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
536 }; 541 };
537 /* Revision 0xC5 is UDMA133 with LBA48 DMA */ 542 /* Revision 0xC5 is UDMA133 with LBA48 DMA */
538 static const struct ata_port_info info_c5 = { 543 static const struct ata_port_info info_c5 = {
539 .flags = ATA_FLAG_SLAVE_POSS, 544 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_IGN_SIMPLEX,
540 .pio_mask = ATA_PIO4, 545 .pio_mask = ATA_PIO4,
541 .mwdma_mask = ATA_MWDMA2, 546 .mwdma_mask = ATA_MWDMA2,
542 .udma_mask = ATA_UDMA6, 547 .udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 2085e0a3a05..2a6412f5d11 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -22,7 +22,7 @@
22#include <linux/ata.h> 22#include <linux/ata.h>
23 23
24#define DRV_NAME "pata_efar" 24#define DRV_NAME "pata_efar"
25#define DRV_VERSION "0.4.4" 25#define DRV_VERSION "0.4.5"
26 26
27/** 27/**
28 * efar_pre_reset - Enable bits 28 * efar_pre_reset - Enable bits
@@ -98,18 +98,17 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
98 { 2, 1 }, 98 { 2, 1 },
99 { 2, 3 }, }; 99 { 2, 3 }, };
100 100
101 if (pio > 2) 101 if (pio > 1)
102 control |= 1; /* TIME1 enable */ 102 control |= 1; /* TIME */
103 if (ata_pio_need_iordy(adev)) /* PIO 3/4 require IORDY */ 103 if (ata_pio_need_iordy(adev)) /* PIO 3/4 require IORDY */
104 control |= 2; /* IE enable */ 104 control |= 2; /* IE */
105 /* Intel specifies that the PPE functionality is for disk only */ 105 /* Intel specifies that the prefetch/posting is for disk only */
106 if (adev->class == ATA_DEV_ATA) 106 if (adev->class == ATA_DEV_ATA)
107 control |= 4; /* PPE enable */ 107 control |= 4; /* PPE */
108 108
109 pci_read_config_word(dev, idetm_port, &idetm_data); 109 pci_read_config_word(dev, idetm_port, &idetm_data);
110 110
111 /* Enable PPE, IE and TIME as appropriate */ 111 /* Set PPE, IE, and TIME as appropriate */
112
113 if (adev->devno == 0) { 112 if (adev->devno == 0) {
114 idetm_data &= 0xCCF0; 113 idetm_data &= 0xCCF0;
115 idetm_data |= control; 114 idetm_data |= control;
@@ -129,7 +128,7 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
129 pci_write_config_byte(dev, 0x44, slave_data); 128 pci_write_config_byte(dev, 0x44, slave_data);
130 } 129 }
131 130
132 idetm_data |= 0x4000; /* Ensure SITRE is enabled */ 131 idetm_data |= 0x4000; /* Ensure SITRE is set */
133 pci_write_config_word(dev, idetm_port, idetm_data); 132 pci_write_config_word(dev, idetm_port, idetm_data);
134} 133}
135 134
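Besides the comment rewording, the functional fix here is the threshold: fast timing (the TIME bit) applies from PIO mode 2 upward, so the test must be pio > 1, not pio > 2. The control nibble then composes exactly as in the hunk:

    u32 control = 0;

    if (pio > 1)
        control |= 1; /* TIME: fast timing, PIO 2 and above */
    if (ata_pio_need_iordy(adev))
        control |= 2; /* IE: IORDY sampling, required by PIO 3/4 */
    if (adev->class == ATA_DEV_ATA)
        control |= 4; /* PPE: prefetch/posting, disks only per Intel */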
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index f72c6c5b820..6932e56d179 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -48,6 +48,7 @@
48 * 48 *
49 */ 49 */
50 50
51#include <linux/async.h>
51#include <linux/kernel.h> 52#include <linux/kernel.h>
52#include <linux/module.h> 53#include <linux/module.h>
53#include <linux/pci.h> 54#include <linux/pci.h>
@@ -1028,6 +1029,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
1028 &legacy_sht); 1029 &legacy_sht);
1029 if (ret) 1030 if (ret)
1030 goto fail; 1031 goto fail;
1032 async_synchronize_full();
1031 ld->platform_dev = pdev; 1033 ld->platform_dev = pdev;
1032 1034
1033 /* Nothing found means we drop the port as it's probably not there */ 1035 /* Nothing found means we drop the port as it's probably not there */
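Device scanning triggered by the activation above runs asynchronously, so without a barrier the "nothing found" check below could run while a scan is still in flight and drop a port that is about to report a device. The added call is the stock barrier for that:

    async_synchronize_full(); /* wait for every async_schedule()d probe */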
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index bdb236957cb..f0d52f72f5b 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -20,13 +20,24 @@
20 20
21/* No PIO or DMA methods needed for this device */ 21/* No PIO or DMA methods needed for this device */
22 22
23static unsigned int netcell_read_id(struct ata_device *adev,
24 struct ata_taskfile *tf, u16 *id)
25{
26 unsigned int err_mask = ata_do_dev_read_id(adev, tf, id);
27 /* Firmware forgets to mark words 85-87 valid */
28 if (err_mask == 0)
29 id[ATA_ID_CSF_DEFAULT] |= 0x4000;
30 return err_mask;
31}
32
23static struct scsi_host_template netcell_sht = { 33static struct scsi_host_template netcell_sht = {
24 ATA_BMDMA_SHT(DRV_NAME), 34 ATA_BMDMA_SHT(DRV_NAME),
25}; 35};
26 36
27static struct ata_port_operations netcell_ops = { 37static struct ata_port_operations netcell_ops = {
28 .inherits = &ata_bmdma_port_ops, 38 .inherits = &ata_bmdma_port_ops,
29 .cable_detect = ata_cable_80wire, 39 .cable_detect = ata_cable_80wire,
40 .read_id = netcell_read_id,
30}; 41};
31 42
32 43
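The new ->read_id hook post-processes IDENTIFY data: forcing bit 14 of word 87 (ATA_ID_CSF_DEFAULT) makes the command-set words 85-87 pass the usual validity test. A sketch of that test, assuming the standard ATA convention that bits 15:14 of word 87 must read 01b (the helper is illustrative, not from the driver):

    static inline bool cfs_words_valid(const u16 *id)
    {
        return (id[ATA_ID_CSF_DEFAULT] & 0xC000) == 0x4000;
    }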
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
new file mode 100644
index 00000000000..11fb4ccc74b
--- /dev/null
+++ b/drivers/ata/pata_palmld.c
@@ -0,0 +1,150 @@
1/*
2 * drivers/ata/pata_palmld.c
3 *
4 * Driver for IDE channel in Palm LifeDrive
5 *
6 * Based on research of:
7 * Alex Osborne <ato@meshy.org>
8 *
9 * Rewrite for mainline:
10 * Marek Vasut <marek.vasut@gmail.com>
11 *
12 * Rewritten version based on pata_ixp4xx_cf.c:
13 * ixp4xx PATA/Compact Flash driver
14 * Copyright (C) 2006-07 Tower Technologies
15 * Author: Alessandro Zummo <a.zummo@towertech.it>
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License version 2 as
19 * published by the Free Software Foundation.
20 *
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/libata.h>
26#include <linux/irq.h>
27#include <linux/platform_device.h>
28#include <linux/delay.h>
29#include <linux/gpio.h>
30
31#include <scsi/scsi_host.h>
32#include <mach/palmld.h>
33
34#define DRV_NAME "pata_palmld"
35
36static struct scsi_host_template palmld_sht = {
37 ATA_PIO_SHT(DRV_NAME),
38};
39
40static struct ata_port_operations palmld_port_ops = {
41 .inherits = &ata_sff_port_ops,
42 .sff_data_xfer = ata_sff_data_xfer_noirq,
43 .cable_detect = ata_cable_40wire,
44};
45
46static __devinit int palmld_pata_probe(struct platform_device *pdev)
47{
48 struct ata_host *host;
49 struct ata_port *ap;
50 void __iomem *mem;
51 int ret;
52
53 /* allocate host */
54 host = ata_host_alloc(&pdev->dev, 1);
55 if (!host)
56 return -ENOMEM;
57
58 /* remap drive's physical memory address */
59 mem = devm_ioremap(&pdev->dev, PALMLD_IDE_PHYS, 0x1000);
60 if (!mem)
61 return -ENOMEM;
62
63 /* request and activate power GPIO, IRQ GPIO */
64 ret = gpio_request(GPIO_NR_PALMLD_IDE_PWEN, "HDD PWR");
65 if (ret)
66 goto err1;
67 ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_PWEN, 1);
68 if (ret)
69 goto err2;
70
71 ret = gpio_request(GPIO_NR_PALMLD_IDE_RESET, "HDD RST");
72 if (ret)
73 goto err2;
74 ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_RESET, 0);
75 if (ret)
76 goto err3;
77
78 /* reset the drive */
79 gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 0);
80 msleep(30);
81 gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 1);
82 msleep(30);
83
84 /* setup the ata port */
85 ap = host->ports[0];
86 ap->ops = &palmld_port_ops;
87 ap->pio_mask = ATA_PIO4;
88 ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_PIO_POLLING;
89
90 /* memory mapping voodoo */
91 ap->ioaddr.cmd_addr = mem + 0x10;
92 ap->ioaddr.altstatus_addr = mem + 0xe;
93 ap->ioaddr.ctl_addr = mem + 0xe;
94
95 /* start the port */
96 ata_sff_std_ports(&ap->ioaddr);
97
98 /* activate host */
99 return ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING,
100 &palmld_sht);
101
102err3:
103 gpio_free(GPIO_NR_PALMLD_IDE_RESET);
104err2:
105 gpio_free(GPIO_NR_PALMLD_IDE_PWEN);
106err1:
107 return ret;
108}
109
110static __devexit int palmld_pata_remove(struct platform_device *dev)
111{
112 struct ata_host *host = platform_get_drvdata(dev);
113
114 ata_host_detach(host);
115
116 /* power down the HDD */
117 gpio_set_value(GPIO_NR_PALMLD_IDE_PWEN, 0);
118
119 gpio_free(GPIO_NR_PALMLD_IDE_RESET);
120 gpio_free(GPIO_NR_PALMLD_IDE_PWEN);
121
122 return 0;
123}
124
125static struct platform_driver palmld_pata_platform_driver = {
126 .driver = {
127 .name = DRV_NAME,
128 .owner = THIS_MODULE,
129 },
130 .probe = palmld_pata_probe,
131 .remove = __devexit_p(palmld_pata_remove),
132};
133
134static int __init palmld_pata_init(void)
135{
136 return platform_driver_register(&palmld_pata_platform_driver);
137}
138
139static void __exit palmld_pata_exit(void)
140{
141 platform_driver_unregister(&palmld_pata_platform_driver);
142}
143
144MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
145MODULE_DESCRIPTION("PalmLD PATA driver");
146MODULE_LICENSE("GPL");
147MODULE_ALIAS("platform:" DRV_NAME);
148
149module_init(palmld_pata_init);
150module_exit(palmld_pata_exit);
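The new driver binds by platform-device name, so it probes only on machines whose board code registers a matching device. A hypothetical board-support sketch (the device and init function are invented; only the "pata_palmld" name is fixed by the driver):

    static struct platform_device palmld_ide_device = {
        .name = "pata_palmld", /* must match DRV_NAME above */
        .id = -1,
    };

    static int __init palmld_board_ide_init(void)
    {
        return platform_device_register(&palmld_ide_device);
    }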
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 6cda12ba812..b2d11f300c3 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -305,8 +305,8 @@ static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
305static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); 305static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
306static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); 306static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
307 307
308static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class, 308static int nv_hardreset(struct ata_link *link, unsigned int *class,
309 unsigned long deadline); 309 unsigned long deadline);
310static void nv_nf2_freeze(struct ata_port *ap); 310static void nv_nf2_freeze(struct ata_port *ap);
311static void nv_nf2_thaw(struct ata_port *ap); 311static void nv_nf2_thaw(struct ata_port *ap);
312static void nv_ck804_freeze(struct ata_port *ap); 312static void nv_ck804_freeze(struct ata_port *ap);
@@ -406,49 +406,82 @@ static struct scsi_host_template nv_swncq_sht = {
406 .slave_configure = nv_swncq_slave_config, 406 .slave_configure = nv_swncq_slave_config,
407}; 407};
408 408
409static struct ata_port_operations nv_common_ops = { 409/*
410 * NV SATA controllers have various different problems with hardreset
411 * protocol depending on the specific controller and device.
412 *
413 * GENERIC:
414 *
415 * bko11195 reports that link doesn't come online after hardreset on
416 * generic nv's and there have been several other similar reports on
417 * linux-ide.
418 *
419 * bko12351#c23 reports that warmplug on MCP61 doesn't work with
420 * softreset.
421 *
422 * NF2/3:
423 *
424 * bko3352 reports nf2/3 controllers can't determine device signature
425 * reliably after hardreset. The following thread reports detection
426 * failure on cold boot with the standard debouncing timing.
427 *
428 * http://thread.gmane.org/gmane.linux.ide/34098
429 *
430 * bko12176 reports that hardreset fails to bring up the link during
431 * boot on nf2.
432 *
433 * CK804:
434 *
435 * For initial probing after boot and hot plugging, hardreset mostly
436 * works fine on CK804 but curiously, reprobing on the initial port
437 * by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
 438 * FIS in a somewhat nondeterministic way.
439 *
440 * SWNCQ:
441 *
442 * bko12351 reports that when SWNCQ is enabled, for hotplug to work,
443 * hardreset should be used and hardreset can't report proper
 444 * signature, which suggests that mcp5x is closer to nf2 as far as
445 * reset quirkiness is concerned.
446 *
 447 * bko12703 reports that boot probing fails for Intel SSDs with
448 * hardreset. Link fails to come online. Softreset works fine.
449 *
450 * The failures are varied but the following patterns seem true for
451 * all flavors.
452 *
453 * - Softreset during boot always works.
454 *
455 * - Hardreset during boot sometimes fails to bring up the link on
 456 * certain combinations, and device signature acquisition is
457 * unreliable.
458 *
459 * - Hardreset is often necessary after hotplug.
460 *
461 * So, preferring softreset for boot probing and error handling (as
462 * hardreset might bring down the link) but using hardreset for
463 * post-boot probing should work around the above issues in most
464 * cases. Define nv_hardreset() which only kicks in for post-boot
465 * probing and use it for all variants.
466 */
467static struct ata_port_operations nv_generic_ops = {
410 .inherits = &ata_bmdma_port_ops, 468 .inherits = &ata_bmdma_port_ops,
411 .lost_interrupt = ATA_OP_NULL, 469 .lost_interrupt = ATA_OP_NULL,
412 .scr_read = nv_scr_read, 470 .scr_read = nv_scr_read,
413 .scr_write = nv_scr_write, 471 .scr_write = nv_scr_write,
472 .hardreset = nv_hardreset,
414}; 473};
415 474
416/* OSDL bz11195 reports that link doesn't come online after hardreset
417 * on generic nv's and there have been several other similar reports
418 * on linux-ide. Disable hardreset for generic nv's.
419 */
420static struct ata_port_operations nv_generic_ops = {
421 .inherits = &nv_common_ops,
422 .hardreset = ATA_OP_NULL,
423};
424
425/* nf2 is ripe with hardreset related problems.
426 *
427 * kernel bz#3352 reports nf2/3 controllers can't determine device
428 * signature reliably. The following thread reports detection failure
429 * on cold boot with the standard debouncing timing.
430 *
431 * http://thread.gmane.org/gmane.linux.ide/34098
432 *
433 * And bz#12176 reports that hardreset simply doesn't work on nf2.
434 * Give up on it and just don't do hardreset.
435 */
436static struct ata_port_operations nv_nf2_ops = { 475static struct ata_port_operations nv_nf2_ops = {
437 .inherits = &nv_generic_ops, 476 .inherits = &nv_generic_ops,
438 .freeze = nv_nf2_freeze, 477 .freeze = nv_nf2_freeze,
439 .thaw = nv_nf2_thaw, 478 .thaw = nv_nf2_thaw,
440}; 479};
441 480
442/* For initial probing after boot and hot plugging, hardreset mostly
443 * works fine on CK804 but curiously, reprobing on the initial port by
444 * rescanning or rmmod/insmod fails to acquire the initial D2H Reg FIS
445 * in somewhat undeterministic way. Use noclassify hardreset.
446 */
447static struct ata_port_operations nv_ck804_ops = { 481static struct ata_port_operations nv_ck804_ops = {
448 .inherits = &nv_common_ops, 482 .inherits = &nv_generic_ops,
449 .freeze = nv_ck804_freeze, 483 .freeze = nv_ck804_freeze,
450 .thaw = nv_ck804_thaw, 484 .thaw = nv_ck804_thaw,
451 .hardreset = nv_noclassify_hardreset,
452 .host_stop = nv_ck804_host_stop, 485 .host_stop = nv_ck804_host_stop,
453}; 486};
454 487
@@ -476,19 +509,8 @@ static struct ata_port_operations nv_adma_ops = {
476 .host_stop = nv_adma_host_stop, 509 .host_stop = nv_adma_host_stop,
477}; 510};
478 511
479/* Kernel bz#12351 reports that when SWNCQ is enabled, for hotplug to
480 * work, hardreset should be used and hardreset can't report proper
481 * signature, which suggests that mcp5x is closer to nf2 as long as
482 * reset quirkiness is concerned. Define separate ops for mcp5x with
483 * nv_noclassify_hardreset().
484 */
485static struct ata_port_operations nv_mcp5x_ops = {
486 .inherits = &nv_common_ops,
487 .hardreset = nv_noclassify_hardreset,
488};
489
490static struct ata_port_operations nv_swncq_ops = { 512static struct ata_port_operations nv_swncq_ops = {
491 .inherits = &nv_mcp5x_ops, 513 .inherits = &nv_generic_ops,
492 514
493 .qc_defer = ata_std_qc_defer, 515 .qc_defer = ata_std_qc_defer,
494 .qc_prep = nv_swncq_qc_prep, 516 .qc_prep = nv_swncq_qc_prep,
@@ -557,7 +579,7 @@ static const struct ata_port_info nv_port_info[] = {
557 .pio_mask = NV_PIO_MASK, 579 .pio_mask = NV_PIO_MASK,
558 .mwdma_mask = NV_MWDMA_MASK, 580 .mwdma_mask = NV_MWDMA_MASK,
559 .udma_mask = NV_UDMA_MASK, 581 .udma_mask = NV_UDMA_MASK,
560 .port_ops = &nv_mcp5x_ops, 582 .port_ops = &nv_generic_ops,
561 .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht), 583 .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
562 }, 584 },
563 /* SWNCQ */ 585 /* SWNCQ */
@@ -1559,15 +1581,24 @@ static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1559 return 0; 1581 return 0;
1560} 1582}
1561 1583
1562static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class, 1584static int nv_hardreset(struct ata_link *link, unsigned int *class,
1563 unsigned long deadline) 1585 unsigned long deadline)
1564{ 1586{
1565 bool online; 1587 struct ata_eh_context *ehc = &link->eh_context;
1566 int rc;
1567 1588
1568 rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline, 1589 /* Do hardreset iff it's post-boot probing; see the
1569 &online, NULL); 1590 * comment above the port ops for details.
1570 return online ? -EAGAIN : rc; 1591 */
1592 if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1593 !ata_dev_enabled(link->device))
1594 sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1595 NULL, NULL);
1596 else if (!(ehc->i.flags & ATA_EHI_QUIET))
1597 ata_link_printk(link, KERN_INFO,
1598 "nv: skipping hardreset on occupied port\n");
1599
1600 /* device signature acquisition is unreliable */
1601 return -EAGAIN;
1571} 1602}
1572 1603
1573static void nv_nf2_freeze(struct ata_port *ap) 1604static void nv_nf2_freeze(struct ata_port *ap)
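The unified nv_hardreset() turns the long policy comment into code: a real hardreset goes out only when probing an empty port after boot, and the function always returns -EAGAIN so EH follows up with a softreset for classification. A condensed sketch (not the verbatim function):

    static int nv_hardreset_sketch(struct ata_link *link, unsigned int *class,
                                   unsigned long deadline)
    {
        /* real hardreset only for post-boot probing of an empty port */
        if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
            !ata_dev_enabled(link->device))
            sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
                                NULL, NULL);

        /* signature acquisition is unreliable on these chips, so ask
         * EH to retry classification with a softreset */
        return -EAGAIN;
    }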
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index e67ce8e5caa..030ec079b18 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -183,7 +183,7 @@ static struct scsi_host_template sil_sht = {
183}; 183};
184 184
185static struct ata_port_operations sil_ops = { 185static struct ata_port_operations sil_ops = {
186 .inherits = &ata_bmdma_port_ops, 186 .inherits = &ata_bmdma32_port_ops,
187 .dev_config = sil_dev_config, 187 .dev_config = sil_dev_config,
188 .set_mode = sil_set_mode, 188 .set_mode = sil_set_mode,
189 .bmdma_setup = sil_bmdma_setup, 189 .bmdma_setup = sil_bmdma_setup,
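The one-line base-ops swap is a small performance change; as the op name indicates, only the data-transfer method differs:

    /* Effect of inheriting ata_bmdma32_port_ops instead of ata_bmdma_port_ops:
     *   .sff_data_xfer: ata_sff_data_xfer -> ata_sff_data_xfer32
     * i.e. PIO data moves 32 bits per MMIO access instead of 16.
     * No sil-specific methods change.
     */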
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index eb05a3c82a9..bbcf970068a 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -193,6 +193,7 @@ enum {
193 PDC_TIMER_MASK_INT, 193 PDC_TIMER_MASK_INT,
194}; 194};
195 195
196#define ECC_ERASE_BUF_SZ (128 * 1024)
196 197
197struct pdc_port_priv { 198struct pdc_port_priv {
198 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512]; 199 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
@@ -1280,7 +1281,6 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
1280{ 1281{
1281 int speed, size, length; 1282 int speed, size, length;
1282 u32 addr, spd0, pci_status; 1283 u32 addr, spd0, pci_status;
1283 u32 tmp = 0;
1284 u32 time_period = 0; 1284 u32 time_period = 0;
1285 u32 tcount = 0; 1285 u32 tcount = 0;
1286 u32 ticks = 0; 1286 u32 ticks = 0;
@@ -1395,14 +1395,17 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
1395 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 1395 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1396 PDC_DIMM_SPD_TYPE, &spd0); 1396 PDC_DIMM_SPD_TYPE, &spd0);
1397 if (spd0 == 0x02) { 1397 if (spd0 == 0x02) {
1398 void *buf;
1398 VPRINTK("Start ECC initialization\n"); 1399 VPRINTK("Start ECC initialization\n");
1399 addr = 0; 1400 addr = 0;
1400 length = size * 1024 * 1024; 1401 length = size * 1024 * 1024;
1402 buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
1401 while (addr < length) { 1403 while (addr < length) {
1402 pdc20621_put_to_dimm(host, (void *) &tmp, addr, 1404 pdc20621_put_to_dimm(host, buf, addr,
1403 sizeof(u32)); 1405 ECC_ERASE_BUF_SZ);
1404 addr += sizeof(u32); 1406 addr += ECC_ERASE_BUF_SZ;
1405 } 1407 }
1408 kfree(buf);
1406 VPRINTK("Finish ECC initialization\n"); 1409 VPRINTK("Finish ECC initialization\n");
1407 } 1410 }
1408 return 0; 1411 return 0;
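Batching the DIMM wipe through a 128 KiB zeroed buffer replaces one pdc20621_put_to_dimm() call per u32 with one per 128 KiB. A sketch of the loop with the allocation-failure check a hardened version would want (the hunk above uses buf unconditionally):

    void *buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);

    if (buf) {
        for (addr = 0; addr < length; addr += ECC_ERASE_BUF_SZ)
            pdc20621_put_to_dimm(host, buf, addr, ECC_ERASE_BUF_SZ);
        kfree(buf);
    }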
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index dc030f1f00f..4b04a15146d 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -279,7 +279,7 @@ static struct device *next_device(struct klist_iter *i)
279 * 279 *
280 * NOTE: The device that returns a non-zero value is not retained 280 * NOTE: The device that returns a non-zero value is not retained
281 * in any way, nor is its refcount incremented. If the caller needs 281 * in any way, nor is its refcount incremented. If the caller needs
282 * to retain this data, it should do, and increment the reference 282 * to retain this data, it should do so, and increment the reference
283 * count in the supplied callback. 283 * count in the supplied callback.
284 */ 284 */
285int bus_for_each_dev(struct bus_type *bus, struct device *start, 285int bus_for_each_dev(struct bus_type *bus, struct device *start,
@@ -700,8 +700,10 @@ int bus_add_driver(struct device_driver *drv)
700 } 700 }
701 701
702 kobject_uevent(&priv->kobj, KOBJ_ADD); 702 kobject_uevent(&priv->kobj, KOBJ_ADD);
703 return error; 703 return 0;
704out_unregister: 704out_unregister:
705 kfree(drv->p);
706 drv->p = NULL;
705 kobject_put(&priv->kobj); 707 kobject_put(&priv->kobj);
706out_put_bus: 708out_put_bus:
707 bus_put(bus); 709 bus_put(bus);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 4aa527b8a91..1977d4beb89 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -879,7 +879,7 @@ int device_add(struct device *dev)
879 } 879 }
880 880
881 if (!dev_name(dev)) 881 if (!dev_name(dev))
882 goto done; 882 goto name_error;
883 883
884 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); 884 pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
885 885
@@ -978,6 +978,9 @@ done:
978 cleanup_device_parent(dev); 978 cleanup_device_parent(dev);
979 if (parent) 979 if (parent)
980 put_device(parent); 980 put_device(parent);
981name_error:
982 kfree(dev->p);
983 dev->p = NULL;
981 goto done; 984 goto done;
982} 985}
983 986
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index c51f11bb29a..8ae0f63602e 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -257,6 +257,10 @@ EXPORT_SYMBOL_GPL(driver_register);
257 */ 257 */
258void driver_unregister(struct device_driver *drv) 258void driver_unregister(struct device_driver *drv)
259{ 259{
260 if (!drv || !drv->p) {
261 WARN(1, "Unexpected driver unregister!\n");
262 return;
263 }
260 driver_remove_groups(drv, drv->groups); 264 driver_remove_groups(drv, drv->groups);
261 bus_remove_driver(drv); 265 bus_remove_driver(drv);
262} 266}
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index d3a59c688fe..8a267c42762 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -17,7 +17,7 @@
17#include <linux/bitops.h> 17#include <linux/bitops.h>
18#include <linux/mutex.h> 18#include <linux/mutex.h>
19#include <linux/kthread.h> 19#include <linux/kthread.h>
20 20#include <linux/highmem.h>
21#include <linux/firmware.h> 21#include <linux/firmware.h>
22#include "base.h" 22#include "base.h"
23 23
@@ -45,7 +45,10 @@ struct firmware_priv {
45 struct bin_attribute attr_data; 45 struct bin_attribute attr_data;
46 struct firmware *fw; 46 struct firmware *fw;
47 unsigned long status; 47 unsigned long status;
48 int alloc_size; 48 struct page **pages;
49 int nr_pages;
50 int page_array_size;
51 const char *vdata;
49 struct timer_list timeout; 52 struct timer_list timeout;
50}; 53};
51 54
@@ -122,6 +125,10 @@ static ssize_t firmware_loading_show(struct device *dev,
122 return sprintf(buf, "%d\n", loading); 125 return sprintf(buf, "%d\n", loading);
123} 126}
124 127
128/* Some architectures don't have PAGE_KERNEL_RO */
129#ifndef PAGE_KERNEL_RO
130#define PAGE_KERNEL_RO PAGE_KERNEL
131#endif
125/** 132/**
126 * firmware_loading_store - set value in the 'loading' control file 133 * firmware_loading_store - set value in the 'loading' control file
127 * @dev: device pointer 134 * @dev: device pointer
@@ -141,6 +148,7 @@ static ssize_t firmware_loading_store(struct device *dev,
141{ 148{
142 struct firmware_priv *fw_priv = dev_get_drvdata(dev); 149 struct firmware_priv *fw_priv = dev_get_drvdata(dev);
143 int loading = simple_strtol(buf, NULL, 10); 150 int loading = simple_strtol(buf, NULL, 10);
151 int i;
144 152
145 switch (loading) { 153 switch (loading) {
146 case 1: 154 case 1:
@@ -151,13 +159,30 @@ static ssize_t firmware_loading_store(struct device *dev,
151 } 159 }
152 vfree(fw_priv->fw->data); 160 vfree(fw_priv->fw->data);
153 fw_priv->fw->data = NULL; 161 fw_priv->fw->data = NULL;
162 for (i = 0; i < fw_priv->nr_pages; i++)
163 __free_page(fw_priv->pages[i]);
164 kfree(fw_priv->pages);
165 fw_priv->pages = NULL;
166 fw_priv->page_array_size = 0;
167 fw_priv->nr_pages = 0;
154 fw_priv->fw->size = 0; 168 fw_priv->fw->size = 0;
155 fw_priv->alloc_size = 0;
156 set_bit(FW_STATUS_LOADING, &fw_priv->status); 169 set_bit(FW_STATUS_LOADING, &fw_priv->status);
157 mutex_unlock(&fw_lock); 170 mutex_unlock(&fw_lock);
158 break; 171 break;
159 case 0: 172 case 0:
160 if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) { 173 if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
174 vfree(fw_priv->fw->data);
175 fw_priv->fw->data = vmap(fw_priv->pages,
176 fw_priv->nr_pages,
177 0, PAGE_KERNEL_RO);
178 if (!fw_priv->fw->data) {
179 dev_err(dev, "%s: vmap() failed\n", __func__);
180 goto err;
181 }
182 /* Pages will be freed by vfree() */
183 fw_priv->pages = NULL;
184 fw_priv->page_array_size = 0;
185 fw_priv->nr_pages = 0;
161 complete(&fw_priv->completion); 186 complete(&fw_priv->completion);
162 clear_bit(FW_STATUS_LOADING, &fw_priv->status); 187 clear_bit(FW_STATUS_LOADING, &fw_priv->status);
163 break; 188 break;
@@ -167,6 +192,7 @@ static ssize_t firmware_loading_store(struct device *dev,
167 dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading); 192 dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
168 /* fallthrough */ 193 /* fallthrough */
169 case -1: 194 case -1:
195 err:
170 fw_load_abort(fw_priv); 196 fw_load_abort(fw_priv);
171 break; 197 break;
172 } 198 }
@@ -191,8 +217,28 @@ firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
191 ret_count = -ENODEV; 217 ret_count = -ENODEV;
192 goto out; 218 goto out;
193 } 219 }
194 ret_count = memory_read_from_buffer(buffer, count, &offset, 220 if (offset > fw->size)
195 fw->data, fw->size); 221 return 0;
222 if (count > fw->size - offset)
223 count = fw->size - offset;
224
225 ret_count = count;
226
227 while (count) {
228 void *page_data;
229 int page_nr = offset >> PAGE_SHIFT;
230 int page_ofs = offset & (PAGE_SIZE-1);
231 int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
232
233 page_data = kmap(fw_priv->pages[page_nr]);
234
235 memcpy(buffer, page_data + page_ofs, page_cnt);
236
237 kunmap(fw_priv->pages[page_nr]);
238 buffer += page_cnt;
239 offset += page_cnt;
240 count -= page_cnt;
241 }
196out: 242out:
197 mutex_unlock(&fw_lock); 243 mutex_unlock(&fw_lock);
198 return ret_count; 244 return ret_count;
@@ -201,27 +247,39 @@ out:
201static int 247static int
202fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) 248fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
203{ 249{
204 u8 *new_data; 250 int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT;
205 int new_size = fw_priv->alloc_size; 251
252 /* If the array of pages is too small, grow it... */
253 if (fw_priv->page_array_size < pages_needed) {
254 int new_array_size = max(pages_needed,
255 fw_priv->page_array_size * 2);
256 struct page **new_pages;
257
258 new_pages = kmalloc(new_array_size * sizeof(void *),
259 GFP_KERNEL);
260 if (!new_pages) {
261 fw_load_abort(fw_priv);
262 return -ENOMEM;
263 }
264 memcpy(new_pages, fw_priv->pages,
265 fw_priv->page_array_size * sizeof(void *));
266 memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
267 (new_array_size - fw_priv->page_array_size));
268 kfree(fw_priv->pages);
269 fw_priv->pages = new_pages;
270 fw_priv->page_array_size = new_array_size;
271 }
206 272
207 if (min_size <= fw_priv->alloc_size) 273 while (fw_priv->nr_pages < pages_needed) {
208 return 0; 274 fw_priv->pages[fw_priv->nr_pages] =
275 alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
209 276
210 new_size = ALIGN(min_size, PAGE_SIZE); 277 if (!fw_priv->pages[fw_priv->nr_pages]) {
211 new_data = vmalloc(new_size); 278 fw_load_abort(fw_priv);
212 if (!new_data) { 279 return -ENOMEM;
213 printk(KERN_ERR "%s: unable to alloc buffer\n", __func__); 280 }
214 /* Make sure that we don't keep incomplete data */ 281 fw_priv->nr_pages++;
215 fw_load_abort(fw_priv);
216 return -ENOMEM;
217 }
218 fw_priv->alloc_size = new_size;
219 if (fw_priv->fw->data) {
220 memcpy(new_data, fw_priv->fw->data, fw_priv->fw->size);
221 vfree(fw_priv->fw->data);
222 } 282 }
223 fw_priv->fw->data = new_data;
224 BUG_ON(min_size > fw_priv->alloc_size);
225 return 0; 283 return 0;
226} 284}
227 285
@@ -258,10 +316,25 @@ firmware_data_write(struct kobject *kobj, struct bin_attribute *bin_attr,
258 if (retval) 316 if (retval)
259 goto out; 317 goto out;
260 318
261 memcpy((u8 *)fw->data + offset, buffer, count);
262
263 fw->size = max_t(size_t, offset + count, fw->size);
264 retval = count; 319 retval = count;
320
321 while (count) {
322 void *page_data;
323 int page_nr = offset >> PAGE_SHIFT;
324 int page_ofs = offset & (PAGE_SIZE - 1);
325 int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
326
327 page_data = kmap(fw_priv->pages[page_nr]);
328
329 memcpy(page_data + page_ofs, buffer, page_cnt);
330
331 kunmap(fw_priv->pages[page_nr]);
332 buffer += page_cnt;
333 offset += page_cnt;
334 count -= page_cnt;
335 }
336
337 fw->size = max_t(size_t, offset, fw->size);
265out: 338out:
266 mutex_unlock(&fw_lock); 339 mutex_unlock(&fw_lock);
267 return retval; 340 return retval;
@@ -277,7 +350,11 @@ static struct bin_attribute firmware_attr_data_tmpl = {
277static void fw_dev_release(struct device *dev) 350static void fw_dev_release(struct device *dev)
278{ 351{
279 struct firmware_priv *fw_priv = dev_get_drvdata(dev); 352 struct firmware_priv *fw_priv = dev_get_drvdata(dev);
353 int i;
280 354
355 for (i = 0; i < fw_priv->nr_pages; i++)
356 __free_page(fw_priv->pages[i]);
357 kfree(fw_priv->pages);
281 kfree(fw_priv); 358 kfree(fw_priv);
282 kfree(dev); 359 kfree(dev);
283 360
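Firmware images now accumulate in individually allocated, possibly highmem pages and are vmapped read-only once loading completes, so growing a large image no longer needs a fresh contiguous vmalloc plus a full copy. Both sysfs data paths share the same page-wise copy idiom; the write direction, extracted for clarity (pages[] stands in for the fw_priv page array):

    while (count) {
        int page_nr = offset >> PAGE_SHIFT;
        int page_ofs = offset & (PAGE_SIZE - 1);
        int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
        void *page_data = kmap(pages[page_nr]); /* highmem-safe mapping */

        memcpy(page_data + page_ofs, buffer, page_cnt);
        kunmap(pages[page_nr]);

        buffer += page_cnt;
        offset += page_cnt;
        count -= page_cnt;
    }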
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 8b4708e0624..ead3f64c41d 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -469,22 +469,6 @@ static void platform_drv_shutdown(struct device *_dev)
469 drv->shutdown(dev); 469 drv->shutdown(dev);
470} 470}
471 471
472static int platform_drv_suspend(struct device *_dev, pm_message_t state)
473{
474 struct platform_driver *drv = to_platform_driver(_dev->driver);
475 struct platform_device *dev = to_platform_device(_dev);
476
477 return drv->suspend(dev, state);
478}
479
480static int platform_drv_resume(struct device *_dev)
481{
482 struct platform_driver *drv = to_platform_driver(_dev->driver);
483 struct platform_device *dev = to_platform_device(_dev);
484
485 return drv->resume(dev);
486}
487
488/** 472/**
489 * platform_driver_register 473 * platform_driver_register
490 * @drv: platform driver structure 474 * @drv: platform driver structure
@@ -498,10 +482,10 @@ int platform_driver_register(struct platform_driver *drv)
498 drv->driver.remove = platform_drv_remove; 482 drv->driver.remove = platform_drv_remove;
499 if (drv->shutdown) 483 if (drv->shutdown)
500 drv->driver.shutdown = platform_drv_shutdown; 484 drv->driver.shutdown = platform_drv_shutdown;
501 if (drv->suspend) 485 if (drv->suspend || drv->resume)
502 drv->driver.suspend = platform_drv_suspend; 486 pr_warning("Platform driver '%s' needs updating - please use "
503 if (drv->resume) 487 "dev_pm_ops\n", drv->driver.name);
504 drv->driver.resume = platform_drv_resume; 488
505 return driver_register(&drv->driver); 489 return driver_register(&drv->driver);
506} 490}
507EXPORT_SYMBOL_GPL(platform_driver_register); 491EXPORT_SYMBOL_GPL(platform_driver_register);
@@ -633,10 +617,12 @@ static int platform_match(struct device *dev, struct device_driver *drv)
633 617
634static int platform_legacy_suspend(struct device *dev, pm_message_t mesg) 618static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
635{ 619{
620 struct platform_driver *pdrv = to_platform_driver(dev->driver);
621 struct platform_device *pdev = to_platform_device(dev);
636 int ret = 0; 622 int ret = 0;
637 623
638 if (dev->driver && dev->driver->suspend) 624 if (dev->driver && pdrv->suspend)
639 ret = dev->driver->suspend(dev, mesg); 625 ret = pdrv->suspend(pdev, mesg);
640 626
641 return ret; 627 return ret;
642} 628}
@@ -667,10 +653,12 @@ static int platform_legacy_resume_early(struct device *dev)
667 653
668static int platform_legacy_resume(struct device *dev) 654static int platform_legacy_resume(struct device *dev)
669{ 655{
656 struct platform_driver *pdrv = to_platform_driver(dev->driver);
657 struct platform_device *pdev = to_platform_device(dev);
670 int ret = 0; 658 int ret = 0;
671 659
672 if (dev->driver && dev->driver->resume) 660 if (dev->driver && pdrv->resume)
673 ret = dev->driver->resume(dev); 661 ret = pdrv->resume(pdev);
674 662
675 return ret; 663 return ret;
676} 664}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 69b4ddb7de3..fae72545898 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -315,13 +315,13 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
315/*------------------------- Resume routines -------------------------*/ 315/*------------------------- Resume routines -------------------------*/
316 316
317/** 317/**
318 * resume_device_noirq - Power on one device (early resume). 318 * device_resume_noirq - Power on one device (early resume).
319 * @dev: Device. 319 * @dev: Device.
320 * @state: PM transition of the system being carried out. 320 * @state: PM transition of the system being carried out.
321 * 321 *
322 * Must be called with interrupts disabled. 322 * Must be called with interrupts disabled.
323 */ 323 */
324static int resume_device_noirq(struct device *dev, pm_message_t state) 324static int device_resume_noirq(struct device *dev, pm_message_t state)
325{ 325{
326 int error = 0; 326 int error = 0;
327 327
@@ -334,9 +334,6 @@ static int resume_device_noirq(struct device *dev, pm_message_t state)
334 if (dev->bus->pm) { 334 if (dev->bus->pm) {
335 pm_dev_dbg(dev, state, "EARLY "); 335 pm_dev_dbg(dev, state, "EARLY ");
336 error = pm_noirq_op(dev, dev->bus->pm, state); 336 error = pm_noirq_op(dev, dev->bus->pm, state);
337 } else if (dev->bus->resume_early) {
338 pm_dev_dbg(dev, state, "legacy EARLY ");
339 error = dev->bus->resume_early(dev);
340 } 337 }
341 End: 338 End:
342 TRACE_RESUME(error); 339 TRACE_RESUME(error);
@@ -344,50 +341,40 @@ static int resume_device_noirq(struct device *dev, pm_message_t state)
344} 341}
345 342
346/** 343/**
347 * dpm_power_up - Power on all regular (non-sysdev) devices. 344 * dpm_resume_noirq - Power on all regular (non-sysdev) devices.
348 * @state: PM transition of the system being carried out. 345 * @state: PM transition of the system being carried out.
349 * 346 *
350 * Execute the appropriate "noirq resume" callback for all devices marked 347 * Call the "noirq" resume handlers for all devices marked as
351 * as DPM_OFF_IRQ. 348 * DPM_OFF_IRQ and enable device drivers to receive interrupts.
352 * 349 *
353 * Must be called under dpm_list_mtx. Device drivers should not receive 350 * Must be called under dpm_list_mtx. Device drivers should not receive
354 * interrupts while it's being executed. 351 * interrupts while it's being executed.
355 */ 352 */
356static void dpm_power_up(pm_message_t state) 353void dpm_resume_noirq(pm_message_t state)
357{ 354{
358 struct device *dev; 355 struct device *dev;
359 356
357 mutex_lock(&dpm_list_mtx);
360 list_for_each_entry(dev, &dpm_list, power.entry) 358 list_for_each_entry(dev, &dpm_list, power.entry)
361 if (dev->power.status > DPM_OFF) { 359 if (dev->power.status > DPM_OFF) {
362 int error; 360 int error;
363 361
364 dev->power.status = DPM_OFF; 362 dev->power.status = DPM_OFF;
365 error = resume_device_noirq(dev, state); 363 error = device_resume_noirq(dev, state);
366 if (error) 364 if (error)
367 pm_dev_err(dev, state, " early", error); 365 pm_dev_err(dev, state, " early", error);
368 } 366 }
369} 367 mutex_unlock(&dpm_list_mtx);
370
371/**
372 * device_power_up - Turn on all devices that need special attention.
373 * @state: PM transition of the system being carried out.
374 *
375 * Call the "early" resume handlers and enable device drivers to receive
376 * interrupts.
377 */
378void device_power_up(pm_message_t state)
379{
380 dpm_power_up(state);
381 resume_device_irqs(); 368 resume_device_irqs();
382} 369}
383EXPORT_SYMBOL_GPL(device_power_up); 370EXPORT_SYMBOL_GPL(dpm_resume_noirq);
384 371
385/** 372/**
386 * resume_device - Restore state for one device. 373 * device_resume - Restore state for one device.
387 * @dev: Device. 374 * @dev: Device.
388 * @state: PM transition of the system being carried out. 375 * @state: PM transition of the system being carried out.
389 */ 376 */
390static int resume_device(struct device *dev, pm_message_t state) 377static int device_resume(struct device *dev, pm_message_t state)
391{ 378{
392 int error = 0; 379 int error = 0;
393 380
@@ -412,9 +399,6 @@ static int resume_device(struct device *dev, pm_message_t state)
412 if (dev->type->pm) { 399 if (dev->type->pm) {
413 pm_dev_dbg(dev, state, "type "); 400 pm_dev_dbg(dev, state, "type ");
414 error = pm_op(dev, dev->type->pm, state); 401 error = pm_op(dev, dev->type->pm, state);
415 } else if (dev->type->resume) {
416 pm_dev_dbg(dev, state, "legacy type ");
417 error = dev->type->resume(dev);
418 } 402 }
419 if (error) 403 if (error)
420 goto End; 404 goto End;
@@ -460,7 +444,7 @@ static void dpm_resume(pm_message_t state)
460 dev->power.status = DPM_RESUMING; 444 dev->power.status = DPM_RESUMING;
461 mutex_unlock(&dpm_list_mtx); 445 mutex_unlock(&dpm_list_mtx);
462 446
463 error = resume_device(dev, state); 447 error = device_resume(dev, state);
464 448
465 mutex_lock(&dpm_list_mtx); 449 mutex_lock(&dpm_list_mtx);
466 if (error) 450 if (error)
@@ -478,11 +462,11 @@ static void dpm_resume(pm_message_t state)
478} 462}
479 463
480/** 464/**
481 * complete_device - Complete a PM transition for given device 465 * device_complete - Complete a PM transition for given device
482 * @dev: Device. 466 * @dev: Device.
483 * @state: PM transition of the system being carried out. 467 * @state: PM transition of the system being carried out.
484 */ 468 */
485static void complete_device(struct device *dev, pm_message_t state) 469static void device_complete(struct device *dev, pm_message_t state)
486{ 470{
487 down(&dev->sem); 471 down(&dev->sem);
488 472
@@ -525,7 +509,7 @@ static void dpm_complete(pm_message_t state)
525 dev->power.status = DPM_ON; 509 dev->power.status = DPM_ON;
526 mutex_unlock(&dpm_list_mtx); 510 mutex_unlock(&dpm_list_mtx);
527 511
528 complete_device(dev, state); 512 device_complete(dev, state);
529 513
530 mutex_lock(&dpm_list_mtx); 514 mutex_lock(&dpm_list_mtx);
531 } 515 }
@@ -538,19 +522,19 @@ static void dpm_complete(pm_message_t state)
538} 522}
539 523
540/** 524/**
541 * device_resume - Restore state of each device in system. 525 * dpm_resume_end - Restore state of each device in system.
542 * @state: PM transition of the system being carried out. 526 * @state: PM transition of the system being carried out.
543 * 527 *
544 * Resume all the devices, unlock them all, and allow new 528 * Resume all the devices, unlock them all, and allow new
545 * devices to be registered once again. 529 * devices to be registered once again.
546 */ 530 */
547void device_resume(pm_message_t state) 531void dpm_resume_end(pm_message_t state)
548{ 532{
549 might_sleep(); 533 might_sleep();
550 dpm_resume(state); 534 dpm_resume(state);
551 dpm_complete(state); 535 dpm_complete(state);
552} 536}
553EXPORT_SYMBOL_GPL(device_resume); 537EXPORT_SYMBOL_GPL(dpm_resume_end);
554 538
555 539
556/*------------------------- Suspend routines -------------------------*/ 540/*------------------------- Suspend routines -------------------------*/
@@ -575,13 +559,13 @@ static pm_message_t resume_event(pm_message_t sleep_state)
575} 559}
576 560
577/** 561/**
578 * suspend_device_noirq - Shut down one device (late suspend). 562 * device_suspend_noirq - Shut down one device (late suspend).
579 * @dev: Device. 563 * @dev: Device.
580 * @state: PM transition of the system being carried out. 564 * @state: PM transition of the system being carried out.
581 * 565 *
582 * This is called with interrupts off and only a single CPU running. 566 * This is called with interrupts off and only a single CPU running.
583 */ 567 */
584static int suspend_device_noirq(struct device *dev, pm_message_t state) 568static int device_suspend_noirq(struct device *dev, pm_message_t state)
585{ 569{
586 int error = 0; 570 int error = 0;
587 571
@@ -591,49 +575,47 @@ static int suspend_device_noirq(struct device *dev, pm_message_t state)
591 if (dev->bus->pm) { 575 if (dev->bus->pm) {
592 pm_dev_dbg(dev, state, "LATE "); 576 pm_dev_dbg(dev, state, "LATE ");
593 error = pm_noirq_op(dev, dev->bus->pm, state); 577 error = pm_noirq_op(dev, dev->bus->pm, state);
594 } else if (dev->bus->suspend_late) {
595 pm_dev_dbg(dev, state, "legacy LATE ");
596 error = dev->bus->suspend_late(dev, state);
597 suspend_report_result(dev->bus->suspend_late, error);
598 } 578 }
599 return error; 579 return error;
600} 580}
601 581
602/** 582/**
603 * device_power_down - Shut down special devices. 583 * dpm_suspend_noirq - Power down all regular (non-sysdev) devices.
604 * @state: PM transition of the system being carried out. 584 * @state: PM transition of the system being carried out.
605 * 585 *
606 * Prevent device drivers from receiving interrupts and call the "late" 586 * Prevent device drivers from receiving interrupts and call the "noirq"
607 * suspend handlers. 587 * suspend handlers.
608 * 588 *
609 * Must be called under dpm_list_mtx. 589 * Must be called under dpm_list_mtx.
610 */ 590 */
611int device_power_down(pm_message_t state) 591int dpm_suspend_noirq(pm_message_t state)
612{ 592{
613 struct device *dev; 593 struct device *dev;
614 int error = 0; 594 int error = 0;
615 595
616 suspend_device_irqs(); 596 suspend_device_irqs();
597 mutex_lock(&dpm_list_mtx);
617 list_for_each_entry_reverse(dev, &dpm_list, power.entry) { 598 list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
618 error = suspend_device_noirq(dev, state); 599 error = device_suspend_noirq(dev, state);
619 if (error) { 600 if (error) {
620 pm_dev_err(dev, state, " late", error); 601 pm_dev_err(dev, state, " late", error);
621 break; 602 break;
622 } 603 }
623 dev->power.status = DPM_OFF_IRQ; 604 dev->power.status = DPM_OFF_IRQ;
624 } 605 }
606 mutex_unlock(&dpm_list_mtx);
625 if (error) 607 if (error)
626 device_power_up(resume_event(state)); 608 dpm_resume_noirq(resume_event(state));
627 return error; 609 return error;
628} 610}
629EXPORT_SYMBOL_GPL(device_power_down); 611EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
630 612
631/** 613/**
632 * suspend_device - Save state of one device. 614 * device_suspend - Save state of one device.
633 * @dev: Device. 615 * @dev: Device.
634 * @state: PM transition of the system being carried out. 616 * @state: PM transition of the system being carried out.
635 */ 617 */
636static int suspend_device(struct device *dev, pm_message_t state) 618static int device_suspend(struct device *dev, pm_message_t state)
637{ 619{
638 int error = 0; 620 int error = 0;
639 621
@@ -656,10 +638,6 @@ static int suspend_device(struct device *dev, pm_message_t state)
656 if (dev->type->pm) { 638 if (dev->type->pm) {
657 pm_dev_dbg(dev, state, "type "); 639 pm_dev_dbg(dev, state, "type ");
658 error = pm_op(dev, dev->type->pm, state); 640 error = pm_op(dev, dev->type->pm, state);
659 } else if (dev->type->suspend) {
660 pm_dev_dbg(dev, state, "legacy type ");
661 error = dev->type->suspend(dev, state);
662 suspend_report_result(dev->type->suspend, error);
663 } 641 }
664 if (error) 642 if (error)
665 goto End; 643 goto End;
@@ -700,7 +678,7 @@ static int dpm_suspend(pm_message_t state)
700 get_device(dev); 678 get_device(dev);
701 mutex_unlock(&dpm_list_mtx); 679 mutex_unlock(&dpm_list_mtx);
702 680
703 error = suspend_device(dev, state); 681 error = device_suspend(dev, state);
704 682
705 mutex_lock(&dpm_list_mtx); 683 mutex_lock(&dpm_list_mtx);
706 if (error) { 684 if (error) {
@@ -719,11 +697,11 @@ static int dpm_suspend(pm_message_t state)
719} 697}
720 698
721/** 699/**
722 * prepare_device - Execute the ->prepare() callback(s) for given device. 700 * device_prepare - Execute the ->prepare() callback(s) for given device.
723 * @dev: Device. 701 * @dev: Device.
724 * @state: PM transition of the system being carried out. 702 * @state: PM transition of the system being carried out.
725 */ 703 */
726static int prepare_device(struct device *dev, pm_message_t state) 704static int device_prepare(struct device *dev, pm_message_t state)
727{ 705{
728 int error = 0; 706 int error = 0;
729 707
@@ -777,7 +755,7 @@ static int dpm_prepare(pm_message_t state)
777 dev->power.status = DPM_PREPARING; 755 dev->power.status = DPM_PREPARING;
778 mutex_unlock(&dpm_list_mtx); 756 mutex_unlock(&dpm_list_mtx);
779 757
780 error = prepare_device(dev, state); 758 error = device_prepare(dev, state);
781 759
782 mutex_lock(&dpm_list_mtx); 760 mutex_lock(&dpm_list_mtx);
783 if (error) { 761 if (error) {
@@ -803,12 +781,12 @@ static int dpm_prepare(pm_message_t state)
803} 781}
804 782
805/** 783/**
806 * device_suspend - Save state and stop all devices in system. 784 * dpm_suspend_start - Save state and stop all devices in system.
807 * @state: PM transition of the system being carried out. 785 * @state: PM transition of the system being carried out.
808 * 786 *
809 * Prepare and suspend all devices. 787 * Prepare and suspend all devices.
810 */ 788 */
811int device_suspend(pm_message_t state) 789int dpm_suspend_start(pm_message_t state)
812{ 790{
813 int error; 791 int error;
814 792
@@ -818,7 +796,7 @@ int device_suspend(pm_message_t state)
818 error = dpm_suspend(state); 796 error = dpm_suspend(state);
819 return error; 797 return error;
820} 798}
821EXPORT_SYMBOL_GPL(device_suspend); 799EXPORT_SYMBOL_GPL(dpm_suspend_start);
822 800
823void __suspend_report_result(const char *function, void *fn, int ret) 801void __suspend_report_result(const char *function, void *fn, int ret)
824{ 802{
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 3236b434b96..9742a78c9fe 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -343,11 +343,15 @@ static void __sysdev_resume(struct sys_device *dev)
343 /* First, call the class-specific one */ 343 /* First, call the class-specific one */
344 if (cls->resume) 344 if (cls->resume)
345 cls->resume(dev); 345 cls->resume(dev);
346 WARN_ONCE(!irqs_disabled(),
347 "Interrupts enabled after %pF\n", cls->resume);
346 348
 347 /* Call auxiliary drivers next. */ 349 /* Call auxiliary drivers next. */
348 list_for_each_entry(drv, &cls->drivers, entry) { 350 list_for_each_entry(drv, &cls->drivers, entry) {
349 if (drv->resume) 351 if (drv->resume)
350 drv->resume(dev); 352 drv->resume(dev);
353 WARN_ONCE(!irqs_disabled(),
354 "Interrupts enabled after %pF\n", drv->resume);
351 } 355 }
352} 356}
353 357
@@ -377,6 +381,9 @@ int sysdev_suspend(pm_message_t state)
377 if (ret) 381 if (ret)
378 return ret; 382 return ret;
379 383
384 WARN_ONCE(!irqs_disabled(),
385 "Interrupts enabled while suspending system devices\n");
386
380 pr_debug("Suspending System Devices\n"); 387 pr_debug("Suspending System Devices\n");
381 388
382 list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { 389 list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) {
@@ -393,6 +400,9 @@ int sysdev_suspend(pm_message_t state)
393 if (ret) 400 if (ret)
394 goto aux_driver; 401 goto aux_driver;
395 } 402 }
403 WARN_ONCE(!irqs_disabled(),
404 "Interrupts enabled after %pF\n",
405 drv->suspend);
396 } 406 }
397 407
398 /* Now call the generic one */ 408 /* Now call the generic one */
@@ -400,6 +410,9 @@ int sysdev_suspend(pm_message_t state)
400 ret = cls->suspend(sysdev, state); 410 ret = cls->suspend(sysdev, state);
401 if (ret) 411 if (ret)
402 goto cls_driver; 412 goto cls_driver;
413 WARN_ONCE(!irqs_disabled(),
414 "Interrupts enabled after %pF\n",
415 cls->suspend);
403 } 416 }
404 } 417 }
405 } 418 }
@@ -452,6 +465,9 @@ int sysdev_resume(void)
452{ 465{
453 struct sysdev_class *cls; 466 struct sysdev_class *cls;
454 467
468 WARN_ONCE(!irqs_disabled(),
469 "Interrupts enabled while resuming system devices\n");
470
455 pr_debug("Resuming System Devices\n"); 471 pr_debug("Resuming System Devices\n");
456 472
457 list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) { 473 list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) {
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index f22ed6cc69f..668dc234b8e 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3321,7 +3321,7 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
3321 DAC960_Command_T *Command; 3321 DAC960_Command_T *Command;
3322 3322
3323 while(1) { 3323 while(1) {
3324 Request = elv_next_request(req_q); 3324 Request = blk_peek_request(req_q);
3325 if (!Request) 3325 if (!Request)
3326 return 1; 3326 return 1;
3327 3327
@@ -3338,10 +3338,10 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
3338 } 3338 }
3339 Command->Completion = Request->end_io_data; 3339 Command->Completion = Request->end_io_data;
3340 Command->LogicalDriveNumber = (long)Request->rq_disk->private_data; 3340 Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
3341 Command->BlockNumber = Request->sector; 3341 Command->BlockNumber = blk_rq_pos(Request);
3342 Command->BlockCount = Request->nr_sectors; 3342 Command->BlockCount = blk_rq_sectors(Request);
3343 Command->Request = Request; 3343 Command->Request = Request;
3344 blkdev_dequeue_request(Request); 3344 blk_start_request(Request);
3345 Command->SegmentCount = blk_rq_map_sg(req_q, 3345 Command->SegmentCount = blk_rq_map_sg(req_q,
3346 Command->Request, Command->cmd_sglist); 3346 Command->Request, Command->cmd_sglist);
3347 /* pci_map_sg MAY change the value of SegCount */ 3347 /* pci_map_sg MAY change the value of SegCount */
@@ -3431,7 +3431,7 @@ static void DAC960_queue_partial_rw(DAC960_Command_T *Command)
3431 * successfully as possible. 3431 * successfully as possible.
3432 */ 3432 */
3433 Command->SegmentCount = 1; 3433 Command->SegmentCount = 1;
3434 Command->BlockNumber = Request->sector; 3434 Command->BlockNumber = blk_rq_pos(Request);
3435 Command->BlockCount = 1; 3435 Command->BlockCount = 1;
3436 DAC960_QueueReadWriteCommand(Command); 3436 DAC960_QueueReadWriteCommand(Command);
3437 return; 3437 return;
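
The DAC960 conversion is the recurring block-layer migration in this series: elv_next_request()/blkdev_dequeue_request() become blk_peek_request()/blk_start_request(), and raw field access (Request->sector, Request->nr_sectors) becomes the blk_rq_pos()/blk_rq_sectors() accessors. A hedged sketch of the resulting request-function shape (handle_rq() is a hypothetical per-driver routine):

/* Sketch of the post-conversion dequeue loop. */
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		blk_start_request(rq);			/* dequeue it */
		handle_rq(rq, blk_rq_pos(rq),		/* was ->sector */
			  blk_rq_sectors(rq));		/* was ->nr_sectors */
	}
}
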
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ddea8e485cc..bb72ada9f07 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -112,7 +112,7 @@ config GDROM
112 with up to 1 GB of data. This drive will also read standard CD ROM 112 with up to 1 GB of data. This drive will also read standard CD ROM
113 disks. Select this option to access any disks in your GD ROM drive. 113 disks. Select this option to access any disks in your GD ROM drive.
114 Most users will want to say "Y" here. 114 Most users will want to say "Y" here.
115 You can also build this as a module which will be called gdrom.ko 115 You can also build this as a module which will be called gdrom.
116 116
117source "drivers/block/paride/Kconfig" 117source "drivers/block/paride/Kconfig"
118 118
@@ -412,7 +412,7 @@ config ATA_OVER_ETH
412 412
413config MG_DISK 413config MG_DISK
414 tristate "mGine mflash, gflash support" 414 tristate "mGine mflash, gflash support"
415 depends on ARM && ATA && GPIOLIB 415 depends on ARM && GPIOLIB
416 help 416 help
417 mGine mFlash(gFlash) block device driver 417 mGine mFlash(gFlash) block device driver
418 418
@@ -438,7 +438,7 @@ source "drivers/s390/block/Kconfig"
438 438
439config XILINX_SYSACE 439config XILINX_SYSACE
440 tristate "Xilinx SystemACE support" 440 tristate "Xilinx SystemACE support"
441 depends on 4xx 441 depends on 4xx || MICROBLAZE
442 help 442 help
443 Include support for the Xilinx SystemACE CompactFlash interface 443 Include support for the Xilinx SystemACE CompactFlash interface
444 444
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 8df436ff706..9c6e5b0fe89 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -112,8 +112,6 @@ module_param(fd_def_df0, ulong, 0);
112MODULE_LICENSE("GPL"); 112MODULE_LICENSE("GPL");
113 113
114static struct request_queue *floppy_queue; 114static struct request_queue *floppy_queue;
115#define QUEUE (floppy_queue)
116#define CURRENT elv_next_request(floppy_queue)
117 115
118/* 116/*
119 * Macros 117 * Macros
@@ -1335,64 +1333,60 @@ static int get_track(int drive, int track)
1335 1333
1336static void redo_fd_request(void) 1334static void redo_fd_request(void)
1337{ 1335{
1336 struct request *rq;
1338 unsigned int cnt, block, track, sector; 1337 unsigned int cnt, block, track, sector;
1339 int drive; 1338 int drive;
1340 struct amiga_floppy_struct *floppy; 1339 struct amiga_floppy_struct *floppy;
1341 char *data; 1340 char *data;
1342 unsigned long flags; 1341 unsigned long flags;
1342 int err;
1343 1343
1344 repeat: 1344next_req:
1345 if (!CURRENT) { 1345 rq = blk_fetch_request(floppy_queue);
1346 if (!rq) {
1346 /* Nothing left to do */ 1347 /* Nothing left to do */
1347 return; 1348 return;
1348 } 1349 }
1349 1350
1350 floppy = CURRENT->rq_disk->private_data; 1351 floppy = rq->rq_disk->private_data;
1351 drive = floppy - unit; 1352 drive = floppy - unit;
1352 1353
1354next_segment:
1353 /* Here someone could investigate to be more efficient */ 1355 /* Here someone could investigate to be more efficient */
1354 for (cnt = 0; cnt < CURRENT->current_nr_sectors; cnt++) { 1356 for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
1355#ifdef DEBUG 1357#ifdef DEBUG
1356 printk("fd: sector %ld + %d requested for %s\n", 1358 printk("fd: sector %ld + %d requested for %s\n",
1357 CURRENT->sector,cnt, 1359 blk_rq_pos(rq), cnt,
1358 (rq_data_dir(CURRENT) == READ) ? "read" : "write"); 1360 (rq_data_dir(rq) == READ) ? "read" : "write");
1359#endif 1361#endif
1360 block = CURRENT->sector + cnt; 1362 block = blk_rq_pos(rq) + cnt;
1361 if ((int)block > floppy->blocks) { 1363 if ((int)block > floppy->blocks) {
1362 end_request(CURRENT, 0); 1364 err = -EIO;
1363 goto repeat; 1365 break;
1364 } 1366 }
1365 1367
1366 track = block / (floppy->dtype->sects * floppy->type->sect_mult); 1368 track = block / (floppy->dtype->sects * floppy->type->sect_mult);
1367 sector = block % (floppy->dtype->sects * floppy->type->sect_mult); 1369 sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
1368 data = CURRENT->buffer + 512 * cnt; 1370 data = rq->buffer + 512 * cnt;
1369#ifdef DEBUG 1371#ifdef DEBUG
1370 printk("access to track %d, sector %d, with buffer at " 1372 printk("access to track %d, sector %d, with buffer at "
1371 "0x%08lx\n", track, sector, data); 1373 "0x%08lx\n", track, sector, data);
1372#endif 1374#endif
1373 1375
1374 if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) {
1375 printk(KERN_WARNING "do_fd_request: unknown command\n");
1376 end_request(CURRENT, 0);
1377 goto repeat;
1378 }
1379 if (get_track(drive, track) == -1) { 1376 if (get_track(drive, track) == -1) {
1380 end_request(CURRENT, 0); 1377 err = -EIO;
1381 goto repeat; 1378 break;
1382 } 1379 }
1383 1380
1384 switch (rq_data_dir(CURRENT)) { 1381 if (rq_data_dir(rq) == READ) {
1385 case READ:
1386 memcpy(data, floppy->trackbuf + sector * 512, 512); 1382 memcpy(data, floppy->trackbuf + sector * 512, 512);
1387 break; 1383 } else {
1388
1389 case WRITE:
1390 memcpy(floppy->trackbuf + sector * 512, data, 512); 1384 memcpy(floppy->trackbuf + sector * 512, data, 512);
1391 1385
1392 /* keep the drive spinning while writes are scheduled */ 1386 /* keep the drive spinning while writes are scheduled */
1393 if (!fd_motor_on(drive)) { 1387 if (!fd_motor_on(drive)) {
1394 end_request(CURRENT, 0); 1388 err = -EIO;
1395 goto repeat; 1389 break;
1396 } 1390 }
1397 /* 1391 /*
1398 * setup a callback to write the track buffer 1392 * setup a callback to write the track buffer
@@ -1404,14 +1398,12 @@ static void redo_fd_request(void)
1404 /* reset the timer */ 1398 /* reset the timer */
1405 mod_timer (flush_track_timer + drive, jiffies + 1); 1399 mod_timer (flush_track_timer + drive, jiffies + 1);
1406 local_irq_restore(flags); 1400 local_irq_restore(flags);
1407 break;
1408 } 1401 }
1409 } 1402 }
1410 CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
1411 CURRENT->sector += CURRENT->current_nr_sectors;
1412 1403
1413 end_request(CURRENT, 1); 1404 if (__blk_end_request_cur(rq, err))
1414 goto repeat; 1405 goto next_segment;
1406 goto next_req;
1415} 1407}
1416 1408
1417static void do_fd_request(struct request_queue * q) 1409static void do_fd_request(struct request_queue * q)
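
amiflop now fetches requests with blk_fetch_request() and finishes them one segment at a time: __blk_end_request_cur() completes just the current chunk and returns true while the request still has sectors outstanding, which is what drives the next_segment loop above. A minimal sketch of that control flow (process_segment() is hypothetical):

static void example_redo(struct request_queue *q)
{
	struct request *rq;
	int err;

	while ((rq = blk_fetch_request(q)) != NULL) {
		do {
			err = process_segment(rq);	/* 0 or -EIO */
		} while (__blk_end_request_cur(rq, err));
	}
}
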
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 4234c11c1e4..f5e7180d7f4 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -79,9 +79,7 @@
79#undef DEBUG 79#undef DEBUG
80 80
81static struct request_queue *floppy_queue; 81static struct request_queue *floppy_queue;
82 82static struct request *fd_request;
83#define QUEUE (floppy_queue)
84#define CURRENT elv_next_request(floppy_queue)
85 83
86/* Disk types: DD, HD, ED */ 84/* Disk types: DD, HD, ED */
87static struct atari_disk_type { 85static struct atari_disk_type {
@@ -376,6 +374,12 @@ static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0);
376static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0); 374static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0);
377static DEFINE_TIMER(fd_timer, check_change, 0, 0); 375static DEFINE_TIMER(fd_timer, check_change, 0, 0);
378 376
377static void fd_end_request_cur(int err)
378{
379 if (!__blk_end_request_cur(fd_request, err))
380 fd_request = NULL;
381}
382
379static inline void start_motor_off_timer(void) 383static inline void start_motor_off_timer(void)
380{ 384{
381 mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY); 385 mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY);
@@ -606,15 +610,15 @@ static void fd_error( void )
606 return; 610 return;
607 } 611 }
608 612
609 if (!CURRENT) 613 if (!fd_request)
610 return; 614 return;
611 615
612 CURRENT->errors++; 616 fd_request->errors++;
613 if (CURRENT->errors >= MAX_ERRORS) { 617 if (fd_request->errors >= MAX_ERRORS) {
614 printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive ); 618 printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
615 end_request(CURRENT, 0); 619 fd_end_request_cur(-EIO);
616 } 620 }
617 else if (CURRENT->errors == RECALIBRATE_ERRORS) { 621 else if (fd_request->errors == RECALIBRATE_ERRORS) {
618 printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive ); 622 printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
619 if (SelectedDrive != -1) 623 if (SelectedDrive != -1)
620 SUD.track = -1; 624 SUD.track = -1;
@@ -725,16 +729,14 @@ static void do_fd_action( int drive )
725 if (IS_BUFFERED( drive, ReqSide, ReqTrack )) { 729 if (IS_BUFFERED( drive, ReqSide, ReqTrack )) {
726 if (ReqCmd == READ) { 730 if (ReqCmd == READ) {
727 copy_buffer( SECTOR_BUFFER(ReqSector), ReqData ); 731 copy_buffer( SECTOR_BUFFER(ReqSector), ReqData );
728 if (++ReqCnt < CURRENT->current_nr_sectors) { 732 if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
729 /* read next sector */ 733 /* read next sector */
730 setup_req_params( drive ); 734 setup_req_params( drive );
731 goto repeat; 735 goto repeat;
732 } 736 }
733 else { 737 else {
734 /* all sectors finished */ 738 /* all sectors finished */
735 CURRENT->nr_sectors -= CURRENT->current_nr_sectors; 739 fd_end_request_cur(0);
736 CURRENT->sector += CURRENT->current_nr_sectors;
737 end_request(CURRENT, 1);
738 redo_fd_request(); 740 redo_fd_request();
739 return; 741 return;
740 } 742 }
@@ -1132,16 +1134,14 @@ static void fd_rwsec_done1(int status)
1132 } 1134 }
1133 } 1135 }
1134 1136
1135 if (++ReqCnt < CURRENT->current_nr_sectors) { 1137 if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
1136 /* read next sector */ 1138 /* read next sector */
1137 setup_req_params( SelectedDrive ); 1139 setup_req_params( SelectedDrive );
1138 do_fd_action( SelectedDrive ); 1140 do_fd_action( SelectedDrive );
1139 } 1141 }
1140 else { 1142 else {
1141 /* all sectors finished */ 1143 /* all sectors finished */
1142 CURRENT->nr_sectors -= CURRENT->current_nr_sectors; 1144 fd_end_request_cur(0);
1143 CURRENT->sector += CURRENT->current_nr_sectors;
1144 end_request(CURRENT, 1);
1145 redo_fd_request(); 1145 redo_fd_request();
1146 } 1146 }
1147 return; 1147 return;
@@ -1382,7 +1382,7 @@ static void setup_req_params( int drive )
1382 ReqData = ReqBuffer + 512 * ReqCnt; 1382 ReqData = ReqBuffer + 512 * ReqCnt;
1383 1383
1384 if (UseTrackbuffer) 1384 if (UseTrackbuffer)
1385 read_track = (ReqCmd == READ && CURRENT->errors == 0); 1385 read_track = (ReqCmd == READ && fd_request->errors == 0);
1386 else 1386 else
1387 read_track = 0; 1387 read_track = 0;
1388 1388
@@ -1396,25 +1396,27 @@ static void redo_fd_request(void)
1396 int drive, type; 1396 int drive, type;
1397 struct atari_floppy_struct *floppy; 1397 struct atari_floppy_struct *floppy;
1398 1398
1399 DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n", 1399 DPRINT(("redo_fd_request: fd_request=%p dev=%s fd_request->sector=%ld\n",
1400 CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "", 1400 fd_request, fd_request ? fd_request->rq_disk->disk_name : "",
1401 CURRENT ? CURRENT->sector : 0 )); 1401 fd_request ? blk_rq_pos(fd_request) : 0 ));
1402 1402
1403 IsFormatting = 0; 1403 IsFormatting = 0;
1404 1404
1405repeat: 1405repeat:
1406 if (!fd_request) {
1407 fd_request = blk_fetch_request(floppy_queue);
1408 if (!fd_request)
1409 goto the_end;
1410 }
1406 1411
1407 if (!CURRENT) 1412 floppy = fd_request->rq_disk->private_data;
1408 goto the_end;
1409
1410 floppy = CURRENT->rq_disk->private_data;
1411 drive = floppy - unit; 1413 drive = floppy - unit;
1412 type = floppy->type; 1414 type = floppy->type;
1413 1415
1414 if (!UD.connected) { 1416 if (!UD.connected) {
1415 /* drive not connected */ 1417 /* drive not connected */
1416 printk(KERN_ERR "Unknown Device: fd%d\n", drive ); 1418 printk(KERN_ERR "Unknown Device: fd%d\n", drive );
1417 end_request(CURRENT, 0); 1419 fd_end_request_cur(-EIO);
1418 goto repeat; 1420 goto repeat;
1419 } 1421 }
1420 1422
@@ -1430,12 +1432,12 @@ repeat:
1430 /* user supplied disk type */ 1432 /* user supplied disk type */
1431 if (--type >= NUM_DISK_MINORS) { 1433 if (--type >= NUM_DISK_MINORS) {
1432 printk(KERN_WARNING "fd%d: invalid disk format", drive ); 1434 printk(KERN_WARNING "fd%d: invalid disk format", drive );
1433 end_request(CURRENT, 0); 1435 fd_end_request_cur(-EIO);
1434 goto repeat; 1436 goto repeat;
1435 } 1437 }
1436 if (minor2disktype[type].drive_types > DriveType) { 1438 if (minor2disktype[type].drive_types > DriveType) {
1437 printk(KERN_WARNING "fd%d: unsupported disk format", drive ); 1439 printk(KERN_WARNING "fd%d: unsupported disk format", drive );
1438 end_request(CURRENT, 0); 1440 fd_end_request_cur(-EIO);
1439 goto repeat; 1441 goto repeat;
1440 } 1442 }
1441 type = minor2disktype[type].index; 1443 type = minor2disktype[type].index;
@@ -1444,8 +1446,8 @@ repeat:
1444 UD.autoprobe = 0; 1446 UD.autoprobe = 0;
1445 } 1447 }
1446 1448
1447 if (CURRENT->sector + 1 > UDT->blocks) { 1449 if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {
1448 end_request(CURRENT, 0); 1450 fd_end_request_cur(-EIO);
1449 goto repeat; 1451 goto repeat;
1450 } 1452 }
1451 1453
@@ -1453,9 +1455,9 @@ repeat:
1453 del_timer( &motor_off_timer ); 1455 del_timer( &motor_off_timer );
1454 1456
1455 ReqCnt = 0; 1457 ReqCnt = 0;
1456 ReqCmd = rq_data_dir(CURRENT); 1458 ReqCmd = rq_data_dir(fd_request);
1457 ReqBlock = CURRENT->sector; 1459 ReqBlock = blk_rq_pos(fd_request);
1458 ReqBuffer = CURRENT->buffer; 1460 ReqBuffer = fd_request->buffer;
1459 setup_req_params( drive ); 1461 setup_req_params( drive );
1460 do_fd_action( drive ); 1462 do_fd_action( drive );
1461 1463
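
ataflop differs from amiflop in that the fetched request must survive across interrupts, so it is parked in the static fd_request and completed piecemeal from IRQ context; fd_end_request_cur() clears the pointer once __blk_end_request_cur() reports full completion, letting redo_fd_request() fetch the next one. A sketch of the same shape, with hypothetical names:

static struct request *cur_rq;	/* stands in for fd_request */

static void end_cur(int err)
{
	if (!__blk_end_request_cur(cur_rq, err))
		cur_rq = NULL;	/* fully completed; fetch a new one */
}

static void fetch_if_idle(struct request_queue *q)
{
	if (!cur_rq)
		cur_rq = blk_fetch_request(q);
}
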
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 5f7e64ba87e..4bf8705b3ac 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -407,12 +407,7 @@ static int __init ramdisk_size(char *str)
407 rd_size = simple_strtol(str, NULL, 0); 407 rd_size = simple_strtol(str, NULL, 0);
408 return 1; 408 return 1;
409} 409}
410static int __init ramdisk_size2(char *str) 410__setup("ramdisk_size=", ramdisk_size);
411{
412 return ramdisk_size(str);
413}
414__setup("ramdisk=", ramdisk_size);
415__setup("ramdisk_size=", ramdisk_size2);
416#endif 411#endif
417 412
418/* 413/*
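
brd drops the long-deprecated ramdisk= alias along with its forwarding stub; only ramdisk_size= remains. For reference, a __setup() handler of this form parses an early boot parameter such as ramdisk_size=65536 on the kernel command line (sketch with hypothetical names):

static unsigned long my_size;

static int __init my_size_setup(char *str)
{
	my_size = simple_strtol(str, NULL, 0);
	return 1;	/* parameter consumed */
}
__setup("my_size=", my_size_setup);
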
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 4d4d5e0d3fa..b22cec97ea1 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -180,11 +180,13 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
180 __u32); 180 __u32);
181static void start_io(ctlr_info_t *h); 181static void start_io(ctlr_info_t *h);
182static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, 182static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
183 unsigned int use_unit_num, unsigned int log_unit,
184 __u8 page_code, unsigned char *scsi3addr, int cmd_type); 183 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
185static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, 184static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
186 unsigned int use_unit_num, unsigned int log_unit, 185 __u8 page_code, unsigned char scsi3addr[],
187 __u8 page_code, int cmd_type); 186 int cmd_type);
187static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
188 int attempt_retry);
189static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
188 190
189static void fail_all_cmds(unsigned long ctlr); 191static void fail_all_cmds(unsigned long ctlr);
190static int scan_thread(void *data); 192static int scan_thread(void *data);
@@ -437,6 +439,194 @@ static void __devinit cciss_procinit(int i)
437} 439}
438#endif /* CONFIG_PROC_FS */ 440#endif /* CONFIG_PROC_FS */
439 441
442#define MAX_PRODUCT_NAME_LEN 19
443
444#define to_hba(n) container_of(n, struct ctlr_info, dev)
445#define to_drv(n) container_of(n, drive_info_struct, dev)
446
447static struct device_type cciss_host_type = {
448 .name = "cciss_host",
449};
450
451static ssize_t dev_show_unique_id(struct device *dev,
452 struct device_attribute *attr,
453 char *buf)
454{
455 drive_info_struct *drv = to_drv(dev);
456 struct ctlr_info *h = to_hba(drv->dev.parent);
457 __u8 sn[16];
458 unsigned long flags;
459 int ret = 0;
460
461 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
462 if (h->busy_configuring)
463 ret = -EBUSY;
464 else
465 memcpy(sn, drv->serial_no, sizeof(sn));
466 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
467
468 if (ret)
469 return ret;
470 else
471 return snprintf(buf, 16 * 2 + 2,
472 "%02X%02X%02X%02X%02X%02X%02X%02X"
473 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
474 sn[0], sn[1], sn[2], sn[3],
475 sn[4], sn[5], sn[6], sn[7],
476 sn[8], sn[9], sn[10], sn[11],
477 sn[12], sn[13], sn[14], sn[15]);
478}
479DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);
480
481static ssize_t dev_show_vendor(struct device *dev,
482 struct device_attribute *attr,
483 char *buf)
484{
485 drive_info_struct *drv = to_drv(dev);
486 struct ctlr_info *h = to_hba(drv->dev.parent);
487 char vendor[VENDOR_LEN + 1];
488 unsigned long flags;
489 int ret = 0;
490
491 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
492 if (h->busy_configuring)
493 ret = -EBUSY;
494 else
495 memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
496 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
497
498 if (ret)
499 return ret;
500 else
501 return snprintf(buf, sizeof(vendor) + 1, "%s\n", vendor);
502}
503DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);
504
505static ssize_t dev_show_model(struct device *dev,
506 struct device_attribute *attr,
507 char *buf)
508{
509 drive_info_struct *drv = to_drv(dev);
510 struct ctlr_info *h = to_hba(drv->dev.parent);
511 char model[MODEL_LEN + 1];
512 unsigned long flags;
513 int ret = 0;
514
515 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
516 if (h->busy_configuring)
517 ret = -EBUSY;
518 else
519 memcpy(model, drv->model, MODEL_LEN + 1);
520 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
521
522 if (ret)
523 return ret;
524 else
525 return snprintf(buf, sizeof(model) + 1, "%s\n", model);
526}
527DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);
528
529static ssize_t dev_show_rev(struct device *dev,
530 struct device_attribute *attr,
531 char *buf)
532{
533 drive_info_struct *drv = to_drv(dev);
534 struct ctlr_info *h = to_hba(drv->dev.parent);
535 char rev[REV_LEN + 1];
536 unsigned long flags;
537 int ret = 0;
538
539 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
540 if (h->busy_configuring)
541 ret = -EBUSY;
542 else
543 memcpy(rev, drv->rev, REV_LEN + 1);
544 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
545
546 if (ret)
547 return ret;
548 else
549 return snprintf(buf, sizeof(rev) + 1, "%s\n", rev);
550}
551DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
552
553static struct attribute *cciss_dev_attrs[] = {
554 &dev_attr_unique_id.attr,
555 &dev_attr_model.attr,
556 &dev_attr_vendor.attr,
557 &dev_attr_rev.attr,
558 NULL
559};
560
561static struct attribute_group cciss_dev_attr_group = {
562 .attrs = cciss_dev_attrs,
563};
564
565static struct attribute_group *cciss_dev_attr_groups[] = {
566 &cciss_dev_attr_group,
567 NULL
568};
569
570static struct device_type cciss_dev_type = {
571 .name = "cciss_device",
572 .groups = cciss_dev_attr_groups,
573};
574
575static struct bus_type cciss_bus_type = {
576 .name = "cciss",
577};
578
579
580/*
581 * Initialize sysfs entry for each controller. This sets up and registers
582 * the 'cciss#' directory for each individual controller under
583 * /sys/bus/pci/devices/<dev>/.
584 */
585static int cciss_create_hba_sysfs_entry(struct ctlr_info *h)
586{
587 device_initialize(&h->dev);
588 h->dev.type = &cciss_host_type;
589 h->dev.bus = &cciss_bus_type;
590 dev_set_name(&h->dev, "%s", h->devname);
591 h->dev.parent = &h->pdev->dev;
592
593 return device_add(&h->dev);
594}
595
596/*
597 * Remove sysfs entries for an hba.
598 */
599static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
600{
601 device_del(&h->dev);
602}
603
604/*
605 * Initialize sysfs for each logical drive. This sets up and registers
606 * the 'c#d#' directory for each individual logical drive under
607 * /sys/bus/pci/devices/<dev>/cciss#/. We also create a link from
608 * /sys/block/cciss!c#d# to this entry.
609 */
610static int cciss_create_ld_sysfs_entry(struct ctlr_info *h,
611 drive_info_struct *drv,
612 int drv_index)
613{
614 device_initialize(&drv->dev);
615 drv->dev.type = &cciss_dev_type;
616 drv->dev.bus = &cciss_bus_type;
617 dev_set_name(&drv->dev, "c%dd%d", h->ctlr, drv_index);
618 drv->dev.parent = &h->dev;
619 return device_add(&drv->dev);
620}
621
622/*
623 * Remove sysfs entries for a logical drive.
624 */
625static void cciss_destroy_ld_sysfs_entry(drive_info_struct *drv)
626{
627 device_del(&drv->dev);
628}
629
440/* 630/*
441 * For operations that cannot sleep, a command block is allocated at init, 631 * For operations that cannot sleep, a command block is allocated at init,
442 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 632 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
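
The sysfs hunk wires cciss into the driver model: a private cciss bus_type, one device per controller (cciss_create_hba_sysfs_entry) and one c#d# device per logical drive, each carrying read-only attributes. The show routines snapshot driver state under CCISS_LOCK and format after dropping it; a hedged sketch of that shape, with illustrative names:

static ssize_t dev_show_example(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	char snap[MODEL_LEN + 1];
	unsigned long flags;

	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		return -EBUSY;
	}
	memcpy(snap, drv->model, sizeof(snap));
	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
	return snprintf(buf, PAGE_SIZE, "%s\n", snap);
}
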
@@ -1299,7 +1489,6 @@ static void cciss_softirq_done(struct request *rq)
1299{ 1489{
1300 CommandList_struct *cmd = rq->completion_data; 1490 CommandList_struct *cmd = rq->completion_data;
1301 ctlr_info_t *h = hba[cmd->ctlr]; 1491 ctlr_info_t *h = hba[cmd->ctlr];
1302 unsigned int nr_bytes;
1303 unsigned long flags; 1492 unsigned long flags;
1304 u64bit temp64; 1493 u64bit temp64;
1305 int i, ddir; 1494 int i, ddir;
@@ -1321,15 +1510,11 @@ static void cciss_softirq_done(struct request *rq)
1321 printk("Done with %p\n", rq); 1510 printk("Done with %p\n", rq);
1322#endif /* CCISS_DEBUG */ 1511#endif /* CCISS_DEBUG */
1323 1512
1324 /* 1513 /* set the residual count for pc requests */
1325 * Store the full size and set the residual count for pc requests
1326 */
1327 nr_bytes = blk_rq_bytes(rq);
1328 if (blk_pc_request(rq)) 1514 if (blk_pc_request(rq))
1329 rq->data_len = cmd->err_info->ResidualCnt; 1515 rq->resid_len = cmd->err_info->ResidualCnt;
1330 1516
1331 if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, nr_bytes)) 1517 blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
1332 BUG();
1333 1518
1334 spin_lock_irqsave(&h->lock, flags); 1519 spin_lock_irqsave(&h->lock, flags);
1335 cmd_free(h, cmd, 1); 1520 cmd_free(h, cmd, 1);
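
On the completion side the conversion replaces blk_end_request() plus a BUG() on partial completion with blk_end_request_all(), and moves the packet-command residual from the overloaded rq->data_len to the new rq->resid_len field. A short sketch:

/* Sketch: finish an entire request after the conversion. */
static void complete_whole_request(struct request *rq, int error,
				   unsigned int residual)
{
	if (blk_pc_request(rq))
		rq->resid_len = residual;	/* was rq->data_len */
	blk_end_request_all(rq, error ? -EIO : 0);
}
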
@@ -1337,6 +1522,56 @@ static void cciss_softirq_done(struct request *rq)
1337 spin_unlock_irqrestore(&h->lock, flags); 1522 spin_unlock_irqrestore(&h->lock, flags);
1338} 1523}
1339 1524
1525static void log_unit_to_scsi3addr(ctlr_info_t *h, unsigned char scsi3addr[],
1526 uint32_t log_unit)
1527{
1528 log_unit = h->drv[log_unit].LunID & 0x03fff;
1529 memset(&scsi3addr[4], 0, 4);
1530 memcpy(&scsi3addr[0], &log_unit, 4);
1531 scsi3addr[3] |= 0x40;
1532}
1533
1534/* This function gets the SCSI vendor, model, and revision of a logical drive
1535 * via the inquiry page 0. Model, vendor, and rev are set to empty strings if
1536 * they cannot be read.
1537 */
1538static void cciss_get_device_descr(int ctlr, int logvol, int withirq,
1539 char *vendor, char *model, char *rev)
1540{
1541 int rc;
1542 InquiryData_struct *inq_buf;
1543 unsigned char scsi3addr[8];
1544
1545 *vendor = '\0';
1546 *model = '\0';
1547 *rev = '\0';
1548
1549 inq_buf = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1550 if (!inq_buf)
1551 return;
1552
1553 log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
1554 if (withirq)
1555 rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf,
1556 sizeof(InquiryData_struct), 0,
1557 scsi3addr, TYPE_CMD);
1558 else
1559 rc = sendcmd(CISS_INQUIRY, ctlr, inq_buf,
1560 sizeof(InquiryData_struct), 0,
1561 scsi3addr, TYPE_CMD);
1562 if (rc == IO_OK) {
1563 memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN);
1564 vendor[VENDOR_LEN] = '\0';
1565 memcpy(model, &inq_buf->data_byte[16], MODEL_LEN);
1566 model[MODEL_LEN] = '\0';
1567 memcpy(rev, &inq_buf->data_byte[32], REV_LEN);
1568 rev[REV_LEN] = '\0';
1569 }
1570
1571 kfree(inq_buf);
1572 return;
1573}
1574
1340/* This function gets the serial number of a logical drive via 1575/* This function gets the serial number of a logical drive via
1341 * inquiry page 0x83. Serial no. is 16 bytes. If the serial 1576 * inquiry page 0x83. Serial no. is 16 bytes. If the serial
1342 * number cannot be had, for whatever reason, 16 bytes of 0xff 1577 * number cannot be had, for whatever reason, 16 bytes of 0xff
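
With log_unit_to_scsi3addr() in place, callers build the 8-byte address once instead of threading use_unit_num/log_unit through every sendcmd variant; the addressing mode is now implicit in the scsi3addr bytes. A hedged usage sketch of the new signature, mirroring the converted callers:

unsigned char scsi3addr[8];

log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf, sizeof(*inq_buf),
		     0 /* page code */, scsi3addr, TYPE_CMD);
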
@@ -1348,6 +1583,7 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
1348#define PAGE_83_INQ_BYTES 64 1583#define PAGE_83_INQ_BYTES 64
1349 int rc; 1584 int rc;
1350 unsigned char *buf; 1585 unsigned char *buf;
1586 unsigned char scsi3addr[8];
1351 1587
1352 if (buflen > 16) 1588 if (buflen > 16)
1353 buflen = 16; 1589 buflen = 16;
@@ -1356,12 +1592,13 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
1356 if (!buf) 1592 if (!buf)
1357 return; 1593 return;
1358 memset(serial_no, 0, buflen); 1594 memset(serial_no, 0, buflen);
1595 log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
1359 if (withirq) 1596 if (withirq)
1360 rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf, 1597 rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf,
1361 PAGE_83_INQ_BYTES, 1, logvol, 0x83, TYPE_CMD); 1598 PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
1362 else 1599 else
1363 rc = sendcmd(CISS_INQUIRY, ctlr, buf, 1600 rc = sendcmd(CISS_INQUIRY, ctlr, buf,
1364 PAGE_83_INQ_BYTES, 1, logvol, 0x83, NULL, TYPE_CMD); 1601 PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
1365 if (rc == IO_OK) 1602 if (rc == IO_OK)
1366 memcpy(serial_no, &buf[8], buflen); 1603 memcpy(serial_no, &buf[8], buflen);
1367 kfree(buf); 1604 kfree(buf);
@@ -1377,7 +1614,7 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1377 disk->first_minor = drv_index << NWD_SHIFT; 1614 disk->first_minor = drv_index << NWD_SHIFT;
1378 disk->fops = &cciss_fops; 1615 disk->fops = &cciss_fops;
1379 disk->private_data = &h->drv[drv_index]; 1616 disk->private_data = &h->drv[drv_index];
1380 disk->driverfs_dev = &h->pdev->dev; 1617 disk->driverfs_dev = &h->drv[drv_index].dev;
1381 1618
1382 /* Set up queue information */ 1619 /* Set up queue information */
1383 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); 1620 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
@@ -1394,8 +1631,8 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1394 1631
1395 disk->queue->queuedata = h; 1632 disk->queue->queuedata = h;
1396 1633
1397 blk_queue_hardsect_size(disk->queue, 1634 blk_queue_logical_block_size(disk->queue,
1398 h->drv[drv_index].block_size); 1635 h->drv[drv_index].block_size);
1399 1636
1400 /* Make sure all queue data is written out before */ 1637 /* Make sure all queue data is written out before */
1401 /* setting h->drv[drv_index].queue, as setting this */ 1638 /* setting h->drv[drv_index].queue, as setting this */
@@ -1468,6 +1705,8 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1468 drvinfo->block_size = block_size; 1705 drvinfo->block_size = block_size;
1469 drvinfo->nr_blocks = total_size + 1; 1706 drvinfo->nr_blocks = total_size + 1;
1470 1707
1708 cciss_get_device_descr(ctlr, drv_index, 1, drvinfo->vendor,
1709 drvinfo->model, drvinfo->rev);
1471 cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no, 1710 cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
1472 sizeof(drvinfo->serial_no)); 1711 sizeof(drvinfo->serial_no));
1473 1712
@@ -1517,6 +1756,9 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1517 h->drv[drv_index].cylinders = drvinfo->cylinders; 1756 h->drv[drv_index].cylinders = drvinfo->cylinders;
1518 h->drv[drv_index].raid_level = drvinfo->raid_level; 1757 h->drv[drv_index].raid_level = drvinfo->raid_level;
1519 memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16); 1758 memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16);
1759 memcpy(h->drv[drv_index].vendor, drvinfo->vendor, VENDOR_LEN + 1);
1760 memcpy(h->drv[drv_index].model, drvinfo->model, MODEL_LEN + 1);
1761 memcpy(h->drv[drv_index].rev, drvinfo->rev, REV_LEN + 1);
1520 1762
1521 ++h->num_luns; 1763 ++h->num_luns;
1522 disk = h->gendisk[drv_index]; 1764 disk = h->gendisk[drv_index];
@@ -1591,6 +1833,8 @@ static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
1591 } 1833 }
1592 } 1834 }
1593 h->drv[drv_index].LunID = lunid; 1835 h->drv[drv_index].LunID = lunid;
1836 if (cciss_create_ld_sysfs_entry(h, &h->drv[drv_index], drv_index))
1837 goto err_free_disk;
1594 1838
1595 /* Don't need to mark this busy because nobody */ 1839 /* Don't need to mark this busy because nobody */
1596 /* else knows about this disk yet to contend */ 1840 /* else knows about this disk yet to contend */
@@ -1598,6 +1842,11 @@ static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
1598 h->drv[drv_index].busy_configuring = 0; 1842 h->drv[drv_index].busy_configuring = 0;
1599 wmb(); 1843 wmb();
1600 return drv_index; 1844 return drv_index;
1845
1846err_free_disk:
1847 put_disk(h->gendisk[drv_index]);
1848 h->gendisk[drv_index] = NULL;
1849 return -1;
1601} 1850}
1602 1851
1603/* This is for the special case of a controller which 1852/* This is for the special case of a controller which
@@ -1668,8 +1917,8 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1668 goto mem_msg; 1917 goto mem_msg;
1669 1918
1670 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff, 1919 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1671 sizeof(ReportLunData_struct), 0, 1920 sizeof(ReportLunData_struct),
1672 0, 0, TYPE_CMD); 1921 0, CTLR_LUNID, TYPE_CMD);
1673 1922
1674 if (return_code == IO_OK) 1923 if (return_code == IO_OK)
1675 listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength); 1924 listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
@@ -1718,6 +1967,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1718 h->drv[i].busy_configuring = 1; 1967 h->drv[i].busy_configuring = 1;
1719 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 1968 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1720 return_code = deregister_disk(h, i, 1); 1969 return_code = deregister_disk(h, i, 1);
1970 cciss_destroy_ld_sysfs_entry(&h->drv[i]);
1721 h->drv[i].busy_configuring = 0; 1971 h->drv[i].busy_configuring = 0;
1722 } 1972 }
1723 } 1973 }
@@ -1877,11 +2127,9 @@ static int deregister_disk(ctlr_info_t *h, int drv_index,
1877 return 0; 2127 return 0;
1878} 2128}
1879 2129
1880static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller, 2130static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
1881 1: address logical volume log_unit, 2131 size_t size, __u8 page_code, unsigned char *scsi3addr,
1882 2: periph device address is scsi3addr */ 2132 int cmd_type)
1883 unsigned int log_unit, __u8 page_code,
1884 unsigned char *scsi3addr, int cmd_type)
1885{ 2133{
1886 ctlr_info_t *h = hba[ctlr]; 2134 ctlr_info_t *h = hba[ctlr];
1887 u64bit buff_dma_handle; 2135 u64bit buff_dma_handle;
@@ -1897,27 +2145,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
1897 c->Header.SGTotal = 0; 2145 c->Header.SGTotal = 0;
1898 } 2146 }
1899 c->Header.Tag.lower = c->busaddr; 2147 c->Header.Tag.lower = c->busaddr;
2148 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
1900 2149
1901 c->Request.Type.Type = cmd_type; 2150 c->Request.Type.Type = cmd_type;
1902 if (cmd_type == TYPE_CMD) { 2151 if (cmd_type == TYPE_CMD) {
1903 switch (cmd) { 2152 switch (cmd) {
1904 case CISS_INQUIRY: 2153 case CISS_INQUIRY:
1905 /* If the logical unit number is 0 then, this is going
1906 to controller so It's a physical command
1907 mode = 0 target = 0. So we have nothing to write.
1908 otherwise, if use_unit_num == 1,
1909 mode = 1(volume set addressing) target = LUNID
1910 otherwise, if use_unit_num == 2,
1911 mode = 0(periph dev addr) target = scsi3addr */
1912 if (use_unit_num == 1) {
1913 c->Header.LUN.LogDev.VolId =
1914 h->drv[log_unit].LunID;
1915 c->Header.LUN.LogDev.Mode = 1;
1916 } else if (use_unit_num == 2) {
1917 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1918 8);
1919 c->Header.LUN.LogDev.Mode = 0;
1920 }
1921 /* are we trying to read a vital product page */ 2154 /* are we trying to read a vital product page */
1922 if (page_code != 0) { 2155 if (page_code != 0) {
1923 c->Request.CDB[1] = 0x01; 2156 c->Request.CDB[1] = 0x01;
@@ -1947,8 +2180,6 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
1947 break; 2180 break;
1948 2181
1949 case CCISS_READ_CAPACITY: 2182 case CCISS_READ_CAPACITY:
1950 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1951 c->Header.LUN.LogDev.Mode = 1;
1952 c->Request.CDBLen = 10; 2183 c->Request.CDBLen = 10;
1953 c->Request.Type.Attribute = ATTR_SIMPLE; 2184 c->Request.Type.Attribute = ATTR_SIMPLE;
1954 c->Request.Type.Direction = XFER_READ; 2185 c->Request.Type.Direction = XFER_READ;
@@ -1956,8 +2187,6 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
1956 c->Request.CDB[0] = cmd; 2187 c->Request.CDB[0] = cmd;
1957 break; 2188 break;
1958 case CCISS_READ_CAPACITY_16: 2189 case CCISS_READ_CAPACITY_16:
1959 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1960 c->Header.LUN.LogDev.Mode = 1;
1961 c->Request.CDBLen = 16; 2190 c->Request.CDBLen = 16;
1962 c->Request.Type.Attribute = ATTR_SIMPLE; 2191 c->Request.Type.Attribute = ATTR_SIMPLE;
1963 c->Request.Type.Direction = XFER_READ; 2192 c->Request.Type.Direction = XFER_READ;
@@ -1979,6 +2208,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
1979 c->Request.CDB[0] = BMIC_WRITE; 2208 c->Request.CDB[0] = BMIC_WRITE;
1980 c->Request.CDB[6] = BMIC_CACHE_FLUSH; 2209 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1981 break; 2210 break;
2211 case TEST_UNIT_READY:
2212 c->Request.CDBLen = 6;
2213 c->Request.Type.Attribute = ATTR_SIMPLE;
2214 c->Request.Type.Direction = XFER_NONE;
2215 c->Request.Timeout = 0;
2216 break;
1982 default: 2217 default:
1983 printk(KERN_WARNING 2218 printk(KERN_WARNING
1984 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd); 2219 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
@@ -1997,13 +2232,13 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
1997 memcpy(&c->Request.CDB[4], buff, 8); 2232 memcpy(&c->Request.CDB[4], buff, 8);
1998 break; 2233 break;
1999 case 1: /* RESET message */ 2234 case 1: /* RESET message */
2000 c->Request.CDBLen = 12; 2235 c->Request.CDBLen = 16;
2001 c->Request.Type.Attribute = ATTR_SIMPLE; 2236 c->Request.Type.Attribute = ATTR_SIMPLE;
2002 c->Request.Type.Direction = XFER_WRITE; 2237 c->Request.Type.Direction = XFER_NONE;
2003 c->Request.Timeout = 0; 2238 c->Request.Timeout = 0;
2004 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 2239 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
2005 c->Request.CDB[0] = cmd; /* reset */ 2240 c->Request.CDB[0] = cmd; /* reset */
2006 c->Request.CDB[1] = 0x04; /* reset a LUN */ 2241 c->Request.CDB[1] = 0x03; /* reset a target */
2007 break; 2242 break;
2008 case 3: /* No-Op message */ 2243 case 3: /* No-Op message */
2009 c->Request.CDBLen = 1; 2244 c->Request.CDBLen = 1;
@@ -2035,114 +2270,152 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_
2035 return status; 2270 return status;
2036} 2271}
2037 2272
2038static int sendcmd_withirq(__u8 cmd, 2273static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
2039 int ctlr,
2040 void *buff,
2041 size_t size,
2042 unsigned int use_unit_num,
2043 unsigned int log_unit, __u8 page_code, int cmd_type)
2044{ 2274{
2045 ctlr_info_t *h = hba[ctlr]; 2275 switch (c->err_info->ScsiStatus) {
2046 CommandList_struct *c; 2276 case SAM_STAT_GOOD:
2277 return IO_OK;
2278 case SAM_STAT_CHECK_CONDITION:
2279 switch (0xf & c->err_info->SenseInfo[2]) {
2280 case 0: return IO_OK; /* no sense */
2281 case 1: return IO_OK; /* recovered error */
2282 default:
2283 printk(KERN_WARNING "cciss%d: cmd 0x%02x "
2284 "check condition, sense key = 0x%02x\n",
2285 h->ctlr, c->Request.CDB[0],
2286 c->err_info->SenseInfo[2]);
2287 }
2288 break;
2289 default:
2290 printk(KERN_WARNING "cciss%d: cmd 0x%02x "
2291 "scsi status = 0x%02x\n", h->ctlr,
2292 c->Request.CDB[0], c->err_info->ScsiStatus);
2293 break;
2294 }
2295 return IO_ERROR;
2296}
2297
2298static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
2299{
2300 int return_status = IO_OK;
2301
2302 if (c->err_info->CommandStatus == CMD_SUCCESS)
2303 return IO_OK;
2304
2305 switch (c->err_info->CommandStatus) {
2306 case CMD_TARGET_STATUS:
2307 return_status = check_target_status(h, c);
2308 break;
2309 case CMD_DATA_UNDERRUN:
2310 case CMD_DATA_OVERRUN:
2311 /* expected for inquiry and report lun commands */
2312 break;
2313 case CMD_INVALID:
2314 printk(KERN_WARNING "cciss: cmd 0x%02x is "
2315 "reported invalid\n", c->Request.CDB[0]);
2316 return_status = IO_ERROR;
2317 break;
2318 case CMD_PROTOCOL_ERR:
2319 printk(KERN_WARNING "cciss: cmd 0x%02x has "
2320 "protocol error \n", c->Request.CDB[0]);
2321 return_status = IO_ERROR;
2322 break;
2323 case CMD_HARDWARE_ERR:
2324 printk(KERN_WARNING "cciss: cmd 0x%02x had "
2325 " hardware error\n", c->Request.CDB[0]);
2326 return_status = IO_ERROR;
2327 break;
2328 case CMD_CONNECTION_LOST:
2329 printk(KERN_WARNING "cciss: cmd 0x%02x had "
2330 "connection lost\n", c->Request.CDB[0]);
2331 return_status = IO_ERROR;
2332 break;
2333 case CMD_ABORTED:
2334 printk(KERN_WARNING "cciss: cmd 0x%02x was "
2335 "aborted\n", c->Request.CDB[0]);
2336 return_status = IO_ERROR;
2337 break;
2338 case CMD_ABORT_FAILED:
2339 printk(KERN_WARNING "cciss: cmd 0x%02x reports "
2340 "abort failed\n", c->Request.CDB[0]);
2341 return_status = IO_ERROR;
2342 break;
2343 case CMD_UNSOLICITED_ABORT:
2344 printk(KERN_WARNING
2345 "cciss%d: unsolicited abort 0x%02x\n", h->ctlr,
2346 c->Request.CDB[0]);
2347 return_status = IO_NEEDS_RETRY;
2348 break;
2349 default:
2350 printk(KERN_WARNING "cciss: cmd 0x%02x returned "
2351 "unknown status %x\n", c->Request.CDB[0],
2352 c->err_info->CommandStatus);
2353 return_status = IO_ERROR;
2354 }
2355 return return_status;
2356}
2357
2358static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
2359 int attempt_retry)
2360{
2361 DECLARE_COMPLETION_ONSTACK(wait);
2047 u64bit buff_dma_handle; 2362 u64bit buff_dma_handle;
2048 unsigned long flags; 2363 unsigned long flags;
2049 int return_status; 2364 int return_status = IO_OK;
2050 DECLARE_COMPLETION_ONSTACK(wait);
2051 2365
2052 if ((c = cmd_alloc(h, 0)) == NULL) 2366resend_cmd2:
2053 return -ENOMEM;
2054 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2055 log_unit, page_code, NULL, cmd_type);
2056 if (return_status != IO_OK) {
2057 cmd_free(h, c, 0);
2058 return return_status;
2059 }
2060 resend_cmd2:
2061 c->waiting = &wait; 2367 c->waiting = &wait;
2062
2063 /* Put the request on the tail of the queue and send it */ 2368 /* Put the request on the tail of the queue and send it */
2064 spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 2369 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2065 addQ(&h->reqQ, c); 2370 addQ(&h->reqQ, c);
2066 h->Qdepth++; 2371 h->Qdepth++;
2067 start_io(h); 2372 start_io(h);
2068 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 2373 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2069 2374
2070 wait_for_completion(&wait); 2375 wait_for_completion(&wait);
2071 2376
2072 if (c->err_info->CommandStatus != 0) { /* an error has occurred */ 2377 if (c->err_info->CommandStatus == 0 || !attempt_retry)
2073 switch (c->err_info->CommandStatus) { 2378 goto command_done;
2074 case CMD_TARGET_STATUS:
2075 printk(KERN_WARNING "cciss: cmd %p has "
2076 " completed with errors\n", c);
2077 if (c->err_info->ScsiStatus) {
2078 printk(KERN_WARNING "cciss: cmd %p "
2079 "has SCSI Status = %x\n",
2080 c, c->err_info->ScsiStatus);
2081 }
2082 2379
2083 break; 2380 return_status = process_sendcmd_error(h, c);
2084 case CMD_DATA_UNDERRUN: 2381
2085 case CMD_DATA_OVERRUN: 2382 if (return_status == IO_NEEDS_RETRY &&
2086 /* expected for inquire and report lun commands */ 2383 c->retry_count < MAX_CMD_RETRIES) {
2087 break; 2384 printk(KERN_WARNING "cciss%d: retrying 0x%02x\n", h->ctlr,
2088 case CMD_INVALID: 2385 c->Request.CDB[0]);
2089 printk(KERN_WARNING "cciss: Cmd %p is " 2386 c->retry_count++;
2090 "reported invalid\n", c); 2387 /* erase the old error information */
2091 return_status = IO_ERROR; 2388 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2092 break; 2389 return_status = IO_OK;
2093 case CMD_PROTOCOL_ERR: 2390 INIT_COMPLETION(wait);
2094 printk(KERN_WARNING "cciss: cmd %p has " 2391 goto resend_cmd2;
2095 "protocol error \n", c);
2096 return_status = IO_ERROR;
2097 break;
2098 case CMD_HARDWARE_ERR:
2099 printk(KERN_WARNING "cciss: cmd %p had "
2100 " hardware error\n", c);
2101 return_status = IO_ERROR;
2102 break;
2103 case CMD_CONNECTION_LOST:
2104 printk(KERN_WARNING "cciss: cmd %p had "
2105 "connection lost\n", c);
2106 return_status = IO_ERROR;
2107 break;
2108 case CMD_ABORTED:
2109 printk(KERN_WARNING "cciss: cmd %p was "
2110 "aborted\n", c);
2111 return_status = IO_ERROR;
2112 break;
2113 case CMD_ABORT_FAILED:
2114 printk(KERN_WARNING "cciss: cmd %p reports "
2115 "abort failed\n", c);
2116 return_status = IO_ERROR;
2117 break;
2118 case CMD_UNSOLICITED_ABORT:
2119 printk(KERN_WARNING
2120 "cciss%d: unsolicited abort %p\n", ctlr, c);
2121 if (c->retry_count < MAX_CMD_RETRIES) {
2122 printk(KERN_WARNING
2123 "cciss%d: retrying %p\n", ctlr, c);
2124 c->retry_count++;
2125 /* erase the old error information */
2126 memset(c->err_info, 0,
2127 sizeof(ErrorInfo_struct));
2128 return_status = IO_OK;
2129 INIT_COMPLETION(wait);
2130 goto resend_cmd2;
2131 }
2132 return_status = IO_ERROR;
2133 break;
2134 default:
2135 printk(KERN_WARNING "cciss: cmd %p returned "
2136 "unknown status %x\n", c,
2137 c->err_info->CommandStatus);
2138 return_status = IO_ERROR;
2139 }
2140 } 2392 }
2393
2394command_done:
2141 /* unlock the buffers from DMA */ 2395 /* unlock the buffers from DMA */
2142 buff_dma_handle.val32.lower = c->SG[0].Addr.lower; 2396 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2143 buff_dma_handle.val32.upper = c->SG[0].Addr.upper; 2397 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2144 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val, 2398 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
2145 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL); 2399 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2400 return return_status;
2401}
2402
2403static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
2404 __u8 page_code, unsigned char scsi3addr[],
2405 int cmd_type)
2406{
2407 ctlr_info_t *h = hba[ctlr];
2408 CommandList_struct *c;
2409 int return_status;
2410
2411 c = cmd_alloc(h, 0);
2412 if (!c)
2413 return -ENOMEM;
2414 return_status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
2415 scsi3addr, cmd_type);
2416 if (return_status == IO_OK)
2417 return_status = sendcmd_withirq_core(h, c, 1);
2418
2146 cmd_free(h, c, 0); 2419 cmd_free(h, c, 0);
2147 return return_status; 2420 return return_status;
2148} 2421}
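
sendcmd_withirq() is now a thin wrapper: fill_cmd() builds the command, sendcmd_withirq_core() submits and waits, and process_sendcmd_error() decodes the outcome. The retry path reuses a single on-stack completion. A sketch of the core loop (queue_and_start() is hypothetical shorthand for the addQ()/start_io() sequence under CCISS_LOCK):

DECLARE_COMPLETION_ONSTACK(wait);
int status;

do {
	c->waiting = &wait;
	queue_and_start(h, c);
	wait_for_completion(&wait);
	status = process_sendcmd_error(h, c);
	if (status != IO_NEEDS_RETRY || c->retry_count++ >= MAX_CMD_RETRIES)
		break;
	memset(c->err_info, 0, sizeof(ErrorInfo_struct));
	INIT_COMPLETION(wait);		/* rearm for the resend */
} while (1);
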
@@ -2155,15 +2428,17 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
2155{ 2428{
2156 int return_code; 2429 int return_code;
2157 unsigned long t; 2430 unsigned long t;
2431 unsigned char scsi3addr[8];
2158 2432
2159 memset(inq_buff, 0, sizeof(InquiryData_struct)); 2433 memset(inq_buff, 0, sizeof(InquiryData_struct));
2434 log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
2160 if (withirq) 2435 if (withirq)
2161 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, 2436 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
2162 inq_buff, sizeof(*inq_buff), 1, 2437 inq_buff, sizeof(*inq_buff),
2163 logvol, 0xC1, TYPE_CMD); 2438 0xC1, scsi3addr, TYPE_CMD);
2164 else 2439 else
2165 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff, 2440 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
2166 sizeof(*inq_buff), 1, logvol, 0xC1, NULL, 2441 sizeof(*inq_buff), 0xC1, scsi3addr,
2167 TYPE_CMD); 2442 TYPE_CMD);
2168 if (return_code == IO_OK) { 2443 if (return_code == IO_OK) {
2169 if (inq_buff->data_byte[8] == 0xFF) { 2444 if (inq_buff->data_byte[8] == 0xFF) {
@@ -2204,6 +2479,7 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
2204{ 2479{
2205 ReadCapdata_struct *buf; 2480 ReadCapdata_struct *buf;
2206 int return_code; 2481 int return_code;
2482 unsigned char scsi3addr[8];
2207 2483
2208 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL); 2484 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
2209 if (!buf) { 2485 if (!buf) {
@@ -2211,14 +2487,15 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
2211 return; 2487 return;
2212 } 2488 }
2213 2489
2490 log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
2214 if (withirq) 2491 if (withirq)
2215 return_code = sendcmd_withirq(CCISS_READ_CAPACITY, 2492 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
2216 ctlr, buf, sizeof(ReadCapdata_struct), 2493 ctlr, buf, sizeof(ReadCapdata_struct),
2217 1, logvol, 0, TYPE_CMD); 2494 0, scsi3addr, TYPE_CMD);
2218 else 2495 else
2219 return_code = sendcmd(CCISS_READ_CAPACITY, 2496 return_code = sendcmd(CCISS_READ_CAPACITY,
2220 ctlr, buf, sizeof(ReadCapdata_struct), 2497 ctlr, buf, sizeof(ReadCapdata_struct),
2221 1, logvol, 0, NULL, TYPE_CMD); 2498 0, scsi3addr, TYPE_CMD);
2222 if (return_code == IO_OK) { 2499 if (return_code == IO_OK) {
2223 *total_size = be32_to_cpu(*(__be32 *) buf->total_size); 2500 *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
2224 *block_size = be32_to_cpu(*(__be32 *) buf->block_size); 2501 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
@@ -2238,6 +2515,7 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size,
2238{ 2515{
2239 ReadCapdata_struct_16 *buf; 2516 ReadCapdata_struct_16 *buf;
2240 int return_code; 2517 int return_code;
2518 unsigned char scsi3addr[8];
2241 2519
2242 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL); 2520 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2243 if (!buf) { 2521 if (!buf) {
@@ -2245,15 +2523,16 @@ cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size,
2245 return; 2523 return;
2246 } 2524 }
2247 2525
2526 log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
2248 if (withirq) { 2527 if (withirq) {
2249 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16, 2528 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2250 ctlr, buf, sizeof(ReadCapdata_struct_16), 2529 ctlr, buf, sizeof(ReadCapdata_struct_16),
2251 1, logvol, 0, TYPE_CMD); 2530 0, scsi3addr, TYPE_CMD);
2252 } 2531 }
2253 else { 2532 else {
2254 return_code = sendcmd(CCISS_READ_CAPACITY_16, 2533 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2255 ctlr, buf, sizeof(ReadCapdata_struct_16), 2534 ctlr, buf, sizeof(ReadCapdata_struct_16),
2256 1, logvol, 0, NULL, TYPE_CMD); 2535 0, scsi3addr, TYPE_CMD);
2257 } 2536 }
2258 if (return_code == IO_OK) { 2537 if (return_code == IO_OK) {
2259 *total_size = be64_to_cpu(*(__be64 *) buf->total_size); 2538 *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
@@ -2303,7 +2582,7 @@ static int cciss_revalidate(struct gendisk *disk)
2303 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, 2582 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2304 inq_buff, drv); 2583 inq_buff, drv);
2305 2584
2306 blk_queue_hardsect_size(drv->queue, drv->block_size); 2585 blk_queue_logical_block_size(drv->queue, drv->block_size);
2307 set_capacity(disk, drv->nr_blocks); 2586 set_capacity(disk, drv->nr_blocks);
2308 2587
2309 kfree(inq_buff); 2588 kfree(inq_buff);
@@ -2333,86 +2612,21 @@ static unsigned long pollcomplete(int ctlr)
2333 return 1; 2612 return 1;
2334} 2613}
2335 2614
2336static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete) 2615/* Send command c to controller h and poll for it to complete.
2337{ 2616 * Turns interrupts off on the board. Used at driver init time
2338 /* We get in here if sendcmd() is polling for completions 2617 * and during SCSI error recovery.
2339 and gets some command back that it wasn't expecting --
2340 something other than that which it just sent down.
2341 Ordinarily, that shouldn't happen, but it can happen when
2342 the scsi tape stuff gets into error handling mode, and
2343 starts using sendcmd() to try to abort commands and
2344 reset tape drives. In that case, sendcmd may pick up
2345 completions of commands that were sent to logical drives
2346 through the block i/o system, or cciss ioctls completing, etc.
2347 In that case, we need to save those completions for later
2348 processing by the interrupt handler.
2349 */
2350
2351#ifdef CONFIG_CISS_SCSI_TAPE
2352 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2353
2354 /* If it's not the scsi tape stuff doing error handling, (abort */
2355 /* or reset) then we don't expect anything weird. */
2356 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2357#endif
2358 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2359 "Invalid command list address returned! (%lx)\n",
2360 ctlr, complete);
2361 /* not much we can do. */
2362#ifdef CONFIG_CISS_SCSI_TAPE
2363 return 1;
2364 }
2365
2366 /* We've sent down an abort or reset, but something else
2367 has completed */
2368 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2369 /* Uh oh. No room to save it for later... */
2370 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2371 "reject list overflow, command lost!\n", ctlr);
2372 return 1;
2373 }
2374 /* Save it for later */
2375 srl->complete[srl->ncompletions] = complete;
2376 srl->ncompletions++;
2377#endif
2378 return 0;
2379}
2380
2381/*
2382 * Send a command to the controller, and wait for it to complete.
2383 * Only used at init time.
2384 */ 2618 */
2385static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller, 2619static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
2386 1: address logical volume log_unit,
2387 2: periph device address is scsi3addr */
2388 unsigned int log_unit,
2389 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2390{ 2620{
2391 CommandList_struct *c;
2392 int i; 2621 int i;
2393 unsigned long complete; 2622 unsigned long complete;
2394 ctlr_info_t *info_p = hba[ctlr]; 2623 int status = IO_ERROR;
2395 u64bit buff_dma_handle; 2624 u64bit buff_dma_handle;
2396 int status, done = 0;
2397 2625
2398 if ((c = cmd_alloc(info_p, 1)) == NULL) { 2626resend_cmd1:
2399 printk(KERN_WARNING "cciss: unable to get memory"); 2627
2400 return IO_ERROR; 2628 /* Disable interrupt on the board. */
2401 } 2629 h->access.set_intr_mask(h, CCISS_INTR_OFF);
2402 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2403 log_unit, page_code, scsi3addr, cmd_type);
2404 if (status != IO_OK) {
2405 cmd_free(info_p, c, 1);
2406 return status;
2407 }
2408 resend_cmd1:
2409 /*
2410 * Disable interrupt
2411 */
2412#ifdef CCISS_DEBUG
2413 printk(KERN_DEBUG "cciss: turning intr off\n");
2414#endif /* CCISS_DEBUG */
2415 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2416 2630
2417 /* Make sure there is room in the command FIFO */ 2631 /* Make sure there is room in the command FIFO */
2418 /* Actually it should be completely empty at this time */ 2632 /* Actually it should be completely empty at this time */
@@ -2420,21 +2634,15 @@ static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use
2420 /* tape side of the driver. */ 2634 /* tape side of the driver. */
2421 for (i = 200000; i > 0; i--) { 2635 for (i = 200000; i > 0; i--) {
2422 /* if fifo isn't full go */ 2636 /* if fifo isn't full go */
2423 if (!(info_p->access.fifo_full(info_p))) { 2637 if (!(h->access.fifo_full(h)))
2424
2425 break; 2638 break;
2426 }
2427 udelay(10); 2639 udelay(10);
2428 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full," 2640 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2429 " waiting!\n", ctlr); 2641 " waiting!\n", h->ctlr);
2430 } 2642 }
2431 /* 2643 h->access.submit_command(h, c); /* Send the cmd */
2432 * Send the cmd
2433 */
2434 info_p->access.submit_command(info_p, c);
2435 done = 0;
2436 do { 2644 do {
2437 complete = pollcomplete(ctlr); 2645 complete = pollcomplete(h->ctlr);
2438 2646
2439#ifdef CCISS_DEBUG 2647#ifdef CCISS_DEBUG
2440 printk(KERN_DEBUG "cciss: command completed\n"); 2648 printk(KERN_DEBUG "cciss: command completed\n");
@@ -2443,97 +2651,102 @@ static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use
2443 if (complete == 1) { 2651 if (complete == 1) {
2444 printk(KERN_WARNING 2652 printk(KERN_WARNING
2445 "cciss cciss%d: SendCmd Timeout out, " 2653 "cciss cciss%d: SendCmd Timeout out, "
2446 "No command list address returned!\n", ctlr); 2654 "No command list address returned!\n", h->ctlr);
2447 status = IO_ERROR; 2655 status = IO_ERROR;
2448 done = 1;
2449 break; 2656 break;
2450 } 2657 }
2451 2658
2452 /* This will need to change for direct lookup completions */ 2659 /* Make sure it's the command we're expecting. */
2453 if ((complete & CISS_ERROR_BIT) 2660 if ((complete & ~CISS_ERROR_BIT) != c->busaddr) {
2454 && (complete & ~CISS_ERROR_BIT) == c->busaddr) { 2661 printk(KERN_WARNING "cciss%d: Unexpected command "
2455 /* if data overrun or underun on Report command 2662 "completion.\n", h->ctlr);
2456 ignore it 2663 continue;
2457 */ 2664 }
2458 if (((c->Request.CDB[0] == CISS_REPORT_LOG) || 2665
2459 (c->Request.CDB[0] == CISS_REPORT_PHYS) || 2666 /* It is our command. If no error, we're done. */
2460 (c->Request.CDB[0] == CISS_INQUIRY)) && 2667 if (!(complete & CISS_ERROR_BIT)) {
2461 ((c->err_info->CommandStatus == 2668 status = IO_OK;
2462 CMD_DATA_OVERRUN) || 2669 break;
2463 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2464 )) {
2465 complete = c->busaddr;
2466 } else {
2467 if (c->err_info->CommandStatus ==
2468 CMD_UNSOLICITED_ABORT) {
2469 printk(KERN_WARNING "cciss%d: "
2470 "unsolicited abort %p\n",
2471 ctlr, c);
2472 if (c->retry_count < MAX_CMD_RETRIES) {
2473 printk(KERN_WARNING
2474 "cciss%d: retrying %p\n",
2475 ctlr, c);
2476 c->retry_count++;
2477 /* erase the old error */
2478 /* information */
2479 memset(c->err_info, 0,
2480 sizeof
2481 (ErrorInfo_struct));
2482 goto resend_cmd1;
2483 } else {
2484 printk(KERN_WARNING
2485 "cciss%d: retried %p too "
2486 "many times\n", ctlr, c);
2487 status = IO_ERROR;
2488 goto cleanup1;
2489 }
2490 } else if (c->err_info->CommandStatus ==
2491 CMD_UNABORTABLE) {
2492 printk(KERN_WARNING
2493 "cciss%d: command could not be aborted.\n",
2494 ctlr);
2495 status = IO_ERROR;
2496 goto cleanup1;
2497 }
2498 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2499 " Error %x \n", ctlr,
2500 c->err_info->CommandStatus);
2501 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2502 " offensive info\n"
2503 " size %x\n num %x value %x\n",
2504 ctlr,
2505 c->err_info->MoreErrInfo.Invalid_Cmd.
2506 offense_size,
2507 c->err_info->MoreErrInfo.Invalid_Cmd.
2508 offense_num,
2509 c->err_info->MoreErrInfo.Invalid_Cmd.
2510 offense_value);
2511 status = IO_ERROR;
2512 goto cleanup1;
2513 }
2514 } 2670 }
2515 /* This will need changing for direct lookup completions */ 2671
2516 if (complete != c->busaddr) { 2672 /* There is an error... */
2517 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) { 2673
2518 BUG(); /* we are pretty much hosed if we get here. */ 2674 /* if data overrun or underun on Report command ignore it */
2675 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2676 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2677 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2678 ((c->err_info->CommandStatus == CMD_DATA_OVERRUN) ||
2679 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN))) {
2680 complete = c->busaddr;
2681 status = IO_OK;
2682 break;
2683 }
2684
2685 if (c->err_info->CommandStatus == CMD_UNSOLICITED_ABORT) {
2686 printk(KERN_WARNING "cciss%d: unsolicited abort %p\n",
2687 h->ctlr, c);
2688 if (c->retry_count < MAX_CMD_RETRIES) {
2689 printk(KERN_WARNING "cciss%d: retrying %p\n",
2690 h->ctlr, c);
2691 c->retry_count++;
2692 /* erase the old error information */
2693 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2694 goto resend_cmd1;
2519 } 2695 }
2520 continue; 2696 printk(KERN_WARNING "cciss%d: retried %p too many "
2521 } else 2697 "times\n", h->ctlr, c);
2522 done = 1; 2698 status = IO_ERROR;
2523 } while (!done); 2699 break;
2700 }
2701
2702 if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
2703 printk(KERN_WARNING "cciss%d: command could not be "
2704 "aborted.\n", h->ctlr);
2705 status = IO_ERROR;
2706 break;
2707 }
2708
2709 if (c->err_info->CommandStatus == CMD_TARGET_STATUS) {
2710 status = check_target_status(h, c);
2711 break;
2712 }
2713
2714 printk(KERN_WARNING "cciss%d: sendcmd error\n", h->ctlr);
2715 printk(KERN_WARNING "cmd = 0x%02x, CommandStatus = 0x%02x\n",
2716 c->Request.CDB[0], c->err_info->CommandStatus);
2717 status = IO_ERROR;
2718 break;
2719
2720 } while (1);
2524 2721
2525 cleanup1:
2526 /* unlock the data buffer from DMA */ 2722 /* unlock the data buffer from DMA */
2527 buff_dma_handle.val32.lower = c->SG[0].Addr.lower; 2723 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2528 buff_dma_handle.val32.upper = c->SG[0].Addr.upper; 2724 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2529 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val, 2725 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
2530 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL); 2726 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2531#ifdef CONFIG_CISS_SCSI_TAPE 2727 return status;
2532 /* if we saved some commands for later, process them now. */ 2728}
2533 if (info_p->scsi_rejects.ncompletions > 0) 2729
2534 do_cciss_intr(0, info_p); 2730/*
2535#endif 2731 * Send a command to the controller, and wait for it to complete.
2536 cmd_free(info_p, c, 1); 2732 * Used at init time, and during SCSI error recovery.
2733 */
2734static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
2735 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2736{
2737 CommandList_struct *c;
2738 int status;
2739
2740 c = cmd_alloc(hba[ctlr], 1);
2741 if (!c) {
2742 printk(KERN_WARNING "cciss: unable to get memory");
2743 return IO_ERROR;
2744 }
2745 status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
2746 scsi3addr, cmd_type);
2747 if (status == IO_OK)
2748 status = sendcmd_core(hba[ctlr], c);
2749 cmd_free(hba[ctlr], c, 1);
2537 return status; 2750 return status;
2538} 2751}
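
For illustration: the completion matching in the rewritten loop above splits into two independent mask tests, one to decide whether the tag belongs to this command and one to decide whether the error bit is set. A minimal user-space model (ERROR_BIT and the tag layout here are assumptions mirroring CISS_ERROR_BIT and c->busaddr):

    #include <stdio.h>

    #define ERROR_BIT 0x1ul    /* assumed: low bit of the tag flags an error */

    /* The hardware hands back the command's bus address as its tag,
     * possibly with the error bit set.  Ownership and error status are
     * two separate mask operations, as in the loop above. */
    static int classify(unsigned long complete, unsigned long busaddr)
    {
        if ((complete & ~ERROR_BIT) != busaddr)
            return -1;                          /* someone else's completion */
        return (complete & ERROR_BIT) ? 1 : 0;  /* 1 = error, 0 = clean */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               classify(0x1000, 0x1000),   /* 0: ours, no error */
               classify(0x1001, 0x1000),   /* 1: ours, error set */
               classify(0x2000, 0x1000));  /* -1: not ours */
        return 0;
    }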
2539 2752
@@ -2691,7 +2904,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2691 printk(KERN_WARNING "cciss: cmd %p has" 2904 printk(KERN_WARNING "cciss: cmd %p has"
2692 " completed with data underrun " 2905 " completed with data underrun "
2693 "reported\n", cmd); 2906 "reported\n", cmd);
2694 cmd->rq->data_len = cmd->err_info->ResidualCnt; 2907 cmd->rq->resid_len = cmd->err_info->ResidualCnt;
2695 } 2908 }
2696 break; 2909 break;
2697 case CMD_DATA_OVERRUN: 2910 case CMD_DATA_OVERRUN:
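
The underrun hunk above switches from rq->data_len to rq->resid_len; the residual counts bytes *not* transferred, so the valid payload is the requested length minus the residual. A small user-space model of that bookkeeping (struct and field names here are illustrative, not the kernel's):

    #include <stdio.h>

    struct request_model {
        unsigned int len;        /* bytes asked for */
        unsigned int resid_len;  /* bytes NOT transferred */
    };

    int main(void)
    {
        struct request_model rq = { .len = 4096, .resid_len = 0 };
        unsigned int transferred = 3584;    /* device stopped early */

        rq.resid_len = rq.len - transferred;  /* what ResidualCnt reports */
        printf("valid bytes: %u\n", rq.len - rq.resid_len);
        return 0;
    }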
@@ -2806,7 +3019,7 @@ static void do_cciss_request(struct request_queue *q)
2806 goto startio; 3019 goto startio;
2807 3020
2808 queue: 3021 queue:
2809 creq = elv_next_request(q); 3022 creq = blk_peek_request(q);
2810 if (!creq) 3023 if (!creq)
2811 goto startio; 3024 goto startio;
2812 3025
@@ -2815,7 +3028,7 @@ static void do_cciss_request(struct request_queue *q)
2815 if ((c = cmd_alloc(h, 1)) == NULL) 3028 if ((c = cmd_alloc(h, 1)) == NULL)
2816 goto full; 3029 goto full;
2817 3030
2818 blkdev_dequeue_request(creq); 3031 blk_start_request(creq);
2819 3032
2820 spin_unlock_irq(q->queue_lock); 3033 spin_unlock_irq(q->queue_lock);
2821 3034
@@ -2840,10 +3053,10 @@ static void do_cciss_request(struct request_queue *q)
2840 c->Request.Timeout = 0; // Don't time out 3053 c->Request.Timeout = 0; // Don't time out
2841 c->Request.CDB[0] = 3054 c->Request.CDB[0] =
2842 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write; 3055 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2843 start_blk = creq->sector; 3056 start_blk = blk_rq_pos(creq);
2844#ifdef CCISS_DEBUG 3057#ifdef CCISS_DEBUG
2845 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector, 3058 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",
2846 (int)creq->nr_sectors); 3059 (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
2847#endif /* CCISS_DEBUG */ 3060#endif /* CCISS_DEBUG */
2848 3061
2849 sg_init_table(tmp_sg, MAXSGENTRIES); 3062 sg_init_table(tmp_sg, MAXSGENTRIES);
@@ -2869,8 +3082,8 @@ static void do_cciss_request(struct request_queue *q)
2869 h->maxSG = seg; 3082 h->maxSG = seg;
2870 3083
2871#ifdef CCISS_DEBUG 3084#ifdef CCISS_DEBUG
2872 printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n", 3085 printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n",
2873 creq->nr_sectors, seg); 3086 blk_rq_sectors(creq), seg);
2874#endif /* CCISS_DEBUG */ 3087#endif /* CCISS_DEBUG */
2875 3088
2876 c->Header.SGList = c->Header.SGTotal = seg; 3089 c->Header.SGList = c->Header.SGTotal = seg;
@@ -2882,8 +3095,8 @@ static void do_cciss_request(struct request_queue *q)
2882 c->Request.CDB[4] = (start_blk >> 8) & 0xff; 3095 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2883 c->Request.CDB[5] = start_blk & 0xff; 3096 c->Request.CDB[5] = start_blk & 0xff;
2884 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB 3097 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2885 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff; 3098 c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
2886 c->Request.CDB[8] = creq->nr_sectors & 0xff; 3099 c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
2887 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0; 3100 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2888 } else { 3101 } else {
2889 u32 upper32 = upper_32_bits(start_blk); 3102 u32 upper32 = upper_32_bits(start_blk);
@@ -2898,10 +3111,10 @@ static void do_cciss_request(struct request_queue *q)
2898 c->Request.CDB[7]= (start_blk >> 16) & 0xff; 3111 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2899 c->Request.CDB[8]= (start_blk >> 8) & 0xff; 3112 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2900 c->Request.CDB[9]= start_blk & 0xff; 3113 c->Request.CDB[9]= start_blk & 0xff;
2901 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff; 3114 c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff;
2902 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff; 3115 c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff;
2903 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff; 3116 c->Request.CDB[12]= (blk_rq_sectors(creq) >> 8) & 0xff;
2904 c->Request.CDB[13]= creq->nr_sectors & 0xff; 3117 c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
2905 c->Request.CDB[14] = c->Request.CDB[15] = 0; 3118 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2906 } 3119 }
2907 } else if (blk_pc_request(creq)) { 3120 } else if (blk_pc_request(creq)) {
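
The accessor conversion above (blk_rq_pos()/blk_rq_sectors() in place of ->sector/->nr_sectors) feeds the same LBA and count into the CDB bytes. A sketch of packing them into a standard SCSI READ(10) CDB; the driver's own byte layout above is close but not identical:

    #include <stdio.h>
    #include <stdint.h>

    /* Sketch: big-endian LBA in bytes 2-5, transfer length in 7-8,
     * from the kind of values blk_rq_pos()/blk_rq_sectors() return. */
    static void fill_read10(uint8_t cdb[10], uint32_t lba, uint16_t nsect)
    {
        cdb[0] = 0x28;                 /* READ(10) opcode */
        cdb[1] = 0;
        cdb[2] = (lba >> 24) & 0xff;
        cdb[3] = (lba >> 16) & 0xff;
        cdb[4] = (lba >> 8) & 0xff;
        cdb[5] = lba & 0xff;
        cdb[6] = 0;
        cdb[7] = (nsect >> 8) & 0xff;
        cdb[8] = nsect & 0xff;
        cdb[9] = 0;
    }

    int main(void)
    {
        uint8_t cdb[10];
        int i;

        fill_read10(cdb, 0x12345678u, 256);
        for (i = 0; i < 10; i++)
            printf("%02x ", cdb[i]);
        printf("\n");
        return 0;
    }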
@@ -2931,44 +3144,18 @@ startio:
2931 3144
2932static inline unsigned long get_next_completion(ctlr_info_t *h) 3145static inline unsigned long get_next_completion(ctlr_info_t *h)
2933{ 3146{
2934#ifdef CONFIG_CISS_SCSI_TAPE
2935 /* Any rejects from sendcmd() lying around? Process them first */
2936 if (h->scsi_rejects.ncompletions == 0)
2937 return h->access.command_completed(h);
2938 else {
2939 struct sendcmd_reject_list *srl;
2940 int n;
2941 srl = &h->scsi_rejects;
2942 n = --srl->ncompletions;
2943 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2944 printk("p");
2945 return srl->complete[n];
2946 }
2947#else
2948 return h->access.command_completed(h); 3147 return h->access.command_completed(h);
2949#endif
2950} 3148}
2951 3149
2952static inline int interrupt_pending(ctlr_info_t *h) 3150static inline int interrupt_pending(ctlr_info_t *h)
2953{ 3151{
2954#ifdef CONFIG_CISS_SCSI_TAPE
2955 return (h->access.intr_pending(h)
2956 || (h->scsi_rejects.ncompletions > 0));
2957#else
2958 return h->access.intr_pending(h); 3152 return h->access.intr_pending(h);
2959#endif
2960} 3153}
2961 3154
2962static inline long interrupt_not_for_us(ctlr_info_t *h) 3155static inline long interrupt_not_for_us(ctlr_info_t *h)
2963{ 3156{
2964#ifdef CONFIG_CISS_SCSI_TAPE
2965 return (((h->access.intr_pending(h) == 0) ||
2966 (h->interrupts_enabled == 0))
2967 && (h->scsi_rejects.ncompletions == 0));
2968#else
2969 return (((h->access.intr_pending(h) == 0) || 3157 return (((h->access.intr_pending(h) == 0) ||
2970 (h->interrupts_enabled == 0))); 3158 (h->interrupts_enabled == 0)));
2971#endif
2972} 3159}
2973 3160
2974static irqreturn_t do_cciss_intr(int irq, void *dev_id) 3161static irqreturn_t do_cciss_intr(int irq, void *dev_id)
@@ -3723,12 +3910,15 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3723 INIT_HLIST_HEAD(&hba[i]->reqQ); 3910 INIT_HLIST_HEAD(&hba[i]->reqQ);
3724 3911
3725 if (cciss_pci_init(hba[i], pdev) != 0) 3912 if (cciss_pci_init(hba[i], pdev) != 0)
3726 goto clean1; 3913 goto clean0;
3727 3914
3728 sprintf(hba[i]->devname, "cciss%d", i); 3915 sprintf(hba[i]->devname, "cciss%d", i);
3729 hba[i]->ctlr = i; 3916 hba[i]->ctlr = i;
3730 hba[i]->pdev = pdev; 3917 hba[i]->pdev = pdev;
3731 3918
3919 if (cciss_create_hba_sysfs_entry(hba[i]))
3920 goto clean0;
3921
3732 /* configure PCI DMA stuff */ 3922 /* configure PCI DMA stuff */
3733 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) 3923 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
3734 dac = 1; 3924 dac = 1;
@@ -3787,15 +3977,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3787 printk(KERN_ERR "cciss: out of memory"); 3977 printk(KERN_ERR "cciss: out of memory");
3788 goto clean4; 3978 goto clean4;
3789 } 3979 }
3790#ifdef CONFIG_CISS_SCSI_TAPE
3791 hba[i]->scsi_rejects.complete =
3792 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3793 (hba[i]->nr_cmds + 5), GFP_KERNEL);
3794 if (hba[i]->scsi_rejects.complete == NULL) {
3795 printk(KERN_ERR "cciss: out of memory");
3796 goto clean4;
3797 }
3798#endif
3799 spin_lock_init(&hba[i]->lock); 3980 spin_lock_init(&hba[i]->lock);
3800 3981
3801 /* Initialize the pdev driver private data. 3982 /* Initialize the pdev driver private data.
@@ -3828,7 +4009,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3828 } 4009 }
3829 4010
3830 return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff, 4011 return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff,
3831 sizeof(InquiryData_struct), 0, 0 , 0, TYPE_CMD); 4012 sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD);
3832 if (return_code == IO_OK) { 4013 if (return_code == IO_OK) {
3833 hba[i]->firm_ver[0] = inq_buff->data_byte[32]; 4014 hba[i]->firm_ver[0] = inq_buff->data_byte[32];
3834 hba[i]->firm_ver[1] = inq_buff->data_byte[33]; 4015 hba[i]->firm_ver[1] = inq_buff->data_byte[33];
@@ -3855,9 +4036,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3855 4036
3856clean4: 4037clean4:
3857 kfree(inq_buff); 4038 kfree(inq_buff);
3858#ifdef CONFIG_CISS_SCSI_TAPE
3859 kfree(hba[i]->scsi_rejects.complete);
3860#endif
3861 kfree(hba[i]->cmd_pool_bits); 4039 kfree(hba[i]->cmd_pool_bits);
3862 if (hba[i]->cmd_pool) 4040 if (hba[i]->cmd_pool)
3863 pci_free_consistent(hba[i]->pdev, 4041 pci_free_consistent(hba[i]->pdev,
@@ -3872,6 +4050,8 @@ clean4:
3872clean2: 4050clean2:
3873 unregister_blkdev(hba[i]->major, hba[i]->devname); 4051 unregister_blkdev(hba[i]->major, hba[i]->devname);
3874clean1: 4052clean1:
4053 cciss_destroy_hba_sysfs_entry(hba[i]);
4054clean0:
3875 hba[i]->busy_initializing = 0; 4055 hba[i]->busy_initializing = 0;
3876 /* cleanup any queues that may have been initialized */ 4056 /* cleanup any queues that may have been initialized */
3877 for (j=0; j <= hba[i]->highest_lun; j++){ 4057 for (j=0; j <= hba[i]->highest_lun; j++){
@@ -3907,8 +4087,8 @@ static void cciss_shutdown(struct pci_dev *pdev)
3907 /* sendcmd will turn off interrupt, and send the flush... 4087 /* sendcmd will turn off interrupt, and send the flush...
3908 * To write all data in the battery backed cache to disks */ 4088 * To write all data in the battery backed cache to disks */
3909 memset(flush_buf, 0, 4); 4089 memset(flush_buf, 0, 4);
3910 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL, 4090 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0,
3911 TYPE_CMD); 4091 CTLR_LUNID, TYPE_CMD);
3912 if (return_code == IO_OK) { 4092 if (return_code == IO_OK) {
3913 printk(KERN_INFO "Completed flushing cache on controller %d\n", i); 4093 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
3914 } else { 4094 } else {
@@ -3973,15 +4153,13 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
3973 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct), 4153 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3974 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle); 4154 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3975 kfree(hba[i]->cmd_pool_bits); 4155 kfree(hba[i]->cmd_pool_bits);
3976#ifdef CONFIG_CISS_SCSI_TAPE
3977 kfree(hba[i]->scsi_rejects.complete);
3978#endif
3979 /* 4156 /*
3980 * Deliberately omit pci_disable_device(): it does something nasty to 4157 * Deliberately omit pci_disable_device(): it does something nasty to
3981 * Smart Array controllers that pci_enable_device does not undo 4158 * Smart Array controllers that pci_enable_device does not undo
3982 */ 4159 */
3983 pci_release_regions(pdev); 4160 pci_release_regions(pdev);
3984 pci_set_drvdata(pdev, NULL); 4161 pci_set_drvdata(pdev, NULL);
4162 cciss_destroy_hba_sysfs_entry(hba[i]);
3985 free_hba(i); 4163 free_hba(i);
3986} 4164}
3987 4165
@@ -3999,6 +4177,8 @@ static struct pci_driver cciss_pci_driver = {
3999 */ 4177 */
4000static int __init cciss_init(void) 4178static int __init cciss_init(void)
4001{ 4179{
4180 int err;
4181
4002 /* 4182 /*
4003 * The hardware requires that commands are aligned on a 64-bit 4183 * The hardware requires that commands are aligned on a 64-bit
4004 * boundary. Given that we use pci_alloc_consistent() to allocate an 4184 * boundary. Given that we use pci_alloc_consistent() to allocate an
@@ -4008,8 +4188,20 @@ static int __init cciss_init(void)
4008 4188
4009 printk(KERN_INFO DRIVER_NAME "\n"); 4189 printk(KERN_INFO DRIVER_NAME "\n");
4010 4190
4191 err = bus_register(&cciss_bus_type);
4192 if (err)
4193 return err;
4194
4011 /* Register for our PCI devices */ 4195 /* Register for our PCI devices */
4012 return pci_register_driver(&cciss_pci_driver); 4196 err = pci_register_driver(&cciss_pci_driver);
4197 if (err)
4198 goto err_bus_register;
4199
4200 return 0;
4201
4202err_bus_register:
4203 bus_unregister(&cciss_bus_type);
4204 return err;
4013} 4205}
4014 4206
4015static void __exit cciss_cleanup(void) 4207static void __exit cciss_cleanup(void)
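
The cciss_init() change above is the standard goto-unwind ladder: acquire resources in order, release them in reverse on failure. A compact sketch, with a third hypothetical resource added to show how the ladder extends (my_bus, my_drv and my_misc are illustrative names, declared elsewhere):

    static int example_init(void)
    {
        int err;

        err = bus_register(&my_bus);           /* resource A */
        if (err)
            return err;
        err = pci_register_driver(&my_drv);    /* resource B */
        if (err)
            goto out_bus;
        err = misc_register(&my_misc);         /* resource C, hypothetical */
        if (err)
            goto out_pci;
        return 0;

    out_pci:
        pci_unregister_driver(&my_drv);
    out_bus:
        bus_unregister(&my_bus);
        return err;
    }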
@@ -4026,6 +4218,7 @@ static void __exit cciss_cleanup(void)
4026 } 4218 }
4027 } 4219 }
4028 remove_proc_entry("driver/cciss", NULL); 4220 remove_proc_entry("driver/cciss", NULL);
4221 bus_unregister(&cciss_bus_type);
4029} 4222}
4030 4223
4031static void fail_all_cmds(unsigned long ctlr) 4224static void fail_all_cmds(unsigned long ctlr)
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 703e08038fb..06a5db25b29 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -11,6 +11,11 @@
11 11
12#define IO_OK 0 12#define IO_OK 0
13#define IO_ERROR 1 13#define IO_ERROR 1
14#define IO_NEEDS_RETRY 3
15
16#define VENDOR_LEN 8
17#define MODEL_LEN 16
18#define REV_LEN 4
14 19
15struct ctlr_info; 20struct ctlr_info;
16typedef struct ctlr_info ctlr_info_t; 21typedef struct ctlr_info ctlr_info_t;
@@ -34,23 +39,20 @@ typedef struct _drive_info_struct
34 int cylinders; 39 int cylinders;
35 int raid_level; /* set to -1 to indicate that 40 int raid_level; /* set to -1 to indicate that
36 * the drive is not in use/configured 41 * the drive is not in use/configured
37 */ 42 */
38 int busy_configuring; /*This is set when the drive is being removed 43 int busy_configuring; /* This is set when a drive is being removed
39 *to prevent it from being opened or it's queue 44 * to prevent it from being opened or it's
40 *from being started. 45 * queue from being started.
41 */ 46 */
42 	__u8 serial_no[16]; /* from inquiry page 0x83, */
43 			/* not necc. null terminated. */
 47 	struct device dev;
 48 	__u8 serial_no[16]; /* from inquiry page 0x83,
 49 			     * not necc. null terminated.
 50 			     */
 51 	char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */
 52 	char model[MODEL_LEN + 1]; /* SCSI model string */
 53 	char rev[REV_LEN + 1]; /* SCSI revision string */
44} drive_info_struct; 54} drive_info_struct;
45 55
46#ifdef CONFIG_CISS_SCSI_TAPE
47
48struct sendcmd_reject_list {
49 int ncompletions;
50 unsigned long *complete; /* array of NR_CMDS tags */
51};
52
53#endif
54struct ctlr_info 56struct ctlr_info
55{ 57{
56 int ctlr; 58 int ctlr;
@@ -118,11 +120,11 @@ struct ctlr_info
118 void *scsi_ctlr; /* ptr to structure containing scsi related stuff */ 120 void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
119 /* list of block side commands the scsi error handling sucked up */ 121 /* list of block side commands the scsi error handling sucked up */
120 /* and saved for later processing */ 122 /* and saved for later processing */
121 struct sendcmd_reject_list scsi_rejects;
122#endif 123#endif
123 unsigned char alive; 124 unsigned char alive;
124 struct completion *rescan_wait; 125 struct completion *rescan_wait;
125 struct task_struct *cciss_scan_thread; 126 struct task_struct *cciss_scan_thread;
127 struct device dev;
126}; 128};
127 129
128/* Defining the diffent access_menthods */ 130/* Defining the diffent access_menthods */
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index 40b1b92dae7..cd665b00c7c 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -217,6 +217,8 @@ typedef union _LUNAddr_struct {
217 LogDevAddr_struct LogDev; 217 LogDevAddr_struct LogDev;
218} LUNAddr_struct; 218} LUNAddr_struct;
219 219
220#define CTLR_LUNID "\0\0\0\0\0\0\0\0"
221
220typedef struct _CommandListHeader_struct { 222typedef struct _CommandListHeader_struct {
221 BYTE ReplyQueue; 223 BYTE ReplyQueue;
222 BYTE SGList; 224 BYTE SGList;
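
CTLR_LUNID above is eight zero bytes: an all-zero 8-byte LUN address selects the controller itself rather than a logical volume. One subtlety worth a model -- the literal is all embedded NULs, so it must be copied with memcpy(), never strcpy():

    #include <stdio.h>
    #include <string.h>

    #define CTLR_LUNID "\0\0\0\0\0\0\0\0"

    int main(void)
    {
        unsigned char lun[8];

        /* The literal holds 8 zero bytes (9 with the implicit NUL);
         * copy exactly 8 -- strcpy() would stop at the first NUL. */
        memcpy(lun, CTLR_LUNID, 8);
        printf("%u %u\n", lun[0], lun[7]);
        return 0;
    }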
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index a3fd87b4144..3315268b4ec 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -44,20 +44,13 @@
44#define CCISS_ABORT_MSG 0x00 44#define CCISS_ABORT_MSG 0x00
45#define CCISS_RESET_MSG 0x01 45#define CCISS_RESET_MSG 0x01
46 46
47/* some prototypes... */ 47static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
48static int sendcmd( 48 size_t size,
49 __u8 cmd, 49 __u8 page_code, unsigned char *scsi3addr,
50 int ctlr,
51 void *buff,
52 size_t size,
53 unsigned int use_unit_num, /* 0: address the controller,
54 1: address logical volume log_unit,
55 2: address is in scsi3addr */
56 unsigned int log_unit,
57 __u8 page_code,
58 unsigned char *scsi3addr,
59 int cmd_type); 50 int cmd_type);
60 51
52static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool);
53static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool);
61 54
62static int cciss_scsi_proc_info( 55static int cciss_scsi_proc_info(
63 struct Scsi_Host *sh, 56 struct Scsi_Host *sh,
@@ -1575,6 +1568,75 @@ cciss_seq_tape_report(struct seq_file *seq, int ctlr)
1575 CPQ_TAPE_UNLOCK(ctlr, flags); 1568 CPQ_TAPE_UNLOCK(ctlr, flags);
1576} 1569}
1577 1570
1571static int wait_for_device_to_become_ready(ctlr_info_t *h,
1572 unsigned char lunaddr[])
1573{
1574 int rc;
1575 int count = 0;
1576 int waittime = HZ;
1577 CommandList_struct *c;
1578
1579 c = cmd_alloc(h, 1);
1580 if (!c) {
1581 printk(KERN_WARNING "cciss%d: out of memory in "
1582 "wait_for_device_to_become_ready.\n", h->ctlr);
1583 return IO_ERROR;
1584 }
1585
1586 /* Send test unit ready until device ready, or give up. */
1587 while (count < 20) {
1588
1589 /* Wait for a bit. do this first, because if we send
1590 * the TUR right away, the reset will just abort it.
1591 */
1592 schedule_timeout_uninterruptible(waittime);
1593 count++;
1594
1595 /* Increase wait time with each try, up to a point. */
1596 if (waittime < (HZ * 30))
1597 waittime = waittime * 2;
1598
1599 /* Send the Test Unit Ready */
1600 rc = fill_cmd(c, TEST_UNIT_READY, h->ctlr, NULL, 0, 0,
1601 lunaddr, TYPE_CMD);
1602 if (rc == 0)
1603 rc = sendcmd_withirq_core(h, c, 0);
1604
1605 (void) process_sendcmd_error(h, c);
1606
1607 if (rc != 0)
1608 goto retry_tur;
1609
1610 if (c->err_info->CommandStatus == CMD_SUCCESS)
1611 break;
1612
1613 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
1614 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
1615 if (c->err_info->SenseInfo[2] == NO_SENSE)
1616 break;
1617 if (c->err_info->SenseInfo[2] == UNIT_ATTENTION) {
1618 unsigned char asc;
1619 asc = c->err_info->SenseInfo[12];
1620 check_for_unit_attention(h, c);
1621 if (asc == POWER_OR_RESET)
1622 break;
1623 }
1624 }
1625retry_tur:
1626 printk(KERN_WARNING "cciss%d: Waiting %d secs "
1627 "for device to become ready.\n",
1628 h->ctlr, waittime / HZ);
1629 rc = 1; /* device not ready. */
1630 }
1631
1632 if (rc)
1633 printk("cciss%d: giving up on device.\n", h->ctlr);
1634 else
1635 printk(KERN_WARNING "cciss%d: device is ready.\n", h->ctlr);
1636
1637 cmd_free(h, c, 1);
1638 return rc;
1639}
1578 1640
1579/* Need at least one of these error handlers to keep ../scsi/hosts.c from 1641/* Need at least one of these error handlers to keep ../scsi/hosts.c from
1580 * complaining. Doing a host- or bus-reset can't do anything good here. 1642 * complaining. Doing a host- or bus-reset can't do anything good here.
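
The wait loop in wait_for_device_to_become_ready() above is a capped exponential backoff: sleep first so a freshly reset device isn't poked immediately, double the delay each round, and give up after 20 tries. A user-space model of just the timing schedule (the HZ value is assumed only to convert to seconds):

    #include <stdio.h>

    #define HZ 100    /* assumed tick rate */

    int main(void)
    {
        int waittime = HZ;    /* first sleep: 1 second, in jiffies */
        int count, total = 0;

        for (count = 0; count < 20; count++) {
            total += waittime;    /* schedule_timeout_uninterruptible() here */
            if (waittime < HZ * 30)
                waittime *= 2;    /* back off, capped near 30 seconds */
        }
        printf("worst case: ~%d seconds before giving up\n", total / HZ);
        return 0;
    }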
@@ -1591,6 +1653,7 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
1591{ 1653{
1592 int rc; 1654 int rc;
1593 CommandList_struct *cmd_in_trouble; 1655 CommandList_struct *cmd_in_trouble;
1656 unsigned char lunaddr[8];
1594 ctlr_info_t **c; 1657 ctlr_info_t **c;
1595 int ctlr; 1658 int ctlr;
1596 1659
@@ -1600,19 +1663,15 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
1600 return FAILED; 1663 return FAILED;
1601 ctlr = (*c)->ctlr; 1664 ctlr = (*c)->ctlr;
1602 printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr); 1665 printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr);
1603
1604 /* find the command that's giving us trouble */ 1666 /* find the command that's giving us trouble */
1605 cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble; 1667 cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble;
1606 if (cmd_in_trouble == NULL) { /* paranoia */ 1668 if (cmd_in_trouble == NULL) /* paranoia */
1607 return FAILED; 1669 return FAILED;
1608 } 1670 memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8);
1609 /* send a reset to the SCSI LUN which the command was sent to */ 1671 /* send a reset to the SCSI LUN which the command was sent to */
1610 rc = sendcmd(CCISS_RESET_MSG, ctlr, NULL, 0, 2, 0, 0, 1672 rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr,
1611 (unsigned char *) &cmd_in_trouble->Header.LUN.LunAddrBytes[0],
1612 TYPE_MSG); 1673 TYPE_MSG);
1613 /* sendcmd turned off interrupts on the board, turn 'em back on. */ 1674 if (rc == 0 && wait_for_device_to_become_ready(*c, lunaddr) == 0)
1614 (*c)->access.set_intr_mask(*c, CCISS_INTR_ON);
1615 if (rc == 0)
1616 return SUCCESS; 1675 return SUCCESS;
1617 printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr); 1676 printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr);
1618 return FAILED; 1677 return FAILED;
@@ -1622,6 +1681,7 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
1622{ 1681{
1623 int rc; 1682 int rc;
1624 CommandList_struct *cmd_to_abort; 1683 CommandList_struct *cmd_to_abort;
1684 unsigned char lunaddr[8];
1625 ctlr_info_t **c; 1685 ctlr_info_t **c;
1626 int ctlr; 1686 int ctlr;
1627 1687
@@ -1636,12 +1696,9 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
1636 cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble; 1696 cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble;
1637 if (cmd_to_abort == NULL) /* paranoia */ 1697 if (cmd_to_abort == NULL) /* paranoia */
1638 return FAILED; 1698 return FAILED;
1639 rc = sendcmd(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag, 1699 memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8);
1640 0, 2, 0, 0, 1700 rc = sendcmd_withirq(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag,
1641 (unsigned char *) &cmd_to_abort->Header.LUN.LunAddrBytes[0], 1701 0, 0, lunaddr, TYPE_MSG);
1642 TYPE_MSG);
1643 /* sendcmd turned off interrupts on the board, turn 'em back on. */
1644 (*c)->access.set_intr_mask(*c, CCISS_INTR_ON);
1645 if (rc == 0) 1702 if (rc == 0)
1646 return SUCCESS; 1703 return SUCCESS;
1647 return FAILED; 1704 return FAILED;
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index ca268ca1115..44fa2018f6b 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -474,7 +474,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
474 disk->fops = &ida_fops; 474 disk->fops = &ida_fops;
475 if (j && !drv->nr_blks) 475 if (j && !drv->nr_blks)
476 continue; 476 continue;
477 blk_queue_hardsect_size(hba[i]->queue, drv->blk_size); 477 blk_queue_logical_block_size(hba[i]->queue, drv->blk_size);
478 set_capacity(disk, drv->nr_blks); 478 set_capacity(disk, drv->nr_blks);
479 disk->queue = hba[i]->queue; 479 disk->queue = hba[i]->queue;
480 disk->private_data = drv; 480 disk->private_data = drv;
@@ -903,7 +903,7 @@ static void do_ida_request(struct request_queue *q)
903 goto startio; 903 goto startio;
904 904
905queue_next: 905queue_next:
906 creq = elv_next_request(q); 906 creq = blk_peek_request(q);
907 if (!creq) 907 if (!creq)
908 goto startio; 908 goto startio;
909 909
@@ -912,17 +912,18 @@ queue_next:
912 if ((c = cmd_alloc(h,1)) == NULL) 912 if ((c = cmd_alloc(h,1)) == NULL)
913 goto startio; 913 goto startio;
914 914
915 blkdev_dequeue_request(creq); 915 blk_start_request(creq);
916 916
917 c->ctlr = h->ctlr; 917 c->ctlr = h->ctlr;
918 c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv; 918 c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
919 c->hdr.size = sizeof(rblk_t) >> 2; 919 c->hdr.size = sizeof(rblk_t) >> 2;
920 c->size += sizeof(rblk_t); 920 c->size += sizeof(rblk_t);
921 921
922 c->req.hdr.blk = creq->sector; 922 c->req.hdr.blk = blk_rq_pos(creq);
923 c->rq = creq; 923 c->rq = creq;
924DBGPX( 924DBGPX(
925 printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors); 925 printk("sector=%d, nr_sectors=%u\n",
926 blk_rq_pos(creq), blk_rq_sectors(creq));
926); 927);
927 sg_init_table(tmp_sg, SG_MAX); 928 sg_init_table(tmp_sg, SG_MAX);
928 seg = blk_rq_map_sg(q, creq, tmp_sg); 929 seg = blk_rq_map_sg(q, creq, tmp_sg);
@@ -940,9 +941,9 @@ DBGPX(
940 tmp_sg[i].offset, 941 tmp_sg[i].offset,
941 tmp_sg[i].length, dir); 942 tmp_sg[i].length, dir);
942 } 943 }
943DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); ); 944DBGPX( printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
944 c->req.hdr.sg_cnt = seg; 945 c->req.hdr.sg_cnt = seg;
945 c->req.hdr.blk_cnt = creq->nr_sectors; 946 c->req.hdr.blk_cnt = blk_rq_sectors(creq);
946 c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE; 947 c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
947 c->type = CMD_RWREQ; 948 c->type = CMD_RWREQ;
948 949
@@ -1024,8 +1025,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
1024 cmd->req.sg[i].size, ddir); 1025 cmd->req.sg[i].size, ddir);
1025 1026
1026 DBGPX(printk("Done with %p\n", rq);); 1027 DBGPX(printk("Done with %p\n", rq););
1027 if (__blk_end_request(rq, error, blk_rq_bytes(rq))) 1028 __blk_end_request_all(rq, error);
1028 BUG();
1029} 1029}
1030 1030
1031/* 1031/*
@@ -1546,7 +1546,7 @@ static int revalidate_allvol(ctlr_info_t *host)
1546 drv_info_t *drv = &host->drv[i]; 1546 drv_info_t *drv = &host->drv[i];
1547 if (i && !drv->nr_blks) 1547 if (i && !drv->nr_blks)
1548 continue; 1548 continue;
1549 blk_queue_hardsect_size(host->queue, drv->blk_size); 1549 blk_queue_logical_block_size(host->queue, drv->blk_size);
1550 set_capacity(disk, drv->nr_blks); 1550 set_capacity(disk, drv->nr_blks);
1551 disk->queue = host->queue; 1551 disk->queue = host->queue;
1552 disk->private_data = drv; 1552 disk->private_data = drv;
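
The cpqarray hunk above replaces "complete blk_rq_bytes() and BUG() if anything remains" with __blk_end_request_all(), which finishes the request unconditionally. A user-space model of the two helpers' relationship (the bool return meaning "request not yet finished" matches the kernel convention):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct rq_model { unsigned int remaining; };

    /* Complete one chunk; true means the request still has bytes left. */
    static bool end_request(struct rq_model *rq, unsigned int bytes)
    {
        rq->remaining -= (bytes > rq->remaining) ? rq->remaining : bytes;
        return rq->remaining != 0;
    }

    /* Complete everything in one call; can never leave work behind. */
    static void end_request_all(struct rq_model *rq)
    {
        bool unfinished = end_request(rq, rq->remaining);
        assert(!unfinished);
    }

    int main(void)
    {
        struct rq_model rq = { .remaining = 8192 };

        end_request_all(&rq);
        printf("remaining = %u\n", rq.remaining);
        return 0;
    }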
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 1300df6f164..862b40c9018 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -931,7 +931,7 @@ static inline void unlock_fdc(void)
931 del_timer(&fd_timeout); 931 del_timer(&fd_timeout);
932 cont = NULL; 932 cont = NULL;
933 clear_bit(0, &fdc_busy); 933 clear_bit(0, &fdc_busy);
934 if (elv_next_request(floppy_queue)) 934 if (current_req || blk_peek_request(floppy_queue))
935 do_fd_request(floppy_queue); 935 do_fd_request(floppy_queue);
936 spin_unlock_irqrestore(&floppy_lock, flags); 936 spin_unlock_irqrestore(&floppy_lock, flags);
937 wake_up(&fdc_wait); 937 wake_up(&fdc_wait);
@@ -2303,7 +2303,7 @@ static void floppy_end_request(struct request *req, int error)
2303 2303
2304 /* current_count_sectors can be zero if transfer failed */ 2304 /* current_count_sectors can be zero if transfer failed */
2305 if (error) 2305 if (error)
2306 nr_sectors = req->current_nr_sectors; 2306 nr_sectors = blk_rq_cur_sectors(req);
2307 if (__blk_end_request(req, error, nr_sectors << 9)) 2307 if (__blk_end_request(req, error, nr_sectors << 9))
2308 return; 2308 return;
2309 2309
@@ -2332,7 +2332,7 @@ static void request_done(int uptodate)
2332 if (uptodate) { 2332 if (uptodate) {
2333 /* maintain values for invalidation on geometry 2333 /* maintain values for invalidation on geometry
2334 * change */ 2334 * change */
2335 block = current_count_sectors + req->sector; 2335 block = current_count_sectors + blk_rq_pos(req);
2336 INFBOUND(DRS->maxblock, block); 2336 INFBOUND(DRS->maxblock, block);
2337 if (block > _floppy->sect) 2337 if (block > _floppy->sect)
2338 DRS->maxtrack = 1; 2338 DRS->maxtrack = 1;
@@ -2346,10 +2346,10 @@ static void request_done(int uptodate)
2346 /* record write error information */ 2346 /* record write error information */
2347 DRWE->write_errors++; 2347 DRWE->write_errors++;
2348 if (DRWE->write_errors == 1) { 2348 if (DRWE->write_errors == 1) {
2349 DRWE->first_error_sector = req->sector; 2349 DRWE->first_error_sector = blk_rq_pos(req);
2350 DRWE->first_error_generation = DRS->generation; 2350 DRWE->first_error_generation = DRS->generation;
2351 } 2351 }
2352 DRWE->last_error_sector = req->sector; 2352 DRWE->last_error_sector = blk_rq_pos(req);
2353 DRWE->last_error_generation = DRS->generation; 2353 DRWE->last_error_generation = DRS->generation;
2354 } 2354 }
2355 spin_lock_irqsave(q->queue_lock, flags); 2355 spin_lock_irqsave(q->queue_lock, flags);
@@ -2503,24 +2503,23 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
2503 2503
2504 max_sector = transfer_size(ssize, 2504 max_sector = transfer_size(ssize,
2505 min(max_sector, max_sector_2), 2505 min(max_sector, max_sector_2),
2506 current_req->nr_sectors); 2506 blk_rq_sectors(current_req));
2507 2507
2508 if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE && 2508 if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
2509 buffer_max > fsector_t + current_req->nr_sectors) 2509 buffer_max > fsector_t + blk_rq_sectors(current_req))
2510 current_count_sectors = min_t(int, buffer_max - fsector_t, 2510 current_count_sectors = min_t(int, buffer_max - fsector_t,
2511 current_req->nr_sectors); 2511 blk_rq_sectors(current_req));
2512 2512
2513 remaining = current_count_sectors << 9; 2513 remaining = current_count_sectors << 9;
2514#ifdef FLOPPY_SANITY_CHECK 2514#ifdef FLOPPY_SANITY_CHECK
2515 if ((remaining >> 9) > current_req->nr_sectors && 2515 if (remaining > blk_rq_bytes(current_req) && CT(COMMAND) == FD_WRITE) {
2516 CT(COMMAND) == FD_WRITE) {
2517 DPRINT("in copy buffer\n"); 2516 DPRINT("in copy buffer\n");
2518 printk("current_count_sectors=%ld\n", current_count_sectors); 2517 printk("current_count_sectors=%ld\n", current_count_sectors);
2519 printk("remaining=%d\n", remaining >> 9); 2518 printk("remaining=%d\n", remaining >> 9);
2520 printk("current_req->nr_sectors=%ld\n", 2519 printk("current_req->nr_sectors=%u\n",
2521 current_req->nr_sectors); 2520 blk_rq_sectors(current_req));
2522 printk("current_req->current_nr_sectors=%u\n", 2521 printk("current_req->current_nr_sectors=%u\n",
2523 current_req->current_nr_sectors); 2522 blk_rq_cur_sectors(current_req));
2524 printk("max_sector=%d\n", max_sector); 2523 printk("max_sector=%d\n", max_sector);
2525 printk("ssize=%d\n", ssize); 2524 printk("ssize=%d\n", ssize);
2526 } 2525 }
@@ -2530,7 +2529,7 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
2530 2529
2531 dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9); 2530 dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);
2532 2531
2533 size = current_req->current_nr_sectors << 9; 2532 size = blk_rq_cur_bytes(current_req);
2534 2533
2535 rq_for_each_segment(bv, current_req, iter) { 2534 rq_for_each_segment(bv, current_req, iter) {
2536 if (!remaining) 2535 if (!remaining)
@@ -2648,10 +2647,10 @@ static int make_raw_rw_request(void)
2648 2647
2649 max_sector = _floppy->sect * _floppy->head; 2648 max_sector = _floppy->sect * _floppy->head;
2650 2649
2651 TRACK = (int)current_req->sector / max_sector; 2650 TRACK = (int)blk_rq_pos(current_req) / max_sector;
2652 fsector_t = (int)current_req->sector % max_sector; 2651 fsector_t = (int)blk_rq_pos(current_req) % max_sector;
2653 if (_floppy->track && TRACK >= _floppy->track) { 2652 if (_floppy->track && TRACK >= _floppy->track) {
2654 if (current_req->current_nr_sectors & 1) { 2653 if (blk_rq_cur_sectors(current_req) & 1) {
2655 current_count_sectors = 1; 2654 current_count_sectors = 1;
2656 return 1; 2655 return 1;
2657 } else 2656 } else
@@ -2669,7 +2668,7 @@ static int make_raw_rw_request(void)
2669 if (fsector_t >= max_sector) { 2668 if (fsector_t >= max_sector) {
2670 current_count_sectors = 2669 current_count_sectors =
2671 min_t(int, _floppy->sect - fsector_t, 2670 min_t(int, _floppy->sect - fsector_t,
2672 current_req->nr_sectors); 2671 blk_rq_sectors(current_req));
2673 return 1; 2672 return 1;
2674 } 2673 }
2675 SIZECODE = 2; 2674 SIZECODE = 2;
@@ -2720,7 +2719,7 @@ static int make_raw_rw_request(void)
2720 2719
2721 in_sector_offset = (fsector_t % _floppy->sect) % ssize; 2720 in_sector_offset = (fsector_t % _floppy->sect) % ssize;
2722 aligned_sector_t = fsector_t - in_sector_offset; 2721 aligned_sector_t = fsector_t - in_sector_offset;
2723 max_size = current_req->nr_sectors; 2722 max_size = blk_rq_sectors(current_req);
2724 if ((raw_cmd->track == buffer_track) && 2723 if ((raw_cmd->track == buffer_track) &&
2725 (current_drive == buffer_drive) && 2724 (current_drive == buffer_drive) &&
2726 (fsector_t >= buffer_min) && (fsector_t < buffer_max)) { 2725 (fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
@@ -2729,10 +2728,10 @@ static int make_raw_rw_request(void)
2729 copy_buffer(1, max_sector, buffer_max); 2728 copy_buffer(1, max_sector, buffer_max);
2730 return 1; 2729 return 1;
2731 } 2730 }
2732 } else if (in_sector_offset || current_req->nr_sectors < ssize) { 2731 } else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {
2733 if (CT(COMMAND) == FD_WRITE) { 2732 if (CT(COMMAND) == FD_WRITE) {
2734 if (fsector_t + current_req->nr_sectors > ssize && 2733 if (fsector_t + blk_rq_sectors(current_req) > ssize &&
2735 fsector_t + current_req->nr_sectors < ssize + ssize) 2734 fsector_t + blk_rq_sectors(current_req) < ssize + ssize)
2736 max_size = ssize + ssize; 2735 max_size = ssize + ssize;
2737 else 2736 else
2738 max_size = ssize; 2737 max_size = ssize;
@@ -2776,7 +2775,7 @@ static int make_raw_rw_request(void)
2776 (indirect * 2 > direct * 3 && 2775 (indirect * 2 > direct * 3 &&
2777 *errors < DP->max_errors.read_track && ((!probing 2776 *errors < DP->max_errors.read_track && ((!probing
2778 || (DP->read_track & (1 << DRS->probed_format)))))) { 2777 || (DP->read_track & (1 << DRS->probed_format)))))) {
2779 max_size = current_req->nr_sectors; 2778 max_size = blk_rq_sectors(current_req);
2780 } else { 2779 } else {
2781 raw_cmd->kernel_data = current_req->buffer; 2780 raw_cmd->kernel_data = current_req->buffer;
2782 raw_cmd->length = current_count_sectors << 9; 2781 raw_cmd->length = current_count_sectors << 9;
@@ -2801,7 +2800,7 @@ static int make_raw_rw_request(void)
2801 fsector_t > buffer_max || 2800 fsector_t > buffer_max ||
2802 fsector_t < buffer_min || 2801 fsector_t < buffer_min ||
2803 ((CT(COMMAND) == FD_READ || 2802 ((CT(COMMAND) == FD_READ ||
2804 (!in_sector_offset && current_req->nr_sectors >= ssize)) && 2803 (!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&
2805 max_sector > 2 * max_buffer_sectors + buffer_min && 2804 max_sector > 2 * max_buffer_sectors + buffer_min &&
2806 max_size + fsector_t > 2 * max_buffer_sectors + buffer_min) 2805 max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)
2807 /* not enough space */ 2806 /* not enough space */
@@ -2879,8 +2878,8 @@ static int make_raw_rw_request(void)
2879 printk("write\n"); 2878 printk("write\n");
2880 return 0; 2879 return 0;
2881 } 2880 }
2882 } else if (raw_cmd->length > current_req->nr_sectors << 9 || 2881 } else if (raw_cmd->length > blk_rq_bytes(current_req) ||
2883 current_count_sectors > current_req->nr_sectors) { 2882 current_count_sectors > blk_rq_sectors(current_req)) {
2884 DPRINT("buffer overrun in direct transfer\n"); 2883 DPRINT("buffer overrun in direct transfer\n");
2885 return 0; 2884 return 0;
2886 } else if (raw_cmd->length < current_count_sectors << 9) { 2885 } else if (raw_cmd->length < current_count_sectors << 9) {
@@ -2913,7 +2912,7 @@ static void redo_fd_request(void)
2913 struct request *req; 2912 struct request *req;
2914 2913
2915 spin_lock_irq(floppy_queue->queue_lock); 2914 spin_lock_irq(floppy_queue->queue_lock);
2916 req = elv_next_request(floppy_queue); 2915 req = blk_fetch_request(floppy_queue);
2917 spin_unlock_irq(floppy_queue->queue_lock); 2916 spin_unlock_irq(floppy_queue->queue_lock);
2918 if (!req) { 2917 if (!req) {
2919 do_floppy = NULL; 2918 do_floppy = NULL;
@@ -2990,8 +2989,9 @@ static void do_fd_request(struct request_queue * q)
2990 if (usage_count == 0) { 2989 if (usage_count == 0) {
2991 printk("warning: usage count=0, current_req=%p exiting\n", 2990 printk("warning: usage count=0, current_req=%p exiting\n",
2992 current_req); 2991 current_req);
2993 printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector, 2992 printk("sect=%ld type=%x flags=%x\n",
2994 current_req->cmd_type, current_req->cmd_flags); 2993 (long)blk_rq_pos(current_req), current_req->cmd_type,
2994 current_req->cmd_flags);
2995 return; 2995 return;
2996 } 2996 }
2997 if (test_bit(0, &fdc_busy)) { 2997 if (test_bit(0, &fdc_busy)) {
@@ -4148,6 +4148,24 @@ static void floppy_device_release(struct device *dev)
4148{ 4148{
4149} 4149}
4150 4150
4151static int floppy_resume(struct platform_device *dev)
4152{
4153 int fdc;
4154
4155 for (fdc = 0; fdc < N_FDC; fdc++)
4156 if (FDCS->address != -1)
4157 user_reset_fdc(-1, FD_RESET_ALWAYS, 0);
4158
4159 return 0;
4160}
4161
4162static struct platform_driver floppy_driver = {
4163 .resume = floppy_resume,
4164 .driver = {
4165 .name = "floppy",
4166 },
4167};
4168
4151static struct platform_device floppy_device[N_DRIVE]; 4169static struct platform_device floppy_device[N_DRIVE];
4152 4170
4153static struct kobject *floppy_find(dev_t dev, int *part, void *data) 4171static struct kobject *floppy_find(dev_t dev, int *part, void *data)
@@ -4196,10 +4214,14 @@ static int __init floppy_init(void)
4196 if (err) 4214 if (err)
4197 goto out_put_disk; 4215 goto out_put_disk;
4198 4216
4217 err = platform_driver_register(&floppy_driver);
4218 if (err)
4219 goto out_unreg_blkdev;
4220
4199 floppy_queue = blk_init_queue(do_fd_request, &floppy_lock); 4221 floppy_queue = blk_init_queue(do_fd_request, &floppy_lock);
4200 if (!floppy_queue) { 4222 if (!floppy_queue) {
4201 err = -ENOMEM; 4223 err = -ENOMEM;
4202 goto out_unreg_blkdev; 4224 goto out_unreg_driver;
4203 } 4225 }
4204 blk_queue_max_sectors(floppy_queue, 64); 4226 blk_queue_max_sectors(floppy_queue, 64);
4205 4227
@@ -4346,6 +4368,8 @@ out_flush_work:
4346out_unreg_region: 4368out_unreg_region:
4347 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); 4369 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
4348 blk_cleanup_queue(floppy_queue); 4370 blk_cleanup_queue(floppy_queue);
4371out_unreg_driver:
4372 platform_driver_unregister(&floppy_driver);
4349out_unreg_blkdev: 4373out_unreg_blkdev:
4350 unregister_blkdev(FLOPPY_MAJOR, "fd"); 4374 unregister_blkdev(FLOPPY_MAJOR, "fd");
4351out_put_disk: 4375out_put_disk:
@@ -4566,6 +4590,7 @@ static void __exit floppy_module_exit(void)
4566 4590
4567 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); 4591 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
4568 unregister_blkdev(FLOPPY_MAJOR, "fd"); 4592 unregister_blkdev(FLOPPY_MAJOR, "fd");
4593 platform_driver_unregister(&floppy_driver);
4569 4594
4570 for (drive = 0; drive < N_DRIVE; drive++) { 4595 for (drive = 0; drive < N_DRIVE; drive++) {
4571 del_timer_sync(&motor_off_timer[drive]); 4596 del_timer_sync(&motor_off_timer[drive]);
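
floppy.c above gains a platform_driver purely to receive a resume callback, registered early in init and unregistered in both the error path and module exit. A minimal sketch of that shape (names are illustrative; it uses the legacy .resume member of this era rather than dev_pm_ops):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int demo_resume(struct platform_device *dev)
    {
        /* re-initialize controller state lost across suspend */
        return 0;
    }

    static struct platform_driver demo_driver = {
        .resume = demo_resume,
        .driver = {
            .name = "demo",
        },
    };

    static int __init demo_init(void)
    {
        return platform_driver_register(&demo_driver);
    }

    static void __exit demo_exit(void)
    {
        platform_driver_unregister(&demo_driver);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");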
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index baaa9e486e5..f65b3f369eb 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -98,10 +98,9 @@
98 98
99static DEFINE_SPINLOCK(hd_lock); 99static DEFINE_SPINLOCK(hd_lock);
100static struct request_queue *hd_queue; 100static struct request_queue *hd_queue;
101static struct request *hd_req;
101 102
102#define MAJOR_NR HD_MAJOR 103#define MAJOR_NR HD_MAJOR
103#define QUEUE (hd_queue)
104#define CURRENT elv_next_request(hd_queue)
105 104
106#define TIMEOUT_VALUE (6*HZ) 105#define TIMEOUT_VALUE (6*HZ)
107#define HD_DELAY 0 106#define HD_DELAY 0
@@ -195,11 +194,24 @@ static void __init hd_setup(char *str, int *ints)
195 NR_HD = hdind+1; 194 NR_HD = hdind+1;
196} 195}
197 196
197static bool hd_end_request(int err, unsigned int bytes)
198{
199 if (__blk_end_request(hd_req, err, bytes))
200 return true;
201 hd_req = NULL;
202 return false;
203}
204
205static bool hd_end_request_cur(int err)
206{
207 return hd_end_request(err, blk_rq_cur_bytes(hd_req));
208}
209
198static void dump_status(const char *msg, unsigned int stat) 210static void dump_status(const char *msg, unsigned int stat)
199{ 211{
200 char *name = "hd?"; 212 char *name = "hd?";
201 if (CURRENT) 213 if (hd_req)
202 name = CURRENT->rq_disk->disk_name; 214 name = hd_req->rq_disk->disk_name;
203 215
204#ifdef VERBOSE_ERRORS 216#ifdef VERBOSE_ERRORS
205 printk("%s: %s: status=0x%02x { ", name, msg, stat & 0xff); 217 printk("%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
@@ -227,8 +239,8 @@ static void dump_status(const char *msg, unsigned int stat)
227 if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) { 239 if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) {
228 printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL), 240 printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
229 inb(HD_CURRENT) & 0xf, inb(HD_SECTOR)); 241 inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
230 if (CURRENT) 242 if (hd_req)
231 printk(", sector=%ld", CURRENT->sector); 243 printk(", sector=%ld", blk_rq_pos(hd_req));
232 } 244 }
233 printk("\n"); 245 printk("\n");
234 } 246 }
@@ -406,11 +418,12 @@ static void unexpected_hd_interrupt(void)
406 */ 418 */
407static void bad_rw_intr(void) 419static void bad_rw_intr(void)
408{ 420{
409 struct request *req = CURRENT; 421 struct request *req = hd_req;
422
410 if (req != NULL) { 423 if (req != NULL) {
411 struct hd_i_struct *disk = req->rq_disk->private_data; 424 struct hd_i_struct *disk = req->rq_disk->private_data;
412 if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) { 425 if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
413 end_request(req, 0); 426 hd_end_request_cur(-EIO);
414 disk->special_op = disk->recalibrate = 1; 427 disk->special_op = disk->recalibrate = 1;
415 } else if (req->errors % RESET_FREQ == 0) 428 } else if (req->errors % RESET_FREQ == 0)
416 reset = 1; 429 reset = 1;
@@ -452,37 +465,30 @@ static void read_intr(void)
452 bad_rw_intr(); 465 bad_rw_intr();
453 hd_request(); 466 hd_request();
454 return; 467 return;
468
455ok_to_read: 469ok_to_read:
456 req = CURRENT; 470 req = hd_req;
457 insw(HD_DATA, req->buffer, 256); 471 insw(HD_DATA, req->buffer, 256);
458 req->sector++;
459 req->buffer += 512;
460 req->errors = 0;
461 i = --req->nr_sectors;
462 --req->current_nr_sectors;
463#ifdef DEBUG 472#ifdef DEBUG
464 printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n", 473 printk("%s: read: sector %ld, remaining = %u, buffer=%p\n",
465 req->rq_disk->disk_name, req->sector, req->nr_sectors, 474 req->rq_disk->disk_name, blk_rq_pos(req) + 1,
466 req->buffer+512); 475 blk_rq_sectors(req) - 1, req->buffer+512);
467#endif 476#endif
468 if (req->current_nr_sectors <= 0) 477 if (hd_end_request(0, 512)) {
469 end_request(req, 1);
470 if (i > 0) {
471 SET_HANDLER(&read_intr); 478 SET_HANDLER(&read_intr);
472 return; 479 return;
473 } 480 }
481
474 (void) inb_p(HD_STATUS); 482 (void) inb_p(HD_STATUS);
475#if (HD_DELAY > 0) 483#if (HD_DELAY > 0)
476 last_req = read_timer(); 484 last_req = read_timer();
477#endif 485#endif
478 if (elv_next_request(QUEUE)) 486 hd_request();
479 hd_request();
480 return;
481} 487}
482 488
483static void write_intr(void) 489static void write_intr(void)
484{ 490{
485 struct request *req = CURRENT; 491 struct request *req = hd_req;
486 int i; 492 int i;
487 int retries = 100000; 493 int retries = 100000;
488 494
@@ -492,30 +498,25 @@ static void write_intr(void)
492 continue; 498 continue;
493 if (!OK_STATUS(i)) 499 if (!OK_STATUS(i))
494 break; 500 break;
495 if ((req->nr_sectors <= 1) || (i & DRQ_STAT)) 501 if ((blk_rq_sectors(req) <= 1) || (i & DRQ_STAT))
496 goto ok_to_write; 502 goto ok_to_write;
497 } while (--retries > 0); 503 } while (--retries > 0);
498 dump_status("write_intr", i); 504 dump_status("write_intr", i);
499 bad_rw_intr(); 505 bad_rw_intr();
500 hd_request(); 506 hd_request();
501 return; 507 return;
508
502ok_to_write: 509ok_to_write:
503 req->sector++; 510 if (hd_end_request(0, 512)) {
504 i = --req->nr_sectors;
505 --req->current_nr_sectors;
506 req->buffer += 512;
507 if (!i || (req->bio && req->current_nr_sectors <= 0))
508 end_request(req, 1);
509 if (i > 0) {
510 SET_HANDLER(&write_intr); 511 SET_HANDLER(&write_intr);
511 outsw(HD_DATA, req->buffer, 256); 512 outsw(HD_DATA, req->buffer, 256);
512 } else { 513 return;
514 }
515
513#if (HD_DELAY > 0) 516#if (HD_DELAY > 0)
514 last_req = read_timer(); 517 last_req = read_timer();
515#endif 518#endif
516 hd_request(); 519 hd_request();
517 }
518 return;
519} 520}
520 521
521static void recal_intr(void) 522static void recal_intr(void)
@@ -537,18 +538,18 @@ static void hd_times_out(unsigned long dummy)
537 538
538 do_hd = NULL; 539 do_hd = NULL;
539 540
540 if (!CURRENT) 541 if (!hd_req)
541 return; 542 return;
542 543
543 spin_lock_irq(hd_queue->queue_lock); 544 spin_lock_irq(hd_queue->queue_lock);
544 reset = 1; 545 reset = 1;
545 name = CURRENT->rq_disk->disk_name; 546 name = hd_req->rq_disk->disk_name;
546 printk("%s: timeout\n", name); 547 printk("%s: timeout\n", name);
547 if (++CURRENT->errors >= MAX_ERRORS) { 548 if (++hd_req->errors >= MAX_ERRORS) {
548#ifdef DEBUG 549#ifdef DEBUG
549 printk("%s: too many errors\n", name); 550 printk("%s: too many errors\n", name);
550#endif 551#endif
551 end_request(CURRENT, 0); 552 hd_end_request_cur(-EIO);
552 } 553 }
553 hd_request(); 554 hd_request();
554 spin_unlock_irq(hd_queue->queue_lock); 555 spin_unlock_irq(hd_queue->queue_lock);
@@ -563,7 +564,7 @@ static int do_special_op(struct hd_i_struct *disk, struct request *req)
563 } 564 }
564 if (disk->head > 16) { 565 if (disk->head > 16) {
565 printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name); 566 printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
566 end_request(req, 0); 567 hd_end_request_cur(-EIO);
567 } 568 }
568 disk->special_op = 0; 569 disk->special_op = 0;
569 return 1; 570 return 1;
@@ -590,24 +591,27 @@ static void hd_request(void)
590repeat: 591repeat:
591 del_timer(&device_timer); 592 del_timer(&device_timer);
592 593
593 req = CURRENT; 594 if (!hd_req) {
594 if (!req) { 595 hd_req = blk_fetch_request(hd_queue);
595 do_hd = NULL; 596 if (!hd_req) {
596 return; 597 do_hd = NULL;
598 return;
599 }
597 } 600 }
601 req = hd_req;
598 602
599 if (reset) { 603 if (reset) {
600 reset_hd(); 604 reset_hd();
601 return; 605 return;
602 } 606 }
603 disk = req->rq_disk->private_data; 607 disk = req->rq_disk->private_data;
604 block = req->sector; 608 block = blk_rq_pos(req);
605 nsect = req->nr_sectors; 609 nsect = blk_rq_sectors(req);
606 if (block >= get_capacity(req->rq_disk) || 610 if (block >= get_capacity(req->rq_disk) ||
607 ((block+nsect) > get_capacity(req->rq_disk))) { 611 ((block+nsect) > get_capacity(req->rq_disk))) {
608 printk("%s: bad access: block=%d, count=%d\n", 612 printk("%s: bad access: block=%d, count=%d\n",
609 req->rq_disk->disk_name, block, nsect); 613 req->rq_disk->disk_name, block, nsect);
610 end_request(req, 0); 614 hd_end_request_cur(-EIO);
611 goto repeat; 615 goto repeat;
612 } 616 }
613 617
@@ -647,7 +651,7 @@ repeat:
647 break; 651 break;
648 default: 652 default:
649 printk("unknown hd-command\n"); 653 printk("unknown hd-command\n");
650 end_request(req, 0); 654 hd_end_request_cur(-EIO);
651 break; 655 break;
652 } 656 }
653 } 657 }
@@ -720,7 +724,7 @@ static int __init hd_init(void)
720 blk_queue_max_sectors(hd_queue, 255); 724 blk_queue_max_sectors(hd_queue, 255);
721 init_timer(&device_timer); 725 init_timer(&device_timer);
722 device_timer.function = hd_times_out; 726 device_timer.function = hd_times_out;
723 blk_queue_hardsect_size(hd_queue, 512); 727 blk_queue_logical_block_size(hd_queue, 512);
724 728
725 if (!NR_HD) { 729 if (!NR_HD) {
726 /* 730 /*
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ddae8082589..801f4ab8330 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -511,11 +511,7 @@ out:
511 */ 511 */
512static void loop_add_bio(struct loop_device *lo, struct bio *bio) 512static void loop_add_bio(struct loop_device *lo, struct bio *bio)
513{ 513{
514 if (lo->lo_biotail) { 514 bio_list_add(&lo->lo_bio_list, bio);
515 lo->lo_biotail->bi_next = bio;
516 lo->lo_biotail = bio;
517 } else
518 lo->lo_bio = lo->lo_biotail = bio;
519} 515}
520 516
521/* 517/*
@@ -523,16 +519,7 @@ static void loop_add_bio(struct loop_device *lo, struct bio *bio)
523 */ 519 */
524static struct bio *loop_get_bio(struct loop_device *lo) 520static struct bio *loop_get_bio(struct loop_device *lo)
525{ 521{
526 struct bio *bio; 522 return bio_list_pop(&lo->lo_bio_list);
527
528 if ((bio = lo->lo_bio)) {
529 if (bio == lo->lo_biotail)
530 lo->lo_biotail = NULL;
531 lo->lo_bio = bio->bi_next;
532 bio->bi_next = NULL;
533 }
534
535 return bio;
536} 523}
537 524
538static int loop_make_request(struct request_queue *q, struct bio *old_bio) 525static int loop_make_request(struct request_queue *q, struct bio *old_bio)
@@ -609,12 +596,13 @@ static int loop_thread(void *data)
609 596
610 set_user_nice(current, -20); 597 set_user_nice(current, -20);
611 598
612 while (!kthread_should_stop() || lo->lo_bio) { 599 while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
613 600
614 wait_event_interruptible(lo->lo_event, 601 wait_event_interruptible(lo->lo_event,
615 lo->lo_bio || kthread_should_stop()); 602 !bio_list_empty(&lo->lo_bio_list) ||
603 kthread_should_stop());
616 604
617 if (!lo->lo_bio) 605 if (bio_list_empty(&lo->lo_bio_list))
618 continue; 606 continue;
619 spin_lock_irq(&lo->lo_lock); 607 spin_lock_irq(&lo->lo_lock);
620 bio = loop_get_bio(lo); 608 bio = loop_get_bio(lo);
@@ -721,10 +709,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
721 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) 709 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
722 goto out_putf; 710 goto out_putf;
723 711
724 /* new backing store needs to support loop (eg splice_read) */
725 if (!inode->i_fop->splice_read)
726 goto out_putf;
727
728 /* size of the new backing store needs to be the same */ 712 /* size of the new backing store needs to be the same */
729 if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) 713 if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
730 goto out_putf; 714 goto out_putf;
@@ -800,12 +784,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
800 error = -EINVAL; 784 error = -EINVAL;
801 if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) { 785 if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
802 const struct address_space_operations *aops = mapping->a_ops; 786 const struct address_space_operations *aops = mapping->a_ops;
803 /* 787
804 * If we can't read - sorry. If we only can't write - well,
805 * it's going to be read-only.
806 */
807 if (!file->f_op->splice_read)
808 goto out_putf;
809 if (aops->write_begin) 788 if (aops->write_begin)
810 lo_flags |= LO_FLAGS_USE_AOPS; 789 lo_flags |= LO_FLAGS_USE_AOPS;
811 if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write) 790 if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
@@ -841,7 +820,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
841 lo->old_gfp_mask = mapping_gfp_mask(mapping); 820 lo->old_gfp_mask = mapping_gfp_mask(mapping);
842 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); 821 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
843 822
844 lo->lo_bio = lo->lo_biotail = NULL; 823 bio_list_init(&lo->lo_bio_list);
845 824
846 /* 825 /*
847 * set queue make_request_fn, and add limits based on lower level 826 * set queue make_request_fn, and add limits based on lower level
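
The loop.c hunks above retire the open-coded lo_bio/lo_biotail chain in favor of the bio_list helpers, which implement exactly this: a FIFO kept as head and tail pointers with O(1) enqueue and dequeue. A user-space model of the structure (the add maps to bio_list_add(), the pop to bio_list_pop()):

    #include <stdio.h>

    struct node { struct node *next; int id; };
    struct list { struct node *head, *tail; };

    static void list_add_tail_node(struct list *l, struct node *n)
    {
        n->next = NULL;
        if (l->tail)
            l->tail->next = n;
        else
            l->head = n;
        l->tail = n;
    }

    static struct node *list_pop(struct list *l)
    {
        struct node *n = l->head;

        if (n) {
            l->head = n->next;
            if (!l->head)
                l->tail = NULL;    /* list is now empty */
            n->next = NULL;
        }
        return n;
    }

    int main(void)
    {
        struct list l = { NULL, NULL };
        struct node a = { .id = 1 }, b = { .id = 2 };
        struct node *n;

        list_add_tail_node(&l, &a);
        list_add_tail_node(&l, &b);
        while ((n = list_pop(&l)))
            printf("popped %d\n", n->id);    /* FIFO order: 1 then 2 */
        return 0;
    }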
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index f3898353d0a..60de5a01e71 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -17,71 +17,220 @@
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/blkdev.h> 18#include <linux/blkdev.h>
19#include <linux/hdreg.h> 19#include <linux/hdreg.h>
20#include <linux/libata.h> 20#include <linux/ata.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/gpio.h> 24#include <linux/gpio.h>
25#include <linux/mg_disk.h>
26 25
27#define MG_RES_SEC (CONFIG_MG_DISK_RES << 1) 26#define MG_RES_SEC (CONFIG_MG_DISK_RES << 1)
28 27
28/* name for block device */
29#define MG_DISK_NAME "mgd"
30/* name for platform device */
31#define MG_DEV_NAME "mg_disk"
32
33#define MG_DISK_MAJ 0
34#define MG_DISK_MAX_PART 16
35#define MG_SECTOR_SIZE 512
36#define MG_MAX_SECTS 256
37
38/* Register offsets */
39#define MG_BUFF_OFFSET 0x8000
40#define MG_STORAGE_BUFFER_SIZE 0x200
41#define MG_REG_OFFSET 0xC000
42#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
43#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
44#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
45#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
46#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
47#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
48#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
49#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */
50#define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */
51#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
52#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)
53
54/* handy status */
55#define MG_STAT_READY (ATA_DRDY | ATA_DSC)
56#define MG_READY_OK(s) (((s) & (MG_STAT_READY | (ATA_BUSY | ATA_DF | \
57 ATA_ERR))) == MG_STAT_READY)
58
59/* error code for others */
60#define MG_ERR_NONE 0
61#define MG_ERR_TIMEOUT 0x100
62#define MG_ERR_INIT_STAT 0x101
63#define MG_ERR_TRANSLATION 0x102
64#define MG_ERR_CTRL_RST 0x103
65#define MG_ERR_INV_STAT 0x104
66#define MG_ERR_RSTOUT 0x105
67
68#define MG_MAX_ERRORS 6 /* Max read/write errors */
69
70/* command */
71#define MG_CMD_RD 0x20
72#define MG_CMD_WR 0x30
73#define MG_CMD_SLEEP 0x99
74#define MG_CMD_WAKEUP 0xC3
75#define MG_CMD_ID 0xEC
76#define MG_CMD_WR_CONF 0x3C
77#define MG_CMD_RD_CONF 0x40
78
79/* operation mode */
80#define MG_OP_CASCADE (1 << 0)
81#define MG_OP_CASCADE_SYNC_RD (1 << 1)
82#define MG_OP_CASCADE_SYNC_WR (1 << 2)
83#define MG_OP_INTERLEAVE (1 << 3)
84
85/* synchronous */
86#define MG_BURST_LAT_4 (3 << 4)
87#define MG_BURST_LAT_5 (4 << 4)
88#define MG_BURST_LAT_6 (5 << 4)
89#define MG_BURST_LAT_7 (6 << 4)
90#define MG_BURST_LAT_8 (7 << 4)
91#define MG_BURST_LEN_4 (1 << 1)
92#define MG_BURST_LEN_8 (2 << 1)
93#define MG_BURST_LEN_16 (3 << 1)
94#define MG_BURST_LEN_32 (4 << 1)
95#define MG_BURST_LEN_CONT (0 << 1)
96
97/* timeout value (unit: ms) */
98#define MG_TMAX_CONF_TO_CMD 1
99#define MG_TMAX_WAIT_RD_DRQ 10
100#define MG_TMAX_WAIT_WR_DRQ 500
101#define MG_TMAX_RST_TO_BUSY 10
102#define MG_TMAX_HDRST_TO_RDY 500
103#define MG_TMAX_SWRST_TO_RDY 500
104#define MG_TMAX_RSTOUT 3000
105
106/* device attributes */
107/* use mflash as boot device */
108#define MG_BOOT_DEV (1 << 0)
109/* use mflash as storage device */
110#define MG_STORAGE_DEV (1 << 1)
111/* same as MG_STORAGE_DEV, but the bootloader has already done the reset sequence */
112#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
113
114#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
115
116/* names of GPIO resource */
117#define MG_RST_PIN "mg_rst"
118/* except for MG_BOOT_DEV, a reset-out pin should be assigned */
119#define MG_RSTOUT_PIN "mg_rstout"
120
121/* private driver data */
122struct mg_drv_data {
123 /* disk resource */
124 u32 use_polling;
125
126	/* device attributes */
127 u32 dev_attr;
128
129 /* internally used */
130 struct mg_host *host;
131};
132
133/* main structure for mflash driver */
134struct mg_host {
135 struct device *dev;
136
137 struct request_queue *breq;
138 struct request *req;
139 spinlock_t lock;
140 struct gendisk *gd;
141
142 struct timer_list timer;
143 void (*mg_do_intr) (struct mg_host *);
144
145 u16 id[ATA_ID_WORDS];
146
147 u16 cyls;
148 u16 heads;
149 u16 sectors;
150 u32 n_sectors;
151 u32 nres_sectors;
152
153 void __iomem *dev_base;
154 unsigned int irq;
155 unsigned int rst;
156 unsigned int rstout;
157
158 u32 major;
159 u32 error;
160};
161
162/*
163 * Debugging macro and defines
164 */
165#undef DO_MG_DEBUG
166#ifdef DO_MG_DEBUG
167# define MG_DBG(fmt, args...) \
168 printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
169#else /* DO_MG_DEBUG */
170# define MG_DBG(fmt, args...) do { } while (0)
171#endif /* DO_MG_DEBUG */
172
29static void mg_request(struct request_queue *); 173static void mg_request(struct request_queue *);
30 174
175static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes)
176{
177 if (__blk_end_request(host->req, err, nr_bytes))
178 return true;
179
180 host->req = NULL;
181 return false;
182}
183
184static bool mg_end_request_cur(struct mg_host *host, int err)
185{
186 return mg_end_request(host, err, blk_rq_cur_bytes(host->req));
187}
188
31static void mg_dump_status(const char *msg, unsigned int stat, 189static void mg_dump_status(const char *msg, unsigned int stat,
32 struct mg_host *host) 190 struct mg_host *host)
33{ 191{
34 char *name = MG_DISK_NAME; 192 char *name = MG_DISK_NAME;
35 struct request *req;
36 193
37 if (host->breq) { 194 if (host->req)
38 req = elv_next_request(host->breq); 195 name = host->req->rq_disk->disk_name;
39 if (req)
40 name = req->rq_disk->disk_name;
41 }
42 196
43 printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff); 197 printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
44 if (stat & MG_REG_STATUS_BIT_BUSY) 198 if (stat & ATA_BUSY)
45 printk("Busy "); 199 printk("Busy ");
46 if (stat & MG_REG_STATUS_BIT_READY) 200 if (stat & ATA_DRDY)
47 printk("DriveReady "); 201 printk("DriveReady ");
48 if (stat & MG_REG_STATUS_BIT_WRITE_FAULT) 202 if (stat & ATA_DF)
49 printk("WriteFault "); 203 printk("WriteFault ");
50 if (stat & MG_REG_STATUS_BIT_SEEK_DONE) 204 if (stat & ATA_DSC)
51 printk("SeekComplete "); 205 printk("SeekComplete ");
52 if (stat & MG_REG_STATUS_BIT_DATA_REQ) 206 if (stat & ATA_DRQ)
53 printk("DataRequest "); 207 printk("DataRequest ");
54 if (stat & MG_REG_STATUS_BIT_CORRECTED_ERROR) 208 if (stat & ATA_CORR)
55 printk("CorrectedError "); 209 printk("CorrectedError ");
56 if (stat & MG_REG_STATUS_BIT_ERROR) 210 if (stat & ATA_ERR)
57 printk("Error "); 211 printk("Error ");
58 printk("}\n"); 212 printk("}\n");
59 if ((stat & MG_REG_STATUS_BIT_ERROR) == 0) { 213 if ((stat & ATA_ERR) == 0) {
60 host->error = 0; 214 host->error = 0;
61 } else { 215 } else {
62 host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR); 216 host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
63 printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg, 217 printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
64 host->error & 0xff); 218 host->error & 0xff);
65 if (host->error & MG_REG_ERR_BBK) 219 if (host->error & ATA_BBK)
66 printk("BadSector "); 220 printk("BadSector ");
67 if (host->error & MG_REG_ERR_UNC) 221 if (host->error & ATA_UNC)
68 printk("UncorrectableError "); 222 printk("UncorrectableError ");
69 if (host->error & MG_REG_ERR_IDNF) 223 if (host->error & ATA_IDNF)
70 printk("SectorIdNotFound "); 224 printk("SectorIdNotFound ");
71 if (host->error & MG_REG_ERR_ABRT) 225 if (host->error & ATA_ABORTED)
72 printk("DriveStatusError "); 226 printk("DriveStatusError ");
73 if (host->error & MG_REG_ERR_AMNF) 227 if (host->error & ATA_AMNF)
74 printk("AddrMarkNotFound "); 228 printk("AddrMarkNotFound ");
75 printk("}"); 229 printk("}");
76 if (host->error & 230 if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) {
77 (MG_REG_ERR_BBK | MG_REG_ERR_UNC | 231 if (host->req)
78 MG_REG_ERR_IDNF | MG_REG_ERR_AMNF)) { 232 printk(", sector=%u",
79 if (host->breq) { 233 (unsigned int)blk_rq_pos(host->req));
80 req = elv_next_request(host->breq);
81 if (req)
82 printk(", sector=%u", (u32)req->sector);
83 }
84
85 } 234 }
86 printk("\n"); 235 printk("\n");
87 } 236 }
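Two themes run through this hunk: the driver-private MG_REG_STATUS_BIT_* and MG_REG_ERR_* macros give way to the generic ATA_* bits from <linux/ata.h> (hence the include swap at the top of the file), and the new mg_end_request() wraps __blk_end_request() so host->req is cleared exactly when the block layer reports the request fully completed. A sketch restating the MG_READY_OK() test in terms of the generic bits; the helper name is a hypothetical, not driver code:

	#include <linux/types.h>
	#include <linux/ata.h>

	/* Same test as MG_READY_OK() above: DRDY and DSC set,
	 * none of BSY/DF/ERR. */
	static inline bool drive_ready(u8 status)
	{
		return (status & (ATA_DRDY | ATA_DSC | ATA_BUSY | ATA_DF | ATA_ERR))
			== (ATA_DRDY | ATA_DSC);
	}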
@@ -100,12 +249,12 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
100 249
101 do { 250 do {
102 cur_jiffies = jiffies; 251 cur_jiffies = jiffies;
103 if (status & MG_REG_STATUS_BIT_BUSY) { 252 if (status & ATA_BUSY) {
104 if (expect == MG_REG_STATUS_BIT_BUSY) 253 if (expect == ATA_BUSY)
105 break; 254 break;
106 } else { 255 } else {
107 /* Check the error condition! */ 256 /* Check the error condition! */
108 if (status & MG_REG_STATUS_BIT_ERROR) { 257 if (status & ATA_ERR) {
109 mg_dump_status("mg_wait", status, host); 258 mg_dump_status("mg_wait", status, host);
110 break; 259 break;
111 } 260 }
@@ -114,8 +263,8 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
114 if (MG_READY_OK(status)) 263 if (MG_READY_OK(status))
115 break; 264 break;
116 265
117 if (expect == MG_REG_STATUS_BIT_DATA_REQ) 266 if (expect == ATA_DRQ)
118 if (status & MG_REG_STATUS_BIT_DATA_REQ) 267 if (status & ATA_DRQ)
119 break; 268 break;
120 } 269 }
121 if (!msec) { 270 if (!msec) {
@@ -173,6 +322,42 @@ static irqreturn_t mg_irq(int irq, void *dev_id)
173 return IRQ_HANDLED; 322 return IRQ_HANDLED;
174} 323}
175 324
325/* local copy of ata_id_string() */
326static void mg_id_string(const u16 *id, unsigned char *s,
327 unsigned int ofs, unsigned int len)
328{
329 unsigned int c;
330
331 BUG_ON(len & 1);
332
333 while (len > 0) {
334 c = id[ofs] >> 8;
335 *s = c;
336 s++;
337
338 c = id[ofs] & 0xff;
339 *s = c;
340 s++;
341
342 ofs++;
343 len -= 2;
344 }
345}
346
347/* local copy of ata_id_c_string() */
348static void mg_id_c_string(const u16 *id, unsigned char *s,
349 unsigned int ofs, unsigned int len)
350{
351 unsigned char *p;
352
353 mg_id_string(id, s, ofs, len - 1);
354
355 p = s + strnlen(s, len - 1);
356 while (p > s && p[-1] == ' ')
357 p--;
358 *p = '\0';
359}
360
176static int mg_get_disk_id(struct mg_host *host) 361static int mg_get_disk_id(struct mg_host *host)
177{ 362{
178 u32 i; 363 u32 i;
@@ -184,12 +369,10 @@ static int mg_get_disk_id(struct mg_host *host)
184 char serial[ATA_ID_SERNO_LEN + 1]; 369 char serial[ATA_ID_SERNO_LEN + 1];
185 370
186 if (!prv_data->use_polling) 371 if (!prv_data->use_polling)
187 outb(MG_REG_CTRL_INTR_DISABLE, 372 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
188 (unsigned long)host->dev_base +
189 MG_REG_DRV_CTRL);
190 373
191 outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND); 374 outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
192 err = mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_RD_DRQ); 375 err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ);
193 if (err) 376 if (err)
194 return err; 377 return err;
195 378
@@ -219,9 +402,9 @@ static int mg_get_disk_id(struct mg_host *host)
219 host->n_sectors -= host->nres_sectors; 402 host->n_sectors -= host->nres_sectors;
220 } 403 }
221 404
222 ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev)); 405 mg_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
223 ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model)); 406 mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
224 ata_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial)); 407 mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
225 printk(KERN_INFO "mg_disk: model: %s\n", model); 408 printk(KERN_INFO "mg_disk: model: %s\n", model);
226 printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev); 409 printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
227 printk(KERN_INFO "mg_disk: serial: %s\n", serial); 410 printk(KERN_INFO "mg_disk: serial: %s\n", serial);
@@ -229,8 +412,7 @@ static int mg_get_disk_id(struct mg_host *host)
229 host->n_sectors, host->nres_sectors); 412 host->n_sectors, host->nres_sectors);
230 413
231 if (!prv_data->use_polling) 414 if (!prv_data->use_polling)
232 outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base + 415 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
233 MG_REG_DRV_CTRL);
234 416
235 return err; 417 return err;
236} 418}
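Dropping <linux/libata.h> means the driver can no longer call ata_id_c_string(), so it carries the two local copies shown above: mg_id_string() byte-swaps the big-endian 16-bit IDENTIFY words into a character buffer, and mg_id_c_string() additionally trims trailing spaces and NUL-terminates. A usage sketch, with the field offsets and lengths taken from <linux/ata.h>; the wrapper function is hypothetical:

	static void show_identify_strings(const u16 *id)
	{
		char model[ATA_ID_PROD_LEN + 1];
		char serial[ATA_ID_SERNO_LEN + 1];

		mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
		mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
		printk(KERN_INFO "model: %s serial: %s\n", model, serial);
	}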
@@ -244,7 +426,7 @@ static int mg_disk_init(struct mg_host *host)
244 426
245 /* hdd rst low */ 427 /* hdd rst low */
246 gpio_set_value(host->rst, 0); 428 gpio_set_value(host->rst, 0);
247 err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY); 429 err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
248 if (err) 430 if (err)
249 return err; 431 return err;
250 432
@@ -255,17 +437,14 @@ static int mg_disk_init(struct mg_host *host)
255 return err; 437 return err;
256 438
257 /* soft reset on */ 439 /* soft reset on */
258 outb(MG_REG_CTRL_RESET | 440 outb(ATA_SRST | (prv_data->use_polling ? ATA_NIEN : 0),
259 (prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
260 MG_REG_CTRL_INTR_ENABLE),
261 (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 441 (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
262 err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY); 442 err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
263 if (err) 443 if (err)
264 return err; 444 return err;
265 445
266 /* soft reset off */ 446 /* soft reset off */
267 outb(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE : 447 outb(prv_data->use_polling ? ATA_NIEN : 0,
268 MG_REG_CTRL_INTR_ENABLE,
269 (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 448 (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
270 err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY); 449 err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
271 if (err) 450 if (err)
@@ -281,11 +460,10 @@ static int mg_disk_init(struct mg_host *host)
281 460
282static void mg_bad_rw_intr(struct mg_host *host) 461static void mg_bad_rw_intr(struct mg_host *host)
283{ 462{
284 struct request *req = elv_next_request(host->breq); 463 if (host->req)
285 if (req != NULL) 464 if (++host->req->errors >= MG_MAX_ERRORS ||
286 if (++req->errors >= MG_MAX_ERRORS || 465 host->error == MG_ERR_TIMEOUT)
287 host->error == MG_ERR_TIMEOUT) 466 mg_end_request_cur(host, -EIO);
288 end_request(req, 0);
289} 467}
290 468
291static unsigned int mg_out(struct mg_host *host, 469static unsigned int mg_out(struct mg_host *host,
@@ -311,7 +489,7 @@ static unsigned int mg_out(struct mg_host *host,
311 MG_REG_CYL_LOW); 489 MG_REG_CYL_LOW);
312 outb((u8)(sect_num >> 16), (unsigned long)host->dev_base + 490 outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
313 MG_REG_CYL_HIGH); 491 MG_REG_CYL_HIGH);
314 outb((u8)((sect_num >> 24) | MG_REG_HEAD_LBA_MODE), 492 outb((u8)((sect_num >> 24) | ATA_LBA | ATA_DEVICE_OBS),
315 (unsigned long)host->dev_base + MG_REG_DRV_HEAD); 493 (unsigned long)host->dev_base + MG_REG_DRV_HEAD);
316 outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND); 494 outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
317 return MG_ERR_NONE; 495 return MG_ERR_NONE;
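In mg_out() the private MG_REG_HEAD_LBA_MODE constant becomes ATA_LBA | ATA_DEVICE_OBS: ATA_LBA is bit 6 of the device/head register and selects LBA addressing, while ATA_DEVICE_OBS supplies the two obsolete always-one bits (5 and 7) that older devices expect. A sketch of the byte being composed, with an explicit nibble mask added for clarity (the hunk relies on the u8 cast instead):

	/* bits 27:24 of the LBA, plus LBA mode and the obsolete '1' bits */
	u8 devhead = (u8)((sect_num >> 24) & 0x0f) | ATA_LBA | ATA_DEVICE_OBS;
	outb(devhead, (unsigned long)host->dev_base + MG_REG_DRV_HEAD);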
@@ -319,105 +497,77 @@ static unsigned int mg_out(struct mg_host *host,
319 497
320static void mg_read(struct request *req) 498static void mg_read(struct request *req)
321{ 499{
322 u32 remains, j; 500 u32 j;
323 struct mg_host *host = req->rq_disk->private_data; 501 struct mg_host *host = req->rq_disk->private_data;
324 502
325 remains = req->nr_sectors; 503 if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
326 504 MG_CMD_RD, NULL) != MG_ERR_NONE)
327 if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) !=
328 MG_ERR_NONE)
329 mg_bad_rw_intr(host); 505 mg_bad_rw_intr(host);
330 506
331 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", 507 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
332 remains, req->sector, req->buffer); 508 blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
509
510 do {
511 u16 *buff = (u16 *)req->buffer;
333 512
334 while (remains) { 513 if (mg_wait(host, ATA_DRQ,
335 if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, 514 MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
336 MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
337 mg_bad_rw_intr(host); 515 mg_bad_rw_intr(host);
338 return; 516 return;
339 } 517 }
340 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) { 518 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
341 *(u16 *)req->buffer = 519 *buff++ = inw((unsigned long)host->dev_base +
342 inw((unsigned long)host->dev_base + 520 MG_BUFF_OFFSET + (j << 1));
343 MG_BUFF_OFFSET + (j << 1));
344 req->buffer += 2;
345 }
346
347 req->sector++;
348 req->errors = 0;
349 remains = --req->nr_sectors;
350 --req->current_nr_sectors;
351
352 if (req->current_nr_sectors <= 0) {
353 MG_DBG("remain : %d sects\n", remains);
354 end_request(req, 1);
355 if (remains > 0)
356 req = elv_next_request(host->breq);
357 }
358 521
359 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + 522 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
360 MG_REG_COMMAND); 523 MG_REG_COMMAND);
361 } 524 } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
362} 525}
363 526
364static void mg_write(struct request *req) 527static void mg_write(struct request *req)
365{ 528{
366 u32 remains, j; 529 u32 j;
367 struct mg_host *host = req->rq_disk->private_data; 530 struct mg_host *host = req->rq_disk->private_data;
368 531
369 remains = req->nr_sectors; 532 if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
370 533 MG_CMD_WR, NULL) != MG_ERR_NONE) {
371 if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) !=
372 MG_ERR_NONE) {
373 mg_bad_rw_intr(host); 534 mg_bad_rw_intr(host);
374 return; 535 return;
375 } 536 }
376 537
377
378 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", 538 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
379 remains, req->sector, req->buffer); 539 blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
380 while (remains) { 540
381 if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, 541 do {
382 MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { 542 u16 *buff = (u16 *)req->buffer;
543
544 if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
383 mg_bad_rw_intr(host); 545 mg_bad_rw_intr(host);
384 return; 546 return;
385 } 547 }
386 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) { 548 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
387 outw(*(u16 *)req->buffer, 549 outw(*buff++, (unsigned long)host->dev_base +
388 (unsigned long)host->dev_base + 550 MG_BUFF_OFFSET + (j << 1));
389 MG_BUFF_OFFSET + (j << 1));
390 req->buffer += 2;
391 }
392 req->sector++;
393 remains = --req->nr_sectors;
394 --req->current_nr_sectors;
395
396 if (req->current_nr_sectors <= 0) {
397 MG_DBG("remain : %d sects\n", remains);
398 end_request(req, 1);
399 if (remains > 0)
400 req = elv_next_request(host->breq);
401 }
402 551
403 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + 552 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
404 MG_REG_COMMAND); 553 MG_REG_COMMAND);
405 } 554 } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
406} 555}
407 556
408static void mg_read_intr(struct mg_host *host) 557static void mg_read_intr(struct mg_host *host)
409{ 558{
559 struct request *req = host->req;
410 u32 i; 560 u32 i;
411 struct request *req; 561 u16 *buff;
412 562
413 /* check status */ 563 /* check status */
414 do { 564 do {
415 i = inb((unsigned long)host->dev_base + MG_REG_STATUS); 565 i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
416 if (i & MG_REG_STATUS_BIT_BUSY) 566 if (i & ATA_BUSY)
417 break; 567 break;
418 if (!MG_READY_OK(i)) 568 if (!MG_READY_OK(i))
419 break; 569 break;
420 if (i & MG_REG_STATUS_BIT_DATA_REQ) 570 if (i & ATA_DRQ)
421 goto ok_to_read; 571 goto ok_to_read;
422 } while (0); 572 } while (0);
423 mg_dump_status("mg_read_intr", i, host); 573 mg_dump_status("mg_read_intr", i, host);
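mg_read() and mg_write() now share one loop shape: refetch the buffer pointer, wait for DRQ, move one sector with inw()/outw(), send the confirm command, and let mg_end_request() decide whether another sector remains. Re-reading req->buffer on every pass matters, because completing MG_SECTOR_SIZE bytes can advance the request to a new segment. The skeleton, with the transfer body elided and context (req, host) as in the hunk:

	do {
		u16 *buff = (u16 *)req->buffer;	/* may point at a new segment */

		if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
			mg_bad_rw_intr(host);
			return;
		}
		/* ... move MG_SECTOR_SIZE bytes through buff ... */
		outb(MG_CMD_RD_CONF,
		     (unsigned long)host->dev_base + MG_REG_COMMAND);
	} while (mg_end_request(host, 0, MG_SECTOR_SIZE));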
@@ -427,60 +577,42 @@ static void mg_read_intr(struct mg_host *host)
427 577
428ok_to_read: 578ok_to_read:
429 /* get current segment of request */ 579 /* get current segment of request */
430 req = elv_next_request(host->breq); 580 buff = (u16 *)req->buffer;
431 581
432 /* read 1 sector */ 582 /* read 1 sector */
433 for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) { 583 for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
434 *(u16 *)req->buffer = 584 *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
435 inw((unsigned long)host->dev_base + MG_BUFF_OFFSET + 585 (i << 1));
436 (i << 1));
437 req->buffer += 2;
438 }
439 586
440 /* manipulate request */
441 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", 587 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
442 req->sector, req->nr_sectors - 1, req->buffer); 588 blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
443
444 req->sector++;
445 req->errors = 0;
446 i = --req->nr_sectors;
447 --req->current_nr_sectors;
448
449 /* let know if current segment done */
450 if (req->current_nr_sectors <= 0)
451 end_request(req, 1);
452
453 /* set handler if read remains */
454 if (i > 0) {
455 host->mg_do_intr = mg_read_intr;
456 mod_timer(&host->timer, jiffies + 3 * HZ);
457 }
458 589
459 /* send read confirm */ 590 /* send read confirm */
460 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); 591 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
461 592
462 /* goto next request */ 593 if (mg_end_request(host, 0, MG_SECTOR_SIZE)) {
463 if (!i) 594 /* set handler if read remains */
595 host->mg_do_intr = mg_read_intr;
596 mod_timer(&host->timer, jiffies + 3 * HZ);
597 } else /* goto next request */
464 mg_request(host->breq); 598 mg_request(host->breq);
465} 599}
466 600
467static void mg_write_intr(struct mg_host *host) 601static void mg_write_intr(struct mg_host *host)
468{ 602{
603 struct request *req = host->req;
469 u32 i, j; 604 u32 i, j;
470 u16 *buff; 605 u16 *buff;
471 struct request *req; 606 bool rem;
472
473 /* get current segment of request */
474 req = elv_next_request(host->breq);
475 607
476 /* check status */ 608 /* check status */
477 do { 609 do {
478 i = inb((unsigned long)host->dev_base + MG_REG_STATUS); 610 i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
479 if (i & MG_REG_STATUS_BIT_BUSY) 611 if (i & ATA_BUSY)
480 break; 612 break;
481 if (!MG_READY_OK(i)) 613 if (!MG_READY_OK(i))
482 break; 614 break;
483 if ((req->nr_sectors <= 1) || (i & MG_REG_STATUS_BIT_DATA_REQ)) 615 if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ))
484 goto ok_to_write; 616 goto ok_to_write;
485 } while (0); 617 } while (0);
486 mg_dump_status("mg_write_intr", i, host); 618 mg_dump_status("mg_write_intr", i, host);
@@ -489,18 +621,8 @@ static void mg_write_intr(struct mg_host *host)
489 return; 621 return;
490 622
491ok_to_write: 623ok_to_write:
492 /* manipulate request */ 624 if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
493 req->sector++; 625 /* write 1 sector and set handler if remains */
494 i = --req->nr_sectors;
495 --req->current_nr_sectors;
496 req->buffer += MG_SECTOR_SIZE;
497
498 /* let know if current segment or all done */
499 if (!i || (req->bio && req->current_nr_sectors <= 0))
500 end_request(req, 1);
501
502 /* write 1 sector and set handler if remains */
503 if (i > 0) {
504 buff = (u16 *)req->buffer; 626 buff = (u16 *)req->buffer;
505 for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) { 627 for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
506 outw(*buff, (unsigned long)host->dev_base + 628 outw(*buff, (unsigned long)host->dev_base +
@@ -508,7 +630,7 @@ ok_to_write:
508 buff++; 630 buff++;
509 } 631 }
510 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", 632 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
511 req->sector, req->nr_sectors, req->buffer); 633 blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
512 host->mg_do_intr = mg_write_intr; 634 host->mg_do_intr = mg_write_intr;
513 mod_timer(&host->timer, jiffies + 3 * HZ); 635 mod_timer(&host->timer, jiffies + 3 * HZ);
514 } 636 }
@@ -516,7 +638,7 @@ ok_to_write:
516 /* send write confirm */ 638 /* send write confirm */
517 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); 639 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
518 640
519 if (!i) 641 if (!rem)
520 mg_request(host->breq); 642 mg_request(host->breq);
521} 643}
522 644
@@ -524,49 +646,45 @@ void mg_times_out(unsigned long data)
524{ 646{
525 struct mg_host *host = (struct mg_host *)data; 647 struct mg_host *host = (struct mg_host *)data;
526 char *name; 648 char *name;
527 struct request *req;
528 649
529 spin_lock_irq(&host->lock); 650 spin_lock_irq(&host->lock);
530 651
531 req = elv_next_request(host->breq); 652 if (!host->req)
532 if (!req)
533 goto out_unlock; 653 goto out_unlock;
534 654
535 host->mg_do_intr = NULL; 655 host->mg_do_intr = NULL;
536 656
537 name = req->rq_disk->disk_name; 657 name = host->req->rq_disk->disk_name;
538 printk(KERN_DEBUG "%s: timeout\n", name); 658 printk(KERN_DEBUG "%s: timeout\n", name);
539 659
540 host->error = MG_ERR_TIMEOUT; 660 host->error = MG_ERR_TIMEOUT;
541 mg_bad_rw_intr(host); 661 mg_bad_rw_intr(host);
542 662
543 mg_request(host->breq);
544out_unlock: 663out_unlock:
664 mg_request(host->breq);
545 spin_unlock_irq(&host->lock); 665 spin_unlock_irq(&host->lock);
546} 666}
547 667
548static void mg_request_poll(struct request_queue *q) 668static void mg_request_poll(struct request_queue *q)
549{ 669{
550 struct request *req; 670 struct mg_host *host = q->queuedata;
551 struct mg_host *host;
552 671
553 while ((req = elv_next_request(q)) != NULL) { 672 while (1) {
554 host = req->rq_disk->private_data; 673 if (!host->req) {
555 if (blk_fs_request(req)) { 674 host->req = blk_fetch_request(q);
556 switch (rq_data_dir(req)) { 675 if (!host->req)
557 case READ:
558 mg_read(req);
559 break;
560 case WRITE:
561 mg_write(req);
562 break;
563 default:
564 printk(KERN_WARNING "%s:%d unknown command\n",
565 __func__, __LINE__);
566 end_request(req, 0);
567 break; 676 break;
568 }
569 } 677 }
678
679 if (unlikely(!blk_fs_request(host->req))) {
680 mg_end_request_cur(host, -EIO);
681 continue;
682 }
683
684 if (rq_data_dir(host->req) == READ)
685 mg_read(host->req);
686 else
687 mg_write(host->req);
570 } 688 }
571} 689}
572 690
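This is the canonical conversion the whole series applies: elv_next_request() only peeked at the queue head, so drivers dequeued separately or not at all, while blk_fetch_request() peeks and dequeues in one step, and the driver itself caches the in-flight request, here in host->req via q->queuedata. The two shapes side by side, as a sketch with surrounding declarations elided:

	/* before: peek-only, the request stays on the queue */
	while ((req = elv_next_request(q)) != NULL) {
		host = req->rq_disk->private_data;
		/* ... */
	}

	/* after: fetch dequeues; the driver owns the request until done */
	struct mg_host *host = q->queuedata;	/* set once at probe time */

	while (1) {
		if (!host->req) {
			host->req = blk_fetch_request(q);
			if (!host->req)
				break;		/* queue drained */
		}
		/* ... service host->req; mg_end_request() clears it ... */
	}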
@@ -588,18 +706,15 @@ static unsigned int mg_issue_req(struct request *req,
588 break; 706 break;
589 case WRITE: 707 case WRITE:
590 /* TODO : handler */ 708 /* TODO : handler */
591 outb(MG_REG_CTRL_INTR_DISABLE, 709 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
592 (unsigned long)host->dev_base +
593 MG_REG_DRV_CTRL);
594 if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr) 710 if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
595 != MG_ERR_NONE) { 711 != MG_ERR_NONE) {
596 mg_bad_rw_intr(host); 712 mg_bad_rw_intr(host);
597 return host->error; 713 return host->error;
598 } 714 }
599 del_timer(&host->timer); 715 del_timer(&host->timer);
600 mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_WR_DRQ); 716 mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ);
601 outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base + 717 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
602 MG_REG_DRV_CTRL);
603 if (host->error) { 718 if (host->error) {
604 mg_bad_rw_intr(host); 719 mg_bad_rw_intr(host);
605 return host->error; 720 return host->error;
@@ -614,11 +729,6 @@ static unsigned int mg_issue_req(struct request *req,
614 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + 729 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
615 MG_REG_COMMAND); 730 MG_REG_COMMAND);
616 break; 731 break;
617 default:
618 printk(KERN_WARNING "%s:%d unknown command\n",
619 __func__, __LINE__);
620 end_request(req, 0);
621 break;
622 } 732 }
623 return MG_ERR_NONE; 733 return MG_ERR_NONE;
624} 734}
@@ -626,16 +736,17 @@ static unsigned int mg_issue_req(struct request *req,
626/* This function is also called from IRQ context */ 736/* This function is also called from IRQ context */
627static void mg_request(struct request_queue *q) 737static void mg_request(struct request_queue *q)
628{ 738{
739 struct mg_host *host = q->queuedata;
629 struct request *req; 740 struct request *req;
630 struct mg_host *host;
631 u32 sect_num, sect_cnt; 741 u32 sect_num, sect_cnt;
632 742
633 while (1) { 743 while (1) {
634 req = elv_next_request(q); 744 if (!host->req) {
635 if (!req) 745 host->req = blk_fetch_request(q);
636 return; 746 if (!host->req)
637 747 break;
638 host = req->rq_disk->private_data; 748 }
749 req = host->req;
639 750
640 /* check unwanted request call */ 751 /* check unwanted request call */
641 if (host->mg_do_intr) 752 if (host->mg_do_intr)
@@ -643,9 +754,9 @@ static void mg_request(struct request_queue *q)
643 754
644 del_timer(&host->timer); 755 del_timer(&host->timer);
645 756
646 sect_num = req->sector; 757 sect_num = blk_rq_pos(req);
647 /* deal whole segments */ 758 /* deal whole segments */
648 sect_cnt = req->nr_sectors; 759 sect_cnt = blk_rq_sectors(req);
649 760
650 /* sanity check */ 761 /* sanity check */
651 if (sect_num >= get_capacity(req->rq_disk) || 762 if (sect_num >= get_capacity(req->rq_disk) ||
@@ -655,12 +766,14 @@ static void mg_request(struct request_queue *q)
655 "%s: bad access: sector=%d, count=%d\n", 766 "%s: bad access: sector=%d, count=%d\n",
656 req->rq_disk->disk_name, 767 req->rq_disk->disk_name,
657 sect_num, sect_cnt); 768 sect_num, sect_cnt);
658 end_request(req, 0); 769 mg_end_request_cur(host, -EIO);
659 continue; 770 continue;
660 } 771 }
661 772
662 if (!blk_fs_request(req)) 773 if (unlikely(!blk_fs_request(req))) {
663 return; 774 mg_end_request_cur(host, -EIO);
775 continue;
776 }
664 777
665 if (!mg_issue_req(req, host, sect_num, sect_cnt)) 778 if (!mg_issue_req(req, host, sect_num, sect_cnt))
666 return; 779 return;
@@ -690,9 +803,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
690 return -EIO; 803 return -EIO;
691 804
692 if (!prv_data->use_polling) 805 if (!prv_data->use_polling)
693 outb(MG_REG_CTRL_INTR_DISABLE, 806 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
694 (unsigned long)host->dev_base +
695 MG_REG_DRV_CTRL);
696 807
697 outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND); 808 outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
698 /* wait until mflash deep sleep */ 809 /* wait until mflash deep sleep */
@@ -700,9 +811,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
700 811
701 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) { 812 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
702 if (!prv_data->use_polling) 813 if (!prv_data->use_polling)
703 outb(MG_REG_CTRL_INTR_ENABLE, 814 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
704 (unsigned long)host->dev_base +
705 MG_REG_DRV_CTRL);
706 return -EIO; 815 return -EIO;
707 } 816 }
708 817
@@ -725,8 +834,7 @@ static int mg_resume(struct platform_device *plat_dev)
725 return -EIO; 834 return -EIO;
726 835
727 if (!prv_data->use_polling) 836 if (!prv_data->use_polling)
728 outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base + 837 outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
729 MG_REG_DRV_CTRL);
730 838
731 return 0; 839 return 0;
732} 840}
@@ -877,6 +985,7 @@ static int mg_probe(struct platform_device *plat_dev)
877 __func__, __LINE__); 985 __func__, __LINE__);
878 goto probe_err_5; 986 goto probe_err_5;
879 } 987 }
988 host->breq->queuedata = host;
880 989
881 /* mflash is a random-access device, thanks to the noop elevator */ 990
882 elevator_exit(host->breq->elevator); 991 elevator_exit(host->breq->elevator);
@@ -887,7 +996,7 @@ static int mg_probe(struct platform_device *plat_dev)
887 goto probe_err_6; 996 goto probe_err_6;
888 } 997 }
889 blk_queue_max_sectors(host->breq, MG_MAX_SECTS); 998 blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
890 blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE); 999 blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);
891 1000
892 init_timer(&host->timer); 1001 init_timer(&host->timer);
893 host->timer.function = mg_times_out; 1002 host->timer.function = mg_times_out;
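Two small probe-time changes close out mg_disk: the queue's queuedata now points back at the host, so the request functions above can drop the per-request private_data lookup, and blk_queue_hardsect_size() is renamed blk_queue_logical_block_size() with identical semantics, part of the same API series. The resulting setup, as a sketch:

	host->breq->queuedata = host;	/* the request_fn reads this back */
	blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
	blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);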
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4d6de4f15cc..5d23ffad7c7 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -110,7 +110,7 @@ static void nbd_end_request(struct request *req)
110 req, error ? "failed" : "done"); 110 req, error ? "failed" : "done");
111 111
112 spin_lock_irqsave(q->queue_lock, flags); 112 spin_lock_irqsave(q->queue_lock, flags);
113 __blk_end_request(req, error, req->nr_sectors << 9); 113 __blk_end_request_all(req, error);
114 spin_unlock_irqrestore(q->queue_lock, flags); 114 spin_unlock_irqrestore(q->queue_lock, flags);
115} 115}
116 116
@@ -231,19 +231,19 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
231{ 231{
232 int result, flags; 232 int result, flags;
233 struct nbd_request request; 233 struct nbd_request request;
234 unsigned long size = req->nr_sectors << 9; 234 unsigned long size = blk_rq_bytes(req);
235 235
236 request.magic = htonl(NBD_REQUEST_MAGIC); 236 request.magic = htonl(NBD_REQUEST_MAGIC);
237 request.type = htonl(nbd_cmd(req)); 237 request.type = htonl(nbd_cmd(req));
238 request.from = cpu_to_be64((u64) req->sector << 9); 238 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
239 request.len = htonl(size); 239 request.len = htonl(size);
240 memcpy(request.handle, &req, sizeof(req)); 240 memcpy(request.handle, &req, sizeof(req));
241 241
242 dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n", 242 dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
243 lo->disk->disk_name, req, 243 lo->disk->disk_name, req,
244 nbdcmd_to_ascii(nbd_cmd(req)), 244 nbdcmd_to_ascii(nbd_cmd(req)),
245 (unsigned long long)req->sector << 9, 245 (unsigned long long)blk_rq_pos(req) << 9,
246 req->nr_sectors << 9); 246 blk_rq_bytes(req));
247 result = sock_xmit(lo, 1, &request, sizeof(request), 247 result = sock_xmit(lo, 1, &request, sizeof(request),
248 (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0); 248 (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
249 if (result <= 0) { 249 if (result <= 0) {
@@ -533,11 +533,9 @@ static void do_nbd_request(struct request_queue *q)
533{ 533{
534 struct request *req; 534 struct request *req;
535 535
536 while ((req = elv_next_request(q)) != NULL) { 536 while ((req = blk_fetch_request(q)) != NULL) {
537 struct nbd_device *lo; 537 struct nbd_device *lo;
538 538
539 blkdev_dequeue_request(req);
540
541 spin_unlock_irq(q->queue_lock); 539 spin_unlock_irq(q->queue_lock);
542 540
543 dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", 541 dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
@@ -580,13 +578,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
580 blk_rq_init(NULL, &sreq); 578 blk_rq_init(NULL, &sreq);
581 sreq.cmd_type = REQ_TYPE_SPECIAL; 579 sreq.cmd_type = REQ_TYPE_SPECIAL;
582 nbd_cmd(&sreq) = NBD_CMD_DISC; 580 nbd_cmd(&sreq) = NBD_CMD_DISC;
583 /*
584 * Set these to sane values in case server implementation
585 * fails to check the request type first and also to keep
586 * debugging output cleaner.
587 */
588 sreq.sector = 0;
589 sreq.nr_sectors = 0;
590 if (!lo->sock) 581 if (!lo->sock)
591 return -EINVAL; 582 return -EINVAL;
592 nbd_send_req(lo, &sreq); 583 nbd_send_req(lo, &sreq);
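The nbd changes are the same accessors on the wire path: blk_rq_pos() and blk_rq_bytes() fill the request header, and since blk_rq_bytes() is unsigned int the dprintk specifier changes from %lu to %u. The deleted sreq initialization is safe to drop because blk_rq_init() already zeroes the request. Filling the header from the accessors, as a sketch with context as in the hunk:

	request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);	/* byte offset */
	request.len  = htonl(blk_rq_bytes(req));		/* total bytes */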
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index e91d4b4b014..911dfd98d81 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -719,32 +719,37 @@ static void do_pcd_request(struct request_queue * q)
719 if (pcd_busy) 719 if (pcd_busy)
720 return; 720 return;
721 while (1) { 721 while (1) {
722 pcd_req = elv_next_request(q); 722 if (!pcd_req) {
723 if (!pcd_req) 723 pcd_req = blk_fetch_request(q);
724 return; 724 if (!pcd_req)
725 return;
726 }
725 727
726 if (rq_data_dir(pcd_req) == READ) { 728 if (rq_data_dir(pcd_req) == READ) {
727 struct pcd_unit *cd = pcd_req->rq_disk->private_data; 729 struct pcd_unit *cd = pcd_req->rq_disk->private_data;
728 if (cd != pcd_current) 730 if (cd != pcd_current)
729 pcd_bufblk = -1; 731 pcd_bufblk = -1;
730 pcd_current = cd; 732 pcd_current = cd;
731 pcd_sector = pcd_req->sector; 733 pcd_sector = blk_rq_pos(pcd_req);
732 pcd_count = pcd_req->current_nr_sectors; 734 pcd_count = blk_rq_cur_sectors(pcd_req);
733 pcd_buf = pcd_req->buffer; 735 pcd_buf = pcd_req->buffer;
734 pcd_busy = 1; 736 pcd_busy = 1;
735 ps_set_intr(do_pcd_read, NULL, 0, nice); 737 ps_set_intr(do_pcd_read, NULL, 0, nice);
736 return; 738 return;
737 } else 739 } else {
738 end_request(pcd_req, 0); 740 __blk_end_request_all(pcd_req, -EIO);
741 pcd_req = NULL;
742 }
739 } 743 }
740} 744}
741 745
742static inline void next_request(int success) 746static inline void next_request(int err)
743{ 747{
744 unsigned long saved_flags; 748 unsigned long saved_flags;
745 749
746 spin_lock_irqsave(&pcd_lock, saved_flags); 750 spin_lock_irqsave(&pcd_lock, saved_flags);
747 end_request(pcd_req, success); 751 if (!__blk_end_request_cur(pcd_req, err))
752 pcd_req = NULL;
748 pcd_busy = 0; 753 pcd_busy = 0;
749 do_pcd_request(pcd_queue); 754 do_pcd_request(pcd_queue);
750 spin_unlock_irqrestore(&pcd_lock, saved_flags); 755 spin_unlock_irqrestore(&pcd_lock, saved_flags);
@@ -781,7 +786,7 @@ static void pcd_start(void)
781 786
782 if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) { 787 if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
783 pcd_bufblk = -1; 788 pcd_bufblk = -1;
784 next_request(0); 789 next_request(-EIO);
785 return; 790 return;
786 } 791 }
787 792
@@ -796,7 +801,7 @@ static void do_pcd_read(void)
796 pcd_retries = 0; 801 pcd_retries = 0;
797 pcd_transfer(); 802 pcd_transfer();
798 if (!pcd_count) { 803 if (!pcd_count) {
799 next_request(1); 804 next_request(0);
800 return; 805 return;
801 } 806 }
802 807
@@ -815,7 +820,7 @@ static void do_pcd_read_drq(void)
815 return; 820 return;
816 } 821 }
817 pcd_bufblk = -1; 822 pcd_bufblk = -1;
818 next_request(0); 823 next_request(-EIO);
819 return; 824 return;
820 } 825 }
821 826
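pcd adopts the cached-request pattern and, with it, the new completion polarity: the old end_request(req, uptodate) took 1 for success and 0 for failure, while __blk_end_request_cur(req, err) takes 0 or a negative errno and returns true while the request still has sectors left. That is why every next_request(0)/next_request(1) call above flips to -EIO/0. The converted completion path, as a sketch; the function name is hypothetical but mirrors next_request() in the hunk:

	static void finish(int err)			/* 0 or -EIO */
	{
		unsigned long flags;

		spin_lock_irqsave(&pcd_lock, flags);
		if (!__blk_end_request_cur(pcd_req, err))
			pcd_req = NULL;			/* fully completed */
		pcd_busy = 0;
		do_pcd_request(pcd_queue);		/* restart the queue */
		spin_unlock_irqrestore(&pcd_lock, flags);
	}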
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 9299455b0af..bf5955b3d87 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -410,10 +410,12 @@ static void run_fsm(void)
410 pd_claimed = 0; 410 pd_claimed = 0;
411 phase = NULL; 411 phase = NULL;
412 spin_lock_irqsave(&pd_lock, saved_flags); 412 spin_lock_irqsave(&pd_lock, saved_flags);
413 end_request(pd_req, res); 413 if (!__blk_end_request_cur(pd_req,
414 pd_req = elv_next_request(pd_queue); 414 res == Ok ? 0 : -EIO)) {
415 if (!pd_req) 415 pd_req = blk_fetch_request(pd_queue);
416 stop = 1; 416 if (!pd_req)
417 stop = 1;
418 }
417 spin_unlock_irqrestore(&pd_lock, saved_flags); 419 spin_unlock_irqrestore(&pd_lock, saved_flags);
418 if (stop) 420 if (stop)
419 return; 421 return;
@@ -443,11 +445,11 @@ static enum action do_pd_io_start(void)
443 445
444 pd_cmd = rq_data_dir(pd_req); 446 pd_cmd = rq_data_dir(pd_req);
445 if (pd_cmd == READ || pd_cmd == WRITE) { 447 if (pd_cmd == READ || pd_cmd == WRITE) {
446 pd_block = pd_req->sector; 448 pd_block = blk_rq_pos(pd_req);
447 pd_count = pd_req->current_nr_sectors; 449 pd_count = blk_rq_cur_sectors(pd_req);
448 if (pd_block + pd_count > get_capacity(pd_req->rq_disk)) 450 if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
449 return Fail; 451 return Fail;
450 pd_run = pd_req->nr_sectors; 452 pd_run = blk_rq_sectors(pd_req);
451 pd_buf = pd_req->buffer; 453 pd_buf = pd_req->buffer;
452 pd_retries = 0; 454 pd_retries = 0;
453 if (pd_cmd == READ) 455 if (pd_cmd == READ)
@@ -477,8 +479,8 @@ static int pd_next_buf(void)
477 if (pd_count) 479 if (pd_count)
478 return 0; 480 return 0;
479 spin_lock_irqsave(&pd_lock, saved_flags); 481 spin_lock_irqsave(&pd_lock, saved_flags);
480 end_request(pd_req, 1); 482 __blk_end_request_cur(pd_req, 0);
481 pd_count = pd_req->current_nr_sectors; 483 pd_count = blk_rq_cur_sectors(pd_req);
482 pd_buf = pd_req->buffer; 484 pd_buf = pd_req->buffer;
483 spin_unlock_irqrestore(&pd_lock, saved_flags); 485 spin_unlock_irqrestore(&pd_lock, saved_flags);
484 return 0; 486 return 0;
@@ -702,7 +704,7 @@ static void do_pd_request(struct request_queue * q)
702{ 704{
703 if (pd_req) 705 if (pd_req)
704 return; 706 return;
705 pd_req = elv_next_request(q); 707 pd_req = blk_fetch_request(q);
706 if (!pd_req) 708 if (!pd_req)
707 return; 709 return;
708 710
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index bef3b997ba3..68a90834e99 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -750,12 +750,10 @@ static int pf_ready(void)
750 750
751static struct request_queue *pf_queue; 751static struct request_queue *pf_queue;
752 752
753static void pf_end_request(int uptodate) 753static void pf_end_request(int err)
754{ 754{
755 if (pf_req) { 755 if (pf_req && !__blk_end_request_cur(pf_req, err))
756 end_request(pf_req, uptodate);
757 pf_req = NULL; 756 pf_req = NULL;
758 }
759} 757}
760 758
761static void do_pf_request(struct request_queue * q) 759static void do_pf_request(struct request_queue * q)
@@ -763,17 +761,19 @@ static void do_pf_request(struct request_queue * q)
763 if (pf_busy) 761 if (pf_busy)
764 return; 762 return;
765repeat: 763repeat:
766 pf_req = elv_next_request(q); 764 if (!pf_req) {
767 if (!pf_req) 765 pf_req = blk_fetch_request(q);
768 return; 766 if (!pf_req)
767 return;
768 }
769 769
770 pf_current = pf_req->rq_disk->private_data; 770 pf_current = pf_req->rq_disk->private_data;
771 pf_block = pf_req->sector; 771 pf_block = blk_rq_pos(pf_req);
772 pf_run = pf_req->nr_sectors; 772 pf_run = blk_rq_sectors(pf_req);
773 pf_count = pf_req->current_nr_sectors; 773 pf_count = blk_rq_cur_sectors(pf_req);
774 774
775 if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) { 775 if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
776 pf_end_request(0); 776 pf_end_request(-EIO);
777 goto repeat; 777 goto repeat;
778 } 778 }
779 779
@@ -788,7 +788,7 @@ repeat:
788 pi_do_claimed(pf_current->pi, do_pf_write); 788 pi_do_claimed(pf_current->pi, do_pf_write);
789 else { 789 else {
790 pf_busy = 0; 790 pf_busy = 0;
791 pf_end_request(0); 791 pf_end_request(-EIO);
792 goto repeat; 792 goto repeat;
793 } 793 }
794} 794}
@@ -805,23 +805,22 @@ static int pf_next_buf(void)
805 return 1; 805 return 1;
806 if (!pf_count) { 806 if (!pf_count) {
807 spin_lock_irqsave(&pf_spin_lock, saved_flags); 807 spin_lock_irqsave(&pf_spin_lock, saved_flags);
808 pf_end_request(1); 808 pf_end_request(0);
809 pf_req = elv_next_request(pf_queue);
810 spin_unlock_irqrestore(&pf_spin_lock, saved_flags); 809 spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
811 if (!pf_req) 810 if (!pf_req)
812 return 1; 811 return 1;
813 pf_count = pf_req->current_nr_sectors; 812 pf_count = blk_rq_cur_sectors(pf_req);
814 pf_buf = pf_req->buffer; 813 pf_buf = pf_req->buffer;
815 } 814 }
816 return 0; 815 return 0;
817} 816}
818 817
819static inline void next_request(int success) 818static inline void next_request(int err)
820{ 819{
821 unsigned long saved_flags; 820 unsigned long saved_flags;
822 821
823 spin_lock_irqsave(&pf_spin_lock, saved_flags); 822 spin_lock_irqsave(&pf_spin_lock, saved_flags);
824 pf_end_request(success); 823 pf_end_request(err);
825 pf_busy = 0; 824 pf_busy = 0;
826 do_pf_request(pf_queue); 825 do_pf_request(pf_queue);
827 spin_unlock_irqrestore(&pf_spin_lock, saved_flags); 826 spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
@@ -844,7 +843,7 @@ static void do_pf_read_start(void)
844 pi_do_claimed(pf_current->pi, do_pf_read_start); 843 pi_do_claimed(pf_current->pi, do_pf_read_start);
845 return; 844 return;
846 } 845 }
847 next_request(0); 846 next_request(-EIO);
848 return; 847 return;
849 } 848 }
850 pf_mask = STAT_DRQ; 849 pf_mask = STAT_DRQ;
@@ -863,7 +862,7 @@ static void do_pf_read_drq(void)
863 pi_do_claimed(pf_current->pi, do_pf_read_start); 862 pi_do_claimed(pf_current->pi, do_pf_read_start);
864 return; 863 return;
865 } 864 }
866 next_request(0); 865 next_request(-EIO);
867 return; 866 return;
868 } 867 }
869 pi_read_block(pf_current->pi, pf_buf, 512); 868 pi_read_block(pf_current->pi, pf_buf, 512);
@@ -871,7 +870,7 @@ static void do_pf_read_drq(void)
871 break; 870 break;
872 } 871 }
873 pi_disconnect(pf_current->pi); 872 pi_disconnect(pf_current->pi);
874 next_request(1); 873 next_request(0);
875} 874}
876 875
877static void do_pf_write(void) 876static void do_pf_write(void)
@@ -890,7 +889,7 @@ static void do_pf_write_start(void)
890 pi_do_claimed(pf_current->pi, do_pf_write_start); 889 pi_do_claimed(pf_current->pi, do_pf_write_start);
891 return; 890 return;
892 } 891 }
893 next_request(0); 892 next_request(-EIO);
894 return; 893 return;
895 } 894 }
896 895
@@ -903,7 +902,7 @@ static void do_pf_write_start(void)
903 pi_do_claimed(pf_current->pi, do_pf_write_start); 902 pi_do_claimed(pf_current->pi, do_pf_write_start);
904 return; 903 return;
905 } 904 }
906 next_request(0); 905 next_request(-EIO);
907 return; 906 return;
908 } 907 }
909 pi_write_block(pf_current->pi, pf_buf, 512); 908 pi_write_block(pf_current->pi, pf_buf, 512);
@@ -923,11 +922,11 @@ static void do_pf_write_done(void)
923 pi_do_claimed(pf_current->pi, do_pf_write_start); 922 pi_do_claimed(pf_current->pi, do_pf_write_start);
924 return; 923 return;
925 } 924 }
926 next_request(0); 925 next_request(-EIO);
927 return; 926 return;
928 } 927 }
929 pi_disconnect(pf_current->pi); 928 pi_disconnect(pf_current->pi);
930 next_request(1); 929 next_request(0);
931} 930}
932 931
933static int __init pf_init(void) 932static int __init pf_init(void)
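pf shows the accessor split in one place: blk_rq_pos() gives the starting sector, blk_rq_sectors() the whole request, and blk_rq_cur_sectors() just the current segment, which is what the 512-byte-at-a-time transfer loop refills from via pf_next_buf(). How the three relate when a request is picked up, as a sketch with context as in the hunk:

	pf_block = blk_rq_pos(pf_req);		/* first sector of the request */
	pf_run   = blk_rq_sectors(pf_req);	/* sectors in the whole request */
	pf_count = blk_rq_cur_sectors(pf_req);	/* sectors in this segment only */
	pf_buf   = pf_req->buffer;		/* kernel address of the segment */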
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index dc7a8c352da..d57f1175948 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -991,13 +991,15 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
991 */ 991 */
992static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) 992static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
993{ 993{
994 if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) { 994 if ((pd->settings.size << 9) / CD_FRAMESIZE
995 <= queue_max_phys_segments(q)) {
995 /* 996 /*
996 * The cdrom device can handle one segment/frame 997 * The cdrom device can handle one segment/frame
997 */ 998 */
998 clear_bit(PACKET_MERGE_SEGS, &pd->flags); 999 clear_bit(PACKET_MERGE_SEGS, &pd->flags);
999 return 0; 1000 return 0;
1000 } else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) { 1001 } else if ((pd->settings.size << 9) / PAGE_SIZE
1002 <= queue_max_phys_segments(q)) {
1001 /* 1003 /*
1002 * We can handle this case at the expense of some extra memory 1004 * We can handle this case at the expense of some extra memory
1003 * copies during write operations 1005 * copies during write operations
@@ -2657,7 +2659,7 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
2657 struct request_queue *q = pd->disk->queue; 2659 struct request_queue *q = pd->disk->queue;
2658 2660
2659 blk_queue_make_request(q, pkt_make_request); 2661 blk_queue_make_request(q, pkt_make_request);
2660 blk_queue_hardsect_size(q, CD_FRAMESIZE); 2662 blk_queue_logical_block_size(q, CD_FRAMESIZE);
2661 blk_queue_max_sectors(q, PACKET_MAX_SECTORS); 2663 blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
2662 blk_queue_merge_bvec(q, pkt_merge_bvec); 2664 blk_queue_merge_bvec(q, pkt_merge_bvec);
2663 q->queuedata = pd; 2665 q->queuedata = pd;
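The pktcdvd change is the same accessor discipline applied to queue limits: direct reads of q->max_phys_segments become queue_max_phys_segments(q), which lets the limit fields move behind the queue API, and blk_queue_hardsect_size() gets the same rename as elsewhere. The rewritten test, as a sketch condensed from the hunk:

	/* one segment per CD frame fits within the queue's segment limit */
	if ((pd->settings.size << 9) / CD_FRAMESIZE <= queue_max_phys_segments(q))
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);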
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index bccc42bb921..aaeeb544228 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -134,13 +134,12 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
134 rq_for_each_segment(bv, req, iter) 134 rq_for_each_segment(bv, req, iter)
135 n++; 135 n++;
136 dev_dbg(&dev->sbd.core, 136 dev_dbg(&dev->sbd.core,
137 "%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n", 137 "%s:%u: %s req has %u bvecs for %u sectors\n",
138 __func__, __LINE__, op, n, req->nr_sectors, 138 __func__, __LINE__, op, n, blk_rq_sectors(req));
139 req->hard_nr_sectors);
140#endif 139#endif
141 140
142 start_sector = req->sector * priv->blocking_factor; 141 start_sector = blk_rq_pos(req) * priv->blocking_factor;
143 sectors = req->nr_sectors * priv->blocking_factor; 142 sectors = blk_rq_sectors(req) * priv->blocking_factor;
144 dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n", 143 dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
145 __func__, __LINE__, op, sectors, start_sector); 144 __func__, __LINE__, op, sectors, start_sector);
146 145
@@ -158,7 +157,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
158 if (res) { 157 if (res) {
159 dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__, 158 dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
160 __LINE__, op, res); 159 __LINE__, op, res);
161 end_request(req, 0); 160 __blk_end_request_all(req, -EIO);
162 return 0; 161 return 0;
163 } 162 }
164 163
@@ -180,7 +179,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
180 if (res) { 179 if (res) {
181 dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n", 180 dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
182 __func__, __LINE__, res); 181 __func__, __LINE__, res);
183 end_request(req, 0); 182 __blk_end_request_all(req, -EIO);
184 return 0; 183 return 0;
185 } 184 }
186 185
@@ -195,7 +194,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
195 194
196 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); 195 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
197 196
198 while ((req = elv_next_request(q))) { 197 while ((req = blk_fetch_request(q))) {
199 if (blk_fs_request(req)) { 198 if (blk_fs_request(req)) {
200 if (ps3disk_submit_request_sg(dev, req)) 199 if (ps3disk_submit_request_sg(dev, req))
201 break; 200 break;
@@ -205,7 +204,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
205 break; 204 break;
206 } else { 205 } else {
207 blk_dump_rq_flags(req, DEVICE_NAME " bad request"); 206 blk_dump_rq_flags(req, DEVICE_NAME " bad request");
208 end_request(req, 0); 207 __blk_end_request_all(req, -EIO);
209 continue; 208 continue;
210 } 209 }
211 } 210 }
@@ -231,7 +230,6 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
231 struct request *req; 230 struct request *req;
232 int res, read, error; 231 int res, read, error;
233 u64 tag, status; 232 u64 tag, status;
234 unsigned long num_sectors;
235 const char *op; 233 const char *op;
236 234
237 res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status); 235 res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
@@ -261,11 +259,9 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
261 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && 259 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
262 req->cmd[0] == REQ_LB_OP_FLUSH) { 260 req->cmd[0] == REQ_LB_OP_FLUSH) {
263 read = 0; 261 read = 0;
264 num_sectors = req->hard_cur_sectors;
265 op = "flush"; 262 op = "flush";
266 } else { 263 } else {
267 read = !rq_data_dir(req); 264 read = !rq_data_dir(req);
268 num_sectors = req->nr_sectors;
269 op = read ? "read" : "write"; 265 op = read ? "read" : "write";
270 } 266 }
271 if (status) { 267 if (status) {
@@ -281,7 +277,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
281 } 277 }
282 278
283 spin_lock(&priv->lock); 279 spin_lock(&priv->lock);
284 __blk_end_request(req, error, num_sectors << 9); 280 __blk_end_request_all(req, error);
285 priv->req = NULL; 281 priv->req = NULL;
286 ps3disk_do_request(dev, priv->queue); 282 ps3disk_do_request(dev, priv->queue);
287 spin_unlock(&priv->lock); 283 spin_unlock(&priv->lock);
@@ -481,7 +477,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
481 blk_queue_max_sectors(queue, dev->bounce_size >> 9); 477 blk_queue_max_sectors(queue, dev->bounce_size >> 9);
482 blk_queue_segment_boundary(queue, -1UL); 478 blk_queue_segment_boundary(queue, -1UL);
483 blk_queue_dma_alignment(queue, dev->blk_size-1); 479 blk_queue_dma_alignment(queue, dev->blk_size-1);
484 blk_queue_hardsect_size(queue, dev->blk_size); 480 blk_queue_logical_block_size(queue, dev->blk_size);
485 481
486 blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH, 482 blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
487 ps3disk_prepare_flush); 483 ps3disk_prepare_flush);
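ps3disk no longer computes num_sectors by hand: __blk_end_request_all() completes whatever remains of a request, for the flush path and the read/write path alike, so the interrupt handler shrinks to bookkeeping plus a queue restart. The completion, as a sketch; priv->lock guards the one-request-in-flight state as in the hunk:

	spin_lock(&priv->lock);
	__blk_end_request_all(req, error);	/* error is 0 or -EIO */
	priv->req = NULL;
	ps3disk_do_request(dev, priv->queue);	/* pull in the next request */
	spin_unlock(&priv->lock);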
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 5861e33efe6..cbfd9c0aef0 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -212,11 +212,6 @@ static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
212 vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD); 212 vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
213} 213}
214 214
215static void vdc_end_request(struct request *req, int error, int num_sectors)
216{
217 __blk_end_request(req, error, num_sectors << 9);
218}
219
220static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr, 215static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
221 unsigned int index) 216 unsigned int index)
222{ 217{
@@ -239,7 +234,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
239 234
240 rqe->req = NULL; 235 rqe->req = NULL;
241 236
242 vdc_end_request(req, (desc->status ? -EIO : 0), desc->size >> 9); 237 __blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
243 238
244 if (blk_queue_stopped(port->disk->queue)) 239 if (blk_queue_stopped(port->disk->queue))
245 blk_start_queue(port->disk->queue); 240 blk_start_queue(port->disk->queue);
@@ -421,7 +416,7 @@ static int __send_request(struct request *req)
421 desc->slice = 0; 416 desc->slice = 0;
422 } 417 }
423 desc->status = ~0; 418 desc->status = ~0;
424 desc->offset = (req->sector << 9) / port->vdisk_block_size; 419 desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
425 desc->size = len; 420 desc->size = len;
426 desc->ncookies = err; 421 desc->ncookies = err;
427 422
@@ -446,14 +441,13 @@ out:
446static void do_vdc_request(struct request_queue *q) 441static void do_vdc_request(struct request_queue *q)
447{ 442{
448 while (1) { 443 while (1) {
449 struct request *req = elv_next_request(q); 444 struct request *req = blk_fetch_request(q);
450 445
451 if (!req) 446 if (!req)
452 break; 447 break;
453 448
454 blkdev_dequeue_request(req);
455 if (__send_request(req) < 0) 449 if (__send_request(req) < 0)
456 vdc_end_request(req, -EIO, req->hard_nr_sectors); 450 __blk_end_request_all(req, -EIO);
457 } 451 }
458} 452}
459 453
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index d22cc385693..cf7877fb8a7 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -514,7 +514,7 @@ static int floppy_read_sectors(struct floppy_state *fs,
514 ret = swim_read_sector(fs, side, track, sector, 514 ret = swim_read_sector(fs, side, track, sector,
515 buffer); 515 buffer);
516 if (try-- == 0) 516 if (try-- == 0)
517 return -1; 517 return -EIO;
518 } while (ret != 512); 518 } while (ret != 512);
519 519
520 buffer += ret; 520 buffer += ret;
@@ -528,45 +528,31 @@ static void redo_fd_request(struct request_queue *q)
528 struct request *req; 528 struct request *req;
529 struct floppy_state *fs; 529 struct floppy_state *fs;
530 530
531 while ((req = elv_next_request(q))) { 531 req = blk_fetch_request(q);
532 while (req) {
533 int err = -EIO;
532 534
533 fs = req->rq_disk->private_data; 535 fs = req->rq_disk->private_data;
534 if (req->sector < 0 || req->sector >= fs->total_secs) { 536 if (blk_rq_pos(req) >= fs->total_secs)
535 end_request(req, 0); 537 goto done;
536 continue; 538 if (!fs->disk_in)
537 } 539 goto done;
538 if (req->current_nr_sectors == 0) { 540 if (rq_data_dir(req) == WRITE && fs->write_protected)
539 end_request(req, 1); 541 goto done;
540 continue; 542
541 }
542 if (!fs->disk_in) {
543 end_request(req, 0);
544 continue;
545 }
546 if (rq_data_dir(req) == WRITE) {
547 if (fs->write_protected) {
548 end_request(req, 0);
549 continue;
550 }
551 }
552 switch (rq_data_dir(req)) { 543 switch (rq_data_dir(req)) {
553 case WRITE: 544 case WRITE:
554 /* NOT IMPLEMENTED */ 545 /* NOT IMPLEMENTED */
555 end_request(req, 0);
556 break; 546 break;
557 case READ: 547 case READ:
558 if (floppy_read_sectors(fs, req->sector, 548 err = floppy_read_sectors(fs, blk_rq_pos(req),
559 req->current_nr_sectors, 549 blk_rq_cur_sectors(req),
560 req->buffer)) { 550 req->buffer);
561 end_request(req, 0);
562 continue;
563 }
564 req->nr_sectors -= req->current_nr_sectors;
565 req->sector += req->current_nr_sectors;
566 req->buffer += req->current_nr_sectors * 512;
567 end_request(req, 1);
568 break; 551 break;
569 } 552 }
553 done:
554 if (!__blk_end_request_cur(req, err))
555 req = blk_fetch_request(q);
570 } 556 }
571} 557}
572 558
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 612965307ba..80df93e3cdd 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -251,6 +251,20 @@ static int floppy_release(struct gendisk *disk, fmode_t mode);
 static int floppy_check_change(struct gendisk *disk);
 static int floppy_revalidate(struct gendisk *disk);
 
+static bool swim3_end_request(int err, unsigned int nr_bytes)
+{
+	if (__blk_end_request(fd_req, err, nr_bytes))
+		return true;
+
+	fd_req = NULL;
+	return false;
+}
+
+static bool swim3_end_request_cur(int err)
+{
+	return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+}
+
 static void swim3_select(struct floppy_state *fs, int sel)
 {
 	struct swim3 __iomem *sw = fs->swim3;
@@ -310,25 +324,27 @@ static void start_request(struct floppy_state *fs)
 		wake_up(&fs->wait);
 		return;
 	}
-	while (fs->state == idle && (req = elv_next_request(swim3_queue))) {
+	while (fs->state == idle) {
+		if (!fd_req) {
+			fd_req = blk_fetch_request(swim3_queue);
+			if (!fd_req)
+				break;
+		}
+		req = fd_req;
 #if 0
-		printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
+		printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
 		       req->rq_disk->disk_name, req->cmd,
-		       (long)req->sector, req->nr_sectors, req->buffer);
-		printk("           errors=%d current_nr_sectors=%ld\n",
-		       req->errors, req->current_nr_sectors);
+		       (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
+		printk("           errors=%d current_nr_sectors=%u\n",
+		       req->errors, blk_rq_cur_sectors(req));
 #endif
 
-		if (req->sector < 0 || req->sector >= fs->total_secs) {
-			end_request(req, 0);
-			continue;
-		}
-		if (req->current_nr_sectors == 0) {
-			end_request(req, 1);
+		if (blk_rq_pos(req) >= fs->total_secs) {
+			swim3_end_request_cur(-EIO);
 			continue;
 		}
 		if (fs->ejected) {
-			end_request(req, 0);
+			swim3_end_request_cur(-EIO);
 			continue;
 		}
 
@@ -336,18 +352,19 @@ static void start_request(struct floppy_state *fs)
 			if (fs->write_prot < 0)
 				fs->write_prot = swim3_readbit(fs, WRITE_PROT);
 			if (fs->write_prot) {
-				end_request(req, 0);
+				swim3_end_request_cur(-EIO);
 				continue;
 			}
 		}
 
-		/* Do not remove the cast. req->sector is now a sector_t and
-		 * can be 64 bits, but it will never go past 32 bits for this
-		 * driver anyway, so we can safely cast it down and not have
-		 * to do a 64/32 division
+		/* Do not remove the cast. blk_rq_pos(req) is now a
+		 * sector_t and can be 64 bits, but it will never go
+		 * past 32 bits for this driver anyway, so we can
+		 * safely cast it down and not have to do a 64/32
+		 * division
 		 */
-		fs->req_cyl = ((long)req->sector) / fs->secpercyl;
-		x = ((long)req->sector) % fs->secpercyl;
+		fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
+		x = ((long)blk_rq_pos(req)) % fs->secpercyl;
 		fs->head = x / fs->secpertrack;
 		fs->req_sector = x % fs->secpertrack + 1;
 		fd_req = req;
@@ -424,7 +441,7 @@ static inline void setup_transfer(struct floppy_state *fs)
 	struct dbdma_cmd *cp = fs->dma_cmd;
 	struct dbdma_regs __iomem *dr = fs->dma;
 
-	if (fd_req->current_nr_sectors <= 0) {
+	if (blk_rq_cur_sectors(fd_req) <= 0) {
 		printk(KERN_ERR "swim3: transfer 0 sectors?\n");
 		return;
 	}
@@ -432,8 +449,8 @@ static inline void setup_transfer(struct floppy_state *fs)
 		n = 1;
 	else {
 		n = fs->secpertrack - fs->req_sector + 1;
-		if (n > fd_req->current_nr_sectors)
-			n = fd_req->current_nr_sectors;
+		if (n > blk_rq_cur_sectors(fd_req))
+			n = blk_rq_cur_sectors(fd_req);
 	}
 	fs->scount = n;
 	swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
@@ -508,7 +525,7 @@ static void act(struct floppy_state *fs)
 	case do_transfer:
 		if (fs->cur_cyl != fs->req_cyl) {
 			if (fs->retries > 5) {
-				end_request(fd_req, 0);
+				swim3_end_request_cur(-EIO);
 				fs->state = idle;
 				return;
 			}
@@ -540,7 +557,7 @@ static void scan_timeout(unsigned long data)
 	out_8(&sw->intr_enable, 0);
 	fs->cur_cyl = -1;
 	if (fs->retries > 5) {
-		end_request(fd_req, 0);
+		swim3_end_request_cur(-EIO);
 		fs->state = idle;
 		start_request(fs);
 	} else {
@@ -559,7 +576,7 @@ static void seek_timeout(unsigned long data)
 	out_8(&sw->select, RELAX);
 	out_8(&sw->intr_enable, 0);
 	printk(KERN_ERR "swim3: seek timeout\n");
-	end_request(fd_req, 0);
+	swim3_end_request_cur(-EIO);
 	fs->state = idle;
 	start_request(fs);
 }
@@ -583,7 +600,7 @@ static void settle_timeout(unsigned long data)
 		return;
 	}
 	printk(KERN_ERR "swim3: seek settle timeout\n");
-	end_request(fd_req, 0);
+	swim3_end_request_cur(-EIO);
 	fs->state = idle;
 	start_request(fs);
 }
@@ -593,8 +610,6 @@ static void xfer_timeout(unsigned long data)
 	struct floppy_state *fs = (struct floppy_state *) data;
 	struct swim3 __iomem *sw = fs->swim3;
 	struct dbdma_regs __iomem *dr = fs->dma;
-	struct dbdma_cmd *cp = fs->dma_cmd;
-	unsigned long s;
 	int n;
 
 	fs->timeout_pending = 0;
@@ -605,17 +620,10 @@ static void xfer_timeout(unsigned long data)
 	out_8(&sw->intr_enable, 0);
 	out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
 	out_8(&sw->select, RELAX);
-	if (rq_data_dir(fd_req) == WRITE)
-		++cp;
-	if (ld_le16(&cp->xfer_status) != 0)
-		s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9);
-	else
-		s = 0;
-	fd_req->sector += s;
-	fd_req->current_nr_sectors -= s;
 	printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
-	       (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector);
-	end_request(fd_req, 0);
+	       (rq_data_dir(fd_req)==WRITE? "writ": "read"),
+	       (long)blk_rq_pos(fd_req));
+	swim3_end_request_cur(-EIO);
 	fs->state = idle;
 	start_request(fs);
 }
@@ -646,7 +654,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 			printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
 			fs->cur_cyl = -1;
 			if (fs->retries > 5) {
-				end_request(fd_req, 0);
+				swim3_end_request_cur(-EIO);
 				fs->state = idle;
 				start_request(fs);
 			} else {
@@ -719,9 +727,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 		if (intr & ERROR_INTR) {
 			n = fs->scount - 1 - resid / 512;
 			if (n > 0) {
-				fd_req->sector += n;
-				fd_req->current_nr_sectors -= n;
-				fd_req->buffer += n * 512;
+				blk_update_request(fd_req, 0, n << 9);
 				fs->req_sector += n;
 			}
 			if (fs->retries < 5) {
@@ -730,8 +736,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 			} else {
 				printk("swim3: error %sing block %ld (err=%x)\n",
 				       rq_data_dir(fd_req) == WRITE? "writ": "read",
-				       (long)fd_req->sector, err);
-				end_request(fd_req, 0);
+				       (long)blk_rq_pos(fd_req), err);
+				swim3_end_request_cur(-EIO);
 				fs->state = idle;
 			}
 		} else {
@@ -740,18 +746,12 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 				printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
 				printk(KERN_ERR "  state=%d, dir=%x, intr=%x, err=%x\n",
 				       fs->state, rq_data_dir(fd_req), intr, err);
-				end_request(fd_req, 0);
+				swim3_end_request_cur(-EIO);
 				fs->state = idle;
 				start_request(fs);
 				break;
 			}
-			fd_req->sector += fs->scount;
-			fd_req->current_nr_sectors -= fs->scount;
-			fd_req->buffer += fs->scount * 512;
-			if (fd_req->current_nr_sectors <= 0) {
-				end_request(fd_req, 1);
-				fs->state = idle;
-			} else {
+			if (swim3_end_request(0, fs->scount << 9)) {
 				fs->req_sector += fs->scount;
 				if (fs->req_sector > fs->secpertrack) {
 					fs->req_sector -= fs->secpertrack;
@@ -761,7 +761,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 				}
 			}
 			act(fs);
-		}
+		} else
+			fs->state = idle;
 	}
 	if (fs->state == idle)
 		start_request(fs);
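swim3 completes requests from interrupt and timer handlers, so it keeps the in-flight request in the global fd_req and funnels every completion through one helper that forgets the pointer once the request is fully ended. The same idea, sketched with a hypothetical my_req in place of swim3's fd_req:

    static struct request *my_req;      /* single in-flight request */

    /* Returns true while my_req still has bytes outstanding; returns false
     * (and drops the cached pointer) once __blk_end_request() finished it. */
    static bool my_end_request(int err, unsigned int nr_bytes)
    {
            if (__blk_end_request(my_req, err, nr_bytes))
                    return true;

            my_req = NULL;
            return false;
    }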
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index ff0448e4bf0..da403b6a7f4 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -749,8 +749,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
 	struct request *req = crq->rq;
 	int rc;
 
-	rc = __blk_end_request(req, error, blk_rq_bytes(req));
-	assert(rc == 0);
+	__blk_end_request_all(req, error);
 
 	rc = carm_put_request(host, crq);
 	assert(rc == 0);
@@ -811,12 +810,10 @@ static void carm_oob_rq_fn(struct request_queue *q)
 
 	while (1) {
 		DPRINTK("get req\n");
-		rq = elv_next_request(q);
+		rq = blk_fetch_request(q);
 		if (!rq)
 			break;
 
-		blkdev_dequeue_request(rq);
-
 		crq = rq->special;
 		assert(crq != NULL);
 		assert(crq->rq == rq);
@@ -847,7 +844,7 @@ static void carm_rq_fn(struct request_queue *q)
 
 queue_one_request:
 	VPRINTK("get req\n");
-	rq = elv_next_request(q);
+	rq = blk_peek_request(q);
 	if (!rq)
 		return;
 
@@ -858,7 +855,7 @@ queue_one_request:
 	}
 	crq->rq = rq;
 
-	blkdev_dequeue_request(rq);
+	blk_start_request(rq);
 
 	if (rq_data_dir(rq) == WRITE) {
 		writing = 1;
@@ -904,10 +901,10 @@ queue_one_request:
 	msg->sg_count = n_elem;
 	msg->sg_type = SGT_32BIT;
 	msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag));
-	msg->lba = cpu_to_le32(rq->sector & 0xffffffff);
-	tmp = (rq->sector >> 16) >> 16;
+	msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
+	tmp = (blk_rq_pos(rq) >> 16) >> 16;
 	msg->lba_high = cpu_to_le16( (u16) tmp );
-	msg->lba_count = cpu_to_le16(rq->nr_sectors);
+	msg->lba_count = cpu_to_le16(blk_rq_sectors(rq));
 
 	msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
 	for (i = 0; i < n_elem; i++) {
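sx8's normal path keeps the peek and start calls apart: blk_peek_request() only looks at the queue head, and the request is dequeued with blk_start_request() once the driver has a free tag and message buffer for it, so a resource shortage simply leaves the request queued. A sketch of that split, with my_alloc_slot() and my_issue() as hypothetical stand-ins for the driver's tag allocation and hardware submission:

    static void my_rq_fn(struct request_queue *q)
    {
            struct request *rq;

            while ((rq = blk_peek_request(q)) != NULL) {
                    if (!my_alloc_slot())
                            return;         /* no tag: rq stays on the queue */

                    blk_start_request(rq);  /* commit: dequeue, start timeout */
                    my_issue(rq);
            }
    }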
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 689cd27ac89..cc54473b8e7 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -360,8 +360,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
 			       struct ub_scsi_cmd *cmd, struct ub_request *urq);
 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
-static void ub_end_rq(struct request *rq, unsigned int status,
-		      unsigned int cmd_len);
+static void ub_end_rq(struct request *rq, unsigned int status);
 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
 			   struct ub_request *urq, struct ub_scsi_cmd *cmd);
 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
@@ -627,7 +626,7 @@ static void ub_request_fn(struct request_queue *q)
 	struct ub_lun *lun = q->queuedata;
 	struct request *rq;
 
-	while ((rq = elv_next_request(q)) != NULL) {
+	while ((rq = blk_peek_request(q)) != NULL) {
 		if (ub_request_fn_1(lun, rq) != 0) {
 			blk_stop_queue(q);
 			break;
@@ -643,14 +642,14 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 	int n_elem;
 
 	if (atomic_read(&sc->poison)) {
-		blkdev_dequeue_request(rq);
-		ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq));
+		blk_start_request(rq);
+		ub_end_rq(rq, DID_NO_CONNECT << 16);
 		return 0;
 	}
 
 	if (lun->changed && !blk_pc_request(rq)) {
-		blkdev_dequeue_request(rq);
-		ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq));
+		blk_start_request(rq);
+		ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
 		return 0;
 	}
 
@@ -660,7 +659,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 		return -1;
 	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
 
-	blkdev_dequeue_request(rq);
+	blk_start_request(rq);
 
 	urq = &lun->urq;
 	memset(urq, 0, sizeof(struct ub_request));
@@ -702,7 +701,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 
 drop:
 	ub_put_cmd(lun, cmd);
-	ub_end_rq(rq, DID_ERROR << 16, blk_rq_bytes(rq));
+	ub_end_rq(rq, DID_ERROR << 16);
 	return 0;
 }
 
@@ -723,11 +722,11 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
 	/*
 	 * build the command
 	 *
-	 * The call to blk_queue_hardsect_size() guarantees that request
+	 * The call to blk_queue_logical_block_size() guarantees that request
 	 * is aligned, but it is given in terms of 512 byte units, always.
 	 */
-	block = rq->sector >> lun->capacity.bshift;
-	nblks = rq->nr_sectors >> lun->capacity.bshift;
+	block = blk_rq_pos(rq) >> lun->capacity.bshift;
+	nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;
 
 	cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
 	/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
@@ -739,7 +738,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
 	cmd->cdb[8] = nblks;
 	cmd->cdb_len = 10;
 
-	cmd->len = rq->nr_sectors * 512;
+	cmd->len = blk_rq_bytes(rq);
 }
 
 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
@@ -747,7 +746,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
 {
 	struct request *rq = urq->rq;
 
-	if (rq->data_len == 0) {
+	if (blk_rq_bytes(rq) == 0) {
 		cmd->dir = UB_DIR_NONE;
 	} else {
 		if (rq_data_dir(rq) == WRITE)
@@ -762,7 +761,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
 	memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
 	cmd->cdb_len = rq->cmd_len;
 
-	cmd->len = rq->data_len;
+	cmd->len = blk_rq_bytes(rq);
 
 	/*
 	 * To reapply this to every URB is not as incorrect as it looks.
@@ -777,16 +776,15 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 	struct ub_request *urq = cmd->back;
 	struct request *rq;
 	unsigned int scsi_status;
-	unsigned int cmd_len;
 
 	rq = urq->rq;
 
 	if (cmd->error == 0) {
 		if (blk_pc_request(rq)) {
-			if (cmd->act_len >= rq->data_len)
-				rq->data_len = 0;
+			if (cmd->act_len >= rq->resid_len)
+				rq->resid_len = 0;
 			else
-				rq->data_len -= cmd->act_len;
+				rq->resid_len -= cmd->act_len;
 			scsi_status = 0;
 		} else {
 			if (cmd->act_len != cmd->len) {
@@ -818,17 +816,14 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 
 	urq->rq = NULL;
 
-	cmd_len = cmd->len;
 	ub_put_cmd(lun, cmd);
-	ub_end_rq(rq, scsi_status, cmd_len);
+	ub_end_rq(rq, scsi_status);
 	blk_start_queue(lun->disk->queue);
 }
 
-static void ub_end_rq(struct request *rq, unsigned int scsi_status,
-    unsigned int cmd_len)
+static void ub_end_rq(struct request *rq, unsigned int scsi_status)
 {
 	int error;
-	long rqlen;
 
 	if (scsi_status == 0) {
 		error = 0;
@@ -836,12 +831,7 @@ static void ub_end_rq(struct request *rq, unsigned int scsi_status,
 		error = -EIO;
 		rq->errors = scsi_status;
 	}
-	rqlen = blk_rq_bytes(rq);    /* Oddly enough, this is the residue. */
-	if (__blk_end_request(rq, error, cmd_len)) {
-		printk(KERN_WARNING DRV_NAME
-		    ": __blk_end_request blew, %s-cmd total %u rqlen %ld\n",
-		    blk_pc_request(rq)? "pc": "fs", cmd_len, rqlen);
-	}
+	__blk_end_request_all(rq, error);
 }
 
 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
@@ -1759,7 +1749,7 @@ static int ub_bd_revalidate(struct gendisk *disk)
 	ub_revalidate(lun->udev, lun);
 
 	/* XXX Support sector size switching like in sr.c */
-	blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
+	blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
 	set_capacity(disk, lun->capacity.nsec);
 	// set_disk_ro(sdkp->disk, lun->readonly);
 
@@ -2334,7 +2324,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
 	blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
 	blk_queue_segment_boundary(q, 0xffffffff);	/* Dubious. */
 	blk_queue_max_sectors(q, UB_MAX_SECTORS);
-	blk_queue_hardsect_size(q, lun->capacity.bsize);
+	blk_queue_logical_block_size(q, lun->capacity.bsize);
 
 	lun->disk = disk;
 	q->queuedata = lun;
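For packet (SCSI passthrough) requests ub no longer abuses req->data_len to report the residue: in this series blk_start_request() preloads req->resid_len with the full byte count, so the completion handler only has to subtract what the device actually transferred before ending the whole request. A sketch, with act_len as a hypothetical transferred-byte count reported by the hardware:

    static void my_pc_done(struct request *rq, unsigned int act_len, int error)
    {
            /* resid_len was initialized to blk_rq_bytes(rq) at start time;
             * whatever we leave in it is reported back as the residue. */
            if (act_len >= rq->resid_len)
                    rq->resid_len = 0;
            else
                    rq->resid_len -= act_len;

            __blk_end_request_all(rq, error);   /* finish the whole request */
    }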
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index ecccf65dce2..390d69bb7c4 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -252,7 +252,7 @@ static int send_request(struct request *req)
 	struct viodasd_device *d;
 	unsigned long flags;
 
-	start = (u64)req->sector << 9;
+	start = (u64)blk_rq_pos(req) << 9;
 
 	if (rq_data_dir(req) == READ) {
 		direction = DMA_FROM_DEVICE;
@@ -361,19 +361,17 @@ static void do_viodasd_request(struct request_queue *q)
 	 * back later.
 	 */
 	while (num_req_outstanding < VIOMAXREQ) {
-		req = elv_next_request(q);
+		req = blk_fetch_request(q);
 		if (req == NULL)
 			return;
-		/* dequeue the current request from the queue */
-		blkdev_dequeue_request(req);
 		/* check that request contains a valid command */
 		if (!blk_fs_request(req)) {
-			viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+			viodasd_end_request(req, -EIO, blk_rq_sectors(req));
 			continue;
 		}
 		/* Try sending the request */
 		if (send_request(req) != 0)
-			viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+			viodasd_end_request(req, -EIO, blk_rq_sectors(req));
 	}
 }
 
@@ -590,7 +588,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
 		err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
 		printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
 				event->xRc, bevent->sub_result, err->msg);
-		num_sect = req->hard_nr_sectors;
+		num_sect = blk_rq_sectors(req);
 	}
 	qlock = req->q->queue_lock;
 	spin_lock_irqsave(qlock, irq_flags);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5d34764c8a8..43db3ea15b5 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -37,6 +37,7 @@ struct virtblk_req
 	struct list_head list;
 	struct request *req;
 	struct virtio_blk_outhdr out_hdr;
+	struct virtio_scsi_inhdr in_hdr;
 	u8 status;
 };
 
@@ -50,6 +51,7 @@ static void blk_done(struct virtqueue *vq)
 	spin_lock_irqsave(&vblk->lock, flags);
 	while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
 		int error;
+
 		switch (vbr->status) {
 		case VIRTIO_BLK_S_OK:
 			error = 0;
@@ -62,7 +64,13 @@ static void blk_done(struct virtqueue *vq)
 			break;
 		}
 
-		__blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req));
+		if (blk_pc_request(vbr->req)) {
+			vbr->req->resid_len = vbr->in_hdr.residual;
+			vbr->req->sense_len = vbr->in_hdr.sense_len;
+			vbr->req->errors = vbr->in_hdr.errors;
+		}
+
+		__blk_end_request_all(vbr->req, error);
 		list_del(&vbr->list);
 		mempool_free(vbr, vblk->pool);
 	}
@@ -74,7 +82,7 @@ static void blk_done(struct virtqueue *vq)
 static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		   struct request *req)
 {
-	unsigned long num, out, in;
+	unsigned long num, out = 0, in = 0;
 	struct virtblk_req *vbr;
 
 	vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
@@ -85,7 +93,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 	vbr->req = req;
 	if (blk_fs_request(vbr->req)) {
 		vbr->out_hdr.type = 0;
-		vbr->out_hdr.sector = vbr->req->sector;
+		vbr->out_hdr.sector = blk_rq_pos(vbr->req);
 		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
 	} else if (blk_pc_request(vbr->req)) {
 		vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
@@ -99,18 +107,36 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 	if (blk_barrier_rq(vbr->req))
 		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
 
-	sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr));
-	num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
-	sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status));
+	sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
 
-	if (rq_data_dir(vbr->req) == WRITE) {
-		vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
-		out = 1 + num;
-		in = 1;
-	} else {
-		vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
-		out = 1;
-		in = 1 + num;
+	/*
+	 * If this is a packet command we need a couple of additional headers.
+	 * Behind the normal outhdr we put a segment with the scsi command
+	 * block, and before the normal inhdr we put the sense data and an
+	 * inhdr with additional status information.
+	 */
+	if (blk_pc_request(vbr->req))
+		sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
+
+	num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
+
+	if (blk_pc_request(vbr->req)) {
+		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
+		sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
+			   sizeof(vbr->in_hdr));
+	}
+
+	sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
+		   sizeof(vbr->status));
+
+	if (num) {
+		if (rq_data_dir(vbr->req) == WRITE) {
+			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
+			out += num;
+		} else {
+			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
+			in += num;
+		}
 	}
 
 	if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) {
@@ -124,12 +150,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 
 static void do_virtblk_request(struct request_queue *q)
 {
-	struct virtio_blk *vblk = NULL;
+	struct virtio_blk *vblk = q->queuedata;
 	struct request *req;
 	unsigned int issued = 0;
 
-	while ((req = elv_next_request(q)) != NULL) {
-		vblk = req->rq_disk->private_data;
+	while ((req = blk_peek_request(q)) != NULL) {
 		BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
 
 		/* If this request fails, stop queue and wait for something to
@@ -138,7 +163,7 @@ static void do_virtblk_request(struct request_queue *q)
 			blk_stop_queue(q);
 			break;
 		}
-		blkdev_dequeue_request(req);
+		blk_start_request(req);
 		issued++;
 	}
 
@@ -146,12 +171,51 @@ static void do_virtblk_request(struct request_queue *q)
 		vblk->vq->vq_ops->kick(vblk->vq);
 }
 
+/* return ATA identify data
+ */
+static int virtblk_identify(struct gendisk *disk, void *argp)
+{
+	struct virtio_blk *vblk = disk->private_data;
+	void *opaque;
+	int err = -ENOMEM;
+
+	opaque = kmalloc(VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
+	if (!opaque)
+		goto out;
+
+	err = virtio_config_buf(vblk->vdev, VIRTIO_BLK_F_IDENTIFY,
+		offsetof(struct virtio_blk_config, identify), opaque,
+		VIRTIO_BLK_ID_BYTES);
+
+	if (err)
+		goto out_kfree;
+
+	if (copy_to_user(argp, opaque, VIRTIO_BLK_ID_BYTES))
+		err = -EFAULT;
+
+out_kfree:
+	kfree(opaque);
+out:
+	return err;
+}
+
 static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
 			 unsigned cmd, unsigned long data)
 {
-	return scsi_cmd_ioctl(bdev->bd_disk->queue,
-			      bdev->bd_disk, mode, cmd,
-			      (void __user *)data);
+	struct gendisk *disk = bdev->bd_disk;
+	struct virtio_blk *vblk = disk->private_data;
+	void __user *argp = (void __user *)data;
+
+	if (cmd == HDIO_GET_IDENTITY)
+		return virtblk_identify(disk, argp);
+
+	/*
+	 * Only allow the generic SCSI ioctls if the host can support it.
+	 */
+	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
+		return -ENOIOCTLCMD;
+
+	return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
 }
 
 /* We provide getgeo only to please some old bootloader/partitioning tools */
@@ -190,7 +254,7 @@ static int index_to_minor(int index)
 	return index << PART_BITS;
 }
 
-static int virtblk_probe(struct virtio_device *vdev)
+static int __devinit virtblk_probe(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk;
 	int err;
@@ -224,7 +288,7 @@ static int virtblk_probe(struct virtio_device *vdev)
 	sg_init_table(vblk->sg, vblk->sg_elems);
 
 	/* We expect one virtqueue, for output. */
-	vblk->vq = vdev->config->find_vq(vdev, 0, blk_done);
+	vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
 	if (IS_ERR(vblk->vq)) {
 		err = PTR_ERR(vblk->vq);
 		goto out_free_vblk;
@@ -249,6 +313,7 @@ static int virtblk_probe(struct virtio_device *vdev)
 		goto out_put_disk;
 	}
 
+	vblk->disk->queue->queuedata = vblk;
 	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);
 
 	if (index < 26) {
@@ -313,7 +378,7 @@ static int virtblk_probe(struct virtio_device *vdev)
 				offsetof(struct virtio_blk_config, blk_size),
 				&blk_size);
 	if (!err)
-		blk_queue_hardsect_size(vblk->disk->queue, blk_size);
+		blk_queue_logical_block_size(vblk->disk->queue, blk_size);
 
 	add_disk(vblk->disk);
 	return 0;
@@ -323,14 +388,14 @@ out_put_disk:
 out_mempool:
 	mempool_destroy(vblk->pool);
 out_free_vq:
-	vdev->config->del_vq(vblk->vq);
+	vdev->config->del_vqs(vdev);
 out_free_vblk:
 	kfree(vblk);
 out:
 	return err;
 }
 
-static void virtblk_remove(struct virtio_device *vdev)
+static void __devexit virtblk_remove(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk = vdev->priv;
 
@@ -344,7 +409,7 @@ static void virtblk_remove(struct virtio_device *vdev)
 	blk_cleanup_queue(vblk->disk->queue);
 	put_disk(vblk->disk);
 	mempool_destroy(vblk->pool);
-	vdev->config->del_vq(vblk->vq);
+	vdev->config->del_vqs(vdev);
 	kfree(vblk);
 }
 
@@ -356,6 +421,7 @@ static struct virtio_device_id id_table[] = {
 static unsigned int features[] = {
 	VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
 	VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
+	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY
 };
 
 static struct virtio_driver virtio_blk = {
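The reworked do_req() counts out/in segments as it builds them because SCSI passthrough adds buffers on both sides of the data. Read off the hunk above, the descriptor layout handed to add_buf() is roughly the following (device-readable segments first, device-writable ones last); this is a reading of the patch, not documentation of the virtio spec:

    /*
     * out[0]       struct virtio_blk_outhdr        (always)
     * out[1]       SCSI CDB from req->cmd          (packet commands only)
     * out/in[...]  data segments from blk_rq_map_sg(), counted into
     *              'out' for writes and into 'in' for reads
     * in[...]      96 bytes of sense data          (packet commands only)
     * in[...]      struct virtio_scsi_inhdr        (packet commands only)
     * in[last]     u8 status                       (always)
     */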
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 64b496fce98..ce242921992 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -305,30 +305,25 @@ static void do_xd_request (struct request_queue * q)
 	if (xdc_busy)
 		return;
 
-	while ((req = elv_next_request(q)) != NULL) {
-		unsigned block = req->sector;
-		unsigned count = req->nr_sectors;
-		int rw = rq_data_dir(req);
+	req = blk_fetch_request(q);
+	while (req) {
+		unsigned block = blk_rq_pos(req);
+		unsigned count = blk_rq_cur_sectors(req);
 		XD_INFO *disk = req->rq_disk->private_data;
-		int res = 0;
+		int res = -EIO;
 		int retry;
 
-		if (!blk_fs_request(req)) {
-			end_request(req, 0);
-			continue;
-		}
-		if (block + count > get_capacity(req->rq_disk)) {
-			end_request(req, 0);
-			continue;
-		}
-		if (rw != READ && rw != WRITE) {
-			printk("do_xd_request: unknown request\n");
-			end_request(req, 0);
-			continue;
-		}
+		if (!blk_fs_request(req))
+			goto done;
+		if (block + count > get_capacity(req->rq_disk))
+			goto done;
 		for (retry = 0; (retry < XD_RETRIES) && !res; retry++)
-			res = xd_readwrite(rw, disk, req->buffer, block, count);
-		end_request(req, res);	/* wrap up, 0 = fail, 1 = success */
+			res = xd_readwrite(rq_data_dir(req), disk, req->buffer,
+					   block, count);
+	done:
+		/* wrap up, 0 = success, -errno = fail */
+		if (!__blk_end_request_cur(req, res))
+			req = blk_fetch_request(q);
 	}
 }
 
@@ -418,7 +413,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
 			printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write"));
 			xd_recalibrate(drive);
 			spin_lock_irq(&xd_lock);
-			return (0);
+			return -EIO;
 		case 2:
 			if (sense[0] & 0x30) {
 				printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing"));
@@ -439,7 +434,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
 			else
 				printk(" - no valid disk address\n");
 			spin_lock_irq(&xd_lock);
-			return (0);
+			return -EIO;
 		}
 		if (xd_dma_buffer)
 			for (i=0; i < (temp * 0x200); i++)
@@ -448,7 +443,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
 		count -= temp, buffer += temp * 0x200, block += temp;
 	}
 	spin_lock_irq(&xd_lock);
-	return (1);
+	return 0;
 }
 
 /* xd_recalibrate: recalibrate a given drive and reset controller if necessary */
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8f905089b72..c1996829d5e 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -122,7 +122,7 @@ static DEFINE_SPINLOCK(blkif_io_lock);
 static int get_id_from_freelist(struct blkfront_info *info)
 {
 	unsigned long free = info->shadow_free;
-	BUG_ON(free > BLK_RING_SIZE);
+	BUG_ON(free >= BLK_RING_SIZE);
 	info->shadow_free = info->shadow[free].req.id;
 	info->shadow[free].req.id = 0x0fffffee; /* debug */
 	return free;
@@ -231,7 +231,7 @@ static int blkif_queue_request(struct request *req)
 	info->shadow[id].request = (unsigned long)req;
 
 	ring_req->id = id;
-	ring_req->sector_number = (blkif_sector_t)req->sector;
+	ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
 	ring_req->handle = info->handle;
 
 	ring_req->operation = rq_data_dir(req) ?
@@ -299,25 +299,25 @@ static void do_blkif_request(struct request_queue *rq)
 
 	queued = 0;
 
-	while ((req = elv_next_request(rq)) != NULL) {
+	while ((req = blk_peek_request(rq)) != NULL) {
 		info = req->rq_disk->private_data;
-		if (!blk_fs_request(req)) {
-			end_request(req, 0);
-			continue;
-		}
 
 		if (RING_FULL(&info->ring))
 			goto wait;
 
-		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
-			 "(%u/%li) buffer:%p [%s]\n",
-			 req, req->cmd, (unsigned long)req->sector,
-			 req->current_nr_sectors,
-			 req->nr_sectors, req->buffer,
-			 rq_data_dir(req) ? "write" : "read");
+		blk_start_request(req);
 
+		if (!blk_fs_request(req)) {
+			__blk_end_request_all(req, -EIO);
+			continue;
+		}
+
+		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
+			 "(%u/%u) buffer:%p [%s]\n",
+			 req, req->cmd, (unsigned long)blk_rq_pos(req),
+			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
+			 req->buffer, rq_data_dir(req) ? "write" : "read");
 
-		blkdev_dequeue_request(req);
 		if (blkif_queue_request(req)) {
 			blk_requeue_request(rq, req);
 wait:
@@ -344,7 +344,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
 
 	/* Hard sector size and max sectors impersonate the equiv. hardware. */
-	blk_queue_hardsect_size(rq, sector_size);
+	blk_queue_logical_block_size(rq, sector_size);
 	blk_queue_max_sectors(rq, 512);
 
 	/* Each segment in a request is up to an aligned page in size. */
@@ -551,7 +551,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
 	for (i = info->ring.rsp_cons; i != rp; i++) {
 		unsigned long id;
-		int ret;
 
 		bret = RING_GET_RESPONSE(&info->ring, i);
 		id = bret->id;
@@ -578,8 +577,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
 					"request: %x\n", bret->status);
 
-			ret = __blk_end_request(req, error, blk_rq_bytes(req));
-			BUG_ON(ret);
+			__blk_end_request_all(req, error);
 			break;
 		default:
 			BUG();
@@ -934,8 +932,6 @@ static void blkfront_closing(struct xenbus_device *dev)
 
 	spin_lock_irqsave(&blkif_io_lock, flags);
 
-	del_gendisk(info->gd);
-
 	/* No more blkif_request(). */
 	blk_stop_queue(info->rq);
 
@@ -949,6 +945,8 @@ static void blkfront_closing(struct xenbus_device *dev)
 	blk_cleanup_queue(info->rq);
 	info->rq = NULL;
 
+	del_gendisk(info->gd);
+
  out:
 	xenbus_frontend_closed(dev);
 }
@@ -977,8 +975,10 @@ static void backend_changed(struct xenbus_device *dev,
 		break;
 
 	case XenbusStateClosing:
-		if (info->gd == NULL)
-			xenbus_dev_fatal(dev, -ENODEV, "gd is NULL");
+		if (info->gd == NULL) {
+			xenbus_frontend_closed(dev);
+			break;
+		}
 		bd = bdget_disk(info->gd, 0);
 		if (bd == NULL)
 			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
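Note the ordering in the new do_blkif_request(): the request is dequeued with blk_start_request() before the blk_fs_request() check, because the __blk_end_request_*() family operates only on in-flight (started) requests. The rejection path therefore becomes start-then-end; a sketch with hypothetical my_ring_full() and my_queue_to_ring() helpers:

    while ((req = blk_peek_request(q)) != NULL) {
            if (my_ring_full())             /* backpressure check */
                    break;                  /* not started: stays queued */

            blk_start_request(req);         /* dequeue first... */

            if (!blk_fs_request(req)) {
                    __blk_end_request_all(req, -EIO);   /* ...then reject */
                    continue;
            }
            my_queue_to_ring(req);          /* submit to the backend */
    }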
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 4aecf5dc6a9..f08491a3a81 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -463,10 +463,11 @@ struct request *ace_get_next_request(struct request_queue * q)
 {
 	struct request *req;
 
-	while ((req = elv_next_request(q)) != NULL) {
+	while ((req = blk_peek_request(q)) != NULL) {
 		if (blk_fs_request(req))
 			break;
-		end_request(req, 0);
+		blk_start_request(req);
+		__blk_end_request_all(req, -EIO);
 	}
 	return req;
 }
@@ -492,9 +493,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
 		set_capacity(ace->gd, 0);
 		dev_info(ace->dev, "No CF in slot\n");
 
-		/* Drop all pending requests */
-		while ((req = elv_next_request(ace->queue)) != NULL)
-			end_request(req, 0);
+		/* Drop all in-flight and pending requests */
+		if (ace->req) {
+			__blk_end_request_all(ace->req, -EIO);
+			ace->req = NULL;
+		}
+		while ((req = blk_fetch_request(ace->queue)) != NULL)
+			__blk_end_request_all(req, -EIO);
 
 		/* Drop back to IDLE state and notify waiters */
 		ace->fsm_state = ACE_FSM_STATE_IDLE;
@@ -642,19 +647,21 @@ static void ace_fsm_dostate(struct ace_device *ace)
 			ace->fsm_state = ACE_FSM_STATE_IDLE;
 			break;
 		}
+		blk_start_request(req);
 
 		/* Okay, it's a data request, set it up for transfer */
 		dev_dbg(ace->dev,
-			"request: sec=%llx hcnt=%lx, ccnt=%x, dir=%i\n",
-			(unsigned long long) req->sector, req->hard_nr_sectors,
-			req->current_nr_sectors, rq_data_dir(req));
+			"request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
+			(unsigned long long)blk_rq_pos(req),
+			blk_rq_sectors(req), blk_rq_cur_sectors(req),
+			rq_data_dir(req));
 
 		ace->req = req;
 		ace->data_ptr = req->buffer;
-		ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
-		ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);
+		ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
+		ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);
 
-		count = req->hard_nr_sectors;
+		count = blk_rq_sectors(req);
 		if (rq_data_dir(req)) {
 			/* Kick off write request */
 			dev_dbg(ace->dev, "write data\n");
@@ -688,7 +695,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
 			dev_dbg(ace->dev,
 				"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
 				ace->fsm_task, ace->fsm_iter_num,
-				ace->req->current_nr_sectors * 16,
+				blk_rq_cur_sectors(ace->req) * 16,
 				ace->data_count, ace->in_irq);
 			ace_fsm_yield(ace);	/* need to poll CFBSY bit */
 			break;
@@ -697,7 +704,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
 			dev_dbg(ace->dev,
 				"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
 				ace->fsm_task, ace->fsm_iter_num,
-				ace->req->current_nr_sectors * 16,
+				blk_rq_cur_sectors(ace->req) * 16,
 				ace->data_count, ace->in_irq);
 			ace_fsm_yieldirq(ace);
 			break;
@@ -717,14 +724,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
 		}
 
 		/* bio finished; is there another one? */
-		if (__blk_end_request(ace->req, 0,
-					blk_rq_cur_bytes(ace->req))) {
-			/* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
-			 *      ace->req->hard_nr_sectors,
-			 *      ace->req->current_nr_sectors);
+		if (__blk_end_request_cur(ace->req, 0)) {
+			/* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
+			 *      blk_rq_sectors(ace->req),
+			 *      blk_rq_cur_sectors(ace->req));
 			 */
 			ace->data_ptr = ace->req->buffer;
-			ace->data_count = ace->req->current_nr_sectors * 16;
+			ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
 			ace_fsm_yieldirq(ace);
 			break;
 		}
@@ -978,7 +984,7 @@ static int __devinit ace_setup(struct ace_device *ace)
 	ace->queue = blk_init_queue(ace_request, &ace->lock);
 	if (ace->queue == NULL)
 		goto err_blk_initq;
-	blk_queue_hardsect_size(ace->queue, 512);
+	blk_queue_logical_block_size(ace->queue, 512);
 
 	/*
 	 * Allocate and initialize GD structure
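xsysace drives multi-sector requests from a state machine, one bio segment at a time: __blk_end_request_cur(req, 0) retires the segment just moved, and its return value says whether the same request continues, in which case the data pointer and count are reloaded from the request. The step in isolation, with my_req, data_ptr and data_count as hypothetical driver state:

    /* End of one completed segment inside the FSM. */
    if (__blk_end_request_cur(my_req, 0)) {
            /* same request, next segment */
            data_ptr = my_req->buffer;
            data_count = blk_rq_cur_sectors(my_req) * ACE_BUF_PER_SECTOR;
    } else {
            my_req = NULL;  /* request fully completed */
    }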
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 80754cdd311..4575171e5be 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -70,15 +70,18 @@ static struct gendisk *z2ram_gendisk;
 static void do_z2_request(struct request_queue *q)
 {
 	struct request *req;
-	while ((req = elv_next_request(q)) != NULL) {
-		unsigned long start = req->sector << 9;
-		unsigned long len = req->current_nr_sectors << 9;
+
+	req = blk_fetch_request(q);
+	while (req) {
+		unsigned long start = blk_rq_pos(req) << 9;
+		unsigned long len = blk_rq_cur_bytes(req);
+		int err = 0;
 
 		if (start + len > z2ram_size) {
 			printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
-				req->sector, req->current_nr_sectors);
-			end_request(req, 0);
-			continue;
+				blk_rq_pos(req), blk_rq_cur_sectors(req));
+			err = -EIO;
+			goto done;
 		}
 		while (len) {
 			unsigned long addr = start & Z2RAM_CHUNKMASK;
@@ -93,7 +96,9 @@ static void do_z2_request(struct request_queue *q)
 			start += size;
 			len -= size;
 		}
-		end_request(req, 1);
+	done:
+		if (!__blk_end_request_cur(req, err))
+			req = blk_fetch_request(q);
 	}
 }
 
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index af761dc434f..4895f0e0532 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -277,8 +277,8 @@ static int hci_uart_tty_open(struct tty_struct *tty)
 	/* FIXME: why is this needed. Note don't use ldisc_ref here as the
 	   open path is before the ldisc is referencable */
 
-	if (tty->ldisc.ops->flush_buffer)
-		tty->ldisc.ops->flush_buffer(tty);
+	if (tty->ldisc->ops->flush_buffer)
+		tty->ldisc->ops->flush_buffer(tty);
 	tty_driver_flush_buffer(tty);
 
 	return 0;
@@ -463,7 +463,6 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file * file,
 			clear_bit(HCI_UART_PROTO_SET, &hu->flags);
 			return err;
 		}
-		tty->low_latency = 1;
 	} else
 		return -EBUSY;
 	break;
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index cceace61ef2..71d1b9bab70 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2101,8 +2101,8 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 		nr = nframes;
 		if (cdi->cdda_method == CDDA_BPC_SINGLE)
 			nr = 1;
-		if (nr * CD_FRAMESIZE_RAW > (q->max_sectors << 9))
-			nr = (q->max_sectors << 9) / CD_FRAMESIZE_RAW;
+		if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
+			nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;
 
 		len = nr * CD_FRAMESIZE_RAW;
 
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 2eecb779437..b5621f27c4b 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -584,8 +584,8 @@ static void gdrom_readdisk_dma(struct work_struct *work)
 	list_for_each_safe(elem, next, &gdrom_deferred) {
 		req = list_entry(elem, struct request, queuelist);
 		spin_unlock(&gdrom_lock);
-		block = req->sector/GD_TO_BLK + GD_SESSION_OFFSET;
-		block_cnt = req->nr_sectors/GD_TO_BLK;
+		block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
+		block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
 		ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG);
 		ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
 		ctrl_outl(1, GDROM_DMA_DIRECTION_REG);
@@ -632,39 +632,35 @@ static void gdrom_readdisk_dma(struct work_struct *work)
 		 * before handling ending the request */
 		spin_lock(&gdrom_lock);
 		list_del_init(&req->queuelist);
-		__blk_end_request(req, err, blk_rq_bytes(req));
+		__blk_end_request_all(req, err);
 	}
 	spin_unlock(&gdrom_lock);
 	kfree(read_command);
 }
 
-static void gdrom_request_handler_dma(struct request *req)
-{
-	/* dequeue, add to list of deferred work
-	 * and then schedule workqueue */
-	blkdev_dequeue_request(req);
-	list_add_tail(&req->queuelist, &gdrom_deferred);
-	schedule_work(&work);
-}
-
 static void gdrom_request(struct request_queue *rq)
 {
 	struct request *req;
 
-	while ((req = elv_next_request(rq)) != NULL) {
+	while ((req = blk_fetch_request(rq)) != NULL) {
 		if (!blk_fs_request(req)) {
 			printk(KERN_DEBUG "GDROM: Non-fs request ignored\n");
-			end_request(req, 0);
+			__blk_end_request_all(req, -EIO);
+			continue;
 		}
 		if (rq_data_dir(req) != READ) {
 			printk(KERN_NOTICE "GDROM: Read only device -");
 			printk(" write request ignored\n");
-			end_request(req, 0);
+			__blk_end_request_all(req, -EIO);
+			continue;
 		}
-		if (req->nr_sectors)
-			gdrom_request_handler_dma(req);
-		else
-			end_request(req, 0);
+
+		/*
+		 * Add to list of deferred work and then schedule
+		 * workqueue.
+		 */
+		list_add_tail(&req->queuelist, &gdrom_deferred);
+		schedule_work(&work);
 	}
 }
 
@@ -743,7 +739,7 @@ static void __devinit probe_gdrom_setupdisk(void)
 
 static int __devinit probe_gdrom_setupqueue(void)
 {
-	blk_queue_hardsect_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
+	blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
 	/* using DMA so memory will need to be contiguous */
 	blk_queue_max_hw_segments(gd.gdrom_rq, 1);
 	/* set a large max size to get most from DMA */
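Because blk_fetch_request() already dequeued the request, gdrom's deferral is now just a list move: the request function parks each read on gdrom_deferred and the workqueue ends it later with __blk_end_request_all(). The hand-off pattern reduced to its bones, with hypothetical names (a real driver, like gdrom, also needs a lock protecting the deferred list against the worker):

    static LIST_HEAD(my_deferred);
    static struct work_struct my_work;  /* set up elsewhere with INIT_WORK() */

    static void my_request(struct request_queue *q)
    {
            struct request *req;

            while ((req = blk_fetch_request(q)) != NULL) {
                    /* already off the queue; queuelist is free for reuse */
                    list_add_tail(&req->queuelist, &my_deferred);
                    schedule_work(&my_work);
            }
    }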
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 13929356135..0fff646cc2f 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -282,7 +282,7 @@ static int send_request(struct request *req)
282 viopath_targetinst(viopath_hostLp), 282 viopath_targetinst(viopath_hostLp),
283 (u64)req, VIOVERSION << 16, 283 (u64)req, VIOVERSION << 16,
284 ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr, 284 ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
285 (u64)req->sector * 512, len, 0); 285 (u64)blk_rq_pos(req) * 512, len, 0);
286 if (hvrc != HvLpEvent_Rc_Good) { 286 if (hvrc != HvLpEvent_Rc_Good) {
287 printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc); 287 printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc);
288 return -1; 288 return -1;
@@ -291,36 +291,19 @@ static int send_request(struct request *req)
291 return 0; 291 return 0;
292} 292}
293 293
294static void viocd_end_request(struct request *req, int error)
295{
296 int nsectors = req->hard_nr_sectors;
297
298 /*
299 * Make sure it's fully ended, and ensure that we process
300 * at least one sector.
301 */
302 if (blk_pc_request(req))
303 nsectors = (req->data_len + 511) >> 9;
304 if (!nsectors)
305 nsectors = 1;
306
307 if (__blk_end_request(req, error, nsectors << 9))
308 BUG();
309}
310
311static int rwreq; 294static int rwreq;
312 295
313static void do_viocd_request(struct request_queue *q) 296static void do_viocd_request(struct request_queue *q)
314{ 297{
315 struct request *req; 298 struct request *req;
316 299
317 while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) { 300 while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
318 if (!blk_fs_request(req)) 301 if (!blk_fs_request(req))
319 viocd_end_request(req, -EIO); 302 __blk_end_request_all(req, -EIO);
320 else if (send_request(req) < 0) { 303 else if (send_request(req) < 0) {
321 printk(VIOCD_KERN_WARNING 304 printk(VIOCD_KERN_WARNING
322 "unable to send message to OS/400!"); 305 "unable to send message to OS/400!");
323 viocd_end_request(req, -EIO); 306 __blk_end_request_all(req, -EIO);
324 } else 307 } else
325 rwreq++; 308 rwreq++;
326 } 309 }
@@ -486,8 +469,8 @@ static void vio_handle_cd_event(struct HvLpEvent *event)
486 case viocdopen: 469 case viocdopen:
487 if (event->xRc == 0) { 470 if (event->xRc == 0) {
488 di = &viocd_diskinfo[bevent->disk]; 471 di = &viocd_diskinfo[bevent->disk];
489 blk_queue_hardsect_size(di->viocd_disk->queue, 472 blk_queue_logical_block_size(di->viocd_disk->queue,
490 bevent->block_size); 473 bevent->block_size);
491 set_capacity(di->viocd_disk, 474 set_capacity(di->viocd_disk,
492 bevent->media_size * 475 bevent->media_size *
493 bevent->block_size / 512); 476 bevent->block_size / 512);
@@ -531,9 +514,9 @@ return_complete:
531 "with rc %d:0x%04X: %s\n", 514 "with rc %d:0x%04X: %s\n",
532 req, event->xRc, 515 req, event->xRc,
533 bevent->sub_result, err->msg); 516 bevent->sub_result, err->msg);
534 viocd_end_request(req, -EIO); 517 __blk_end_request_all(req, -EIO);
535 } else 518 } else
536 viocd_end_request(req, 0); 519 __blk_end_request_all(req, 0);
537 520
538 /* restart handling of incoming requests */ 521 /* restart handling of incoming requests */
539 spin_unlock_irqrestore(&viocd_reqlock, flags); 522 spin_unlock_irqrestore(&viocd_reqlock, flags);
@@ -587,7 +570,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
587 struct device_node *node = vdev->dev.archdata.of_node; 570 struct device_node *node = vdev->dev.archdata.of_node;
588 571
589 deviceno = vdev->unit_address; 572 deviceno = vdev->unit_address;
590 if (deviceno > VIOCD_MAX_CD) 573 if (deviceno >= VIOCD_MAX_CD)
591 return -ENODEV; 574 return -ENODEV;
592 if (!node) 575 if (!node)
593 return -ENODEV; 576 return -ENODEV;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 735bbe2be51..30bae6de6a0 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -97,6 +97,19 @@ config DEVKMEM
97 kind of kernel debugging operations. 97 kind of kernel debugging operations.
98 When in doubt, say "N". 98 When in doubt, say "N".
99 99
100config BFIN_JTAG_COMM
101 tristate "Blackfin JTAG Communication"
102 depends on BLACKFIN
103 help
104 Add support for emulating a TTY device over the Blackfin JTAG.
105
106 To compile this driver as a module, choose M here: the
107 module will be called bfin_jtag_comm.
108
109config BFIN_JTAG_COMM_CONSOLE
110 bool "Console on Blackfin JTAG"
111 depends on BFIN_JTAG_COMM=y
112
100config SERIAL_NONSTANDARD 113config SERIAL_NONSTANDARD
101 bool "Non-standard serial port support" 114 bool "Non-standard serial port support"
102 depends on HAS_IOMEM 115 depends on HAS_IOMEM
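
The console option added above is deliberately a bool that depends on BFIN_JTAG_COMM=y rather than on the bare symbol: a console write path has to exist before any module can load, so the tristate TTY driver must be built in before the console can even be offered. A hypothetical .config fragment selecting both:

    CONFIG_BFIN_JTAG_COMM=y
    CONFIG_BFIN_JTAG_COMM_CONSOLE=y
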
@@ -679,7 +692,7 @@ config HVCS
679 this driver. 692 this driver.
680 693
681 To compile this driver as a module, choose M here: the 694 To compile this driver as a module, choose M here: the
682 module will be called hvcs.ko. Additionally, this module 695 module will be called hvcs. Additionally, this module
683 will depend on arch specific APIs exported from hvcserver.ko 696 will depend on arch specific APIs exported from hvcserver.ko
684 which will also be compiled when this driver is built as a 697 which will also be compiled when this driver is built as a
685 module. 698 module.
@@ -893,7 +906,7 @@ config DTLK
893 906
894config XILINX_HWICAP 907config XILINX_HWICAP
895 tristate "Xilinx HWICAP Support" 908 tristate "Xilinx HWICAP Support"
896 depends on XILINX_VIRTEX 909 depends on XILINX_VIRTEX || MICROBLAZE
897 help 910 help
898 This option enables support for Xilinx Internal Configuration 911 This option enables support for Xilinx Internal Configuration
899 Access Port (ICAP) driver. The ICAP is used on Xilinx Virtex 912 Access Port (ICAP) driver. The ICAP is used on Xilinx Virtex
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 9caf5b5ad1c..189efcff08c 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_LEGACY_PTYS) += pty.o
13obj-$(CONFIG_UNIX98_PTYS) += pty.o 13obj-$(CONFIG_UNIX98_PTYS) += pty.o
14obj-y += misc.o 14obj-y += misc.o
15obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o selection.o keyboard.o 15obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o selection.o keyboard.o
16obj-$(CONFIG_BFIN_JTAG_COMM) += bfin_jtag_comm.o
16obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o 17obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
17obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o 18obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
18obj-$(CONFIG_AUDIT) += tty_audit.o 19obj-$(CONFIG_AUDIT) += tty_audit.o
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 3686912427b..7a748fa0dfc 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -46,6 +46,10 @@
46#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22 46#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
47#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 47#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
48#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 48#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
49#define PCI_DEVICE_ID_INTEL_IGDNG_D_HB 0x0040
50#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042
51#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044
52#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046
49 53
50/* cover 915 and 945 variants */ 54/* cover 915 and 945 variants */
51#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ 55#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -75,7 +79,9 @@
75 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ 79 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
76 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ 80 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
77 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ 81 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
78 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB) 82 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
83 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
84 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB)
79 85
80extern int agp_memory_reserved; 86extern int agp_memory_reserved;
81 87
@@ -1211,6 +1217,8 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1211 case PCI_DEVICE_ID_INTEL_Q45_HB: 1217 case PCI_DEVICE_ID_INTEL_Q45_HB:
1212 case PCI_DEVICE_ID_INTEL_G45_HB: 1218 case PCI_DEVICE_ID_INTEL_G45_HB:
1213 case PCI_DEVICE_ID_INTEL_G41_HB: 1219 case PCI_DEVICE_ID_INTEL_G41_HB:
1220 case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
1221 case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
1214 *gtt_offset = *gtt_size = MB(2); 1222 *gtt_offset = *gtt_size = MB(2);
1215 break; 1223 break;
1216 default: 1224 default:
@@ -2186,6 +2194,10 @@ static const struct intel_driver_description {
2186 "G45/G43", NULL, &intel_i965_driver }, 2194 "G45/G43", NULL, &intel_i965_driver },
2187 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, 2195 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
2188 "G41", NULL, &intel_i965_driver }, 2196 "G41", NULL, &intel_i965_driver },
2197 { PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0,
2198 "IGDNG/D", NULL, &intel_i965_driver },
2199 { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
2200 "IGDNG/M", NULL, &intel_i965_driver },
2189 { 0, 0, 0, NULL, NULL, NULL } 2201 { 0, 0, 0, NULL, NULL, NULL }
2190}; 2202};
2191 2203
@@ -2387,6 +2399,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
2387 ID(PCI_DEVICE_ID_INTEL_Q45_HB), 2399 ID(PCI_DEVICE_ID_INTEL_Q45_HB),
2388 ID(PCI_DEVICE_ID_INTEL_G45_HB), 2400 ID(PCI_DEVICE_ID_INTEL_G45_HB),
2389 ID(PCI_DEVICE_ID_INTEL_G41_HB), 2401 ID(PCI_DEVICE_ID_INTEL_G41_HB),
2402 ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
2403 ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
2390 { } 2404 { }
2391}; 2405};
2392 2406
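
Adding IGDNG (the chipset later known as Ironlake) to intel-agp is pure table plumbing: each variant contributes a host-bridge/graphics ID pair, the host-bridge ID gates the PCI probe table and the GTT sizing switch, and the pair is bound to the existing i965 driver ops. A standalone C sketch of the pairing idea; the two ID pairs are the real values from the hunk, while the lookup itself is illustrative rather than the driver's actual probe path:

    #include <stdio.h>

    struct chip_desc {
        unsigned hb_id;     /* host bridge PCI device id */
        unsigned ig_id;     /* integrated graphics PCI device id */
        const char *name;
    };

    static const struct chip_desc chips[] = {
        { 0x0040, 0x0042, "IGDNG/D" },
        { 0x0044, 0x0046, "IGDNG/M" },
        { 0, 0, NULL }
    };

    static const struct chip_desc *find_by_hb(unsigned hb)
    {
        const struct chip_desc *c;
        for (c = chips; c->name; c++)
            if (c->hb_id == hb)
                return c;
        return NULL;
    }

    int main(void)
    {
        const struct chip_desc *c = find_by_hb(0x0044);
        printf("%s\n", c ? c->name : "unknown");
        return 0;
    }
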
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index fd3ebd1be57..72429b6b2fa 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -779,7 +779,7 @@ static void change_speed(struct async_struct *info,
779 info->IER |= UART_IER_MSI; 779 info->IER |= UART_IER_MSI;
780 } 780 }
781 /* TBD: 781 /* TBD:
782 * Does clearing IER_MSI imply that we should disbale the VBL interrupt ? 782 * Does clearing IER_MSI imply that we should disable the VBL interrupt ?
783 */ 783 */
784 784
785 /* 785 /*
diff --git a/drivers/char/bfin_jtag_comm.c b/drivers/char/bfin_jtag_comm.c
new file mode 100644
index 00000000000..44c113d5604
--- /dev/null
+++ b/drivers/char/bfin_jtag_comm.c
@@ -0,0 +1,365 @@
1/*
2 * TTY over Blackfin JTAG Communication
3 *
4 * Copyright 2008-2009 Analog Devices Inc.
5 *
6 * Enter bugs at http://blackfin.uclinux.org/
7 *
8 * Licensed under the GPL-2 or later.
9 */
10
11#include <linux/circ_buf.h>
12#include <linux/console.h>
13#include <linux/delay.h>
14#include <linux/err.h>
15#include <linux/kernel.h>
16#include <linux/kthread.h>
17#include <linux/module.h>
18#include <linux/mutex.h>
19#include <linux/sched.h>
20#include <linux/tty.h>
21#include <linux/tty_driver.h>
22#include <linux/tty_flip.h>
23#include <asm/atomic.h>
24
25/* See the Debug/Emulation chapter in the HRM */
26#define EMUDOF 0x00000001 /* EMUDAT_OUT full & valid */
27#define EMUDIF 0x00000002 /* EMUDAT_IN full & valid */
28#define EMUDOOVF 0x00000004 /* EMUDAT_OUT overflow */
29#define EMUDIOVF 0x00000008 /* EMUDAT_IN overflow */
30
31#define DRV_NAME "bfin-jtag-comm"
32#define DEV_NAME "ttyBFJC"
33
34#define pr_init(fmt, args...) ({ static const __initdata char __fmt[] = fmt; printk(__fmt, ## args); })
35#define debug(fmt, args...) pr_debug(DRV_NAME ": " fmt, ## args)
36
37static inline uint32_t bfin_write_emudat(uint32_t emudat)
38{
39 __asm__ __volatile__("emudat = %0;" : : "d"(emudat));
40 return emudat;
41}
42
43static inline uint32_t bfin_read_emudat(void)
44{
45 uint32_t emudat;
46 __asm__ __volatile__("%0 = emudat;" : "=d"(emudat));
47 return emudat;
48}
49
50static inline uint32_t bfin_write_emudat_chars(char a, char b, char c, char d)
51{
52 return bfin_write_emudat((a << 0) | (b << 8) | (c << 16) | (d << 24));
53}
54
55#define CIRC_SIZE 2048 /* see comment in tty_io.c:do_tty_write() */
56#define CIRC_MASK (CIRC_SIZE - 1)
57#define circ_empty(circ) ((circ)->head == (circ)->tail)
58#define circ_free(circ) CIRC_SPACE((circ)->head, (circ)->tail, CIRC_SIZE)
59#define circ_cnt(circ) CIRC_CNT((circ)->head, (circ)->tail, CIRC_SIZE)
60#define circ_byte(circ, idx) ((circ)->buf[(idx) & CIRC_MASK])
61
62static struct tty_driver *bfin_jc_driver;
63static struct task_struct *bfin_jc_kthread;
64static struct tty_struct * volatile bfin_jc_tty;
65static unsigned long bfin_jc_count;
66static DEFINE_MUTEX(bfin_jc_tty_mutex);
67static volatile struct circ_buf bfin_jc_write_buf;
68
69static int
70bfin_jc_emudat_manager(void *arg)
71{
72 uint32_t inbound_len = 0, outbound_len = 0;
73
74 while (!kthread_should_stop()) {
75 /* no one left to give data to, so sleep */
76 if (bfin_jc_tty == NULL && circ_empty(&bfin_jc_write_buf)) {
77 debug("waiting for readers\n");
78 __set_current_state(TASK_UNINTERRUPTIBLE);
79 schedule();
80 __set_current_state(TASK_RUNNING);
81 }
82
83 /* no data available, so just chill */
84 if (!(bfin_read_DBGSTAT() & EMUDIF) && circ_empty(&bfin_jc_write_buf)) {
85 debug("waiting for data (in_len = %i) (circ: %i %i)\n",
86 inbound_len, bfin_jc_write_buf.tail, bfin_jc_write_buf.head);
87 if (inbound_len)
88 schedule();
89 else
90 schedule_timeout_interruptible(HZ);
91 continue;
92 }
93
94 /* if incoming data is ready, eat it */
95 if (bfin_read_DBGSTAT() & EMUDIF) {
96 struct tty_struct *tty;
97 mutex_lock(&bfin_jc_tty_mutex);
98 tty = (struct tty_struct *)bfin_jc_tty;
99 if (tty != NULL) {
100 uint32_t emudat = bfin_read_emudat();
101 if (inbound_len == 0) {
102 debug("incoming length: 0x%08x\n", emudat);
103 inbound_len = emudat;
104 } else {
105 size_t num_chars = (4 <= inbound_len ? 4 : inbound_len);
106 debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars);
107 inbound_len -= num_chars;
108 tty_insert_flip_string(tty, (unsigned char *)&emudat, num_chars);
109 tty_flip_buffer_push(tty);
110 }
111 }
112 mutex_unlock(&bfin_jc_tty_mutex);
113 }
114
115 /* if outgoing data is ready, post it */
116 if (!(bfin_read_DBGSTAT() & EMUDOF) && !circ_empty(&bfin_jc_write_buf)) {
117 if (outbound_len == 0) {
118 outbound_len = circ_cnt(&bfin_jc_write_buf);
119 bfin_write_emudat(outbound_len);
120 debug("outgoing length: 0x%08x\n", outbound_len);
121 } else {
122 struct tty_struct *tty;
123 int tail = bfin_jc_write_buf.tail;
124 size_t ate = (4 <= outbound_len ? 4 : outbound_len);
125 uint32_t emudat =
126 bfin_write_emudat_chars(
127 circ_byte(&bfin_jc_write_buf, tail + 0),
128 circ_byte(&bfin_jc_write_buf, tail + 1),
129 circ_byte(&bfin_jc_write_buf, tail + 2),
130 circ_byte(&bfin_jc_write_buf, tail + 3)
131 );
132 bfin_jc_write_buf.tail += ate;
133 outbound_len -= ate;
134 mutex_lock(&bfin_jc_tty_mutex);
135 tty = (struct tty_struct *)bfin_jc_tty;
136 if (tty)
137 tty_wakeup(tty);
138 mutex_unlock(&bfin_jc_tty_mutex);
139 debug(" outgoing data: 0x%08x (pushing %zu)\n", emudat, ate);
140 }
141 }
142 }
143
144 __set_current_state(TASK_RUNNING);
145 return 0;
146}
147
148static int
149bfin_jc_open(struct tty_struct *tty, struct file *filp)
150{
151 mutex_lock(&bfin_jc_tty_mutex);
152 debug("open %lu\n", bfin_jc_count);
153 ++bfin_jc_count;
154 bfin_jc_tty = tty;
155 wake_up_process(bfin_jc_kthread);
156 mutex_unlock(&bfin_jc_tty_mutex);
157 return 0;
158}
159
160static void
161bfin_jc_close(struct tty_struct *tty, struct file *filp)
162{
163 mutex_lock(&bfin_jc_tty_mutex);
164 debug("close %lu\n", bfin_jc_count);
165 if (--bfin_jc_count == 0)
166 bfin_jc_tty = NULL;
167 wake_up_process(bfin_jc_kthread);
168 mutex_unlock(&bfin_jc_tty_mutex);
169}
170
171/* XXX: we don't handle the put_char() case where we must handle count = 1 */
172static int
173bfin_jc_circ_write(const unsigned char *buf, int count)
174{
175 int i;
176 count = min(count, circ_free(&bfin_jc_write_buf));
177 debug("going to write chunk of %i bytes\n", count);
178 for (i = 0; i < count; ++i)
179 circ_byte(&bfin_jc_write_buf, bfin_jc_write_buf.head + i) = buf[i];
180 bfin_jc_write_buf.head += i;
181 return i;
182}
183
184#ifndef CONFIG_BFIN_JTAG_COMM_CONSOLE
185# define acquire_console_sem()
186# define release_console_sem()
187#endif
188static int
189bfin_jc_write(struct tty_struct *tty, const unsigned char *buf, int count)
190{
191 int i;
192 acquire_console_sem();
193 i = bfin_jc_circ_write(buf, count);
194 release_console_sem();
195 wake_up_process(bfin_jc_kthread);
196 return i;
197}
198
199static void
200bfin_jc_flush_chars(struct tty_struct *tty)
201{
202 wake_up_process(bfin_jc_kthread);
203}
204
205static int
206bfin_jc_write_room(struct tty_struct *tty)
207{
208 return circ_free(&bfin_jc_write_buf);
209}
210
211static int
212bfin_jc_chars_in_buffer(struct tty_struct *tty)
213{
214 return circ_cnt(&bfin_jc_write_buf);
215}
216
217static void
218bfin_jc_wait_until_sent(struct tty_struct *tty, int timeout)
219{
220 unsigned long expire = jiffies + timeout;
221 while (!circ_empty(&bfin_jc_write_buf)) {
222 if (signal_pending(current))
223 break;
224 if (time_after(jiffies, expire))
225 break;
226 }
227}
228
229static struct tty_operations bfin_jc_ops = {
230 .open = bfin_jc_open,
231 .close = bfin_jc_close,
232 .write = bfin_jc_write,
233 /*.put_char = bfin_jc_put_char,*/
234 .flush_chars = bfin_jc_flush_chars,
235 .write_room = bfin_jc_write_room,
236 .chars_in_buffer = bfin_jc_chars_in_buffer,
237 .wait_until_sent = bfin_jc_wait_until_sent,
238};
239
240static int __init bfin_jc_init(void)
241{
242 int ret;
243
244 bfin_jc_kthread = kthread_create(bfin_jc_emudat_manager, NULL, DRV_NAME);
245 if (IS_ERR(bfin_jc_kthread))
246 return PTR_ERR(bfin_jc_kthread);
247
248 ret = -ENOMEM;
249
250 bfin_jc_write_buf.head = bfin_jc_write_buf.tail = 0;
251 bfin_jc_write_buf.buf = kmalloc(CIRC_SIZE, GFP_KERNEL);
252 if (!bfin_jc_write_buf.buf)
253 goto err;
254
255 bfin_jc_driver = alloc_tty_driver(1);
256 if (!bfin_jc_driver)
257 goto err;
258
259 bfin_jc_driver->owner = THIS_MODULE;
260 bfin_jc_driver->driver_name = DRV_NAME;
261 bfin_jc_driver->name = DEV_NAME;
262 bfin_jc_driver->type = TTY_DRIVER_TYPE_SERIAL;
263 bfin_jc_driver->subtype = SERIAL_TYPE_NORMAL;
264 bfin_jc_driver->init_termios = tty_std_termios;
265 tty_set_operations(bfin_jc_driver, &bfin_jc_ops);
266
267 ret = tty_register_driver(bfin_jc_driver);
268 if (ret)
269 goto err;
270
271 pr_init(KERN_INFO DRV_NAME ": initialized\n");
272
273 return 0;
274
275 err:
276 put_tty_driver(bfin_jc_driver);
277 kfree(bfin_jc_write_buf.buf);
278 kthread_stop(bfin_jc_kthread);
279 return ret;
280}
281module_init(bfin_jc_init);
282
283static void __exit bfin_jc_exit(void)
284{
285 kthread_stop(bfin_jc_kthread);
286 kfree(bfin_jc_write_buf.buf);
287 tty_unregister_driver(bfin_jc_driver);
288 put_tty_driver(bfin_jc_driver);
289}
290module_exit(bfin_jc_exit);
291
292#if defined(CONFIG_BFIN_JTAG_COMM_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
293static void
294bfin_jc_straight_buffer_write(const char *buf, unsigned count)
295{
296 unsigned ate = 0;
297 while (bfin_read_DBGSTAT() & EMUDOF)
298 continue;
299 bfin_write_emudat(count);
300 while (ate < count) {
301 while (bfin_read_DBGSTAT() & EMUDOF)
302 continue;
303 bfin_write_emudat_chars(buf[ate], buf[ate+1], buf[ate+2], buf[ate+3]);
304 ate += 4;
305 }
306}
307#endif
308
309#ifdef CONFIG_BFIN_JTAG_COMM_CONSOLE
310static void
311bfin_jc_console_write(struct console *co, const char *buf, unsigned count)
312{
313 if (bfin_jc_kthread == NULL)
314 bfin_jc_straight_buffer_write(buf, count);
315 else
316 bfin_jc_circ_write(buf, count);
317}
318
319static struct tty_driver *
320bfin_jc_console_device(struct console *co, int *index)
321{
322 *index = co->index;
323 return bfin_jc_driver;
324}
325
326static struct console bfin_jc_console = {
327 .name = DEV_NAME,
328 .write = bfin_jc_console_write,
329 .device = bfin_jc_console_device,
330 .flags = CON_ANYTIME | CON_PRINTBUFFER,
331 .index = -1,
332};
333
334static int __init bfin_jc_console_init(void)
335{
336 register_console(&bfin_jc_console);
337 return 0;
338}
339console_initcall(bfin_jc_console_init);
340#endif
341
342#ifdef CONFIG_EARLY_PRINTK
343static void __init
344bfin_jc_early_write(struct console *co, const char *buf, unsigned int count)
345{
346 bfin_jc_straight_buffer_write(buf, count);
347}
348
349static struct __initdata console bfin_jc_early_console = {
350 .name = "early_BFJC",
351 .write = bfin_jc_early_write,
352 .flags = CON_ANYTIME | CON_PRINTBUFFER,
353 .index = -1,
354};
355
356struct console * __init
357bfin_jc_early_init(unsigned int port, unsigned int cflag)
358{
359 return &bfin_jc_early_console;
360}
361#endif
362
363MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
364MODULE_DESCRIPTION("TTY over Blackfin JTAG Communication");
365MODULE_LICENSE("GPL");
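
The new driver frames everything it sends over the 32-bit EMUDAT register as a length word followed by the payload packed four bytes per word (see bfin_jc_straight_buffer_write() above); the receive side in bfin_jc_emudat_manager() mirrors this, treating the first word of a burst as the byte count. A userspace sketch of that framing, with a hypothetical emit() standing in for bfin_write_emudat(); note that the kernel's fast path simply reads up to three bytes past the end of the buffer, while this sketch pads the tail with NULs instead:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void emit(uint32_t w)    /* assumption: stands in for bfin_write_emudat() */
    {
        printf("emudat <- 0x%08x\n", w);
    }

    static void jtag_frame_write(const char *buf, unsigned count)
    {
        unsigned ate = 0;

        emit(count);                /* length word goes out first */
        while (ate < count) {
            char c[4] = { 0 };      /* pad the final word with NULs */
            memcpy(c, buf + ate, count - ate < 4 ? count - ate : 4);
            emit((uint32_t)(uint8_t)c[0] |
                 (uint32_t)(uint8_t)c[1] << 8 |
                 (uint32_t)(uint8_t)c[2] << 16 |
                 (uint32_t)(uint8_t)c[3] << 24);
            ate += 4;
        }
    }

    int main(void)
    {
        jtag_frame_write("hello", 5);
        return 0;
    }
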
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index 1fdb9f657d8..f3366d3f06c 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -604,7 +604,6 @@
604 604
605#define NR_PORTS 256 605#define NR_PORTS 256
606 606
607#define ZE_V1_NPORTS 64
608#define ZO_V1 0 607#define ZO_V1 0
609#define ZO_V2 1 608#define ZO_V2 1
610#define ZE_V1 2 609#define ZE_V1 2
@@ -663,18 +662,6 @@
663static void cy_throttle(struct tty_struct *tty); 662static void cy_throttle(struct tty_struct *tty);
664static void cy_send_xchar(struct tty_struct *tty, char ch); 663static void cy_send_xchar(struct tty_struct *tty, char ch);
665 664
666#define IS_CYC_Z(card) ((card).num_chips == (unsigned int)-1)
667
668#define Z_FPGA_CHECK(card) \
669 ((readl(&((struct RUNTIME_9060 __iomem *) \
670 ((card).ctl_addr))->init_ctrl) & (1<<17)) != 0)
671
672#define ISZLOADED(card) (((ZO_V1 == readl(&((struct RUNTIME_9060 __iomem *) \
673 ((card).ctl_addr))->mail_box_0)) || \
674 Z_FPGA_CHECK(card)) && \
675 (ZFIRM_ID == readl(&((struct FIRM_ID __iomem *) \
676 ((card).base_addr+ID_ADDRESS))->signature)))
677
678#ifndef SERIAL_XMIT_SIZE 665#ifndef SERIAL_XMIT_SIZE
679#define SERIAL_XMIT_SIZE (min(PAGE_SIZE, 4096)) 666#define SERIAL_XMIT_SIZE (min(PAGE_SIZE, 4096))
680#endif 667#endif
@@ -687,8 +674,6 @@ static void cy_send_xchar(struct tty_struct *tty, char ch);
687#define DRIVER_VERSION 0x02010203 674#define DRIVER_VERSION 0x02010203
688#define RAM_SIZE 0x80000 675#define RAM_SIZE 0x80000
689 676
690#define Z_FPGA_LOADED(X) ((readl(&(X)->init_ctrl) & (1<<17)) != 0)
691
692enum zblock_type { 677enum zblock_type {
693 ZBLOCK_PRG = 0, 678 ZBLOCK_PRG = 0,
694 ZBLOCK_FPGA = 1 679 ZBLOCK_FPGA = 1
@@ -883,6 +868,29 @@ static void cyz_rx_restart(unsigned long);
883static struct timer_list cyz_rx_full_timer[NR_PORTS]; 868static struct timer_list cyz_rx_full_timer[NR_PORTS];
884#endif /* CONFIG_CYZ_INTR */ 869#endif /* CONFIG_CYZ_INTR */
885 870
871static inline bool cy_is_Z(struct cyclades_card *card)
872{
873 return card->num_chips == (unsigned int)-1;
874}
875
876static inline bool __cyz_fpga_loaded(struct RUNTIME_9060 __iomem *ctl_addr)
877{
878 return readl(&ctl_addr->init_ctrl) & (1 << 17);
879}
880
881static inline bool cyz_fpga_loaded(struct cyclades_card *card)
882{
883 return __cyz_fpga_loaded(card->ctl_addr.p9060);
884}
885
886static inline bool cyz_is_loaded(struct cyclades_card *card)
887{
888 struct FIRM_ID __iomem *fw_id = card->base_addr + ID_ADDRESS;
889
890 return (card->hw_ver == ZO_V1 || cyz_fpga_loaded(card)) &&
891 readl(&fw_id->signature) == ZFIRM_ID;
892}
893
886static inline int serial_paranoia_check(struct cyclades_port *info, 894static inline int serial_paranoia_check(struct cyclades_port *info,
887 char *name, const char *routine) 895 char *name, const char *routine)
888{ 896{
@@ -1395,19 +1403,15 @@ cyz_fetch_msg(struct cyclades_card *cinfo,
1395 unsigned long loc_doorbell; 1403 unsigned long loc_doorbell;
1396 1404
1397 firm_id = cinfo->base_addr + ID_ADDRESS; 1405 firm_id = cinfo->base_addr + ID_ADDRESS;
1398 if (!ISZLOADED(*cinfo))
1399 return -1;
1400 zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); 1406 zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
1401 board_ctrl = &zfw_ctrl->board_ctrl; 1407 board_ctrl = &zfw_ctrl->board_ctrl;
1402 1408
1403 loc_doorbell = readl(&((struct RUNTIME_9060 __iomem *) 1409 loc_doorbell = readl(&cinfo->ctl_addr.p9060->loc_doorbell);
1404 (cinfo->ctl_addr))->loc_doorbell);
1405 if (loc_doorbell) { 1410 if (loc_doorbell) {
1406 *cmd = (char)(0xff & loc_doorbell); 1411 *cmd = (char)(0xff & loc_doorbell);
1407 *channel = readl(&board_ctrl->fwcmd_channel); 1412 *channel = readl(&board_ctrl->fwcmd_channel);
1408 *param = (__u32) readl(&board_ctrl->fwcmd_param); 1413 *param = (__u32) readl(&board_ctrl->fwcmd_param);
1409 cy_writel(&((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))-> 1414 cy_writel(&cinfo->ctl_addr.p9060->loc_doorbell, 0xffffffff);
1410 loc_doorbell, 0xffffffff);
1411 return 1; 1415 return 1;
1412 } 1416 }
1413 return 0; 1417 return 0;
@@ -1424,15 +1428,14 @@ cyz_issue_cmd(struct cyclades_card *cinfo,
1424 unsigned int index; 1428 unsigned int index;
1425 1429
1426 firm_id = cinfo->base_addr + ID_ADDRESS; 1430 firm_id = cinfo->base_addr + ID_ADDRESS;
1427 if (!ISZLOADED(*cinfo)) 1431 if (!cyz_is_loaded(cinfo))
1428 return -1; 1432 return -1;
1429 1433
1430 zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); 1434 zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
1431 board_ctrl = &zfw_ctrl->board_ctrl; 1435 board_ctrl = &zfw_ctrl->board_ctrl;
1432 1436
1433 index = 0; 1437 index = 0;
1434 pci_doorbell = 1438 pci_doorbell = &cinfo->ctl_addr.p9060->pci_doorbell;
1435 &((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))->pci_doorbell;
1436 while ((readl(pci_doorbell) & 0xff) != 0) { 1439 while ((readl(pci_doorbell) & 0xff) != 0) {
1437 if (index++ == 1000) 1440 if (index++ == 1000)
1438 return (int)(readl(pci_doorbell) & 0xff); 1441 return (int)(readl(pci_doorbell) & 0xff);
@@ -1624,10 +1627,8 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
1624 static struct BOARD_CTRL __iomem *board_ctrl; 1627 static struct BOARD_CTRL __iomem *board_ctrl;
1625 static struct CH_CTRL __iomem *ch_ctrl; 1628 static struct CH_CTRL __iomem *ch_ctrl;
1626 static struct BUF_CTRL __iomem *buf_ctrl; 1629 static struct BUF_CTRL __iomem *buf_ctrl;
1627 __u32 channel; 1630 __u32 channel, param, fw_ver;
1628 __u8 cmd; 1631 __u8 cmd;
1629 __u32 param;
1630 __u32 hw_ver, fw_ver;
1631 int special_count; 1632 int special_count;
1632 int delta_count; 1633 int delta_count;
1633 1634
@@ -1635,8 +1636,6 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
1635 zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); 1636 zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
1636 board_ctrl = &zfw_ctrl->board_ctrl; 1637 board_ctrl = &zfw_ctrl->board_ctrl;
1637 fw_ver = readl(&board_ctrl->fw_version); 1638 fw_ver = readl(&board_ctrl->fw_version);
1638 hw_ver = readl(&((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))->
1639 mail_box_0);
1640 1639
1641 while (cyz_fetch_msg(cinfo, &channel, &cmd, &param) == 1) { 1640 while (cyz_fetch_msg(cinfo, &channel, &cmd, &param) == 1) {
1642 special_count = 0; 1641 special_count = 0;
@@ -1737,15 +1736,7 @@ static irqreturn_t cyz_interrupt(int irq, void *dev_id)
1737{ 1736{
1738 struct cyclades_card *cinfo = dev_id; 1737 struct cyclades_card *cinfo = dev_id;
1739 1738
1740 if (unlikely(cinfo == NULL)) { 1739 if (unlikely(!cyz_is_loaded(cinfo))) {
1741#ifdef CY_DEBUG_INTERRUPTS
1742 printk(KERN_DEBUG "cyz_interrupt: spurious interrupt %d\n",
1743 irq);
1744#endif
1745 return IRQ_NONE; /* spurious interrupt */
1746 }
1747
1748 if (unlikely(!ISZLOADED(*cinfo))) {
1749#ifdef CY_DEBUG_INTERRUPTS 1740#ifdef CY_DEBUG_INTERRUPTS
1750 printk(KERN_DEBUG "cyz_interrupt: board not yet loaded " 1741 printk(KERN_DEBUG "cyz_interrupt: board not yet loaded "
1751 "(IRQ%d).\n", irq); 1742 "(IRQ%d).\n", irq);
@@ -1785,7 +1776,6 @@ static void cyz_poll(unsigned long arg)
1785 struct tty_struct *tty; 1776 struct tty_struct *tty;
1786 struct FIRM_ID __iomem *firm_id; 1777 struct FIRM_ID __iomem *firm_id;
1787 struct ZFW_CTRL __iomem *zfw_ctrl; 1778 struct ZFW_CTRL __iomem *zfw_ctrl;
1788 struct BOARD_CTRL __iomem *board_ctrl;
1789 struct BUF_CTRL __iomem *buf_ctrl; 1779 struct BUF_CTRL __iomem *buf_ctrl;
1790 unsigned long expires = jiffies + HZ; 1780 unsigned long expires = jiffies + HZ;
1791 unsigned int port, card; 1781 unsigned int port, card;
@@ -1793,19 +1783,17 @@ static void cyz_poll(unsigned long arg)
1793 for (card = 0; card < NR_CARDS; card++) { 1783 for (card = 0; card < NR_CARDS; card++) {
1794 cinfo = &cy_card[card]; 1784 cinfo = &cy_card[card];
1795 1785
1796 if (!IS_CYC_Z(*cinfo)) 1786 if (!cy_is_Z(cinfo))
1797 continue; 1787 continue;
1798 if (!ISZLOADED(*cinfo)) 1788 if (!cyz_is_loaded(cinfo))
1799 continue; 1789 continue;
1800 1790
1801 firm_id = cinfo->base_addr + ID_ADDRESS; 1791 firm_id = cinfo->base_addr + ID_ADDRESS;
1802 zfw_ctrl = cinfo->base_addr + 1792 zfw_ctrl = cinfo->base_addr +
1803 (readl(&firm_id->zfwctrl_addr) & 0xfffff); 1793 (readl(&firm_id->zfwctrl_addr) & 0xfffff);
1804 board_ctrl = &(zfw_ctrl->board_ctrl);
1805 1794
1806 /* Skip first polling cycle to avoid racing conditions with the FW */ 1795 /* Skip first polling cycle to avoid racing conditions with the FW */
1807 if (!cinfo->intr_enabled) { 1796 if (!cinfo->intr_enabled) {
1808 cinfo->nports = (int)readl(&board_ctrl->n_channel);
1809 cinfo->intr_enabled = 1; 1797 cinfo->intr_enabled = 1;
1810 continue; 1798 continue;
1811 } 1799 }
@@ -1874,7 +1862,7 @@ static int startup(struct cyclades_port *info)
1874 1862
1875 set_line_char(info); 1863 set_line_char(info);
1876 1864
1877 if (!IS_CYC_Z(*card)) { 1865 if (!cy_is_Z(card)) {
1878 chip = channel >> 2; 1866 chip = channel >> 2;
1879 channel &= 0x03; 1867 channel &= 0x03;
1880 index = card->bus_index; 1868 index = card->bus_index;
@@ -1931,7 +1919,7 @@ static int startup(struct cyclades_port *info)
1931 base_addr = card->base_addr; 1919 base_addr = card->base_addr;
1932 1920
1933 firm_id = base_addr + ID_ADDRESS; 1921 firm_id = base_addr + ID_ADDRESS;
1934 if (!ISZLOADED(*card)) 1922 if (!cyz_is_loaded(card))
1935 return -ENODEV; 1923 return -ENODEV;
1936 1924
1937 zfw_ctrl = card->base_addr + 1925 zfw_ctrl = card->base_addr +
@@ -2026,7 +2014,7 @@ static void start_xmit(struct cyclades_port *info)
2026 2014
2027 card = info->card; 2015 card = info->card;
2028 channel = info->line - card->first_line; 2016 channel = info->line - card->first_line;
2029 if (!IS_CYC_Z(*card)) { 2017 if (!cy_is_Z(card)) {
2030 chip = channel >> 2; 2018 chip = channel >> 2;
2031 channel &= 0x03; 2019 channel &= 0x03;
2032 index = card->bus_index; 2020 index = card->bus_index;
@@ -2070,7 +2058,7 @@ static void shutdown(struct cyclades_port *info)
2070 2058
2071 card = info->card; 2059 card = info->card;
2072 channel = info->line - card->first_line; 2060 channel = info->line - card->first_line;
2073 if (!IS_CYC_Z(*card)) { 2061 if (!cy_is_Z(card)) {
2074 chip = channel >> 2; 2062 chip = channel >> 2;
2075 channel &= 0x03; 2063 channel &= 0x03;
2076 index = card->bus_index; 2064 index = card->bus_index;
@@ -2126,7 +2114,7 @@ static void shutdown(struct cyclades_port *info)
2126#endif 2114#endif
2127 2115
2128 firm_id = base_addr + ID_ADDRESS; 2116 firm_id = base_addr + ID_ADDRESS;
2129 if (!ISZLOADED(*card)) 2117 if (!cyz_is_loaded(card))
2130 return; 2118 return;
2131 2119
2132 zfw_ctrl = card->base_addr + 2120 zfw_ctrl = card->base_addr +
@@ -2233,7 +2221,7 @@ block_til_ready(struct tty_struct *tty, struct file *filp,
2233#endif 2221#endif
2234 info->port.blocked_open++; 2222 info->port.blocked_open++;
2235 2223
2236 if (!IS_CYC_Z(*cinfo)) { 2224 if (!cy_is_Z(cinfo)) {
2237 chip = channel >> 2; 2225 chip = channel >> 2;
2238 channel &= 0x03; 2226 channel &= 0x03;
2239 index = cinfo->bus_index; 2227 index = cinfo->bus_index;
@@ -2296,7 +2284,7 @@ block_til_ready(struct tty_struct *tty, struct file *filp,
2296 2284
2297 base_addr = cinfo->base_addr; 2285 base_addr = cinfo->base_addr;
2298 firm_id = base_addr + ID_ADDRESS; 2286 firm_id = base_addr + ID_ADDRESS;
2299 if (!ISZLOADED(*cinfo)) { 2287 if (!cyz_is_loaded(cinfo)) {
2300 __set_current_state(TASK_RUNNING); 2288 __set_current_state(TASK_RUNNING);
2301 remove_wait_queue(&info->port.open_wait, &wait); 2289 remove_wait_queue(&info->port.open_wait, &wait);
2302 return -EINVAL; 2290 return -EINVAL;
@@ -2397,16 +2385,14 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
2397 treat it as absent from the system. This 2385 treat it as absent from the system. This
2398 will make the user pay attention. 2386 will make the user pay attention.
2399 */ 2387 */
2400 if (IS_CYC_Z(*info->card)) { 2388 if (cy_is_Z(info->card)) {
2401 struct cyclades_card *cinfo = info->card; 2389 struct cyclades_card *cinfo = info->card;
2402 struct FIRM_ID __iomem *firm_id = cinfo->base_addr + ID_ADDRESS; 2390 struct FIRM_ID __iomem *firm_id = cinfo->base_addr + ID_ADDRESS;
2403 2391
2404 if (!ISZLOADED(*cinfo)) { 2392 if (!cyz_is_loaded(cinfo)) {
2405 if (((ZE_V1 == readl(&((struct RUNTIME_9060 __iomem *) 2393 if (cinfo->hw_ver == ZE_V1 && cyz_fpga_loaded(cinfo) &&
2406 (cinfo->ctl_addr))->mail_box_0)) && 2394 readl(&firm_id->signature) ==
2407 Z_FPGA_CHECK(*cinfo)) && 2395 ZFIRM_HLT) {
2408 (ZFIRM_HLT == readl(
2409 &firm_id->signature))) {
2410 printk(KERN_ERR "cyc:Cyclades-Z Error: you " 2396 printk(KERN_ERR "cyc:Cyclades-Z Error: you "
2411 "need an external power supply for " 2397 "need an external power supply for "
2412 "this number of ports.\nFirmware " 2398 "this number of ports.\nFirmware "
@@ -2423,18 +2409,13 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
2423 interrupts should be enabled as soon as the first open 2409 interrupts should be enabled as soon as the first open
2424 happens to one of its ports. */ 2410 happens to one of its ports. */
2425 if (!cinfo->intr_enabled) { 2411 if (!cinfo->intr_enabled) {
2426 struct ZFW_CTRL __iomem *zfw_ctrl; 2412 u16 intr;
2427 struct BOARD_CTRL __iomem *board_ctrl;
2428
2429 zfw_ctrl = cinfo->base_addr +
2430 (readl(&firm_id->zfwctrl_addr) &
2431 0xfffff);
2432
2433 board_ctrl = &zfw_ctrl->board_ctrl;
2434 2413
2435 /* Enable interrupts on the PLX chip */ 2414 /* Enable interrupts on the PLX chip */
2436 cy_writew(cinfo->ctl_addr + 0x68, 2415 intr = readw(&cinfo->ctl_addr.p9060->
2437 readw(cinfo->ctl_addr + 0x68) | 0x0900); 2416 intr_ctrl_stat) | 0x0900;
2417 cy_writew(&cinfo->ctl_addr.p9060->
2418 intr_ctrl_stat, intr);
2438 /* Enable interrupts on the FW */ 2419 /* Enable interrupts on the FW */
2439 retval = cyz_issue_cmd(cinfo, 0, 2420 retval = cyz_issue_cmd(cinfo, 0,
2440 C_CM_IRQ_ENBL, 0L); 2421 C_CM_IRQ_ENBL, 0L);
@@ -2442,8 +2423,6 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
2442 printk(KERN_ERR "cyc:IRQ enable retval " 2423 printk(KERN_ERR "cyc:IRQ enable retval "
2443 "was %x\n", retval); 2424 "was %x\n", retval);
2444 } 2425 }
2445 cinfo->nports =
2446 (int)readl(&board_ctrl->n_channel);
2447 cinfo->intr_enabled = 1; 2426 cinfo->intr_enabled = 1;
2448 } 2427 }
2449 } 2428 }
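
Several hunks here also replace raw offset arithmetic on ctl_addr (readw(cinfo->ctl_addr + 0x68)) with a named field through a typed pointer (cinfo->ctl_addr.p9060->intr_ctrl_stat); this is what motivates turning ctl_addr into a union of PLX-9050 and PLX-9060 overlay pointers, seen later as .p9050 and .p9060. A standalone sketch of the overlay idea, with the struct reduced to the one register these hunks touch and the MMIO accessors modeled as plain loads and stores:

    #include <stdint.h>
    #include <stdio.h>

    struct runtime_9060 {
        uint8_t pad[0x68];
        uint16_t intr_ctrl_stat;    /* lives at byte offset 0x68 */
    };

    static uint16_t backing[0x80];  /* zeroed, 2-byte aligned backing store */

    int main(void)
    {
        void *ctl_addr = backing;
        struct runtime_9060 *p9060 = ctl_addr;

        /* old style poked a magic offset:
         *     *(uint16_t *)((char *)ctl_addr + 0x68) |= 0x0900;
         * new style names the register through the overlay: */
        p9060->intr_ctrl_stat |= 0x0900;
        printf("0x%04x\n", p9060->intr_ctrl_stat);
        return 0;
    }
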
@@ -2556,7 +2535,7 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout)
2556#endif 2535#endif
2557 card = info->card; 2536 card = info->card;
2558 channel = (info->line) - (card->first_line); 2537 channel = (info->line) - (card->first_line);
2559 if (!IS_CYC_Z(*card)) { 2538 if (!cy_is_Z(card)) {
2560 chip = channel >> 2; 2539 chip = channel >> 2;
2561 channel &= 0x03; 2540 channel &= 0x03;
2562 index = card->bus_index; 2541 index = card->bus_index;
@@ -2601,7 +2580,7 @@ static void cy_flush_buffer(struct tty_struct *tty)
2601 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; 2580 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2602 spin_unlock_irqrestore(&card->card_lock, flags); 2581 spin_unlock_irqrestore(&card->card_lock, flags);
2603 2582
2604 if (IS_CYC_Z(*card)) { /* If it is a Z card, flush the on-board 2583 if (cy_is_Z(card)) { /* If it is a Z card, flush the on-board
2605 buffers as well */ 2584 buffers as well */
2606 spin_lock_irqsave(&card->card_lock, flags); 2585 spin_lock_irqsave(&card->card_lock, flags);
2607 retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_TX, 0L); 2586 retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_TX, 0L);
@@ -2682,7 +2661,7 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
2682 2661
2683 spin_lock_irqsave(&card->card_lock, flags); 2662 spin_lock_irqsave(&card->card_lock, flags);
2684 2663
2685 if (!IS_CYC_Z(*card)) { 2664 if (!cy_is_Z(card)) {
2686 int channel = info->line - card->first_line; 2665 int channel = info->line - card->first_line;
2687 int index = card->bus_index; 2666 int index = card->bus_index;
2688 void __iomem *base_addr = card->base_addr + 2667 void __iomem *base_addr = card->base_addr +
@@ -2902,7 +2881,7 @@ static int cy_chars_in_buffer(struct tty_struct *tty)
2902 channel = (info->line) - (card->first_line); 2881 channel = (info->line) - (card->first_line);
2903 2882
2904#ifdef Z_EXT_CHARS_IN_BUFFER 2883#ifdef Z_EXT_CHARS_IN_BUFFER
2905 if (!IS_CYC_Z(cy_card[card])) { 2884 if (!cy_is_Z(card)) {
2906#endif /* Z_EXT_CHARS_IN_BUFFER */ 2885#endif /* Z_EXT_CHARS_IN_BUFFER */
2907#ifdef CY_DEBUG_IO 2886#ifdef CY_DEBUG_IO
2908 printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n", 2887 printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n",
@@ -2984,7 +2963,6 @@ static void set_line_char(struct cyclades_port *info)
2984 void __iomem *base_addr; 2963 void __iomem *base_addr;
2985 int chip, channel, index; 2964 int chip, channel, index;
2986 unsigned cflag, iflag; 2965 unsigned cflag, iflag;
2987 unsigned short chip_number;
2988 int baud, baud_rate = 0; 2966 int baud, baud_rate = 0;
2989 int i; 2967 int i;
2990 2968
@@ -3013,9 +2991,8 @@ static void set_line_char(struct cyclades_port *info)
3013 2991
3014 card = info->card; 2992 card = info->card;
3015 channel = info->line - card->first_line; 2993 channel = info->line - card->first_line;
3016 chip_number = channel / 4;
3017 2994
3018 if (!IS_CYC_Z(*card)) { 2995 if (!cy_is_Z(card)) {
3019 2996
3020 index = card->bus_index; 2997 index = card->bus_index;
3021 2998
@@ -3233,21 +3210,17 @@ static void set_line_char(struct cyclades_port *info)
3233 } else { 3210 } else {
3234 struct FIRM_ID __iomem *firm_id; 3211 struct FIRM_ID __iomem *firm_id;
3235 struct ZFW_CTRL __iomem *zfw_ctrl; 3212 struct ZFW_CTRL __iomem *zfw_ctrl;
3236 struct BOARD_CTRL __iomem *board_ctrl;
3237 struct CH_CTRL __iomem *ch_ctrl; 3213 struct CH_CTRL __iomem *ch_ctrl;
3238 struct BUF_CTRL __iomem *buf_ctrl;
3239 __u32 sw_flow; 3214 __u32 sw_flow;
3240 int retval; 3215 int retval;
3241 3216
3242 firm_id = card->base_addr + ID_ADDRESS; 3217 firm_id = card->base_addr + ID_ADDRESS;
3243 if (!ISZLOADED(*card)) 3218 if (!cyz_is_loaded(card))
3244 return; 3219 return;
3245 3220
3246 zfw_ctrl = card->base_addr + 3221 zfw_ctrl = card->base_addr +
3247 (readl(&firm_id->zfwctrl_addr) & 0xfffff); 3222 (readl(&firm_id->zfwctrl_addr) & 0xfffff);
3248 board_ctrl = &zfw_ctrl->board_ctrl;
3249 ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]); 3223 ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]);
3250 buf_ctrl = &zfw_ctrl->buf_ctrl[channel];
3251 3224
3252 /* baud rate */ 3225 /* baud rate */
3253 baud = tty_get_baud_rate(info->port.tty); 3226 baud = tty_get_baud_rate(info->port.tty);
@@ -3457,7 +3430,7 @@ static int get_lsr_info(struct cyclades_port *info, unsigned int __user *value)
3457 3430
3458 card = info->card; 3431 card = info->card;
3459 channel = (info->line) - (card->first_line); 3432 channel = (info->line) - (card->first_line);
3460 if (!IS_CYC_Z(*card)) { 3433 if (!cy_is_Z(card)) {
3461 chip = channel >> 2; 3434 chip = channel >> 2;
3462 channel &= 0x03; 3435 channel &= 0x03;
3463 index = card->bus_index; 3436 index = card->bus_index;
@@ -3497,7 +3470,7 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file)
3497 3470
3498 card = info->card; 3471 card = info->card;
3499 channel = info->line - card->first_line; 3472 channel = info->line - card->first_line;
3500 if (!IS_CYC_Z(*card)) { 3473 if (!cy_is_Z(card)) {
3501 chip = channel >> 2; 3474 chip = channel >> 2;
3502 channel &= 0x03; 3475 channel &= 0x03;
3503 index = card->bus_index; 3476 index = card->bus_index;
@@ -3523,7 +3496,7 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file)
3523 } else { 3496 } else {
3524 base_addr = card->base_addr; 3497 base_addr = card->base_addr;
3525 firm_id = card->base_addr + ID_ADDRESS; 3498 firm_id = card->base_addr + ID_ADDRESS;
3526 if (ISZLOADED(*card)) { 3499 if (cyz_is_loaded(card)) {
3527 zfw_ctrl = card->base_addr + 3500 zfw_ctrl = card->base_addr +
3528 (readl(&firm_id->zfwctrl_addr) & 0xfffff); 3501 (readl(&firm_id->zfwctrl_addr) & 0xfffff);
3529 board_ctrl = &zfw_ctrl->board_ctrl; 3502 board_ctrl = &zfw_ctrl->board_ctrl;
@@ -3566,7 +3539,7 @@ cy_tiocmset(struct tty_struct *tty, struct file *file,
3566 3539
3567 card = info->card; 3540 card = info->card;
3568 channel = (info->line) - (card->first_line); 3541 channel = (info->line) - (card->first_line);
3569 if (!IS_CYC_Z(*card)) { 3542 if (!cy_is_Z(card)) {
3570 chip = channel >> 2; 3543 chip = channel >> 2;
3571 channel &= 0x03; 3544 channel &= 0x03;
3572 index = card->bus_index; 3545 index = card->bus_index;
@@ -3641,7 +3614,7 @@ cy_tiocmset(struct tty_struct *tty, struct file *file,
3641 base_addr = card->base_addr; 3614 base_addr = card->base_addr;
3642 3615
3643 firm_id = card->base_addr + ID_ADDRESS; 3616 firm_id = card->base_addr + ID_ADDRESS;
3644 if (ISZLOADED(*card)) { 3617 if (cyz_is_loaded(card)) {
3645 zfw_ctrl = card->base_addr + 3618 zfw_ctrl = card->base_addr +
3646 (readl(&firm_id->zfwctrl_addr) & 0xfffff); 3619 (readl(&firm_id->zfwctrl_addr) & 0xfffff);
3647 board_ctrl = &zfw_ctrl->board_ctrl; 3620 board_ctrl = &zfw_ctrl->board_ctrl;
@@ -3713,7 +3686,7 @@ static int cy_break(struct tty_struct *tty, int break_state)
3713 card = info->card; 3686 card = info->card;
3714 3687
3715 spin_lock_irqsave(&card->card_lock, flags); 3688 spin_lock_irqsave(&card->card_lock, flags);
3716 if (!IS_CYC_Z(*card)) { 3689 if (!cy_is_Z(card)) {
3717 /* Let the transmit ISR take care of this (since it 3690 /* Let the transmit ISR take care of this (since it
3718 requires stuffing characters into the output stream). 3691 requires stuffing characters into the output stream).
3719 */ 3692 */
@@ -3782,7 +3755,7 @@ static int set_threshold(struct cyclades_port *info, unsigned long value)
3782 3755
3783 card = info->card; 3756 card = info->card;
3784 channel = info->line - card->first_line; 3757 channel = info->line - card->first_line;
3785 if (!IS_CYC_Z(*card)) { 3758 if (!cy_is_Z(card)) {
3786 chip = channel >> 2; 3759 chip = channel >> 2;
3787 channel &= 0x03; 3760 channel &= 0x03;
3788 index = card->bus_index; 3761 index = card->bus_index;
@@ -3810,7 +3783,7 @@ static int get_threshold(struct cyclades_port *info,
3810 3783
3811 card = info->card; 3784 card = info->card;
3812 channel = info->line - card->first_line; 3785 channel = info->line - card->first_line;
3813 if (!IS_CYC_Z(*card)) { 3786 if (!cy_is_Z(card)) {
3814 chip = channel >> 2; 3787 chip = channel >> 2;
3815 channel &= 0x03; 3788 channel &= 0x03;
3816 index = card->bus_index; 3789 index = card->bus_index;
@@ -3844,7 +3817,7 @@ static int set_timeout(struct cyclades_port *info, unsigned long value)
3844 3817
3845 card = info->card; 3818 card = info->card;
3846 channel = info->line - card->first_line; 3819 channel = info->line - card->first_line;
3847 if (!IS_CYC_Z(*card)) { 3820 if (!cy_is_Z(card)) {
3848 chip = channel >> 2; 3821 chip = channel >> 2;
3849 channel &= 0x03; 3822 channel &= 0x03;
3850 index = card->bus_index; 3823 index = card->bus_index;
@@ -3867,7 +3840,7 @@ static int get_timeout(struct cyclades_port *info,
3867 3840
3868 card = info->card; 3841 card = info->card;
3869 channel = info->line - card->first_line; 3842 channel = info->line - card->first_line;
3870 if (!IS_CYC_Z(*card)) { 3843 if (!cy_is_Z(card)) {
3871 chip = channel >> 2; 3844 chip = channel >> 2;
3872 channel &= 0x03; 3845 channel &= 0x03;
3873 index = card->bus_index; 3846 index = card->bus_index;
@@ -4121,7 +4094,7 @@ static void cy_send_xchar(struct tty_struct *tty, char ch)
4121 card = info->card; 4094 card = info->card;
4122 channel = info->line - card->first_line; 4095 channel = info->line - card->first_line;
4123 4096
4124 if (IS_CYC_Z(*card)) { 4097 if (cy_is_Z(card)) {
4125 if (ch == STOP_CHAR(tty)) 4098 if (ch == STOP_CHAR(tty))
4126 cyz_issue_cmd(card, channel, C_CM_SENDXOFF, 0L); 4099 cyz_issue_cmd(card, channel, C_CM_SENDXOFF, 0L);
4127 else if (ch == START_CHAR(tty)) 4100 else if (ch == START_CHAR(tty))
@@ -4154,7 +4127,7 @@ static void cy_throttle(struct tty_struct *tty)
4154 card = info->card; 4127 card = info->card;
4155 4128
4156 if (I_IXOFF(tty)) { 4129 if (I_IXOFF(tty)) {
4157 if (!IS_CYC_Z(*card)) 4130 if (!cy_is_Z(card))
4158 cy_send_xchar(tty, STOP_CHAR(tty)); 4131 cy_send_xchar(tty, STOP_CHAR(tty));
4159 else 4132 else
4160 info->throttle = 1; 4133 info->throttle = 1;
@@ -4162,7 +4135,7 @@ static void cy_throttle(struct tty_struct *tty)
4162 4135
4163 if (tty->termios->c_cflag & CRTSCTS) { 4136 if (tty->termios->c_cflag & CRTSCTS) {
4164 channel = info->line - card->first_line; 4137 channel = info->line - card->first_line;
4165 if (!IS_CYC_Z(*card)) { 4138 if (!cy_is_Z(card)) {
4166 chip = channel >> 2; 4139 chip = channel >> 2;
4167 channel &= 0x03; 4140 channel &= 0x03;
4168 index = card->bus_index; 4141 index = card->bus_index;
@@ -4219,7 +4192,7 @@ static void cy_unthrottle(struct tty_struct *tty)
4219 if (tty->termios->c_cflag & CRTSCTS) { 4192 if (tty->termios->c_cflag & CRTSCTS) {
4220 card = info->card; 4193 card = info->card;
4221 channel = info->line - card->first_line; 4194 channel = info->line - card->first_line;
4222 if (!IS_CYC_Z(*card)) { 4195 if (!cy_is_Z(card)) {
4223 chip = channel >> 2; 4196 chip = channel >> 2;
4224 channel &= 0x03; 4197 channel &= 0x03;
4225 index = card->bus_index; 4198 index = card->bus_index;
@@ -4263,7 +4236,7 @@ static void cy_stop(struct tty_struct *tty)
4263 4236
4264 cinfo = info->card; 4237 cinfo = info->card;
4265 channel = info->line - cinfo->first_line; 4238 channel = info->line - cinfo->first_line;
4266 if (!IS_CYC_Z(*cinfo)) { 4239 if (!cy_is_Z(cinfo)) {
4267 index = cinfo->bus_index; 4240 index = cinfo->bus_index;
4268 chip = channel >> 2; 4241 chip = channel >> 2;
4269 channel &= 0x03; 4242 channel &= 0x03;
@@ -4296,7 +4269,7 @@ static void cy_start(struct tty_struct *tty)
4296 cinfo = info->card; 4269 cinfo = info->card;
4297 channel = info->line - cinfo->first_line; 4270 channel = info->line - cinfo->first_line;
4298 index = cinfo->bus_index; 4271 index = cinfo->bus_index;
4299 if (!IS_CYC_Z(*cinfo)) { 4272 if (!cy_is_Z(cinfo)) {
4300 chip = channel >> 2; 4273 chip = channel >> 2;
4301 channel &= 0x03; 4274 channel &= 0x03;
4302 base_addr = cinfo->base_addr + (cy_chip_offset[chip] << index); 4275 base_addr = cinfo->base_addr + (cy_chip_offset[chip] << index);
@@ -4347,33 +4320,20 @@ static void cy_hangup(struct tty_struct *tty)
4347static int __devinit cy_init_card(struct cyclades_card *cinfo) 4320static int __devinit cy_init_card(struct cyclades_card *cinfo)
4348{ 4321{
4349 struct cyclades_port *info; 4322 struct cyclades_port *info;
4350 u32 uninitialized_var(mailbox); 4323 unsigned int port;
4351 unsigned int nports, port;
4352 unsigned short chip_number; 4324 unsigned short chip_number;
4353 int uninitialized_var(index);
4354 4325
4355 spin_lock_init(&cinfo->card_lock); 4326 spin_lock_init(&cinfo->card_lock);
4327 cinfo->intr_enabled = 0;
4356 4328
4357 if (IS_CYC_Z(*cinfo)) { /* Cyclades-Z */ 4329 cinfo->ports = kcalloc(cinfo->nports, sizeof(*cinfo->ports),
4358 mailbox = readl(&((struct RUNTIME_9060 __iomem *) 4330 GFP_KERNEL);
4359 cinfo->ctl_addr)->mail_box_0);
4360 nports = (mailbox == ZE_V1) ? ZE_V1_NPORTS : 8;
4361 cinfo->intr_enabled = 0;
4362 cinfo->nports = 0; /* Will be correctly set later, after
4363 Z FW is loaded */
4364 } else {
4365 index = cinfo->bus_index;
4366 nports = cinfo->nports = CyPORTS_PER_CHIP * cinfo->num_chips;
4367 }
4368
4369 cinfo->ports = kzalloc(sizeof(*cinfo->ports) * nports, GFP_KERNEL);
4370 if (cinfo->ports == NULL) { 4331 if (cinfo->ports == NULL) {
4371 printk(KERN_ERR "Cyclades: cannot allocate ports\n"); 4332 printk(KERN_ERR "Cyclades: cannot allocate ports\n");
4372 cinfo->nports = 0;
4373 return -ENOMEM; 4333 return -ENOMEM;
4374 } 4334 }
4375 4335
4376 for (port = cinfo->first_line; port < cinfo->first_line + nports; 4336 for (port = cinfo->first_line; port < cinfo->first_line + cinfo->nports;
4377 port++) { 4337 port++) {
4378 info = &cinfo->ports[port - cinfo->first_line]; 4338 info = &cinfo->ports[port - cinfo->first_line];
4379 tty_port_init(&info->port); 4339 tty_port_init(&info->port);
@@ -4387,9 +4347,9 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo)
4387 init_completion(&info->shutdown_wait); 4347 init_completion(&info->shutdown_wait);
4388 init_waitqueue_head(&info->delta_msr_wait); 4348 init_waitqueue_head(&info->delta_msr_wait);
4389 4349
4390 if (IS_CYC_Z(*cinfo)) { 4350 if (cy_is_Z(cinfo)) {
4391 info->type = PORT_STARTECH; 4351 info->type = PORT_STARTECH;
4392 if (mailbox == ZO_V1) 4352 if (cinfo->hw_ver == ZO_V1)
4393 info->xmit_fifo_size = CYZ_FIFO_SIZE; 4353 info->xmit_fifo_size = CYZ_FIFO_SIZE;
4394 else 4354 else
4395 info->xmit_fifo_size = 4 * CYZ_FIFO_SIZE; 4355 info->xmit_fifo_size = 4 * CYZ_FIFO_SIZE;
@@ -4398,6 +4358,7 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo)
4398 cyz_rx_restart, (unsigned long)info); 4358 cyz_rx_restart, (unsigned long)info);
4399#endif 4359#endif
4400 } else { 4360 } else {
4361 int index = cinfo->bus_index;
4401 info->type = PORT_CIRRUS; 4362 info->type = PORT_CIRRUS;
4402 info->xmit_fifo_size = CyMAX_CHAR_FIFO; 4363 info->xmit_fifo_size = CyMAX_CHAR_FIFO;
4403 info->cor1 = CyPARITY_NONE | Cy_1_STOP | Cy_8_BITS; 4364 info->cor1 = CyPARITY_NONE | Cy_1_STOP | Cy_8_BITS;
@@ -4430,7 +4391,7 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo)
4430 } 4391 }
4431 4392
4432#ifndef CONFIG_CYZ_INTR 4393#ifndef CONFIG_CYZ_INTR
4433 if (IS_CYC_Z(*cinfo) && !timer_pending(&cyz_timerlist)) { 4394 if (cy_is_Z(cinfo) && !timer_pending(&cyz_timerlist)) {
4434 mod_timer(&cyz_timerlist, jiffies + 1); 4395 mod_timer(&cyz_timerlist, jiffies + 1);
4435#ifdef CY_PCI_DEBUG 4396#ifdef CY_PCI_DEBUG
4436 printk(KERN_DEBUG "Cyclades-Z polling initialized\n"); 4397 printk(KERN_DEBUG "Cyclades-Z polling initialized\n");
@@ -4621,11 +4582,12 @@ static int __init cy_detect_isa(void)
4621 4582
4622 /* set cy_card */ 4583 /* set cy_card */
4623 cy_card[j].base_addr = cy_isa_address; 4584 cy_card[j].base_addr = cy_isa_address;
4624 cy_card[j].ctl_addr = NULL; 4585 cy_card[j].ctl_addr.p9050 = NULL;
4625 cy_card[j].irq = (int)cy_isa_irq; 4586 cy_card[j].irq = (int)cy_isa_irq;
4626 cy_card[j].bus_index = 0; 4587 cy_card[j].bus_index = 0;
4627 cy_card[j].first_line = cy_next_channel; 4588 cy_card[j].first_line = cy_next_channel;
4628 cy_card[j].num_chips = cy_isa_nchan / 4; 4589 cy_card[j].num_chips = cy_isa_nchan / CyPORTS_PER_CHIP;
4590 cy_card[j].nports = cy_isa_nchan;
4629 if (cy_init_card(&cy_card[j])) { 4591 if (cy_init_card(&cy_card[j])) {
4630 cy_card[j].base_addr = NULL; 4592 cy_card[j].base_addr = NULL;
4631 free_irq(cy_isa_irq, &cy_card[j]); 4593 free_irq(cy_isa_irq, &cy_card[j]);
@@ -4781,7 +4743,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
4781 struct CUSTOM_REG __iomem *cust = base_addr; 4743 struct CUSTOM_REG __iomem *cust = base_addr;
4782 struct ZFW_CTRL __iomem *pt_zfwctrl; 4744 struct ZFW_CTRL __iomem *pt_zfwctrl;
4783 void __iomem *tmp; 4745 void __iomem *tmp;
4784 u32 mailbox, status; 4746 u32 mailbox, status, nchan;
4785 unsigned int i; 4747 unsigned int i;
4786 int retval; 4748 int retval;
4787 4749
@@ -4793,7 +4755,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
4793 4755
4794 /* Check whether the firmware is already loaded and running. If 4756 /* Check whether the firmware is already loaded and running. If
4795 positive, skip this board */ 4757 positive, skip this board */
4796 if (Z_FPGA_LOADED(ctl_addr) && readl(&fid->signature) == ZFIRM_ID) { 4758 if (__cyz_fpga_loaded(ctl_addr) && readl(&fid->signature) == ZFIRM_ID) {
4797 u32 cntval = readl(base_addr + 0x190); 4759 u32 cntval = readl(base_addr + 0x190);
4798 4760
4799 udelay(100); 4761 udelay(100);
@@ -4812,7 +4774,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
4812 4774
4813 mailbox = readl(&ctl_addr->mail_box_0); 4775 mailbox = readl(&ctl_addr->mail_box_0);
4814 4776
4815 if (mailbox == 0 || Z_FPGA_LOADED(ctl_addr)) { 4777 if (mailbox == 0 || __cyz_fpga_loaded(ctl_addr)) {
4816 /* stops CPU and set window to beginning of RAM */ 4778 /* stops CPU and set window to beginning of RAM */
4817 cy_writel(&ctl_addr->loc_addr_base, WIN_CREG); 4779 cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
4818 cy_writel(&cust->cpu_stop, 0); 4780 cy_writel(&cust->cpu_stop, 0);
@@ -4828,7 +4790,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
4828 base_addr); 4790 base_addr);
4829 if (retval) 4791 if (retval)
4830 goto err_rel; 4792 goto err_rel;
4831 if (!Z_FPGA_LOADED(ctl_addr)) { 4793 if (!__cyz_fpga_loaded(ctl_addr)) {
4832 dev_err(&pdev->dev, "fw upload successful, but fw is " 4794 dev_err(&pdev->dev, "fw upload successful, but fw is "
4833 "not loaded\n"); 4795 "not loaded\n");
4834 goto err_rel; 4796 goto err_rel;
@@ -4887,7 +4849,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
4887 "system before loading the new FW to the " 4849 "system before loading the new FW to the "
4888 "Cyclades-Z.\n"); 4850 "Cyclades-Z.\n");
4889 4851
4890 if (Z_FPGA_LOADED(ctl_addr)) 4852 if (__cyz_fpga_loaded(ctl_addr))
4891 plx_init(pdev, irq, ctl_addr); 4853 plx_init(pdev, irq, ctl_addr);
4892 4854
4893 retval = -EIO; 4855 retval = -EIO;
@@ -4902,16 +4864,16 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
4902 base_addr + ID_ADDRESS, readl(&fid->zfwctrl_addr), 4864 base_addr + ID_ADDRESS, readl(&fid->zfwctrl_addr),
4903 base_addr + readl(&fid->zfwctrl_addr)); 4865 base_addr + readl(&fid->zfwctrl_addr));
4904 4866
4867 nchan = readl(&pt_zfwctrl->board_ctrl.n_channel);
4905 dev_info(&pdev->dev, "Cyclades-Z FW loaded: version = %x, ports = %u\n", 4868 dev_info(&pdev->dev, "Cyclades-Z FW loaded: version = %x, ports = %u\n",
4906 readl(&pt_zfwctrl->board_ctrl.fw_version), 4869 readl(&pt_zfwctrl->board_ctrl.fw_version), nchan);
4907 readl(&pt_zfwctrl->board_ctrl.n_channel));
4908 4870
4909 if (readl(&pt_zfwctrl->board_ctrl.n_channel) == 0) { 4871 if (nchan == 0) {
4910 dev_warn(&pdev->dev, "no Cyclades-Z ports were found. Please " 4872 dev_warn(&pdev->dev, "no Cyclades-Z ports were found. Please "
4911 "check the connection between the Z host card and the " 4873 "check the connection between the Z host card and the "
4912 "serial expanders.\n"); 4874 "serial expanders.\n");
4913 4875
4914 if (Z_FPGA_LOADED(ctl_addr)) 4876 if (__cyz_fpga_loaded(ctl_addr))
4915 plx_init(pdev, irq, ctl_addr); 4877 plx_init(pdev, irq, ctl_addr);
4916 4878
4917 dev_info(&pdev->dev, "Null number of ports detected. Board " 4879 dev_info(&pdev->dev, "Null number of ports detected. Board "
@@ -4932,9 +4894,7 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
4932 cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) | 4894 cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
4933 0x00030800UL); 4895 0x00030800UL);
4934 4896
4935 plx_init(pdev, irq, ctl_addr); 4897 return nchan;
4936
4937 return 0;
4938err_rel: 4898err_rel:
4939 release_firmware(fw); 4899 release_firmware(fw);
4940err: 4900err:
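
With the hardcoded port counts gone, cyz_load_fw() now reports capacity through its return value: negative errno (or zero channels) means failure, a positive value is the channel count read back from the loaded firmware, and the probe path sizes both Z variants from that instead of assuming 8 or ZE_V1_NPORTS. A small sketch of that count-or-errno convention, mirroring the caller's "retval <= 0" check below:

    #include <stdio.h>
    #include <errno.h>

    static int load_fw(int ok, int nchan)   /* illustrative stand-in */
    {
        if (!ok)
            return -EIO;
        return nchan;           /* number of channels found */
    }

    int main(void)
    {
        int retval = load_fw(1, 8);

        if (retval <= 0) {      /* mirrors: if (retval <= 0) goto err_unmap; */
            fprintf(stderr, "probe failed: %d\n", retval);
            return 1;
        }
        printf("nchan = %d\n", retval);
        return 0;
    }
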
@@ -4946,7 +4906,7 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
4946{ 4906{
4947 void __iomem *addr0 = NULL, *addr2 = NULL; 4907 void __iomem *addr0 = NULL, *addr2 = NULL;
4948 char *card_name = NULL; 4908 char *card_name = NULL;
4949 u32 mailbox; 4909 u32 uninitialized_var(mailbox);
4950 unsigned int device_id, nchan = 0, card_no, i; 4910 unsigned int device_id, nchan = 0, card_no, i;
4951 unsigned char plx_ver; 4911 unsigned char plx_ver;
4952 int retval, irq; 4912 int retval, irq;
@@ -5023,11 +4983,12 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
5023 } 4983 }
5024 4984
5025 /* Disable interrupts on the PLX before resetting it */ 4985 /* Disable interrupts on the PLX before resetting it */
5026 cy_writew(addr0 + 0x68, readw(addr0 + 0x68) & ~0x0900); 4986 cy_writew(&ctl_addr->intr_ctrl_stat,
4987 readw(&ctl_addr->intr_ctrl_stat) & ~0x0900);
5027 4988
5028 plx_init(pdev, irq, addr0); 4989 plx_init(pdev, irq, addr0);
5029 4990
5030 mailbox = (u32)readl(&ctl_addr->mail_box_0); 4991 mailbox = readl(&ctl_addr->mail_box_0);
5031 4992
5032 addr2 = ioremap_nocache(pci_resource_start(pdev, 2), 4993 addr2 = ioremap_nocache(pci_resource_start(pdev, 2),
5033 mailbox == ZE_V1 ? CyPCI_Ze_win : CyPCI_Zwin); 4994 mailbox == ZE_V1 ? CyPCI_Ze_win : CyPCI_Zwin);
@@ -5038,12 +4999,8 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
5038 4999
5039 if (mailbox == ZE_V1) { 5000 if (mailbox == ZE_V1) {
5040 card_name = "Cyclades-Ze"; 5001 card_name = "Cyclades-Ze";
5041
5042 readl(&ctl_addr->mail_box_0);
5043 nchan = ZE_V1_NPORTS;
5044 } else { 5002 } else {
5045 card_name = "Cyclades-8Zo"; 5003 card_name = "Cyclades-8Zo";
5046
5047#ifdef CY_PCI_DEBUG 5004#ifdef CY_PCI_DEBUG
5048 if (mailbox == ZO_V1) { 5005 if (mailbox == ZO_V1) {
5049 cy_writel(&ctl_addr->loc_addr_base, WIN_CREG); 5006 cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
@@ -5065,15 +5022,12 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
5065 */ 5022 */
5066 if ((mailbox == ZO_V1) || (mailbox == ZO_V2)) 5023 if ((mailbox == ZO_V1) || (mailbox == ZO_V2))
5067 cy_writel(addr2 + ID_ADDRESS, 0L); 5024 cy_writel(addr2 + ID_ADDRESS, 0L);
5068
5069 retval = cyz_load_fw(pdev, addr2, addr0, irq);
5070 if (retval)
5071 goto err_unmap;
5072 /* This must be a Cyclades-8Zo/PCI. The extendable
5073 version will have a different device_id and will
5074 be allocated its maximum number of ports. */
5075 nchan = 8;
5076 } 5025 }
5026
5027 retval = cyz_load_fw(pdev, addr2, addr0, irq);
5028 if (retval <= 0)
5029 goto err_unmap;
5030 nchan = retval;
5077 } 5031 }
5078 5032
5079 if ((cy_next_channel + nchan) > NR_PORTS) { 5033 if ((cy_next_channel + nchan) > NR_PORTS) {
@@ -5103,8 +5057,10 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
5103 dev_err(&pdev->dev, "could not allocate IRQ\n"); 5057 dev_err(&pdev->dev, "could not allocate IRQ\n");
5104 goto err_unmap; 5058 goto err_unmap;
5105 } 5059 }
5106 cy_card[card_no].num_chips = nchan / 4; 5060 cy_card[card_no].num_chips = nchan / CyPORTS_PER_CHIP;
5107 } else { 5061 } else {
5062 cy_card[card_no].hw_ver = mailbox;
5063 cy_card[card_no].num_chips = (unsigned int)-1;
5108#ifdef CONFIG_CYZ_INTR 5064#ifdef CONFIG_CYZ_INTR
5109 /* allocate IRQ only if board has an IRQ */ 5065 /* allocate IRQ only if board has an IRQ */
5110 if (irq != 0 && irq != 255) { 5066 if (irq != 0 && irq != 255) {
@@ -5117,15 +5073,15 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
5117 } 5073 }
5118 } 5074 }
5119#endif /* CONFIG_CYZ_INTR */ 5075#endif /* CONFIG_CYZ_INTR */
5120 cy_card[card_no].num_chips = (unsigned int)-1;
5121 } 5076 }
5122 5077
5123 /* set cy_card */ 5078 /* set cy_card */
5124 cy_card[card_no].base_addr = addr2; 5079 cy_card[card_no].base_addr = addr2;
5125 cy_card[card_no].ctl_addr = addr0; 5080 cy_card[card_no].ctl_addr.p9050 = addr0;
5126 cy_card[card_no].irq = irq; 5081 cy_card[card_no].irq = irq;
5127 cy_card[card_no].bus_index = 1; 5082 cy_card[card_no].bus_index = 1;
5128 cy_card[card_no].first_line = cy_next_channel; 5083 cy_card[card_no].first_line = cy_next_channel;
5084 cy_card[card_no].nports = nchan;
5129 retval = cy_init_card(&cy_card[card_no]); 5085 retval = cy_init_card(&cy_card[card_no]);
5130 if (retval) 5086 if (retval)
5131 goto err_null; 5087 goto err_null;
@@ -5138,17 +5094,20 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
5138 plx_ver = readb(addr2 + CyPLX_VER) & 0x0f; 5094 plx_ver = readb(addr2 + CyPLX_VER) & 0x0f;
5139 switch (plx_ver) { 5095 switch (plx_ver) {
5140 case PLX_9050: 5096 case PLX_9050:
5141
5142 cy_writeb(addr0 + 0x4c, 0x43); 5097 cy_writeb(addr0 + 0x4c, 0x43);
5143 break; 5098 break;
5144 5099
5145 case PLX_9060: 5100 case PLX_9060:
5146 case PLX_9080: 5101 case PLX_9080:
5147 default: /* Old boards, use PLX_9060 */ 5102 default: /* Old boards, use PLX_9060 */
5148 plx_init(pdev, irq, addr0); 5103 {
5149 cy_writew(addr0 + 0x68, readw(addr0 + 0x68) | 0x0900); 5104 struct RUNTIME_9060 __iomem *ctl_addr = addr0;
5105 plx_init(pdev, irq, ctl_addr);
5106 cy_writew(&ctl_addr->intr_ctrl_stat,
5107 readw(&ctl_addr->intr_ctrl_stat) | 0x0900);
5150 break; 5108 break;
5151 } 5109 }
5110 }
5152 } 5111 }
5153 5112
5154 dev_info(&pdev->dev, "%s/PCI #%d found: %d channels starting from " 5113 dev_info(&pdev->dev, "%s/PCI #%d found: %d channels starting from "
@@ -5179,22 +5138,23 @@ static void __devexit cy_pci_remove(struct pci_dev *pdev)
5179 unsigned int i; 5138 unsigned int i;
5180 5139
5181 /* non-Z with old PLX */ 5140 /* non-Z with old PLX */
5182 if (!IS_CYC_Z(*cinfo) && (readb(cinfo->base_addr + CyPLX_VER) & 0x0f) == 5141 if (!cy_is_Z(cinfo) && (readb(cinfo->base_addr + CyPLX_VER) & 0x0f) ==
5183 PLX_9050) 5142 PLX_9050)
5184 cy_writeb(cinfo->ctl_addr + 0x4c, 0); 5143 cy_writeb(cinfo->ctl_addr.p9050 + 0x4c, 0);
5185 else 5144 else
5186#ifndef CONFIG_CYZ_INTR 5145#ifndef CONFIG_CYZ_INTR
5187 if (!IS_CYC_Z(*cinfo)) 5146 if (!cy_is_Z(cinfo))
5188#endif 5147#endif
5189 cy_writew(cinfo->ctl_addr + 0x68, 5148 cy_writew(&cinfo->ctl_addr.p9060->intr_ctrl_stat,
5190 readw(cinfo->ctl_addr + 0x68) & ~0x0900); 5149 readw(&cinfo->ctl_addr.p9060->intr_ctrl_stat) &
5150 ~0x0900);
5191 5151
5192 iounmap(cinfo->base_addr); 5152 iounmap(cinfo->base_addr);
5193 if (cinfo->ctl_addr) 5153 if (cinfo->ctl_addr.p9050)
5194 iounmap(cinfo->ctl_addr); 5154 iounmap(cinfo->ctl_addr.p9050);
5195 if (cinfo->irq 5155 if (cinfo->irq
5196#ifndef CONFIG_CYZ_INTR 5156#ifndef CONFIG_CYZ_INTR
5197 && !IS_CYC_Z(*cinfo) 5157 && !cy_is_Z(cinfo)
5198#endif /* CONFIG_CYZ_INTR */ 5158#endif /* CONFIG_CYZ_INTR */
5199 ) 5159 )
5200 free_irq(cinfo->irq, cinfo); 5160 free_irq(cinfo->irq, cinfo);
@@ -5240,7 +5200,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
5240 (cur_jifs - info->idle_stats.recv_idle)/ 5200 (cur_jifs - info->idle_stats.recv_idle)/
5241 HZ, info->idle_stats.overruns, 5201 HZ, info->idle_stats.overruns,
5242 /* FIXME: double check locking */ 5202 /* FIXME: double check locking */
5243 (long)info->port.tty->ldisc.ops->num); 5203 (long)info->port.tty->ldisc->ops->num);
5244 else 5204 else
5245 seq_printf(m, "%3d %8lu %10lu %8lu " 5205 seq_printf(m, "%3d %8lu %10lu %8lu "
5246 "%10lu %8lu %9lu %6ld\n", 5206 "%10lu %8lu %9lu %6ld\n",
@@ -5386,11 +5346,11 @@ static void __exit cy_cleanup_module(void)
5386 /* clear interrupt */ 5346 /* clear interrupt */
5387 cy_writeb(card->base_addr + Cy_ClrIntr, 0); 5347 cy_writeb(card->base_addr + Cy_ClrIntr, 0);
5388 iounmap(card->base_addr); 5348 iounmap(card->base_addr);
5389 if (card->ctl_addr) 5349 if (card->ctl_addr.p9050)
5390 iounmap(card->ctl_addr); 5350 iounmap(card->ctl_addr.p9050);
5391 if (card->irq 5351 if (card->irq
5392#ifndef CONFIG_CYZ_INTR 5352#ifndef CONFIG_CYZ_INTR
5393 && !IS_CYC_Z(*card) 5353 && !cy_is_Z(card)
5394#endif /* CONFIG_CYZ_INTR */ 5354#endif /* CONFIG_CYZ_INTR */
5395 ) 5355 )
5396 free_irq(card->irq, card); 5356 free_irq(card->irq, card);
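
A note on the cyclades control-address change above: the old untyped ctl_addr becomes a union, so PLX9050 boards keep a raw __iomem pointer (poked at fixed offsets such as 0x4c) while PLX9060/9080 boards get typed access to struct RUNTIME_9060 fields such as intr_ctrl_stat. A minimal sketch of the pattern; the union shape here is inferred from the accesses in the diff, not copied from cyclades.h:

	/* Sketch only: struct RUNTIME_9060 and intr_ctrl_stat appear in the
	 * diff above; the union name and helper are illustrative. */
	union cyclades_ctl_addr {
		void __iomem *p9050;			/* PLX9050: raw offsets */
		struct RUNTIME_9060 __iomem *p9060;	/* PLX9060/9080: typed regs */
	};

	static void example_mask_9060_irq(union cyclades_ctl_addr *ctl)
	{
		cy_writew(&ctl->p9060->intr_ctrl_stat,
			  readw(&ctl->p9060->intr_ctrl_stat) & ~0x0900);
	}
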
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index af7c13ca949..abef1f7d84f 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -745,7 +745,7 @@ static int epca_carrier_raised(struct tty_port *port)
745 return 0; 745 return 0;
746} 746}
747 747
748static void epca_raise_dtr_rts(struct tty_port *port) 748static void epca_dtr_rts(struct tty_port *port, int onoff)
749{ 749{
750} 750}
751 751
@@ -925,7 +925,7 @@ static const struct tty_operations pc_ops = {
925 925
926static const struct tty_port_operations epca_port_ops = { 926static const struct tty_port_operations epca_port_ops = {
927 .carrier_raised = epca_carrier_raised, 927 .carrier_raised = epca_carrier_raised,
928 .raise_dtr_rts = epca_raise_dtr_rts, 928 .dtr_rts = epca_dtr_rts,
929}; 929};
930 930
931static int info_open(struct tty_struct *tty, struct file *filp) 931static int info_open(struct tty_struct *tty, struct file *filp)
@@ -1518,7 +1518,7 @@ static void doevent(int crd)
1518 if (event & MODEMCHG_IND) { 1518 if (event & MODEMCHG_IND) {
1519 /* A modem signal change has been indicated */ 1519 /* A modem signal change has been indicated */
1520 ch->imodem = mstat; 1520 ch->imodem = mstat;
1521 if (test_bit(ASYNC_CHECK_CD, &ch->port.flags)) { 1521 if (test_bit(ASYNCB_CHECK_CD, &ch->port.flags)) {
1522 /* We are now receiving dcd */ 1522 /* We are now receiving dcd */
1523 if (mstat & ch->dcd) 1523 if (mstat & ch->dcd)
1524 wake_up_interruptible(&ch->port.open_wait); 1524 wake_up_interruptible(&ch->port.open_wait);
@@ -1765,9 +1765,9 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
1765 * that the driver will wait on carrier detect. 1765 * that the driver will wait on carrier detect.
1766 */ 1766 */
1767 if (ts->c_cflag & CLOCAL) 1767 if (ts->c_cflag & CLOCAL)
1768 clear_bit(ASYNC_CHECK_CD, &ch->port.flags); 1768 clear_bit(ASYNCB_CHECK_CD, &ch->port.flags);
1769 else 1769 else
1770 set_bit(ASYNC_CHECK_CD, &ch->port.flags); 1770 set_bit(ASYNCB_CHECK_CD, &ch->port.flags);
1771 mval = ch->m_dtr | ch->m_rts; 1771 mval = ch->m_dtr | ch->m_rts;
1772 } /* End CBAUD not detected */ 1772 } /* End CBAUD not detected */
1773 iflag = termios2digi_i(ch, ts->c_iflag); 1773 iflag = termios2digi_i(ch, ts->c_iflag);
@@ -2114,8 +2114,8 @@ static int pc_ioctl(struct tty_struct *tty, struct file *file,
2114 tty_wait_until_sent(tty, 0); 2114 tty_wait_until_sent(tty, 0);
2115 } else { 2115 } else {
2116 /* ldisc lock already held in ioctl */ 2116 /* ldisc lock already held in ioctl */
2117 if (tty->ldisc.ops->flush_buffer) 2117 if (tty->ldisc->ops->flush_buffer)
2118 tty->ldisc.ops->flush_buffer(tty); 2118 tty->ldisc->ops->flush_buffer(tty);
2119 } 2119 }
2120 unlock_kernel(); 2120 unlock_kernel();
2121 /* Fall Thru */ 2121 /* Fall Thru */
@@ -2244,7 +2244,8 @@ static void do_softint(struct work_struct *work)
2244 if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) { 2244 if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) {
2245 tty_hangup(tty); 2245 tty_hangup(tty);
2246 wake_up_interruptible(&ch->port.open_wait); 2246 wake_up_interruptible(&ch->port.open_wait);
2247 clear_bit(ASYNC_NORMAL_ACTIVE, &ch->port.flags); 2247 clear_bit(ASYNCB_NORMAL_ACTIVE,
2248 &ch->port.flags);
2248 } 2249 }
2249 } 2250 }
2250 tty_kref_put(tty); 2251 tty_kref_put(tty);
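
The epca hunks above are one instance of a tty_port_operations change that recurs throughout this patch (isicom, istallion, mxser, synclink_cs, rocket, stallion, synclink, synclink_gt, synclinkmp): the raise-only raise_dtr_rts() callback becomes dtr_rts(port, onoff), so the tty core can both assert and drop the modem lines through a single hook. A minimal sketch of the converted shape; example_port, example_carrier_raised and the my_*_mctrl() helpers are hypothetical stand-ins for a driver's private state and register writes:

	struct example_port {
		struct tty_port port;
		/* hardware-specific state ... */
	};

	static void example_dtr_rts(struct tty_port *port, int on)
	{
		struct example_port *p =
			container_of(port, struct example_port, port);

		if (on)
			my_set_mctrl(p, TIOCM_DTR | TIOCM_RTS);
		else
			my_clear_mctrl(p, TIOCM_DTR | TIOCM_RTS);
	}

	static const struct tty_port_operations example_port_ops = {
		.carrier_raised	= example_carrier_raised,
		.dtr_rts	= example_dtr_rts,
	};
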
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 340ba4f9dc5..4a9f3492b92 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -224,7 +224,7 @@ static void hpet_timer_set_irq(struct hpet_dev *devp)
224 break; 224 break;
225 } 225 }
226 226
227 gsi = acpi_register_gsi(irq, ACPI_LEVEL_SENSITIVE, 227 gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,
228 ACPI_ACTIVE_LOW); 228 ACPI_ACTIVE_LOW);
229 if (gsi > 0) 229 if (gsi > 0)
230 break; 230 break;
@@ -939,7 +939,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
939 irqp = &res->data.extended_irq; 939 irqp = &res->data.extended_irq;
940 940
941 for (i = 0; i < irqp->interrupt_count; i++) { 941 for (i = 0; i < irqp->interrupt_count; i++) {
942 irq = acpi_register_gsi(irqp->interrupts[i], 942 irq = acpi_register_gsi(NULL, irqp->interrupts[i],
943 irqp->triggering, irqp->polarity); 943 irqp->triggering, irqp->polarity);
944 if (irq < 0) 944 if (irq < 0)
945 return AE_ERROR; 945 return AE_ERROR;
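
The hpet hunks track an acpi_register_gsi() interface change: the function now takes the requesting struct device * as its first argument, with NULL allowed when, as here, no device is at hand. Assuming the post-change prototype, a call site looks like this sketch:

	/* Sketch assuming the new prototype:
	 *   int acpi_register_gsi(struct device *dev, u32 gsi,
	 *                         int triggering, int polarity);
	 */
	irq = acpi_register_gsi(dev, gsi, ACPI_LEVEL_SENSITIVE,
				ACPI_ACTIVE_LOW);
	if (irq < 0)
		return irq;
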
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 5fab6470f4b..f4b3f7293fe 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -88,7 +88,7 @@ config HW_RANDOM_N2RNG
88 88
89config HW_RANDOM_VIA 89config HW_RANDOM_VIA
90 tristate "VIA HW Random Number Generator support" 90 tristate "VIA HW Random Number Generator support"
91 depends on HW_RANDOM && X86_32 91 depends on HW_RANDOM && X86
92 default HW_RANDOM 92 default HW_RANDOM
93 ---help--- 93 ---help---
94 This driver provides kernel-side support for the Random Number 94 This driver provides kernel-side support for the Random Number
@@ -148,3 +148,15 @@ config HW_RANDOM_VIRTIO
148 148
149 To compile this driver as a module, choose M here: the 149 To compile this driver as a module, choose M here: the
150 module will be called virtio-rng. If unsure, say N. 150 module will be called virtio-rng. If unsure, say N.
151
152config HW_RANDOM_MXC_RNGA
153 tristate "Freescale i.MX RNGA Random Number Generator"
154 depends on HW_RANDOM && ARCH_HAS_RNGA
155 ---help---
156 This driver provides kernel-side support for the Random Number
157 Generator hardware found on Freescale i.MX processors.
158
159 To compile this driver as a module, choose M here: the
160 module will be called mxc-rnga.
161
162 If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index e81d21a5f28..fd1ecd2f673 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
15obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o 15obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
16obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o 16obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
17obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o 17obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
18obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
new file mode 100644
index 00000000000..187c6be80f4
--- /dev/null
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -0,0 +1,247 @@
1/*
2 * RNG driver for Freescale RNGA
3 *
4 * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
5 * Author: Alan Carvalho de Assis <acassis@gmail.com>
6 */
7
8/*
9 * The code contained herein is licensed under the GNU General Public
10 * License. You may obtain a copy of the GNU General Public License
11 * Version 2 or later at the following locations:
12 *
13 * http://www.opensource.org/licenses/gpl-license.html
14 * http://www.gnu.org/copyleft/gpl.html
15 *
16 * This driver is based on other RNG drivers.
17 */
18
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/clk.h>
23#include <linux/err.h>
24#include <linux/ioport.h>
25#include <linux/platform_device.h>
26#include <linux/hw_random.h>
27#include <linux/io.h>
28
29/* RNGA Registers */
30#define RNGA_CONTROL 0x00
31#define RNGA_STATUS 0x04
32#define RNGA_ENTROPY 0x08
33#define RNGA_OUTPUT_FIFO 0x0c
34#define RNGA_MODE 0x10
35#define RNGA_VERIFICATION_CONTROL 0x14
36#define RNGA_OSC_CONTROL_COUNTER 0x18
37#define RNGA_OSC1_COUNTER 0x1c
38#define RNGA_OSC2_COUNTER 0x20
39#define RNGA_OSC_COUNTER_STATUS 0x24
40
41/* RNGA Registers Range */
42#define RNG_ADDR_RANGE 0x28
43
44/* RNGA Control Register */
45#define RNGA_CONTROL_SLEEP 0x00000010
46#define RNGA_CONTROL_CLEAR_INT 0x00000008
47#define RNGA_CONTROL_MASK_INTS 0x00000004
48#define RNGA_CONTROL_HIGH_ASSURANCE 0x00000002
49#define RNGA_CONTROL_GO 0x00000001
50
51#define RNGA_STATUS_LEVEL_MASK 0x0000ff00
52
53/* RNGA Status Register */
54#define RNGA_STATUS_OSC_DEAD 0x80000000
55#define RNGA_STATUS_SLEEP 0x00000010
56#define RNGA_STATUS_ERROR_INT 0x00000008
57#define RNGA_STATUS_FIFO_UNDERFLOW 0x00000004
58#define RNGA_STATUS_LAST_READ_STATUS 0x00000002
59#define RNGA_STATUS_SECURITY_VIOLATION 0x00000001
60
61static struct platform_device *rng_dev;
62
63static int mxc_rnga_data_present(struct hwrng *rng)
64{
65 int level;
66 void __iomem *rng_base = (void __iomem *)rng->priv;
67
 68	/* how many random numbers are in the FIFO? [0-16] */
69 level = ((__raw_readl(rng_base + RNGA_STATUS) &
70 RNGA_STATUS_LEVEL_MASK) >> 8);
71
72 return level > 0 ? 1 : 0;
73}
74
75static int mxc_rnga_data_read(struct hwrng *rng, u32 * data)
76{
77 int err;
78 u32 ctrl;
79 void __iomem *rng_base = (void __iomem *)rng->priv;
80
81 /* retrieve a random number from FIFO */
82 *data = __raw_readl(rng_base + RNGA_OUTPUT_FIFO);
83
84 /* some error while reading this random number? */
85 err = __raw_readl(rng_base + RNGA_STATUS) & RNGA_STATUS_ERROR_INT;
86
 87	/* if error: clear the error interrupt, but don't return a random number */
88 if (err) {
89 dev_dbg(&rng_dev->dev, "Error while reading random number!\n");
90 ctrl = __raw_readl(rng_base + RNGA_CONTROL);
91 __raw_writel(ctrl | RNGA_CONTROL_CLEAR_INT,
92 rng_base + RNGA_CONTROL);
93 return 0;
94 } else
95 return 4;
96}
97
98static int mxc_rnga_init(struct hwrng *rng)
99{
100 u32 ctrl, osc;
101 void __iomem *rng_base = (void __iomem *)rng->priv;
102
103 /* wake up */
104 ctrl = __raw_readl(rng_base + RNGA_CONTROL);
105 __raw_writel(ctrl & ~RNGA_CONTROL_SLEEP, rng_base + RNGA_CONTROL);
106
107 /* verify if oscillator is working */
108 osc = __raw_readl(rng_base + RNGA_STATUS);
109 if (osc & RNGA_STATUS_OSC_DEAD) {
110 dev_err(&rng_dev->dev, "RNGA Oscillator is dead!\n");
111 return -ENODEV;
112 }
113
114 /* go running */
115 ctrl = __raw_readl(rng_base + RNGA_CONTROL);
116 __raw_writel(ctrl | RNGA_CONTROL_GO, rng_base + RNGA_CONTROL);
117
118 return 0;
119}
120
121static void mxc_rnga_cleanup(struct hwrng *rng)
122{
123 u32 ctrl;
124 void __iomem *rng_base = (void __iomem *)rng->priv;
125
126 ctrl = __raw_readl(rng_base + RNGA_CONTROL);
127
128 /* stop rnga */
129 __raw_writel(ctrl & ~RNGA_CONTROL_GO, rng_base + RNGA_CONTROL);
130}
131
132static struct hwrng mxc_rnga = {
133 .name = "mxc-rnga",
134 .init = mxc_rnga_init,
135 .cleanup = mxc_rnga_cleanup,
136 .data_present = mxc_rnga_data_present,
137 .data_read = mxc_rnga_data_read
138};
139
140static int __init mxc_rnga_probe(struct platform_device *pdev)
141{
142 int err = -ENODEV;
143 struct clk *clk;
144 struct resource *res, *mem;
145 void __iomem *rng_base = NULL;
146
147 if (rng_dev)
148 return -EBUSY;
149
150 clk = clk_get(&pdev->dev, "rng");
151 if (IS_ERR(clk)) {
152 dev_err(&pdev->dev, "Could not get rng_clk!\n");
153 err = PTR_ERR(clk);
154 goto out;
155 }
156
157 clk_enable(clk);
158
159 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
160 if (!res) {
161 err = -ENOENT;
162 goto err_region;
163 }
164
165 mem = request_mem_region(res->start, resource_size(res), pdev->name);
166 if (mem == NULL) {
167 err = -EBUSY;
168 goto err_region;
169 }
170
171 rng_base = ioremap(res->start, resource_size(res));
172 if (!rng_base) {
173 err = -ENOMEM;
174 goto err_ioremap;
175 }
176
177 mxc_rnga.priv = (unsigned long)rng_base;
178
179 err = hwrng_register(&mxc_rnga);
180 if (err) {
181 dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err);
182 goto err_register;
183 }
184
185 rng_dev = pdev;
186
187 dev_info(&pdev->dev, "MXC RNGA Registered.\n");
188
189 return 0;
190
191err_register:
192 iounmap(rng_base);
193 rng_base = NULL;
194
195err_ioremap:
196 release_mem_region(res->start, resource_size(res));
197
198err_region:
199 clk_disable(clk);
200 clk_put(clk);
201
202out:
203 return err;
204}
205
206static int __exit mxc_rnga_remove(struct platform_device *pdev)
207{
208 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
209 void __iomem *rng_base = (void __iomem *)mxc_rnga.priv;
210 struct clk *clk = clk_get(&pdev->dev, "rng");
211
212 hwrng_unregister(&mxc_rnga);
213
214 iounmap(rng_base);
215
216 release_mem_region(res->start, resource_size(res));
217
218 clk_disable(clk);
219 clk_put(clk);
220
221 return 0;
222}
223
224static struct platform_driver mxc_rnga_driver = {
225 .driver = {
226 .name = "mxc_rnga",
227 .owner = THIS_MODULE,
228 },
229 .remove = __exit_p(mxc_rnga_remove),
230};
231
232static int __init mod_init(void)
233{
234 return platform_driver_probe(&mxc_rnga_driver, mxc_rnga_probe);
235}
236
237static void __exit mod_exit(void)
238{
239 platform_driver_unregister(&mxc_rnga_driver);
240}
241
242module_init(mod_init);
243module_exit(mod_exit);
244
245MODULE_AUTHOR("Freescale Semiconductor, Inc.");
246MODULE_DESCRIPTION("H/W RNGA driver for i.MX");
247MODULE_LICENSE("GPL");
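
Once a driver such as mxc-rnga above registers with the hwrng core, entropy is consumed through the core's character device rather than through a driver-specific node. A minimal userspace consumer, assuming the usual /dev/hwrng node exposed by rng-core:

	/* Userspace sketch: read 16 random bytes from the hwrng core device. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char buf[16];
		int i;
		int fd = open("/dev/hwrng", O_RDONLY);

		if (fd < 0 || read(fd, buf, sizeof(buf)) != sizeof(buf)) {
			perror("hwrng");
			return 1;
		}
		for (i = 0; i < (int)sizeof(buf); i++)
			printf("%02x", buf[i]);
		putchar('\n');
		close(fd);
		return 0;
	}

Note also that the driver fills in no .probe member and instead hands its __init probe routine to platform_driver_probe(): the usual idiom when the device cannot be hot-plugged, since it lets the probe code be discarded after boot.
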
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 538313f9e7a..00dd3de1be5 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -89,7 +89,7 @@ static struct hwrng omap_rng_ops = {
89 .data_read = omap_rng_data_read, 89 .data_read = omap_rng_data_read,
90}; 90};
91 91
92static int __init omap_rng_probe(struct platform_device *pdev) 92static int __devinit omap_rng_probe(struct platform_device *pdev)
93{ 93{
94 struct resource *res, *mem; 94 struct resource *res, *mem;
95 int ret; 95 int ret;
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index dcd352ad0e7..a94e930575f 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -88,9 +88,9 @@ static struct hwrng timeriomem_rng_ops = {
88 .priv = 0, 88 .priv = 0,
89}; 89};
90 90
91static int __init timeriomem_rng_probe(struct platform_device *pdev) 91static int __devinit timeriomem_rng_probe(struct platform_device *pdev)
92{ 92{
93 struct resource *res, *mem; 93 struct resource *res;
94 int ret; 94 int ret;
95 95
96 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 96 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -98,21 +98,12 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev)
98 if (!res) 98 if (!res)
99 return -ENOENT; 99 return -ENOENT;
100 100
101 mem = request_mem_region(res->start, res->end - res->start + 1,
102 pdev->name);
103 if (mem == NULL)
104 return -EBUSY;
105
106 dev_set_drvdata(&pdev->dev, mem);
107
108 timeriomem_rng_data = pdev->dev.platform_data; 101 timeriomem_rng_data = pdev->dev.platform_data;
109 102
110 timeriomem_rng_data->address = ioremap(res->start, 103 timeriomem_rng_data->address = ioremap(res->start,
111 res->end - res->start + 1); 104 res->end - res->start + 1);
112 if (!timeriomem_rng_data->address) { 105 if (!timeriomem_rng_data->address)
113 ret = -ENOMEM; 106 return -EIO;
114 goto err_ioremap;
115 }
116 107
117 if (timeriomem_rng_data->period != 0 108 if (timeriomem_rng_data->period != 0
118 && usecs_to_jiffies(timeriomem_rng_data->period) > 0) { 109 && usecs_to_jiffies(timeriomem_rng_data->period) > 0) {
@@ -125,7 +116,7 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev)
125 116
126 ret = hwrng_register(&timeriomem_rng_ops); 117 ret = hwrng_register(&timeriomem_rng_ops);
127 if (ret) 118 if (ret)
128 goto err_register; 119 goto failed;
129 120
130 dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n", 121 dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
131 timeriomem_rng_data->address, 122 timeriomem_rng_data->address,
@@ -133,24 +124,19 @@ static int __init timeriomem_rng_probe(struct platform_device *pdev)
133 124
134 return 0; 125 return 0;
135 126
136err_register: 127failed:
137 dev_err(&pdev->dev, "problem registering\n"); 128 dev_err(&pdev->dev, "problem registering\n");
138 iounmap(timeriomem_rng_data->address); 129 iounmap(timeriomem_rng_data->address);
139err_ioremap:
140 release_resource(mem);
141 130
142 return ret; 131 return ret;
143} 132}
144 133
145static int __devexit timeriomem_rng_remove(struct platform_device *pdev) 134static int __devexit timeriomem_rng_remove(struct platform_device *pdev)
146{ 135{
147 struct resource *mem = dev_get_drvdata(&pdev->dev);
148
149 del_timer_sync(&timeriomem_rng_timer); 136 del_timer_sync(&timeriomem_rng_timer);
150 hwrng_unregister(&timeriomem_rng_ops); 137 hwrng_unregister(&timeriomem_rng_ops);
151 138
152 iounmap(timeriomem_rng_data->address); 139 iounmap(timeriomem_rng_data->address);
153 release_resource(mem);
154 140
155 return 0; 141 return 0;
156} 142}
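
The timeriomem-rng cleanup drops the driver's private request_mem_region() call (and the drvdata bookkeeping that tracked it) but still computes the mapping length by hand as res->end - res->start + 1. For what it's worth, the resource_size() helper, already used by the mxc-rnga driver above, expresses the same length without the off-by-one risk; an equivalent sketch:

	/* Equivalent length computation via the standard helper. */
	timeriomem_rng_data->address = ioremap(res->start, resource_size(res));
	if (!timeriomem_rng_data->address)
		return -EIO;
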
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 4e9573c1d39..794aacb715c 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -132,6 +132,19 @@ static int via_rng_init(struct hwrng *rng)
132 struct cpuinfo_x86 *c = &cpu_data(0); 132 struct cpuinfo_x86 *c = &cpu_data(0);
133 u32 lo, hi, old_lo; 133 u32 lo, hi, old_lo;
134 134
135 /* VIA Nano CPUs don't have the MSR_VIA_RNG anymore. The RNG
136 * is always enabled if CPUID rng_en is set. There is no
137 * RNG configuration like it used to be the case in this
138 * register */
139 if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
140 if (!cpu_has_xstore_enabled) {
141 printk(KERN_ERR PFX "can't enable hardware RNG "
142 "if XSTORE is not enabled\n");
143 return -ENODEV;
144 }
145 return 0;
146 }
147
135 /* Control the RNG via MSR. Tread lightly and pay very close 148 /* Control the RNG via MSR. Tread lightly and pay very close
136 * close attention to values written, as the reserved fields 149 * close attention to values written, as the reserved fields
137 * are documented to be "undefined and unpredictable"; but it 150 * are documented to be "undefined and unpredictable"; but it
@@ -205,5 +218,5 @@ static void __exit mod_exit(void)
205module_init(mod_init); 218module_init(mod_init);
206module_exit(mod_exit); 219module_exit(mod_exit);
207 220
208MODULE_DESCRIPTION("H/W RNG driver for VIA chipsets"); 221MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock");
209MODULE_LICENSE("GPL"); 222MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 86e83f88313..32216b62324 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -35,13 +35,13 @@ static DECLARE_COMPLETION(have_data);
35 35
36static void random_recv_done(struct virtqueue *vq) 36static void random_recv_done(struct virtqueue *vq)
37{ 37{
38 int len; 38 unsigned int len;
39 39
40 /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ 40 /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
41 if (!vq->vq_ops->get_buf(vq, &len)) 41 if (!vq->vq_ops->get_buf(vq, &len))
42 return; 42 return;
43 43
44 data_left = len / sizeof(random_data[0]); 44 data_left += len;
45 complete(&have_data); 45 complete(&have_data);
46} 46}
47 47
@@ -49,7 +49,7 @@ static void register_buffer(void)
49{ 49{
50 struct scatterlist sg; 50 struct scatterlist sg;
51 51
52 sg_init_one(&sg, random_data, RANDOM_DATA_SIZE); 52 sg_init_one(&sg, random_data+data_left, RANDOM_DATA_SIZE-data_left);
53 /* There should always be room for one buffer. */ 53 /* There should always be room for one buffer. */
54 if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) != 0) 54 if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) != 0)
55 BUG(); 55 BUG();
@@ -59,24 +59,32 @@ static void register_buffer(void)
59/* At least we don't udelay() in a loop like some other drivers. */ 59/* At least we don't udelay() in a loop like some other drivers. */
60static int virtio_data_present(struct hwrng *rng, int wait) 60static int virtio_data_present(struct hwrng *rng, int wait)
61{ 61{
62 if (data_left) 62 if (data_left >= sizeof(u32))
63 return 1; 63 return 1;
64 64
65again:
65 if (!wait) 66 if (!wait)
66 return 0; 67 return 0;
67 68
68 wait_for_completion(&have_data); 69 wait_for_completion(&have_data);
70
71 /* Not enough? Re-register. */
72 if (unlikely(data_left < sizeof(u32))) {
73 register_buffer();
74 goto again;
75 }
76
69 return 1; 77 return 1;
70} 78}
71 79
72/* virtio_data_present() must have succeeded before this is called. */ 80/* virtio_data_present() must have succeeded before this is called. */
73static int virtio_data_read(struct hwrng *rng, u32 *data) 81static int virtio_data_read(struct hwrng *rng, u32 *data)
74{ 82{
75 BUG_ON(!data_left); 83 BUG_ON(data_left < sizeof(u32));
76 84 data_left -= sizeof(u32);
77 *data = random_data[--data_left]; 85 *data = random_data[data_left / 4];
78 86
79 if (!data_left) { 87 if (data_left < sizeof(u32)) {
80 init_completion(&have_data); 88 init_completion(&have_data);
81 register_buffer(); 89 register_buffer();
82 } 90 }
@@ -94,13 +102,13 @@ static int virtrng_probe(struct virtio_device *vdev)
94 int err; 102 int err;
95 103
96 /* We expect a single virtqueue. */ 104 /* We expect a single virtqueue. */
97 vq = vdev->config->find_vq(vdev, 0, random_recv_done); 105 vq = virtio_find_single_vq(vdev, random_recv_done, "input");
98 if (IS_ERR(vq)) 106 if (IS_ERR(vq))
99 return PTR_ERR(vq); 107 return PTR_ERR(vq);
100 108
101 err = hwrng_register(&virtio_hwrng); 109 err = hwrng_register(&virtio_hwrng);
102 if (err) { 110 if (err) {
103 vdev->config->del_vq(vq); 111 vdev->config->del_vqs(vdev);
104 return err; 112 return err;
105 } 113 }
106 114
@@ -112,7 +120,7 @@ static void virtrng_remove(struct virtio_device *vdev)
112{ 120{
113 vdev->config->reset(vdev); 121 vdev->config->reset(vdev);
114 hwrng_unregister(&virtio_hwrng); 122 hwrng_unregister(&virtio_hwrng);
115 vdev->config->del_vq(vq); 123 vdev->config->del_vqs(vdev);
116} 124}
117 125
118static struct virtio_device_id id_table[] = { 126static struct virtio_device_id id_table[] = {
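
The virtio-rng hunks follow a virtio core API change: the per-queue find_vq()/del_vq() config hooks give way to virtio_find_single_vq(), which also names the queue, and a del_vqs() that tears down all of a device's queues at once. Assuming those helpers, the probe/remove pairing reduces to this sketch (example_recv_done is a hypothetical callback):

	static struct virtqueue *vq;

	static int example_probe(struct virtio_device *vdev)
	{
		vq = virtio_find_single_vq(vdev, example_recv_done, "input");
		return IS_ERR(vq) ? PTR_ERR(vq) : 0;
	}

	static void example_remove(struct virtio_device *vdev)
	{
		vdev->config->reset(vdev);	/* quiesce the device first */
		vdev->config->del_vqs(vdev);	/* frees every vq at once */
	}
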
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c
index 0061e18aff6..0d10b89218e 100644
--- a/drivers/char/ip2/i2lib.c
+++ b/drivers/char/ip2/i2lib.c
@@ -868,11 +868,11 @@ i2Input(i2ChanStrPtr pCh)
868 amountToMove = count; 868 amountToMove = count;
869 } 869 }
870 // Move the first block 870 // Move the first block
871 pCh->pTTY->ldisc.ops->receive_buf( pCh->pTTY, 871 pCh->pTTY->ldisc->ops->receive_buf( pCh->pTTY,
872 &(pCh->Ibuf[stripIndex]), NULL, amountToMove ); 872 &(pCh->Ibuf[stripIndex]), NULL, amountToMove );
873 // If we needed to wrap, do the second data move 873 // If we needed to wrap, do the second data move
874 if (count > amountToMove) { 874 if (count > amountToMove) {
875 pCh->pTTY->ldisc.ops->receive_buf( pCh->pTTY, 875 pCh->pTTY->ldisc->ops->receive_buf( pCh->pTTY,
876 pCh->Ibuf, NULL, count - amountToMove ); 876 pCh->Ibuf, NULL, count - amountToMove );
877 } 877 }
878 // Bump and wrap the stripIndex all at once by the amount of data read. This 878 // Bump and wrap the stripIndex all at once by the amount of data read. This
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index afd9247cf08..517271c762e 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -1315,8 +1315,8 @@ static inline void isig(int sig, struct tty_struct *tty, int flush)
1315 if (tty->pgrp) 1315 if (tty->pgrp)
1316 kill_pgrp(tty->pgrp, sig, 1); 1316 kill_pgrp(tty->pgrp, sig, 1);
1317 if (flush || !L_NOFLSH(tty)) { 1317 if (flush || !L_NOFLSH(tty)) {
1318 if ( tty->ldisc.ops->flush_buffer ) 1318 if ( tty->ldisc->ops->flush_buffer )
1319 tty->ldisc.ops->flush_buffer(tty); 1319 tty->ldisc->ops->flush_buffer(tty);
1320 i2InputFlush( tty->driver_data ); 1320 i2InputFlush( tty->driver_data );
1321 } 1321 }
1322} 1322}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index aa83a0865ec..09050797c76 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2856,6 +2856,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2856 /* Assume a single IPMB channel at zero. */ 2856 /* Assume a single IPMB channel at zero. */
2857 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 2857 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2858 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; 2858 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2859 intf->curr_channel = IPMI_MAX_CHANNELS;
2859 } 2860 }
2860 2861
2861 if (rv == 0) 2862 if (rv == 0)
@@ -3648,13 +3649,13 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
3648 } 3649 }
3649 3650
3650 /* 3651 /*
3651 ** We need to make sure the channels have been initialized. 3652 * We need to make sure the channels have been initialized.
3652 ** The channel_handler routine will set the "curr_channel" 3653 * The channel_handler routine will set the "curr_channel"
3653 ** equal to or greater than IPMI_MAX_CHANNELS when all the 3654 * equal to or greater than IPMI_MAX_CHANNELS when all the
3654 ** channels for this interface have been initialized. 3655 * channels for this interface have been initialized.
3655 */ 3656 */
3656 if (intf->curr_channel < IPMI_MAX_CHANNELS) { 3657 if (intf->curr_channel < IPMI_MAX_CHANNELS) {
3657 requeue = 1; /* Just put the message back for now */ 3658 requeue = 0; /* Throw the message away */
3658 goto out; 3659 goto out;
3659 } 3660 }
3660 3661
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index a59eac584d1..4d745a89504 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -329,7 +329,7 @@ static inline void drop_rts(struct isi_port *port)
329 329
330/* card->lock MUST NOT be held */ 330/* card->lock MUST NOT be held */
331 331
332static void isicom_raise_dtr_rts(struct tty_port *port) 332static void isicom_dtr_rts(struct tty_port *port, int on)
333{ 333{
334 struct isi_port *ip = container_of(port, struct isi_port, port); 334 struct isi_port *ip = container_of(port, struct isi_port, port);
335 struct isi_board *card = ip->card; 335 struct isi_board *card = ip->card;
@@ -339,10 +339,17 @@ static void isicom_raise_dtr_rts(struct tty_port *port)
339 if (!lock_card(card)) 339 if (!lock_card(card))
340 return; 340 return;
341 341
342 outw(0x8000 | (channel << card->shift_count) | 0x02, base); 342 if (on) {
343 outw(0x0f04, base); 343 outw(0x8000 | (channel << card->shift_count) | 0x02, base);
344 InterruptTheCard(base); 344 outw(0x0f04, base);
345 ip->status |= (ISI_DTR | ISI_RTS); 345 InterruptTheCard(base);
346 ip->status |= (ISI_DTR | ISI_RTS);
347 } else {
348 outw(0x8000 | (channel << card->shift_count) | 0x02, base);
349 outw(0x0C04, base);
350 InterruptTheCard(base);
351 ip->status &= ~(ISI_DTR | ISI_RTS);
352 }
346 unlock_card(card); 353 unlock_card(card);
347} 354}
348 355
@@ -1339,7 +1346,7 @@ static const struct tty_operations isicom_ops = {
1339 1346
1340static const struct tty_port_operations isicom_port_ops = { 1347static const struct tty_port_operations isicom_port_ops = {
1341 .carrier_raised = isicom_carrier_raised, 1348 .carrier_raised = isicom_carrier_raised,
1342 .raise_dtr_rts = isicom_raise_dtr_rts, 1349 .dtr_rts = isicom_dtr_rts,
1343}; 1350};
1344 1351
1345static int __devinit reset_card(struct pci_dev *pdev, 1352static int __devinit reset_card(struct pci_dev *pdev,
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index fff19f7e29d..e18800c400b 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -1140,14 +1140,14 @@ static int stli_carrier_raised(struct tty_port *port)
1140 return (portp->sigs & TIOCM_CD) ? 1 : 0; 1140 return (portp->sigs & TIOCM_CD) ? 1 : 0;
1141} 1141}
1142 1142
1143static void stli_raise_dtr_rts(struct tty_port *port) 1143static void stli_dtr_rts(struct tty_port *port, int on)
1144{ 1144{
1145 struct stliport *portp = container_of(port, struct stliport, port); 1145 struct stliport *portp = container_of(port, struct stliport, port);
1146 struct stlibrd *brdp = stli_brds[portp->brdnr]; 1146 struct stlibrd *brdp = stli_brds[portp->brdnr];
1147 stli_mkasysigs(&portp->asig, 1, 1); 1147 stli_mkasysigs(&portp->asig, on, on);
1148 if (stli_cmdwait(brdp, portp, A_SETSIGNALS, &portp->asig, 1148 if (stli_cmdwait(brdp, portp, A_SETSIGNALS, &portp->asig,
1149 sizeof(asysigs_t), 0) < 0) 1149 sizeof(asysigs_t), 0) < 0)
1150 printk(KERN_WARNING "istallion: dtr raise failed.\n"); 1150 printk(KERN_WARNING "istallion: dtr set failed.\n");
1151} 1151}
1152 1152
1153 1153
@@ -4417,7 +4417,7 @@ static const struct tty_operations stli_ops = {
4417 4417
4418static const struct tty_port_operations stli_port_ops = { 4418static const struct tty_port_operations stli_port_ops = {
4419 .carrier_raised = stli_carrier_raised, 4419 .carrier_raised = stli_carrier_raised,
4420 .raise_dtr_rts = stli_raise_dtr_rts, 4420 .dtr_rts = stli_dtr_rts,
4421}; 4421};
4422 4422
4423/*****************************************************************************/ 4423/*****************************************************************************/
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 8f05c38c2f0..f96d0bef855 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -694,6 +694,8 @@ static ssize_t read_zero(struct file * file, char __user * buf,
694 written += chunk - unwritten; 694 written += chunk - unwritten;
695 if (unwritten) 695 if (unwritten)
696 break; 696 break;
697 if (signal_pending(current))
698 return written ? written : -ERESTARTSYS;
697 buf += chunk; 699 buf += chunk;
698 count -= chunk; 700 count -= chunk;
699 cond_resched(); 701 cond_resched();
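
The read_zero() change makes large reads of /dev/zero interruptible: after each chunk the loop checks for a pending signal and returns either the partial count or -ERESTARTSYS. This is the standard pattern for long copy loops; a condensed sketch of the loop body shown above (clear_user() returns the number of bytes it could not clear):

	/* Condensed fragment, not a complete function. */
	while (count) {
		size_t chunk = min_t(size_t, count, PAGE_SIZE);
		size_t unwritten = clear_user(buf, chunk);

		written += chunk - unwritten;
		if (unwritten)
			break;			/* fault: report progress so far */
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written;
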
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 4a4cab73d0b..65b6ff2442c 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -1184,6 +1184,11 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
1184 return -ENODEV; 1184 return -ENODEV;
1185 } 1185 }
1186 1186
1187 if (port % MAX_PORTS_PER_BOARD >= brd->numPorts) {
1188 mutex_unlock(&moxa_openlock);
1189 return -ENODEV;
1190 }
1191
1187 ch = &brd->ports[port % MAX_PORTS_PER_BOARD]; 1192 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
1188 ch->port.count++; 1193 ch->port.count++;
1189 tty->driver_data = ch; 1194 tty->driver_data = ch;
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index a420e8d437d..9533f43a30b 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -547,14 +547,18 @@ static int mxser_carrier_raised(struct tty_port *port)
547 return (inb(mp->ioaddr + UART_MSR) & UART_MSR_DCD)?1:0; 547 return (inb(mp->ioaddr + UART_MSR) & UART_MSR_DCD)?1:0;
548} 548}
549 549
550static void mxser_raise_dtr_rts(struct tty_port *port) 550static void mxser_dtr_rts(struct tty_port *port, int on)
551{ 551{
552 struct mxser_port *mp = container_of(port, struct mxser_port, port); 552 struct mxser_port *mp = container_of(port, struct mxser_port, port);
553 unsigned long flags; 553 unsigned long flags;
554 554
555 spin_lock_irqsave(&mp->slock, flags); 555 spin_lock_irqsave(&mp->slock, flags);
556 outb(inb(mp->ioaddr + UART_MCR) | 556 if (on)
557 UART_MCR_DTR | UART_MCR_RTS, mp->ioaddr + UART_MCR); 557 outb(inb(mp->ioaddr + UART_MCR) |
558 UART_MCR_DTR | UART_MCR_RTS, mp->ioaddr + UART_MCR);
559 else
560 outb(inb(mp->ioaddr + UART_MCR)&~(UART_MCR_DTR | UART_MCR_RTS),
561 mp->ioaddr + UART_MCR);
558 spin_unlock_irqrestore(&mp->slock, flags); 562 spin_unlock_irqrestore(&mp->slock, flags);
559} 563}
560 564
@@ -2356,7 +2360,7 @@ static const struct tty_operations mxser_ops = {
2356 2360
2357struct tty_port_operations mxser_port_ops = { 2361struct tty_port_operations mxser_port_ops = {
2358 .carrier_raised = mxser_carrier_raised, 2362 .carrier_raised = mxser_carrier_raised,
2359 .raise_dtr_rts = mxser_raise_dtr_rts, 2363 .dtr_rts = mxser_dtr_rts,
2360}; 2364};
2361 2365
2362/* 2366/*
@@ -2711,7 +2715,7 @@ static int __init mxser_module_init(void)
2711 continue; 2715 continue;
2712 2716
2713 brd = &mxser_boards[m]; 2717 brd = &mxser_boards[m];
2714 retval = mxser_get_ISA_conf(!ioaddr[b], brd); 2718 retval = mxser_get_ISA_conf(ioaddr[b], brd);
2715 if (retval <= 0) { 2719 if (retval <= 0) {
2716 brd->info = NULL; 2720 brd->info = NULL;
2717 continue; 2721 continue;
diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c
index bacb3e2872a..461ece591a5 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/char/n_hdlc.c
@@ -342,8 +342,8 @@ static int n_hdlc_tty_open (struct tty_struct *tty)
342#endif 342#endif
343 343
344 /* Flush any pending characters in the driver and discipline. */ 344 /* Flush any pending characters in the driver and discipline. */
345 if (tty->ldisc.ops->flush_buffer) 345 if (tty->ldisc->ops->flush_buffer)
346 tty->ldisc.ops->flush_buffer(tty); 346 tty->ldisc->ops->flush_buffer(tty);
347 347
348 tty_driver_flush_buffer(tty); 348 tty_driver_flush_buffer(tty);
349 349
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index f6f0e4ec2b5..94a5d5020ab 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -73,24 +73,6 @@
73#define ECHO_OP_SET_CANON_COL 0x81 73#define ECHO_OP_SET_CANON_COL 0x81
74#define ECHO_OP_ERASE_TAB 0x82 74#define ECHO_OP_ERASE_TAB 0x82
75 75
76static inline unsigned char *alloc_buf(void)
77{
78 gfp_t prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
79
80 if (PAGE_SIZE != N_TTY_BUF_SIZE)
81 return kmalloc(N_TTY_BUF_SIZE, prio);
82 else
83 return (unsigned char *)__get_free_page(prio);
84}
85
86static inline void free_buf(unsigned char *buf)
87{
88 if (PAGE_SIZE != N_TTY_BUF_SIZE)
89 kfree(buf);
90 else
91 free_page((unsigned long) buf);
92}
93
94static inline int tty_put_user(struct tty_struct *tty, unsigned char x, 76static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
95 unsigned char __user *ptr) 77 unsigned char __user *ptr)
96{ 78{
@@ -1558,11 +1540,11 @@ static void n_tty_close(struct tty_struct *tty)
1558{ 1540{
1559 n_tty_flush_buffer(tty); 1541 n_tty_flush_buffer(tty);
1560 if (tty->read_buf) { 1542 if (tty->read_buf) {
1561 free_buf(tty->read_buf); 1543 kfree(tty->read_buf);
1562 tty->read_buf = NULL; 1544 tty->read_buf = NULL;
1563 } 1545 }
1564 if (tty->echo_buf) { 1546 if (tty->echo_buf) {
1565 free_buf(tty->echo_buf); 1547 kfree(tty->echo_buf);
1566 tty->echo_buf = NULL; 1548 tty->echo_buf = NULL;
1567 } 1549 }
1568} 1550}
@@ -1584,17 +1566,16 @@ static int n_tty_open(struct tty_struct *tty)
1584 1566
1585 /* These are ugly. Currently a malloc failure here can panic */ 1567 /* These are ugly. Currently a malloc failure here can panic */
1586 if (!tty->read_buf) { 1568 if (!tty->read_buf) {
1587 tty->read_buf = alloc_buf(); 1569 tty->read_buf = kzalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
1588 if (!tty->read_buf) 1570 if (!tty->read_buf)
1589 return -ENOMEM; 1571 return -ENOMEM;
1590 } 1572 }
1591 if (!tty->echo_buf) { 1573 if (!tty->echo_buf) {
1592 tty->echo_buf = alloc_buf(); 1574 tty->echo_buf = kzalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
1575
1593 if (!tty->echo_buf) 1576 if (!tty->echo_buf)
1594 return -ENOMEM; 1577 return -ENOMEM;
1595 } 1578 }
1596 memset(tty->read_buf, 0, N_TTY_BUF_SIZE);
1597 memset(tty->echo_buf, 0, N_TTY_BUF_SIZE);
1598 reset_buffer_flags(tty); 1579 reset_buffer_flags(tty);
1599 tty->column = 0; 1580 tty->column = 0;
1600 n_tty_set_termios(tty, NULL); 1581 n_tty_set_termios(tty, NULL);
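
The n_tty hunks drop the alloc_buf()/free_buf() pair, which special-cased N_TTY_BUF_SIZE == PAGE_SIZE to use the page allocator, in favor of plain kzalloc()/kfree(); since kzalloc() returns zeroed memory, the two explicit memset() calls go with it. Conceptually:

	/* kzalloc(size, flags) behaves like: */
	void *p = kmalloc(size, flags);
	if (p)
		memset(p, 0, size);
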
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 19d79fc5446..77b36488922 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -383,7 +383,7 @@ static void async_mode(MGSLPC_INFO *info);
383static void tx_timeout(unsigned long context); 383static void tx_timeout(unsigned long context);
384 384
385static int carrier_raised(struct tty_port *port); 385static int carrier_raised(struct tty_port *port);
386static void raise_dtr_rts(struct tty_port *port); 386static void dtr_rts(struct tty_port *port, int onoff);
387 387
388#if SYNCLINK_GENERIC_HDLC 388#if SYNCLINK_GENERIC_HDLC
389#define dev_to_port(D) (dev_to_hdlc(D)->priv) 389#define dev_to_port(D) (dev_to_hdlc(D)->priv)
@@ -513,7 +513,7 @@ static void ldisc_receive_buf(struct tty_struct *tty,
513 513
514static const struct tty_port_operations mgslpc_port_ops = { 514static const struct tty_port_operations mgslpc_port_ops = {
515 .carrier_raised = carrier_raised, 515 .carrier_raised = carrier_raised,
516 .raise_dtr_rts = raise_dtr_rts 516 .dtr_rts = dtr_rts
517}; 517};
518 518
519static int mgslpc_probe(struct pcmcia_device *link) 519static int mgslpc_probe(struct pcmcia_device *link)
@@ -2528,13 +2528,16 @@ static int carrier_raised(struct tty_port *port)
2528 return 0; 2528 return 0;
2529} 2529}
2530 2530
2531static void raise_dtr_rts(struct tty_port *port) 2531static void dtr_rts(struct tty_port *port, int onoff)
2532{ 2532{
2533 MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port); 2533 MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
2534 unsigned long flags; 2534 unsigned long flags;
2535 2535
2536 spin_lock_irqsave(&info->lock,flags); 2536 spin_lock_irqsave(&info->lock,flags);
2537 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; 2537 if (onoff)
2538 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
2539 else
2540 info->serial_signals &= ~SerialSignal_RTS + SerialSignal_DTR;
2538 set_signals(info); 2541 set_signals(info);
2539 spin_unlock_irqrestore(&info->lock,flags); 2542 spin_unlock_irqrestore(&info->lock,flags);
2540} 2543}
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index 31038a0052a..5acd29e6e04 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -30,7 +30,6 @@
30 30
31#include <asm/system.h> 31#include <asm/system.h>
32 32
33/* These are global because they are accessed in tty_io.c */
34#ifdef CONFIG_UNIX98_PTYS 33#ifdef CONFIG_UNIX98_PTYS
35static struct tty_driver *ptm_driver; 34static struct tty_driver *ptm_driver;
36static struct tty_driver *pts_driver; 35static struct tty_driver *pts_driver;
@@ -111,7 +110,7 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf,
111 c = to->receive_room; 110 c = to->receive_room;
112 if (c > count) 111 if (c > count)
113 c = count; 112 c = count;
114 to->ldisc.ops->receive_buf(to, buf, NULL, c); 113 to->ldisc->ops->receive_buf(to, buf, NULL, c);
115 114
116 return c; 115 return c;
117} 116}
@@ -149,11 +148,11 @@ static int pty_chars_in_buffer(struct tty_struct *tty)
149 int count; 148 int count;
150 149
151 /* We should get the line discipline lock for "tty->link" */ 150 /* We should get the line discipline lock for "tty->link" */
152 if (!to || !to->ldisc.ops->chars_in_buffer) 151 if (!to || !to->ldisc->ops->chars_in_buffer)
153 return 0; 152 return 0;
154 153
155 /* The ldisc must report 0 if no characters available to be read */ 154 /* The ldisc must report 0 if no characters available to be read */
156 count = to->ldisc.ops->chars_in_buffer(to); 155 count = to->ldisc->ops->chars_in_buffer(to);
157 156
158 if (tty->driver->subtype == PTY_TYPE_SLAVE) 157 if (tty->driver->subtype == PTY_TYPE_SLAVE)
159 return count; 158 return count;
@@ -187,8 +186,8 @@ static void pty_flush_buffer(struct tty_struct *tty)
187 if (!to) 186 if (!to)
188 return; 187 return;
189 188
190 if (to->ldisc.ops->flush_buffer) 189 if (to->ldisc->ops->flush_buffer)
191 to->ldisc.ops->flush_buffer(to); 190 to->ldisc->ops->flush_buffer(to);
192 191
193 if (to->packet) { 192 if (to->packet) {
194 spin_lock_irqsave(&tty->ctrl_lock, flags); 193 spin_lock_irqsave(&tty->ctrl_lock, flags);
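
Many one-line hunks in this patch (cyclades, epca, ip2, n_hdlc, pty, selection) are fallout from struct tty_struct's ldisc member changing from an embedded structure to a pointer, so every ops access gains a dereference. The guarded call pattern after the change, as a sketch:

	/* tty->ldisc is now a struct tty_ldisc *, so ops are reached with
	 * '->' and optional hooks are still checked before the call. */
	if (tty->ldisc->ops->flush_buffer)
		tty->ldisc->ops->flush_buffer(tty);
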
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b2ced39d76b..8c7444857a4 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1673,7 +1673,7 @@ unsigned int get_random_int(void)
1673 int ret; 1673 int ret;
1674 1674
1675 keyptr = get_keyptr(); 1675 keyptr = get_keyptr();
1676 hash[0] += current->pid + jiffies + get_cycles() + (int)(long)&ret; 1676 hash[0] += current->pid + jiffies + get_cycles();
1677 1677
1678 ret = half_md4_transform(hash, keyptr->secret); 1678 ret = half_md4_transform(hash, keyptr->secret);
1679 put_cpu_var(get_random_int_hash); 1679 put_cpu_var(get_random_int_hash);
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 20d90e6a6e5..db32f0e4c7d 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -71,7 +71,7 @@ static int raw_open(struct inode *inode, struct file *filp)
71 err = bd_claim(bdev, raw_open); 71 err = bd_claim(bdev, raw_open);
72 if (err) 72 if (err)
73 goto out1; 73 goto out1;
74 err = set_blocksize(bdev, bdev_hardsect_size(bdev)); 74 err = set_blocksize(bdev, bdev_logical_block_size(bdev));
75 if (err) 75 if (err)
76 goto out2; 76 goto out2;
77 filp->f_flags |= O_DIRECT; 77 filp->f_flags |= O_DIRECT;
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index f59fc5cea06..63d5b628477 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -872,11 +872,16 @@ static int carrier_raised(struct tty_port *port)
872 return (sGetChanStatusLo(&info->channel) & CD_ACT) ? 1 : 0; 872 return (sGetChanStatusLo(&info->channel) & CD_ACT) ? 1 : 0;
873} 873}
874 874
875static void raise_dtr_rts(struct tty_port *port) 875static void dtr_rts(struct tty_port *port, int on)
876{ 876{
877 struct r_port *info = container_of(port, struct r_port, port); 877 struct r_port *info = container_of(port, struct r_port, port);
878 sSetDTR(&info->channel); 878 if (on) {
879 sSetRTS(&info->channel); 879 sSetDTR(&info->channel);
880 sSetRTS(&info->channel);
881 } else {
882 sClrDTR(&info->channel);
883 sClrRTS(&info->channel);
884 }
880} 885}
881 886
882/* 887/*
@@ -934,7 +939,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
934 /* 939 /*
935 * Info->count is now 1; so it's safe to sleep now. 940 * Info->count is now 1; so it's safe to sleep now.
936 */ 941 */
937 if (!test_bit(ASYNC_INITIALIZED, &port->flags)) { 942 if (!test_bit(ASYNCB_INITIALIZED, &port->flags)) {
938 cp = &info->channel; 943 cp = &info->channel;
939 sSetRxTrigger(cp, TRIG_1); 944 sSetRxTrigger(cp, TRIG_1);
940 if (sGetChanStatus(cp) & CD_ACT) 945 if (sGetChanStatus(cp) & CD_ACT)
@@ -958,7 +963,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
958 sEnRxFIFO(cp); 963 sEnRxFIFO(cp);
959 sEnTransmit(cp); 964 sEnTransmit(cp);
960 965
961 set_bit(ASYNC_INITIALIZED, &info->port.flags); 966 set_bit(ASYNCB_INITIALIZED, &info->port.flags);
962 967
963 /* 968 /*
964 * Set up the tty->alt_speed kludge 969 * Set up the tty->alt_speed kludge
@@ -1641,7 +1646,7 @@ static int rp_write(struct tty_struct *tty,
1641 /* Write remaining data into the port's xmit_buf */ 1646 /* Write remaining data into the port's xmit_buf */
1642 while (1) { 1647 while (1) {
1643 /* Hung up ? */ 1648 /* Hung up ? */
1644 if (!test_bit(ASYNC_NORMAL_ACTIVE, &info->port.flags)) 1649 if (!test_bit(ASYNCB_NORMAL_ACTIVE, &info->port.flags))
1645 goto end; 1650 goto end;
1646 c = min(count, XMIT_BUF_SIZE - info->xmit_cnt - 1); 1651 c = min(count, XMIT_BUF_SIZE - info->xmit_cnt - 1);
1647 c = min(c, XMIT_BUF_SIZE - info->xmit_head); 1652 c = min(c, XMIT_BUF_SIZE - info->xmit_head);
@@ -2250,7 +2255,7 @@ static const struct tty_operations rocket_ops = {
2250 2255
2251static const struct tty_port_operations rocket_port_ops = { 2256static const struct tty_port_operations rocket_port_ops = {
2252 .carrier_raised = carrier_raised, 2257 .carrier_raised = carrier_raised,
2253 .raise_dtr_rts = raise_dtr_rts, 2258 .dtr_rts = dtr_rts,
2254}; 2259};
2255 2260
2256/* 2261/*
diff --git a/drivers/char/selection.c b/drivers/char/selection.c
index cb8ca569896..f97b9e84806 100644
--- a/drivers/char/selection.c
+++ b/drivers/char/selection.c
@@ -327,7 +327,7 @@ int paste_selection(struct tty_struct *tty)
327 } 327 }
328 count = sel_buffer_lth - pasted; 328 count = sel_buffer_lth - pasted;
329 count = min(count, tty->receive_room); 329 count = min(count, tty->receive_room);
330 tty->ldisc.ops->receive_buf(tty, sel_buffer + pasted, 330 tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
331 NULL, count); 331 NULL, count);
332 pasted += count; 332 pasted += count;
333 } 333 }
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 2ad813a801d..53e504f41b2 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -772,11 +772,11 @@ static int stl_carrier_raised(struct tty_port *port)
772 return (portp->sigs & TIOCM_CD) ? 1 : 0; 772 return (portp->sigs & TIOCM_CD) ? 1 : 0;
773} 773}
774 774
775static void stl_raise_dtr_rts(struct tty_port *port) 775static void stl_dtr_rts(struct tty_port *port, int on)
776{ 776{
777 struct stlport *portp = container_of(port, struct stlport, port); 777 struct stlport *portp = container_of(port, struct stlport, port);
778 /* Takes brd_lock internally */ 778 /* Takes brd_lock internally */
779 stl_setsignals(portp, 1, 1); 779 stl_setsignals(portp, on, on);
780} 780}
781 781
782/*****************************************************************************/ 782/*****************************************************************************/
@@ -2547,7 +2547,7 @@ static const struct tty_operations stl_ops = {
2547 2547
2548static const struct tty_port_operations stl_port_ops = { 2548static const struct tty_port_operations stl_port_ops = {
2549 .carrier_raised = stl_carrier_raised, 2549 .carrier_raised = stl_carrier_raised,
2550 .raise_dtr_rts = stl_raise_dtr_rts, 2550 .dtr_rts = stl_dtr_rts,
2551}; 2551};
2552 2552
2553/*****************************************************************************/ 2553/*****************************************************************************/
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index afd0b26ca05..afded3a2379 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -3247,13 +3247,16 @@ static int carrier_raised(struct tty_port *port)
3247 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0; 3247 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
3248} 3248}
3249 3249
3250static void raise_dtr_rts(struct tty_port *port) 3250static void dtr_rts(struct tty_port *port, int on)
3251{ 3251{
3252 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port); 3252 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3253 unsigned long flags; 3253 unsigned long flags;
3254 3254
3255 spin_lock_irqsave(&info->irq_spinlock,flags); 3255 spin_lock_irqsave(&info->irq_spinlock,flags);
3256 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; 3256 if (on)
3257 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
3258 else
3259 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3257 usc_set_serial_signals(info); 3260 usc_set_serial_signals(info);
3258 spin_unlock_irqrestore(&info->irq_spinlock,flags); 3261 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3259} 3262}
@@ -4258,7 +4261,7 @@ static void mgsl_add_device( struct mgsl_struct *info )
4258 4261
4259static const struct tty_port_operations mgsl_port_ops = { 4262static const struct tty_port_operations mgsl_port_ops = {
4260 .carrier_raised = carrier_raised, 4263 .carrier_raised = carrier_raised,
4261 .raise_dtr_rts = raise_dtr_rts, 4264 .dtr_rts = dtr_rts,
4262}; 4265};
4263 4266
4264 4267
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 5e256494686..1386625fc4c 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -214,6 +214,7 @@ struct slgt_desc
214#define set_desc_next(a,b) (a).next = cpu_to_le32((unsigned int)(b)) 214#define set_desc_next(a,b) (a).next = cpu_to_le32((unsigned int)(b))
215#define set_desc_count(a,b)(a).count = cpu_to_le16((unsigned short)(b)) 215#define set_desc_count(a,b)(a).count = cpu_to_le16((unsigned short)(b))
216#define set_desc_eof(a,b) (a).status = cpu_to_le16((b) ? (le16_to_cpu((a).status) | BIT0) : (le16_to_cpu((a).status) & ~BIT0)) 216#define set_desc_eof(a,b) (a).status = cpu_to_le16((b) ? (le16_to_cpu((a).status) | BIT0) : (le16_to_cpu((a).status) & ~BIT0))
217#define set_desc_status(a, b) (a).status = cpu_to_le16((unsigned short)(b))
217#define desc_count(a) (le16_to_cpu((a).count)) 218#define desc_count(a) (le16_to_cpu((a).count))
218#define desc_status(a) (le16_to_cpu((a).status)) 219#define desc_status(a) (le16_to_cpu((a).status))
219#define desc_complete(a) (le16_to_cpu((a).status) & BIT15) 220#define desc_complete(a) (le16_to_cpu((a).status) & BIT15)
@@ -297,6 +298,7 @@ struct slgt_info {
297 u32 max_frame_size; /* as set by device config */ 298 u32 max_frame_size; /* as set by device config */
298 299
299 unsigned int rbuf_fill_level; 300 unsigned int rbuf_fill_level;
301 unsigned int rx_pio;
300 unsigned int if_mode; 302 unsigned int if_mode;
301 unsigned int base_clock; 303 unsigned int base_clock;
302 304
@@ -331,6 +333,8 @@ struct slgt_info {
331 struct slgt_desc *rbufs; 333 struct slgt_desc *rbufs;
332 unsigned int rbuf_current; 334 unsigned int rbuf_current;
333 unsigned int rbuf_index; 335 unsigned int rbuf_index;
336 unsigned int rbuf_fill_index;
337 unsigned short rbuf_fill_count;
334 338
335 unsigned int tbuf_count; 339 unsigned int tbuf_count;
336 struct slgt_desc *tbufs; 340 struct slgt_desc *tbufs;
@@ -2110,6 +2114,40 @@ static void ri_change(struct slgt_info *info, unsigned short status)
2110 info->pending_bh |= BH_STATUS; 2114 info->pending_bh |= BH_STATUS;
2111} 2115}
2112 2116
2117static void isr_rxdata(struct slgt_info *info)
2118{
2119 unsigned int count = info->rbuf_fill_count;
2120 unsigned int i = info->rbuf_fill_index;
2121 unsigned short reg;
2122
2123 while (rd_reg16(info, SSR) & IRQ_RXDATA) {
2124 reg = rd_reg16(info, RDR);
2125 DBGISR(("isr_rxdata %s RDR=%04X\n", info->device_name, reg));
2126 if (desc_complete(info->rbufs[i])) {
2127 /* all buffers full */
2128 rx_stop(info);
2129 info->rx_restart = 1;
2130 continue;
2131 }
2132 info->rbufs[i].buf[count++] = (unsigned char)reg;
2133 /* async mode saves status byte to buffer for each data byte */
2134 if (info->params.mode == MGSL_MODE_ASYNC)
2135 info->rbufs[i].buf[count++] = (unsigned char)(reg >> 8);
2136 if (count == info->rbuf_fill_level || (reg & BIT10)) {
2137 /* buffer full or end of frame */
2138 set_desc_count(info->rbufs[i], count);
2139 set_desc_status(info->rbufs[i], BIT15 | (reg >> 8));
2140 info->rbuf_fill_count = count = 0;
2141 if (++i == info->rbuf_count)
2142 i = 0;
2143 info->pending_bh |= BH_RECEIVE;
2144 }
2145 }
2146
2147 info->rbuf_fill_index = i;
2148 info->rbuf_fill_count = count;
2149}
2150
2113static void isr_serial(struct slgt_info *info) 2151static void isr_serial(struct slgt_info *info)
2114{ 2152{
2115 unsigned short status = rd_reg16(info, SSR); 2153 unsigned short status = rd_reg16(info, SSR);
@@ -2125,6 +2163,8 @@ static void isr_serial(struct slgt_info *info)
2125 if (info->tx_count) 2163 if (info->tx_count)
2126 isr_txeom(info, status); 2164 isr_txeom(info, status);
2127 } 2165 }
2166 if (info->rx_pio && (status & IRQ_RXDATA))
2167 isr_rxdata(info);
2128 if ((status & IRQ_RXBREAK) && (status & RXBREAK)) { 2168 if ((status & IRQ_RXBREAK) && (status & RXBREAK)) {
2129 info->icount.brk++; 2169 info->icount.brk++;
2130 /* process break detection if tty control allows */ 2170 /* process break detection if tty control allows */
@@ -2141,7 +2181,8 @@ static void isr_serial(struct slgt_info *info)
2141 } else { 2181 } else {
2142 if (status & (IRQ_TXIDLE + IRQ_TXUNDER)) 2182 if (status & (IRQ_TXIDLE + IRQ_TXUNDER))
2143 isr_txeom(info, status); 2183 isr_txeom(info, status);
2144 2184 if (info->rx_pio && (status & IRQ_RXDATA))
2185 isr_rxdata(info);
2145 if (status & IRQ_RXIDLE) { 2186 if (status & IRQ_RXIDLE) {
2146 if (status & RXIDLE) 2187 if (status & RXIDLE)
2147 info->icount.rxidle++; 2188 info->icount.rxidle++;
@@ -2642,6 +2683,10 @@ static int rx_enable(struct slgt_info *info, int enable)
2642 return -EINVAL; 2683 return -EINVAL;
2643 } 2684 }
2644 info->rbuf_fill_level = rbuf_fill_level; 2685 info->rbuf_fill_level = rbuf_fill_level;
2686 if (rbuf_fill_level < 128)
2687 info->rx_pio = 1; /* PIO mode */
2688 else
2689 info->rx_pio = 0; /* DMA mode */
2645 rx_stop(info); /* restart receiver to use new fill level */ 2690 rx_stop(info); /* restart receiver to use new fill level */
2646 } 2691 }
2647 2692
@@ -3099,13 +3144,16 @@ static int carrier_raised(struct tty_port *port)
3099 return (info->signals & SerialSignal_DCD) ? 1 : 0; 3144 return (info->signals & SerialSignal_DCD) ? 1 : 0;
3100} 3145}
3101 3146
3102static void raise_dtr_rts(struct tty_port *port) 3147static void dtr_rts(struct tty_port *port, int on)
3103{ 3148{
3104 unsigned long flags; 3149 unsigned long flags;
3105 struct slgt_info *info = container_of(port, struct slgt_info, port); 3150 struct slgt_info *info = container_of(port, struct slgt_info, port);
3106 3151
3107 spin_lock_irqsave(&info->lock,flags); 3152 spin_lock_irqsave(&info->lock,flags);
3108 info->signals |= SerialSignal_RTS + SerialSignal_DTR; 3153 if (on)
3154 info->signals |= SerialSignal_RTS + SerialSignal_DTR;
3155 else
3156 info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3109 set_signals(info); 3157 set_signals(info);
3110 spin_unlock_irqrestore(&info->lock,flags); 3158 spin_unlock_irqrestore(&info->lock,flags);
3111} 3159}
@@ -3419,7 +3467,7 @@ static void add_device(struct slgt_info *info)
3419 3467
3420static const struct tty_port_operations slgt_port_ops = { 3468static const struct tty_port_operations slgt_port_ops = {
3421 .carrier_raised = carrier_raised, 3469 .carrier_raised = carrier_raised,
3422 .raise_dtr_rts = raise_dtr_rts, 3470 .dtr_rts = dtr_rts,
3423}; 3471};
3424 3472
3425/* 3473/*
@@ -3841,15 +3889,27 @@ static void rx_start(struct slgt_info *info)
3841 rdma_reset(info); 3889 rdma_reset(info);
3842 reset_rbufs(info); 3890 reset_rbufs(info);
3843 3891
3844 /* set 1st descriptor address */ 3892 if (info->rx_pio) {
3845 wr_reg32(info, RDDAR, info->rbufs[0].pdesc); 3893 /* rx request when rx FIFO not empty */
3846 3894 wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) & ~BIT14));
3847 if (info->params.mode != MGSL_MODE_ASYNC) { 3895 slgt_irq_on(info, IRQ_RXDATA);
3848 /* enable rx DMA and DMA interrupt */ 3896 if (info->params.mode == MGSL_MODE_ASYNC) {
3849 wr_reg32(info, RDCSR, (BIT2 + BIT0)); 3897 /* enable saving of rx status */
3898 wr_reg32(info, RDCSR, BIT6);
3899 }
3850 } else { 3900 } else {
3851 /* enable saving of rx status, rx DMA and DMA interrupt */ 3901 /* rx request when rx FIFO half full */
3852 wr_reg32(info, RDCSR, (BIT6 + BIT2 + BIT0)); 3902 wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT14));
3903 /* set 1st descriptor address */
3904 wr_reg32(info, RDDAR, info->rbufs[0].pdesc);
3905
3906 if (info->params.mode != MGSL_MODE_ASYNC) {
3907 /* enable rx DMA and DMA interrupt */
3908 wr_reg32(info, RDCSR, (BIT2 + BIT0));
3909 } else {
3910 /* enable saving of rx status, rx DMA and DMA interrupt */
3911 wr_reg32(info, RDCSR, (BIT6 + BIT2 + BIT0));
3912 }
3853 } 3913 }
3854 3914
3855 slgt_irq_on(info, IRQ_RXOVER); 3915 slgt_irq_on(info, IRQ_RXOVER);
@@ -4467,6 +4527,8 @@ static void free_rbufs(struct slgt_info *info, unsigned int i, unsigned int last
4467static void reset_rbufs(struct slgt_info *info) 4527static void reset_rbufs(struct slgt_info *info)
4468{ 4528{
4469 free_rbufs(info, 0, info->rbuf_count - 1); 4529 free_rbufs(info, 0, info->rbuf_count - 1);
4530 info->rbuf_fill_index = 0;
4531 info->rbuf_fill_count = 0;
4470} 4532}
4471 4533
4472/* 4534/*
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 26de60efe4b..6f727e3c53a 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -3277,13 +3277,16 @@ static int carrier_raised(struct tty_port *port)
3277 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0; 3277 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
3278} 3278}
3279 3279
3280static void raise_dtr_rts(struct tty_port *port) 3280static void dtr_rts(struct tty_port *port, int on)
3281{ 3281{
3282 SLMP_INFO *info = container_of(port, SLMP_INFO, port); 3282 SLMP_INFO *info = container_of(port, SLMP_INFO, port);
3283 unsigned long flags; 3283 unsigned long flags;
3284 3284
3285 spin_lock_irqsave(&info->lock,flags); 3285 spin_lock_irqsave(&info->lock,flags);
3286 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; 3286 if (on)
3287 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
3288 else
3289 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3287 set_signals(info); 3290 set_signals(info);
3288 spin_unlock_irqrestore(&info->lock,flags); 3291 spin_unlock_irqrestore(&info->lock,flags);
3289} 3292}
@@ -3746,7 +3749,7 @@ static void add_device(SLMP_INFO *info)
3746 3749
3747static const struct tty_port_operations port_ops = { 3750static const struct tty_port_operations port_ops = {
3748 .carrier_raised = carrier_raised, 3751 .carrier_raised = carrier_raised,
3749 .raise_dtr_rts = raise_dtr_rts, 3752 .dtr_rts = dtr_rts,
3750}; 3753};
3751 3754
3752/* Allocate and initialize a device instance structure 3755/* Allocate and initialize a device instance structure
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index d6a807f4077..39a05b5fa9c 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -25,6 +25,7 @@
25#include <linux/kbd_kern.h> 25#include <linux/kbd_kern.h>
26#include <linux/proc_fs.h> 26#include <linux/proc_fs.h>
27#include <linux/quotaops.h> 27#include <linux/quotaops.h>
28#include <linux/perf_counter.h>
28#include <linux/kernel.h> 29#include <linux/kernel.h>
29#include <linux/module.h> 30#include <linux/module.h>
30#include <linux/suspend.h> 31#include <linux/suspend.h>
@@ -243,6 +244,7 @@ static void sysrq_handle_showregs(int key, struct tty_struct *tty)
243 struct pt_regs *regs = get_irq_regs(); 244 struct pt_regs *regs = get_irq_regs();
244 if (regs) 245 if (regs)
245 show_regs(regs); 246 show_regs(regs);
247 perf_counter_print_debug();
246} 248}
247static struct sysrq_key_op sysrq_showregs_op = { 249static struct sysrq_key_op sysrq_showregs_op = {
248 .handler = sysrq_handle_showregs, 250 .handler = sysrq_handle_showregs,
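The hook above piggybacks on the existing show-registers key. New sysrq actions follow the same op-table pattern; a minimal sketch, with the key and all demo_* names chosen purely for illustration:

        #include <linux/sysrq.h>

        static void sysrq_handle_demo(int key, struct tty_struct *tty)
        {
                printk(KERN_INFO "sysrq: demo action\n");
        }

        static struct sysrq_key_op sysrq_demo_op = {
                .handler        = sysrq_handle_demo,
                .help_msg       = "demo(y)",
                .action_msg     = "Demo action",
        };

        /* register_sysrq_key('y', &sysrq_demo_op) binds it at runtime */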
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
index ed306eb1057..0c2f55a38b9 100644
--- a/drivers/char/tpm/tpm_bios.c
+++ b/drivers/char/tpm/tpm_bios.c
@@ -212,7 +212,8 @@ static int get_event_name(char *dest, struct tcpa_event *event,
212 unsigned char * event_entry) 212 unsigned char * event_entry)
213{ 213{
214 const char *name = ""; 214 const char *name = "";
215 char data[40] = ""; 215 /* 41 so there is room for 40 data and 1 nul */
216 char data[41] = "";
216 int i, n_len = 0, d_len = 0; 217 int i, n_len = 0, d_len = 0;
217 struct tcpa_pc_event *pc_event; 218 struct tcpa_pc_event *pc_event;
218 219
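The extra byte matters because the parser can emit a full 40 characters of event data, and with data[40] the terminating nul of a maximal string landed one byte out of bounds. A sketch of the sizing rule (event_payload is an illustrative stand-in):

        char data[41] = "";     /* 40 payload chars + 1 for the nul */

        /* snprintf() truncates to sizeof(data) - 1 and always terminates,
         * so a 40-character payload now fits without overflow. */
        snprintf(data, sizeof(data), "%.40s", event_payload);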
diff --git a/drivers/char/tty_audit.c b/drivers/char/tty_audit.c
index 55ba6f14288..ac16fbec72d 100644
--- a/drivers/char/tty_audit.c
+++ b/drivers/char/tty_audit.c
@@ -29,10 +29,7 @@ static struct tty_audit_buf *tty_audit_buf_alloc(int major, int minor,
29 buf = kmalloc(sizeof(*buf), GFP_KERNEL); 29 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
30 if (!buf) 30 if (!buf)
31 goto err; 31 goto err;
32 if (PAGE_SIZE != N_TTY_BUF_SIZE) 32 buf->data = kmalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
33 buf->data = kmalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
34 else
35 buf->data = (unsigned char *)__get_free_page(GFP_KERNEL);
36 if (!buf->data) 33 if (!buf->data)
37 goto err_buf; 34 goto err_buf;
38 atomic_set(&buf->count, 1); 35 atomic_set(&buf->count, 1);
@@ -52,10 +49,7 @@ err:
52static void tty_audit_buf_free(struct tty_audit_buf *buf) 49static void tty_audit_buf_free(struct tty_audit_buf *buf)
53{ 50{
54 WARN_ON(buf->valid != 0); 51 WARN_ON(buf->valid != 0);
55 if (PAGE_SIZE != N_TTY_BUF_SIZE) 52 kfree(buf->data);
56 kfree(buf->data);
57 else
58 free_page((unsigned long)buf->data);
59 kfree(buf); 53 kfree(buf);
60} 54}
61 55
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 66b99a2049e..939e198d767 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -295,7 +295,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
295 struct tty_driver *p, *res = NULL; 295 struct tty_driver *p, *res = NULL;
296 int tty_line = 0; 296 int tty_line = 0;
297 int len; 297 int len;
298 char *str; 298 char *str, *stp;
299 299
300 for (str = name; *str; str++) 300 for (str = name; *str; str++)
301 if ((*str >= '0' && *str <= '9') || *str == ',') 301 if ((*str >= '0' && *str <= '9') || *str == ',')
@@ -311,13 +311,14 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
311 list_for_each_entry(p, &tty_drivers, tty_drivers) { 311 list_for_each_entry(p, &tty_drivers, tty_drivers) {
312 if (strncmp(name, p->name, len) != 0) 312 if (strncmp(name, p->name, len) != 0)
313 continue; 313 continue;
314 if (*str == ',') 314 stp = str;
315 str++; 315 if (*stp == ',')
316 if (*str == '\0') 316 stp++;
317 str = NULL; 317 if (*stp == '\0')
318 stp = NULL;
318 319
319 if (tty_line >= 0 && tty_line <= p->num && p->ops && 320 if (tty_line >= 0 && tty_line <= p->num && p->ops &&
320 p->ops->poll_init && !p->ops->poll_init(p, tty_line, str)) { 321 p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
321 res = tty_driver_kref_get(p); 322 res = tty_driver_kref_get(p);
322 *line = tty_line; 323 *line = tty_line;
323 break; 324 break;
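The bug here: the old loop advanced and NULLed str itself on a match, so any later iteration compared against a corrupted cursor. Deriving a per-iteration stp leaves the loop invariant intact; the general shape:

        list_for_each_entry(p, &tty_drivers, tty_drivers) {
                char *stp = str;        /* fresh per-iteration copy */

                if (*stp == ',')
                        stp++;
                if (*stp == '\0')
                        stp = NULL;     /* str itself is never modified */
                /* ... pass stp, not str, to poll_init() ... */
        }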
@@ -470,43 +471,6 @@ void tty_wakeup(struct tty_struct *tty)
470EXPORT_SYMBOL_GPL(tty_wakeup); 471EXPORT_SYMBOL_GPL(tty_wakeup);
471 472
472/** 473/**
473 * tty_ldisc_flush - flush line discipline queue
474 * @tty: tty
475 *
476 * Flush the line discipline queue (if any) for this tty. If there
477 * is no line discipline active this is a no-op.
478 */
479
480void tty_ldisc_flush(struct tty_struct *tty)
481{
482 struct tty_ldisc *ld = tty_ldisc_ref(tty);
483 if (ld) {
484 if (ld->ops->flush_buffer)
485 ld->ops->flush_buffer(tty);
486 tty_ldisc_deref(ld);
487 }
488 tty_buffer_flush(tty);
489}
490
491EXPORT_SYMBOL_GPL(tty_ldisc_flush);
492
493/**
494 * tty_reset_termios - reset terminal state
495 * @tty: tty to reset
496 *
497 * Restore a terminal to the driver default state
498 */
499
500static void tty_reset_termios(struct tty_struct *tty)
501{
502 mutex_lock(&tty->termios_mutex);
503 *tty->termios = tty->driver->init_termios;
504 tty->termios->c_ispeed = tty_termios_input_baud_rate(tty->termios);
505 tty->termios->c_ospeed = tty_termios_baud_rate(tty->termios);
506 mutex_unlock(&tty->termios_mutex);
507}
508
509/**
510 * do_tty_hangup - actual handler for hangup events 474 * do_tty_hangup - actual handler for hangup events
511 * @work: tty device 475 * @work: tty device
512 * 476 *
@@ -535,7 +499,6 @@ static void do_tty_hangup(struct work_struct *work)
535 struct file *cons_filp = NULL; 499 struct file *cons_filp = NULL;
536 struct file *filp, *f = NULL; 500 struct file *filp, *f = NULL;
537 struct task_struct *p; 501 struct task_struct *p;
538 struct tty_ldisc *ld;
539 int closecount = 0, n; 502 int closecount = 0, n;
540 unsigned long flags; 503 unsigned long flags;
541 int refs = 0; 504 int refs = 0;
@@ -566,40 +529,8 @@ static void do_tty_hangup(struct work_struct *work)
566 filp->f_op = &hung_up_tty_fops; 529 filp->f_op = &hung_up_tty_fops;
567 } 530 }
568 file_list_unlock(); 531 file_list_unlock();
569 /*
570 * FIXME! What are the locking issues here? This may me overdoing
571 * things... This question is especially important now that we've
572 * removed the irqlock.
573 */
574 ld = tty_ldisc_ref(tty);
575 if (ld != NULL) {
576 /* We may have no line discipline at this point */
577 if (ld->ops->flush_buffer)
578 ld->ops->flush_buffer(tty);
579 tty_driver_flush_buffer(tty);
580 if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
581 ld->ops->write_wakeup)
582 ld->ops->write_wakeup(tty);
583 if (ld->ops->hangup)
584 ld->ops->hangup(tty);
585 }
586 /*
587 * FIXME: Once we trust the LDISC code better we can wait here for
588 * ldisc completion and fix the driver call race
589 */
590 wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
591 wake_up_interruptible_poll(&tty->read_wait, POLLIN);
592 /*
593 * Shutdown the current line discipline, and reset it to
594 * N_TTY.
595 */
596 if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
597 tty_reset_termios(tty);
598 /* Defer ldisc switch */
599 /* tty_deferred_ldisc_switch(N_TTY);
600 532
601 This should get done automatically when the port closes and 533 tty_ldisc_hangup(tty);
602 tty_release is called */
603 534
604 read_lock(&tasklist_lock); 535 read_lock(&tasklist_lock);
605 if (tty->session) { 536 if (tty->session) {
@@ -628,12 +559,15 @@ static void do_tty_hangup(struct work_struct *work)
628 read_unlock(&tasklist_lock); 559 read_unlock(&tasklist_lock);
629 560
630 spin_lock_irqsave(&tty->ctrl_lock, flags); 561 spin_lock_irqsave(&tty->ctrl_lock, flags);
631 tty->flags = 0; 562 clear_bit(TTY_THROTTLED, &tty->flags);
563 clear_bit(TTY_PUSH, &tty->flags);
564 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
632 put_pid(tty->session); 565 put_pid(tty->session);
633 put_pid(tty->pgrp); 566 put_pid(tty->pgrp);
634 tty->session = NULL; 567 tty->session = NULL;
635 tty->pgrp = NULL; 568 tty->pgrp = NULL;
636 tty->ctrl_status = 0; 569 tty->ctrl_status = 0;
570 set_bit(TTY_HUPPED, &tty->flags);
637 spin_unlock_irqrestore(&tty->ctrl_lock, flags); 571 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
638 572
639 /* Account for the p->signal references we killed */ 573 /* Account for the p->signal references we killed */
@@ -659,10 +593,7 @@ static void do_tty_hangup(struct work_struct *work)
659 * can't yet guarantee all that. 593 * can't yet guarantee all that.
660 */ 594 */
661 set_bit(TTY_HUPPED, &tty->flags); 595 set_bit(TTY_HUPPED, &tty->flags);
662 if (ld) { 596 tty_ldisc_enable(tty);
663 tty_ldisc_enable(tty);
664 tty_ldisc_deref(ld);
665 }
666 unlock_kernel(); 597 unlock_kernel();
667 if (f) 598 if (f)
668 fput(f); 599 fput(f);
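Note the change from tty->flags = 0 to targeted clear_bit() calls: zeroing the whole word also wiped bits such as TTY_LDISC that the new tty_ldisc_hangup()/tty_ldisc_enable() sequence manages itself. A condensed view of what hangup now touches (flag names from the hunk):

        /* Only the bits hangup owns are cleared; ldisc flags are left
         * to the ldisc layer. */
        clear_bit(TTY_THROTTLED, &tty->flags);
        clear_bit(TTY_PUSH, &tty->flags);
        clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
        set_bit(TTY_HUPPED, &tty->flags);       /* mark for later opens */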
@@ -2480,6 +2411,24 @@ static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int
2480 return tty->ops->tiocmset(tty, file, set, clear); 2411 return tty->ops->tiocmset(tty, file, set, clear);
2481} 2412}
2482 2413
2414struct tty_struct *tty_pair_get_tty(struct tty_struct *tty)
2415{
2416 if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
2417 tty->driver->subtype == PTY_TYPE_MASTER)
2418 tty = tty->link;
2419 return tty;
2420}
2421EXPORT_SYMBOL(tty_pair_get_tty);
2422
2423struct tty_struct *tty_pair_get_pty(struct tty_struct *tty)
2424{
2425 if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
2426 tty->driver->subtype == PTY_TYPE_MASTER)
2427 return tty;
2428 return tty->link;
2429}
2430EXPORT_SYMBOL(tty_pair_get_pty);
2431
2483/* 2432/*
2484 * Split this up, as gcc can choke on it otherwise.. 2433 * Split this up, as gcc can choke on it otherwise..
2485 */ 2434 */
@@ -2495,11 +2444,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2495 if (tty_paranoia_check(tty, inode, "tty_ioctl")) 2444 if (tty_paranoia_check(tty, inode, "tty_ioctl"))
2496 return -EINVAL; 2445 return -EINVAL;
2497 2446
2498 real_tty = tty; 2447 real_tty = tty_pair_get_tty(tty);
2499 if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
2500 tty->driver->subtype == PTY_TYPE_MASTER)
2501 real_tty = tty->link;
2502
2503 2448
2504 /* 2449 /*
2505 * Factor out some common prep work 2450 * Factor out some common prep work
@@ -2555,7 +2500,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2555 case TIOCGSID: 2500 case TIOCGSID:
2556 return tiocgsid(tty, real_tty, p); 2501 return tiocgsid(tty, real_tty, p);
2557 case TIOCGETD: 2502 case TIOCGETD:
2558 return put_user(tty->ldisc.ops->num, (int __user *)p); 2503 return put_user(tty->ldisc->ops->num, (int __user *)p);
2559 case TIOCSETD: 2504 case TIOCSETD:
2560 return tiocsetd(tty, p); 2505 return tiocsetd(tty, p);
2561 /* 2506 /*
@@ -2770,6 +2715,7 @@ void initialize_tty_struct(struct tty_struct *tty,
2770 tty->buf.head = tty->buf.tail = NULL; 2715 tty->buf.head = tty->buf.tail = NULL;
2771 tty_buffer_init(tty); 2716 tty_buffer_init(tty);
2772 mutex_init(&tty->termios_mutex); 2717 mutex_init(&tty->termios_mutex);
2718 mutex_init(&tty->ldisc_mutex);
2773 init_waitqueue_head(&tty->write_wait); 2719 init_waitqueue_head(&tty->write_wait);
2774 init_waitqueue_head(&tty->read_wait); 2720 init_waitqueue_head(&tty->read_wait);
2775 INIT_WORK(&tty->hangup_work, do_tty_hangup); 2721 INIT_WORK(&tty->hangup_work, do_tty_hangup);
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index 6f4c7d0a53b..8116bb1c8f8 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -97,14 +97,19 @@ EXPORT_SYMBOL(tty_driver_flush_buffer);
97 * @tty: terminal 97 * @tty: terminal
98 * 98 *
99 * Indicate that a tty should stop transmitting data down the stack. 99 * Indicate that a tty should stop transmitting data down the stack.
100 * Takes the termios mutex to protect against parallel throttle/unthrottle
101 * and also to ensure the driver can consistently reference its own
102 * termios data at this point when implementing software flow control.
100 */ 103 */
101 104
102void tty_throttle(struct tty_struct *tty) 105void tty_throttle(struct tty_struct *tty)
103{ 106{
107 mutex_lock(&tty->termios_mutex);
104 /* check TTY_THROTTLED first so it indicates our state */ 108 /* check TTY_THROTTLED first so it indicates our state */
105 if (!test_and_set_bit(TTY_THROTTLED, &tty->flags) && 109 if (!test_and_set_bit(TTY_THROTTLED, &tty->flags) &&
106 tty->ops->throttle) 110 tty->ops->throttle)
107 tty->ops->throttle(tty); 111 tty->ops->throttle(tty);
112 mutex_unlock(&tty->termios_mutex);
108} 113}
109EXPORT_SYMBOL(tty_throttle); 114EXPORT_SYMBOL(tty_throttle);
110 115
@@ -113,13 +118,21 @@ EXPORT_SYMBOL(tty_throttle);
113 * @tty: terminal 118 * @tty: terminal
114 * 119 *
115 * Indicate that a tty may continue transmitting data down the stack. 120 * Indicate that a tty may continue transmitting data down the stack.
121 * Takes the termios mutex to protect against parallel throttle/unthrottle
122 * and also to ensure the driver can consistently reference its own
123 * termios data at this point when implementing software flow control.
124 *
125 * Drivers should however remember that the stack can issue a throttle,
126 * then change flow control method, then unthrottle.
116 */ 127 */
117 128
118void tty_unthrottle(struct tty_struct *tty) 129void tty_unthrottle(struct tty_struct *tty)
119{ 130{
131 mutex_lock(&tty->termios_mutex);
120 if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) && 132 if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) &&
121 tty->ops->unthrottle) 133 tty->ops->unthrottle)
122 tty->ops->unthrottle(tty); 134 tty->ops->unthrottle(tty);
135 mutex_unlock(&tty->termios_mutex);
123} 136}
124EXPORT_SYMBOL(tty_unthrottle); 137EXPORT_SYMBOL(tty_unthrottle);
125 138
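Because tty_throttle()/tty_unthrottle() now hold termios_mutex around the driver callback, a driver's hook can read its own termios consistently while implementing software flow control. A sketch of such a hook; the demo_* helpers are hypothetical:

        static void demo_throttle(struct tty_struct *tty)
        {
                /* termios_mutex is held by the caller, so these reads are
                 * stable against a concurrent set_termios() */
                if (I_IXOFF(tty))
                        demo_send_xchar(tty, STOP_CHAR(tty));   /* hypothetical */
                if (tty->termios->c_cflag & CRTSCTS)
                        demo_clear_rts(tty);                    /* hypothetical */
        }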
@@ -613,9 +626,25 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
613 return 0; 626 return 0;
614} 627}
615 628
629static void copy_termios(struct tty_struct *tty, struct ktermios *kterm)
630{
631 mutex_lock(&tty->termios_mutex);
632 memcpy(kterm, tty->termios, sizeof(struct ktermios));
633 mutex_unlock(&tty->termios_mutex);
634}
635
636static void copy_termios_locked(struct tty_struct *tty, struct ktermios *kterm)
637{
638 mutex_lock(&tty->termios_mutex);
639 memcpy(kterm, tty->termios_locked, sizeof(struct ktermios));
640 mutex_unlock(&tty->termios_mutex);
641}
642
616static int get_termio(struct tty_struct *tty, struct termio __user *termio) 643static int get_termio(struct tty_struct *tty, struct termio __user *termio)
617{ 644{
618 if (kernel_termios_to_user_termio(termio, tty->termios)) 645 struct ktermios kterm;
646 copy_termios(tty, &kterm);
647 if (kernel_termios_to_user_termio(termio, &kterm))
619 return -EFAULT; 648 return -EFAULT;
620 return 0; 649 return 0;
621} 650}
@@ -917,6 +946,8 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
917 struct tty_struct *real_tty; 946 struct tty_struct *real_tty;
918 void __user *p = (void __user *)arg; 947 void __user *p = (void __user *)arg;
919 int ret = 0; 948 int ret = 0;
949 struct ktermios kterm;
950 struct termiox ktermx;
920 951
921 if (tty->driver->type == TTY_DRIVER_TYPE_PTY && 952 if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
922 tty->driver->subtype == PTY_TYPE_MASTER) 953 tty->driver->subtype == PTY_TYPE_MASTER)
@@ -952,23 +983,20 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
952 return set_termios(real_tty, p, TERMIOS_OLD); 983 return set_termios(real_tty, p, TERMIOS_OLD);
953#ifndef TCGETS2 984#ifndef TCGETS2
954 case TCGETS: 985 case TCGETS:
955 mutex_lock(&real_tty->termios_mutex); 986 copy_termios(real_tty, &kterm);
956 if (kernel_termios_to_user_termios((struct termios __user *)arg, real_tty->termios)) 987 if (kernel_termios_to_user_termios((struct termios __user *)arg, &kterm))
957 ret = -EFAULT; 988 ret = -EFAULT;
958 mutex_unlock(&real_tty->termios_mutex);
959 return ret; 989 return ret;
960#else 990#else
961 case TCGETS: 991 case TCGETS:
962 mutex_lock(&real_tty->termios_mutex); 992 copy_termios(real_tty, &kterm);
963 if (kernel_termios_to_user_termios_1((struct termios __user *)arg, real_tty->termios)) 993 if (kernel_termios_to_user_termios_1((struct termios __user *)arg, &kterm))
964 ret = -EFAULT; 994 ret = -EFAULT;
965 mutex_unlock(&real_tty->termios_mutex);
966 return ret; 995 return ret;
967 case TCGETS2: 996 case TCGETS2:
968 mutex_lock(&real_tty->termios_mutex); 997 copy_termios(real_tty, &kterm);
969 if (kernel_termios_to_user_termios((struct termios2 __user *)arg, real_tty->termios)) 998 if (kernel_termios_to_user_termios((struct termios2 __user *)arg, &kterm))
970 ret = -EFAULT; 999 ret = -EFAULT;
971 mutex_unlock(&real_tty->termios_mutex);
972 return ret; 1000 return ret;
973 case TCSETSF2: 1001 case TCSETSF2:
974 return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT); 1002 return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT);
@@ -987,34 +1015,36 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
987 return set_termios(real_tty, p, TERMIOS_TERMIO); 1015 return set_termios(real_tty, p, TERMIOS_TERMIO);
988#ifndef TCGETS2 1016#ifndef TCGETS2
989 case TIOCGLCKTRMIOS: 1017 case TIOCGLCKTRMIOS:
990 mutex_lock(&real_tty->termios_mutex); 1018 copy_termios_locked(real_tty, &kterm);
991 if (kernel_termios_to_user_termios((struct termios __user *)arg, real_tty->termios_locked)) 1019 if (kernel_termios_to_user_termios((struct termios __user *)arg, &kterm))
992 ret = -EFAULT; 1020 ret = -EFAULT;
993 mutex_unlock(&real_tty->termios_mutex);
994 return ret; 1021 return ret;
995 case TIOCSLCKTRMIOS: 1022 case TIOCSLCKTRMIOS:
996 if (!capable(CAP_SYS_ADMIN)) 1023 if (!capable(CAP_SYS_ADMIN))
997 return -EPERM; 1024 return -EPERM;
998 mutex_lock(&real_tty->termios_mutex); 1025 copy_termios_locked(real_tty, &kterm);
999 if (user_termios_to_kernel_termios(real_tty->termios_locked, 1026 if (user_termios_to_kernel_termios(&kterm,
1000 (struct termios __user *) arg)) 1027 (struct termios __user *) arg))
1001 ret = -EFAULT; 1028 return -EFAULT;
1029 mutex_lock(&real_tty->termios_mutex);
1030 memcpy(real_tty->termios_locked, &kterm, sizeof(struct ktermios));
1002 mutex_unlock(&real_tty->termios_mutex); 1031 mutex_unlock(&real_tty->termios_mutex);
1003 return ret; 1032 return 0;
1004#else 1033#else
1005 case TIOCGLCKTRMIOS: 1034 case TIOCGLCKTRMIOS:
1006 mutex_lock(&real_tty->termios_mutex); 1035 copy_termios_locked(real_tty, &kterm);
1007 if (kernel_termios_to_user_termios_1((struct termios __user *)arg, real_tty->termios_locked)) 1036 if (kernel_termios_to_user_termios_1((struct termios __user *)arg, &kterm))
1008 ret = -EFAULT; 1037 ret = -EFAULT;
1009 mutex_unlock(&real_tty->termios_mutex);
1010 return ret; 1038 return ret;
1011 case TIOCSLCKTRMIOS: 1039 case TIOCSLCKTRMIOS:
1012 if (!capable(CAP_SYS_ADMIN)) 1040 if (!capable(CAP_SYS_ADMIN))
1013 ret = -EPERM; 1041 return -EPERM;
1014 mutex_lock(&real_tty->termios_mutex); 1042 copy_termios_locked(real_tty, &kterm);
1015 if (user_termios_to_kernel_termios_1(real_tty->termios_locked, 1043 if (user_termios_to_kernel_termios_1(&kterm,
1016 (struct termios __user *) arg)) 1044 (struct termios __user *) arg))
1017 ret = -EFAULT; 1045 return -EFAULT;
1046 mutex_lock(&real_tty->termios_mutex);
1047 memcpy(real_tty->termios_locked, &kterm, sizeof(struct ktermios));
1018 mutex_unlock(&real_tty->termios_mutex); 1048 mutex_unlock(&real_tty->termios_mutex);
1019 return ret; 1049 return ret;
1020#endif 1050#endif
@@ -1023,9 +1053,10 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
1023 if (real_tty->termiox == NULL) 1053 if (real_tty->termiox == NULL)
1024 return -EINVAL; 1054 return -EINVAL;
1025 mutex_lock(&real_tty->termios_mutex); 1055 mutex_lock(&real_tty->termios_mutex);
1026 if (copy_to_user(p, real_tty->termiox, sizeof(struct termiox))) 1056 memcpy(&ktermx, real_tty->termiox, sizeof(struct termiox));
1027 ret = -EFAULT;
1028 mutex_unlock(&real_tty->termios_mutex); 1057 mutex_unlock(&real_tty->termios_mutex);
1058 if (copy_to_user(p, &ktermx, sizeof(struct termiox)))
1059 ret = -EFAULT;
1029 return ret; 1060 return ret;
1030 case TCSETX: 1061 case TCSETX:
1031 return set_termiox(real_tty, p, 0); 1062 return set_termiox(real_tty, p, 0);
@@ -1035,10 +1066,9 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
1035 return set_termiox(real_tty, p, TERMIOS_FLUSH); 1066 return set_termiox(real_tty, p, TERMIOS_FLUSH);
1036#endif 1067#endif
1037 case TIOCGSOFTCAR: 1068 case TIOCGSOFTCAR:
1038 mutex_lock(&real_tty->termios_mutex); 1069 copy_termios(real_tty, &kterm);
1039 ret = put_user(C_CLOCAL(real_tty) ? 1 : 0, 1070 ret = put_user((kterm.c_cflag & CLOCAL) ? 1 : 0,
1040 (int __user *)arg); 1071 (int __user *)arg);
1041 mutex_unlock(&real_tty->termios_mutex);
1042 return ret; 1072 return ret;
1043 case TIOCSSOFTCAR: 1073 case TIOCSSOFTCAR:
1044 if (get_user(arg, (unsigned int __user *) arg)) 1074 if (get_user(arg, (unsigned int __user *) arg))
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index f78f5b0127a..39c8f86dedd 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -115,19 +115,22 @@ EXPORT_SYMBOL(tty_unregister_ldisc);
115/** 115/**
116 * tty_ldisc_try_get - try and reference an ldisc 116 * tty_ldisc_try_get - try and reference an ldisc
117 * @disc: ldisc number 117 * @disc: ldisc number
118 * @ld: tty ldisc structure to complete
119 * 118 *
120 * Attempt to open and lock a line discipline into place. Return 119 * Attempt to open and lock a line discipline into place. Return
121 * the line discipline refcounted and assigned in ld. On an error 120 * the line discipline refcounted or an error.
122 * report the error code back
123 */ 121 */
124 122
125static int tty_ldisc_try_get(int disc, struct tty_ldisc *ld) 123static struct tty_ldisc *tty_ldisc_try_get(int disc)
126{ 124{
127 unsigned long flags; 125 unsigned long flags;
126 struct tty_ldisc *ld;
128 struct tty_ldisc_ops *ldops; 127 struct tty_ldisc_ops *ldops;
129 int err = -EINVAL; 128 int err = -EINVAL;
130 129
130 ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL);
131 if (ld == NULL)
132 return ERR_PTR(-ENOMEM);
133
131 spin_lock_irqsave(&tty_ldisc_lock, flags); 134 spin_lock_irqsave(&tty_ldisc_lock, flags);
132 ld->ops = NULL; 135 ld->ops = NULL;
133 ldops = tty_ldiscs[disc]; 136 ldops = tty_ldiscs[disc];
@@ -140,17 +143,19 @@ static int tty_ldisc_try_get(int disc, struct tty_ldisc *ld)
140 /* lock it */ 143 /* lock it */
141 ldops->refcount++; 144 ldops->refcount++;
142 ld->ops = ldops; 145 ld->ops = ldops;
146 ld->refcount = 0;
143 err = 0; 147 err = 0;
144 } 148 }
145 } 149 }
146 spin_unlock_irqrestore(&tty_ldisc_lock, flags); 150 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
147 return err; 151 if (err)
152 return ERR_PTR(err);
153 return ld;
148} 154}
149 155
150/** 156/**
151 * tty_ldisc_get - take a reference to an ldisc 157 * tty_ldisc_get - take a reference to an ldisc
152 * @disc: ldisc number 158 * @disc: ldisc number
153 * @ld: tty line discipline structure to use
154 * 159 *
155 * Takes a reference to a line discipline. Deals with refcounts and 160 * Takes a reference to a line discipline. Deals with refcounts and
156 * module locking counts. Returns NULL if the discipline is not available. 161 * module locking counts. Returns NULL if the discipline is not available.
@@ -161,52 +166,54 @@ static int tty_ldisc_try_get(int disc, struct tty_ldisc *ld)
161 * takes tty_ldisc_lock to guard against ldisc races 166 * takes tty_ldisc_lock to guard against ldisc races
162 */ 167 */
163 168
164static int tty_ldisc_get(int disc, struct tty_ldisc *ld) 169static struct tty_ldisc *tty_ldisc_get(int disc)
165{ 170{
166 int err; 171 struct tty_ldisc *ld;
167 172
168 if (disc < N_TTY || disc >= NR_LDISCS) 173 if (disc < N_TTY || disc >= NR_LDISCS)
169 return -EINVAL; 174 return ERR_PTR(-EINVAL);
170 err = tty_ldisc_try_get(disc, ld); 175 ld = tty_ldisc_try_get(disc);
171 if (err < 0) { 176 if (IS_ERR(ld)) {
172 request_module("tty-ldisc-%d", disc); 177 request_module("tty-ldisc-%d", disc);
173 err = tty_ldisc_try_get(disc, ld); 178 ld = tty_ldisc_try_get(disc);
174 } 179 }
175 return err; 180 return ld;
176} 181}
177 182
178/** 183/**
179 * tty_ldisc_put - drop ldisc reference 184 * tty_ldisc_put - drop ldisc reference
180 * @disc: ldisc number 185 * @ld: ldisc
181 * 186 *
182 * Drop a reference to a line discipline. Manage refcounts and 187 * Drop a reference to a line discipline. Manage refcounts and
183 * module usage counts 188 * module usage counts. Free the ldisc once the refcount hits zero.
184 * 189 *
185 * Locking: 190 * Locking:
186 * takes tty_ldisc_lock to guard against ldisc races 191 * takes tty_ldisc_lock to guard against ldisc races
187 */ 192 */
188 193
189static void tty_ldisc_put(struct tty_ldisc_ops *ld) 194static void tty_ldisc_put(struct tty_ldisc *ld)
190{ 195{
191 unsigned long flags; 196 unsigned long flags;
192 int disc = ld->num; 197 int disc = ld->ops->num;
198 struct tty_ldisc_ops *ldo;
193 199
194 BUG_ON(disc < N_TTY || disc >= NR_LDISCS); 200 BUG_ON(disc < N_TTY || disc >= NR_LDISCS);
195 201
196 spin_lock_irqsave(&tty_ldisc_lock, flags); 202 spin_lock_irqsave(&tty_ldisc_lock, flags);
197 ld = tty_ldiscs[disc]; 203 ldo = tty_ldiscs[disc];
198 BUG_ON(ld->refcount == 0); 204 BUG_ON(ldo->refcount == 0);
199 ld->refcount--; 205 ldo->refcount--;
200 module_put(ld->owner); 206 module_put(ldo->owner);
201 spin_unlock_irqrestore(&tty_ldisc_lock, flags); 207 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
208 kfree(ld);
202} 209}
203 210
204static void * tty_ldiscs_seq_start(struct seq_file *m, loff_t *pos) 211static void *tty_ldiscs_seq_start(struct seq_file *m, loff_t *pos)
205{ 212{
206 return (*pos < NR_LDISCS) ? pos : NULL; 213 return (*pos < NR_LDISCS) ? pos : NULL;
207} 214}
208 215
209static void * tty_ldiscs_seq_next(struct seq_file *m, void *v, loff_t *pos) 216static void *tty_ldiscs_seq_next(struct seq_file *m, void *v, loff_t *pos)
210{ 217{
211 (*pos)++; 218 (*pos)++;
212 return (*pos < NR_LDISCS) ? pos : NULL; 219 return (*pos < NR_LDISCS) ? pos : NULL;
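With allocation moved inside tty_ldisc_try_get(), callers now receive either a refcounted ldisc or an ERR_PTR() and must balance it with tty_ldisc_put(). The caller pattern used throughout the rest of this patch:

        struct tty_ldisc *ld = tty_ldisc_get(N_TTY);

        if (IS_ERR(ld))
                return PTR_ERR(ld);     /* -EINVAL or -ENOMEM */
        /* ... use ld->ops ... */
        tty_ldisc_put(ld);              /* drops the ref; frees at zero */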
@@ -219,12 +226,13 @@ static void tty_ldiscs_seq_stop(struct seq_file *m, void *v)
219static int tty_ldiscs_seq_show(struct seq_file *m, void *v) 226static int tty_ldiscs_seq_show(struct seq_file *m, void *v)
220{ 227{
221 int i = *(loff_t *)v; 228 int i = *(loff_t *)v;
222 struct tty_ldisc ld; 229 struct tty_ldisc *ld;
223 230
224 if (tty_ldisc_get(i, &ld) < 0) 231 ld = tty_ldisc_try_get(i);
232 if (IS_ERR(ld))
225 return 0; 233 return 0;
226 seq_printf(m, "%-10s %2d\n", ld.ops->name ? ld.ops->name : "???", i); 234 seq_printf(m, "%-10s %2d\n", ld->ops->name ? ld->ops->name : "???", i);
227 tty_ldisc_put(ld.ops); 235 tty_ldisc_put(ld);
228 return 0; 236 return 0;
229} 237}
230 238
@@ -263,8 +271,7 @@ const struct file_operations tty_ldiscs_proc_fops = {
263 271
264static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld) 272static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
265{ 273{
266 ld->refcount = 0; 274 tty->ldisc = ld;
267 tty->ldisc = *ld;
268} 275}
269 276
270/** 277/**
@@ -286,7 +293,7 @@ static int tty_ldisc_try(struct tty_struct *tty)
286 int ret = 0; 293 int ret = 0;
287 294
288 spin_lock_irqsave(&tty_ldisc_lock, flags); 295 spin_lock_irqsave(&tty_ldisc_lock, flags);
289 ld = &tty->ldisc; 296 ld = tty->ldisc;
290 if (test_bit(TTY_LDISC, &tty->flags)) { 297 if (test_bit(TTY_LDISC, &tty->flags)) {
291 ld->refcount++; 298 ld->refcount++;
292 ret = 1; 299 ret = 1;
@@ -315,10 +322,9 @@ struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
315{ 322{
316 /* wait_event is a macro */ 323 /* wait_event is a macro */
317 wait_event(tty_ldisc_wait, tty_ldisc_try(tty)); 324 wait_event(tty_ldisc_wait, tty_ldisc_try(tty));
318 WARN_ON(tty->ldisc.refcount == 0); 325 WARN_ON(tty->ldisc->refcount == 0);
319 return &tty->ldisc; 326 return tty->ldisc;
320} 327}
321
322EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); 328EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
323 329
324/** 330/**
@@ -335,10 +341,9 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
335struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty) 341struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
336{ 342{
337 if (tty_ldisc_try(tty)) 343 if (tty_ldisc_try(tty))
338 return &tty->ldisc; 344 return tty->ldisc;
339 return NULL; 345 return NULL;
340} 346}
341
342EXPORT_SYMBOL_GPL(tty_ldisc_ref); 347EXPORT_SYMBOL_GPL(tty_ldisc_ref);
343 348
344/** 349/**
@@ -366,7 +371,6 @@ void tty_ldisc_deref(struct tty_ldisc *ld)
366 wake_up(&tty_ldisc_wait); 371 wake_up(&tty_ldisc_wait);
367 spin_unlock_irqrestore(&tty_ldisc_lock, flags); 372 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
368} 373}
369
370EXPORT_SYMBOL_GPL(tty_ldisc_deref); 374EXPORT_SYMBOL_GPL(tty_ldisc_deref);
371 375
372/** 376/**
@@ -389,6 +393,26 @@ void tty_ldisc_enable(struct tty_struct *tty)
389} 393}
390 394
391/** 395/**
396 * tty_ldisc_flush - flush line discipline queue
397 * @tty: tty
398 *
399 * Flush the line discipline queue (if any) for this tty. If there
400 * is no line discipline active this is a no-op.
401 */
402
403void tty_ldisc_flush(struct tty_struct *tty)
404{
405 struct tty_ldisc *ld = tty_ldisc_ref(tty);
406 if (ld) {
407 if (ld->ops->flush_buffer)
408 ld->ops->flush_buffer(tty);
409 tty_ldisc_deref(ld);
410 }
411 tty_buffer_flush(tty);
412}
413EXPORT_SYMBOL_GPL(tty_ldisc_flush);
414
415/**
392 * tty_set_termios_ldisc - set ldisc field 416 * tty_set_termios_ldisc - set ldisc field
393 * @tty: tty structure 417 * @tty: tty structure
394 * @num: line discipline number 418 * @num: line discipline number
@@ -407,6 +431,39 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
407 mutex_unlock(&tty->termios_mutex); 431 mutex_unlock(&tty->termios_mutex);
408} 432}
409 433
434/**
435 * tty_ldisc_open - open a line discipline
436 * @tty: tty we are opening the ldisc on
437 * @ld: discipline to open
438 *
439 * A helper opening method. Also a convenient debugging and check
440 * point.
441 */
442
443static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
444{
445 WARN_ON(test_and_set_bit(TTY_LDISC_OPEN, &tty->flags));
446 if (ld->ops->open)
447 return ld->ops->open(tty);
448 return 0;
449}
450
451/**
452 * tty_ldisc_close - close a line discipline
453 * @tty: tty we are closing the ldisc on
454 * @ld: discipline to close
455 *
456 * A helper close method. Also a convenient debugging and check
457 * point.
458 */
459
460static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
461{
462 WARN_ON(!test_bit(TTY_LDISC_OPEN, &tty->flags));
463 clear_bit(TTY_LDISC_OPEN, &tty->flags);
464 if (ld->ops->close)
465 ld->ops->close(tty);
466}
410 467
411/** 468/**
412 * tty_ldisc_restore - helper for tty ldisc change 469 * tty_ldisc_restore - helper for tty ldisc change
@@ -420,66 +477,136 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
420static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) 477static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
421{ 478{
422 char buf[64]; 479 char buf[64];
423 struct tty_ldisc new_ldisc; 480 struct tty_ldisc *new_ldisc;
481 int r;
424 482
425 /* There is an outstanding reference here so this is safe */ 483 /* There is an outstanding reference here so this is safe */
426 tty_ldisc_get(old->ops->num, old); 484 old = tty_ldisc_get(old->ops->num);
485 WARN_ON(IS_ERR(old));
427 tty_ldisc_assign(tty, old); 486 tty_ldisc_assign(tty, old);
428 tty_set_termios_ldisc(tty, old->ops->num); 487 tty_set_termios_ldisc(tty, old->ops->num);
429 if (old->ops->open && (old->ops->open(tty) < 0)) { 488 if (tty_ldisc_open(tty, old) < 0) {
430 tty_ldisc_put(old->ops); 489 tty_ldisc_put(old);
431 /* This driver is always present */ 490 /* This driver is always present */
432 if (tty_ldisc_get(N_TTY, &new_ldisc) < 0) 491 new_ldisc = tty_ldisc_get(N_TTY);
492 if (IS_ERR(new_ldisc))
433 panic("n_tty: get"); 493 panic("n_tty: get");
434 tty_ldisc_assign(tty, &new_ldisc); 494 tty_ldisc_assign(tty, new_ldisc);
435 tty_set_termios_ldisc(tty, N_TTY); 495 tty_set_termios_ldisc(tty, N_TTY);
436 if (new_ldisc.ops->open) { 496 r = tty_ldisc_open(tty, new_ldisc);
437 int r = new_ldisc.ops->open(tty); 497 if (r < 0)
438 if (r < 0) 498 panic("Couldn't open N_TTY ldisc for "
439 panic("Couldn't open N_TTY ldisc for " 499 "%s --- error %d.",
440 "%s --- error %d.", 500 tty_name(tty, buf), r);
441 tty_name(tty, buf), r);
442 }
443 } 501 }
444} 502}
445 503
446/** 504/**
505 * tty_ldisc_halt - shut down the line discipline
506 * @tty: tty device
507 *
508 * Shut down the line discipline and work queue for this tty device.
509 * The TTY_LDISC flag being cleared ensures no further references can
510 * be obtained while the delayed work queue halt ensures that no more
511 * data is fed to the ldisc.
512 *
513 * In order to wait for any existing references to complete see
514 * tty_ldisc_wait_idle.
515 */
516
517static int tty_ldisc_halt(struct tty_struct *tty)
518{
519 clear_bit(TTY_LDISC, &tty->flags);
520 return cancel_delayed_work(&tty->buf.work);
521}
522
523/**
524 * tty_ldisc_wait_idle - wait for the ldisc to become idle
525 * @tty: tty to wait for
526 *
527 * Wait for the line discipline to become idle. The discipline must
528 * have been halted for this to guarantee it remains idle.
529 *
530 * tty_ldisc_lock protects the ref counts currently.
531 */
532
533static int tty_ldisc_wait_idle(struct tty_struct *tty)
534{
535 unsigned long flags;
536 spin_lock_irqsave(&tty_ldisc_lock, flags);
537 while (tty->ldisc->refcount) {
538 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
539 if (wait_event_timeout(tty_ldisc_wait,
540 tty->ldisc->refcount == 0, 5 * HZ) == 0)
541 return -EBUSY;
542 spin_lock_irqsave(&tty_ldisc_lock, flags);
543 }
544 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
545 return 0;
546}
547
548/**
447 * tty_set_ldisc - set line discipline 549 * tty_set_ldisc - set line discipline
448 * @tty: the terminal to set 550 * @tty: the terminal to set
449 * @ldisc: the line discipline 551 * @ldisc: the line discipline
450 * 552 *
451 * Set the discipline of a tty line. Must be called from a process 553 * Set the discipline of a tty line. Must be called from a process
452 * context. 554 * context. The ldisc change logic has to protect itself against any
555 * overlapping ldisc change (including on the other end of pty pairs),
556 * the close of one side of a tty/pty pair, and eventually hangup.
453 * 557 *
454 * Locking: takes tty_ldisc_lock. 558 * Locking: takes tty_ldisc_lock, termios_mutex
455 * called functions take termios_mutex
456 */ 559 */
457 560
458int tty_set_ldisc(struct tty_struct *tty, int ldisc) 561int tty_set_ldisc(struct tty_struct *tty, int ldisc)
459{ 562{
460 int retval; 563 int retval;
461 struct tty_ldisc o_ldisc, new_ldisc; 564 struct tty_ldisc *o_ldisc, *new_ldisc;
462 int work; 565 int work, o_work = 0;
463 unsigned long flags;
464 struct tty_struct *o_tty; 566 struct tty_struct *o_tty;
465 567
466restart: 568 new_ldisc = tty_ldisc_get(ldisc);
467 /* This is a bit ugly for now but means we can break the 'ldisc 569 if (IS_ERR(new_ldisc))
468 is part of the tty struct' assumption later */ 570 return PTR_ERR(new_ldisc);
469 retval = tty_ldisc_get(ldisc, &new_ldisc); 571
470 if (retval) 572 /*
471 return retval; 573 * We need to look at the tty locking here for pty/tty pairs
574 * when both sides try to change in parallel.
575 */
576
577 o_tty = tty->link; /* o_tty is the pty side or NULL */
578
579
580 /*
581 * Check the no-op case
582 */
583
584 if (tty->ldisc->ops->num == ldisc) {
585 tty_ldisc_put(new_ldisc);
586 return 0;
587 }
472 588
473 /* 589 /*
474 * Problem: What do we do if this blocks ? 590 * Problem: What do we do if this blocks ?
591 * We could deadlock here
475 */ 592 */
476 593
477 tty_wait_until_sent(tty, 0); 594 tty_wait_until_sent(tty, 0);
478 595
479 if (tty->ldisc.ops->num == ldisc) { 596 mutex_lock(&tty->ldisc_mutex);
480 tty_ldisc_put(new_ldisc.ops); 597
481 return 0; 598 /*
599 * We could be midstream of another ldisc change which has
600 * dropped the lock during processing. If so we need to wait.
601 */
602
603 while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
604 mutex_unlock(&tty->ldisc_mutex);
605 wait_event(tty_ldisc_wait,
606 test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0);
607 mutex_lock(&tty->ldisc_mutex);
482 } 608 }
609 set_bit(TTY_LDISC_CHANGING, &tty->flags);
483 610
484 /* 611 /*
485 * No more input please, we are switching. The new ldisc 612 * No more input please, we are switching. The new ldisc
@@ -489,8 +616,6 @@ restart:
489 tty->receive_room = 0; 616 tty->receive_room = 0;
490 617
491 o_ldisc = tty->ldisc; 618 o_ldisc = tty->ldisc;
492 o_tty = tty->link;
493
494 /* 619 /*
495 * Make sure we don't change while someone holds a 620 * Make sure we don't change while someone holds a
496 * reference to the line discipline. The TTY_LDISC bit 621 * reference to the line discipline. The TTY_LDISC bit
@@ -501,108 +626,181 @@ restart:
501 * with a userspace app continually trying to use the tty in 626 * with a userspace app continually trying to use the tty in
502 * parallel to the change and re-referencing the tty. 627 * parallel to the change and re-referencing the tty.
503 */ 628 */
504 clear_bit(TTY_LDISC, &tty->flags);
505 if (o_tty)
506 clear_bit(TTY_LDISC, &o_tty->flags);
507 629
508 spin_lock_irqsave(&tty_ldisc_lock, flags); 630 work = tty_ldisc_halt(tty);
509 if (tty->ldisc.refcount || (o_tty && o_tty->ldisc.refcount)) {
510 if (tty->ldisc.refcount) {
511 /* Free the new ldisc we grabbed. Must drop the lock
512 first. */
513 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
514 tty_ldisc_put(o_ldisc.ops);
515 /*
516 * There are several reasons we may be busy, including
517 * random momentary I/O traffic. We must therefore
518 * retry. We could distinguish between blocking ops
519 * and retries if we made tty_ldisc_wait() smarter.
520 * That is up for discussion.
521 */
522 if (wait_event_interruptible(tty_ldisc_wait, tty->ldisc.refcount == 0) < 0)
523 return -ERESTARTSYS;
524 goto restart;
525 }
526 if (o_tty && o_tty->ldisc.refcount) {
527 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
528 tty_ldisc_put(o_tty->ldisc.ops);
529 if (wait_event_interruptible(tty_ldisc_wait, o_tty->ldisc.refcount == 0) < 0)
530 return -ERESTARTSYS;
531 goto restart;
532 }
533 }
534 /*
535 * If the TTY_LDISC bit is set, then we are racing against
536 * another ldisc change
537 */
538 if (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
539 struct tty_ldisc *ld;
540 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
541 tty_ldisc_put(new_ldisc.ops);
542 ld = tty_ldisc_ref_wait(tty);
543 tty_ldisc_deref(ld);
544 goto restart;
545 }
546 /*
547 * This flag is used to avoid two parallel ldisc changes. Once
548 * open and close are fine grained locked this may work better
549 * as a mutex shared with the open/close/hup paths
550 */
551 set_bit(TTY_LDISC_CHANGING, &tty->flags);
552 if (o_tty) 631 if (o_tty)
553 set_bit(TTY_LDISC_CHANGING, &o_tty->flags); 632 o_work = tty_ldisc_halt(o_tty);
554 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
555
556 /*
557 * From this point on we know nobody has an ldisc
558 * usage reference, nor can they obtain one until
559 * we say so later on.
560 */
561 633
562 work = cancel_delayed_work(&tty->buf.work);
563 /* 634 /*
564 * Wait for ->hangup_work and ->buf.work handlers to terminate 635 * Wait for ->hangup_work and ->buf.work handlers to terminate.
565 * MUST NOT hold locks here. 636 * We must drop the mutex here in case a hangup is also in process.
566 */ 637 */
638
639 mutex_unlock(&tty->ldisc_mutex);
640
567 flush_scheduled_work(); 641 flush_scheduled_work();
642
643 /* Let any existing reference holders finish */
644 retval = tty_ldisc_wait_idle(tty);
645 if (retval < 0) {
646 clear_bit(TTY_LDISC_CHANGING, &tty->flags);
647 tty_ldisc_put(new_ldisc);
648 return retval;
649 }
650
651 mutex_lock(&tty->ldisc_mutex);
652 if (test_bit(TTY_HUPPED, &tty->flags)) {
653 /* We were raced by the hangup method. It will have stomped
654 the ldisc data and closed the ldisc down */
655 clear_bit(TTY_LDISC_CHANGING, &tty->flags);
656 mutex_unlock(&tty->ldisc_mutex);
657 tty_ldisc_put(new_ldisc);
658 return -EIO;
659 }
660
568 /* Shutdown the current discipline. */ 661 /* Shutdown the current discipline. */
569 if (o_ldisc.ops->close) 662 tty_ldisc_close(tty, o_ldisc);
570 (o_ldisc.ops->close)(tty);
571 663
572 /* Now set up the new line discipline. */ 664 /* Now set up the new line discipline. */
573 tty_ldisc_assign(tty, &new_ldisc); 665 tty_ldisc_assign(tty, new_ldisc);
574 tty_set_termios_ldisc(tty, ldisc); 666 tty_set_termios_ldisc(tty, ldisc);
575 if (new_ldisc.ops->open) 667
576 retval = (new_ldisc.ops->open)(tty); 668 retval = tty_ldisc_open(tty, new_ldisc);
577 if (retval < 0) { 669 if (retval < 0) {
578 tty_ldisc_put(new_ldisc.ops); 670 /* Back to the old one or N_TTY if we can't */
579 tty_ldisc_restore(tty, &o_ldisc); 671 tty_ldisc_put(new_ldisc);
672 tty_ldisc_restore(tty, o_ldisc);
580 } 673 }
674
581 /* At this point we hold a reference to the new ldisc and a 675 /* At this point we hold a reference to the new ldisc and a
582 a reference to the old ldisc. If we ended up flipping back 676 a reference to the old ldisc. If we ended up flipping back
583 to the existing ldisc we have two references to it */ 677 to the existing ldisc we have two references to it */
584 678
585 if (tty->ldisc.ops->num != o_ldisc.ops->num && tty->ops->set_ldisc) 679 if (tty->ldisc->ops->num != o_ldisc->ops->num && tty->ops->set_ldisc)
586 tty->ops->set_ldisc(tty); 680 tty->ops->set_ldisc(tty);
587 681
588 tty_ldisc_put(o_ldisc.ops); 682 tty_ldisc_put(o_ldisc);
589 683
590 /* 684 /*
591 * Allow ldisc referencing to occur as soon as the driver 685 * Allow ldisc referencing to occur again
592 * ldisc callback completes.
593 */ 686 */
594 687
595 tty_ldisc_enable(tty); 688 tty_ldisc_enable(tty);
596 if (o_tty) 689 if (o_tty)
597 tty_ldisc_enable(o_tty); 690 tty_ldisc_enable(o_tty);
598 691
599 /* Restart it in case no characters kick it off. Safe if 692 /* Restart the work queue in case no characters kick it off. Safe if
600 already running */ 693 already running */
601 if (work) 694 if (work)
602 schedule_delayed_work(&tty->buf.work, 1); 695 schedule_delayed_work(&tty->buf.work, 1);
696 if (o_work)
697 schedule_delayed_work(&o_tty->buf.work, 1);
698 mutex_unlock(&tty->ldisc_mutex);
603 return retval; 699 return retval;
604} 700}
605 701
702/**
703 * tty_reset_termios - reset terminal state
704 * @tty: tty to reset
705 *
706 * Restore a terminal to the driver default state.
707 */
708
709static void tty_reset_termios(struct tty_struct *tty)
710{
711 mutex_lock(&tty->termios_mutex);
712 *tty->termios = tty->driver->init_termios;
713 tty->termios->c_ispeed = tty_termios_input_baud_rate(tty->termios);
714 tty->termios->c_ospeed = tty_termios_baud_rate(tty->termios);
715 mutex_unlock(&tty->termios_mutex);
716}
717
718
719/**
720 * tty_ldisc_reinit - reinitialise the tty ldisc
721 * @tty: tty to reinit
722 *
723 * Switch the tty back to N_TTY line discipline and leave the
724 * ldisc state closed
725 */
726
727static void tty_ldisc_reinit(struct tty_struct *tty)
728{
729 struct tty_ldisc *ld;
730
731 tty_ldisc_close(tty, tty->ldisc);
732 tty_ldisc_put(tty->ldisc);
733 tty->ldisc = NULL;
734 /*
735 * Switch the line discipline back
736 */
737 ld = tty_ldisc_get(N_TTY);
738 BUG_ON(IS_ERR(ld));
739 tty_ldisc_assign(tty, ld);
740 tty_set_termios_ldisc(tty, N_TTY);
741}
742
743/**
744 * tty_ldisc_hangup - hangup ldisc reset
745 * @tty: tty being hung up
746 *
747 * Some tty devices reset their termios when they receive a hangup
748 * event. In that situation we must also switch back to N_TTY properly
749 * before we reset the termios data.
750 *
751 * Locking: We can take the ldisc mutex as the rest of the code is
752 * careful to allow for this.
753 *
754 * In the pty pair case this occurs in the close() path of the
755 * tty itself so we must be careful about locking rules.
756 */
757
758void tty_ldisc_hangup(struct tty_struct *tty)
759{
760 struct tty_ldisc *ld;
761
762 /*
763 * FIXME! What are the locking issues here? This may be overdoing
764 * things... This question is especially important now that we've
765 * removed the irqlock.
766 */
767 ld = tty_ldisc_ref(tty);
768 if (ld != NULL) {
769 /* We may have no line discipline at this point */
770 if (ld->ops->flush_buffer)
771 ld->ops->flush_buffer(tty);
772 tty_driver_flush_buffer(tty);
773 if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
774 ld->ops->write_wakeup)
775 ld->ops->write_wakeup(tty);
776 if (ld->ops->hangup)
777 ld->ops->hangup(tty);
778 tty_ldisc_deref(ld);
779 }
780 /*
781 * FIXME: Once we trust the LDISC code better we can wait here for
782 * ldisc completion and fix the driver call race
783 */
784 wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
785 wake_up_interruptible_poll(&tty->read_wait, POLLIN);
786 /*
787 * Shutdown the current line discipline, and reset it to
788 * N_TTY.
789 */
790 if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
791 /* Avoid racing set_ldisc */
792 mutex_lock(&tty->ldisc_mutex);
793 /* Switch back to N_TTY */
794 tty_ldisc_reinit(tty);
795 /* At this point we have a closed ldisc and we want to
796 reopen it. We could defer this to the next open but
797 it means auditing a lot of other paths so this is a FIXME */
798 WARN_ON(tty_ldisc_open(tty, tty->ldisc));
799 tty_ldisc_enable(tty);
800 mutex_unlock(&tty->ldisc_mutex);
801 tty_reset_termios(tty);
802 }
803}
606 804
607/** 805/**
608 * tty_ldisc_setup - open line discipline 806 * tty_ldisc_setup - open line discipline
@@ -610,24 +808,23 @@ restart:
610 * @o_tty: pair tty for pty/tty pairs 808 * @o_tty: pair tty for pty/tty pairs
611 * 809 *
612 * Called during the initial open of a tty/pty pair in order to set up the 810 * Called during the initial open of a tty/pty pair in order to set up the
613 * line discplines and bind them to the tty. 811 * line disciplines and bind them to the tty. This has no locking issues
812 * as the device isn't yet active.
614 */ 813 */
615 814
616int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty) 815int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
617{ 816{
618 struct tty_ldisc *ld = &tty->ldisc; 817 struct tty_ldisc *ld = tty->ldisc;
619 int retval; 818 int retval;
620 819
621 if (ld->ops->open) { 820 retval = tty_ldisc_open(tty, ld);
622 retval = (ld->ops->open)(tty); 821 if (retval)
623 if (retval) 822 return retval;
624 return retval; 823
625 } 824 if (o_tty) {
626 if (o_tty && o_tty->ldisc.ops->open) { 825 retval = tty_ldisc_open(o_tty, o_tty->ldisc);
627 retval = (o_tty->ldisc.ops->open)(o_tty);
628 if (retval) { 826 if (retval) {
629 if (ld->ops->close) 827 tty_ldisc_close(tty, ld);
630 (ld->ops->close)(tty);
631 return retval; 828 return retval;
632 } 829 }
633 tty_ldisc_enable(o_tty); 830 tty_ldisc_enable(o_tty);
@@ -635,32 +832,25 @@ int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
635 tty_ldisc_enable(tty); 832 tty_ldisc_enable(tty);
636 return 0; 833 return 0;
637} 834}
638
639/** 835/**
640 * tty_ldisc_release - release line discipline 836 * tty_ldisc_release - release line discipline
641 * @tty: tty being shut down 837 * @tty: tty being shut down
642 * @o_tty: pair tty for pty/tty pairs 838 * @o_tty: pair tty for pty/tty pairs
643 * 839 *
644 * Called during the final close of a tty/pty pair in order to shut down the 840 * Called during the final close of a tty/pty pair in order to shut down
645 * line discpline layer. 841 * the line discipline layer. On exit the ldisc assigned is N_TTY and the
842 * ldisc has not been opened.
646 */ 843 */
647 844
648void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty) 845void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
649{ 846{
650 unsigned long flags;
651 struct tty_ldisc ld;
652 /* 847 /*
653 * Prevent flush_to_ldisc() from rescheduling the work for later. Then 848 * Prevent flush_to_ldisc() from rescheduling the work for later. Then
654 * kill any delayed work. As this is the final close it does not 849 * kill any delayed work. As this is the final close it does not
655 * race with the set_ldisc code path. 850 * race with the set_ldisc code path.
656 */ 851 */
657 clear_bit(TTY_LDISC, &tty->flags);
658 cancel_delayed_work(&tty->buf.work);
659
660 /*
661 * Wait for ->hangup_work and ->buf.work handlers to terminate
662 */
663 852
853 tty_ldisc_halt(tty);
664 flush_scheduled_work(); 854 flush_scheduled_work();
665 855
666 /* 856 /*
@@ -668,38 +858,19 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
668 * side waiters as the file is closing so user count on the file 858 * side waiters as the file is closing so user count on the file
669 * side is zero. 859 * side is zero.
670 */ 860 */
671 spin_lock_irqsave(&tty_ldisc_lock, flags); 861
672 while (tty->ldisc.refcount) { 862 tty_ldisc_wait_idle(tty);
673 spin_unlock_irqrestore(&tty_ldisc_lock, flags); 863
674 wait_event(tty_ldisc_wait, tty->ldisc.refcount == 0);
675 spin_lock_irqsave(&tty_ldisc_lock, flags);
676 }
677 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
678 /* 864 /*
679 * Shutdown the current line discipline, and reset it to N_TTY. 865 * Shutdown the current line discipline, and reset it to N_TTY.
680 * 866 *
681 * FIXME: this MUST get fixed for the new reflocking 867 * FIXME: this MUST get fixed for the new reflocking
682 */ 868 */
683 if (tty->ldisc.ops->close)
684 (tty->ldisc.ops->close)(tty);
685 tty_ldisc_put(tty->ldisc.ops);
686 869
687 /* 870 tty_ldisc_reinit(tty);
688 * Switch the line discipline back 871 /* This will need doing differently if we need to lock */
689 */ 872 if (o_tty)
690 WARN_ON(tty_ldisc_get(N_TTY, &ld)); 873 tty_ldisc_release(o_tty, NULL);
691 tty_ldisc_assign(tty, &ld);
692 tty_set_termios_ldisc(tty, N_TTY);
693 if (o_tty) {
694 /* FIXME: could o_tty be in setldisc here ? */
695 clear_bit(TTY_LDISC, &o_tty->flags);
696 if (o_tty->ldisc.ops->close)
697 (o_tty->ldisc.ops->close)(o_tty);
698 tty_ldisc_put(o_tty->ldisc.ops);
699 WARN_ON(tty_ldisc_get(N_TTY, &ld));
700 tty_ldisc_assign(o_tty, &ld);
701 tty_set_termios_ldisc(o_tty, N_TTY);
702 }
703} 874}
704 875
705/** 876/**
@@ -712,10 +883,10 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
712 883
713void tty_ldisc_init(struct tty_struct *tty) 884void tty_ldisc_init(struct tty_struct *tty)
714{ 885{
715 struct tty_ldisc ld; 886 struct tty_ldisc *ld = tty_ldisc_get(N_TTY);
716 if (tty_ldisc_get(N_TTY, &ld) < 0) 887 if (IS_ERR(ld))
717 panic("n_tty: init_tty"); 888 panic("n_tty: init_tty");
718 tty_ldisc_assign(tty, &ld); 889 tty_ldisc_assign(tty, ld);
719} 890}
720 891
721void tty_ldisc_begin(void) 892void tty_ldisc_begin(void)
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index 9b8004c7268..62dadfc95e3 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -137,7 +137,7 @@ int tty_port_carrier_raised(struct tty_port *port)
137EXPORT_SYMBOL(tty_port_carrier_raised); 137EXPORT_SYMBOL(tty_port_carrier_raised);
138 138
139/** 139/**
140 * tty_port_raise_dtr_rts - Riase DTR/RTS 140 * tty_port_raise_dtr_rts - Raise DTR/RTS
141 * @port: tty port 141 * @port: tty port
142 * 142 *
143 * Wrapper for the DTR/RTS raise logic. For the moment this is used 143 * Wrapper for the DTR/RTS raise logic. For the moment this is used
@@ -147,12 +147,28 @@ EXPORT_SYMBOL(tty_port_carrier_raised);
147 147
148void tty_port_raise_dtr_rts(struct tty_port *port) 148void tty_port_raise_dtr_rts(struct tty_port *port)
149{ 149{
150 if (port->ops->raise_dtr_rts) 150 if (port->ops->dtr_rts)
151 port->ops->raise_dtr_rts(port); 151 port->ops->dtr_rts(port, 1);
152} 152}
153EXPORT_SYMBOL(tty_port_raise_dtr_rts); 153EXPORT_SYMBOL(tty_port_raise_dtr_rts);
154 154
155/** 155/**
156 * tty_port_lower_dtr_rts - Lower DTR/RTS
157 * @port: tty port
158 *
159 * Wrapper for the DTR/RTS lower logic. For the moment this is used
160 * to hide some internal details. This will eventually become entirely
161 * internal to the tty port.
162 */
163
164void tty_port_lower_dtr_rts(struct tty_port *port)
165{
166 if (port->ops->dtr_rts)
167 port->ops->dtr_rts(port, 0);
168}
169EXPORT_SYMBOL(tty_port_lower_dtr_rts);
170
171/**
156 * tty_port_block_til_ready - Waiting logic for tty open 172 * tty_port_block_til_ready - Waiting logic for tty open
157 * @port: the tty port being opened 173 * @port: the tty port being opened
158 * @tty: the tty device being bound 174 * @tty: the tty device being bound
@@ -167,7 +183,7 @@ EXPORT_SYMBOL(tty_port_raise_dtr_rts);
167 * - port flags and counts 183 * - port flags and counts
168 * 184 *
169 * The passed tty_port must implement the carrier_raised method if it can 185 * The passed tty_port must implement the carrier_raised method if it can
170 * do carrier detect and the raise_dtr_rts method if it supports software 186 * do carrier detect and the dtr_rts method if it supports software
171 * management of these lines. Note that the dtr/rts raise is done each 187 * management of these lines. Note that the dtr/rts raise is done each
172 * iteration as a hangup may have previously dropped them while we wait. 188 * iteration as a hangup may have previously dropped them while we wait.
173 */ 189 */
@@ -182,7 +198,8 @@ int tty_port_block_til_ready(struct tty_port *port,
182 198
183 /* block if port is in the process of being closed */ 199 /* block if port is in the process of being closed */
184 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) { 200 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
185 interruptible_sleep_on(&port->close_wait); 201 wait_event_interruptible(port->close_wait,
202 !(port->flags & ASYNC_CLOSING));
186 if (port->flags & ASYNC_HUP_NOTIFY) 203 if (port->flags & ASYNC_HUP_NOTIFY)
187 return -EAGAIN; 204 return -EAGAIN;
188 else 205 else
@@ -205,7 +222,6 @@ int tty_port_block_til_ready(struct tty_port *port,
205 before the next open may complete */ 222 before the next open may complete */
206 223
207 retval = 0; 224 retval = 0;
208 add_wait_queue(&port->open_wait, &wait);
209 225
210 /* The port lock protects the port counts */ 226 /* The port lock protects the port counts */
211 spin_lock_irqsave(&port->lock, flags); 227 spin_lock_irqsave(&port->lock, flags);
@@ -219,7 +235,7 @@ int tty_port_block_til_ready(struct tty_port *port,
219 if (tty->termios->c_cflag & CBAUD) 235 if (tty->termios->c_cflag & CBAUD)
220 tty_port_raise_dtr_rts(port); 236 tty_port_raise_dtr_rts(port);
221 237
222 set_current_state(TASK_INTERRUPTIBLE); 238 prepare_to_wait(&port->open_wait, &wait, TASK_INTERRUPTIBLE);
223 /* Check for a hangup or uninitialised port. Return accordingly */ 239 /* Check for a hangup or uninitialised port. Return accordingly */
224 if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) { 240 if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) {
225 if (port->flags & ASYNC_HUP_NOTIFY) 241 if (port->flags & ASYNC_HUP_NOTIFY)
@@ -240,8 +256,7 @@ int tty_port_block_til_ready(struct tty_port *port,
240 } 256 }
241 schedule(); 257 schedule();
242 } 258 }
243 set_current_state(TASK_RUNNING); 259 finish_wait(&port->open_wait, &wait);
244 remove_wait_queue(&port->open_wait, &wait);
245 260
246 /* Update counts. A parallel hangup will have set count to zero and 261 /* Update counts. A parallel hangup will have set count to zero and
247 we must not mess that up further */ 262 we must not mess that up further */
@@ -292,6 +307,17 @@ int tty_port_close_start(struct tty_port *port, struct tty_struct *tty, struct f
292 if (port->flags & ASYNC_INITIALIZED && 307 if (port->flags & ASYNC_INITIALIZED &&
293 port->closing_wait != ASYNC_CLOSING_WAIT_NONE) 308 port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
294 tty_wait_until_sent(tty, port->closing_wait); 309 tty_wait_until_sent(tty, port->closing_wait);
310 if (port->drain_delay) {
311 unsigned int bps = tty_get_baud_rate(tty);
312 long timeout;
313
314 if (bps > 1200)
315 timeout = max_t(long, (HZ * 10 * port->drain_delay) / bps,
316 HZ / 10);
317 else
318 timeout = 2 * HZ;
319 schedule_timeout_interruptible(timeout);
320 }
295 return 1; 321 return 1;
296} 322}
297EXPORT_SYMBOL(tty_port_close_start); 323EXPORT_SYMBOL(tty_port_close_start);
@@ -302,6 +328,9 @@ void tty_port_close_end(struct tty_port *port, struct tty_struct *tty)
302 328
303 tty_ldisc_flush(tty); 329 tty_ldisc_flush(tty);
304 330
331 if (tty->termios->c_cflag & HUPCL)
332 tty_port_lower_dtr_rts(port);
333
305 spin_lock_irqsave(&port->lock, flags); 334 spin_lock_irqsave(&port->lock, flags);
306 tty->closing = 0; 335 tty->closing = 0;
307 336
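
The drain delay added to tty_port_close_start() gives the UART time to push out its FIFO before the port is torn down: port->drain_delay is a character count, and at roughly ten bit times per character (start + eight data + stop) the wait is 10*chars/bps seconds, floored at HZ/10 jiffies; links at 1200 bps and below simply wait two seconds. A minimal userspace sketch of the same arithmetic (HZ and the sample rates below are assumptions, not values from the patch):

    #include <stdio.h>

    #define HZ 250  /* assumed tick rate, for illustration only */

    /* Mirrors the tty_port drain wait: drain_delay is a character count;
     * at ~10 bit times per character the drain takes 10*chars/bps seconds,
     * with a floor of HZ/10 jiffies; slow links just wait two seconds. */
    static long drain_timeout(unsigned int bps, unsigned int drain_delay)
    {
            long t;

            if (bps > 1200) {
                    t = (long)HZ * 10 * drain_delay / bps;
                    if (t < HZ / 10)
                            t = HZ / 10;
            } else {
                    t = 2 * HZ;
            }
            return t;
    }

    int main(void)
    {
            printf("115200 bps, 256 chars: %ld jiffies\n",
                   drain_timeout(115200, 256));
            printf("   300 bps,  16 chars: %ld jiffies\n",
                   drain_timeout(300, 16));
            return 0;
    }
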
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index ff6f5a4b58f..c74dacfa679 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -188,6 +188,9 @@ static void hvc_handle_input(struct virtqueue *vq)
188 * Finally we put our input buffer in the input queue, ready to receive. */ 188 * Finally we put our input buffer in the input queue, ready to receive. */
189static int __devinit virtcons_probe(struct virtio_device *dev) 189static int __devinit virtcons_probe(struct virtio_device *dev)
190{ 190{
191 vq_callback_t *callbacks[] = { hvc_handle_input, NULL};
192 const char *names[] = { "input", "output" };
193 struct virtqueue *vqs[2];
191 int err; 194 int err;
192 195
193 vdev = dev; 196 vdev = dev;
@@ -199,20 +202,15 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
199 goto fail; 202 goto fail;
200 } 203 }
201 204
202 /* Find the input queue. */ 205 /* Find the queues. */
203 /* FIXME: This is why we want to wean off hvc: we do nothing 206 /* FIXME: This is why we want to wean off hvc: we do nothing
204 * when input comes in. */ 207 * when input comes in. */
205 in_vq = vdev->config->find_vq(vdev, 0, hvc_handle_input); 208 err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
206 if (IS_ERR(in_vq)) { 209 if (err)
207 err = PTR_ERR(in_vq);
208 goto free; 210 goto free;
209 }
210 211
211 out_vq = vdev->config->find_vq(vdev, 1, NULL); 212 in_vq = vqs[0];
212 if (IS_ERR(out_vq)) { 213 out_vq = vqs[1];
213 err = PTR_ERR(out_vq);
214 goto free_in_vq;
215 }
216 214
217 /* Start using the new console output. */ 215 /* Start using the new console output. */
218 virtio_cons.get_chars = get_chars; 216 virtio_cons.get_chars = get_chars;
@@ -233,17 +231,15 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
233 hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE); 231 hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE);
234 if (IS_ERR(hvc)) { 232 if (IS_ERR(hvc)) {
235 err = PTR_ERR(hvc); 233 err = PTR_ERR(hvc);
236 goto free_out_vq; 234 goto free_vqs;
237 } 235 }
238 236
239 /* Register the input buffer the first time. */ 237 /* Register the input buffer the first time. */
240 add_inbuf(); 238 add_inbuf();
241 return 0; 239 return 0;
242 240
243free_out_vq: 241free_vqs:
244 vdev->config->del_vq(out_vq); 242 vdev->config->del_vqs(vdev);
245free_in_vq:
246 vdev->config->del_vq(in_vq);
247free: 243free:
248 kfree(inbuf); 244 kfree(inbuf);
249fail: 245fail:
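
The virtio_console conversion swaps two find_vq() calls for one find_vqs(), which sets up all queues in a batch; on failure there is no partial per-queue state to unwind (the driver above just falls through to its free path), and teardown collapses to a single del_vqs(). A sketch of the pattern as the API stands at this point in the series (the demo names are illustrative):

    #include <linux/virtio.h>
    #include <linux/virtio_config.h>

    static struct virtqueue *in_vq, *out_vq;

    static void demo_input_ready(struct virtqueue *vq)
    {
            /* consume buffers from the input queue */
    }

    static int demo_find_queues(struct virtio_device *vdev)
    {
            vq_callback_t *callbacks[] = { demo_input_ready, NULL };
            const char *names[] = { "input", "output" };
            struct virtqueue *vqs[2];
            int err;

            /* one call allocates both queues, or neither */
            err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
            if (err)
                    return err;     /* no partial state to clean up */

            in_vq  = vqs[0];
            out_vq = vqs[1];
            return 0;
    }
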
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 08151d4de48..c796a86ab7f 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -95,7 +95,6 @@
95#include <linux/timer.h> 95#include <linux/timer.h>
96#include <linux/interrupt.h> 96#include <linux/interrupt.h>
97#include <linux/workqueue.h> 97#include <linux/workqueue.h>
98#include <linux/bootmem.h>
99#include <linux/pm.h> 98#include <linux/pm.h>
100#include <linux/font.h> 99#include <linux/font.h>
101#include <linux/bitops.h> 100#include <linux/bitops.h>
@@ -2875,14 +2874,11 @@ static int __init con_init(void)
2875 mod_timer(&console_timer, jiffies + blankinterval); 2874 mod_timer(&console_timer, jiffies + blankinterval);
2876 } 2875 }
2877 2876
2878 /*
2879 * kmalloc is not running yet - we use the bootmem allocator.
2880 */
2881 for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) { 2877 for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) {
2882 vc_cons[currcons].d = vc = alloc_bootmem(sizeof(struct vc_data)); 2878 vc_cons[currcons].d = vc = kzalloc(sizeof(struct vc_data), GFP_NOWAIT);
2883 INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); 2879 INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
2884 visual_init(vc, currcons, 1); 2880 visual_init(vc, currcons, 1);
2885 vc->vc_screenbuf = (unsigned short *)alloc_bootmem(vc->vc_screenbuf_size); 2881 vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT);
2886 vc->vc_kmalloced = 0; 2882 vc->vc_kmalloced = 0;
2887 vc_init(vc, vc->vc_rows, vc->vc_cols, 2883 vc_init(vc, vc->vc_rows, vc->vc_cols,
2888 currcons || !vc->vc_sw->con_save_screen); 2884 currcons || !vc->vc_sw->con_save_screen);
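
The bootmem calls in con_init() become kzalloc(..., GFP_NOWAIT) rather than GFP_KERNEL because this path runs before sleeping is allowed: slab is already usable this early in boot, and GFP_NOWAIT asks it to fail rather than block. A hedged sketch of the idiom (the helper and its warning are illustrative, not part of the patch):

    #include <linux/slab.h>

    /* Early-boot allocation: slab is up, but we must not sleep, so ask
     * for memory with GFP_NOWAIT and tolerate failure explicitly. */
    static void *early_zalloc(size_t size)
    {
            void *p = kzalloc(size, GFP_NOWAIT);

            if (!p)
                    pr_warning("early_zalloc: %zu bytes unavailable\n", size);
            return p;
    }
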
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 1efb2879a94..eef216f7f61 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -3,3 +3,5 @@ obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o
3obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o 3obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
4obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o 4obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
5obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o 5obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
6obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
7obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 1c92c39a53a..cf56a2af5fe 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -18,7 +18,6 @@
18 */ 18 */
19 19
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/bootmem.h>
22#include <linux/platform_device.h> 21#include <linux/platform_device.h>
23#include <linux/spinlock.h> 22#include <linux/spinlock.h>
24#include <linux/interrupt.h> 23#include <linux/interrupt.h>
@@ -29,7 +28,7 @@
29#include <linux/err.h> 28#include <linux/err.h>
30#include <linux/clocksource.h> 29#include <linux/clocksource.h>
31#include <linux/clockchips.h> 30#include <linux/clockchips.h>
32#include <linux/sh_cmt.h> 31#include <linux/sh_timer.h>
33 32
34struct sh_cmt_priv { 33struct sh_cmt_priv {
35 void __iomem *mapbase; 34 void __iomem *mapbase;
@@ -47,6 +46,7 @@ struct sh_cmt_priv {
47 unsigned long rate; 46 unsigned long rate;
48 spinlock_t lock; 47 spinlock_t lock;
49 struct clock_event_device ced; 48 struct clock_event_device ced;
49 struct clocksource cs;
50 unsigned long total_cycles; 50 unsigned long total_cycles;
51}; 51};
52 52
@@ -59,7 +59,7 @@ static DEFINE_SPINLOCK(sh_cmt_lock);
59 59
60static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr) 60static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
61{ 61{
62 struct sh_cmt_config *cfg = p->pdev->dev.platform_data; 62 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
63 void __iomem *base = p->mapbase; 63 void __iomem *base = p->mapbase;
64 unsigned long offs; 64 unsigned long offs;
65 65
@@ -83,7 +83,7 @@ static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
83static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr, 83static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr,
84 unsigned long value) 84 unsigned long value)
85{ 85{
86 struct sh_cmt_config *cfg = p->pdev->dev.platform_data; 86 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
87 void __iomem *base = p->mapbase; 87 void __iomem *base = p->mapbase;
88 unsigned long offs; 88 unsigned long offs;
89 89
@@ -110,23 +110,28 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
110 int *has_wrapped) 110 int *has_wrapped)
111{ 111{
112 unsigned long v1, v2, v3; 112 unsigned long v1, v2, v3;
113 int o1, o2;
114
115 o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
113 116
114 /* Make sure the timer value is stable. Stolen from acpi_pm.c */ 117 /* Make sure the timer value is stable. Stolen from acpi_pm.c */
115 do { 118 do {
119 o2 = o1;
116 v1 = sh_cmt_read(p, CMCNT); 120 v1 = sh_cmt_read(p, CMCNT);
117 v2 = sh_cmt_read(p, CMCNT); 121 v2 = sh_cmt_read(p, CMCNT);
118 v3 = sh_cmt_read(p, CMCNT); 122 v3 = sh_cmt_read(p, CMCNT);
119 } while (unlikely((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1) 123 o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
120 || (v3 > v1 && v3 < v2))); 124 } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
125 || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
121 126
122 *has_wrapped = sh_cmt_read(p, CMCSR) & p->overflow_bit; 127 *has_wrapped = o1;
123 return v2; 128 return v2;
124} 129}
125 130
126 131
127static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start) 132static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
128{ 133{
129 struct sh_cmt_config *cfg = p->pdev->dev.platform_data; 134 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
130 unsigned long flags, value; 135 unsigned long flags, value;
131 136
132 /* start stop register shared by multiple timer channels */ 137 /* start stop register shared by multiple timer channels */
@@ -144,7 +149,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
144 149
145static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) 150static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
146{ 151{
147 struct sh_cmt_config *cfg = p->pdev->dev.platform_data; 152 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
148 int ret; 153 int ret;
149 154
150 /* enable clock */ 155 /* enable clock */
@@ -153,16 +158,18 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
153 pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk); 158 pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk);
154 return ret; 159 return ret;
155 } 160 }
156 *rate = clk_get_rate(p->clk) / 8;
157 161
158 /* make sure channel is disabled */ 162 /* make sure channel is disabled */
159 sh_cmt_start_stop_ch(p, 0); 163 sh_cmt_start_stop_ch(p, 0);
160 164
161 /* configure channel, periodic mode and maximum timeout */ 165 /* configure channel, periodic mode and maximum timeout */
162 if (p->width == 16) 166 if (p->width == 16) {
163 sh_cmt_write(p, CMCSR, 0); 167 *rate = clk_get_rate(p->clk) / 512;
164 else 168 sh_cmt_write(p, CMCSR, 0x43);
169 } else {
170 *rate = clk_get_rate(p->clk) / 8;
165 sh_cmt_write(p, CMCSR, 0x01a4); 171 sh_cmt_write(p, CMCSR, 0x01a4);
172 }
166 173
167 sh_cmt_write(p, CMCOR, 0xffffffff); 174 sh_cmt_write(p, CMCOR, 0xffffffff);
168 sh_cmt_write(p, CMCNT, 0); 175 sh_cmt_write(p, CMCNT, 0);
@@ -376,6 +383,68 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
376 spin_unlock_irqrestore(&p->lock, flags); 383 spin_unlock_irqrestore(&p->lock, flags);
377} 384}
378 385
386static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
387{
388 return container_of(cs, struct sh_cmt_priv, cs);
389}
390
391static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
392{
393 struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
394 unsigned long flags, raw;
395 unsigned long value;
396 int has_wrapped;
397
398 spin_lock_irqsave(&p->lock, flags);
399 value = p->total_cycles;
400 raw = sh_cmt_get_counter(p, &has_wrapped);
401
402 if (unlikely(has_wrapped))
403 raw += p->match_value;
404 spin_unlock_irqrestore(&p->lock, flags);
405
406 return value + raw;
407}
408
409static int sh_cmt_clocksource_enable(struct clocksource *cs)
410{
411 struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
412 int ret;
413
414 p->total_cycles = 0;
415
416 ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
417 if (ret)
418 return ret;
419
420 /* TODO: calculate good shift from rate and counter bit width */
421 cs->shift = 0;
422 cs->mult = clocksource_hz2mult(p->rate, cs->shift);
423 return 0;
424}
425
426static void sh_cmt_clocksource_disable(struct clocksource *cs)
427{
428 sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
429}
430
431static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
432 char *name, unsigned long rating)
433{
434 struct clocksource *cs = &p->cs;
435
436 cs->name = name;
437 cs->rating = rating;
438 cs->read = sh_cmt_clocksource_read;
439 cs->enable = sh_cmt_clocksource_enable;
440 cs->disable = sh_cmt_clocksource_disable;
441 cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
442 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
443 pr_info("sh_cmt: %s used as clock source\n", cs->name);
444 clocksource_register(cs);
445 return 0;
446}
447
379static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced) 448static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
380{ 449{
381 return container_of(ced, struct sh_cmt_priv, ced); 450 return container_of(ced, struct sh_cmt_priv, ced);
@@ -468,9 +537,9 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
468 clockevents_register_device(ced); 537 clockevents_register_device(ced);
469} 538}
470 539
471int sh_cmt_register(struct sh_cmt_priv *p, char *name, 540static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
472 unsigned long clockevent_rating, 541 unsigned long clockevent_rating,
473 unsigned long clocksource_rating) 542 unsigned long clocksource_rating)
474{ 543{
475 if (p->width == (sizeof(p->max_match_value) * 8)) 544 if (p->width == (sizeof(p->max_match_value) * 8))
476 p->max_match_value = ~0; 545 p->max_match_value = ~0;
@@ -483,12 +552,15 @@ int sh_cmt_register(struct sh_cmt_priv *p, char *name,
483 if (clockevent_rating) 552 if (clockevent_rating)
484 sh_cmt_register_clockevent(p, name, clockevent_rating); 553 sh_cmt_register_clockevent(p, name, clockevent_rating);
485 554
555 if (clocksource_rating)
556 sh_cmt_register_clocksource(p, name, clocksource_rating);
557
486 return 0; 558 return 0;
487} 559}
488 560
489static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) 561static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
490{ 562{
491 struct sh_cmt_config *cfg = pdev->dev.platform_data; 563 struct sh_timer_config *cfg = pdev->dev.platform_data;
492 struct resource *res; 564 struct resource *res;
493 int irq, ret; 565 int irq, ret;
494 ret = -ENXIO; 566 ret = -ENXIO;
@@ -545,7 +617,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
545 if (resource_size(res) == 6) { 617 if (resource_size(res) == 6) {
546 p->width = 16; 618 p->width = 16;
547 p->overflow_bit = 0x80; 619 p->overflow_bit = 0x80;
548 p->clear_bits = ~0xc0; 620 p->clear_bits = ~0x80;
549 } else { 621 } else {
550 p->width = 32; 622 p->width = 32;
551 p->overflow_bit = 0x8000; 623 p->overflow_bit = 0x8000;
@@ -566,8 +638,14 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
566static int __devinit sh_cmt_probe(struct platform_device *pdev) 638static int __devinit sh_cmt_probe(struct platform_device *pdev)
567{ 639{
568 struct sh_cmt_priv *p = platform_get_drvdata(pdev); 640 struct sh_cmt_priv *p = platform_get_drvdata(pdev);
641 struct sh_timer_config *cfg = pdev->dev.platform_data;
569 int ret; 642 int ret;
570 643
644 if (p) {
645 pr_info("sh_cmt: %s kept as earlytimer\n", cfg->name);
646 return 0;
647 }
648
571 p = kmalloc(sizeof(*p), GFP_KERNEL); 649 p = kmalloc(sizeof(*p), GFP_KERNEL);
572 if (p == NULL) { 650 if (p == NULL) {
573 dev_err(&pdev->dev, "failed to allocate driver data\n"); 651 dev_err(&pdev->dev, "failed to allocate driver data\n");
@@ -577,7 +655,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
577 ret = sh_cmt_setup(p, pdev); 655 ret = sh_cmt_setup(p, pdev);
578 if (ret) { 656 if (ret) {
579 kfree(p); 657 kfree(p);
580
581 platform_set_drvdata(pdev, NULL); 658 platform_set_drvdata(pdev, NULL);
582 } 659 }
583 return ret; 660 return ret;
@@ -606,6 +683,7 @@ static void __exit sh_cmt_exit(void)
606 platform_driver_unregister(&sh_cmt_device_driver); 683 platform_driver_unregister(&sh_cmt_device_driver);
607} 684}
608 685
686early_platform_init("earlytimer", &sh_cmt_device_driver);
609module_init(sh_cmt_init); 687module_init(sh_cmt_init);
610module_exit(sh_cmt_exit); 688module_exit(sh_cmt_exit);
611 689
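
The clocksource half registered above converts counter ticks to nanoseconds as ns = (cycles * mult) >> shift, with mult derived from the counter rate by clocksource_hz2mult(); the driver's TODO about choosing a good shift shows up directly in the numbers, since shift = 0 truncates mult to a coarse integer. A worked example (the 4.125 MHz rate is an assumed sample value):

    #include <stdio.h>
    #include <stdint.h>

    /* Same rounding as clocksource_hz2mult():
     * mult = ((1e9 << shift) + hz/2) / hz. */
    static uint32_t hz2mult(uint32_t hz, uint32_t shift)
    {
            uint64_t tmp = ((uint64_t)1000000000 << shift) + hz / 2;

            return (uint32_t)(tmp / hz);
    }

    int main(void)
    {
            uint32_t rate = 4125000;        /* assumed counter rate */
            uint32_t mult = hz2mult(rate, 0);
            uint64_t one_second = (uint64_t)rate * mult >> 0;

            /* With shift = 0 mult truncates to 242, so a second of ticks
             * reads back as ~998250000 ns; a larger shift shrinks that
             * error, which is what the TODO is about. */
            printf("mult=%u, 1s of ticks -> %llu ns\n",
                   mult, (unsigned long long)one_second);
            return 0;
    }
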
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
new file mode 100644
index 00000000000..d1ae75454d1
--- /dev/null
+++ b/drivers/clocksource/sh_mtu2.c
@@ -0,0 +1,357 @@
1/*
2 * SuperH Timer Support - MTU2
3 *
4 * Copyright (C) 2009 Magnus Damm
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/init.h>
21#include <linux/platform_device.h>
22#include <linux/spinlock.h>
23#include <linux/interrupt.h>
24#include <linux/ioport.h>
25#include <linux/delay.h>
26#include <linux/io.h>
27#include <linux/clk.h>
28#include <linux/irq.h>
29#include <linux/err.h>
30#include <linux/clockchips.h>
31#include <linux/sh_timer.h>
32
33struct sh_mtu2_priv {
34 void __iomem *mapbase;
35 struct clk *clk;
36 struct irqaction irqaction;
37 struct platform_device *pdev;
38 unsigned long rate;
39 unsigned long periodic;
40 struct clock_event_device ced;
41};
42
43static DEFINE_SPINLOCK(sh_mtu2_lock);
44
45#define TSTR -1 /* shared register */
46#define TCR 0 /* channel register */
47#define TMDR 1 /* channel register */
48#define TIOR 2 /* channel register */
49#define TIER 3 /* channel register */
50#define TSR 4 /* channel register */
51#define TCNT 5 /* channel register */
52#define TGR 6 /* channel register */
53
54static unsigned long mtu2_reg_offs[] = {
55 [TCR] = 0,
56 [TMDR] = 1,
57 [TIOR] = 2,
58 [TIER] = 4,
59 [TSR] = 5,
60 [TCNT] = 6,
61 [TGR] = 8,
62};
63
64static inline unsigned long sh_mtu2_read(struct sh_mtu2_priv *p, int reg_nr)
65{
66 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
67 void __iomem *base = p->mapbase;
68 unsigned long offs;
69
70 if (reg_nr == TSTR)
71 return ioread8(base + cfg->channel_offset);
72
73 offs = mtu2_reg_offs[reg_nr];
74
75 if ((reg_nr == TCNT) || (reg_nr == TGR))
76 return ioread16(base + offs);
77 else
78 return ioread8(base + offs);
79}
80
81static inline void sh_mtu2_write(struct sh_mtu2_priv *p, int reg_nr,
82 unsigned long value)
83{
84 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
85 void __iomem *base = p->mapbase;
86 unsigned long offs;
87
88 if (reg_nr == TSTR) {
89 iowrite8(value, base + cfg->channel_offset);
90 return;
91 }
92
93 offs = mtu2_reg_offs[reg_nr];
94
95 if ((reg_nr == TCNT) || (reg_nr == TGR))
96 iowrite16(value, base + offs);
97 else
98 iowrite8(value, base + offs);
99}
100
101static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
102{
103 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
104 unsigned long flags, value;
105
106 /* start stop register shared by multiple timer channels */
107 spin_lock_irqsave(&sh_mtu2_lock, flags);
108 value = sh_mtu2_read(p, TSTR);
109
110 if (start)
111 value |= 1 << cfg->timer_bit;
112 else
113 value &= ~(1 << cfg->timer_bit);
114
115 sh_mtu2_write(p, TSTR, value);
116 spin_unlock_irqrestore(&sh_mtu2_lock, flags);
117}
118
119static int sh_mtu2_enable(struct sh_mtu2_priv *p)
120{
121 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
122 int ret;
123
124 /* enable clock */
125 ret = clk_enable(p->clk);
126 if (ret) {
127 pr_err("sh_mtu2: cannot enable clock \"%s\"\n", cfg->clk);
128 return ret;
129 }
130
131 /* make sure channel is disabled */
132 sh_mtu2_start_stop_ch(p, 0);
133
134 p->rate = clk_get_rate(p->clk) / 64;
135 p->periodic = (p->rate + HZ/2) / HZ;
136
137 /* "Periodic Counter Operation" */
138 sh_mtu2_write(p, TCR, 0x23); /* TGRA clear, divide clock by 64 */
139 sh_mtu2_write(p, TIOR, 0);
140 sh_mtu2_write(p, TGR, p->periodic);
141 sh_mtu2_write(p, TCNT, 0);
142 sh_mtu2_write(p, TMDR, 0);
143 sh_mtu2_write(p, TIER, 0x01);
144
145 /* enable channel */
146 sh_mtu2_start_stop_ch(p, 1);
147
148 return 0;
149}
150
151static void sh_mtu2_disable(struct sh_mtu2_priv *p)
152{
153 /* disable channel */
154 sh_mtu2_start_stop_ch(p, 0);
155
156 /* stop clock */
157 clk_disable(p->clk);
158}
159
160static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
161{
162 struct sh_mtu2_priv *p = dev_id;
163
164 /* acknowledge interrupt */
165 sh_mtu2_read(p, TSR);
166 sh_mtu2_write(p, TSR, 0xfe);
167
168 /* notify clockevent layer */
169 p->ced.event_handler(&p->ced);
170 return IRQ_HANDLED;
171}
172
173static struct sh_mtu2_priv *ced_to_sh_mtu2(struct clock_event_device *ced)
174{
175 return container_of(ced, struct sh_mtu2_priv, ced);
176}
177
178static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
179 struct clock_event_device *ced)
180{
181 struct sh_mtu2_priv *p = ced_to_sh_mtu2(ced);
182 int disabled = 0;
183
184 /* deal with old setting first */
185 switch (ced->mode) {
186 case CLOCK_EVT_MODE_PERIODIC:
187 sh_mtu2_disable(p);
188 disabled = 1;
189 break;
190 default:
191 break;
192 }
193
194 switch (mode) {
195 case CLOCK_EVT_MODE_PERIODIC:
196 pr_info("sh_mtu2: %s used for periodic clock events\n",
197 ced->name);
198 sh_mtu2_enable(p);
199 break;
200 case CLOCK_EVT_MODE_UNUSED:
201 if (!disabled)
202 sh_mtu2_disable(p);
203 break;
204 case CLOCK_EVT_MODE_SHUTDOWN:
205 default:
206 break;
207 }
208}
209
210static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
211 char *name, unsigned long rating)
212{
213 struct clock_event_device *ced = &p->ced;
214 int ret;
215
216 memset(ced, 0, sizeof(*ced));
217
218 ced->name = name;
219 ced->features = CLOCK_EVT_FEAT_PERIODIC;
220 ced->rating = rating;
221 ced->cpumask = cpumask_of(0);
222 ced->set_mode = sh_mtu2_clock_event_mode;
223
224 ret = setup_irq(p->irqaction.irq, &p->irqaction);
225 if (ret) {
226 pr_err("sh_mtu2: failed to request irq %d\n",
227 p->irqaction.irq);
228 return;
229 }
230
231 pr_info("sh_mtu2: %s used for clock events\n", ced->name);
232 clockevents_register_device(ced);
233}
234
235static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name,
236 unsigned long clockevent_rating)
237{
238 if (clockevent_rating)
239 sh_mtu2_register_clockevent(p, name, clockevent_rating);
240
241 return 0;
242}
243
244static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
245{
246 struct sh_timer_config *cfg = pdev->dev.platform_data;
247 struct resource *res;
248 int irq, ret;
249 ret = -ENXIO;
250
251 memset(p, 0, sizeof(*p));
252 p->pdev = pdev;
253
254 if (!cfg) {
255 dev_err(&p->pdev->dev, "missing platform data\n");
256 goto err0;
257 }
258
259 platform_set_drvdata(pdev, p);
260
261 res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
262 if (!res) {
263 dev_err(&p->pdev->dev, "failed to get I/O memory\n");
264 goto err0;
265 }
266
267 irq = platform_get_irq(p->pdev, 0);
268 if (irq < 0) {
269 dev_err(&p->pdev->dev, "failed to get irq\n");
270 goto err0;
271 }
272
273 /* map memory, let mapbase point to our channel */
274 p->mapbase = ioremap_nocache(res->start, resource_size(res));
275 if (p->mapbase == NULL) {
276 pr_err("sh_mtu2: failed to remap I/O memory\n");
277 goto err0;
278 }
279
280 /* setup data for setup_irq() (too early for request_irq()) */
281 p->irqaction.name = cfg->name;
282 p->irqaction.handler = sh_mtu2_interrupt;
283 p->irqaction.dev_id = p;
284 p->irqaction.irq = irq;
285 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
286 p->irqaction.mask = CPU_MASK_NONE;
287
288 /* get hold of clock */
289 p->clk = clk_get(&p->pdev->dev, cfg->clk);
290 if (IS_ERR(p->clk)) {
291 pr_err("sh_mtu2: cannot get clock \"%s\"\n", cfg->clk);
292 ret = PTR_ERR(p->clk);
293 goto err1;
294 }
295
296 return sh_mtu2_register(p, cfg->name, cfg->clockevent_rating);
297 err1:
298 iounmap(p->mapbase);
299 err0:
300 return ret;
301}
302
303static int __devinit sh_mtu2_probe(struct platform_device *pdev)
304{
305 struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
306 struct sh_timer_config *cfg = pdev->dev.platform_data;
307 int ret;
308
309 if (p) {
310 pr_info("sh_mtu2: %s kept as earlytimer\n", cfg->name);
311 return 0;
312 }
313
314 p = kmalloc(sizeof(*p), GFP_KERNEL);
315 if (p == NULL) {
316 dev_err(&pdev->dev, "failed to allocate driver data\n");
317 return -ENOMEM;
318 }
319
320 ret = sh_mtu2_setup(p, pdev);
321 if (ret) {
322 kfree(p);
323 platform_set_drvdata(pdev, NULL);
324 }
325 return ret;
326}
327
328static int __devexit sh_mtu2_remove(struct platform_device *pdev)
329{
330 return -EBUSY; /* cannot unregister clockevent */
331}
332
333static struct platform_driver sh_mtu2_device_driver = {
334 .probe = sh_mtu2_probe,
335 .remove = __devexit_p(sh_mtu2_remove),
336 .driver = {
337 .name = "sh_mtu2",
338 }
339};
340
341static int __init sh_mtu2_init(void)
342{
343 return platform_driver_register(&sh_mtu2_device_driver);
344}
345
346static void __exit sh_mtu2_exit(void)
347{
348 platform_driver_unregister(&sh_mtu2_device_driver);
349}
350
351early_platform_init("earlytimer", &sh_mtu2_device_driver);
352module_init(sh_mtu2_init);
353module_exit(sh_mtu2_exit);
354
355MODULE_AUTHOR("Magnus Damm");
356MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
357MODULE_LICENSE("GPL v2");
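
Both new drivers share one structural quirk: the start/stop register (TSTR) is common to all timer channels, so the per-channel bit flip in sh_mtu2_start_stop_ch() has to be a locked read-modify-write; an unserialized iowrite8() would let two channels racing on the register lose each other's updates. A kernel-style sketch of the pattern (names are illustrative):

    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <linux/io.h>

    static DEFINE_SPINLOCK(demo_tstr_lock);

    /* One register holds a start bit per channel: serialize the RMW. */
    static void demo_start_stop(void __iomem *tstr, int bit, int start)
    {
            unsigned long flags;
            u8 value;

            spin_lock_irqsave(&demo_tstr_lock, flags);
            value = ioread8(tstr);
            if (start)
                    value |= 1 << bit;
            else
                    value &= ~(1 << bit);
            iowrite8(value, tstr);
            spin_unlock_irqrestore(&demo_tstr_lock, flags);
    }
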
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
new file mode 100644
index 00000000000..d6ea4398bf6
--- /dev/null
+++ b/drivers/clocksource/sh_tmu.c
@@ -0,0 +1,461 @@
1/*
2 * SuperH Timer Support - TMU
3 *
4 * Copyright (C) 2009 Magnus Damm
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/init.h>
21#include <linux/platform_device.h>
22#include <linux/spinlock.h>
23#include <linux/interrupt.h>
24#include <linux/ioport.h>
25#include <linux/delay.h>
26#include <linux/io.h>
27#include <linux/clk.h>
28#include <linux/irq.h>
29#include <linux/err.h>
30#include <linux/clocksource.h>
31#include <linux/clockchips.h>
32#include <linux/sh_timer.h>
33
34struct sh_tmu_priv {
35 void __iomem *mapbase;
36 struct clk *clk;
37 struct irqaction irqaction;
38 struct platform_device *pdev;
39 unsigned long rate;
40 unsigned long periodic;
41 struct clock_event_device ced;
42 struct clocksource cs;
43};
44
45static DEFINE_SPINLOCK(sh_tmu_lock);
46
47#define TSTR -1 /* shared register */
48#define TCOR 0 /* channel register */
49#define TCNT 1 /* channel register */
50#define TCR 2 /* channel register */
51
52static inline unsigned long sh_tmu_read(struct sh_tmu_priv *p, int reg_nr)
53{
54 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
55 void __iomem *base = p->mapbase;
56 unsigned long offs;
57
58 if (reg_nr == TSTR)
59 return ioread8(base - cfg->channel_offset);
60
61 offs = reg_nr << 2;
62
63 if (reg_nr == TCR)
64 return ioread16(base + offs);
65 else
66 return ioread32(base + offs);
67}
68
69static inline void sh_tmu_write(struct sh_tmu_priv *p, int reg_nr,
70 unsigned long value)
71{
72 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
73 void __iomem *base = p->mapbase;
74 unsigned long offs;
75
76 if (reg_nr == TSTR) {
77 iowrite8(value, base - cfg->channel_offset);
78 return;
79 }
80
81 offs = reg_nr << 2;
82
83 if (reg_nr == TCR)
84 iowrite16(value, base + offs);
85 else
86 iowrite32(value, base + offs);
87}
88
89static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
90{
91 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
92 unsigned long flags, value;
93
94 /* start stop register shared by multiple timer channels */
95 spin_lock_irqsave(&sh_tmu_lock, flags);
96 value = sh_tmu_read(p, TSTR);
97
98 if (start)
99 value |= 1 << cfg->timer_bit;
100 else
101 value &= ~(1 << cfg->timer_bit);
102
103 sh_tmu_write(p, TSTR, value);
104 spin_unlock_irqrestore(&sh_tmu_lock, flags);
105}
106
107static int sh_tmu_enable(struct sh_tmu_priv *p)
108{
109 struct sh_timer_config *cfg = p->pdev->dev.platform_data;
110 int ret;
111
112 /* enable clock */
113 ret = clk_enable(p->clk);
114 if (ret) {
115 pr_err("sh_tmu: cannot enable clock \"%s\"\n", cfg->clk);
116 return ret;
117 }
118
119 /* make sure channel is disabled */
120 sh_tmu_start_stop_ch(p, 0);
121
122 /* maximum timeout */
123 sh_tmu_write(p, TCOR, 0xffffffff);
124 sh_tmu_write(p, TCNT, 0xffffffff);
125
126 /* configure channel to parent clock / 4, irq off */
127 p->rate = clk_get_rate(p->clk) / 4;
128 sh_tmu_write(p, TCR, 0x0000);
129
130 /* enable channel */
131 sh_tmu_start_stop_ch(p, 1);
132
133 return 0;
134}
135
136static void sh_tmu_disable(struct sh_tmu_priv *p)
137{
138 /* disable channel */
139 sh_tmu_start_stop_ch(p, 0);
140
141 /* stop clock */
142 clk_disable(p->clk);
143}
144
145static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
146 int periodic)
147{
148 /* stop timer */
149 sh_tmu_start_stop_ch(p, 0);
150
151 /* acknowledge interrupt */
152 sh_tmu_read(p, TCR);
153
154 /* enable interrupt */
155 sh_tmu_write(p, TCR, 0x0020);
156
157 /* reload delta value in case of periodic timer */
158 if (periodic)
159 sh_tmu_write(p, TCOR, delta);
160 else
161 sh_tmu_write(p, TCOR, 0);
162
163 sh_tmu_write(p, TCNT, delta);
164
165 /* start timer */
166 sh_tmu_start_stop_ch(p, 1);
167}
168
169static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
170{
171 struct sh_tmu_priv *p = dev_id;
172
173 /* disable or acknowledge interrupt */
174 if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT)
175 sh_tmu_write(p, TCR, 0x0000);
176 else
177 sh_tmu_write(p, TCR, 0x0020);
178
179 /* notify clockevent layer */
180 p->ced.event_handler(&p->ced);
181 return IRQ_HANDLED;
182}
183
184static struct sh_tmu_priv *cs_to_sh_tmu(struct clocksource *cs)
185{
186 return container_of(cs, struct sh_tmu_priv, cs);
187}
188
189static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
190{
191 struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
192
193 return sh_tmu_read(p, TCNT) ^ 0xffffffff;
194}
195
196static int sh_tmu_clocksource_enable(struct clocksource *cs)
197{
198 struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
199 int ret;
200
201 ret = sh_tmu_enable(p);
202 if (ret)
203 return ret;
204
205 /* TODO: calculate good shift from rate and counter bit width */
206 cs->shift = 10;
207 cs->mult = clocksource_hz2mult(p->rate, cs->shift);
208 return 0;
209}
210
211static void sh_tmu_clocksource_disable(struct clocksource *cs)
212{
213 sh_tmu_disable(cs_to_sh_tmu(cs));
214}
215
216static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
217 char *name, unsigned long rating)
218{
219 struct clocksource *cs = &p->cs;
220
221 cs->name = name;
222 cs->rating = rating;
223 cs->read = sh_tmu_clocksource_read;
224 cs->enable = sh_tmu_clocksource_enable;
225 cs->disable = sh_tmu_clocksource_disable;
226 cs->mask = CLOCKSOURCE_MASK(32);
227 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
228 pr_info("sh_tmu: %s used as clock source\n", cs->name);
229 clocksource_register(cs);
230 return 0;
231}
232
233static struct sh_tmu_priv *ced_to_sh_tmu(struct clock_event_device *ced)
234{
235 return container_of(ced, struct sh_tmu_priv, ced);
236}
237
238static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
239{
240 struct clock_event_device *ced = &p->ced;
241
242 sh_tmu_enable(p);
243
244 /* TODO: calculate good shift from rate and counter bit width */
245
246 ced->shift = 32;
247 ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
248 ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
249 ced->min_delta_ns = 5000;
250
251 if (periodic) {
252 p->periodic = (p->rate + HZ/2) / HZ;
253 sh_tmu_set_next(p, p->periodic, 1);
254 }
255}
256
257static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
258 struct clock_event_device *ced)
259{
260 struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
261 int disabled = 0;
262
263 /* deal with old setting first */
264 switch (ced->mode) {
265 case CLOCK_EVT_MODE_PERIODIC:
266 case CLOCK_EVT_MODE_ONESHOT:
267 sh_tmu_disable(p);
268 disabled = 1;
269 break;
270 default:
271 break;
272 }
273
274 switch (mode) {
275 case CLOCK_EVT_MODE_PERIODIC:
276 pr_info("sh_tmu: %s used for periodic clock events\n",
277 ced->name);
278 sh_tmu_clock_event_start(p, 1);
279 break;
280 case CLOCK_EVT_MODE_ONESHOT:
281 pr_info("sh_tmu: %s used for oneshot clock events\n",
282 ced->name);
283 sh_tmu_clock_event_start(p, 0);
284 break;
285 case CLOCK_EVT_MODE_UNUSED:
286 if (!disabled)
287 sh_tmu_disable(p);
288 break;
289 case CLOCK_EVT_MODE_SHUTDOWN:
290 default:
291 break;
292 }
293}
294
295static int sh_tmu_clock_event_next(unsigned long delta,
296 struct clock_event_device *ced)
297{
298 struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
299
300 BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
301
302 /* program new delta value */
303 sh_tmu_set_next(p, delta, 0);
304 return 0;
305}
306
307static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
308 char *name, unsigned long rating)
309{
310 struct clock_event_device *ced = &p->ced;
311 int ret;
312
313 memset(ced, 0, sizeof(*ced));
314
315 ced->name = name;
316 ced->features = CLOCK_EVT_FEAT_PERIODIC;
317 ced->features |= CLOCK_EVT_FEAT_ONESHOT;
318 ced->rating = rating;
319 ced->cpumask = cpumask_of(0);
320 ced->set_next_event = sh_tmu_clock_event_next;
321 ced->set_mode = sh_tmu_clock_event_mode;
322
323 ret = setup_irq(p->irqaction.irq, &p->irqaction);
324 if (ret) {
325 pr_err("sh_tmu: failed to request irq %d\n",
326 p->irqaction.irq);
327 return;
328 }
329
330 pr_info("sh_tmu: %s used for clock events\n", ced->name);
331 clockevents_register_device(ced);
332}
333
334static int sh_tmu_register(struct sh_tmu_priv *p, char *name,
335 unsigned long clockevent_rating,
336 unsigned long clocksource_rating)
337{
338 if (clockevent_rating)
339 sh_tmu_register_clockevent(p, name, clockevent_rating);
340 else if (clocksource_rating)
341 sh_tmu_register_clocksource(p, name, clocksource_rating);
342
343 return 0;
344}
345
346static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
347{
348 struct sh_timer_config *cfg = pdev->dev.platform_data;
349 struct resource *res;
350 int irq, ret;
351 ret = -ENXIO;
352
353 memset(p, 0, sizeof(*p));
354 p->pdev = pdev;
355
356 if (!cfg) {
357 dev_err(&p->pdev->dev, "missing platform data\n");
358 goto err0;
359 }
360
361 platform_set_drvdata(pdev, p);
362
363 res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
364 if (!res) {
365 dev_err(&p->pdev->dev, "failed to get I/O memory\n");
366 goto err0;
367 }
368
369 irq = platform_get_irq(p->pdev, 0);
370 if (irq < 0) {
371 dev_err(&p->pdev->dev, "failed to get irq\n");
372 goto err0;
373 }
374
375 /* map memory, let mapbase point to our channel */
376 p->mapbase = ioremap_nocache(res->start, resource_size(res));
377 if (p->mapbase == NULL) {
378 pr_err("sh_tmu: failed to remap I/O memory\n");
379 goto err0;
380 }
381
382 /* setup data for setup_irq() (too early for request_irq()) */
383 p->irqaction.name = cfg->name;
384 p->irqaction.handler = sh_tmu_interrupt;
385 p->irqaction.dev_id = p;
386 p->irqaction.irq = irq;
387 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
388 p->irqaction.mask = CPU_MASK_NONE;
389
390 /* get hold of clock */
391 p->clk = clk_get(&p->pdev->dev, cfg->clk);
392 if (IS_ERR(p->clk)) {
393 pr_err("sh_tmu: cannot get clock \"%s\"\n", cfg->clk);
394 ret = PTR_ERR(p->clk);
395 goto err1;
396 }
397
398 return sh_tmu_register(p, cfg->name,
399 cfg->clockevent_rating,
400 cfg->clocksource_rating);
401 err1:
402 iounmap(p->mapbase);
403 err0:
404 return ret;
405}
406
407static int __devinit sh_tmu_probe(struct platform_device *pdev)
408{
409 struct sh_tmu_priv *p = platform_get_drvdata(pdev);
410 struct sh_timer_config *cfg = pdev->dev.platform_data;
411 int ret;
412
413 if (p) {
414 pr_info("sh_tmu: %s kept as earlytimer\n", cfg->name);
415 return 0;
416 }
417
418 p = kmalloc(sizeof(*p), GFP_KERNEL);
419 if (p == NULL) {
420 dev_err(&pdev->dev, "failed to allocate driver data\n");
421 return -ENOMEM;
422 }
423
424 ret = sh_tmu_setup(p, pdev);
425 if (ret) {
426 kfree(p);
427 platform_set_drvdata(pdev, NULL);
428 }
429 return ret;
430}
431
432static int __devexit sh_tmu_remove(struct platform_device *pdev)
433{
434 return -EBUSY; /* cannot unregister clockevent and clocksource */
435}
436
437static struct platform_driver sh_tmu_device_driver = {
438 .probe = sh_tmu_probe,
439 .remove = __devexit_p(sh_tmu_remove),
440 .driver = {
441 .name = "sh_tmu",
442 }
443};
444
445static int __init sh_tmu_init(void)
446{
447 return platform_driver_register(&sh_tmu_device_driver);
448}
449
450static void __exit sh_tmu_exit(void)
451{
452 platform_driver_unregister(&sh_tmu_device_driver);
453}
454
455early_platform_init("earlytimer", &sh_tmu_device_driver);
456module_init(sh_tmu_init);
457module_exit(sh_tmu_exit);
458
459MODULE_AUTHOR("Magnus Damm");
460MODULE_DESCRIPTION("SuperH TMU Timer Driver");
461MODULE_LICENSE("GPL v2");
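
Unlike the CMT, the TMU counts down from its reload value, so sh_tmu_clocksource_read() returns TCNT ^ 0xffffffff to hand the clocksource layer the monotonically increasing value it expects. The inversion in isolation:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t tcnt = 0xfffffff0;     /* 15 ticks after reload */
            uint32_t elapsed = tcnt ^ 0xffffffffu;

            /* XOR against the all-ones reload value turns the
             * down-counter reading into elapsed ticks: prints 15. */
            printf("elapsed = %u ticks\n", elapsed);
            return 0;
    }
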
diff --git a/drivers/connector/Kconfig b/drivers/connector/Kconfig
index 100bfd42206..6e6730f9dfd 100644
--- a/drivers/connector/Kconfig
+++ b/drivers/connector/Kconfig
@@ -7,7 +7,7 @@ menuconfig CONNECTOR
7 of the netlink socket protocol. 7 of the netlink socket protocol.
8 8
9 Connector support can also be built as a module. If so, the module 9 Connector support can also be built as a module. If so, the module
10 will be called cn.ko. 10 will be called cn.
11 11
12if CONNECTOR 12if CONNECTOR
13 13
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d270e8eb3e6..6e2ec0b1894 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -808,7 +808,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
808 ret = -ENOMEM; 808 ret = -ENOMEM;
809 goto nomem_out; 809 goto nomem_out;
810 } 810 }
811 if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) { 811 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
812 free_cpumask_var(policy->cpus); 812 free_cpumask_var(policy->cpus);
813 kfree(policy); 813 kfree(policy);
814 ret = -ENOMEM; 814 ret = -ENOMEM;
@@ -1070,11 +1070,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1070 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1070 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1071#endif 1071#endif
1072 1072
1073 unlock_policy_rwsem_write(cpu);
1074
1073 if (cpufreq_driver->target) 1075 if (cpufreq_driver->target)
1074 __cpufreq_governor(data, CPUFREQ_GOV_STOP); 1076 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1075 1077
1076 unlock_policy_rwsem_write(cpu);
1077
1078 kobject_put(&data->kobj); 1078 kobject_put(&data->kobj);
1079 1079
1080 /* we need to make sure that the underlying kobj is actually 1080 /* we need to make sure that the underlying kobj is actually
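
One plausible reading of the reordering above: CPUFREQ_GOV_STOP ends up waiting on governor work, and that work takes the same per-policy rwsem the remove path is holding, so the unlock must happen first. A hedged sketch of the inversion (names are illustrative, not the cpufreq symbols):

    #include <linux/rwsem.h>
    #include <linux/workqueue.h>

    static DECLARE_RWSEM(policy_rwsem);

    static void gov_work_fn(struct work_struct *work)
    {
            down_read(&policy_rwsem);       /* governor samples the policy */
            /* ... */
            up_read(&policy_rwsem);
    }

    static DECLARE_DELAYED_WORK(gov_work, gov_work_fn);

    static void remove_policy(void)
    {
            down_write(&policy_rwsem);
            /* ... detach per-cpu data ... */
            up_write(&policy_rwsem);        /* must drop this first ... */

            /* ... because stopping the governor waits for gov_work_fn(),
             * which would block forever on the rwsem we just held. */
            cancel_delayed_work_sync(&gov_work);
    }
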
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 2ecd95e4ab1..7a74d175287 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -91,6 +91,9 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
91 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then 91 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
92 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock 92 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
93 * is recursive for the same process. -Venki 93 * is recursive for the same process. -Venki
94 * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
95 * would deadlock with cancel_delayed_work_sync(), which is needed for proper
96 * raceless workqueue teardown.
94 */ 97 */
95static DEFINE_MUTEX(dbs_mutex); 98static DEFINE_MUTEX(dbs_mutex);
96 99
@@ -542,7 +545,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
542static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) 545static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
543{ 546{
544 dbs_info->enable = 0; 547 dbs_info->enable = 0;
545 cancel_delayed_work(&dbs_info->work); 548 cancel_delayed_work_sync(&dbs_info->work);
546} 549}
547 550
548static int cpufreq_governor_dbs(struct cpufreq_policy *policy, 551static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
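
The cancel_delayed_work() to cancel_delayed_work_sync() change pairs with the new DEADLOCK ALERT: the dbs timer rearms itself, so a plain cancel can return while an instance is still running and about to requeue; the sync variant waits for that instance, which in turn means the handler must never take dbs_mutex, since teardown holds it while cancelling. A sketch of the self-rearming pattern and its safe teardown (structure names are illustrative):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct dbs_info {
            struct delayed_work work;
            int enable;
    };

    /* Self-rearming sampler: each run schedules the next one. */
    static void dbs_work_fn(struct work_struct *work)
    {
            struct dbs_info *info =
                    container_of(work, struct dbs_info, work.work);

            if (!info->enable)
                    return;         /* being torn down: do not rearm */
            /* ... sample load, adjust frequency (no dbs_mutex here!) ... */
            schedule_delayed_work(&info->work, HZ / 10);
    }

    static void dbs_stop(struct dbs_info *info)
    {
            info->enable = 0;
            /* Waits for a running instance; the plain cancel could return
             * while the handler is mid-flight and about to requeue. */
            cancel_delayed_work_sync(&info->work);
    }
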
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 338f428a15b..e741c339df7 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -98,6 +98,9 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
98 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then 98 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
99 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock 99 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
100 * is recursive for the same process. -Venki 100 * is recursive for the same process. -Venki
101 * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
102 * would deadlock with cancel_delayed_work_sync(), which is needed for proper
103 * raceless workqueue teardown.
101 */ 104 */
102static DEFINE_MUTEX(dbs_mutex); 105static DEFINE_MUTEX(dbs_mutex);
103 106
@@ -562,7 +565,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
562static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) 565static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
563{ 566{
564 dbs_info->enable = 0; 567 dbs_info->enable = 0;
565 cancel_delayed_work(&dbs_info->work); 568 cancel_delayed_work_sync(&dbs_info->work);
566} 569}
567 570
568static int cpufreq_governor_dbs(struct cpufreq_policy *policy, 571static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 01afd758072..5b27692372b 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -12,7 +12,7 @@ if CRYPTO_HW
12 12
13config CRYPTO_DEV_PADLOCK 13config CRYPTO_DEV_PADLOCK
14 tristate "Support for VIA PadLock ACE" 14 tristate "Support for VIA PadLock ACE"
15 depends on X86_32 && !UML 15 depends on X86 && !UML
16 select CRYPTO_ALGAPI 16 select CRYPTO_ALGAPI
17 help 17 help
18 Some VIA processors come with an integrated crypto engine 18 Some VIA processors come with an integrated crypto engine
@@ -34,7 +34,7 @@ config CRYPTO_DEV_PADLOCK_AES
34 Available in VIA C3 and newer CPUs. 34 Available in VIA C3 and newer CPUs.
35 35
36 If unsure say M. The compiled module will be 36 If unsure say M. The compiled module will be
37 called padlock-aes.ko 37 called padlock-aes.
38 38
39config CRYPTO_DEV_PADLOCK_SHA 39config CRYPTO_DEV_PADLOCK_SHA
40 tristate "PadLock driver for SHA1 and SHA256 algorithms" 40 tristate "PadLock driver for SHA1 and SHA256 algorithms"
@@ -47,7 +47,7 @@ config CRYPTO_DEV_PADLOCK_SHA
47 Available in VIA C7 and newer processors. 47 Available in VIA C7 and newer processors.
48 48
49 If unsure say M. The compiled module will be 49 If unsure say M. The compiled module will be
50 called padlock-sha.ko 50 called padlock-sha.
51 51
52config CRYPTO_DEV_GEODE 52config CRYPTO_DEV_GEODE
53 tristate "Support for the Geode LX AES engine" 53 tristate "Support for the Geode LX AES engine"
@@ -79,7 +79,7 @@ config ZCRYPT_MONOLITHIC
79 bool "Monolithic zcrypt module" 79 bool "Monolithic zcrypt module"
80 depends on ZCRYPT="m" 80 depends on ZCRYPT="m"
81 help 81 help
82 Select this option if you want to have a single module z90crypt.ko 82 Select this option if you want to have a single module z90crypt,
83 that contains all parts of the crypto device driver (ap bus, 83 that contains all parts of the crypto device driver (ap bus,
84 request router and all the card drivers). 84 request router and all the card drivers).
85 85
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 2bef086fb34..5f753fc0873 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2564,7 +2564,7 @@ static void hifn_tasklet_callback(unsigned long data)
2564 hifn_process_queue(dev); 2564 hifn_process_queue(dev);
2565} 2565}
2566 2566
2567static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2567static int __devinit hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2568{ 2568{
2569 int err, i; 2569 int err, i;
2570 struct hifn_device *dev; 2570 struct hifn_device *dev;
@@ -2696,7 +2696,7 @@ err_out_disable_pci_device:
2696 return err; 2696 return err;
2697} 2697}
2698 2698
2699static void hifn_remove(struct pci_dev *pdev) 2699static void __devexit hifn_remove(struct pci_dev *pdev)
2700{ 2700{
2701 int i; 2701 int i;
2702 struct hifn_device *dev; 2702 struct hifn_device *dev;
@@ -2744,7 +2744,7 @@ static struct pci_driver hifn_pci_driver = {
2744 .remove = __devexit_p(hifn_remove), 2744 .remove = __devexit_p(hifn_remove),
2745}; 2745};
2746 2746
2747static int __devinit hifn_init(void) 2747static int __init hifn_init(void)
2748{ 2748{
2749 unsigned int freq; 2749 unsigned int freq;
2750 int err; 2750 int err;
@@ -2789,7 +2789,7 @@ static int __devinit hifn_init(void)
2789 return 0; 2789 return 0;
2790} 2790}
2791 2791
2792static void __devexit hifn_fini(void) 2792static void __exit hifn_fini(void)
2793{ 2793{
2794 pci_unregister_driver(&hifn_pci_driver); 2794 pci_unregister_driver(&hifn_pci_driver);
2795 2795
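
The hifn annotation fixes are about section placement: module __init/__exit entry points run once and can be discarded after boot, while probe/remove may run whenever a device (hot)plugs, so they carry __devinit/__devexit, with __devexit_p() compiling the remove pointer out when hotplug support is off. A minimal pairing, as a sketch of the convention of this era (the id_table is elided for brevity):

    #include <linux/module.h>
    #include <linux/pci.h>

    static int __devinit demo_probe(struct pci_dev *pdev,
                                    const struct pci_device_id *id)
    {
            return 0;       /* may run at any hotplug event */
    }

    static void __devexit demo_remove(struct pci_dev *pdev)
    {
    }

    static struct pci_driver demo_driver = {
            .name   = "demo",
            .probe  = demo_probe,
            .remove = __devexit_p(demo_remove), /* NULL if !CONFIG_HOTPLUG */
    };

    static int __init demo_init(void)
    {
            return pci_register_driver(&demo_driver);
    }

    static void __exit demo_exit(void)
    {
            pci_unregister_driver(&demo_driver);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
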
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index f9f05d7a707..6c6656d3b1e 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -415,6 +415,7 @@ static void crypto_done_action(unsigned long arg)
415static int init_ixp_crypto(void) 415static int init_ixp_crypto(void)
416{ 416{
417 int ret = -ENODEV; 417 int ret = -ENODEV;
418 u32 msg[2] = { 0, 0 };
418 419
419 if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH | 420 if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
420 IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) { 421 IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
@@ -426,9 +427,35 @@ static int init_ixp_crypto(void)
426 return ret; 427 return ret;
427 428
428 if (!npe_running(npe_c)) { 429 if (!npe_running(npe_c)) {
429 npe_load_firmware(npe_c, npe_name(npe_c), dev); 430 ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
431 if (ret) {
432 return ret;
433 }
434 if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
435 goto npe_error;
436 } else {
437 if (npe_send_message(npe_c, msg, "STATUS_MSG"))
438 goto npe_error;
439
440 if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
441 goto npe_error;
430 } 442 }
431 443
444 switch ((msg[1]>>16) & 0xff) {
445 case 3:
446 printk(KERN_WARNING "Firmware of %s lacks AES support\n",
447 npe_name(npe_c));
448 support_aes = 0;
449 break;
450 case 4:
451 case 5:
452 support_aes = 1;
453 break;
454 default:
455 printk(KERN_ERR "Firmware of %s lacks crypto support\n",
456 npe_name(npe_c));
457 return -ENODEV;
458 }
432 /* buffer_pool will also be used to sometimes store the hmac, 459 /* buffer_pool will also be used to sometimes store the hmac,
433 * so assure it is large enough 460 * so assure it is large enough
434 */ 461 */
@@ -459,6 +486,10 @@ static int init_ixp_crypto(void)
459 486
460 qmgr_enable_irq(RECV_QID); 487 qmgr_enable_irq(RECV_QID);
461 return 0; 488 return 0;
489
490npe_error:
491 printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
492 ret = -EIO;
462err: 493err:
463 if (ctx_pool) 494 if (ctx_pool)
464 dma_pool_destroy(ctx_pool); 495 dma_pool_destroy(ctx_pool);
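
The new ixp4xx status exchange reads back an NPE STATUS_MSG and keys AES support off bits 23:16 of the second reply word: 3 means a crypto build without AES, 4 or 5 means AES-capable, and anything else is not a crypto image at all. The decode in isolation (the sample reply word is an assumption):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t msg[2] = { 0, 0x00040000 };    /* sample reply */
            unsigned int fw = (msg[1] >> 16) & 0xff;

            switch (fw) {
            case 3:
                    puts("crypto firmware without AES");
                    break;
            case 4:
            case 5:
                    puts("crypto firmware with AES");
                    break;
            default:
                    puts("no crypto support");
            }
            return 0;
    }
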
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 3f0fdd18255..87f92c39b5f 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -154,7 +154,11 @@ static inline void padlock_reset_key(struct cword *cword)
154 int cpu = raw_smp_processor_id(); 154 int cpu = raw_smp_processor_id();
155 155
156 if (cword != per_cpu(last_cword, cpu)) 156 if (cword != per_cpu(last_cword, cpu))
157#ifndef CONFIG_X86_64
157 asm volatile ("pushfl; popfl"); 158 asm volatile ("pushfl; popfl");
159#else
160 asm volatile ("pushfq; popfq");
161#endif
158} 162}
159 163
160static inline void padlock_store_cword(struct cword *cword) 164static inline void padlock_store_cword(struct cword *cword)
@@ -208,10 +212,19 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
208 212
209 asm volatile ("test $1, %%cl;" 213 asm volatile ("test $1, %%cl;"
210 "je 1f;" 214 "je 1f;"
215#ifndef CONFIG_X86_64
211 "lea -1(%%ecx), %%eax;" 216 "lea -1(%%ecx), %%eax;"
212 "mov $1, %%ecx;" 217 "mov $1, %%ecx;"
218#else
219 "lea -1(%%rcx), %%rax;"
220 "mov $1, %%rcx;"
221#endif
213 ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */ 222 ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */
223#ifndef CONFIG_X86_64
214 "mov %%eax, %%ecx;" 224 "mov %%eax, %%ecx;"
225#else
226 "mov %%rax, %%rcx;"
227#endif
215 "1:" 228 "1:"
216 ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ 229 ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
217 : "+S"(input), "+D"(output) 230 : "+S"(input), "+D"(output)
@@ -489,4 +502,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
489MODULE_LICENSE("GPL"); 502MODULE_LICENSE("GPL");
490MODULE_AUTHOR("Michal Ludvig"); 503MODULE_AUTHOR("Michal Ludvig");
491 504
492MODULE_ALIAS("aes-all"); 505MODULE_ALIAS("aes");
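
The padlock-aes hunks exist because the key-reload trick is word-size specific: PadLock caches the last key per CPU, and rewriting EFLAGS (a pushf/popf pair) forces the engine to reload it on the next xcrypt, but the 32-bit pushfl/popfl forms do not assemble on x86-64. A standalone sketch, keying off the compiler's __x86_64__ where the kernel uses CONFIG_X86_64:

    #include <stdio.h>

    /* Rewriting EFLAGS invalidates PadLock's cached key; the instruction
     * pair differs by word size, hence the #ifdef in the patch above. */
    static inline void padlock_force_key_reload(void)
    {
    #ifdef __x86_64__
            __asm__ __volatile__("pushfq; popfq");
    #else
            __asm__ __volatile__("pushfl; popfl");
    #endif
    }

    int main(void)
    {
            padlock_force_key_reload();
            puts("EFLAGS rewritten; PadLock would reload its key");
            return 0;
    }
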
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index a3918c16b3d..c70775fd3ce 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -44,6 +44,8 @@
44#include <crypto/sha.h> 44#include <crypto/sha.h>
45#include <crypto/aead.h> 45#include <crypto/aead.h>
46#include <crypto/authenc.h> 46#include <crypto/authenc.h>
47#include <crypto/skcipher.h>
48#include <crypto/scatterwalk.h>
47 49
48#include "talitos.h" 50#include "talitos.h"
49 51
@@ -339,7 +341,8 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
339 status = error; 341 status = error;
340 342
341 dma_unmap_single(dev, request->dma_desc, 343 dma_unmap_single(dev, request->dma_desc,
342 sizeof(struct talitos_desc), DMA_BIDIRECTIONAL); 344 sizeof(struct talitos_desc),
345 DMA_BIDIRECTIONAL);
343 346
344 /* copy entries so we can call callback outside lock */ 347 /* copy entries so we can call callback outside lock */
345 saved_req.desc = request->desc; 348 saved_req.desc = request->desc;
@@ -413,7 +416,8 @@ static struct talitos_desc *current_desc(struct device *dev, int ch)
413/* 416/*
414 * user diagnostics; report root cause of error based on execution unit status 417 * user diagnostics; report root cause of error based on execution unit status
415 */ 418 */
416static void report_eu_error(struct device *dev, int ch, struct talitos_desc *desc) 419static void report_eu_error(struct device *dev, int ch,
420 struct talitos_desc *desc)
417{ 421{
418 struct talitos_private *priv = dev_get_drvdata(dev); 422 struct talitos_private *priv = dev_get_drvdata(dev);
419 int i; 423 int i;
@@ -684,8 +688,8 @@ struct talitos_ctx {
684 unsigned int authsize; 688 unsigned int authsize;
685}; 689};
686 690
687static int aead_authenc_setauthsize(struct crypto_aead *authenc, 691static int aead_setauthsize(struct crypto_aead *authenc,
688 unsigned int authsize) 692 unsigned int authsize)
689{ 693{
690 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 694 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
691 695
@@ -694,8 +698,8 @@ static int aead_authenc_setauthsize(struct crypto_aead *authenc,
694 return 0; 698 return 0;
695} 699}
696 700
697static int aead_authenc_setkey(struct crypto_aead *authenc, 701static int aead_setkey(struct crypto_aead *authenc,
698 const u8 *key, unsigned int keylen) 702 const u8 *key, unsigned int keylen)
699{ 703{
700 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 704 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
701 struct rtattr *rta = (void *)key; 705 struct rtattr *rta = (void *)key;
@@ -740,7 +744,7 @@ badkey:
740} 744}
741 745
742/* 746/*
743 * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor 747 * talitos_edesc - s/w-extended descriptor
744 * @src_nents: number of segments in input scatterlist 748 * @src_nents: number of segments in input scatterlist
745 * @dst_nents: number of segments in output scatterlist 749 * @dst_nents: number of segments in output scatterlist
746 * @dma_len: length of dma mapped link_tbl space 750 * @dma_len: length of dma mapped link_tbl space
@@ -752,17 +756,67 @@ badkey:
752 * is greater than 1, an integrity check value is concatenated to the end 756 * is greater than 1, an integrity check value is concatenated to the end
753 * of link_tbl data 757 * of link_tbl data
754 */ 758 */
755struct ipsec_esp_edesc { 759struct talitos_edesc {
756 int src_nents; 760 int src_nents;
757 int dst_nents; 761 int dst_nents;
762 int src_is_chained;
763 int dst_is_chained;
758 int dma_len; 764 int dma_len;
759 dma_addr_t dma_link_tbl; 765 dma_addr_t dma_link_tbl;
760 struct talitos_desc desc; 766 struct talitos_desc desc;
761 struct talitos_ptr link_tbl[0]; 767 struct talitos_ptr link_tbl[0];
762}; 768};
763 769
770static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
771 unsigned int nents, enum dma_data_direction dir,
772 int chained)
773{
774 if (unlikely(chained))
775 while (sg) {
776 dma_map_sg(dev, sg, 1, dir);
777 sg = scatterwalk_sg_next(sg);
778 }
779 else
780 dma_map_sg(dev, sg, nents, dir);
781 return nents;
782}
783
784static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
785 enum dma_data_direction dir)
786{
787 while (sg) {
788 dma_unmap_sg(dev, sg, 1, dir);
789 sg = scatterwalk_sg_next(sg);
790 }
791}
792
793static void talitos_sg_unmap(struct device *dev,
794 struct talitos_edesc *edesc,
795 struct scatterlist *src,
796 struct scatterlist *dst)
797{
798 unsigned int src_nents = edesc->src_nents ? : 1;
799 unsigned int dst_nents = edesc->dst_nents ? : 1;
800
801 if (src != dst) {
802 if (edesc->src_is_chained)
803 talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
804 else
805 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
806
807 if (edesc->dst_is_chained)
808 talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE);
809 else
810 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
811 } else
812 if (edesc->src_is_chained)
813 talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
814 else
815 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
816}
817
764static void ipsec_esp_unmap(struct device *dev, 818static void ipsec_esp_unmap(struct device *dev,
765 struct ipsec_esp_edesc *edesc, 819 struct talitos_edesc *edesc,
766 struct aead_request *areq) 820 struct aead_request *areq)
767{ 821{
768 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE); 822 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
@@ -772,15 +826,7 @@ static void ipsec_esp_unmap(struct device *dev,
772 826
773 dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE); 827 dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
774 828
775 if (areq->src != areq->dst) { 829 talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
776 dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
777 DMA_TO_DEVICE);
778 dma_unmap_sg(dev, areq->dst, edesc->dst_nents ? : 1,
779 DMA_FROM_DEVICE);
780 } else {
781 dma_unmap_sg(dev, areq->src, edesc->src_nents ? : 1,
782 DMA_BIDIRECTIONAL);
783 }
784 830
785 if (edesc->dma_len) 831 if (edesc->dma_len)
786 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, 832 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
@@ -795,13 +841,14 @@ static void ipsec_esp_encrypt_done(struct device *dev,
795 int err) 841 int err)
796{ 842{
797 struct aead_request *areq = context; 843 struct aead_request *areq = context;
798 struct ipsec_esp_edesc *edesc =
799 container_of(desc, struct ipsec_esp_edesc, desc);
800 struct crypto_aead *authenc = crypto_aead_reqtfm(areq); 844 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
801 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 845 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
846 struct talitos_edesc *edesc;
802 struct scatterlist *sg; 847 struct scatterlist *sg;
803 void *icvdata; 848 void *icvdata;
804 849
850 edesc = container_of(desc, struct talitos_edesc, desc);
851
805 ipsec_esp_unmap(dev, edesc, areq); 852 ipsec_esp_unmap(dev, edesc, areq);
806 853
807 /* copy the generated ICV to dst */ 854 /* copy the generated ICV to dst */
@@ -819,17 +866,18 @@ static void ipsec_esp_encrypt_done(struct device *dev,
819} 866}
820 867
821static void ipsec_esp_decrypt_swauth_done(struct device *dev, 868static void ipsec_esp_decrypt_swauth_done(struct device *dev,
822 struct talitos_desc *desc, void *context, 869 struct talitos_desc *desc,
823 int err) 870 void *context, int err)
824{ 871{
825 struct aead_request *req = context; 872 struct aead_request *req = context;
826 struct ipsec_esp_edesc *edesc =
827 container_of(desc, struct ipsec_esp_edesc, desc);
828 struct crypto_aead *authenc = crypto_aead_reqtfm(req); 873 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
829 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 874 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
875 struct talitos_edesc *edesc;
830 struct scatterlist *sg; 876 struct scatterlist *sg;
831 void *icvdata; 877 void *icvdata;
832 878
879 edesc = container_of(desc, struct talitos_edesc, desc);
880
833 ipsec_esp_unmap(dev, edesc, req); 881 ipsec_esp_unmap(dev, edesc, req);
834 882
835 if (!err) { 883 if (!err) {
@@ -851,20 +899,20 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
851} 899}
852 900
853static void ipsec_esp_decrypt_hwauth_done(struct device *dev, 901static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
854 struct talitos_desc *desc, void *context, 902 struct talitos_desc *desc,
855 int err) 903 void *context, int err)
856{ 904{
857 struct aead_request *req = context; 905 struct aead_request *req = context;
858 struct ipsec_esp_edesc *edesc = 906 struct talitos_edesc *edesc;
859 container_of(desc, struct ipsec_esp_edesc, desc); 907
908 edesc = container_of(desc, struct talitos_edesc, desc);
860 909
861 ipsec_esp_unmap(dev, edesc, req); 910 ipsec_esp_unmap(dev, edesc, req);
862 911
863 /* check ICV auth status */ 912 /* check ICV auth status */
864 if (!err) 913 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
865 if ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != 914 DESC_HDR_LO_ICCR1_PASS))
866 DESC_HDR_LO_ICCR1_PASS) 915 err = -EBADMSG;
867 err = -EBADMSG;
868 916
869 kfree(edesc); 917 kfree(edesc);
870 918
@@ -886,7 +934,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
886 link_tbl_ptr->j_extent = 0; 934 link_tbl_ptr->j_extent = 0;
887 link_tbl_ptr++; 935 link_tbl_ptr++;
888 cryptlen -= sg_dma_len(sg); 936 cryptlen -= sg_dma_len(sg);
889 sg = sg_next(sg); 937 sg = scatterwalk_sg_next(sg);
890 } 938 }
891 939
892 /* adjust (decrease) last one (or two) entry's len to cryptlen */ 940 /* adjust (decrease) last one (or two) entry's len to cryptlen */
@@ -910,7 +958,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
910/* 958/*
911 * fill in and submit ipsec_esp descriptor 959 * fill in and submit ipsec_esp descriptor
912 */ 960 */
913static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, 961static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
914 u8 *giv, u64 seq, 962 u8 *giv, u64 seq,
915 void (*callback) (struct device *dev, 963 void (*callback) (struct device *dev,
916 struct talitos_desc *desc, 964 struct talitos_desc *desc,
@@ -952,32 +1000,31 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
952 desc->ptr[4].len = cpu_to_be16(cryptlen); 1000 desc->ptr[4].len = cpu_to_be16(cryptlen);
953 desc->ptr[4].j_extent = authsize; 1001 desc->ptr[4].j_extent = authsize;
954 1002
955 if (areq->src == areq->dst) 1003 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
956 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1, 1004 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
957 DMA_BIDIRECTIONAL); 1005 : DMA_TO_DEVICE,
958 else 1006 edesc->src_is_chained);
959 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ? : 1,
960 DMA_TO_DEVICE);
961 1007
962 if (sg_count == 1) { 1008 if (sg_count == 1) {
963 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); 1009 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
964 } else { 1010 } else {
965 sg_link_tbl_len = cryptlen; 1011 sg_link_tbl_len = cryptlen;
966 1012
967 if ((edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) && 1013 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
968 (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) {
969 sg_link_tbl_len = cryptlen + authsize; 1014 sg_link_tbl_len = cryptlen + authsize;
970 } 1015
971 sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len, 1016 sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
972 &edesc->link_tbl[0]); 1017 &edesc->link_tbl[0]);
973 if (sg_count > 1) { 1018 if (sg_count > 1) {
974 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; 1019 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
975 desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); 1020 desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
976 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, 1021 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
977 edesc->dma_len, DMA_BIDIRECTIONAL); 1022 edesc->dma_len,
1023 DMA_BIDIRECTIONAL);
978 } else { 1024 } else {
979 /* Only one segment now, so no link tbl needed */ 1025 /* Only one segment now, so no link tbl needed */
980 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); 1026 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->
1027 src));
981 } 1028 }
982 } 1029 }
983 1030
@@ -985,10 +1032,11 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
985 desc->ptr[5].len = cpu_to_be16(cryptlen); 1032 desc->ptr[5].len = cpu_to_be16(cryptlen);
986 desc->ptr[5].j_extent = authsize; 1033 desc->ptr[5].j_extent = authsize;
987 1034
988 if (areq->src != areq->dst) { 1035 if (areq->src != areq->dst)
989 sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1, 1036 sg_count = talitos_map_sg(dev, areq->dst,
990 DMA_FROM_DEVICE); 1037 edesc->dst_nents ? : 1,
991 } 1038 DMA_FROM_DEVICE,
1039 edesc->dst_is_chained);
992 1040
993 if (sg_count == 1) { 1041 if (sg_count == 1) {
994 desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); 1042 desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
@@ -1033,49 +1081,55 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
1033 return ret; 1081 return ret;
1034} 1082}
1035 1083
1036
1037/* 1084/*
1038 * derive number of elements in scatterlist 1085 * derive number of elements in scatterlist
1039 */ 1086 */
1040static int sg_count(struct scatterlist *sg_list, int nbytes) 1087static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
1041{ 1088{
1042 struct scatterlist *sg = sg_list; 1089 struct scatterlist *sg = sg_list;
1043 int sg_nents = 0; 1090 int sg_nents = 0;
1044 1091
1045 while (nbytes) { 1092 *chained = 0;
1093 while (nbytes > 0) {
1046 sg_nents++; 1094 sg_nents++;
1047 nbytes -= sg->length; 1095 nbytes -= sg->length;
1048 sg = sg_next(sg); 1096 if (!sg_is_last(sg) && (sg + 1)->length == 0)
1097 *chained = 1;
1098 sg = scatterwalk_sg_next(sg);
1049 } 1099 }
1050 1100
1051 return sg_nents; 1101 return sg_nents;
1052} 1102}
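
The chained-list detection above keys on an array slot whose length is zero: the crypto layer's chaining helper stores the link in the last slot of the first array and leaves that slot's length 0. A hedged sketch of building such a list (buf_a/buf_b and len_a/len_b are invented; assumes the chaining helper from <crypto/scatterwalk.h>):

	struct scatterlist head[2], tail[1];

	sg_init_table(head, 2);
	sg_init_table(tail, 1);
	sg_set_buf(&head[0], buf_a, len_a);	/* only head[0] carries data */
	sg_set_buf(&tail[0], buf_b, len_b);
	/* link the arrays; head[1] becomes the zero-length chain slot */
	scatterwalk_sg_chain(head, 2, tail);

Walking this list, sg_count() visits head[0] and then tail[0] via scatterwalk_sg_next(), and reports *chained = 1 because the slot after head[0] has zero length.
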
1053 1103
1054/* 1104/*
1055 * allocate and map the ipsec_esp extended descriptor 1105 * allocate and map the extended descriptor
1056 */ 1106 */
1057static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, 1107static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1058 int icv_stashing) 1108 struct scatterlist *src,
1109 struct scatterlist *dst,
1110 unsigned int cryptlen,
1111 unsigned int authsize,
1112 int icv_stashing,
1113 u32 cryptoflags)
1059{ 1114{
1060 struct crypto_aead *authenc = crypto_aead_reqtfm(areq); 1115 struct talitos_edesc *edesc;
1061 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1062 struct ipsec_esp_edesc *edesc;
1063 int src_nents, dst_nents, alloc_len, dma_len; 1116 int src_nents, dst_nents, alloc_len, dma_len;
1064 gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : 1117 int src_chained, dst_chained = 0;
1118 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1065 GFP_ATOMIC; 1119 GFP_ATOMIC;
1066 1120
1067 if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) { 1121 if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
1068 dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n"); 1122 dev_err(dev, "length exceeds h/w max limit\n");
1069 return ERR_PTR(-EINVAL); 1123 return ERR_PTR(-EINVAL);
1070 } 1124 }
1071 1125
1072 src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize); 1126 src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1073 src_nents = (src_nents == 1) ? 0 : src_nents; 1127 src_nents = (src_nents == 1) ? 0 : src_nents;
1074 1128
1075 if (areq->dst == areq->src) { 1129 if (dst == src) {
1076 dst_nents = src_nents; 1130 dst_nents = src_nents;
1077 } else { 1131 } else {
1078 dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize); 1132 dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained);
1079 dst_nents = (dst_nents == 1) ? 0 : dst_nents; 1133 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1080 } 1134 }
1081 1135
@@ -1084,39 +1138,52 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
1084 * allowing for two separate entries for ICV and generated ICV (+ 2), 1138 * allowing for two separate entries for ICV and generated ICV (+ 2),
1085 * and the ICV data itself 1139 * and the ICV data itself
1086 */ 1140 */
1087 alloc_len = sizeof(struct ipsec_esp_edesc); 1141 alloc_len = sizeof(struct talitos_edesc);
1088 if (src_nents || dst_nents) { 1142 if (src_nents || dst_nents) {
1089 dma_len = (src_nents + dst_nents + 2) * 1143 dma_len = (src_nents + dst_nents + 2) *
1090 sizeof(struct talitos_ptr) + ctx->authsize; 1144 sizeof(struct talitos_ptr) + authsize;
1091 alloc_len += dma_len; 1145 alloc_len += dma_len;
1092 } else { 1146 } else {
1093 dma_len = 0; 1147 dma_len = 0;
1094 alloc_len += icv_stashing ? ctx->authsize : 0; 1148 alloc_len += icv_stashing ? authsize : 0;
1095 } 1149 }
1096 1150
1097 edesc = kmalloc(alloc_len, GFP_DMA | flags); 1151 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1098 if (!edesc) { 1152 if (!edesc) {
1099 dev_err(ctx->dev, "could not allocate edescriptor\n"); 1153 dev_err(dev, "could not allocate edescriptor\n");
1100 return ERR_PTR(-ENOMEM); 1154 return ERR_PTR(-ENOMEM);
1101 } 1155 }
1102 1156
1103 edesc->src_nents = src_nents; 1157 edesc->src_nents = src_nents;
1104 edesc->dst_nents = dst_nents; 1158 edesc->dst_nents = dst_nents;
1159 edesc->src_is_chained = src_chained;
1160 edesc->dst_is_chained = dst_chained;
1105 edesc->dma_len = dma_len; 1161 edesc->dma_len = dma_len;
1106 edesc->dma_link_tbl = dma_map_single(ctx->dev, &edesc->link_tbl[0], 1162 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1107 edesc->dma_len, DMA_BIDIRECTIONAL); 1163 edesc->dma_len, DMA_BIDIRECTIONAL);
1108 1164
1109 return edesc; 1165 return edesc;
1110} 1166}
1111 1167
1112static int aead_authenc_encrypt(struct aead_request *req) 1168static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
1169 int icv_stashing)
1170{
1171 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1172 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1173
1174 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1175 areq->cryptlen, ctx->authsize, icv_stashing,
1176 areq->base.flags);
1177}
1178
1179static int aead_encrypt(struct aead_request *req)
1113{ 1180{
1114 struct crypto_aead *authenc = crypto_aead_reqtfm(req); 1181 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1115 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 1182 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1116 struct ipsec_esp_edesc *edesc; 1183 struct talitos_edesc *edesc;
1117 1184
1118 /* allocate extended descriptor */ 1185 /* allocate extended descriptor */
1119 edesc = ipsec_esp_edesc_alloc(req, 0); 1186 edesc = aead_edesc_alloc(req, 0);
1120 if (IS_ERR(edesc)) 1187 if (IS_ERR(edesc))
1121 return PTR_ERR(edesc); 1188 return PTR_ERR(edesc);
1122 1189
@@ -1126,70 +1193,67 @@ static int aead_authenc_encrypt(struct aead_request *req)
1126 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done); 1193 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
1127} 1194}
1128 1195
1129 1196static int aead_decrypt(struct aead_request *req)
1130
1131static int aead_authenc_decrypt(struct aead_request *req)
1132{ 1197{
1133 struct crypto_aead *authenc = crypto_aead_reqtfm(req); 1198 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1134 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 1199 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1135 unsigned int authsize = ctx->authsize; 1200 unsigned int authsize = ctx->authsize;
1136 struct talitos_private *priv = dev_get_drvdata(ctx->dev); 1201 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1137 struct ipsec_esp_edesc *edesc; 1202 struct talitos_edesc *edesc;
1138 struct scatterlist *sg; 1203 struct scatterlist *sg;
1139 void *icvdata; 1204 void *icvdata;
1140 1205
1141 req->cryptlen -= authsize; 1206 req->cryptlen -= authsize;
1142 1207
1143 /* allocate extended descriptor */ 1208 /* allocate extended descriptor */
1144 edesc = ipsec_esp_edesc_alloc(req, 1); 1209 edesc = aead_edesc_alloc(req, 1);
1145 if (IS_ERR(edesc)) 1210 if (IS_ERR(edesc))
1146 return PTR_ERR(edesc); 1211 return PTR_ERR(edesc);
1147 1212
1148 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) && 1213 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1149 (((!edesc->src_nents && !edesc->dst_nents) || 1214 ((!edesc->src_nents && !edesc->dst_nents) ||
1150 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT))) { 1215 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1151 1216
1152 /* decrypt and check the ICV */ 1217 /* decrypt and check the ICV */
1153 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND | 1218 edesc->desc.hdr = ctx->desc_hdr_template |
1219 DESC_HDR_DIR_INBOUND |
1154 DESC_HDR_MODE1_MDEU_CICV; 1220 DESC_HDR_MODE1_MDEU_CICV;
1155 1221
1156 /* reset integrity check result bits */ 1222 /* reset integrity check result bits */
1157 edesc->desc.hdr_lo = 0; 1223 edesc->desc.hdr_lo = 0;
1158 1224
1159 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_hwauth_done); 1225 return ipsec_esp(edesc, req, NULL, 0,
1226 ipsec_esp_decrypt_hwauth_done);
1160 1227
1161 } else { 1228 }
1162
1163 /* Have to check the ICV with software */
1164 1229
1165 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; 1230 /* Have to check the ICV with software */
1231 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1166 1232
1167 /* stash incoming ICV for later cmp with ICV generated by the h/w */ 1233 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1168 if (edesc->dma_len) 1234 if (edesc->dma_len)
1169 icvdata = &edesc->link_tbl[edesc->src_nents + 1235 icvdata = &edesc->link_tbl[edesc->src_nents +
1170 edesc->dst_nents + 2]; 1236 edesc->dst_nents + 2];
1171 else 1237 else
1172 icvdata = &edesc->link_tbl[0]; 1238 icvdata = &edesc->link_tbl[0];
1173 1239
1174 sg = sg_last(req->src, edesc->src_nents ? : 1); 1240 sg = sg_last(req->src, edesc->src_nents ? : 1);
1175 1241
1176 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize, 1242 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1177 ctx->authsize); 1243 ctx->authsize);
1178 1244
1179 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done); 1245 return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
1180 }
1181} 1246}
1182 1247
1183static int aead_authenc_givencrypt( 1248static int aead_givencrypt(struct aead_givcrypt_request *req)
1184 struct aead_givcrypt_request *req)
1185{ 1249{
1186 struct aead_request *areq = &req->areq; 1250 struct aead_request *areq = &req->areq;
1187 struct crypto_aead *authenc = crypto_aead_reqtfm(areq); 1251 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1188 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 1252 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1189 struct ipsec_esp_edesc *edesc; 1253 struct talitos_edesc *edesc;
1190 1254
1191 /* allocate extended descriptor */ 1255 /* allocate extended descriptor */
1192 edesc = ipsec_esp_edesc_alloc(areq, 0); 1256 edesc = aead_edesc_alloc(areq, 0);
1193 if (IS_ERR(edesc)) 1257 if (IS_ERR(edesc))
1194 return PTR_ERR(edesc); 1258 return PTR_ERR(edesc);
1195 1259
@@ -1204,31 +1268,228 @@ static int aead_authenc_givencrypt(
1204 ipsec_esp_encrypt_done); 1268 ipsec_esp_encrypt_done);
1205} 1269}
1206 1270
1271static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1272 const u8 *key, unsigned int keylen)
1273{
1274 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1275 struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher);
1276
1277 if (keylen > TALITOS_MAX_KEY_SIZE)
1278 goto badkey;
1279
1280 if (keylen < alg->min_keysize || keylen > alg->max_keysize)
1281 goto badkey;
1282
1283 memcpy(&ctx->key, key, keylen);
1284 ctx->keylen = keylen;
1285
1286 return 0;
1287
1288badkey:
1289 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1290 return -EINVAL;
1291}
1292
1293static void common_nonsnoop_unmap(struct device *dev,
1294 struct talitos_edesc *edesc,
1295 struct ablkcipher_request *areq)
1296{
1297 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1298 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1299 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1300
1301 talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
1302
1303 if (edesc->dma_len)
1304 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1305 DMA_BIDIRECTIONAL);
1306}
1307
1308static void ablkcipher_done(struct device *dev,
1309 struct talitos_desc *desc, void *context,
1310 int err)
1311{
1312 struct ablkcipher_request *areq = context;
1313 struct talitos_edesc *edesc;
1314
1315 edesc = container_of(desc, struct talitos_edesc, desc);
1316
1317 common_nonsnoop_unmap(dev, edesc, areq);
1318
1319 kfree(edesc);
1320
1321 areq->base.complete(&areq->base, err);
1322}
1323
1324static int common_nonsnoop(struct talitos_edesc *edesc,
1325 struct ablkcipher_request *areq,
1326 u8 *giv,
1327 void (*callback) (struct device *dev,
1328 struct talitos_desc *desc,
1329 void *context, int error))
1330{
1331 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1332 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1333 struct device *dev = ctx->dev;
1334 struct talitos_desc *desc = &edesc->desc;
1335 unsigned int cryptlen = areq->nbytes;
1336 unsigned int ivsize;
1337 int sg_count, ret;
1338
1339 /* first DWORD empty */
1340 desc->ptr[0].len = 0;
1341 desc->ptr[0].ptr = 0;
1342 desc->ptr[0].j_extent = 0;
1343
1344 /* cipher iv */
1345 ivsize = crypto_ablkcipher_ivsize(cipher);
1346 map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0,
1347 DMA_TO_DEVICE);
1348
1349 /* cipher key */
1350 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1351 (char *)&ctx->key, 0, DMA_TO_DEVICE);
1352
1353 /*
1354 * cipher in
1355 */
1356 desc->ptr[3].len = cpu_to_be16(cryptlen);
1357 desc->ptr[3].j_extent = 0;
1358
1359 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1360 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1361 : DMA_TO_DEVICE,
1362 edesc->src_is_chained);
1363
1364 if (sg_count == 1) {
1365 desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src));
1366 } else {
1367 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
1368 &edesc->link_tbl[0]);
1369 if (sg_count > 1) {
1370 desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1371 desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
1372 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1373 edesc->dma_len,
1374 DMA_BIDIRECTIONAL);
1375 } else {
1376 /* Only one segment now, so no link tbl needed */
1377 desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->
1378 src));
1379 }
1380 }
1381
1382 /* cipher out */
1383 desc->ptr[4].len = cpu_to_be16(cryptlen);
1384 desc->ptr[4].j_extent = 0;
1385
1386 if (areq->src != areq->dst)
1387 sg_count = talitos_map_sg(dev, areq->dst,
1388 edesc->dst_nents ? : 1,
1389 DMA_FROM_DEVICE,
1390 edesc->dst_is_chained);
1391
1392 if (sg_count == 1) {
1393 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst));
1394 } else {
1395 struct talitos_ptr *link_tbl_ptr =
1396 &edesc->link_tbl[edesc->src_nents + 1];
1397
1398 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1399 desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
1400 edesc->dma_link_tbl +
1401 edesc->src_nents + 1);
1402 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1403 link_tbl_ptr);
1404 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1405 edesc->dma_len, DMA_BIDIRECTIONAL);
1406 }
1407
1408 /* iv out */
1409 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
1410 DMA_FROM_DEVICE);
1411
1412 /* last DWORD empty */
1413 desc->ptr[6].len = 0;
1414 desc->ptr[6].ptr = 0;
1415 desc->ptr[6].j_extent = 0;
1416
1417 ret = talitos_submit(dev, desc, callback, areq);
1418 if (ret != -EINPROGRESS) {
1419 common_nonsnoop_unmap(dev, edesc, areq);
1420 kfree(edesc);
1421 }
1422 return ret;
1423}
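
For orientation, the descriptor slots that common_nonsnoop() fills, summarized from the code above:

	/* Descriptor pointer usage in common_nonsnoop():
	 *   ptr[0]  empty
	 *   ptr[1]  cipher IV in (giv, or areq->info)
	 *   ptr[2]  cipher key
	 *   ptr[3]  cipher data in  (direct pointer or link table)
	 *   ptr[4]  cipher data out (direct pointer or link table)
	 *   ptr[5]  IV out
	 *   ptr[6]  empty
	 */
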
1424
1425static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1426 areq)
1427{
1428 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1429 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1430
1431 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes,
1432 0, 0, areq->base.flags);
1433}
1434
1435static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1436{
1437 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1438 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1439 struct talitos_edesc *edesc;
1440
1441 /* allocate extended descriptor */
1442 edesc = ablkcipher_edesc_alloc(areq);
1443 if (IS_ERR(edesc))
1444 return PTR_ERR(edesc);
1445
1446 /* set encrypt */
1447 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1448
1449 return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
1450}
1451
1452static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1453{
1454 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1455 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1456 struct talitos_edesc *edesc;
1457
1458 /* allocate extended descriptor */
1459 edesc = ablkcipher_edesc_alloc(areq);
1460 if (IS_ERR(edesc))
1461 return PTR_ERR(edesc);
1462
1463 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1464
1465 return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
1466}
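
Once registered via the driver_algs table below, these ablkcipher paths are reachable through the generic async blkcipher API. A hedged sketch of one CBC-AES encryption (key, iv, buf, and len are caller-supplied assumptions; len must be a multiple of AES_BLOCK_SIZE, and a real caller re-checks the completion status):

	static void example_done(struct crypto_async_request *req, int err)
	{
		complete(req->data);
	}

	static int example_cbc_aes(u8 *key, u8 *iv, u8 *buf, unsigned int len)
	{
		struct crypto_ablkcipher *tfm;
		struct ablkcipher_request *req;
		struct scatterlist sg;
		struct completion done;
		int ret;

		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_ablkcipher_setkey(tfm, key, 16);	/* AES-128 */
		if (ret)
			goto out_free_tfm;

		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out_free_tfm;
		}

		init_completion(&done);
		ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
						example_done, &done);
		sg_init_one(&sg, buf, len);
		ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

		ret = crypto_ablkcipher_encrypt(req);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			/* asynchronous path: wait for the callback */
			wait_for_completion(&done);
			ret = 0;
		}

		ablkcipher_request_free(req);
	out_free_tfm:
		crypto_free_ablkcipher(tfm);
		return ret;
	}
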
1467
1207struct talitos_alg_template { 1468struct talitos_alg_template {
1208 char name[CRYPTO_MAX_ALG_NAME]; 1469 struct crypto_alg alg;
1209 char driver_name[CRYPTO_MAX_ALG_NAME];
1210 unsigned int blocksize;
1211 struct aead_alg aead;
1212 struct device *dev;
1213 __be32 desc_hdr_template; 1470 __be32 desc_hdr_template;
1214}; 1471};
1215 1472
1216static struct talitos_alg_template driver_algs[] = { 1473static struct talitos_alg_template driver_algs[] = {
1217 /* single-pass ipsec_esp descriptor */ 1474 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
1218 { 1475 {
1219 .name = "authenc(hmac(sha1),cbc(aes))", 1476 .alg = {
1220 .driver_name = "authenc-hmac-sha1-cbc-aes-talitos", 1477 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1221 .blocksize = AES_BLOCK_SIZE, 1478 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1222 .aead = { 1479 .cra_blocksize = AES_BLOCK_SIZE,
1223 .setkey = aead_authenc_setkey, 1480 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1224 .setauthsize = aead_authenc_setauthsize, 1481 .cra_type = &crypto_aead_type,
1225 .encrypt = aead_authenc_encrypt, 1482 .cra_aead = {
1226 .decrypt = aead_authenc_decrypt, 1483 .setkey = aead_setkey,
1227 .givencrypt = aead_authenc_givencrypt, 1484 .setauthsize = aead_setauthsize,
1228 .geniv = "<built-in>", 1485 .encrypt = aead_encrypt,
1229 .ivsize = AES_BLOCK_SIZE, 1486 .decrypt = aead_decrypt,
1230 .maxauthsize = SHA1_DIGEST_SIZE, 1487 .givencrypt = aead_givencrypt,
1231 }, 1488 .geniv = "<built-in>",
1489 .ivsize = AES_BLOCK_SIZE,
1490 .maxauthsize = SHA1_DIGEST_SIZE,
1491 }
1492 },
1232 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 1493 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1233 DESC_HDR_SEL0_AESU | 1494 DESC_HDR_SEL0_AESU |
1234 DESC_HDR_MODE0_AESU_CBC | 1495 DESC_HDR_MODE0_AESU_CBC |
@@ -1238,19 +1499,23 @@ static struct talitos_alg_template driver_algs[] = {
1238 DESC_HDR_MODE1_MDEU_SHA1_HMAC, 1499 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1239 }, 1500 },
1240 { 1501 {
1241 .name = "authenc(hmac(sha1),cbc(des3_ede))", 1502 .alg = {
1242 .driver_name = "authenc-hmac-sha1-cbc-3des-talitos", 1503 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1243 .blocksize = DES3_EDE_BLOCK_SIZE, 1504 .cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1244 .aead = { 1505 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1245 .setkey = aead_authenc_setkey, 1506 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1246 .setauthsize = aead_authenc_setauthsize, 1507 .cra_type = &crypto_aead_type,
1247 .encrypt = aead_authenc_encrypt, 1508 .cra_aead = {
1248 .decrypt = aead_authenc_decrypt, 1509 .setkey = aead_setkey,
1249 .givencrypt = aead_authenc_givencrypt, 1510 .setauthsize = aead_setauthsize,
1250 .geniv = "<built-in>", 1511 .encrypt = aead_encrypt,
1251 .ivsize = DES3_EDE_BLOCK_SIZE, 1512 .decrypt = aead_decrypt,
1252 .maxauthsize = SHA1_DIGEST_SIZE, 1513 .givencrypt = aead_givencrypt,
1253 }, 1514 .geniv = "<built-in>",
1515 .ivsize = DES3_EDE_BLOCK_SIZE,
1516 .maxauthsize = SHA1_DIGEST_SIZE,
1517 }
1518 },
1254 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 1519 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1255 DESC_HDR_SEL0_DEU | 1520 DESC_HDR_SEL0_DEU |
1256 DESC_HDR_MODE0_DEU_CBC | 1521 DESC_HDR_MODE0_DEU_CBC |
@@ -1261,19 +1526,23 @@ static struct talitos_alg_template driver_algs[] = {
1261 DESC_HDR_MODE1_MDEU_SHA1_HMAC, 1526 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1262 }, 1527 },
1263 { 1528 {
1264 .name = "authenc(hmac(sha256),cbc(aes))", 1529 .alg = {
1265 .driver_name = "authenc-hmac-sha256-cbc-aes-talitos", 1530 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1266 .blocksize = AES_BLOCK_SIZE, 1531 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
1267 .aead = { 1532 .cra_blocksize = AES_BLOCK_SIZE,
1268 .setkey = aead_authenc_setkey, 1533 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1269 .setauthsize = aead_authenc_setauthsize, 1534 .cra_type = &crypto_aead_type,
1270 .encrypt = aead_authenc_encrypt, 1535 .cra_aead = {
1271 .decrypt = aead_authenc_decrypt, 1536 .setkey = aead_setkey,
1272 .givencrypt = aead_authenc_givencrypt, 1537 .setauthsize = aead_setauthsize,
1273 .geniv = "<built-in>", 1538 .encrypt = aead_encrypt,
1274 .ivsize = AES_BLOCK_SIZE, 1539 .decrypt = aead_decrypt,
1275 .maxauthsize = SHA256_DIGEST_SIZE, 1540 .givencrypt = aead_givencrypt,
1276 }, 1541 .geniv = "<built-in>",
1542 .ivsize = AES_BLOCK_SIZE,
1543 .maxauthsize = SHA256_DIGEST_SIZE,
1544 }
1545 },
1277 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 1546 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1278 DESC_HDR_SEL0_AESU | 1547 DESC_HDR_SEL0_AESU |
1279 DESC_HDR_MODE0_AESU_CBC | 1548 DESC_HDR_MODE0_AESU_CBC |
@@ -1283,19 +1552,23 @@ static struct talitos_alg_template driver_algs[] = {
1283 DESC_HDR_MODE1_MDEU_SHA256_HMAC, 1552 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1284 }, 1553 },
1285 { 1554 {
1286 .name = "authenc(hmac(sha256),cbc(des3_ede))", 1555 .alg = {
1287 .driver_name = "authenc-hmac-sha256-cbc-3des-talitos", 1556 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
1288 .blocksize = DES3_EDE_BLOCK_SIZE, 1557 .cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
1289 .aead = { 1558 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1290 .setkey = aead_authenc_setkey, 1559 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1291 .setauthsize = aead_authenc_setauthsize, 1560 .cra_type = &crypto_aead_type,
1292 .encrypt = aead_authenc_encrypt, 1561 .cra_aead = {
1293 .decrypt = aead_authenc_decrypt, 1562 .setkey = aead_setkey,
1294 .givencrypt = aead_authenc_givencrypt, 1563 .setauthsize = aead_setauthsize,
1295 .geniv = "<built-in>", 1564 .encrypt = aead_encrypt,
1296 .ivsize = DES3_EDE_BLOCK_SIZE, 1565 .decrypt = aead_decrypt,
1297 .maxauthsize = SHA256_DIGEST_SIZE, 1566 .givencrypt = aead_givencrypt,
1298 }, 1567 .geniv = "<built-in>",
1568 .ivsize = DES3_EDE_BLOCK_SIZE,
1569 .maxauthsize = SHA256_DIGEST_SIZE,
1570 }
1571 },
1299 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 1572 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1300 DESC_HDR_SEL0_DEU | 1573 DESC_HDR_SEL0_DEU |
1301 DESC_HDR_MODE0_DEU_CBC | 1574 DESC_HDR_MODE0_DEU_CBC |
@@ -1306,19 +1579,23 @@ static struct talitos_alg_template driver_algs[] = {
1306 DESC_HDR_MODE1_MDEU_SHA256_HMAC, 1579 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
1307 }, 1580 },
1308 { 1581 {
1309 .name = "authenc(hmac(md5),cbc(aes))", 1582 .alg = {
1310 .driver_name = "authenc-hmac-md5-cbc-aes-talitos", 1583 .cra_name = "authenc(hmac(md5),cbc(aes))",
1311 .blocksize = AES_BLOCK_SIZE, 1584 .cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
1312 .aead = { 1585 .cra_blocksize = AES_BLOCK_SIZE,
1313 .setkey = aead_authenc_setkey, 1586 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1314 .setauthsize = aead_authenc_setauthsize, 1587 .cra_type = &crypto_aead_type,
1315 .encrypt = aead_authenc_encrypt, 1588 .cra_aead = {
1316 .decrypt = aead_authenc_decrypt, 1589 .setkey = aead_setkey,
1317 .givencrypt = aead_authenc_givencrypt, 1590 .setauthsize = aead_setauthsize,
1318 .geniv = "<built-in>", 1591 .encrypt = aead_encrypt,
1319 .ivsize = AES_BLOCK_SIZE, 1592 .decrypt = aead_decrypt,
1320 .maxauthsize = MD5_DIGEST_SIZE, 1593 .givencrypt = aead_givencrypt,
1321 }, 1594 .geniv = "<built-in>",
1595 .ivsize = AES_BLOCK_SIZE,
1596 .maxauthsize = MD5_DIGEST_SIZE,
1597 }
1598 },
1322 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 1599 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1323 DESC_HDR_SEL0_AESU | 1600 DESC_HDR_SEL0_AESU |
1324 DESC_HDR_MODE0_AESU_CBC | 1601 DESC_HDR_MODE0_AESU_CBC |
@@ -1328,19 +1605,23 @@ static struct talitos_alg_template driver_algs[] = {
1328 DESC_HDR_MODE1_MDEU_MD5_HMAC, 1605 DESC_HDR_MODE1_MDEU_MD5_HMAC,
1329 }, 1606 },
1330 { 1607 {
1331 .name = "authenc(hmac(md5),cbc(des3_ede))", 1608 .alg = {
1332 .driver_name = "authenc-hmac-md5-cbc-3des-talitos", 1609 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1333 .blocksize = DES3_EDE_BLOCK_SIZE, 1610 .cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
1334 .aead = { 1611 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1335 .setkey = aead_authenc_setkey, 1612 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1336 .setauthsize = aead_authenc_setauthsize, 1613 .cra_type = &crypto_aead_type,
1337 .encrypt = aead_authenc_encrypt, 1614 .cra_aead = {
1338 .decrypt = aead_authenc_decrypt, 1615 .setkey = aead_setkey,
1339 .givencrypt = aead_authenc_givencrypt, 1616 .setauthsize = aead_setauthsize,
1340 .geniv = "<built-in>", 1617 .encrypt = aead_encrypt,
1341 .ivsize = DES3_EDE_BLOCK_SIZE, 1618 .decrypt = aead_decrypt,
1342 .maxauthsize = MD5_DIGEST_SIZE, 1619 .givencrypt = aead_givencrypt,
1343 }, 1620 .geniv = "<built-in>",
1621 .ivsize = DES3_EDE_BLOCK_SIZE,
1622 .maxauthsize = MD5_DIGEST_SIZE,
1623 }
1624 },
1344 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 1625 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1345 DESC_HDR_SEL0_DEU | 1626 DESC_HDR_SEL0_DEU |
1346 DESC_HDR_MODE0_DEU_CBC | 1627 DESC_HDR_MODE0_DEU_CBC |
@@ -1349,6 +1630,52 @@ static struct talitos_alg_template driver_algs[] = {
1349 DESC_HDR_MODE1_MDEU_INIT | 1630 DESC_HDR_MODE1_MDEU_INIT |
1350 DESC_HDR_MODE1_MDEU_PAD | 1631 DESC_HDR_MODE1_MDEU_PAD |
1351 DESC_HDR_MODE1_MDEU_MD5_HMAC, 1632 DESC_HDR_MODE1_MDEU_MD5_HMAC,
1633 },
1634 /* ABLKCIPHER algorithms. */
1635 {
1636 .alg = {
1637 .cra_name = "cbc(aes)",
1638 .cra_driver_name = "cbc-aes-talitos",
1639 .cra_blocksize = AES_BLOCK_SIZE,
1640 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1641 CRYPTO_ALG_ASYNC,
1642 .cra_type = &crypto_ablkcipher_type,
1643 .cra_ablkcipher = {
1644 .setkey = ablkcipher_setkey,
1645 .encrypt = ablkcipher_encrypt,
1646 .decrypt = ablkcipher_decrypt,
1647 .geniv = "eseqiv",
1648 .min_keysize = AES_MIN_KEY_SIZE,
1649 .max_keysize = AES_MAX_KEY_SIZE,
1650 .ivsize = AES_BLOCK_SIZE,
1651 }
1652 },
1653 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
1654 DESC_HDR_SEL0_AESU |
1655 DESC_HDR_MODE0_AESU_CBC,
1656 },
1657 {
1658 .alg = {
1659 .cra_name = "cbc(des3_ede)",
1660 .cra_driver_name = "cbc-3des-talitos",
1661 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1662 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1663 CRYPTO_ALG_ASYNC,
1664 .cra_type = &crypto_ablkcipher_type,
1665 .cra_ablkcipher = {
1666 .setkey = ablkcipher_setkey,
1667 .encrypt = ablkcipher_encrypt,
1668 .decrypt = ablkcipher_decrypt,
1669 .geniv = "eseqiv",
1670 .min_keysize = DES3_EDE_KEY_SIZE,
1671 .max_keysize = DES3_EDE_KEY_SIZE,
1672 .ivsize = DES3_EDE_BLOCK_SIZE,
1673 }
1674 },
1675 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
1676 DESC_HDR_SEL0_DEU |
1677 DESC_HDR_MODE0_DEU_CBC |
1678 DESC_HDR_MODE0_DEU_3DES,
1352 } 1679 }
1353}; 1680};
1354 1681
@@ -1362,12 +1689,14 @@ struct talitos_crypto_alg {
1362static int talitos_cra_init(struct crypto_tfm *tfm) 1689static int talitos_cra_init(struct crypto_tfm *tfm)
1363{ 1690{
1364 struct crypto_alg *alg = tfm->__crt_alg; 1691 struct crypto_alg *alg = tfm->__crt_alg;
1365 struct talitos_crypto_alg *talitos_alg = 1692 struct talitos_crypto_alg *talitos_alg;
1366 container_of(alg, struct talitos_crypto_alg, crypto_alg);
1367 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); 1693 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
1368 1694
1695 talitos_alg = container_of(alg, struct talitos_crypto_alg, crypto_alg);
1696
1369 /* update context with ptr to dev */ 1697 /* update context with ptr to dev */
1370 ctx->dev = talitos_alg->dev; 1698 ctx->dev = talitos_alg->dev;
1699
1371 /* copy descriptor header template value */ 1700 /* copy descriptor header template value */
1372 ctx->desc_hdr_template = talitos_alg->desc_hdr_template; 1701 ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
1373 1702
@@ -1453,19 +1782,13 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
1453 return ERR_PTR(-ENOMEM); 1782 return ERR_PTR(-ENOMEM);
1454 1783
1455 alg = &t_alg->crypto_alg; 1784 alg = &t_alg->crypto_alg;
1785 *alg = template->alg;
1456 1786
1457 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
1458 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1459 template->driver_name);
1460 alg->cra_module = THIS_MODULE; 1787 alg->cra_module = THIS_MODULE;
1461 alg->cra_init = talitos_cra_init; 1788 alg->cra_init = talitos_cra_init;
1462 alg->cra_priority = TALITOS_CRA_PRIORITY; 1789 alg->cra_priority = TALITOS_CRA_PRIORITY;
1463 alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
1464 alg->cra_blocksize = template->blocksize;
1465 alg->cra_alignmask = 0; 1790 alg->cra_alignmask = 0;
1466 alg->cra_type = &crypto_aead_type;
1467 alg->cra_ctxsize = sizeof(struct talitos_ctx); 1791 alg->cra_ctxsize = sizeof(struct talitos_ctx);
1468 alg->cra_u.aead = template->aead;
1469 1792
1470 t_alg->desc_hdr_template = template->desc_hdr_template; 1793 t_alg->desc_hdr_template = template->desc_hdr_template;
1471 t_alg->dev = dev; 1794 t_alg->dev = dev;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index da8a8ed9e41..f18d1bde043 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -179,9 +179,14 @@ static void dma_halt(struct fsl_dma_chan *fsl_chan)
179static void set_ld_eol(struct fsl_dma_chan *fsl_chan, 179static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
180 struct fsl_desc_sw *desc) 180 struct fsl_desc_sw *desc)
181{ 181{
182 u64 snoop_bits;
183
184 snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
185 ? FSL_DMA_SNEN : 0;
186
182 desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, 187 desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
183 DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL, 188 DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
184 64); 189 | snoop_bits, 64);
185} 190}
186 191
187static void append_ld_queue(struct fsl_dma_chan *fsl_chan, 192static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
@@ -313,8 +318,8 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
313 318
314static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) 319static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
315{ 320{
316 struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
317 struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); 321 struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
322 struct fsl_desc_sw *desc;
318 unsigned long flags; 323 unsigned long flags;
319 dma_cookie_t cookie; 324 dma_cookie_t cookie;
320 325
@@ -322,14 +327,17 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
322 spin_lock_irqsave(&fsl_chan->desc_lock, flags); 327 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
323 328
324 cookie = fsl_chan->common.cookie; 329 cookie = fsl_chan->common.cookie;
325 cookie++; 330 list_for_each_entry(desc, &tx->tx_list, node) {
326 if (cookie < 0) 331 cookie++;
327 cookie = 1; 332 if (cookie < 0)
328 desc->async_tx.cookie = cookie; 333 cookie = 1;
329 fsl_chan->common.cookie = desc->async_tx.cookie;
330 334
331 append_ld_queue(fsl_chan, desc); 335 desc->async_tx.cookie = cookie;
332 list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev); 336 }
337
338 fsl_chan->common.cookie = cookie;
339 append_ld_queue(fsl_chan, tx_to_fsl_desc(tx));
340 list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev);
333 341
334 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); 342 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
335 343
@@ -454,8 +462,8 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
454{ 462{
455 struct fsl_dma_chan *fsl_chan; 463 struct fsl_dma_chan *fsl_chan;
456 struct fsl_desc_sw *first = NULL, *prev = NULL, *new; 464 struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
465 struct list_head *list;
457 size_t copy; 466 size_t copy;
458 LIST_HEAD(link_chain);
459 467
460 if (!chan) 468 if (!chan)
461 return NULL; 469 return NULL;
@@ -472,7 +480,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
472 if (!new) { 480 if (!new) {
473 dev_err(fsl_chan->dev, 481 dev_err(fsl_chan->dev,
474 "No free memory for link descriptor\n"); 482 "No free memory for link descriptor\n");
475 return NULL; 483 goto fail;
476 } 484 }
477#ifdef FSL_DMA_LD_DEBUG 485#ifdef FSL_DMA_LD_DEBUG
478 dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); 486 dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
@@ -507,7 +515,19 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
507 /* Set End-of-link to the last link descriptor of new list*/ 515 /* Set End-of-link to the last link descriptor of new list*/
508 set_ld_eol(fsl_chan, new); 516 set_ld_eol(fsl_chan, new);
509 517
510 return first ? &first->async_tx : NULL; 518 return &first->async_tx;
519
520fail:
521 if (!first)
522 return NULL;
523
524 list = &first->async_tx.tx_list;
525 list_for_each_entry_safe_reverse(new, prev, list, node) {
526 list_del(&new->node);
527 dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
528 }
529
530 return NULL;
511} 531}
512 532
513/** 533/**
@@ -598,15 +618,16 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
598 dma_addr_t next_dest_addr; 618 dma_addr_t next_dest_addr;
599 unsigned long flags; 619 unsigned long flags;
600 620
621 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
622
601 if (!dma_is_idle(fsl_chan)) 623 if (!dma_is_idle(fsl_chan))
602 return; 624 goto out_unlock;
603 625
604 dma_halt(fsl_chan); 626 dma_halt(fsl_chan);
605 627
606 /* If there are some link descriptors 628 /* If there are some link descriptors
607 * not transferred in the queue, we need to start them. 629 * not transferred in the queue, we need to start them.
608 */ 630 */
609 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
610 631
611 /* Find the first untransferred descriptor */ 632 /* Find the first untransferred descriptor */
612 for (ld_node = fsl_chan->ld_queue.next; 633 for (ld_node = fsl_chan->ld_queue.next;
@@ -617,19 +638,20 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
617 fsl_chan->common.cookie) == DMA_SUCCESS); 638 fsl_chan->common.cookie) == DMA_SUCCESS);
618 ld_node = ld_node->next); 639 ld_node = ld_node->next);
619 640
620 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
621
622 if (ld_node != &fsl_chan->ld_queue) { 641 if (ld_node != &fsl_chan->ld_queue) {
623 /* Get the ld start address from ld_queue */ 642 /* Get the ld start address from ld_queue */
624 next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; 643 next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
625 dev_dbg(fsl_chan->dev, "xfer LDs starting from %p\n", 644 dev_dbg(fsl_chan->dev, "xfer LDs starting from 0x%llx\n",
626 (void *)next_dest_addr); 645 (unsigned long long)next_dest_addr);
627 set_cdar(fsl_chan, next_dest_addr); 646 set_cdar(fsl_chan, next_dest_addr);
628 dma_start(fsl_chan); 647 dma_start(fsl_chan);
629 } else { 648 } else {
630 set_cdar(fsl_chan, 0); 649 set_cdar(fsl_chan, 0);
631 set_ndar(fsl_chan, 0); 650 set_ndar(fsl_chan, 0);
632 } 651 }
652
653out_unlock:
654 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
633} 655}
634 656
635/** 657/**
@@ -734,8 +756,9 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
734 */ 756 */
735 if (stat & FSL_DMA_SR_EOSI) { 757 if (stat & FSL_DMA_SR_EOSI) {
736 dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); 758 dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
737 dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n", 759 dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
738 (void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan)); 760 (unsigned long long)get_cdar(fsl_chan),
761 (unsigned long long)get_ndar(fsl_chan));
739 stat &= ~FSL_DMA_SR_EOSI; 762 stat &= ~FSL_DMA_SR_EOSI;
740 update_cookie = 1; 763 update_cookie = 1;
741 } 764 }
@@ -830,7 +853,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
830 new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); 853 new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
831 854
832 new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7; 855 new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
833 if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) { 856 if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
834 dev_err(fdev->dev, "There is no %d channel!\n", 857 dev_err(fdev->dev, "There is no %d channel!\n",
835 new_fsl_chan->id); 858 new_fsl_chan->id);
836 err = -EINVAL; 859 err = -EINVAL;
@@ -925,8 +948,8 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
925 } 948 }
926 949
927 dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " 950 dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
928 "controller at %p...\n", 951 "controller at 0x%llx...\n",
929 match->compatible, (void *)fdev->reg.start); 952 match->compatible, (unsigned long long)fdev->reg.start);
930 fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end 953 fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
931 - fdev->reg.start + 1); 954 - fdev->reg.start + 1);
932 955
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 1955ee8d6d2..a600fc0f796 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -173,7 +173,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
173 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); 173 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
174 174
175#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL 175#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
176 if (i7300_idle_platform_probe(NULL, NULL) == 0) { 176 if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) {
177 device->common.chancnt--; 177 device->common.chancnt--;
178 } 178 }
179#endif 179#endif
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index e5f5c5a8ba6..ab4f3592a11 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -49,7 +49,6 @@ config EDAC_DEBUG_VERBOSE
49 49
50config EDAC_MM_EDAC 50config EDAC_MM_EDAC
51 tristate "Main Memory EDAC (Error Detection And Correction) reporting" 51 tristate "Main Memory EDAC (Error Detection And Correction) reporting"
52 default y
53 help 52 help
54 Some systems are able to detect and correct errors in main 53 Some systems are able to detect and correct errors in main
55 memory. EDAC can report statistics on memory error 54 memory. EDAC can report statistics on memory error
@@ -58,6 +57,31 @@ config EDAC_MM_EDAC
58 occurred so that a particular failing memory module can be 57 occurred so that a particular failing memory module can be
59 replaced. If unsure, select 'Y'. 58 replaced. If unsure, select 'Y'.
60 59
60config EDAC_AMD64
61 tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h"
62 depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI
63 help
64 Support for error detection and correction on the AMD 64
65 Families of Memory Controllers (K8, F10h and F11h)
66
67config EDAC_AMD64_ERROR_INJECTION
68 bool "Sysfs Error Injection facilities"
69 depends on EDAC_AMD64
70 help
71 Recent Opterons (Family 10h and later) provide for Memory Error
72 Injection into the ECC detection circuits. The amd64_edac module
73 allows the operator/user to inject Uncorrectable and Correctable
74 errors into DRAM.
75
76 When enabled, in each of the respective memory controller directories
77 (/sys/devices/system/edac/mc/mcX), there are 3 input files:
78
79 - inject_section (0..3, 16-byte section of 64-byte cacheline),
80 - inject_word (0..8, 16-bit word of 16-byte section),
81 - inject_ecc_vector (hex ecc vector: select bits of inject word)
82
83 In addition, there are two control files, inject_read and inject_write,
84 which trigger the DRAM ECC Read and Write respectively.
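
A user-space sketch of driving these files (the paths follow the help text above; the values written to the trigger files are assumptions, since the help text does not say whether their content matters beyond initiating the cycle):

	#include <stdio.h>

	int main(void)
	{
		/* select the third 16-byte section of the cacheline */
		FILE *f = fopen("/sys/devices/system/edac/mc/mc0/inject_section", "w");

		if (!f)
			return 1;
		fprintf(f, "2\n");
		fclose(f);

		/* trigger a DRAM ECC Write on the selected section */
		f = fopen("/sys/devices/system/edac/mc/mc0/inject_write", "w");
		if (!f)
			return 1;
		fprintf(f, "1\n");
		fclose(f);
		return 0;
	}
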
61 85
62config EDAC_AMD76X 86config EDAC_AMD76X
63 tristate "AMD 76x (760, 762, 768)" 87 tristate "AMD 76x (760, 762, 768)"
@@ -192,16 +216,20 @@ config EDAC_PPC4XX
192 216
193config EDAC_AMD8131 217config EDAC_AMD8131
194 tristate "AMD8131 HyperTransport PCI-X Tunnel" 218 tristate "AMD8131 HyperTransport PCI-X Tunnel"
195 depends on EDAC_MM_EDAC && PCI 219 depends on EDAC_MM_EDAC && PCI && PPC_MAPLE
196 help 220 help
197 Support for error detection and correction on the 221 Support for error detection and correction on the
198 AMD8131 HyperTransport PCI-X Tunnel chip. 222 AMD8131 HyperTransport PCI-X Tunnel chip.
223 Note: add further Kconfig dependencies if this driver is
224 adopted on a machine other than Maple.
199 225
200config EDAC_AMD8111 226config EDAC_AMD8111
201 tristate "AMD8111 HyperTransport I/O Hub" 227 tristate "AMD8111 HyperTransport I/O Hub"
202 depends on EDAC_MM_EDAC && PCI 228 depends on EDAC_MM_EDAC && PCI && PPC_MAPLE
203 help 229 help
204 Support for error detection and correction on the 230 Support for error detection and correction on the
205 AMD8111 HyperTransport I/O Hub chip. 231 AMD8111 HyperTransport I/O Hub chip.
232 Note: add further Kconfig dependencies if this driver is
233 adopted on a machine other than Maple.
206 234
207endif # EDAC 235endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index a5fdcf02f59..633dc5604ee 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -30,8 +30,17 @@ obj-$(CONFIG_EDAC_I3000) += i3000_edac.o
30obj-$(CONFIG_EDAC_X38) += x38_edac.o 30obj-$(CONFIG_EDAC_X38) += x38_edac.o
31obj-$(CONFIG_EDAC_I82860) += i82860_edac.o 31obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
32obj-$(CONFIG_EDAC_R82600) += r82600_edac.o 32obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
33
34amd64_edac_mod-y := amd64_edac_err_types.o amd64_edac.o
35amd64_edac_mod-$(CONFIG_EDAC_DEBUG) += amd64_edac_dbg.o
36amd64_edac_mod-$(CONFIG_EDAC_AMD64_ERROR_INJECTION) += amd64_edac_inj.o
37
38obj-$(CONFIG_EDAC_AMD64) += amd64_edac_mod.o
39
33obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o 40obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o
34obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac.o 41obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac.o
35obj-$(CONFIG_EDAC_MV64X60) += mv64x60_edac.o 42obj-$(CONFIG_EDAC_MV64X60) += mv64x60_edac.o
36obj-$(CONFIG_EDAC_CELL) += cell_edac.o 43obj-$(CONFIG_EDAC_CELL) += cell_edac.o
37obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o 44obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o
45obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o
46obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
new file mode 100644
index 00000000000..c36bf40568c
--- /dev/null
+++ b/drivers/edac/amd64_edac.c
@@ -0,0 +1,3354 @@
1#include "amd64_edac.h"
2#include <asm/k8.h>
3
4static struct edac_pci_ctl_info *amd64_ctl_pci;
5
6static int report_gart_errors;
7module_param(report_gart_errors, int, 0644);
8
9/*
10 * Set by command line parameter. If the BIOS has enabled ECC, this override
11 * is cleared so that this driver does not re-enable the hardware.
12 */
13static int ecc_enable_override;
14module_param(ecc_enable_override, int, 0644);
15
16/* Lookup table for all possible MC control instances */
17struct amd64_pvt;
18static struct mem_ctl_info *mci_lookup[MAX_NUMNODES];
19static struct amd64_pvt *pvt_lookup[MAX_NUMNODES];
20
21/*
22 * Memory scrubber control interface. For K8, memory scrubbing is handled by
23 * hardware and can involve L2 cache, dcache as well as the main memory. With
24 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
25 * functionality.
26 *
27 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
28 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
29 * bytes/sec for the setting.
30 *
31 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
32 * other archs, we might not have access to the caches directly.
33 */
34
35/*
36 * scan the scrub rate mapping table for a close or matching bandwidth value to
37 * issue. If requested is too big, then use last maximum value found.
38 */
39static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
40 u32 min_scrubrate)
41{
42 u32 scrubval;
43 int i;
44
45 /*
46 * map the configured rate (new_bw) to a value specific to the AMD64
47 * memory controller and apply to register. Search for the first
48 * bandwidth entry that is greater or equal than the setting requested
49 * and program that. If at last entry, turn off DRAM scrubbing.
50 */
51 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
52 /*
53 * skip scrub rates which aren't recommended
54 * (see F10 BKDG, F3x58)
55 */
56 if (scrubrates[i].scrubval < min_scrubrate)
57 continue;
58
59 if (scrubrates[i].bandwidth <= new_bw)
60 break;
61
62 /*
63 * if no suitable bandwidth found, turn off DRAM scrubbing
64 * entirely by falling back to the last element in the
65 * scrubrates array.
66 */
67 }
68
69 scrubval = scrubrates[i].scrubval;
70 if (scrubval)
71 edac_printk(KERN_DEBUG, EDAC_MC,
72 "Setting scrub rate bandwidth: %u\n",
73 scrubrates[i].bandwidth);
74 else
75 edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n");
76
77 pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
78
79 return 0;
80}
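
The scrubrates[] table consulted above is defined elsewhere in the driver; a sketch of the shape this search assumes (the entries shown are invented, not real F3x58 encodings):

	struct scrubrate {
		u32 scrubval;	/* bit pattern for the scrub control field */
		u32 bandwidth;	/* scrub rate in bytes/sec */
	};

	static const struct scrubrate scrubrates[] = {
		{ 0x01, 1600000000UL },	/* fastest recommended rate (invented) */
		/* ... intermediate rates in decreasing bandwidth order ... */
		{ 0x00, 0UL },		/* last entry: scrubbing off */
	};
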
81
82static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
83{
84 struct amd64_pvt *pvt = mci->pvt_info;
85 u32 min_scrubrate = 0x0;
86
87 switch (boot_cpu_data.x86) {
88 case 0xf:
89 min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
90 break;
91 case 0x10:
92 min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
93 break;
94 case 0x11:
95 min_scrubrate = F11_MIN_SCRUB_RATE_BITS;
96 break;
97
98 default:
99 amd64_printk(KERN_ERR, "Unsupported family!\n");
100 break;
101 }
102 return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
103 min_scrubrate);
104}
105
106static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
107{
108 struct amd64_pvt *pvt = mci->pvt_info;
109 u32 scrubval = 0;
110 int status = -1, i, ret = 0;
111
112 ret = pci_read_config_dword(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
113 if (ret)
114 debugf0("Reading K8_SCRCTRL failed\n");
115
116 scrubval = scrubval & 0x001F;
117
118 edac_printk(KERN_DEBUG, EDAC_MC,
119 "pci-read, sdram scrub control value: %d\n", scrubval);
120
121 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
122 if (scrubrates[i].scrubval == scrubval) {
123 *bw = scrubrates[i].bandwidth;
124 status = 0;
125 break;
126 }
127 }
128
129 return status;
130}
131
132/* Map from a CSROW entry to the mask entry that operates on it */
133static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
134{
135 return csrow >> (pvt->num_dcsm >> 3);
136}
137
138/* return the 'base' address for the i'th CS entry of the 'dct' DRAM controller */
139static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
140{
141 if (dct == 0)
142 return pvt->dcsb0[csrow];
143 else
144 return pvt->dcsb1[csrow];
145}
146
147/*
148 * Return the 'mask' address for the i'th CS entry. This function is needed
149 * because the number of DCSM registers differs between Rev E and prior vs.
150 * Rev F and later.
151 */
152static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
153{
154 if (dct == 0)
155 return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
156 else
157 return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
158}
159
160
161/*
162 * In *base and *limit, pass back the full 40-bit base and limit physical
163 * addresses for the node given by node_id. This information is obtained from
164 * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
165 * base and limit addresses are of type SysAddr, as defined at the start of
166 * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
167 * in the address range they represent.
168 */
169static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
170 u64 *base, u64 *limit)
171{
172 *base = pvt->dram_base[node_id];
173 *limit = pvt->dram_limit[node_id];
174}
175
176/*
177 * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
178 * with node_id
179 */
180static int amd64_base_limit_match(struct amd64_pvt *pvt,
181 u64 sys_addr, int node_id)
182{
183 u64 base, limit, addr;
184
185 amd64_get_base_and_limit(pvt, node_id, &base, &limit);
186
187 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
188 * all ones if the most significant implemented address bit is 1.
189 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
190 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
191 * Application Programming.
192 */
193 addr = sys_addr & 0x000000ffffffffffull;
194
195 return (addr >= base) && (addr <= limit);
196}
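
A quick worked example of the sign-extension discard (the address is invented):

	u64 sys_addr = 0xffffff8000000000ULL; /* bit 39 set, 63-40 sign-extended */
	u64 addr = sys_addr & 0x000000ffffffffffULL;
	/* addr == 0x0000008000000000ULL: only the 40 implemented bits remain */
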
197
198/*
199 * Attempt to map a SysAddr to a node. On success, return a pointer to the
200 * mem_ctl_info structure for the node that the SysAddr maps to.
201 *
202 * On failure, return NULL.
203 */
204static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
205 u64 sys_addr)
206{
207 struct amd64_pvt *pvt;
208 int node_id;
209 u32 intlv_en, bits;
210
211 /*
212 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
213 * 3.4.4.2) registers to map the SysAddr to a node ID.
214 */
215 pvt = mci->pvt_info;
216
217 /*
218 * The value of this field should be the same for all DRAM Base
219 * registers. Therefore we arbitrarily choose to read it from the
220 * register for node 0.
221 */
222 intlv_en = pvt->dram_IntlvEn[0];
223
224 if (intlv_en == 0) {
225 for (node_id = 0; ; ) {
226 if (amd64_base_limit_match(pvt, sys_addr, node_id))
227 break;
228
229 if (++node_id >= DRAM_REG_COUNT)
230 goto err_no_match;
231 }
232 goto found;
233 }
234
235	if (unlikely((intlv_en != 0x01) &&
236		     (intlv_en != 0x03) &&
237		     (intlv_en != 0x07))) {
238 amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
239 "IntlvEn field of DRAM Base Register for node 0: "
240 "This probably indicates a BIOS bug.\n", intlv_en);
241 return NULL;
242 }
243
244 bits = (((u32) sys_addr) >> 12) & intlv_en;
245
246 for (node_id = 0; ; ) {
247		if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
248 break; /* intlv_sel field matches */
249
250 if (++node_id >= DRAM_REG_COUNT)
251 goto err_no_match;
252 }
253
254 /* sanity test for sys_addr */
255 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
256 amd64_printk(KERN_WARNING,
257 "%s(): sys_addr 0x%lx falls outside base/limit "
258 "address range for node %d with node interleaving "
259 "enabled.\n", __func__, (unsigned long)sys_addr,
260 node_id);
261 return NULL;
262 }
263
264found:
265 return edac_mc_find(node_id);
266
267err_no_match:
268 debugf2("sys_addr 0x%lx doesn't match any node\n",
269 (unsigned long)sys_addr);
270
271 return NULL;
272}
273
274/*
275 * Extract the DRAM CS base address from selected csrow register.
276 */
277static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
278{
279 return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
280 pvt->dcs_shift;
281}
282
283/*
284 * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
285 */
286static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
287{
288 u64 dcsm_bits, other_bits;
289 u64 mask;
290
291 /* Extract bits from DRAM CS Mask. */
292 dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
293
294 other_bits = pvt->dcsm_mask;
295 other_bits = ~(other_bits << pvt->dcs_shift);
296
297 /*
298 * The extracted bits from DCSM belong in the spaces represented by
299 * the cleared bits in other_bits.
300 */
301 mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
302
303 return mask;
304}
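/*
 * Worked example (illustrative RevF-style values): with dcs_shift = 8 and
 * dcsm_mask covering bits [28:19] and [13:5] of the raw register, shifting
 * dcsm_bits left by 8 moves them to address bits [36:27] and [21:13], while
 * other_bits has every position the register does not cover ([26:22],
 * [12:0] and everything above bit 36) forced to 1. ORing the two yields a
 * mask in which only the architecturally meaningful bits can be 0.
 */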
305
306/*
307 * @input_addr is an InputAddr associated with the node given by mci. Return the
308 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
309 */
310static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
311{
312 struct amd64_pvt *pvt;
313 int csrow;
314 u64 base, mask;
315
316 pvt = mci->pvt_info;
317
318 /*
319 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
320 * base/mask register pair, test the condition shown near the start of
321 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
322 */
323 for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
324
325 /* This DRAM chip select is disabled on this node */
326 if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
327 continue;
328
329 base = base_from_dct_base(pvt, csrow);
330 mask = ~mask_from_dct_mask(pvt, csrow);
331
332 if ((input_addr & mask) == (base & mask)) {
333 debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
334 (unsigned long)input_addr, csrow,
335 pvt->mc_node_id);
336
337 return csrow;
338 }
339 }
340
341 debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
342 (unsigned long)input_addr, pvt->mc_node_id);
343
344 return -1;
345}
346
347/*
348 * Return the base value defined by the DRAM Base register for the node
349 * represented by mci. This function returns the full 40-bit value despite the
350 * fact that the register only stores bits 39-24 of the value. See section
351 * 3.4.4.1 (BKDG #26094, K8, revA-E)
352 */
353static inline u64 get_dram_base(struct mem_ctl_info *mci)
354{
355 struct amd64_pvt *pvt = mci->pvt_info;
356
357 return pvt->dram_base[pvt->mc_node_id];
358}
359
360/*
361 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
362 * for the node represented by mci. Info is passed back in *hole_base,
363 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
364 * info is invalid. Info may be invalid for either of the following reasons:
365 *
366 * - The revision of the node is not E or greater. In this case, the DRAM Hole
367 * Address Register does not exist.
368 *
369 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
370 * indicating that its contents are not valid.
371 *
372 * The values passed back in *hole_base, *hole_offset, and *hole_size are
373 * complete 32-bit values despite the fact that the bitfields in the DHAR
374 * only represent bits 31-24 of the base and offset values.
375 */
376int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
377 u64 *hole_offset, u64 *hole_size)
378{
379 struct amd64_pvt *pvt = mci->pvt_info;
380 u64 base;
381
382 /* only revE and later have the DRAM Hole Address Register */
383 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_E) {
384 debugf1(" revision %d for node %d does not support DHAR\n",
385 pvt->ext_model, pvt->mc_node_id);
386 return 1;
387 }
388
389 /* only valid for Fam10h */
390 if (boot_cpu_data.x86 == 0x10 &&
391 (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
392 debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
393 return 1;
394 }
395
396 if ((pvt->dhar & DHAR_VALID) == 0) {
397 debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
398 pvt->mc_node_id);
399 return 1;
400 }
401
402 /* This node has Memory Hoisting */
403
404 /* +------------------+--------------------+--------------------+-----
405 * | memory | DRAM hole | relocated |
406 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
407 * | | | DRAM hole |
408 * | | | [0x100000000, |
409 * | | | (0x100000000+ |
410 * | | | (0xffffffff-x))] |
411 * +------------------+--------------------+--------------------+-----
412 *
413 * Above is a diagram of physical memory showing the DRAM hole and the
414 * relocated addresses from the DRAM hole. As shown, the DRAM hole
415 * starts at address x (the base address) and extends through address
416 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
417 * addresses in the hole so that they start at 0x100000000.
418 */
419
420 base = dhar_base(pvt->dhar);
421
422 *hole_base = base;
423 *hole_size = (0x1ull << 32) - base;
424
425 if (boot_cpu_data.x86 > 0xf)
426 *hole_offset = f10_dhar_offset(pvt->dhar);
427 else
428 *hole_offset = k8_dhar_offset(pvt->dhar);
429
430 debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
431 pvt->mc_node_id, (unsigned long)*hole_base,
432 (unsigned long)*hole_offset, (unsigned long)*hole_size);
433
434 return 0;
435}
436EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
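/*
 * Minimal usage sketch (hypothetical caller, not code from this driver):
 * the helper either fills in all three values and returns 0, or returns 1,
 * in which case the outputs must not be used.
 *
 *	u64 hbase, hoff, hsize;
 *
 *	if (!amd64_get_dram_hole_info(mci, &hbase, &hoff, &hsize))
 *		debugf1("hole: base=0x%lx offset=0x%lx size=0x%lx\n",
 *			(unsigned long)hbase, (unsigned long)hoff,
 *			(unsigned long)hsize);
 */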
437
438/*
439 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
440 * assumed that sys_addr maps to the node given by mci.
441 *
442 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
443 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
444 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
445 * then it is also involved in translating a SysAddr to a DramAddr. Sections
446 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
447 * These parts of the documentation are unclear. I interpret them as follows:
448 *
449 * When node n receives a SysAddr, it processes the SysAddr as follows:
450 *
451 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
452 * Limit registers for node n. If the SysAddr is not within the range
453 *    specified by the base and limit values, then node n ignores the SysAddr
454 * (since it does not map to node n). Otherwise continue to step 2 below.
455 *
456 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
457 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
458 * the range of relocated addresses (starting at 0x100000000) from the DRAM
459 * hole. If not, skip to step 3 below. Else get the value of the
460 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
461 * offset defined by this value from the SysAddr.
462 *
463 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
464 * Base register for node n. To obtain the DramAddr, subtract the base
465 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
466 */
467static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
468{
469 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
470 int ret = 0;
471
472 dram_base = get_dram_base(mci);
473
474 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
475 &hole_size);
476 if (!ret) {
477 if ((sys_addr >= (1ull << 32)) &&
478 (sys_addr < ((1ull << 32) + hole_size))) {
479 /* use DHAR to translate SysAddr to DramAddr */
480 dram_addr = sys_addr - hole_offset;
481
482 debugf2("using DHAR to translate SysAddr 0x%lx to "
483 "DramAddr 0x%lx\n",
484 (unsigned long)sys_addr,
485 (unsigned long)dram_addr);
486
487 return dram_addr;
488 }
489 }
490
491 /*
492 * Translate the SysAddr to a DramAddr as shown near the start of
493 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
494 * only deals with 40-bit values. Therefore we discard bits 63-40 of
495 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
496 * discard are all 1s. Otherwise the bits we discard are all 0s. See
497 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
498 * Programmer's Manual Volume 1 Application Programming.
499 */
500 dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
501
502 debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
503 "DramAddr 0x%lx\n", (unsigned long)sys_addr,
504 (unsigned long)dram_addr);
505 return dram_addr;
506}
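/*
 * Worked example with illustrative numbers: assume this node's dram_base is
 * 0x40000000 and hoisting is disabled. SysAddr 0x48001000 is below 4GiB, so
 * the DHAR branch is skipped and
 *
 *	dram_addr = (0x48001000 & 0xffffffffff) - 0x40000000 = 0x08001000
 *
 * If instead hoisting were enabled on a node with hole_base = 0xC0000000
 * (hence hole_size = 0x40000000, and assuming the BIOS programmed
 * hole_offset = 0x40000000 so that SysAddr 4GiB lands on hole_base), then
 * SysAddr 0x100001000 falls inside the relocated range and maps to
 *
 *	dram_addr = 0x100001000 - 0x40000000 = 0xC0001000
 *
 * i.e. right back into the hole region of that node's DRAM.
 */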
507
508/*
509 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
510 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
511 * for node interleaving.
512 */
513static int num_node_interleave_bits(unsigned intlv_en)
514{
515 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
516 int n;
517
518 BUG_ON(intlv_en > 7);
519 n = intlv_shift_table[intlv_en];
520 return n;
521}
522
523/* Translate the DramAddr given by @dram_addr to an InputAddr. */
524static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
525{
526 struct amd64_pvt *pvt;
527 int intlv_shift;
528 u64 input_addr;
529
530 pvt = mci->pvt_info;
531
532 /*
533 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
534 * concerning translating a DramAddr to an InputAddr.
535 */
536 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
537 input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
538 (dram_addr & 0xfff);
539
540 debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
541 intlv_shift, (unsigned long)dram_addr,
542 (unsigned long)input_addr);
543
544 return input_addr;
545}
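/*
 * Worked example: with two nodes interleaved (intlv_shift = 1), DramAddr
 * 0x5000 loses its node-selection bit (bit 12):
 *
 *	input_addr = ((0x5000 >> 1) & 0xffffff000) + (0x5000 & 0xfff)
 *		   = 0x2000
 *
 * Every other 4KiB block belongs to this node, and the surviving blocks are
 * packed back-to-back in the InputAddr space.
 */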
546
547/*
548 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
549 * assumed that @sys_addr maps to the node given by mci.
550 */
551static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
552{
553 u64 input_addr;
554
555 input_addr =
556 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
557
558	debugf2("SysAddr 0x%lx translates to InputAddr 0x%lx\n",
559 (unsigned long)sys_addr, (unsigned long)input_addr);
560
561 return input_addr;
562}
563
564
565/*
566 * @input_addr is an InputAddr associated with the node represented by mci.
567 * Translate @input_addr to a DramAddr and return the result.
568 */
569static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
570{
571 struct amd64_pvt *pvt;
572 int node_id, intlv_shift;
573 u64 bits, dram_addr;
574 u32 intlv_sel;
575
576 /*
577 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
578 * shows how to translate a DramAddr to an InputAddr. Here we reverse
579 * this procedure. When translating from a DramAddr to an InputAddr, the
580 * bits used for node interleaving are discarded. Here we recover these
581 * bits from the IntlvSel field of the DRAM Limit register (section
582 * 3.4.4.2) for the node that input_addr is associated with.
583 */
584 pvt = mci->pvt_info;
585 node_id = pvt->mc_node_id;
586 BUG_ON((node_id < 0) || (node_id > 7));
587
588 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
589
590 if (intlv_shift == 0) {
591 debugf1(" InputAddr 0x%lx translates to DramAddr of "
592 "same value\n", (unsigned long)input_addr);
593
594 return input_addr;
595 }
596
597 bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
598 (input_addr & 0xfff);
599
600 intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
601 dram_addr = bits + (intlv_sel << 12);
602
603 debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
604 "(%d node interleave bits)\n", (unsigned long)input_addr,
605 (unsigned long)dram_addr, intlv_shift);
606
607 return dram_addr;
608}
609
610/*
611 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
612 * @dram_addr to a SysAddr.
613 */
614static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
615{
616 struct amd64_pvt *pvt = mci->pvt_info;
617 u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
618 int ret = 0;
619
620 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
621 &hole_size);
622 if (!ret) {
623 if ((dram_addr >= hole_base) &&
624 (dram_addr < (hole_base + hole_size))) {
625 sys_addr = dram_addr + hole_offset;
626
627 debugf1("using DHAR to translate DramAddr 0x%lx to "
628 "SysAddr 0x%lx\n", (unsigned long)dram_addr,
629 (unsigned long)sys_addr);
630
631 return sys_addr;
632 }
633 }
634
635 amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
636 sys_addr = dram_addr + base;
637
638 /*
639 * The sys_addr we have computed up to this point is a 40-bit value
640 * because the k8 deals with 40-bit values. However, the value we are
641 * supposed to return is a full 64-bit physical address. The AMD
642 * x86-64 architecture specifies that the most significant implemented
643 * address bit through bit 63 of a physical address must be either all
644 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
645 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
646 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
647 * Programming.
648 */
649 sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
650
651 debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
652 pvt->mc_node_id, (unsigned long)dram_addr,
653 (unsigned long)sys_addr);
654
655 return sys_addr;
656}
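/*
 * A note on the sign-extension idiom above: when bit 39 of sys_addr is set,
 * (sys_addr & (1ull << 39)) - 1 is 0x0000007fffffffff and its complement
 * 0xffffff8000000000 ORs bits 63-39 to 1. When bit 39 is clear, the
 * subtraction wraps to all-ones, whose complement is 0, and sys_addr is
 * returned unchanged.
 */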
657
658/*
659 * @input_addr is an InputAddr associated with the node given by mci. Translate
660 * @input_addr to a SysAddr.
661 */
662static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
663 u64 input_addr)
664{
665 return dram_addr_to_sys_addr(mci,
666 input_addr_to_dram_addr(mci, input_addr));
667}
668
669/*
670 * Find the minimum and maximum InputAddr values that map to the given @csrow.
671 * Pass back these values in *input_addr_min and *input_addr_max.
672 */
673static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
674 u64 *input_addr_min, u64 *input_addr_max)
675{
676 struct amd64_pvt *pvt;
677 u64 base, mask;
678
679 pvt = mci->pvt_info;
680 BUG_ON((csrow < 0) || (csrow >= CHIPSELECT_COUNT));
681
682 base = base_from_dct_base(pvt, csrow);
683 mask = mask_from_dct_mask(pvt, csrow);
684
685 *input_addr_min = base & ~mask;
686 *input_addr_max = base | mask | pvt->dcs_mask_notused;
687}
688
689/*
690 * Extract error address from MCA NB Address Low (section 3.6.4.5) and MCA NB
691 * Address High (section 3.6.4.6) register values and return the result. Address
692 * is located in the info structure (nbeah and nbeal); the encoding is device
693 * specific.
694 */
695static u64 extract_error_address(struct mem_ctl_info *mci,
696 struct amd64_error_info_regs *info)
697{
698 struct amd64_pvt *pvt = mci->pvt_info;
699
700 return pvt->ops->get_error_address(mci, info);
701}
702
703
704/* Map the Error address to a PAGE and PAGE OFFSET. */
705static inline void error_address_to_page_and_offset(u64 error_address,
706 u32 *page, u32 *offset)
707{
708 *page = (u32) (error_address >> PAGE_SHIFT);
709 *offset = ((u32) error_address) & ~PAGE_MASK;
710}
711
712/*
713 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
714 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
715 * of a node that detected an ECC memory error. mci represents the node that
716 * the error address maps to (possibly different from the node that detected
717 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
718 * error.
719 */
720static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
721{
722 int csrow;
723
724 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
725
726 if (csrow == -1)
727 amd64_mc_printk(mci, KERN_ERR,
728 "Failed to translate InputAddr to csrow for "
729 "address 0x%lx\n", (unsigned long)sys_addr);
730 return csrow;
731}
732
733static int get_channel_from_ecc_syndrome(unsigned short syndrome);
734
735static void amd64_cpu_display_info(struct amd64_pvt *pvt)
736{
737 if (boot_cpu_data.x86 == 0x11)
738 edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n");
739 else if (boot_cpu_data.x86 == 0x10)
740 edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
741 else if (boot_cpu_data.x86 == 0xf)
742 edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
743 (pvt->ext_model >= OPTERON_CPU_REV_F) ?
744 "Rev F or later" : "Rev E or earlier");
745 else
746		/* we'll hardly ever get here */
747 edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n");
748}
749
750/*
751 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
752 * are ECC capable.
753 */
754static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
755{
756 int bit;
757	enum edac_type edac_cap = EDAC_NONE;
758
759 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
760 ? 19
761 : 17;
762
763	if (pvt->dclr0 & BIT(bit))
764 edac_cap = EDAC_FLAG_SECDED;
765
766 return edac_cap;
767}
768
769
770static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
771 int ganged);
772
773/* Display and decode various NB registers for debug purposes. */
774static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
775{
776 int ganged;
777
778 debugf1(" nbcap:0x%8.08x DctDualCap=%s DualNode=%s 8-Node=%s\n",
779 pvt->nbcap,
780 (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "True" : "False",
781 (pvt->nbcap & K8_NBCAP_DUAL_NODE) ? "True" : "False",
782 (pvt->nbcap & K8_NBCAP_8_NODE) ? "True" : "False");
783 debugf1(" ECC Capable=%s ChipKill Capable=%s\n",
784 (pvt->nbcap & K8_NBCAP_SECDED) ? "True" : "False",
785 (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "True" : "False");
786 debugf1(" DramCfg0-low=0x%08x DIMM-ECC=%s Parity=%s Width=%s\n",
787 pvt->dclr0,
788 (pvt->dclr0 & BIT(19)) ? "Enabled" : "Disabled",
789 (pvt->dclr0 & BIT(8)) ? "Enabled" : "Disabled",
790 (pvt->dclr0 & BIT(11)) ? "128b" : "64b");
791 debugf1(" DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s DIMM Type=%s\n",
792 (pvt->dclr0 & BIT(12)) ? "Y" : "N",
793 (pvt->dclr0 & BIT(13)) ? "Y" : "N",
794 (pvt->dclr0 & BIT(14)) ? "Y" : "N",
795 (pvt->dclr0 & BIT(15)) ? "Y" : "N",
796 (pvt->dclr0 & BIT(16)) ? "UN-Buffered" : "Buffered");
797
798
799 debugf1(" online-spare: 0x%8.08x\n", pvt->online_spare);
800
801 if (boot_cpu_data.x86 == 0xf) {
802 debugf1(" dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
803 pvt->dhar, dhar_base(pvt->dhar),
804 k8_dhar_offset(pvt->dhar));
805 debugf1(" DramHoleValid=%s\n",
806 (pvt->dhar & DHAR_VALID) ? "True" : "False");
807
808 debugf1(" dbam-dkt: 0x%8.08x\n", pvt->dbam0);
809
810 /* everything below this point is Fam10h and above */
811 return;
812
813 } else {
814 debugf1(" dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
815 pvt->dhar, dhar_base(pvt->dhar),
816 f10_dhar_offset(pvt->dhar));
817 debugf1(" DramMemHoistValid=%s DramHoleValid=%s\n",
818 (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) ?
819 "True" : "False",
820 (pvt->dhar & DHAR_VALID) ?
821 "True" : "False");
822 }
823
824 /* Only if NOT ganged does dcl1 have valid info */
825 if (!dct_ganging_enabled(pvt)) {
826 debugf1(" DramCfg1-low=0x%08x DIMM-ECC=%s Parity=%s "
827 "Width=%s\n", pvt->dclr1,
828 (pvt->dclr1 & BIT(19)) ? "Enabled" : "Disabled",
829 (pvt->dclr1 & BIT(8)) ? "Enabled" : "Disabled",
830 (pvt->dclr1 & BIT(11)) ? "128b" : "64b");
831 debugf1(" DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s "
832 "DIMM Type=%s\n",
833 (pvt->dclr1 & BIT(12)) ? "Y" : "N",
834 (pvt->dclr1 & BIT(13)) ? "Y" : "N",
835 (pvt->dclr1 & BIT(14)) ? "Y" : "N",
836 (pvt->dclr1 & BIT(15)) ? "Y" : "N",
837 (pvt->dclr1 & BIT(16)) ? "UN-Buffered" : "Buffered");
838 }
839
840 /*
841 * Determine if ganged and then dump memory sizes for first controller,
842 * and if NOT ganged dump info for 2nd controller.
843 */
844 ganged = dct_ganging_enabled(pvt);
845
846 f10_debug_display_dimm_sizes(0, pvt, ganged);
847
848 if (!ganged)
849 f10_debug_display_dimm_sizes(1, pvt, ganged);
850}
851
852/* Read in both of DBAM registers */
853static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
854{
855 int err = 0;
856 unsigned int reg;
857
858 reg = DBAM0;
859 err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam0);
860 if (err)
861 goto err_reg;
862
863 if (boot_cpu_data.x86 >= 0x10) {
864 reg = DBAM1;
865 err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam1);
866
867 if (err)
868 goto err_reg;
869 }
870
	return;

871err_reg:
872 debugf0("Error reading F2x%03x.\n", reg);
873}
874
875/*
876 * NOTE: CPU Revision Dependent code: Rev E and Rev F
877 *
878 * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
879 * set the shift factor for the DCSB and DCSM values.
880 *
881 * ->dcs_mask_notused, RevE:
882 *
883 * To find the max InputAddr for the csrow, start with the base address and set
884 * all bits that are "don't care" bits in the test at the start of section
885 * 3.5.4 (p. 84).
886 *
887 * The "don't care" bits are all set bits in the mask and all bits in the gaps
888 * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
889 * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
890 * gaps.
891 *
892 * ->dcs_mask_notused, RevF and later:
893 *
894 * To find the max InputAddr for the csrow, start with the base address and set
895 * all bits that are "don't care" bits in the test at the start of NPT section
896 * 4.5.4 (p. 87).
897 *
898 * The "don't care" bits are all set bits in the mask and all bits in the gaps
899 * between bit ranges [36:27] and [21:13].
900 *
901 * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
902 * which are all bits in the above-mentioned gaps.
903 */
904static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
905{
906 if (pvt->ext_model >= OPTERON_CPU_REV_F) {
907 pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
908 pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
909 pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
910 pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
911
912 switch (boot_cpu_data.x86) {
913 case 0xf:
914 pvt->num_dcsm = REV_F_DCSM_COUNT;
915 break;
916
917 case 0x10:
918 pvt->num_dcsm = F10_DCSM_COUNT;
919 break;
920
921 case 0x11:
922 pvt->num_dcsm = F11_DCSM_COUNT;
923 break;
924
925 default:
926 amd64_printk(KERN_ERR, "Unsupported family!\n");
927 break;
928 }
929 } else {
930 pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
931 pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
932 pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
933 pvt->dcs_shift = REV_E_DCS_SHIFT;
934 pvt->num_dcsm = REV_E_DCSM_COUNT;
935 }
936}
937
938/*
939 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
940 */
941static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
942{
943 int cs, reg, err = 0;
944
945 amd64_set_dct_base_and_mask(pvt);
946
947 for (cs = 0; cs < CHIPSELECT_COUNT; cs++) {
948 reg = K8_DCSB0 + (cs * 4);
949 err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
950 &pvt->dcsb0[cs]);
951 if (unlikely(err))
952 debugf0("Reading K8_DCSB0[%d] failed\n", cs);
953 else
954 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
955 cs, pvt->dcsb0[cs], reg);
956
957 /* If DCT are NOT ganged, then read in DCT1's base */
958 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
959 reg = F10_DCSB1 + (cs * 4);
960 err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
961 &pvt->dcsb1[cs]);
962 if (unlikely(err))
963 debugf0("Reading F10_DCSB1[%d] failed\n", cs);
964 else
965 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
966 cs, pvt->dcsb1[cs], reg);
967 } else {
968 pvt->dcsb1[cs] = 0;
969 }
970 }
971
972 for (cs = 0; cs < pvt->num_dcsm; cs++) {
973		reg = K8_DCSM0 + (cs * 4);
974 err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
975 &pvt->dcsm0[cs]);
976 if (unlikely(err))
977 debugf0("Reading K8_DCSM0 failed\n");
978 else
979 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
980 cs, pvt->dcsm0[cs], reg);
981
982 /* If DCT are NOT ganged, then read in DCT1's mask */
983 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
984 reg = F10_DCSM1 + (cs * 4);
985 err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
986 &pvt->dcsm1[cs]);
987 if (unlikely(err))
988 debugf0("Reading F10_DCSM1[%d] failed\n", cs);
989 else
990 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
991 cs, pvt->dcsm1[cs], reg);
992 } else
993 pvt->dcsm1[cs] = 0;
994 }
995}
996
997static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
998{
999 enum mem_type type;
1000
1001 if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= OPTERON_CPU_REV_F) {
1002 /* Rev F and later */
1003 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1004 } else {
1005 /* Rev E and earlier */
1006 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1007 }
1008
1009 debugf1(" Memory type is: %s\n",
1010 (type == MEM_DDR2) ? "MEM_DDR2" :
1011 (type == MEM_RDDR2) ? "MEM_RDDR2" :
1012 (type == MEM_DDR) ? "MEM_DDR" : "MEM_RDDR");
1013
1014 return type;
1015}
1016
1017/*
1018 * Read the DRAM Configuration Low register. It differs between CG, D & E revs
1019 * and the later RevF memory controllers (DDR vs DDR2)
1020 *
1021 * Return:
1022 * number of memory channels in operation
1023 * Pass back:
1024 * contents of the DCL0_LOW register
1025 */
1026static int k8_early_channel_count(struct amd64_pvt *pvt)
1027{
1028 int flag, err = 0;
1029
1030 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
1031 if (err)
1032 return err;
1033
1034 if ((boot_cpu_data.x86_model >> 4) >= OPTERON_CPU_REV_F) {
1035 /* RevF (NPT) and later */
1036 flag = pvt->dclr0 & F10_WIDTH_128;
1037 } else {
1038 /* RevE and earlier */
1039 flag = pvt->dclr0 & REVE_WIDTH_128;
1040 }
1041
1042 /* not used */
1043 pvt->dclr1 = 0;
1044
1045 return (flag) ? 2 : 1;
1046}
1047
1048/* extract the ERROR ADDRESS for the K8 CPUs */
1049static u64 k8_get_error_address(struct mem_ctl_info *mci,
1050 struct amd64_error_info_regs *info)
1051{
1052 return (((u64) (info->nbeah & 0xff)) << 32) +
1053 (info->nbeal & ~0x03);
1054}
1055
1056/*
1057 * Read the Base and Limit registers for K8 based Memory controllers; extract
1058 * fields from the 'raw' reg into separate data fields
1059 *
1060 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
1061 */
1062static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1063{
1064 u32 low;
1065 u32 off = dram << 3; /* 8 bytes between DRAM entries */
1066 int err;
1067
1068 err = pci_read_config_dword(pvt->addr_f1_ctl,
1069 K8_DRAM_BASE_LOW + off, &low);
1070 if (err)
1071 debugf0("Reading K8_DRAM_BASE_LOW failed\n");
1072
1073 /* Extract parts into separate data entries */
1074 pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
1075 pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
1076 pvt->dram_rw_en[dram] = (low & 0x3);
1077
1078 err = pci_read_config_dword(pvt->addr_f1_ctl,
1079 K8_DRAM_LIMIT_LOW + off, &low);
1080 if (err)
1081 debugf0("Reading K8_DRAM_LIMIT_LOW failed\n");
1082
1083 /*
1084 * Extract parts into separate data entries. Limit is the HIGHEST memory
1085 * location of the region, so lower 24 bits need to be all ones
1086 */
1087 pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
1088 pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
1089 pvt->dram_DstNode[dram] = (low & 0x7);
1090}
1091
1092static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1093 struct amd64_error_info_regs *info,
1094 u64 SystemAddress)
1095{
1096 struct mem_ctl_info *src_mci;
1097 unsigned short syndrome;
1098 int channel, csrow;
1099 u32 page, offset;
1100
1101 /* Extract the syndrome parts and form a 16-bit syndrome */
1102 syndrome = EXTRACT_HIGH_SYNDROME(info->nbsl) << 8;
1103 syndrome |= EXTRACT_LOW_SYNDROME(info->nbsh);
1104
1105 /* CHIPKILL enabled */
1106 if (info->nbcfg & K8_NBCFG_CHIPKILL) {
1107 channel = get_channel_from_ecc_syndrome(syndrome);
1108 if (channel < 0) {
1109 /*
1110 * Syndrome didn't map, so we don't know which of the
1111 * 2 DIMMs is in error. So we need to ID 'both' of them
1112 * as suspect.
1113 */
1114 amd64_mc_printk(mci, KERN_WARNING,
1115 "unknown syndrome 0x%x - possible error "
1116 "reporting race\n", syndrome);
1117 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1118 return;
1119 }
1120 } else {
1121 /*
1122 * non-chipkill ecc mode
1123 *
1124 * The k8 documentation is unclear about how to determine the
1125 * channel number when using non-chipkill memory. This method
1126 * was obtained from email communication with someone at AMD.
1127 * (Wish the email was placed in this comment - norsk)
1128 */
1129 channel = ((SystemAddress & BIT(3)) != 0);
1130 }
1131
1132 /*
1133 * Find out which node the error address belongs to. This may be
1134 * different from the node that detected the error.
1135 */
1136 src_mci = find_mc_by_sys_addr(mci, SystemAddress);
1137	if (!src_mci) {
1138 amd64_mc_printk(mci, KERN_ERR,
1139 "failed to map error address 0x%lx to a node\n",
1140 (unsigned long)SystemAddress);
1141 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1142 return;
1143 }
1144
1145 /* Now map the SystemAddress to a CSROW */
1146 csrow = sys_addr_to_csrow(src_mci, SystemAddress);
1147 if (csrow < 0) {
1148 edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
1149 } else {
1150 error_address_to_page_and_offset(SystemAddress, &page, &offset);
1151
1152 edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
1153 channel, EDAC_MOD_STR);
1154 }
1155}
1156
1157/*
1158 * determine the number of PAGES for this DIMM's size based on its DRAM
1159 * Address Mapping.
1160 *
1161 * First step is to calc the number of bits to shift a value of 1 left to
1162 * indicate how many pages. Start with the DBAM value as the starting bits,
1163 * then proceed to adjust those shift bits, based on CPU rev and the table.
1164 * See BKDG on the DBAM
1165 */
1166static int k8_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
1167{
1168 int nr_pages;
1169
1170 if (pvt->ext_model >= OPTERON_CPU_REV_F) {
1171 nr_pages = 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
1172 } else {
1173 /*
1174 * RevE and less section; this line is tricky. It collapses the
1175 * table used by RevD and later to one that matches revisions CG
1176 * and earlier.
1177 */
1178 dram_map -= (pvt->ext_model >= OPTERON_CPU_REV_D) ?
1179 (dram_map > 8 ? 4 : (dram_map > 5 ?
1180 3 : (dram_map > 2 ? 1 : 0))) : 0;
1181
1182 /* 25 shift is 32MiB minimum DIMM size in RevE and prior */
1183 nr_pages = 1 << (dram_map + 25 - PAGE_SHIFT);
1184 }
1185
1186 return nr_pages;
1187}
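/*
 * Worked example for the pre-RevF branch (assuming PAGE_SHIFT = 12): a DBAM
 * value of 0 denotes the 32MiB minimum, so nr_pages = 1 << (0 + 25 - 12) =
 * 8192 pages. On RevD, dram_map = 3 is first collapsed to 2 by the
 * expression above, giving nr_pages = 1 << (2 + 25 - 12), i.e. a 128MiB
 * csrow.
 */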
1188
1189/*
1190 * Get the number of DCT channels in use.
1191 *
1192 * Return:
1193 * number of Memory Channels in operation
1194 * Pass back:
1195 * contents of the DCL0_LOW register
1196 */
1197static int f10_early_channel_count(struct amd64_pvt *pvt)
1198{
1199 int err = 0, channels = 0;
1200 u32 dbam;
1201
1202 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
1203 if (err)
1204 goto err_reg;
1205
1206 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
1207 if (err)
1208 goto err_reg;
1209
1210 /* If we are in 128 bit mode, then we are using 2 channels */
1211 if (pvt->dclr0 & F10_WIDTH_128) {
1212 debugf0("Data WIDTH is 128 bits - 2 channels\n");
1213 channels = 2;
1214 return channels;
1215 }
1216
1217 /*
1218	 * Need to check if in UN-ganged mode: in that case, there are 2 channels,
1219	 * but they are NOT in 128 bit mode and thus the above 'dclr0' status bit
1220 * will be OFF.
1221 *
1222 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1223	 * its CSEnable bit on. If so, then SINGLE DIMM case.
1224 */
1225 debugf0("Data WIDTH is NOT 128 bits - need more decoding\n");
1226
1227 /*
1228 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1229 * is more than just one DIMM present in unganged mode. Need to check
1230 * both controllers since DIMMs can be placed in either one.
1231 */
1232 channels = 0;
1233 err = pci_read_config_dword(pvt->dram_f2_ctl, DBAM0, &dbam);
1234 if (err)
1235 goto err_reg;
1236
1237 if (DBAM_DIMM(0, dbam) > 0)
1238 channels++;
1239 if (DBAM_DIMM(1, dbam) > 0)
1240 channels++;
1241 if (DBAM_DIMM(2, dbam) > 0)
1242 channels++;
1243 if (DBAM_DIMM(3, dbam) > 0)
1244 channels++;
1245
1246 /* If more than 2 DIMMs are present, then we have 2 channels */
1247 if (channels > 2)
1248 channels = 2;
1249 else if (channels == 0) {
1250 /* No DIMMs on DCT0, so look at DCT1 */
1251 err = pci_read_config_dword(pvt->dram_f2_ctl, DBAM1, &dbam);
1252 if (err)
1253 goto err_reg;
1254
1255 if (DBAM_DIMM(0, dbam) > 0)
1256 channels++;
1257 if (DBAM_DIMM(1, dbam) > 0)
1258 channels++;
1259 if (DBAM_DIMM(2, dbam) > 0)
1260 channels++;
1261 if (DBAM_DIMM(3, dbam) > 0)
1262 channels++;
1263
1264 if (channels > 2)
1265 channels = 2;
1266 }
1267
1268 /* If we found ALL 0 values, then assume just ONE DIMM-ONE Channel */
1269 if (channels == 0)
1270 channels = 1;
1271
1272 debugf0("DIMM count= %d\n", channels);
1273
1274 return channels;
1275
1276err_reg:
1277 return -1;
1278
1279}
1280
1281static int f10_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
1282{
1283 return 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
1284}
1285
1286/* Enable extended configuration access via 0xCF8 feature */
1287static void amd64_setup(struct amd64_pvt *pvt)
1288{
1289 u32 reg;
1290
1291 pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1292
1293 pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
1294 reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1295 pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
1296}
1297
1298/* Restore the extended configuration access via 0xCF8 feature */
1299static void amd64_teardown(struct amd64_pvt *pvt)
1300{
1301 u32 reg;
1302
1303 pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1304
1305 reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1306 if (pvt->flags.cf8_extcfg)
1307 reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1308 pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
1309}
1310
1311static u64 f10_get_error_address(struct mem_ctl_info *mci,
1312 struct amd64_error_info_regs *info)
1313{
1314 return (((u64) (info->nbeah & 0xffff)) << 32) +
1315 (info->nbeal & ~0x01);
1316}
1317
1318/*
1319 * Read the Base and Limit registers for F10 based Memory controllers. Extract
1320 * fields from the 'raw' reg into separate data fields.
1321 *
1322 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
1323 */
1324static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1325{
1326 u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
1327
1328 low_offset = K8_DRAM_BASE_LOW + (dram << 3);
1329 high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
1330
1331 /* read the 'raw' DRAM BASE Address register */
1332 pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_base);
1333
1334 /* Read from the ECS data register */
1335 pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_base);
1336
1337 /* Extract parts into separate data entries */
1338 pvt->dram_rw_en[dram] = (low_base & 0x3);
1339
1340 if (pvt->dram_rw_en[dram] == 0)
1341 return;
1342
1343 pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
1344
1345 pvt->dram_base[dram] = (((((u64) high_base & 0x000000FF) << 32) |
1346 ((u64) low_base & 0xFFFF0000))) << 8;
1347
1348 low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
1349 high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
1350
1351 /* read the 'raw' LIMIT registers */
1352 pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_limit);
1353
1354 /* Read from the ECS data register for the HIGH portion */
1355 pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_limit);
1356
1357 debugf0(" HW Regs: BASE=0x%08x-%08x LIMIT= 0x%08x-%08x\n",
1358 high_base, low_base, high_limit, low_limit);
1359
1360 pvt->dram_DstNode[dram] = (low_limit & 0x7);
1361 pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
1362
1363 /*
1364 * Extract address values and form a LIMIT address. Limit is the HIGHEST
1365 * memory location of the region, so low 24 bits need to be all ones.
1366 */
1367 low_limit |= 0x0000FFFF;
1368 pvt->dram_limit[dram] =
1369 ((((u64) high_limit << 32) + (u64) low_limit) << 8) | (0xFF);
1370}
1371
1372static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
1373{
1374 int err = 0;
1375
1376 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
1377 &pvt->dram_ctl_select_low);
1378 if (err) {
1379 debugf0("Reading F10_DCTL_SEL_LOW failed\n");
1380 } else {
1381 debugf0("DRAM_DCTL_SEL_LOW=0x%x DctSelBaseAddr=0x%x\n",
1382 pvt->dram_ctl_select_low, dct_sel_baseaddr(pvt));
1383
1384 debugf0(" DRAM DCTs are=%s DRAM Is=%s DRAM-Ctl-"
1385 "sel-hi-range=%s\n",
1386 (dct_ganging_enabled(pvt) ? "GANGED" : "NOT GANGED"),
1387 (dct_dram_enabled(pvt) ? "Enabled" : "Disabled"),
1388 (dct_high_range_enabled(pvt) ? "Enabled" : "Disabled"));
1389
1390 debugf0(" DctDatIntLv=%s MemCleared=%s DctSelIntLvAddr=0x%x\n",
1391 (dct_data_intlv_enabled(pvt) ? "Enabled" : "Disabled"),
1392 (dct_memory_cleared(pvt) ? "True " : "False "),
1393 dct_sel_interleave_addr(pvt));
1394 }
1395
1396 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
1397 &pvt->dram_ctl_select_high);
1398 if (err)
1399 debugf0("Reading F10_DCTL_SEL_HIGH failed\n");
1400}
1401
1402/*
1403 * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1404 * Interleaving Modes.
1405 */
1406static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1407 int hi_range_sel, u32 intlv_en)
1408{
1409 u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
1410
1411 if (dct_ganging_enabled(pvt))
1412 cs = 0;
1413 else if (hi_range_sel)
1414 cs = dct_sel_high;
1415 else if (dct_interleave_enabled(pvt)) {
1416 /*
1417 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1418 */
1419 if (dct_sel_interleave_addr(pvt) == 0)
1420 cs = sys_addr >> 6 & 1;
1421 else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
1422 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1423
1424 if (dct_sel_interleave_addr(pvt) & 1)
1425 cs = (sys_addr >> 9 & 1) ^ temp;
1426 else
1427 cs = (sys_addr >> 6 & 1) ^ temp;
1428 } else if (intlv_en & 4)
1429 cs = sys_addr >> 15 & 1;
1430 else if (intlv_en & 2)
1431 cs = sys_addr >> 14 & 1;
1432 else if (intlv_en & 1)
1433 cs = sys_addr >> 13 & 1;
1434 else
1435 cs = sys_addr >> 12 & 1;
1436 } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
1437 cs = ~dct_sel_high & 1;
1438 else
1439 cs = 0;
1440
1441 return cs;
1442}
1443
1444static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
1445{
1446 if (intlv_en == 1)
1447 return 1;
1448 else if (intlv_en == 3)
1449 return 2;
1450 else if (intlv_en == 7)
1451 return 3;
1452
1453 return 0;
1454}
1455
1456/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
1457static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
1458 u32 dct_sel_base_addr,
1459 u64 dct_sel_base_off,
1460 u32 hole_valid, u32 hole_off,
1461 u64 dram_base)
1462{
1463 u64 chan_off;
1464
1465 if (hi_range_sel) {
1466 if (!(dct_sel_base_addr & 0xFFFFF800) &&
1467 hole_valid && (sys_addr >= 0x100000000ULL))
1468 chan_off = hole_off << 16;
1469 else
1470 chan_off = dct_sel_base_off;
1471 } else {
1472 if (hole_valid && (sys_addr >= 0x100000000ULL))
1473 chan_off = hole_off << 16;
1474 else
1475 chan_off = dram_base & 0xFFFFF8000000ULL;
1476 }
1477
1478 return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
1479 (chan_off & 0x0000FFFFFF800000ULL);
1480}
1481
1482/* Hack for the time being - Can we get this from BIOS?? */
1483#define CH0SPARE_RANK 0
1484#define CH1SPARE_RANK 1
1485
1486/*
1487 * checks if the csrow passed in is marked as SPARED; if so, returns the new
1488 * spare row
1489 */
1490static inline int f10_process_possible_spare(int csrow,
1491 u32 cs, struct amd64_pvt *pvt)
1492{
1493 u32 swap_done;
1494 u32 bad_dram_cs;
1495
1496 /* Depending on channel, isolate respective SPARING info */
1497 if (cs) {
1498 swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
1499 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
1500 if (swap_done && (csrow == bad_dram_cs))
1501 csrow = CH1SPARE_RANK;
1502 } else {
1503 swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
1504 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
1505 if (swap_done && (csrow == bad_dram_cs))
1506 csrow = CH0SPARE_RANK;
1507 }
1508 return csrow;
1509}
1510
1511/*
1512 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1513 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1514 *
1515 * Return:
1516 * -EINVAL: NOT FOUND
1517 * 0..csrow = Chip-Select Row
1518 */
1519static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
1520{
1521 struct mem_ctl_info *mci;
1522 struct amd64_pvt *pvt;
1523 u32 cs_base, cs_mask;
1524 int cs_found = -EINVAL;
1525 int csrow;
1526
1527 mci = mci_lookup[nid];
1528 if (!mci)
1529 return cs_found;
1530
1531 pvt = mci->pvt_info;
1532
1533 debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
1534
1535 for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
1536
1537 cs_base = amd64_get_dct_base(pvt, cs, csrow);
1538 if (!(cs_base & K8_DCSB_CS_ENABLE))
1539 continue;
1540
1541 /*
1542 * We have an ENABLED CSROW, Isolate just the MASK bits of the
1543 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
1544 * of the actual address.
1545 */
1546 cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
1547
1548 /*
1549 * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
1550 * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
1551 */
1552 cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
1553
1554 debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
1555 csrow, cs_base, cs_mask);
1556
1557 cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
1558
1559 debugf1(" Final CSMask=0x%x\n", cs_mask);
1560 debugf1(" (InputAddr & ~CSMask)=0x%x "
1561 "(CSBase & ~CSMask)=0x%x\n",
1562 (in_addr & ~cs_mask), (cs_base & ~cs_mask));
1563
1564 if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
1565 cs_found = f10_process_possible_spare(csrow, cs, pvt);
1566
1567 debugf1(" MATCH csrow=%d\n", cs_found);
1568 break;
1569 }
1570 }
1571 return cs_found;
1572}
1573
1574/* For a given @dram_range, check if @sys_addr falls within it. */
1575static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
1576 u64 sys_addr, int *nid, int *chan_sel)
1577{
1578 int node_id, cs_found = -EINVAL, high_range = 0;
1579 u32 intlv_en, intlv_sel, intlv_shift, hole_off;
1580 u32 hole_valid, tmp, dct_sel_base, channel;
1581 u64 dram_base, chan_addr, dct_sel_base_off;
1582
1583 dram_base = pvt->dram_base[dram_range];
1584 intlv_en = pvt->dram_IntlvEn[dram_range];
1585
1586 node_id = pvt->dram_DstNode[dram_range];
1587 intlv_sel = pvt->dram_IntlvSel[dram_range];
1588
1589 debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
1590 dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
1591
1592 /*
1593 * This assumes that one node's DHAR is the same as all the other
1594 * nodes' DHAR.
1595 */
1596 hole_off = (pvt->dhar & 0x0000FF80);
1597 hole_valid = (pvt->dhar & 0x1);
1598 dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
1599
1600 debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
1601 hole_off, hole_valid, intlv_sel);
1602
1603	if (intlv_en &&
1604 (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1605 return -EINVAL;
1606
1607 dct_sel_base = dct_sel_baseaddr(pvt);
1608
1609 /*
1610 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1611 * select between DCT0 and DCT1.
1612 */
1613 if (dct_high_range_enabled(pvt) &&
1614 !dct_ganging_enabled(pvt) &&
1615 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1616 high_range = 1;
1617
1618 channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
1619
1620 chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
1621 dct_sel_base_off, hole_valid,
1622 hole_off, dram_base);
1623
1624 intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
1625
1626 /* remove Node ID (in case of memory interleaving) */
1627 tmp = chan_addr & 0xFC0;
1628
1629 chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
1630
1631 /* remove channel interleave and hash */
1632 if (dct_interleave_enabled(pvt) &&
1633 !dct_high_range_enabled(pvt) &&
1634 !dct_ganging_enabled(pvt)) {
1635 if (dct_sel_interleave_addr(pvt) != 1)
1636 chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
1637 else {
1638 tmp = chan_addr & 0xFC0;
1639 chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
1640 | tmp;
1641 }
1642 }
1643
1644 debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
1645 chan_addr, (u32)(chan_addr >> 8));
1646
1647 cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
1648
1649 if (cs_found >= 0) {
1650 *nid = node_id;
1651 *chan_sel = channel;
1652 }
1653 return cs_found;
1654}
1655
1656static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1657 int *node, int *chan_sel)
1658{
1659 int dram_range, cs_found = -EINVAL;
1660 u64 dram_base, dram_limit;
1661
1662 for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
1663
1664 if (!pvt->dram_rw_en[dram_range])
1665 continue;
1666
1667 dram_base = pvt->dram_base[dram_range];
1668 dram_limit = pvt->dram_limit[dram_range];
1669
1670 if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
1671
1672 cs_found = f10_match_to_this_node(pvt, dram_range,
1673 sys_addr, node,
1674 chan_sel);
1675 if (cs_found >= 0)
1676 break;
1677 }
1678 }
1679 return cs_found;
1680}
1681
1682/*
1683 * This is the F10h reference code from AMD to map a @sys_addr to NodeID,
1684 * CSROW, Channel.
1685 *
1686 * The @sys_addr is usually an error address received from the hardware.
1687 */
1688static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1689 struct amd64_error_info_regs *info,
1690 u64 sys_addr)
1691{
1692 struct amd64_pvt *pvt = mci->pvt_info;
1693 u32 page, offset;
1694 unsigned short syndrome;
1695 int nid, csrow, chan = 0;
1696
1697 csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1698
1699 if (csrow >= 0) {
1700 error_address_to_page_and_offset(sys_addr, &page, &offset);
1701
1702 syndrome = EXTRACT_HIGH_SYNDROME(info->nbsl) << 8;
1703 syndrome |= EXTRACT_LOW_SYNDROME(info->nbsh);
1704
1705 /*
1706 * Is CHIPKILL on? If so, then we can attempt to use the
1707 * syndrome to isolate which channel the error was on.
1708 */
1709 if (pvt->nbcfg & K8_NBCFG_CHIPKILL)
1710 chan = get_channel_from_ecc_syndrome(syndrome);
1711
1712 if (chan >= 0) {
1713 edac_mc_handle_ce(mci, page, offset, syndrome,
1714 csrow, chan, EDAC_MOD_STR);
1715 } else {
1716 /*
1717 * Channel unknown, report all channels on this
1718 * CSROW as failed.
1719 */
1720 for (chan = 0; chan < mci->csrows[csrow].nr_channels;
1721 chan++) {
1722 edac_mc_handle_ce(mci, page, offset,
1723 syndrome,
1724 csrow, chan,
1725 EDAC_MOD_STR);
1726 }
1727 }
1728
1729 } else {
1730 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1731 }
1732}
1733
1734/*
1735 * Input (@index) is the DBAM DIMM value (1 of 4) used as an index into a shift
1736 * table (revf_quad_ddr2_shift) which starts at 128MB DIMM size. Index of 0
1737 * indicates an empty DIMM slot, as reported by the hardware.
1738 *
1739 * Normalize to 128MB by subtracting the 27-bit shift.
1740 */
1741static int map_dbam_to_csrow_size(int index)
1742{
1743 int mega_bytes = 0;
1744
1745 if (index > 0 && index <= DBAM_MAX_VALUE)
1746 mega_bytes = ((128 << (revf_quad_ddr2_shift[index]-27)));
1747
1748 return mega_bytes;
1749}
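/*
 * Example (using assumed shift-table entries, since revf_quad_ddr2_shift is
 * defined elsewhere in this file): an index whose table entry is 27 yields
 * 128 << (27 - 27) = 128MB, and an entry of 30 yields 128 << 3 = 1024MB.
 * Index 0, an empty slot, reports 0MB.
 */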
1750
1751/*
1752 * debug routine to display the memory sizes of a DIMM (ganged or not) and it
1753 * CSROWs as well
1754 */
1755static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
1756 int ganged)
1757{
1758 int dimm, size0, size1;
1759 u32 dbam;
1760 u32 *dcsb;
1761
1762 debugf1(" dbam%d: 0x%8.08x CSROW is %s\n", ctrl,
1763 ctrl ? pvt->dbam1 : pvt->dbam0,
1764 ganged ? "GANGED - dbam1 not used" : "NON-GANGED");
1765
1766 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1767 dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
1768
1769 /* Dump memory sizes for DIMM and its CSROWs */
1770 for (dimm = 0; dimm < 4; dimm++) {
1771
1772 size0 = 0;
1773 if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
1774 size0 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
1775
1776 size1 = 0;
1777 if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
1778 size1 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
1779
1780 debugf1(" CTRL-%d DIMM-%d=%5dMB CSROW-%d=%5dMB "
1781 "CSROW-%d=%5dMB\n",
1782 ctrl,
1783 dimm,
1784 size0 + size1,
1785 dimm * 2,
1786 size0,
1787 dimm * 2 + 1,
1788 size1);
1789 }
1790}
1791
1792/*
1793 * Very early hardware probe on pci_probe thread to determine if this module
1794 * supports the hardware.
1795 *
1796 * Return:
1797 * 0 for OK
1798 * 1 for error
1799 */
1800static int f10_probe_valid_hardware(struct amd64_pvt *pvt)
1801{
1802 int ret = 0;
1803
1804 /*
1805 * If we are on a DDR3 machine, we don't know yet if
1806 * we support that properly at this time
1807 */
1808 if ((pvt->dchr0 & F10_DCHR_Ddr3Mode) ||
1809 (pvt->dchr1 & F10_DCHR_Ddr3Mode)) {
1810
1811 amd64_printk(KERN_WARNING,
1812 "%s() This machine is running with DDR3 memory. "
1813 "This is not currently supported. "
1814 "DCHR0=0x%x DCHR1=0x%x\n",
1815 __func__, pvt->dchr0, pvt->dchr1);
1816
1817 amd64_printk(KERN_WARNING,
1818 " Contact '%s' module MAINTAINER to help add"
1819 " support.\n",
1820 EDAC_MOD_STR);
1821
1822 ret = 1;
1823
1824 }
1825 return ret;
1826}
1827
1828/*
1829 * There are currently 3 types of MC devices for AMD Athlon/Opterons
1830 * (as per PCI DEVICE_IDs):
1831 *
1832 * Family K8: That is the Athlon64 and Opteron CPUs. They all have the same PCI
1833 * DEVICE ID, even though there are differences between the different Revisions
1834 * (CG,D,E,F).
1835 *
1836 * Family F10h and F11h.
1837 *
1838 */
1839static struct amd64_family_type amd64_family_types[] = {
1840 [K8_CPUS] = {
1841 .ctl_name = "RevF",
1842 .addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1843 .misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1844 .ops = {
1845 .early_channel_count = k8_early_channel_count,
1846 .get_error_address = k8_get_error_address,
1847 .read_dram_base_limit = k8_read_dram_base_limit,
1848 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1849 .dbam_map_to_pages = k8_dbam_map_to_pages,
1850 }
1851 },
1852 [F10_CPUS] = {
1853 .ctl_name = "Family 10h",
1854 .addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1855 .misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1856 .ops = {
1857 .probe_valid_hardware = f10_probe_valid_hardware,
1858 .early_channel_count = f10_early_channel_count,
1859 .get_error_address = f10_get_error_address,
1860 .read_dram_base_limit = f10_read_dram_base_limit,
1861 .read_dram_ctl_register = f10_read_dram_ctl_register,
1862 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1863 .dbam_map_to_pages = f10_dbam_map_to_pages,
1864 }
1865 },
1866 [F11_CPUS] = {
1867 .ctl_name = "Family 11h",
1868 .addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
1869 .misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
1870 .ops = {
1871 .probe_valid_hardware = f10_probe_valid_hardware,
1872 .early_channel_count = f10_early_channel_count,
1873 .get_error_address = f10_get_error_address,
1874 .read_dram_base_limit = f10_read_dram_base_limit,
1875 .read_dram_ctl_register = f10_read_dram_ctl_register,
1876 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1877 .dbam_map_to_pages = f10_dbam_map_to_pages,
1878 }
1879 },
1880};
1881
1882static struct pci_dev *pci_get_related_function(unsigned int vendor,
1883 unsigned int device,
1884 struct pci_dev *related)
1885{
1886 struct pci_dev *dev = NULL;
1887
1888 dev = pci_get_device(vendor, device, dev);
1889 while (dev) {
1890 if ((dev->bus->number == related->bus->number) &&
1891 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1892 break;
1893 dev = pci_get_device(vendor, device, dev);
1894 }
1895
1896 return dev;
1897}
1898
1899/*
1900 * syndrome mapping table for ECC ChipKill devices
1901 *
1902 * The comment in each row is the token (nibble) number that is in error.
1903 * The least significant nibble of the syndrome is the mask for the bits
1904 * that are in error (need to be toggled) for the particular nibble.
1905 *
1906 * Each row contains 16 entries.
1907 * The first entry (0th) is the channel number for that row of syndromes.
1908 * The remaining 15 entries are the syndromes for the respective Error
1909 * bit mask index.
1910 *
1911 * 1st index entry is 0x0001 mask, indicating that the rightmost bit is the
1912 * bit in error.
1913 * The 2nd index entry is 0x0010, indicating that the second bit is damaged.
1914 * The 3rd index entry is 0x0011 indicating that the rightmost 2 bits
1915 * are damaged.
1916 * Thus so on until index 15, 0x1111, whose entry has the syndrome
1917 * indicating that all 4 bits are damaged.
1918 *
1919 * A search is performed on this table looking for a given syndrome.
1920 *
1921 * See the AMD documentation for ECC syndromes. This ECC table is valid
1922 * across all the versions of the AMD64 processors.
1923 *
1924 * A fast lookup is to use the LAST four bits of the 16-bit syndrome as a
1925 * COLUMN index, then search all ROWS of that column, looking for a match
1926 * with the input syndrome. The ROW value will be the token number.
1927 *
1928 * The 0'th entry on that row, can be returned as the CHANNEL (0 or 1) of this
1929 * error.
1930 */
1931#define NUMBER_ECC_ROWS 36
1932static const unsigned short ecc_chipkill_syndromes[NUMBER_ECC_ROWS][16] = {
1933 /* Channel 0 syndromes */
1934 {/*0*/ 0, 0xe821, 0x7c32, 0x9413, 0xbb44, 0x5365, 0xc776, 0x2f57,
1935 0xdd88, 0x35a9, 0xa1ba, 0x499b, 0x66cc, 0x8eed, 0x1afe, 0xf2df },
1936 {/*1*/ 0, 0x5d31, 0xa612, 0xfb23, 0x9584, 0xc8b5, 0x3396, 0x6ea7,
1937 0xeac8, 0xb7f9, 0x4cda, 0x11eb, 0x7f4c, 0x227d, 0xd95e, 0x846f },
1938 {/*2*/ 0, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
1939 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f },
1940 {/*3*/ 0, 0x2021, 0x3032, 0x1013, 0x4044, 0x6065, 0x7076, 0x5057,
1941 0x8088, 0xa0a9, 0xb0ba, 0x909b, 0xc0cc, 0xe0ed, 0xf0fe, 0xd0df },
1942 {/*4*/ 0, 0x5041, 0xa082, 0xf0c3, 0x9054, 0xc015, 0x30d6, 0x6097,
1943 0xe0a8, 0xb0e9, 0x402a, 0x106b, 0x70fc, 0x20bd, 0xd07e, 0x803f },
1944 {/*5*/ 0, 0xbe21, 0xd732, 0x6913, 0x2144, 0x9f65, 0xf676, 0x4857,
1945 0x3288, 0x8ca9, 0xe5ba, 0x5b9b, 0x13cc, 0xaded, 0xc4fe, 0x7adf },
1946 {/*6*/ 0, 0x4951, 0x8ea2, 0xc7f3, 0x5394, 0x1ac5, 0xdd36, 0x9467,
1947 0xa1e8, 0xe8b9, 0x2f4a, 0x661b, 0xf27c, 0xbb2d, 0x7cde, 0x358f },
1948 {/*7*/ 0, 0x74e1, 0x9872, 0xec93, 0xd6b4, 0xa255, 0x4ec6, 0x3a27,
1949 0x6bd8, 0x1f39, 0xf3aa, 0x874b, 0xbd6c, 0xc98d, 0x251e, 0x51ff },
1950 {/*8*/ 0, 0x15c1, 0x2a42, 0x3f83, 0xcef4, 0xdb35, 0xe4b6, 0xf177,
1951 0x4758, 0x5299, 0x6d1a, 0x78db, 0x89ac, 0x9c6d, 0xa3ee, 0xb62f },
1952 {/*9*/ 0, 0x3d01, 0x1602, 0x2b03, 0x8504, 0xb805, 0x9306, 0xae07,
1953 0xca08, 0xf709, 0xdc0a, 0xe10b, 0x4f0c, 0x720d, 0x590e, 0x640f },
1954 {/*a*/ 0, 0x9801, 0xec02, 0x7403, 0x6b04, 0xf305, 0x8706, 0x1f07,
1955 0xbd08, 0x2509, 0x510a, 0xc90b, 0xd60c, 0x4e0d, 0x3a0e, 0xa20f },
1956 {/*b*/ 0, 0xd131, 0x6212, 0xb323, 0x3884, 0xe9b5, 0x5a96, 0x8ba7,
1957 0x1cc8, 0xcdf9, 0x7eda, 0xafeb, 0x244c, 0xf57d, 0x465e, 0x976f },
1958 {/*c*/ 0, 0xe1d1, 0x7262, 0x93b3, 0xb834, 0x59e5, 0xca56, 0x2b87,
1959 0xdc18, 0x3dc9, 0xae7a, 0x4fab, 0x542c, 0x85fd, 0x164e, 0xf79f },
1960 {/*d*/ 0, 0x6051, 0xb0a2, 0xd0f3, 0x1094, 0x70c5, 0xa036, 0xc067,
1961 0x20e8, 0x40b9, 0x904a, 0x601b, 0x307c, 0x502d, 0x80de, 0xe08f },
1962 {/*e*/ 0, 0xa4c1, 0xf842, 0x5c83, 0xe6f4, 0x4235, 0x1eb6, 0xba77,
1963 0x7b58, 0xdf99, 0x831a, 0x27db, 0x9dac, 0x396d, 0x65ee, 0xc12f },
1964 {/*f*/ 0, 0x11c1, 0x2242, 0x3383, 0xc8f4, 0xd935, 0xeab6, 0xfb77,
1965 0x4c58, 0x5d99, 0x6e1a, 0x7fdb, 0x84ac, 0x956d, 0xa6ee, 0xb72f },
1966
1967 /* Channel 1 syndromes */
1968 {/*10*/ 1, 0x45d1, 0x8a62, 0xcfb3, 0x5e34, 0x1be5, 0xd456, 0x9187,
1969 0xa718, 0xe2c9, 0x2d7a, 0x68ab, 0xf92c, 0xbcfd, 0x734e, 0x369f },
1970 {/*11*/ 1, 0x63e1, 0xb172, 0xd293, 0x14b4, 0x7755, 0xa5c6, 0xc627,
1971 0x28d8, 0x4b39, 0x99aa, 0xfa4b, 0x3c6c, 0x5f8d, 0x8d1e, 0xeeff },
1972 {/*12*/ 1, 0xb741, 0xd982, 0x6ec3, 0x2254, 0x9515, 0xfbd6, 0x4c97,
1973 0x33a8, 0x84e9, 0xea2a, 0x5d6b, 0x11fc, 0xa6bd, 0xc87e, 0x7f3f },
1974 {/*13*/ 1, 0xdd41, 0x6682, 0xbbc3, 0x3554, 0xe815, 0x53d6, 0xce97,
1975 0x1aa8, 0xc7e9, 0x7c2a, 0xa1fb, 0x2ffc, 0xf2bd, 0x497e, 0x943f },
1976 {/*14*/ 1, 0x2bd1, 0x3d62, 0x16b3, 0x4f34, 0x64e5, 0x7256, 0x5987,
1977 0x8518, 0xaec9, 0xb87a, 0x93ab, 0xca2c, 0xe1fd, 0xf74e, 0xdc9f },
1978 {/*15*/ 1, 0x83c1, 0xc142, 0x4283, 0xa4f4, 0x2735, 0x65b6, 0xe677,
1979 0xf858, 0x7b99, 0x391a, 0xbadb, 0x5cac, 0xdf6d, 0x9dee, 0x1e2f },
1980 {/*16*/ 1, 0x8fd1, 0xc562, 0x4ab3, 0xa934, 0x26e5, 0x6c56, 0xe387,
1981 0xfe18, 0x71c9, 0x3b7a, 0xb4ab, 0x572c, 0xd8fd, 0x924e, 0x1d9f },
1982 {/*17*/ 1, 0x4791, 0x89e2, 0xce73, 0x5264, 0x15f5, 0xdb86, 0x9c17,
1983 0xa3b8, 0xe429, 0x2a5a, 0x6dcb, 0xf1dc, 0xb64d, 0x783e, 0x3faf },
1984 {/*18*/ 1, 0x5781, 0xa9c2, 0xfe43, 0x92a4, 0xc525, 0x3b66, 0x6ce7,
1985 0xe3f8, 0xb479, 0x4a3a, 0x1dbb, 0x715c, 0x26dd, 0xd89e, 0x8f1f },
1986 {/*19*/ 1, 0xbf41, 0xd582, 0x6ac3, 0x2954, 0x9615, 0xfcd6, 0x4397,
1987 0x3ea8, 0x81e9, 0xeb2a, 0x546b, 0x17fc, 0xa8bd, 0xc27e, 0x7d3f },
1988 {/*1a*/ 1, 0x9891, 0xe1e2, 0x7273, 0x6464, 0xf7f5, 0x8586, 0x1617,
1989 0xb8b8, 0x2b29, 0x595a, 0xcacb, 0xdcdc, 0x4f4d, 0x3d3e, 0xaeaf },
1990 {/*1b*/ 1, 0xcce1, 0x4472, 0x8893, 0xfdb4, 0x3f55, 0xb9c6, 0x7527,
1991 0x56d8, 0x9a39, 0x12aa, 0xde4b, 0xab6c, 0x678d, 0xef1e, 0x23ff },
1992 {/*1c*/ 1, 0xa761, 0xf9b2, 0x5ed3, 0xe214, 0x4575, 0x1ba6, 0xbcc7,
1993 0x7328, 0xd449, 0x8a9a, 0x2dfb, 0x913c, 0x365d, 0x688e, 0xcfef },
1994 {/*1d*/ 1, 0xff61, 0x55b2, 0xaad3, 0x7914, 0x8675, 0x2ca6, 0xd3c7,
1995 0x9e28, 0x6149, 0xcb9a, 0x34fb, 0xe73c, 0x185d, 0xb28e, 0x4def },
1996 {/*1e*/ 1, 0x5451, 0xa8a2, 0xfcf3, 0x9694, 0xc2c5, 0x3e36, 0x6a67,
1997 0xebe8, 0xbfb9, 0x434a, 0x171b, 0x7d7c, 0x292d, 0xd5de, 0x818f },
1998 {/*1f*/ 1, 0x6fc1, 0xb542, 0xda83, 0x19f4, 0x7635, 0xacb6, 0xc377,
1999 0x2e58, 0x4199, 0x9b1a, 0xf4db, 0x37ac, 0x586d, 0x82ee, 0xed2f },
2000
2001	/* ECC bits are also in the set of tokens and they too can go bad.
2002	 * The first 2 rows cover channel 0, while the second 2 cover channel 1.
2003	 */
2004 {/*20*/ 0, 0xbe01, 0xd702, 0x6903, 0x2104, 0x9f05, 0xf606, 0x4807,
2005 0x3208, 0x8c09, 0xe50a, 0x5b0b, 0x130c, 0xad0d, 0xc40e, 0x7a0f },
2006 {/*21*/ 0, 0x4101, 0x8202, 0xc303, 0x5804, 0x1905, 0xda06, 0x9b07,
2007 0xac08, 0xed09, 0x2e0a, 0x6f0b, 0x640c, 0xb50d, 0x760e, 0x370f },
2008 {/*22*/ 1, 0xc441, 0x4882, 0x8cc3, 0xf654, 0x3215, 0xbed6, 0x7a97,
2009 0x5ba8, 0x9fe9, 0x132a, 0xd76b, 0xadfc, 0x69bd, 0xe57e, 0x213f },
2010 {/*23*/ 1, 0x7621, 0x9b32, 0xed13, 0xda44, 0xac65, 0x4176, 0x3757,
2011 0x6f88, 0x19a9, 0xf4ba, 0x829b, 0xb5cc, 0xc3ed, 0x2efe, 0x58df }
2012};
2013
2014/*
2015 * Given the syndrome argument, scan each of the channel tables for a syndrome
2016 * match. Depending on which table it is found in, return the channel number.
2017 */
2018static int get_channel_from_ecc_syndrome(unsigned short syndrome)
2019{
2020 int row;
2021 int column;
2022
2023 /* Determine column to scan */
2024 column = syndrome & 0xF;
2025
2026 /* Scan all rows, looking for syndrome, or end of table */
2027 for (row = 0; row < NUMBER_ECC_ROWS; row++) {
2028 if (ecc_chipkill_syndromes[row][column] == syndrome)
2029 return ecc_chipkill_syndromes[row][0];
2030 }
2031
2032 debugf0("syndrome(%x) not found\n", syndrome);
2033 return -1;
2034}
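
/*
 * Illustrative sketch (not part of the driver proper): per the table
 * layout described above, one lookup can recover not just the channel
 * but also the token (nibble) in error and the bit mask within that
 * nibble. The struct and function names here are hypothetical.
 */
struct chipkill_decode {
	int channel;	/* 0 or 1, from column 0 of the matching row */
	int token;	/* row index == token number of the nibble in error */
	int bit_mask;	/* column index == bits to toggle in that nibble */
};

static int __maybe_unused decode_chipkill_syndrome(unsigned short syndrome,
						   struct chipkill_decode *d)
{
	int row, column = syndrome & 0xF;

	if (column == 0)
		return -1;	/* no error bits encoded in the syndrome */

	for (row = 0; row < NUMBER_ECC_ROWS; row++) {
		if (ecc_chipkill_syndromes[row][column] == syndrome) {
			d->channel  = ecc_chipkill_syndromes[row][0];
			d->token    = row;
			d->bit_mask = column;
			return 0;
		}
	}
	return -1;	/* syndrome not in the table */
}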
2035
2036/*
2037 * Check for valid error in the NB Status High register. If so, proceed to read
2038 * NB Status Low, NB Address Low and NB Address High registers and store data
2039 * into error structure.
2040 *
2041 * Returns:
2042 * - 1: if the hardware regs contain valid error info
2043 * - 0: if no valid error is indicated
2044 */
2045static int amd64_get_error_info_regs(struct mem_ctl_info *mci,
2046 struct amd64_error_info_regs *regs)
2047{
2048 struct amd64_pvt *pvt;
2049 struct pci_dev *misc_f3_ctl;
2050 int err = 0;
2051
2052 pvt = mci->pvt_info;
2053 misc_f3_ctl = pvt->misc_f3_ctl;
2054
2055 err = pci_read_config_dword(misc_f3_ctl, K8_NBSH, &regs->nbsh);
2056 if (err)
2057 goto err_reg;
2058
2059 if (!(regs->nbsh & K8_NBSH_VALID_BIT))
2060 return 0;
2061
2062 /* valid error, read remaining error information registers */
2063 err = pci_read_config_dword(misc_f3_ctl, K8_NBSL, &regs->nbsl);
2064 if (err)
2065 goto err_reg;
2066
2067 err = pci_read_config_dword(misc_f3_ctl, K8_NBEAL, &regs->nbeal);
2068 if (err)
2069 goto err_reg;
2070
2071 err = pci_read_config_dword(misc_f3_ctl, K8_NBEAH, &regs->nbeah);
2072 if (err)
2073 goto err_reg;
2074
2075 err = pci_read_config_dword(misc_f3_ctl, K8_NBCFG, &regs->nbcfg);
2076 if (err)
2077 goto err_reg;
2078
2079 return 1;
2080
2081err_reg:
2082 debugf0("Reading error info register failed\n");
2083 return 0;
2084}
2085
2086/*
2087 * This function is called to retrieve the error data from hardware and store it
2088 * in the info structure.
2089 *
2090 * Returns:
2091 * - 1: if a valid error is found
2092 * - 0: if no error is found
2093 */
2094static int amd64_get_error_info(struct mem_ctl_info *mci,
2095 struct amd64_error_info_regs *info)
2096{
2097 struct amd64_pvt *pvt;
2098 struct amd64_error_info_regs regs;
2099
2100 pvt = mci->pvt_info;
2101
2102 if (!amd64_get_error_info_regs(mci, info))
2103 return 0;
2104
2105 /*
2106 * Here's the problem with the K8's EDAC reporting: There are four
2107 * registers which report pieces of error information. They are shared
2108 * between CEs and UEs. Furthermore, contrary to what is stated in the
2109 * BKDG, the overflow bit is never used! Every error always updates the
2110 * reporting registers.
2111 *
2112 * Can you see the race condition? All four error reporting registers
2113 * must be read before a new error updates them! There is no way to read
2114 * all four registers atomically. The best that can be done is to detect
2115 * that a race has occurred and then report the error without any kind of
2116 * precision.
2117 *
2118 * What is still positive is that errors are still reported and thus
2119 * problems can still be detected - just not localized because the
2120 * syndrome and address are spread out across registers.
2121 *
2122 * Grrrrr!!!!! Here's hoping that AMD fixes this in some future K8 rev.
2123 * UEs and CEs should have separate register sets with proper overflow
2124 * bits that are used! At the very least the problem can be fixed by
2125 * honoring the ErrValid bit in 'nbsh' and not updating registers - just
2126 * set the overflow bit - unless the current error is CE and the new
2127 * error is UE, which would be the only situation that warrants overwriting the
2128 * current values.
2129 */
2130
2131 regs = *info;
2132
2133 /* Use info from the second read - most current */
2134 if (unlikely(!amd64_get_error_info_regs(mci, info)))
2135 return 0;
2136
2137 /* clear the error bits in hardware */
2138 pci_write_bits32(pvt->misc_f3_ctl, K8_NBSH, 0, K8_NBSH_VALID_BIT);
2139
2140 /* Check for the possible race condition */
2141 if ((regs.nbsh != info->nbsh) ||
2142 (regs.nbsl != info->nbsl) ||
2143 (regs.nbeah != info->nbeah) ||
2144 (regs.nbeal != info->nbeal)) {
2145 amd64_mc_printk(mci, KERN_WARNING,
2146 "hardware STATUS read access race condition "
2147 "detected!\n");
2148 return 0;
2149 }
2150 return 1;
2151}
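
/*
 * A minimal sketch of the read-twice idiom used above, under the same
 * assumptions: read the register set once, read it again, and trust the
 * data only if the snapshots of the four shared registers agree. The
 * helper name is hypothetical.
 */
static int __maybe_unused read_error_regs_racefree(struct mem_ctl_info *mci,
					struct amd64_error_info_regs *out)
{
	struct amd64_error_info_regs first;

	if (!amd64_get_error_info_regs(mci, &first))
		return 0;		/* nothing latched in hardware */

	if (!amd64_get_error_info_regs(mci, out))
		return 0;		/* the error vanished under us */

	/* identical snapshots mean no new error hit between the reads */
	return first.nbsh  == out->nbsh  && first.nbsl  == out->nbsl &&
	       first.nbeah == out->nbeah && first.nbeal == out->nbeal;
}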
2152
2153static inline void amd64_decode_gart_tlb_error(struct mem_ctl_info *mci,
2154 struct amd64_error_info_regs *info)
2155{
2156 u32 err_code;
2157 u32 ec_tt; /* error code transaction type (2b) */
2158 u32 ec_ll; /* error code cache level (2b) */
2159
2160 err_code = EXTRACT_ERROR_CODE(info->nbsl);
2161 ec_ll = EXTRACT_LL_CODE(err_code);
2162 ec_tt = EXTRACT_TT_CODE(err_code);
2163
2164 amd64_mc_printk(mci, KERN_ERR,
2165 "GART TLB event: transaction type(%s), "
2166 "cache level(%s)\n", tt_msgs[ec_tt], ll_msgs[ec_ll]);
2167}
2168
2169static inline void amd64_decode_mem_cache_error(struct mem_ctl_info *mci,
2170 struct amd64_error_info_regs *info)
2171{
2172 u32 err_code;
2173 u32 ec_rrrr; /* error code memory transaction (4b) */
2174 u32 ec_tt; /* error code transaction type (2b) */
2175 u32 ec_ll; /* error code cache level (2b) */
2176
2177 err_code = EXTRACT_ERROR_CODE(info->nbsl);
2178 ec_ll = EXTRACT_LL_CODE(err_code);
2179 ec_tt = EXTRACT_TT_CODE(err_code);
2180 ec_rrrr = EXTRACT_RRRR_CODE(err_code);
2181
2182 amd64_mc_printk(mci, KERN_ERR,
2183 "cache hierarchy error: memory transaction type(%s), "
2184 "transaction type(%s), cache level(%s)\n",
2185 rrrr_msgs[ec_rrrr], tt_msgs[ec_tt], ll_msgs[ec_ll]);
2186}
2187
2188
2189/*
2190 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
2191 * ADDRESS and process.
2192 */
2193static void amd64_handle_ce(struct mem_ctl_info *mci,
2194 struct amd64_error_info_regs *info)
2195{
2196 struct amd64_pvt *pvt = mci->pvt_info;
2197 u64 SystemAddress;
2198
2199 /* Ensure that the Error Address is VALID */
2200 if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
2201 amd64_mc_printk(mci, KERN_ERR,
2202 "HW has no ERROR_ADDRESS available\n");
2203 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
2204 return;
2205 }
2206
2207 SystemAddress = extract_error_address(mci, info);
2208
2209 amd64_mc_printk(mci, KERN_ERR,
2210 "CE ERROR_ADDRESS= 0x%llx\n", SystemAddress);
2211
2212 pvt->ops->map_sysaddr_to_csrow(mci, info, SystemAddress);
2213}
2214
2215/* Handle any Un-correctable Errors (UEs) */
2216static void amd64_handle_ue(struct mem_ctl_info *mci,
2217 struct amd64_error_info_regs *info)
2218{
2219 int csrow;
2220 u64 SystemAddress;
2221 u32 page, offset;
2222 struct mem_ctl_info *log_mci, *src_mci = NULL;
2223
2224 log_mci = mci;
2225
2226 if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
2227 amd64_mc_printk(mci, KERN_CRIT,
2228 "HW has no ERROR_ADDRESS available\n");
2229 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
2230 return;
2231 }
2232
2233 SystemAddress = extract_error_address(mci, info);
2234
2235 /*
2236 * Find out which node the error address belongs to. This may be
2237 * different from the node that detected the error.
2238 */
2239 src_mci = find_mc_by_sys_addr(mci, SystemAddress);
2240 if (!src_mci) {
2241 amd64_mc_printk(mci, KERN_CRIT,
2242 "ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
2243 (unsigned long)SystemAddress);
2244 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
2245 return;
2246 }
2247
2248 log_mci = src_mci;
2249
2250 csrow = sys_addr_to_csrow(log_mci, SystemAddress);
2251 if (csrow < 0) {
2252 amd64_mc_printk(mci, KERN_CRIT,
2253 "ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
2254 (unsigned long)SystemAddress);
2255 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
2256 } else {
2257 error_address_to_page_and_offset(SystemAddress, &page, &offset);
2258 edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
2259 }
2260}
2261
2262static void amd64_decode_bus_error(struct mem_ctl_info *mci,
2263 struct amd64_error_info_regs *info)
2264{
2265 u32 err_code, ext_ec;
2266 u32 ec_pp; /* error code participating processor (2p) */
2267 u32 ec_to; /* error code timed out (1b) */
2268 u32 ec_rrrr; /* error code memory transaction (4b) */
2269 u32 ec_ii; /* error code memory or I/O (2b) */
2270 u32 ec_ll; /* error code cache level (2b) */
2271
2272 ext_ec = EXTRACT_EXT_ERROR_CODE(info->nbsl);
2273 err_code = EXTRACT_ERROR_CODE(info->nbsl);
2274
2275 ec_ll = EXTRACT_LL_CODE(err_code);
2276 ec_ii = EXTRACT_II_CODE(err_code);
2277 ec_rrrr = EXTRACT_RRRR_CODE(err_code);
2278 ec_to = EXTRACT_TO_CODE(err_code);
2279 ec_pp = EXTRACT_PP_CODE(err_code);
2280
2281 amd64_mc_printk(mci, KERN_ERR,
2282 "BUS ERROR:\n"
2283 " time-out(%s) mem or i/o(%s)\n"
2284 " participating processor(%s)\n"
2285 " memory transaction type(%s)\n"
2286 " cache level(%s) Error Found by: %s\n",
2287 to_msgs[ec_to],
2288 ii_msgs[ec_ii],
2289 pp_msgs[ec_pp],
2290 rrrr_msgs[ec_rrrr],
2291 ll_msgs[ec_ll],
2292 (info->nbsh & K8_NBSH_ERR_SCRUBER) ?
2293 "Scrubber" : "Normal Operation");
2294
2295 /* If this was an 'observed' error, early out */
2296 if (ec_pp == K8_NBSL_PP_OBS)
2297 return; /* We aren't the node involved */
2298
2299 /* Parse out the extended error code for ECC events */
2300 switch (ext_ec) {
2301 /* F10 changed to one Extended ECC error code */
2302 case F10_NBSL_EXT_ERR_RES: /* Reserved field */
2303 case F10_NBSL_EXT_ERR_ECC: /* F10 ECC ext err code */
2304 break;
2305
2306 default:
2307 amd64_mc_printk(mci, KERN_ERR, "NOT ECC: no special error "
2308 "handling for this error\n");
2309 return;
2310 }
2311
2312 if (info->nbsh & K8_NBSH_CECC)
2313 amd64_handle_ce(mci, info);
2314 else if (info->nbsh & K8_NBSH_UECC)
2315 amd64_handle_ue(mci, info);
2316
2317 /*
2318 * If main error is CE then overflow must be CE. If main error is UE
2319 * then overflow is unknown. We'll call the overflow a CE - if
2320	 * panic_on_ue is set then we have already panicked and won't arrive
2321	 * here. Otherwise, apparently someone doesn't think that UEs are
2322	 * catastrophic.
2323 */
2324 if (info->nbsh & K8_NBSH_OVERFLOW)
2325 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR
2326 "Error Overflow set");
2327}
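
/*
 * Worked example with a made-up register value: for err_code = 0x0a54,
 * (0x0a54 & 0xf800) == 0x0800, so TEST_BUS_ERROR() matches. The field
 * extractions above then yield pp = (0x0a54 >> 9) & 0x3 = 1,
 * to = (0x0a54 >> 8) & 0x1 = 0, rrrr = (0x0a54 >> 4) & 0xf = 0x5,
 * ii = (0x0a54 >> 2) & 0x3 = 1 and ll = 0x0a54 & 0x3 = 0, which index
 * the pp_msgs/to_msgs/rrrr_msgs/ii_msgs/ll_msgs string tables.
 */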
2328
2329int amd64_process_error_info(struct mem_ctl_info *mci,
2330 struct amd64_error_info_regs *info,
2331 int handle_errors)
2332{
2333 struct amd64_pvt *pvt;
2334 struct amd64_error_info_regs *regs;
2335 u32 err_code, ext_ec;
2336 int gart_tlb_error = 0;
2337
2338 pvt = mci->pvt_info;
2339
2340 /* If caller doesn't want us to process the error, return */
2341 if (!handle_errors)
2342 return 1;
2343
2344 regs = info;
2345
2346 debugf1("NorthBridge ERROR: mci(0x%p)\n", mci);
2347 debugf1(" MC node(%d) Error-Address(0x%.8x-%.8x)\n",
2348 pvt->mc_node_id, regs->nbeah, regs->nbeal);
2349 debugf1(" nbsh(0x%.8x) nbsl(0x%.8x)\n",
2350 regs->nbsh, regs->nbsl);
2351 debugf1(" Valid Error=%s Overflow=%s\n",
2352 (regs->nbsh & K8_NBSH_VALID_BIT) ? "True" : "False",
2353 (regs->nbsh & K8_NBSH_OVERFLOW) ? "True" : "False");
2354 debugf1(" Err Uncorrected=%s MCA Error Reporting=%s\n",
2355 (regs->nbsh & K8_NBSH_UNCORRECTED_ERR) ?
2356 "True" : "False",
2357 (regs->nbsh & K8_NBSH_ERR_ENABLE) ?
2358 "True" : "False");
2359 debugf1(" MiscErr Valid=%s ErrAddr Valid=%s PCC=%s\n",
2360 (regs->nbsh & K8_NBSH_MISC_ERR_VALID) ?
2361 "True" : "False",
2362 (regs->nbsh & K8_NBSH_VALID_ERROR_ADDR) ?
2363 "True" : "False",
2364 (regs->nbsh & K8_NBSH_PCC) ?
2365 "True" : "False");
2366	debugf1("  CECC=%s UECC=%s Found by Scrubber=%s\n",
2367 (regs->nbsh & K8_NBSH_CECC) ?
2368 "True" : "False",
2369 (regs->nbsh & K8_NBSH_UECC) ?
2370 "True" : "False",
2371 (regs->nbsh & K8_NBSH_ERR_SCRUBER) ?
2372 "True" : "False");
2373 debugf1(" CORE0=%s CORE1=%s CORE2=%s CORE3=%s\n",
2374 (regs->nbsh & K8_NBSH_CORE0) ? "True" : "False",
2375 (regs->nbsh & K8_NBSH_CORE1) ? "True" : "False",
2376 (regs->nbsh & K8_NBSH_CORE2) ? "True" : "False",
2377 (regs->nbsh & K8_NBSH_CORE3) ? "True" : "False");
2378
2379
2380 err_code = EXTRACT_ERROR_CODE(regs->nbsl);
2381
2382 /* Determine which error type:
2383 * 1) GART errors - non-fatal, developmental events
2384 * 2) MEMORY errors
2385 * 3) BUS errors
2386 * 4) Unknown error
2387 */
2388 if (TEST_TLB_ERROR(err_code)) {
2389 /*
2390		 * GART errors are intended to help graphics driver developers
2391		 * detect bad GART PTEs. AMD recommends disabling GART table
2392		 * walk error reporting by default[1] (it is currently disabled
2393		 * in mce_cpu_quirks()) and, according to the comment in
2394		 * mce_cpu_quirks(), such GART errors can be triggered
2395		 * incorrectly. We may see these errors anyway, but unless the
2396		 * user requests it, they won't be reported.
2397 *
2398 * [1] section 13.10.1 on BIOS and Kernel Developers Guide for
2399 * AMD NPT family 0Fh processors
2400 */
2401 if (report_gart_errors == 0)
2402 return 1;
2403
2404 /*
2405 * Only if GART error reporting is requested should we generate
2406 * any logs.
2407 */
2408 gart_tlb_error = 1;
2409
2410 debugf1("GART TLB error\n");
2411 amd64_decode_gart_tlb_error(mci, info);
2412 } else if (TEST_MEM_ERROR(err_code)) {
2413 debugf1("Memory/Cache error\n");
2414 amd64_decode_mem_cache_error(mci, info);
2415 } else if (TEST_BUS_ERROR(err_code)) {
2416 debugf1("Bus (Link/DRAM) error\n");
2417 amd64_decode_bus_error(mci, info);
2418 } else {
2419 /* shouldn't reach here! */
2420 amd64_mc_printk(mci, KERN_WARNING,
2421 "%s(): unknown MCE error 0x%x\n", __func__,
2422 err_code);
2423 }
2424
2425 ext_ec = EXTRACT_EXT_ERROR_CODE(regs->nbsl);
2426 amd64_mc_printk(mci, KERN_ERR,
2427 "ExtErr=(0x%x) %s\n", ext_ec, ext_msgs[ext_ec]);
2428
2429 if (((ext_ec >= F10_NBSL_EXT_ERR_CRC &&
2430 ext_ec <= F10_NBSL_EXT_ERR_TGT) ||
2431 (ext_ec == F10_NBSL_EXT_ERR_RMW)) &&
2432 EXTRACT_LDT_LINK(info->nbsh)) {
2433
2434 amd64_mc_printk(mci, KERN_ERR,
2435 "Error on hypertransport link: %s\n",
2436 htlink_msgs[
2437 EXTRACT_LDT_LINK(info->nbsh)]);
2438 }
2439
2440 /*
2441	 * Check the UE bit of the NB status high register; if it is set, generate
2442	 * some logs. If this is NOT a GART error, process the event as a NO-INFO
2443	 * event. If it was a GART error, skip that step.
2444 */
2445 if (regs->nbsh & K8_NBSH_UNCORRECTED_ERR) {
2446 amd64_mc_printk(mci, KERN_CRIT, "uncorrected error\n");
2447 if (!gart_tlb_error)
2448 edac_mc_handle_ue_no_info(mci, "UE bit is set\n");
2449 }
2450
2451 if (regs->nbsh & K8_NBSH_PCC)
2452 amd64_mc_printk(mci, KERN_CRIT,
2453 "PCC (processor context corrupt) set\n");
2454
2455 return 1;
2456}
2457EXPORT_SYMBOL_GPL(amd64_process_error_info);
2458
2459/*
2460 * The main polling 'check' function, called FROM the edac core to perform the
2461 * error checking and, if an error is encountered, the error processing.
2462 */
2463static void amd64_check(struct mem_ctl_info *mci)
2464{
2465 struct amd64_error_info_regs info;
2466
2467 if (amd64_get_error_info(mci, &info))
2468 amd64_process_error_info(mci, &info, 1);
2469}
2470
2471/*
2472 * Input:
2473 * 1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer
2474 * 2) AMD Family index value
2475 *
2476 * Output:
2477 *	Upon return of 0, the following are filled in:
2478 *
2479 * struct pvt->addr_f1_ctl
2480 * struct pvt->misc_f3_ctl
2481 *
2482 *		Filled in with the related device functions of 'dram_f2_ctl'.
2483 *		These devices are "reserved" via pci_get_device().
2484 *
2485 * Upon return of 1 (error status):
2486 *
2487 * Nothing reserved
2488 */
2489static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx)
2490{
2491 const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx];
2492
2493 /* Reserve the ADDRESS MAP Device */
2494 pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
2495 amd64_dev->addr_f1_ctl,
2496 pvt->dram_f2_ctl);
2497
2498 if (!pvt->addr_f1_ctl) {
2499 amd64_printk(KERN_ERR, "error address map device not found: "
2500 "vendor %x device 0x%x (broken BIOS?)\n",
2501 PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl);
2502 return 1;
2503 }
2504
2505 /* Reserve the MISC Device */
2506 pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
2507 amd64_dev->misc_f3_ctl,
2508 pvt->dram_f2_ctl);
2509
2510 if (!pvt->misc_f3_ctl) {
2511 pci_dev_put(pvt->addr_f1_ctl);
2512 pvt->addr_f1_ctl = NULL;
2513
2514 amd64_printk(KERN_ERR, "error miscellaneous device not found: "
2515 "vendor %x device 0x%x (broken BIOS?)\n",
2516 PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl);
2517 return 1;
2518 }
2519
2520 debugf1(" Addr Map device PCI Bus ID:\t%s\n",
2521 pci_name(pvt->addr_f1_ctl));
2522 debugf1(" DRAM MEM-CTL PCI Bus ID:\t%s\n",
2523 pci_name(pvt->dram_f2_ctl));
2524 debugf1(" Misc device PCI Bus ID:\t%s\n",
2525 pci_name(pvt->misc_f3_ctl));
2526
2527 return 0;
2528}
2529
2530static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
2531{
2532 pci_dev_put(pvt->addr_f1_ctl);
2533 pci_dev_put(pvt->misc_f3_ctl);
2534}
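
/*
 * Note on the pairing above: pci_get_related_function() is built on
 * pci_get_device(), which takes a reference on the device it returns.
 * Every successful reserve therefore has to be balanced by a
 * pci_dev_put() - either here on teardown, or in the error path of
 * amd64_reserve_mc_sibling_devices() when only the first sibling
 * could be reserved.
 */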
2535
2536/*
2537 * Retrieve the hardware registers of the memory controller (this includes the
2538 * 'Address Map' and 'Misc' device regs)
2539 */
2540static void amd64_read_mc_registers(struct amd64_pvt *pvt)
2541{
2542 u64 msr_val;
2543 int dram, err = 0;
2544
2545 /*
2546 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2547 * those are Read-As-Zero
2548 */
2549 rdmsrl(MSR_K8_TOP_MEM1, msr_val);
2550 pvt->top_mem = msr_val >> 23;
2551 debugf0(" TOP_MEM=0x%08llx\n", pvt->top_mem);
2552
2553 /* check first whether TOP_MEM2 is enabled */
2554 rdmsrl(MSR_K8_SYSCFG, msr_val);
2555 if (msr_val & (1U << 21)) {
2556 rdmsrl(MSR_K8_TOP_MEM2, msr_val);
2557 pvt->top_mem2 = msr_val >> 23;
2558 debugf0(" TOP_MEM2=0x%08llx\n", pvt->top_mem2);
2559 } else
2560 debugf0(" TOP_MEM2 disabled.\n");
2561
2562 amd64_cpu_display_info(pvt);
2563
2564 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
2565 if (err)
2566 goto err_reg;
2567
2568 if (pvt->ops->read_dram_ctl_register)
2569 pvt->ops->read_dram_ctl_register(pvt);
2570
2571 for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
2572 /*
2573 * Call CPU specific READ function to get the DRAM Base and
2574 * Limit values from the DCT.
2575 */
2576 pvt->ops->read_dram_base_limit(pvt, dram);
2577
2578 /*
2579		 * Only print out debug info on rows with both R and W enabled.
2580		 * In normal processing, the compiler should optimize this
2581		 * whole 'if' debug output block away.
2582 */
2583 if (pvt->dram_rw_en[dram] != 0) {
2584 debugf1(" DRAM_BASE[%d]: 0x%8.08x-%8.08x "
2585 "DRAM_LIMIT: 0x%8.08x-%8.08x\n",
2586 dram,
2587 (u32)(pvt->dram_base[dram] >> 32),
2588 (u32)(pvt->dram_base[dram] & 0xFFFFFFFF),
2589 (u32)(pvt->dram_limit[dram] >> 32),
2590 (u32)(pvt->dram_limit[dram] & 0xFFFFFFFF));
2591 debugf1(" IntlvEn=%s %s %s "
2592 "IntlvSel=%d DstNode=%d\n",
2593 pvt->dram_IntlvEn[dram] ?
2594 "Enabled" : "Disabled",
2595 (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
2596 (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
2597 pvt->dram_IntlvSel[dram],
2598 pvt->dram_DstNode[dram]);
2599 }
2600 }
2601
2602 amd64_read_dct_base_mask(pvt);
2603
2604 err = pci_read_config_dword(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
2605 if (err)
2606 goto err_reg;
2607
2608 amd64_read_dbam_reg(pvt);
2609
2610 err = pci_read_config_dword(pvt->misc_f3_ctl,
2611 F10_ONLINE_SPARE, &pvt->online_spare);
2612 if (err)
2613 goto err_reg;
2614
2615 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
2616 if (err)
2617 goto err_reg;
2618
2619 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
2620 if (err)
2621 goto err_reg;
2622
2623 if (!dct_ganging_enabled(pvt)) {
2624 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1,
2625 &pvt->dclr1);
2626 if (err)
2627 goto err_reg;
2628
2629 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_1,
2630 &pvt->dchr1);
2631 if (err)
2632 goto err_reg;
2633 }
2634
2635	amd64_dump_misc_regs(pvt);
2636	return;
2637err_reg:
2638	debugf0("Reading an MC register failed\n");
2639
2640}
2641
2642/*
2643 * NOTE: CPU Revision Dependent code
2644 *
2645 * Input:
2646 * @csrow_nr ChipSelect Row Number (0..CHIPSELECT_COUNT-1)
2647 * k8 private pointer to -->
2648 * DRAM Bank Address mapping register
2649 * node_id
2650 * DCL register where dual_channel_active is
2651 *
2652 * The DBAM register consists of 4 fields of 4 bits each, defined as:
2653 *
2654 * Bits: CSROWs
2655 * 0-3 CSROWs 0 and 1
2656 * 4-7 CSROWs 2 and 3
2657 * 8-11 CSROWs 4 and 5
2658 * 12-15 CSROWs 6 and 7
2659 *
2660 * Values range from 0 to 15.
2661 * The meaning of the values depends on CPU revision and dual-channel state;
2662 * see the relevant BKDG for more info.
2663 *
2664 * The memory controller provides for a total of only 8 CSROWs in its current
2665 * architecture. Each "pair" of CSROWs normally represents one DIMM in
2666 * single-channel mode or two (2) DIMMs in dual-channel mode.
2667 *
2668 * The following code logic collapses the various tables for CSROW based on CPU
2669 * revision.
2670 *
2671 * Returns:
2672 * The number of PAGE_SIZE pages that the specified CSROW number
2673 * encompasses
2674 *
2675 */
2676static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
2677{
2678 u32 dram_map, nr_pages;
2679
2680 /*
2681 * The math on this doesn't look right on the surface because x/2*4 can
2682 * be simplified to x*2 but this expression makes use of the fact that
2683 * it is integral math where 1/2=0. This intermediate value becomes the
2684 * number of bits to shift the DBAM register to extract the proper CSROW
2685 * field.
2686 */
2687 dram_map = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
2688
2689 nr_pages = pvt->ops->dbam_map_to_pages(pvt, dram_map);
2690
2691 /*
2692	 * If dual channel, double the single-channel memory size.
2693	 * Channel count is 1 or 2.
2694 */
2695 nr_pages <<= (pvt->channel_count - 1);
2696
2697 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, dram_map);
2698 debugf0(" nr_pages= %u channel-count = %d\n",
2699 nr_pages, pvt->channel_count);
2700
2701 return nr_pages;
2702}
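
/*
 * Worked example for the shift math above: csrow 5 lives in DBAM bits
 * 8-11, and indeed (5 / 2) * 4 == 8 under integer division, whereas the
 * "simplified" 5 * 2 == 10 would land in the middle of that field. With
 * a hypothetical dbam0 value of 0x00005a3c, csrow 5 would thus map to
 * dram_map = (0x00005a3c >> 8) & 0xF = 0xa.
 */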
2703
2704/*
2705 * Initialize the array of csrow attribute instances, based on the values
2706 * from pci config hardware registers.
2707 */
2708static int amd64_init_csrows(struct mem_ctl_info *mci)
2709{
2710 struct csrow_info *csrow;
2711 struct amd64_pvt *pvt;
2712 u64 input_addr_min, input_addr_max, sys_addr;
2713 int i, err = 0, empty = 1;
2714
2715 pvt = mci->pvt_info;
2716
2717 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
2718 if (err)
2719 debugf0("Reading K8_NBCFG failed\n");
2720
2721 debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
2722 (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
2723 (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
2724 );
2725
2726 for (i = 0; i < CHIPSELECT_COUNT; i++) {
2727 csrow = &mci->csrows[i];
2728
2729 if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
2730 debugf1("----CSROW %d EMPTY for node %d\n", i,
2731 pvt->mc_node_id);
2732 continue;
2733 }
2734
2735 debugf1("----CSROW %d VALID for MC node %d\n",
2736 i, pvt->mc_node_id);
2737
2738 empty = 0;
2739 csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
2740 find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
2741 sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
2742 csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
2743 sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
2744 csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
2745 csrow->page_mask = ~mask_from_dct_mask(pvt, i);
2746 /* 8 bytes of resolution */
2747
2748 csrow->mtype = amd64_determine_memory_type(pvt);
2749
2750 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2751 debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
2752 (unsigned long)input_addr_min,
2753 (unsigned long)input_addr_max);
2754 debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
2755 (unsigned long)sys_addr, csrow->page_mask);
2756 debugf1(" nr_pages: %u first_page: 0x%lx "
2757 "last_page: 0x%lx\n",
2758 (unsigned)csrow->nr_pages,
2759 csrow->first_page, csrow->last_page);
2760
2761 /*
2762 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2763 */
2764 if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
2765 csrow->edac_mode =
2766 (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
2767 EDAC_S4ECD4ED : EDAC_SECDED;
2768 else
2769 csrow->edac_mode = EDAC_NONE;
2770 }
2771
2772 return empty;
2773}
2774
2775/*
2776 * Only if 'ecc_enable_override' is set AND the BIOS had ECC disabled do "we"
2777 * enable it.
2778 */
2779static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
2780{
2781 struct amd64_pvt *pvt = mci->pvt_info;
2782 const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
2783 int cpu, idx = 0, err = 0;
2784 struct msr msrs[cpumask_weight(cpumask)];
2785 u32 value;
2786 u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2787
2788 if (!ecc_enable_override)
2789 return;
2790
2791 memset(msrs, 0, sizeof(msrs));
2792
2793 amd64_printk(KERN_WARNING,
2794 "'ecc_enable_override' parameter is active, "
2795 "Enabling AMD ECC hardware now: CAUTION\n");
2796
2797 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
2798 if (err)
2799 debugf0("Reading K8_NBCTL failed\n");
2800
2801 /* turn on UECCn and CECCEn bits */
2802 pvt->old_nbctl = value & mask;
2803 pvt->nbctl_mcgctl_saved = 1;
2804
2805 value |= mask;
2806 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
2807
2808 rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
2809
2810 for_each_cpu(cpu, cpumask) {
2811 if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
2812 set_bit(idx, &pvt->old_mcgctl);
2813
2814 msrs[idx].l |= K8_MSR_MCGCTL_NBE;
2815 idx++;
2816 }
2817 wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
2818
2819 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
2820 if (err)
2821 debugf0("Reading K8_NBCFG failed\n");
2822
2823 debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
2824 (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
2825 (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
2826
2827 if (!(value & K8_NBCFG_ECC_ENABLE)) {
2828 amd64_printk(KERN_WARNING,
2829 "This node reports that DRAM ECC is "
2830 "currently Disabled; ENABLING now\n");
2831
2832 /* Attempt to turn on DRAM ECC Enable */
2833 value |= K8_NBCFG_ECC_ENABLE;
2834 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
2835
2836 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
2837 if (err)
2838 debugf0("Reading K8_NBCFG failed\n");
2839
2840 if (!(value & K8_NBCFG_ECC_ENABLE)) {
2841 amd64_printk(KERN_WARNING,
2842 "Hardware rejects Enabling DRAM ECC checking\n"
2843 "Check memory DIMM configuration\n");
2844 } else {
2845 amd64_printk(KERN_DEBUG,
2846 "Hardware accepted DRAM ECC Enable\n");
2847 }
2848 }
2849 debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
2850 (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
2851 (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
2852
2853 pvt->ctl_error_info.nbcfg = value;
2854}
2855
2856static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
2857{
2858 const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
2859 int cpu, idx = 0, err = 0;
2860 struct msr msrs[cpumask_weight(cpumask)];
2861 u32 value;
2862 u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2863
2864 if (!pvt->nbctl_mcgctl_saved)
2865 return;
2866
2867 memset(msrs, 0, sizeof(msrs));
2868
2869 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
2870 if (err)
2871 debugf0("Reading K8_NBCTL failed\n");
2872 value &= ~mask;
2873 value |= pvt->old_nbctl;
2874
2875 /* restore the NB Enable MCGCTL bit */
2876 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
2877
2878 rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
2879
2880 for_each_cpu(cpu, cpumask) {
2881 msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
2882 msrs[idx].l |=
2883 test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE;
2884 idx++;
2885 }
2886
2887 wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
2888}
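
/*
 * Minimal sketch of the save/modify/restore MSR idiom shared by the two
 * functions above, reduced to a single direction. The helper name is
 * hypothetical; rdmsr_on_cpus()/wrmsr_on_cpus() batch the MSR access
 * across every core of the node so that all of them agree on the NB
 * Enable bit.
 */
static void __maybe_unused toggle_nbe_on_node(int node, int enable)
{
	const cpumask_t *cpumask = cpumask_of_node(node);
	struct msr msrs[cpumask_weight(cpumask)];
	int cpu, idx = 0;

	memset(msrs, 0, sizeof(msrs));
	rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);

	for_each_cpu(cpu, cpumask) {
		if (enable)
			msrs[idx].l |= K8_MSR_MCGCTL_NBE;
		else
			msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
		idx++;
	}

	wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
}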
2889
2890static void check_mcg_ctl(void *ret)
2891{
2892 u64 msr_val = 0;
2893 u8 nbe;
2894
2895 rdmsrl(MSR_IA32_MCG_CTL, msr_val);
2896 nbe = msr_val & K8_MSR_MCGCTL_NBE;
2897
2898 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2899 raw_smp_processor_id(), msr_val,
2900 (nbe ? "enabled" : "disabled"));
2901
2902 if (!nbe)
2903 *(int *)ret = 0;
2904}
2905
2906/* check MCG_CTL on all the cpus on this node */
2907static int amd64_mcg_ctl_enabled_on_cpus(const cpumask_t *mask)
2908{
2909 int ret = 1;
	preempt_disable();
	smp_call_function_many(mask, check_mcg_ctl, &ret, 1);

	/* smp_call_function_many() skips the calling CPU; cover it here */
	if (cpumask_test_cpu(smp_processor_id(), mask))
		check_mcg_ctl(&ret);
	preempt_enable();
2913
2914 return ret;
2915}
2916
2917/*
2918 * EDAC requires that the BIOS have ECC enabled before taking over the
2919 * processing of ECC errors. This is because the BIOS can properly initialize
2920 * the memory system completely. A command line option allows one to
2921 * force-enable hardware ECC later in amd64_enable_ecc_error_reporting().
2922 */
2923static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
2924{
2925 u32 value;
2926 int err = 0, ret = 0;
2927 u8 ecc_enabled = 0;
2928
2929 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
2930 if (err)
2931		debugf0("Reading K8_NBCFG failed\n");
2932
2933 ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
2934
2935 ret = amd64_mcg_ctl_enabled_on_cpus(cpumask_of_node(pvt->mc_node_id));
2936
2937 debugf0("K8_NBCFG=0x%x, DRAM ECC is %s\n", value,
2938 (value & K8_NBCFG_ECC_ENABLE ? "enabled" : "disabled"));
2939
2940 if (!ecc_enabled || !ret) {
2941 if (!ecc_enabled) {
2942 amd64_printk(KERN_WARNING, "This node reports that "
2943 "Memory ECC is currently "
2944 "disabled.\n");
2945
2946 amd64_printk(KERN_WARNING, "bit 0x%lx in register "
2947 "F3x%x of the MISC_CONTROL device (%s) "
2948 "should be enabled\n", K8_NBCFG_ECC_ENABLE,
2949 K8_NBCFG, pci_name(pvt->misc_f3_ctl));
2950 }
2951 if (!ret) {
2952 amd64_printk(KERN_WARNING, "bit 0x%016lx in MSR 0x%08x "
2953 "of node %d should be enabled\n",
2954 K8_MSR_MCGCTL_NBE, MSR_IA32_MCG_CTL,
2955 pvt->mc_node_id);
2956 }
2957 if (!ecc_enable_override) {
2958 amd64_printk(KERN_WARNING, "WARNING: ECC is NOT "
2959 "currently enabled by the BIOS. Module "
2960 "will NOT be loaded.\n"
2961 " Either Enable ECC in the BIOS, "
2962 "or use the 'ecc_enable_override' "
2963 "parameter.\n"
2964 " Might be a BIOS bug, if BIOS says "
2965 "ECC is enabled\n"
2966 " Use of the override can cause "
2967 "unknown side effects.\n");
2968 ret = -ENODEV;
2969 }
2970 } else {
2971 amd64_printk(KERN_INFO,
2972 "ECC is enabled by BIOS, Proceeding "
2973 "with EDAC module initialization\n");
2974
2975 /* CLEAR the override, since BIOS controlled it */
2976 ecc_enable_override = 0;
2977 }
2978
2979 return ret;
2980}
2981
2982static struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
2983 ARRAY_SIZE(amd64_inj_attrs) +
2984 1];
2985
2986static struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
2987
2988static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
2989{
2990 unsigned int i = 0, j = 0;
2991
2992 for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
2993 sysfs_attrs[i] = amd64_dbg_attrs[i];
2994
2995 for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
2996 sysfs_attrs[i] = amd64_inj_attrs[j];
2997
2998 sysfs_attrs[i] = terminator;
2999
3000 mci->mc_driver_sysfs_attributes = sysfs_attrs;
3001}
3002
3003static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
3004{
3005 struct amd64_pvt *pvt = mci->pvt_info;
3006
3007 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3008 mci->edac_ctl_cap = EDAC_FLAG_NONE;
3009 mci->edac_cap = EDAC_FLAG_NONE;
3010
3011 if (pvt->nbcap & K8_NBCAP_SECDED)
3012 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3013
3014 if (pvt->nbcap & K8_NBCAP_CHIPKILL)
3015 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3016
3017 mci->edac_cap = amd64_determine_edac_cap(pvt);
3018 mci->mod_name = EDAC_MOD_STR;
3019 mci->mod_ver = EDAC_AMD64_VERSION;
3020 mci->ctl_name = get_amd_family_name(pvt->mc_type_index);
3021 mci->dev_name = pci_name(pvt->dram_f2_ctl);
3022 mci->ctl_page_to_phys = NULL;
3023
3024 /* IMPORTANT: Set the polling 'check' function in this module */
3025 mci->edac_check = amd64_check;
3026
3027 /* memory scrubber interface */
3028 mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
3029 mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
3030}
3031
3032/*
3033 * Init stuff for this DRAM Controller device.
3034 *
3035 * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration
3036 * Space feature MUST be enabled on ALL Processors prior to actually reading
3037 * from the ECS registers, since the module can be loaded on any 'core',
3038 * and cores don't 'see' the other processors' ECS data when ECS is NOT
3039 * enabled on those others. Our solution is to first enable ECS access in this
3040 * routine on all processors, gather some data in an amd64_pvt structure and
3041 * later come back in a finish-setup function to perform the final
3042 * initialization. See amd64_init_2nd_stage() for that.
3043 */
3044static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
3045 int mc_type_index)
3046{
3047 struct amd64_pvt *pvt = NULL;
3048 int err = 0, ret;
3049
3050 ret = -ENOMEM;
3051 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
3052 if (!pvt)
3053 goto err_exit;
3054
3055 pvt->mc_node_id = get_mc_node_id_from_pdev(dram_f2_ctl);
3056
3057 pvt->dram_f2_ctl = dram_f2_ctl;
3058 pvt->ext_model = boot_cpu_data.x86_model >> 4;
3059 pvt->mc_type_index = mc_type_index;
3060 pvt->ops = family_ops(mc_type_index);
3061 pvt->old_mcgctl = 0;
3062
3063 /*
3064 * We have the dram_f2_ctl device as an argument, now go reserve its
3065 * sibling devices from the PCI system.
3066 */
3067 ret = -ENODEV;
3068 err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index);
3069 if (err)
3070 goto err_free;
3071
3072 ret = -EINVAL;
3073 err = amd64_check_ecc_enabled(pvt);
3074 if (err)
3075 goto err_put;
3076
3077 /*
3078 * Key operation here: setup of HW prior to performing ops on it. Some
3079 * setup is required to access ECS data. After this is performed, the
3080	 * 'teardown' function must be called on both the error and normal exit paths.
3081 */
3082 if (boot_cpu_data.x86 >= 0x10)
3083 amd64_setup(pvt);
3084
3085 /*
3086 * Save the pointer to the private data for use in 2nd initialization
3087 * stage
3088 */
3089 pvt_lookup[pvt->mc_node_id] = pvt;
3090
3091 return 0;
3092
3093err_put:
3094 amd64_free_mc_sibling_devices(pvt);
3095
3096err_free:
3097 kfree(pvt);
3098
3099err_exit:
3100 return ret;
3101}
3102
3103/*
3104 * This is the finishing stage of the init code. It needs to be performed after
3105 * all MCs' hardware has been prepped for accessing extended config space.
3106 */
3107static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
3108{
3109 int node_id = pvt->mc_node_id;
3110 struct mem_ctl_info *mci;
3111 int ret, err = 0;
3112
3113 amd64_read_mc_registers(pvt);
3114
3115 ret = -ENODEV;
3116 if (pvt->ops->probe_valid_hardware) {
3117 err = pvt->ops->probe_valid_hardware(pvt);
3118 if (err)
3119 goto err_exit;
3120 }
3121
3122 /*
3123 * We need to determine how many memory channels there are. Then use
3124 * that information for calculating the size of the dynamic instance
3125 * tables in the 'mci' structure
3126 */
3127 pvt->channel_count = pvt->ops->early_channel_count(pvt);
3128 if (pvt->channel_count < 0)
3129 goto err_exit;
3130
3131 ret = -ENOMEM;
3132 mci = edac_mc_alloc(0, CHIPSELECT_COUNT, pvt->channel_count, node_id);
3133 if (!mci)
3134 goto err_exit;
3135
3136 mci->pvt_info = pvt;
3137
3138 mci->dev = &pvt->dram_f2_ctl->dev;
3139 amd64_setup_mci_misc_attributes(mci);
3140
3141 if (amd64_init_csrows(mci))
3142 mci->edac_cap = EDAC_FLAG_NONE;
3143
3144 amd64_enable_ecc_error_reporting(mci);
3145 amd64_set_mc_sysfs_attributes(mci);
3146
3147 ret = -ENODEV;
3148 if (edac_mc_add_mc(mci)) {
3149 debugf1("failed edac_mc_add_mc()\n");
3150 goto err_add_mc;
3151 }
3152
3153 mci_lookup[node_id] = mci;
3154 pvt_lookup[node_id] = NULL;
3155 return 0;
3156
3157err_add_mc:
3158 edac_mc_free(mci);
3159
3160err_exit:
3161 debugf0("failure to init 2nd stage: ret=%d\n", ret);
3162
3163 amd64_restore_ecc_error_reporting(pvt);
3164
3165 if (boot_cpu_data.x86 > 0xf)
3166 amd64_teardown(pvt);
3167
3168 amd64_free_mc_sibling_devices(pvt);
3169
3170	kfree(pvt);
3171	pvt_lookup[node_id] = NULL;
3172
3173 return ret;
3174}
3175
3176
3177static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
3178 const struct pci_device_id *mc_type)
3179{
3180 int ret = 0;
3181
3182 debugf0("(MC node=%d,mc_type='%s')\n",
3183 get_mc_node_id_from_pdev(pdev),
3184 get_amd_family_name(mc_type->driver_data));
3185
3186 ret = pci_enable_device(pdev);
3187 if (ret < 0)
3188 ret = -EIO;
3189 else
3190 ret = amd64_probe_one_instance(pdev, mc_type->driver_data);
3191
3192 if (ret < 0)
3193 debugf0("ret=%d\n", ret);
3194
3195 return ret;
3196}
3197
3198static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
3199{
3200 struct mem_ctl_info *mci;
3201 struct amd64_pvt *pvt;
3202
3203 /* Remove from EDAC CORE tracking list */
3204 mci = edac_mc_del_mc(&pdev->dev);
3205 if (!mci)
3206 return;
3207
3208 pvt = mci->pvt_info;
3209
3210 amd64_restore_ecc_error_reporting(pvt);
3211
3212 if (boot_cpu_data.x86 > 0xf)
3213 amd64_teardown(pvt);
3214
3215 amd64_free_mc_sibling_devices(pvt);
3216
3217	mci_lookup[pvt->mc_node_id] = NULL;
3218	mci->pvt_info = NULL;
3219
3220	kfree(pvt);
3221
3222 /* Free the EDAC CORE resources */
3223 edac_mc_free(mci);
3224}
3225
3226/*
3227 * This table is part of the interface for loading drivers for PCI devices. The
3228 * PCI core identifies what devices are present on a system during boot, and
3229 * then consults this table to see if this driver handles a given device.
3230 */
3231static const struct pci_device_id amd64_pci_table[] __devinitdata = {
3232 {
3233 .vendor = PCI_VENDOR_ID_AMD,
3234 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
3235 .subvendor = PCI_ANY_ID,
3236 .subdevice = PCI_ANY_ID,
3237 .class = 0,
3238 .class_mask = 0,
3239 .driver_data = K8_CPUS
3240 },
3241 {
3242 .vendor = PCI_VENDOR_ID_AMD,
3243 .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
3244 .subvendor = PCI_ANY_ID,
3245 .subdevice = PCI_ANY_ID,
3246 .class = 0,
3247 .class_mask = 0,
3248 .driver_data = F10_CPUS
3249 },
3250 {
3251 .vendor = PCI_VENDOR_ID_AMD,
3252 .device = PCI_DEVICE_ID_AMD_11H_NB_DRAM,
3253 .subvendor = PCI_ANY_ID,
3254 .subdevice = PCI_ANY_ID,
3255 .class = 0,
3256 .class_mask = 0,
3257 .driver_data = F11_CPUS
3258 },
3259 {0, }
3260};
3261MODULE_DEVICE_TABLE(pci, amd64_pci_table);
3262
3263static struct pci_driver amd64_pci_driver = {
3264 .name = EDAC_MOD_STR,
3265 .probe = amd64_init_one_instance,
3266 .remove = __devexit_p(amd64_remove_one_instance),
3267 .id_table = amd64_pci_table,
3268};
3269
3270static void amd64_setup_pci_device(void)
3271{
3272 struct mem_ctl_info *mci;
3273 struct amd64_pvt *pvt;
3274
3275 if (amd64_ctl_pci)
3276 return;
3277
3278 mci = mci_lookup[0];
3279 if (mci) {
3280
3281 pvt = mci->pvt_info;
3282 amd64_ctl_pci =
3283 edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev,
3284 EDAC_MOD_STR);
3285
3286 if (!amd64_ctl_pci) {
3287 pr_warning("%s(): Unable to create PCI control\n",
3288 __func__);
3289
3290 pr_warning("%s(): PCI error report via EDAC not set\n",
3291 __func__);
3292 }
3293 }
3294}
3295
3296static int __init amd64_edac_init(void)
3297{
3298 int nb, err = -ENODEV;
3299
3300 edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
3301
3302 opstate_init();
3303
3304	if (cache_k8_northbridges() < 0)
3305		return err;	/* nothing registered yet, so just bail */
3306
3307 err = pci_register_driver(&amd64_pci_driver);
3308 if (err)
3309 return err;
3310
3311 /*
3312 * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
3313 * amd64_pvt structs. These will be used in the 2nd stage init function
3314 * to finish initialization of the MC instances.
3315 */
3316 for (nb = 0; nb < num_k8_northbridges; nb++) {
3317 if (!pvt_lookup[nb])
3318 continue;
3319
3320 err = amd64_init_2nd_stage(pvt_lookup[nb]);
3321 if (err)
3322 goto err_exit;
3323 }
3324
3325 amd64_setup_pci_device();
3326
3327 return 0;
3328
3329err_exit:
3330 debugf0("'finish_setup' stage failed\n");
3331 pci_unregister_driver(&amd64_pci_driver);
3332
3333 return err;
3334}
3335
3336static void __exit amd64_edac_exit(void)
3337{
3338 if (amd64_ctl_pci)
3339 edac_pci_release_generic_ctl(amd64_ctl_pci);
3340
3341 pci_unregister_driver(&amd64_pci_driver);
3342}
3343
3344module_init(amd64_edac_init);
3345module_exit(amd64_edac_exit);
3346
3347MODULE_LICENSE("GPL");
3348MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
3349 "Dave Peterson, Thayne Harbaugh");
3350MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
3351 EDAC_AMD64_VERSION);
3352
3353module_param(edac_op_state, int, 0444);
3354MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll, 1=NMI");
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
new file mode 100644
index 00000000000..a159957e167
--- /dev/null
+++ b/drivers/edac/amd64_edac.h
@@ -0,0 +1,644 @@
1/*
2 * AMD64 class Memory Controller kernel module
3 *
4 * Copyright (c) 2009 SoftwareBitMaker.
5 * Copyright (c) 2009 Advanced Micro Devices, Inc.
6 *
7 * This file may be distributed under the terms of the
8 * GNU General Public License.
9 *
10 * Originally Written by Thayne Harbaugh
11 *
12 * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
13 * - K8 CPU Revision D and greater support
14 *
15 * Changes by Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>:
16 * - Module largely rewritten, with new (and hopefully correct)
17 * code for dealing with node and chip select interleaving,
18 * various code cleanup, and bug fixes
19 * - Added support for memory hoisting using DRAM hole address
20 * register
21 *
22 * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
23 * -K8 Rev (1207) revision support added, required Revision
24 * specific mini-driver code to support Rev F as well as
25 * prior revisions
26 *
27 * Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
28 * -Family 10h revision support added. New PCI Device IDs,
29 * indicating new changes. Actual registers modified
30 * were slight, less than the Rev E to Rev F transition
31 * but changing the PCI Device ID was the proper thing to
32 *		do, as it provides for almost automatic family
33 * detection. The mods to Rev F required more family
34 * information detection.
35 *
36 * Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>:
37 * - misc fixes and code cleanups
38 *
39 * This module is based on the following documents
40 * (available from http://www.amd.com/):
41 *
42 * Title: BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD
43 * Opteron Processors
44 * AMD publication #: 26094
45 *	Revision: 3.26
46 *
47 * Title: BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
48 * Processors
49 * AMD publication #: 32559
50 * Revision: 3.00
51 * Issue Date: May 2006
52 *
53 * Title: BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h
54 * Processors
55 * AMD publication #: 31116
56 * Revision: 3.00
57 * Issue Date: September 07, 2007
58 *
59 * Sections in the first 2 documents are no longer in sync with each other.
60 * The Family 10h BKDG was totally re-written from scratch with a new
61 * presentation model.
62 * Therefore, comments that refer to a Document section might be off.
63 */
64
65#include <linux/module.h>
66#include <linux/ctype.h>
67#include <linux/init.h>
68#include <linux/pci.h>
69#include <linux/pci_ids.h>
70#include <linux/slab.h>
71#include <linux/mmzone.h>
72#include <linux/edac.h>
73#include <asm/msr.h>
74#include "edac_core.h"
75
76#define amd64_printk(level, fmt, arg...) \
77 edac_printk(level, "amd64", fmt, ##arg)
78
79#define amd64_mc_printk(mci, level, fmt, arg...) \
80 edac_mc_chipset_printk(mci, level, "amd64", fmt, ##arg)
81
82/*
83 * Throughout the comments in this code, the following terms are used:
84 *
85 * SysAddr, DramAddr, and InputAddr
86 *
87 * These terms come directly from the amd64 documentation
88 * (AMD publication #26094). They are defined as follows:
89 *
90 * SysAddr:
91 * This is a physical address generated by a CPU core or a device
92 * doing DMA. If generated by a CPU core, a SysAddr is the result of
93 * a virtual to physical address translation by the CPU core's address
94 * translation mechanism (MMU).
95 *
96 * DramAddr:
97 * A DramAddr is derived from a SysAddr by subtracting an offset that
98 * depends on which node the SysAddr maps to and whether the SysAddr
99 * is within a range affected by memory hoisting. The DRAM Base
100 * (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers
101 * determine which node a SysAddr maps to.
102 *
103 * If the DRAM Hole Address Register (DHAR) is enabled and the SysAddr
104 * is within the range of addresses specified by this register, then
105 * a value x from the DHAR is subtracted from the SysAddr to produce a
106 * DramAddr. Here, x represents the base address for the node that
107 * the SysAddr maps to plus an offset due to memory hoisting. See
108 * section 3.4.8 and the comments in amd64_get_dram_hole_info() and
109 * sys_addr_to_dram_addr() below for more information.
110 *
111 * If the SysAddr is not affected by the DHAR then a value y is
112 * subtracted from the SysAddr to produce a DramAddr. Here, y is the
113 * base address for the node that the SysAddr maps to. See section
114 * 3.4.4 and the comments in sys_addr_to_dram_addr() below for more
115 * information.
116 *
117 * InputAddr:
118 * A DramAddr is translated to an InputAddr before being passed to the
119 * memory controller for the node that the DramAddr is associated
120 * with. The memory controller then maps the InputAddr to a csrow.
121 * If node interleaving is not in use, then the InputAddr has the same
122 * value as the DramAddr. Otherwise, the InputAddr is produced by
123 * discarding the bits used for node interleaving from the DramAddr.
124 * See section 3.4.4 for more information.
125 *
126 * The memory controller for a given node uses its DRAM CS Base and
127 * DRAM CS Mask registers to map an InputAddr to a csrow. See
128 * sections 3.5.4 and 3.5.5 for more information.
129 */
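
/*
 * Minimal sketch of the SysAddr -> DramAddr step described above, under
 * the stated assumptions and with hypothetical parameter names: the
 * caller supplies the node's DRAM base, the DHAR-derived hoist offset
 * and whether the SysAddr falls inside the hoisted range.
 */
static inline u64 sketch_sys_addr_to_dram_addr(u64 sys_addr, u64 node_base,
					       u64 dhar_offset,
					       int in_dhar_range)
{
	/* hoisted range: subtract the node base plus the hoist offset */
	if (in_dhar_range)
		return sys_addr - (node_base + dhar_offset);

	/* otherwise just rebase onto this node's DRAM range */
	return sys_addr - node_base;
}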
130
131#define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__
132#define EDAC_MOD_STR "amd64_edac"
133
134/* Extended Model from CPUID, for CPU Revision numbers */
135#define OPTERON_CPU_LE_REV_C 0
136#define OPTERON_CPU_REV_D 1
137#define OPTERON_CPU_REV_E 2
138
139/* NPT processors have the following Extended Models */
140#define OPTERON_CPU_REV_F 4
141#define OPTERON_CPU_REV_FA 5
142
143/* Hardware limit on ChipSelect rows per MC and processors per system */
144#define CHIPSELECT_COUNT 8
145#define DRAM_REG_COUNT 8
146
147
148/*
149 * PCI-defined configuration space registers
150 */
151
152
153/*
154 * Function 1 - Address Map
155 */
156#define K8_DRAM_BASE_LOW 0x40
157#define K8_DRAM_LIMIT_LOW 0x44
158#define K8_DHAR 0xf0
159
160#define DHAR_VALID BIT(0)
161#define F10_DRAM_MEM_HOIST_VALID BIT(1)
162
163#define DHAR_BASE_MASK 0xff000000
164#define dhar_base(dhar) (dhar & DHAR_BASE_MASK)
165
166#define K8_DHAR_OFFSET_MASK 0x0000ff00
167#define k8_dhar_offset(dhar) ((dhar & K8_DHAR_OFFSET_MASK) << 16)
168
169#define F10_DHAR_OFFSET_MASK 0x0000ff80
170 /* NOTE: Extra mask bit vs K8 */
171#define f10_dhar_offset(dhar) ((dhar & F10_DHAR_OFFSET_MASK) << 16)
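
/*
 * Worked example (illustrative dhar values): k8_dhar_offset(0x00002a00)
 * == 0x2a00 << 16 == 0x2a000000, while on F10 the extra mask bit lets
 * bit 7 participate: f10_dhar_offset(0x00002a80) == 0x2a80 << 16 ==
 * 0x2a800000.
 */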
172
173
174/* F10 High BASE/LIMIT registers */
175#define F10_DRAM_BASE_HIGH 0x140
176#define F10_DRAM_LIMIT_HIGH 0x144
177
178
179/*
180 * Function 2 - DRAM controller
181 */
182#define K8_DCSB0 0x40
183#define F10_DCSB1 0x140
184
185#define K8_DCSB_CS_ENABLE BIT(0)
186#define K8_DCSB_NPT_SPARE BIT(1)
187#define K8_DCSB_NPT_TESTFAIL BIT(2)
188
189/*
190 * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form
191 * the address
192 */
193#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL)
194#define REV_E_DCS_SHIFT 4
195#define REV_E_DCSM_COUNT 8
196
197#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL)
198#define REV_F_F1Xh_DCS_SHIFT 8
199
200/*
201 * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount
202 * to form the address
203 */
204#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL)
205#define REV_F_DCS_SHIFT 8
206#define REV_F_DCSM_COUNT 4
207#define F10_DCSM_COUNT 4
208#define F11_DCSM_COUNT 2
209
210/* DRAM CS Mask Registers */
211#define K8_DCSM0 0x60
212#define F10_DCSM1 0x160
213
214/* REV E: select [29:21] and [15:9] from DCSM */
215#define REV_E_DCSM_MASK_BITS 0x3FE0FE00
216
217/* unused bits [24:20] and [12:0] */
218#define REV_E_DCS_NOTUSED_BITS 0x01F01FFF
219
220/* REV F and later: select [28:19] and [13:5] from DCSM */
221#define REV_F_F1Xh_DCSM_MASK_BITS 0x1FF83FE0
222
223/* unused bits [26:22] and [12:0] */
224#define REV_F_F1Xh_DCS_NOTUSED_BITS 0x07C01FFF
225
226#define DBAM0 0x80
227#define DBAM1 0x180
228
229/* Extract the DIMM 'type' on the i'th DIMM from the DBAM reg value passed */
230#define DBAM_DIMM(i, reg) ((((reg) >> (4*i))) & 0xF)
231
232#define DBAM_MAX_VALUE 11
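
/*
 * Example with a made-up register value: for reg == 0x3a25,
 * DBAM_DIMM(0, reg) == 0x5, DBAM_DIMM(1, reg) == 0x2,
 * DBAM_DIMM(2, reg) == 0xa and DBAM_DIMM(3, reg) == 0x3, each within
 * the 0..DBAM_MAX_VALUE range.
 */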
233
234
235#define F10_DCLR_0 0x90
236#define F10_DCLR_1 0x190
237#define REVE_WIDTH_128 BIT(16)
238#define F10_WIDTH_128 BIT(11)
239
240
241#define F10_DCHR_0 0x94
242#define F10_DCHR_1 0x194
243
244#define F10_DCHR_FOUR_RANK_DIMM BIT(18)
245#define F10_DCHR_Ddr3Mode BIT(8)
246#define F10_DCHR_MblMode BIT(6)
247
248
249#define F10_DCTL_SEL_LOW 0x110
250
251#define dct_sel_baseaddr(pvt) \
252 ((pvt->dram_ctl_select_low) & 0xFFFFF800)
253
254#define dct_sel_interleave_addr(pvt) \
255 (((pvt->dram_ctl_select_low) >> 6) & 0x3)
256
257enum {
258 F10_DCTL_SEL_LOW_DctSelHiRngEn = BIT(0),
259 F10_DCTL_SEL_LOW_DctSelIntLvEn = BIT(2),
260 F10_DCTL_SEL_LOW_DctGangEn = BIT(4),
261 F10_DCTL_SEL_LOW_DctDatIntLv = BIT(5),
262 F10_DCTL_SEL_LOW_DramEnable = BIT(8),
263 F10_DCTL_SEL_LOW_MemCleared = BIT(10),
264};
265
266#define dct_high_range_enabled(pvt) \
267 (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelHiRngEn)
268
269#define dct_interleave_enabled(pvt) \
270 (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelIntLvEn)
271
272#define dct_ganging_enabled(pvt) \
273 (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctGangEn)
274
275#define dct_data_intlv_enabled(pvt) \
276 (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctDatIntLv)
277
278#define dct_dram_enabled(pvt) \
279 (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DramEnable)
280
281#define dct_memory_cleared(pvt) \
282 (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_MemCleared)
283
284
285#define F10_DCTL_SEL_HIGH 0x114
286
287
288/*
289 * Function 3 - Misc Control
290 */
291#define K8_NBCTL 0x40
292
293/* Correctable ECC error reporting enable */
294#define K8_NBCTL_CECCEn BIT(0)
295
296/* UnCorrectable ECC error reporting enable */
297#define K8_NBCTL_UECCEn BIT(1)
298
299#define K8_NBCFG 0x44
300#define K8_NBCFG_CHIPKILL BIT(23)
301#define K8_NBCFG_ECC_ENABLE BIT(22)
302
303#define K8_NBSL 0x48
304
305
306#define EXTRACT_HIGH_SYNDROME(x) (((x) >> 24) & 0xff)
307#define EXTRACT_EXT_ERROR_CODE(x) (((x) >> 16) & 0x1f)
308
309/* Family F10h: Normalized Extended Error Codes */
310#define F10_NBSL_EXT_ERR_RES 0x0
311#define F10_NBSL_EXT_ERR_CRC 0x1
312#define F10_NBSL_EXT_ERR_SYNC 0x2
313#define F10_NBSL_EXT_ERR_MST 0x3
314#define F10_NBSL_EXT_ERR_TGT 0x4
315#define F10_NBSL_EXT_ERR_GART 0x5
316#define F10_NBSL_EXT_ERR_RMW 0x6
317#define F10_NBSL_EXT_ERR_WDT 0x7
318#define F10_NBSL_EXT_ERR_ECC 0x8
319#define F10_NBSL_EXT_ERR_DEV 0x9
320#define F10_NBSL_EXT_ERR_LINK_DATA 0xA
321
322/* Next two are overloaded values */
323#define F10_NBSL_EXT_ERR_LINK_PROTO 0xB
324#define F10_NBSL_EXT_ERR_L3_PROTO 0xB
325
326#define F10_NBSL_EXT_ERR_NB_ARRAY 0xC
327#define F10_NBSL_EXT_ERR_DRAM_PARITY 0xD
328#define F10_NBSL_EXT_ERR_LINK_RETRY 0xE
329
330/* Next two are overloaded values */
331#define F10_NBSL_EXT_ERR_GART_WALK 0xF
332#define F10_NBSL_EXT_ERR_DEV_WALK 0xF
333
334/* 0x10 to 0x1B: Reserved */
335#define F10_NBSL_EXT_ERR_L3_DATA 0x1C
336#define F10_NBSL_EXT_ERR_L3_TAG 0x1D
337#define F10_NBSL_EXT_ERR_L3_LRU 0x1E
338
339/* K8: Normalized Extended Error Codes */
340#define K8_NBSL_EXT_ERR_ECC 0x0
341#define K8_NBSL_EXT_ERR_CRC 0x1
342#define K8_NBSL_EXT_ERR_SYNC 0x2
343#define K8_NBSL_EXT_ERR_MST 0x3
344#define K8_NBSL_EXT_ERR_TGT 0x4
345#define K8_NBSL_EXT_ERR_GART 0x5
346#define K8_NBSL_EXT_ERR_RMW 0x6
347#define K8_NBSL_EXT_ERR_WDT 0x7
348#define K8_NBSL_EXT_ERR_CHIPKILL_ECC 0x8
349#define K8_NBSL_EXT_ERR_DRAM_PARITY 0xD
350
351#define EXTRACT_ERROR_CODE(x) ((x) & 0xffff)
352#define TEST_TLB_ERROR(x) (((x) & 0xFFF0) == 0x0010)
353#define TEST_MEM_ERROR(x) (((x) & 0xFF00) == 0x0100)
354#define TEST_BUS_ERROR(x) (((x) & 0xF800) == 0x0800)
355#define EXTRACT_TT_CODE(x) (((x) >> 2) & 0x3)
356#define EXTRACT_II_CODE(x) (((x) >> 2) & 0x3)
357#define EXTRACT_LL_CODE(x) (((x) >> 0) & 0x3)
358#define EXTRACT_RRRR_CODE(x) (((x) >> 4) & 0xf)
359#define EXTRACT_TO_CODE(x) (((x) >> 8) & 0x1)
360#define EXTRACT_PP_CODE(x) (((x) >> 9) & 0x3)
361
362/*
363 * The following are for BUS type errors AFTER values have been normalized by
364 * shifting right
365 */
366#define K8_NBSL_PP_SRC 0x0
367#define K8_NBSL_PP_RES 0x1
368#define K8_NBSL_PP_OBS 0x2
369#define K8_NBSL_PP_GENERIC 0x3
370
371
372#define K8_NBSH 0x4C
373
374#define K8_NBSH_VALID_BIT BIT(31)
375#define K8_NBSH_OVERFLOW BIT(30)
376#define K8_NBSH_UNCORRECTED_ERR BIT(29)
377#define K8_NBSH_ERR_ENABLE BIT(28)
378#define K8_NBSH_MISC_ERR_VALID BIT(27)
379#define K8_NBSH_VALID_ERROR_ADDR BIT(26)
380#define K8_NBSH_PCC BIT(25)
381#define K8_NBSH_CECC BIT(14)
382#define K8_NBSH_UECC BIT(13)
383#define K8_NBSH_ERR_SCRUBER BIT(8)
384#define K8_NBSH_CORE3 BIT(3)
385#define K8_NBSH_CORE2 BIT(2)
386#define K8_NBSH_CORE1 BIT(1)
387#define K8_NBSH_CORE0 BIT(0)
388
389#define EXTRACT_LDT_LINK(x) (((x) >> 4) & 0x7)
390#define EXTRACT_ERR_CPU_MAP(x) ((x) & 0xF)
391#define EXTRACT_LOW_SYNDROME(x) (((x) >> 15) & 0xff)
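
/*
 * Hedged sketch (an assumption about intended use): the full ECC syndrome
 * is split across the two status registers -- high byte in NBSL, low byte
 * in NBSH -- and is reassembled like this.
 */
static inline u16 example_full_syndrome(u32 nbsl, u32 nbsh)
{
	return (EXTRACT_HIGH_SYNDROME(nbsl) << 8) | EXTRACT_LOW_SYNDROME(nbsh);
}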
392
393
394#define K8_NBEAL 0x50
395#define K8_NBEAH 0x54
396#define K8_SCRCTRL 0x58
397
398#define F10_NB_CFG_LOW 0x88
399#define F10_NB_CFG_LOW_ENABLE_EXT_CFG BIT(14)
400
401#define F10_NB_CFG_HIGH 0x8C
402
403#define F10_ONLINE_SPARE 0xB0
404#define F10_ONLINE_SPARE_SWAPDONE0(x) ((x) & BIT(1))
405#define F10_ONLINE_SPARE_SWAPDONE1(x) ((x) & BIT(3))
406#define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007)
407#define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007)
408
409#define F10_NB_ARRAY_ADDR 0xB8
410
411#define F10_NB_ARRAY_DRAM_ECC 0x80000000
412
413/* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */
414#define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1)
415
416#define F10_NB_ARRAY_DATA 0xBC
417
418#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
419 (BIT(((word) & 0xF) + 20) | \
420 BIT(17) | \
421 ((bits) & 0xF))
422
423#define SET_NB_DRAM_INJECTION_READ(word, bits) \
424 (BIT(((word) & 0xF) + 20) | \
425 BIT(16) | \
426 ((bits) & 0xF))
427
428#define K8_NBCAP 0xE8
429#define K8_NBCAP_CORES (BIT(12)|BIT(13))
430#define K8_NBCAP_CHIPKILL BIT(4)
431#define K8_NBCAP_SECDED BIT(3)
432#define K8_NBCAP_8_NODE BIT(2)
433#define K8_NBCAP_DUAL_NODE BIT(1)
434#define K8_NBCAP_DCT_DUAL BIT(0)
435
436/*
437 * MSR Regs
438 */
439#define K8_MSR_MCGCTL 0x017b
440#define K8_MSR_MCGCTL_NBE BIT(4)
441
442#define K8_MSR_MC4CTL 0x0410
443#define K8_MSR_MC4STAT 0x0411
444#define K8_MSR_MC4ADDR 0x0412
445
446/* AMD places the first MC device at PCI device number 0x18. */
447static inline int get_mc_node_id_from_pdev(struct pci_dev *pdev)
448{
449 return PCI_SLOT(pdev->devfn) - 0x18;
450}
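
/*
 * Hedged example: the DRAM (Function 2) device of node 1 sits at
 * devfn PCI_DEVFN(0x19, 2), so PCI_SLOT() yields 0x19 and the helper
 * above returns node id 1.
 */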
451
452enum amd64_chipset_families {
453 K8_CPUS = 0,
454 F10_CPUS,
455 F11_CPUS,
456};
457
458/*
459 * Structure to hold:
460 *
461 * 1) dynamically read status and error address HW registers
462 * 2) sysfs entered values
463 * 3) MCE values
464 *
465 * Contents depend on the entry path into the module
466 */
467struct amd64_error_info_regs {
468 u32 nbcfg;
469 u32 nbsh;
470 u32 nbsl;
471 u32 nbeah;
472 u32 nbeal;
473};
474
475/* Error injection control structure */
476struct error_injection {
477 u32 section;
478 u32 word;
479 u32 bit_map;
480};
481
482struct amd64_pvt {
483 /* pci_device handles which we utilize */
484 struct pci_dev *addr_f1_ctl;
485 struct pci_dev *dram_f2_ctl;
486 struct pci_dev *misc_f3_ctl;
487
488 int mc_node_id; /* MC index of this MC node */
489 int ext_model; /* extended model value of this node */
490
491 struct low_ops *ops; /* pointer to per PCI Device ID func table */
492
493 int channel_count;
494
495 /* Raw registers */
496 u32 dclr0; /* DRAM Configuration Low DCT0 reg */
497 u32 dclr1; /* DRAM Configuration Low DCT1 reg */
498 u32 dchr0; /* DRAM Configuration High DCT0 reg */
499 u32 dchr1; /* DRAM Configuration High DCT1 reg */
500 u32 nbcap; /* North Bridge Capabilities */
501 u32 nbcfg; /* F10 North Bridge Configuration */
502 u32 ext_nbcfg; /* Extended F10 North Bridge Configuration */
503 u32 dhar; /* DRAM Hoist reg */
504 u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
505 u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
506
507 /* DRAM CS Base Address Registers F2x[1,0][5C:40] */
508 u32 dcsb0[CHIPSELECT_COUNT];
509 u32 dcsb1[CHIPSELECT_COUNT];
510
511 /* DRAM CS Mask Registers F2x[1,0][6C:60] */
512 u32 dcsm0[CHIPSELECT_COUNT];
513 u32 dcsm1[CHIPSELECT_COUNT];
514
515 /*
516 * Decoded parts of DRAM BASE and LIMIT Registers
517 * F1x[78,70,68,60,58,50,48,40]
518 */
519 u64 dram_base[DRAM_REG_COUNT];
520 u64 dram_limit[DRAM_REG_COUNT];
521 u8 dram_IntlvSel[DRAM_REG_COUNT];
522 u8 dram_IntlvEn[DRAM_REG_COUNT];
523 u8 dram_DstNode[DRAM_REG_COUNT];
524 u8 dram_rw_en[DRAM_REG_COUNT];
525
526 /*
527 * The following fields are set at (load) run time, after CPU revision
528 * has been determined, since the dct_base and dct_mask registers vary
529 * based on revision
530 */
531 u32 dcsb_base; /* DCSB base bits */
532 u32 dcsm_mask; /* DCSM mask bits */
533 u32 num_dcsm; /* Number of DCSM registers */
534 u32 dcs_mask_notused; /* DCSM notused mask bits */
535 u32 dcs_shift; /* DCSB and DCSM shift value */
536
537 u64 top_mem; /* top of memory below 4GB */
538 u64 top_mem2; /* top of memory above 4GB */
539
540 u32 dram_ctl_select_low; /* DRAM Controller Select Low Reg */
541 u32 dram_ctl_select_high; /* DRAM Controller Select High Reg */
542 u32 online_spare; /* On-Line spare Reg */
543
544 /* temp storage for when input is received from sysfs */
545 struct amd64_error_info_regs ctl_error_info;
546
547 /* place to store error injection parameters prior to issue */
548 struct error_injection injection;
549
550 /* Save old hw registers' values before we modified them */
551 u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */
552 u32 old_nbctl;
553 unsigned long old_mcgctl; /* per core on this node */
554
555 /* MC Type Index value: socket F vs Family 10h */
556 u32 mc_type_index;
557
558 /* misc settings */
559 struct flags {
560 unsigned long cf8_extcfg:1;
561 } flags;
562};
563
564struct scrubrate {
565 u32 scrubval; /* bit pattern for scrub rate */
566 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
567};
568
569extern struct scrubrate scrubrates[23];
570extern u32 revf_quad_ddr2_shift[16];
571extern const char *tt_msgs[4];
572extern const char *ll_msgs[4];
573extern const char *rrrr_msgs[16];
574extern const char *to_msgs[2];
575extern const char *pp_msgs[4];
576extern const char *ii_msgs[4];
577extern const char *ext_msgs[32];
578extern const char *htlink_msgs[8];
579
580#ifdef CONFIG_EDAC_DEBUG
581#define NUM_DBG_ATTRS 9
582#else
583#define NUM_DBG_ATTRS 0
584#endif
585
586#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
587#define NUM_INJ_ATTRS 5
588#else
589#define NUM_INJ_ATTRS 0
590#endif
591
592extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
593 amd64_inj_attrs[NUM_INJ_ATTRS];
594
595/*
596 * Each PCI Device ID type has its own set of hardware accessor
597 * functions and per-device encoding/decoding logic.
598 */
599struct low_ops {
600 int (*probe_valid_hardware)(struct amd64_pvt *pvt);
601 int (*early_channel_count)(struct amd64_pvt *pvt);
602
603 u64 (*get_error_address)(struct mem_ctl_info *mci,
604 struct amd64_error_info_regs *info);
605 void (*read_dram_base_limit)(struct amd64_pvt *pvt, int dram);
606 void (*read_dram_ctl_register)(struct amd64_pvt *pvt);
607 void (*map_sysaddr_to_csrow)(struct mem_ctl_info *mci,
608 struct amd64_error_info_regs *info,
609 u64 SystemAddr);
610 int (*dbam_map_to_pages)(struct amd64_pvt *pvt, int dram_map);
611};
612
613struct amd64_family_type {
614 const char *ctl_name;
615 u16 addr_f1_ctl;
616 u16 misc_f3_ctl;
617 struct low_ops ops;
618};
619
620static struct amd64_family_type amd64_family_types[];
621
622static inline const char *get_amd_family_name(int index)
623{
624 return amd64_family_types[index].ctl_name;
625}
626
627static inline struct low_ops *family_ops(int index)
628{
629 return &amd64_family_types[index].ops;
630}
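
/*
 * Hedged usage sketch (not in the patch): dispatching a per-family
 * accessor through the table above; 'family_index' is one of the
 * amd64_chipset_families values.
 */
static inline u64 example_error_address(struct mem_ctl_info *mci,
					struct amd64_error_info_regs *info,
					int family_index)
{
	return family_ops(family_index)->get_error_address(mci, info);
}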
631
632/*
633 * For future CPU versions, verify these minimum scrub rates as new 'slow'
634 * rates appear, and adjust the skip values for each supported CPU.
635 */
636#define K8_MIN_SCRUB_RATE_BITS 0x0
637#define F10_MIN_SCRUB_RATE_BITS 0x5
638#define F11_MIN_SCRUB_RATE_BITS 0x6
639
640int amd64_process_error_info(struct mem_ctl_info *mci,
641 struct amd64_error_info_regs *info,
642 int handle_errors);
643int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
644 u64 *hole_offset, u64 *hole_size);
diff --git a/drivers/edac/amd64_edac_dbg.c b/drivers/edac/amd64_edac_dbg.c
new file mode 100644
index 00000000000..0a41b248a4a
--- /dev/null
+++ b/drivers/edac/amd64_edac_dbg.c
@@ -0,0 +1,255 @@
1#include "amd64_edac.h"
2
3/*
4 * accept a hex value and store it into the virtual error register file
5 * fields nbeal and nbeah. Assume the virtual error values have already been
6 * set for NBSL, NBSH and NBCFG, then proceed to map the error values to an
7 * MC, CSROW and CHANNEL
8 */
9static ssize_t amd64_nbea_store(struct mem_ctl_info *mci, const char *data,
10 size_t count)
11{
12 struct amd64_pvt *pvt = mci->pvt_info;
13 unsigned long long value;
14 int ret = 0;
15
16 ret = strict_strtoull(data, 16, &value);
17 if (ret != -EINVAL) {
18 debugf0("received NBEA= 0x%llx\n", value);
19
20 /* place the value into the virtual error packet */
21 pvt->ctl_error_info.nbeal = (u32) value;
22 value >>= 32;
23 pvt->ctl_error_info.nbeah = (u32) value;
24
25 /* Process the Mapping request */
26 /* TODO: Add race prevention */
27 amd64_process_error_info(mci, &pvt->ctl_error_info, 1);
28
29 return count;
30 }
31 return ret;
32}
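
/*
 * Hedged usage note (the sysfs path is an assumption): after staging NBSL,
 * NBSH and NBCFG through their *_ctl files, writing a hex address here,
 * e.g. "echo 0x12345000 > .../mc0/nbea_ctl", completes the virtual error
 * record and triggers the amd64_process_error_info() mapping above.
 */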
33
34/* display the last NBEA (MCA NB Address (MC4_ADDR)) value written */
35static ssize_t amd64_nbea_show(struct mem_ctl_info *mci, char *data)
36{
37 struct amd64_pvt *pvt = mci->pvt_info;
38 u64 value;
39
40 value = pvt->ctl_error_info.nbeah;
41 value <<= 32;
42 value |= pvt->ctl_error_info.nbeal;
43
44 return sprintf(data, "%llx\n", value);
45}
46
47/* store the NBSL (MCA NB Status Low (MC4_STATUS)) value the user desires */
48static ssize_t amd64_nbsl_store(struct mem_ctl_info *mci, const char *data,
49 size_t count)
50{
51 struct amd64_pvt *pvt = mci->pvt_info;
52 unsigned long value;
53 int ret = 0;
54
55 ret = strict_strtoul(data, 16, &value);
56 if (ret != -EINVAL) {
57 debugf0("received NBSL= 0x%lx\n", value);
58
59 pvt->ctl_error_info.nbsl = (u32) value;
60
61 return count;
62 }
63 return ret;
64}
65
66/* display the last NBSL value written */
67static ssize_t amd64_nbsl_show(struct mem_ctl_info *mci, char *data)
68{
69 struct amd64_pvt *pvt = mci->pvt_info;
70 u32 value;
71
72 value = pvt->ctl_error_info.nbsl;
73
74 return sprintf(data, "%x\n", value);
75}
76
77/* store the NBSH (MCA NB Status High) value the user desires */
78static ssize_t amd64_nbsh_store(struct mem_ctl_info *mci, const char *data,
79 size_t count)
80{
81 struct amd64_pvt *pvt = mci->pvt_info;
82 unsigned long value;
83 int ret = 0;
84
85 ret = strict_strtoul(data, 16, &value);
86 if (ret != -EINVAL) {
87 debugf0("received NBSH= 0x%lx\n", value);
88
89 pvt->ctl_error_info.nbsh = (u32) value;
90
91 return count;
92 }
93 return ret;
94}
95
96/* display the last NBSH value written */
97static ssize_t amd64_nbsh_show(struct mem_ctl_info *mci, char *data)
98{
99 struct amd64_pvt *pvt = mci->pvt_info;
100 u32 value;
101
102 value = pvt->ctl_error_info.nbsh;
103
104 return sprintf(data, "%x\n", value);
105}
106
107/* accept and store the NBCFG (MCA NB Configuration) value the user desires */
108static ssize_t amd64_nbcfg_store(struct mem_ctl_info *mci,
109 const char *data, size_t count)
110{
111 struct amd64_pvt *pvt = mci->pvt_info;
112 unsigned long value;
113 int ret = 0;
114
115 ret = strict_strtoul(data, 16, &value);
116 if (ret != -EINVAL) {
117 debugf0("received NBCFG= 0x%lx\n", value);
118
119 pvt->ctl_error_info.nbcfg = (u32) value;
120
121 return count;
122 }
123 return ret;
124}
125
126/* various show routines for the controls of an MCI */
127static ssize_t amd64_nbcfg_show(struct mem_ctl_info *mci, char *data)
128{
129 struct amd64_pvt *pvt = mci->pvt_info;
130
131 return sprintf(data, "%x\n", pvt->ctl_error_info.nbcfg);
132}
133
134
135static ssize_t amd64_dhar_show(struct mem_ctl_info *mci, char *data)
136{
137 struct amd64_pvt *pvt = mci->pvt_info;
138
139 return sprintf(data, "%x\n", pvt->dhar);
140}
141
142
143static ssize_t amd64_dbam_show(struct mem_ctl_info *mci, char *data)
144{
145 struct amd64_pvt *pvt = mci->pvt_info;
146
147 return sprintf(data, "%x\n", pvt->dbam0);
148}
149
150
151static ssize_t amd64_topmem_show(struct mem_ctl_info *mci, char *data)
152{
153 struct amd64_pvt *pvt = mci->pvt_info;
154
155 return sprintf(data, "%llx\n", pvt->top_mem);
156}
157
158
159static ssize_t amd64_topmem2_show(struct mem_ctl_info *mci, char *data)
160{
161 struct amd64_pvt *pvt = mci->pvt_info;
162
163 return sprintf(data, "%llx\n", pvt->top_mem2);
164}
165
166static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data)
167{
168 u64 hole_base = 0;
169 u64 hole_offset = 0;
170 u64 hole_size = 0;
171
172 amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
173
174 return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
175 hole_size);
176}
177
178/*
179 * update NUM_DBG_ATTRS in case you add new members
180 */
181struct mcidev_sysfs_attribute amd64_dbg_attrs[] = {
182
183 {
184 .attr = {
185 .name = "nbea_ctl",
186 .mode = (S_IRUGO | S_IWUSR)
187 },
188 .show = amd64_nbea_show,
189 .store = amd64_nbea_store,
190 },
191 {
192 .attr = {
193 .name = "nbsl_ctl",
194 .mode = (S_IRUGO | S_IWUSR)
195 },
196 .show = amd64_nbsl_show,
197 .store = amd64_nbsl_store,
198 },
199 {
200 .attr = {
201 .name = "nbsh_ctl",
202 .mode = (S_IRUGO | S_IWUSR)
203 },
204 .show = amd64_nbsh_show,
205 .store = amd64_nbsh_store,
206 },
207 {
208 .attr = {
209 .name = "nbcfg_ctl",
210 .mode = (S_IRUGO | S_IWUSR)
211 },
212 .show = amd64_nbcfg_show,
213 .store = amd64_nbcfg_store,
214 },
215 {
216 .attr = {
217 .name = "dhar",
218 .mode = (S_IRUGO)
219 },
220 .show = amd64_dhar_show,
221 .store = NULL,
222 },
223 {
224 .attr = {
225 .name = "dbam",
226 .mode = (S_IRUGO)
227 },
228 .show = amd64_dbam_show,
229 .store = NULL,
230 },
231 {
232 .attr = {
233 .name = "topmem",
234 .mode = (S_IRUGO)
235 },
236 .show = amd64_topmem_show,
237 .store = NULL,
238 },
239 {
240 .attr = {
241 .name = "topmem2",
242 .mode = (S_IRUGO)
243 },
244 .show = amd64_topmem2_show,
245 .store = NULL,
246 },
247 {
248 .attr = {
249 .name = "dram_hole",
250 .mode = (S_IRUGO)
251 },
252 .show = amd64_hole_show,
253 .store = NULL,
254 },
255};
diff --git a/drivers/edac/amd64_edac_err_types.c b/drivers/edac/amd64_edac_err_types.c
new file mode 100644
index 00000000000..f212ff12a9d
--- /dev/null
+++ b/drivers/edac/amd64_edac_err_types.c
@@ -0,0 +1,161 @@
1#include "amd64_edac.h"
2
3/*
4 * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
5 * for DDR2 DRAM mapping.
6 */
7u32 revf_quad_ddr2_shift[] = {
8 0, /* 0000b NULL DIMM (128mb) */
9 28, /* 0001b 256mb */
10 29, /* 0010b 512mb */
11 29, /* 0011b 512mb */
12 29, /* 0100b 512mb */
13 30, /* 0101b 1gb */
14 30, /* 0110b 1gb */
15 31, /* 0111b 2gb */
16 31, /* 1000b 2gb */
17 32, /* 1001b 4gb */
18 32, /* 1010b 4gb */
19 33, /* 1011b 8gb */
20 0, /* 1100b future */
21 0, /* 1101b future */
22 0, /* 1110b future */
23 0 /* 1111b future */
24};
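
/*
 * Hedged sketch: a 4-bit DBAM code selects an entry above; for the
 * non-"future" encodings the chip-select size in bytes is then 1 << shift.
 */
static inline u64 example_cs_size_bytes(unsigned int dbam_code)
{
	u32 shift = revf_quad_ddr2_shift[dbam_code & 0xF];

	return shift ? (1ULL << shift) : 0;
}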
25
26/*
27 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
28 * bandwidth to a valid bit pattern. The 'set' operation finds the matching
29 * or higher value.
30 *
31 *FIXME: Produce a better mapping/linearisation.
32 */
33
34struct scrubrate scrubrates[] = {
35 { 0x01, 1600000000UL},
36 { 0x02, 800000000UL},
37 { 0x03, 400000000UL},
38 { 0x04, 200000000UL},
39 { 0x05, 100000000UL},
40 { 0x06, 50000000UL},
41 { 0x07, 25000000UL},
42 { 0x08, 12284069UL},
43 { 0x09, 6274509UL},
44 { 0x0A, 3121951UL},
45 { 0x0B, 1560975UL},
46 { 0x0C, 781440UL},
47 { 0x0D, 390720UL},
48 { 0x0E, 195300UL},
49 { 0x0F, 97650UL},
50 { 0x10, 48854UL},
51 { 0x11, 24427UL},
52 { 0x12, 12213UL},
53 { 0x13, 6101UL},
54 { 0x14, 3051UL},
55 { 0x15, 1523UL},
56 { 0x16, 761UL},
57 { 0x00, 0UL}, /* scrubbing off */
58};
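
/*
 * Hedged sketch of the "set" lookup described above, assuming the table
 * order shown: walk from the fastest rate down and pick the first entry
 * whose bandwidth does not exceed the request; the terminating { 0x00, 0 }
 * entry turns scrubbing off when nothing fits.
 */
static u32 example_scrubval_for(u32 bandwidth)
{
	int i;

	for (i = 0; scrubrates[i].bandwidth != 0; i++)
		if (scrubrates[i].bandwidth <= bandwidth)
			break;

	return scrubrates[i].scrubval;
}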
59
60/*
61 * string representations for the different MCA-reported error types, see F3x48
62 * or MSR0000_0411.
63 */
64const char *tt_msgs[] = { /* transaction type */
65 "instruction",
66 "data",
67 "generic",
68 "reserved"
69};
70
71const char *ll_msgs[] = { /* cache level */
72 "L0",
73 "L1",
74 "L2",
75 "L3/generic"
76};
77
78const char *rrrr_msgs[] = {
79 "generic",
80 "generic read",
81 "generic write",
82 "data read",
83 "data write",
84 "inst fetch",
85 "prefetch",
86 "evict",
87 "snoop",
88 "reserved RRRR= 9",
89 "reserved RRRR= 10",
90 "reserved RRRR= 11",
91 "reserved RRRR= 12",
92 "reserved RRRR= 13",
93 "reserved RRRR= 14",
94 "reserved RRRR= 15"
95};
96
97const char *pp_msgs[] = { /* participating processor */
98 "local node originated (SRC)",
99 "local node responded to request (RES)",
100 "local node observed as 3rd party (OBS)",
101 "generic"
102};
103
104const char *to_msgs[] = {
105 "no timeout",
106 "timed out"
107};
108
109const char *ii_msgs[] = { /* memory or i/o */
110 "mem access",
111 "reserved",
112 "i/o access",
113 "generic"
114};
115
116/* Map the 5 bits of Extended Error code to the string table. */
117const char *ext_msgs[] = { /* extended error */
118 "K8 ECC error/F10 reserved", /* 0_0000b */
119 "CRC error", /* 0_0001b */
120 "sync error", /* 0_0010b */
121 "mst abort", /* 0_0011b */
122 "tgt abort", /* 0_0100b */
123 "GART error", /* 0_0101b */
124 "RMW error", /* 0_0110b */
125 "Wdog timer error", /* 0_0111b */
126 "F10-ECC/K8-Chipkill error", /* 0_1000b */
127 "DEV Error", /* 0_1001b */
128 "Link Data error", /* 0_1010b */
129 "Link or L3 Protocol error", /* 0_1011b */
130 "NB Array error", /* 0_1100b */
131 "DRAM Parity error", /* 0_1101b */
132 "Link Retry/GART Table Walk/DEV Table Walk error", /* 0_1110b */
133 "Res 0x0ff error", /* 0_1111b */
134 "Res 0x100 error", /* 1_0000b */
135 "Res 0x101 error", /* 1_0001b */
136 "Res 0x102 error", /* 1_0010b */
137 "Res 0x103 error", /* 1_0011b */
138 "Res 0x104 error", /* 1_0100b */
139 "Res 0x105 error", /* 1_0101b */
140 "Res 0x106 error", /* 1_0110b */
141 "Res 0x107 error", /* 1_0111b */
142 "Res 0x108 error", /* 1_1000b */
143 "Res 0x109 error", /* 1_1001b */
144 "Res 0x10A error", /* 1_1010b */
145 "Res 0x10B error", /* 1_1011b */
146 "L3 Cache Data error", /* 1_1100b */
147 "L3 CacheTag error", /* 1_1101b */
148 "L3 Cache LRU error", /* 1_1110b */
149 "Res 0x1FF error" /* 1_1111b */
150};
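
/*
 * Hedged sketch: the table is indexed directly with the 5-bit code that
 * EXTRACT_EXT_ERROR_CODE() (from amd64_edac.h) pulls out of NBSL.
 */
static inline const char *example_ext_msg(u32 nbsl)
{
	return ext_msgs[EXTRACT_EXT_ERROR_CODE(nbsl)];
}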
151
152const char *htlink_msgs[] = {
153 "none",
154 "1",
155 "2",
156 "1 2",
157 "3",
158 "1 3",
159 "2 3",
160 "1 2 3"
161};
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
new file mode 100644
index 00000000000..d3675b76b3a
--- /dev/null
+++ b/drivers/edac/amd64_edac_inj.c
@@ -0,0 +1,185 @@
1#include "amd64_edac.h"
2
3/*
4 * store the error injection section value, which refers to one of the 4
5 * 16-byte sections within a 64-byte cacheline
6 *
7 * range: 0..3
8 */
9static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
10 const char *data, size_t count)
11{
12 struct amd64_pvt *pvt = mci->pvt_info;
13 unsigned long value;
14 int ret = 0;
15
16 ret = strict_strtoul(data, 10, &value);
17 if (ret != -EINVAL) {
18 pvt->injection.section = (u32) value;
19 return count;
20 }
21 return ret;
22}
23
24/*
25 * store the error injection word value, which refers to one of the 9 16-bit
26 * words of the 16-byte (128-bit + ECC bits) section
27 *
28 * range: 0..8
29 */
30static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
31 const char *data, size_t count)
32{
33 struct amd64_pvt *pvt = mci->pvt_info;
34 unsigned long value;
35 int ret = 0;
36
37 ret = strict_strtoul(data, 10, &value);
38 if (ret != -EINVAL) {
39
40 value = (value <= 8) ? value : 0;
41 pvt->injection.word = (u32) value;
42
43 return count;
44 }
45 return ret;
46}
47
48/*
49 * store the 16-bit error injection vector which enables injecting errors into
50 * the corresponding bit within the error injection word above. When used
51 * during a DRAM ECC read, it holds the contents of the DRAM ECC bits.
52 */
53static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
54 const char *data, size_t count)
55{
56 struct amd64_pvt *pvt = mci->pvt_info;
57 unsigned long value;
58 int ret = 0;
59
60 ret = strict_strtoul(data, 16, &value);
61 if (ret != -EINVAL) {
62
63 pvt->injection.bit_map = (u32) value & 0xFFFF;
64
65 return count;
66 }
67 return ret;
68}
69
70/*
71 * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
72 * fields needed by the injection registers and read the NB Array Data Port.
73 */
74static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
75 const char *data, size_t count)
76{
77 struct amd64_pvt *pvt = mci->pvt_info;
78 unsigned long value;
79 u32 section, word_bits;
80 int ret = 0;
81
82 ret = strict_strtoul(data, 10, &value);
83 if (ret != -EINVAL) {
84
85 /* Form value to choose 16-byte section of cacheline */
86 section = F10_NB_ARRAY_DRAM_ECC |
87 SET_NB_ARRAY_ADDRESS(pvt->injection.section);
88 pci_write_config_dword(pvt->misc_f3_ctl,
89 F10_NB_ARRAY_ADDR, section);
90
91 word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
92 pvt->injection.bit_map);
93
94 /* Issue 'word' and 'bit' along with the READ request */
95 pci_write_config_dword(pvt->misc_f3_ctl,
96 F10_NB_ARRAY_DATA, word_bits);
97
98 debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
99
100 return count;
101 }
102 return ret;
103}
104
105/*
106 * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
107 * fields needed by the injection registers.
108 */
109static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
110 const char *data, size_t count)
111{
112 struct amd64_pvt *pvt = mci->pvt_info;
113 unsigned long value;
114 u32 section, word_bits;
115 int ret = 0;
116
117 ret = strict_strtoul(data, 10, &value);
118 if (ret != -EINVAL) {
119
120 /* Form value to choose 16-byte section of cacheline */
121 section = F10_NB_ARRAY_DRAM_ECC |
122 SET_NB_ARRAY_ADDRESS(pvt->injection.section);
123 pci_write_config_dword(pvt->misc_f3_ctl,
124 F10_NB_ARRAY_ADDR, section);
125
126 word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
127 pvt->injection.bit_map);
128
129 /* Issue 'word' and 'bit' along with the WRITE request */
130 pci_write_config_dword(pvt->misc_f3_ctl,
131 F10_NB_ARRAY_DATA, word_bits);
132
133 debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
134
135 return count;
136 }
137 return ret;
138}
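
/*
 * Hedged usage note: a full injection round trip is assumed to be staged
 * through the stores above in the order section -> word -> ecc_vector,
 * followed by a write to inject_write (arm the error) and then a write to
 * inject_read (consume it), each stage parked in pvt->injection.
 */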
139
140/*
141 * update NUM_INJ_ATTRS in case you add new members
142 */
143struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
144
145 {
146 .attr = {
147 .name = "inject_section",
148 .mode = (S_IRUGO | S_IWUSR)
149 },
150 .show = NULL,
151 .store = amd64_inject_section_store,
152 },
153 {
154 .attr = {
155 .name = "inject_word",
156 .mode = (S_IRUGO | S_IWUSR)
157 },
158 .show = NULL,
159 .store = amd64_inject_word_store,
160 },
161 {
162 .attr = {
163 .name = "inject_ecc_vector",
164 .mode = (S_IRUGO | S_IWUSR)
165 },
166 .show = NULL,
167 .store = amd64_inject_ecc_vector_store,
168 },
169 {
170 .attr = {
171 .name = "inject_write",
172 .mode = (S_IRUGO | S_IWUSR)
173 },
174 .show = NULL,
175 .store = amd64_inject_write_store,
176 },
177 {
178 .attr = {
179 .name = "inject_read",
180 .mode = (S_IRUGO | S_IWUSR)
181 },
182 .show = NULL,
183 .store = amd64_inject_read_store,
184 },
185};
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
index 61469218112..2cb58ef743e 100644
--- a/drivers/edac/amd8111_edac.c
+++ b/drivers/edac/amd8111_edac.c
@@ -389,7 +389,7 @@ static int amd8111_dev_probe(struct pci_dev *dev,
389 dev_info->edac_dev->dev = &dev_info->dev->dev; 389 dev_info->edac_dev->dev = &dev_info->dev->dev;
390 dev_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR; 390 dev_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
391 dev_info->edac_dev->ctl_name = dev_info->ctl_name; 391 dev_info->edac_dev->ctl_name = dev_info->ctl_name;
392 dev_info->edac_dev->dev_name = dev_info->dev->dev.bus_id; 392 dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
393 393
394 if (edac_op_state == EDAC_OPSTATE_POLL) 394 if (edac_op_state == EDAC_OPSTATE_POLL)
395 dev_info->edac_dev->edac_check = dev_info->check; 395 dev_info->edac_dev->edac_check = dev_info->check;
@@ -473,7 +473,7 @@ static int amd8111_pci_probe(struct pci_dev *dev,
473 pci_info->edac_dev->dev = &pci_info->dev->dev; 473 pci_info->edac_dev->dev = &pci_info->dev->dev;
474 pci_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR; 474 pci_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
475 pci_info->edac_dev->ctl_name = pci_info->ctl_name; 475 pci_info->edac_dev->ctl_name = pci_info->ctl_name;
476 pci_info->edac_dev->dev_name = pci_info->dev->dev.bus_id; 476 pci_info->edac_dev->dev_name = dev_name(&pci_info->dev->dev);
477 477
478 if (edac_op_state == EDAC_OPSTATE_POLL) 478 if (edac_op_state == EDAC_OPSTATE_POLL)
479 pci_info->edac_dev->edac_check = pci_info->check; 479 pci_info->edac_dev->edac_check = pci_info->check;
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c
index c083b31cac5..b432d60c622 100644
--- a/drivers/edac/amd8131_edac.c
+++ b/drivers/edac/amd8131_edac.c
@@ -287,7 +287,7 @@ static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id)
287 dev_info->edac_dev->dev = &dev_info->dev->dev; 287 dev_info->edac_dev->dev = &dev_info->dev->dev;
288 dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR; 288 dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR;
289 dev_info->edac_dev->ctl_name = dev_info->ctl_name; 289 dev_info->edac_dev->ctl_name = dev_info->ctl_name;
290 dev_info->edac_dev->dev_name = dev_info->dev->dev.bus_id; 290 dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
291 291
292 if (edac_op_state == EDAC_OPSTATE_POLL) 292 if (edac_op_state == EDAC_OPSTATE_POLL)
293 dev_info->edac_dev->edac_check = amd8131_chipset.check; 293 dev_info->edac_dev->edac_check = amd8131_chipset.check;
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index facfdb1fa71..d205d493a68 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -1084,7 +1084,7 @@ static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
1084 struct pci_dev *dev = pvt->dev_d0f1; 1084 struct pci_dev *dev = pvt->dev_d0f1;
1085 int enable = 1; 1085 int enable = 1;
1086 1086
1087 /* Allow module paramter override, else see if CPU supports parity */ 1087 /* Allow module parameter override, else see if CPU supports parity */
1088 if (sysbus_parity != -1) { 1088 if (sysbus_parity != -1) {
1089 enable = sysbus_parity; 1089 enable = sysbus_parity;
1090 } else if (cpu_id[0] && 1090 } else if (cpu_id[0] &&
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 6ad95c8d636..48d3b140983 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -76,10 +76,11 @@
76extern int edac_debug_level; 76extern int edac_debug_level;
77 77
78#ifndef CONFIG_EDAC_DEBUG_VERBOSE 78#ifndef CONFIG_EDAC_DEBUG_VERBOSE
79#define edac_debug_printk(level, fmt, arg...) \ 79#define edac_debug_printk(level, fmt, arg...) \
80 do { \ 80 do { \
81 if (level <= edac_debug_level) \ 81 if (level <= edac_debug_level) \
82 edac_printk(KERN_DEBUG, EDAC_DEBUG, fmt, ##arg); \ 82 edac_printk(KERN_DEBUG, EDAC_DEBUG, \
83 "%s: " fmt, __func__, ##arg); \
83 } while (0) 84 } while (0)
84#else /* CONFIG_EDAC_DEBUG_VERBOSE */ 85#else /* CONFIG_EDAC_DEBUG_VERBOSE */
85#define edac_debug_printk(level, fmt, arg...) \ 86#define edac_debug_printk(level, fmt, arg...) \
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 5f1b5400d96..24c84ae8152 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -596,6 +596,7 @@ int dmi_get_year(int field)
596 596
597 return year; 597 return year;
598} 598}
599EXPORT_SYMBOL(dmi_get_year);
599 600
600/** 601/**
601 * dmi_walk - Walk the DMI table and get called back for every record 602 * dmi_walk - Walk the DMI table and get called back for every record
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index edb02530e46..11f373971fa 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -69,7 +69,7 @@ comment "Memory mapped GPIO expanders:"
69 69
70config GPIO_XILINX 70config GPIO_XILINX
71 bool "Xilinx GPIO support" 71 bool "Xilinx GPIO support"
72 depends on PPC_OF 72 depends on PPC_OF || MICROBLAZE
73 help 73 help
74 Say yes here to support the Xilinx FPGA GPIO device 74 Say yes here to support the Xilinx FPGA GPIO device
75 75
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 4cd35d8fd79..f5d46e7199d 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -67,12 +67,18 @@ config DRM_I830
67 will load the correct one. 67 will load the correct one.
68 68
69config DRM_I915 69config DRM_I915
70 tristate "i915 driver"
70 select FB_CFB_FILLRECT 71 select FB_CFB_FILLRECT
71 select FB_CFB_COPYAREA 72 select FB_CFB_COPYAREA
72 select FB_CFB_IMAGEBLIT 73 select FB_CFB_IMAGEBLIT
73 select FB 74 select FB
74 select FRAMEBUFFER_CONSOLE if !EMBEDDED 75 select FRAMEBUFFER_CONSOLE if !EMBEDDED
75 tristate "i915 driver" 76 # i915 depends on ACPI_VIDEO when ACPI is enabled
77 # but for select to work, need to select ACPI_VIDEO's dependencies, ick
78 select VIDEO_OUTPUT_CONTROL if ACPI
79 select BACKLIGHT_CLASS_DEVICE if ACPI
80 select INPUT if ACPI
81 select ACPI_VIDEO if ACPI
76 help 82 help
77 Choose this option if you have a system that has Intel 830M, 845G, 83 Choose this option if you have a system that has Intel 830M, 845G,
78 852GM, 855GM 865G or 915G integrated graphics. If M is selected, the 84 852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
@@ -84,12 +90,6 @@ config DRM_I915
84config DRM_I915_KMS 90config DRM_I915_KMS
85 bool "Enable modesetting on intel by default" 91 bool "Enable modesetting on intel by default"
86 depends on DRM_I915 92 depends on DRM_I915
87 # i915 KMS depends on ACPI_VIDEO when ACPI is enabled
88 # but for select to work, need to select ACPI_VIDEO's dependencies, ick
89 select VIDEO_OUTPUT_CONTROL if ACPI
90 select BACKLIGHT_CLASS_DEVICE if ACPI
91 select INPUT if ACPI
92 select ACPI_VIDEO if ACPI
93 help 93 help
94 Choose this option if you want kernel modesetting enabled by default, 94 Choose this option if you want kernel modesetting enabled by default,
95 and you have a new enough userspace to support this. Running old 95 and you have a new enough userspace to support this. Running old
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 6d80d17f1e9..80a257554b3 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -170,6 +170,14 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
170 } 170 }
171 DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n", 171 DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
172 (unsigned long long)map->offset, map->size, map->type); 172 (unsigned long long)map->offset, map->size, map->type);
173
174 /* Page-align _DRM_SHM maps. They are allocated here so no security
175 * hole is created by that, and it works around various broken drivers that
176 * use a non-aligned quantity to map the SAREA. --BenH
177 */
178 if (map->type == _DRM_SHM)
179 map->size = PAGE_ALIGN(map->size);
180
173 if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) { 181 if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
174 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 182 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
175 return -EINVAL; 183 return -EINVAL;
@@ -363,7 +371,8 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
363 list->user_token = list->hash.key << PAGE_SHIFT; 371 list->user_token = list->hash.key << PAGE_SHIFT;
364 mutex_unlock(&dev->struct_mutex); 372 mutex_unlock(&dev->struct_mutex);
365 373
366 list->master = dev->primary->master; 374 if (!(map->flags & _DRM_DRIVER))
375 list->master = dev->primary->master;
367 *maplist = list; 376 *maplist = list;
368 return 0; 377 return 0;
369 } 378 }
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 94a76887173..8fab7890a36 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2294,7 +2294,12 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
2294 } 2294 }
2295 } 2295 }
2296 2296
2297 if (connector->funcs->set_property) 2297 /* Do DPMS ourselves */
2298 if (property == connector->dev->mode_config.dpms_property) {
2299 if (connector->funcs->dpms)
2300 (*connector->funcs->dpms)(connector, (int) out_resp->value);
2301 ret = 0;
2302 } else if (connector->funcs->set_property)
2298 ret = connector->funcs->set_property(connector, property, out_resp->value); 2303 ret = connector->funcs->set_property(connector, property, out_resp->value);
2299 2304
2300 /* store the property value if successful */ 2305 /* store the property value if successful */
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 45890447fee..a6f73f1e99d 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -199,6 +199,29 @@ static void drm_helper_add_std_modes(struct drm_device *dev,
199} 199}
200 200
201/** 201/**
202 * drm_helper_encoder_in_use - check if a given encoder is in use
203 * @encoder: encoder to check
204 *
205 * LOCKING:
206 * Caller must hold mode config lock.
207 *
208 * Walk @encoder's DRM device's mode_config and see if @encoder is in use.
209 *
210 * RETURNS:
211 * True if a connector in the mode_config currently uses @encoder.
212 */
213bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
214{
215 struct drm_connector *connector;
216 struct drm_device *dev = encoder->dev;
217 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
218 if (connector->encoder == encoder)
219 return true;
220 return false;
221}
222EXPORT_SYMBOL(drm_helper_encoder_in_use);
223
224/**
202 * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config 225 * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
203 * @crtc: CRTC to check 226 * @crtc: CRTC to check
204 * 227 *
@@ -216,7 +239,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
216 struct drm_device *dev = crtc->dev; 239 struct drm_device *dev = crtc->dev;
217 /* FIXME: Locking around list access? */ 240 /* FIXME: Locking around list access? */
218 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) 241 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
219 if (encoder->crtc == crtc) 242 if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
220 return true; 243 return true;
221 return false; 244 return false;
222} 245}
@@ -240,7 +263,7 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
240 263
241 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 264 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
242 encoder_funcs = encoder->helper_private; 265 encoder_funcs = encoder->helper_private;
243 if (!encoder->crtc) 266 if (!drm_helper_encoder_in_use(encoder))
244 (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); 267 (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
245 } 268 }
246 269
@@ -935,6 +958,88 @@ bool drm_helper_initial_config(struct drm_device *dev)
935} 958}
936EXPORT_SYMBOL(drm_helper_initial_config); 959EXPORT_SYMBOL(drm_helper_initial_config);
937 960
961static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
962{
963 int dpms = DRM_MODE_DPMS_OFF;
964 struct drm_connector *connector;
965 struct drm_device *dev = encoder->dev;
966
967 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
968 if (connector->encoder == encoder)
969 if (connector->dpms < dpms)
970 dpms = connector->dpms;
971 return dpms;
972}
973
974static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
975{
976 int dpms = DRM_MODE_DPMS_OFF;
977 struct drm_connector *connector;
978 struct drm_device *dev = crtc->dev;
979
980 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
981 if (connector->encoder && connector->encoder->crtc == crtc)
982 if (connector->dpms < dpms)
983 dpms = connector->dpms;
984 return dpms;
985}
986
987/**
988 * drm_helper_connector_dpms
989 * @connector: affected connector
990 * @mode: DPMS mode
991 *
992 * Calls the low-level connector DPMS function, then
993 * calls appropriate encoder and crtc DPMS functions as well
994 */
995void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
996{
997 struct drm_encoder *encoder = connector->encoder;
998 struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
999 int old_dpms;
1000
1001 if (mode == connector->dpms)
1002 return;
1003
1004 old_dpms = connector->dpms;
1005 connector->dpms = mode;
1006
1007 /* from off to on, do crtc then encoder */
1008 if (mode < old_dpms) {
1009 if (crtc) {
1010 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1011 if (crtc_funcs->dpms)
1012 (*crtc_funcs->dpms) (crtc,
1013 drm_helper_choose_crtc_dpms(crtc));
1014 }
1015 if (encoder) {
1016 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
1017 if (encoder_funcs->dpms)
1018 (*encoder_funcs->dpms) (encoder,
1019 drm_helper_choose_encoder_dpms(encoder));
1020 }
1021 }
1022
1023 /* from on to off, do encoder then crtc */
1024 if (mode > old_dpms) {
1025 if (encoder) {
1026 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
1027 if (encoder_funcs->dpms)
1028 (*encoder_funcs->dpms) (encoder,
1029 drm_helper_choose_encoder_dpms(encoder));
1030 }
1031 if (crtc) {
1032 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
1033 if (crtc_funcs->dpms)
1034 (*crtc_funcs->dpms) (crtc,
1035 drm_helper_choose_crtc_dpms(crtc));
1036 }
1037 }
1038
1039 return;
1040}
1041EXPORT_SYMBOL(drm_helper_connector_dpms);
1042
938/** 1043/**
939 * drm_hotplug_stage_two 1044 * drm_hotplug_stage_two
940 * @dev DRM device 1045 * @dev DRM device
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index f01def16a66..019b7c57823 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -481,7 +481,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
481 } 481 }
482 retcode = func(dev, kdata, file_priv); 482 retcode = func(dev, kdata, file_priv);
483 483
484 if ((retcode == 0) && (cmd & IOC_OUT)) { 484 if (cmd & IOC_OUT) {
485 if (copy_to_user((void __user *)arg, kdata, 485 if (copy_to_user((void __user *)arg, kdata,
486 _IOC_SIZE(cmd)) != 0) 486 _IOC_SIZE(cmd)) != 0)
487 retcode = -EFAULT; 487 retcode = -EFAULT;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ca9c6165671..801a0d0e081 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -289,6 +289,11 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
289 struct drm_display_mode *mode; 289 struct drm_display_mode *mode;
290 struct detailed_pixel_timing *pt = &timing->data.pixel_data; 290 struct detailed_pixel_timing *pt = &timing->data.pixel_data;
291 291
292 /* ignore tiny modes */
293 if (((pt->hactive_hi << 8) | pt->hactive_lo) < 64 ||
294 ((pt->vactive_hi << 8) | pt->hactive_lo) < 64)
295 return NULL;
296
292 if (pt->stereo) { 297 if (pt->stereo) {
293 printk(KERN_WARNING "stereo mode not supported\n"); 298 printk(KERN_WARNING "stereo mode not supported\n");
294 return NULL; 299 return NULL;
@@ -584,85 +589,13 @@ int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
584} 589}
585EXPORT_SYMBOL(drm_do_probe_ddc_edid); 590EXPORT_SYMBOL(drm_do_probe_ddc_edid);
586 591
587/**
588 * Get EDID information.
589 *
590 * \param adapter : i2c device adaptor.
591 * \param buf : EDID data buffer to be filled
592 * \param len : EDID data buffer length
593 * \return 0 on success or -1 on failure.
594 *
595 * Initialize DDC, then fetch EDID information
596 * by calling drm_do_probe_ddc_edid function.
597 */
598static int drm_ddc_read(struct i2c_adapter *adapter,
599 unsigned char *buf, int len)
600{
601 struct i2c_algo_bit_data *algo_data = adapter->algo_data;
602 int i, j;
603 int ret = -1;
604
605 algo_data->setscl(algo_data->data, 1);
606
607 for (i = 0; i < 1; i++) {
608 /* For some old monitors we need the
609 * following process to initialize/stop DDC
610 */
611 algo_data->setsda(algo_data->data, 1);
612 msleep(13);
613
614 algo_data->setscl(algo_data->data, 1);
615 for (j = 0; j < 5; j++) {
616 msleep(10);
617 if (algo_data->getscl(algo_data->data))
618 break;
619 }
620 if (j == 5)
621 continue;
622
623 algo_data->setsda(algo_data->data, 0);
624 msleep(15);
625 algo_data->setscl(algo_data->data, 0);
626 msleep(15);
627 algo_data->setsda(algo_data->data, 1);
628 msleep(15);
629
630 /* Do the real work */
631 ret = drm_do_probe_ddc_edid(adapter, buf, len);
632 algo_data->setsda(algo_data->data, 0);
633 algo_data->setscl(algo_data->data, 0);
634 msleep(15);
635
636 algo_data->setscl(algo_data->data, 1);
637 for (j = 0; j < 10; j++) {
638 msleep(10);
639 if (algo_data->getscl(algo_data->data))
640 break;
641 }
642
643 algo_data->setsda(algo_data->data, 1);
644 msleep(15);
645 algo_data->setscl(algo_data->data, 0);
646 algo_data->setsda(algo_data->data, 0);
647 if (ret == 0)
648 break;
649 }
650 /* Release the DDC lines when done or the Apple Cinema HD display
651 * will switch off
652 */
653 algo_data->setsda(algo_data->data, 1);
654 algo_data->setscl(algo_data->data, 1);
655
656 return ret;
657}
658
659static int drm_ddc_read_edid(struct drm_connector *connector, 592static int drm_ddc_read_edid(struct drm_connector *connector,
660 struct i2c_adapter *adapter, 593 struct i2c_adapter *adapter,
661 char *buf, int len) 594 char *buf, int len)
662{ 595{
663 int ret; 596 int ret;
664 597
665 ret = drm_ddc_read(adapter, buf, len); 598 ret = drm_do_probe_ddc_edid(adapter, buf, len);
666 if (ret != 0) { 599 if (ret != 0) {
667 dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n", 600 dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n",
668 drm_get_connector_name(connector)); 601 drm_get_connector_name(connector));
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4984aa89cf3..ec43005100d 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -133,7 +133,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
133 133
134 BUG_ON((size & (PAGE_SIZE - 1)) != 0); 134 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
135 135
136 obj = kcalloc(1, sizeof(*obj), GFP_KERNEL); 136 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
137 137
138 obj->dev = dev; 138 obj->dev = dev;
139 obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); 139 obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index af539f7d87d..ac35145c3e2 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -62,6 +62,7 @@ int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
62 } 62 }
63 return 0; 63 return 0;
64} 64}
65EXPORT_SYMBOL(drm_ht_create);
65 66
66void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key) 67void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
67{ 68{
@@ -156,6 +157,7 @@ int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *it
156 } 157 }
157 return 0; 158 return 0;
158} 159}
160EXPORT_SYMBOL(drm_ht_just_insert_please);
159 161
160int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, 162int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
161 struct drm_hash_item **item) 163 struct drm_hash_item **item)
@@ -169,6 +171,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
169 *item = hlist_entry(list, struct drm_hash_item, head); 171 *item = hlist_entry(list, struct drm_hash_item, head);
170 return 0; 172 return 0;
171} 173}
174EXPORT_SYMBOL(drm_ht_find_item);
172 175
173int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key) 176int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
174{ 177{
@@ -202,3 +205,4 @@ void drm_ht_remove(struct drm_open_hash *ht)
202 ht->table = NULL; 205 ht->table = NULL;
203 } 206 }
204} 207}
208EXPORT_SYMBOL(drm_ht_remove);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 93e677a481f..fc8e5acd9d9 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -196,6 +196,7 @@ int drm_irq_install(struct drm_device *dev)
196{ 196{
197 int ret = 0; 197 int ret = 0;
198 unsigned long sh_flags = 0; 198 unsigned long sh_flags = 0;
199 char *irqname;
199 200
200 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 201 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
201 return -EINVAL; 202 return -EINVAL;
@@ -227,8 +228,13 @@ int drm_irq_install(struct drm_device *dev)
227 if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) 228 if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
228 sh_flags = IRQF_SHARED; 229 sh_flags = IRQF_SHARED;
229 230
231 if (dev->devname)
232 irqname = dev->devname;
233 else
234 irqname = dev->driver->name;
235
230 ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler, 236 ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
231 sh_flags, dev->devname, dev); 237 sh_flags, irqname, dev);
232 238
233 if (ret < 0) { 239 if (ret < 0) {
234 mutex_lock(&dev->struct_mutex); 240 mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 367c590ffbb..7819fd930a5 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -42,8 +42,11 @@
42 */ 42 */
43 43
44#include "drmP.h" 44#include "drmP.h"
45#include "drm_mm.h"
45#include <linux/slab.h> 46#include <linux/slab.h>
46 47
48#define MM_UNUSED_TARGET 4
49
47unsigned long drm_mm_tail_space(struct drm_mm *mm) 50unsigned long drm_mm_tail_space(struct drm_mm *mm)
48{ 51{
49 struct list_head *tail_node; 52 struct list_head *tail_node;
@@ -74,16 +77,62 @@ int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
74 return 0; 77 return 0;
75} 78}
76 79
80static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
81{
82 struct drm_mm_node *child;
83
84 if (atomic)
85 child = kmalloc(sizeof(*child), GFP_ATOMIC);
86 else
87 child = kmalloc(sizeof(*child), GFP_KERNEL);
88
89 if (unlikely(child == NULL)) {
90 spin_lock(&mm->unused_lock);
91 if (list_empty(&mm->unused_nodes))
92 child = NULL;
93 else {
94 child =
95 list_entry(mm->unused_nodes.next,
96 struct drm_mm_node, fl_entry);
97 list_del(&child->fl_entry);
98 --mm->num_unused;
99 }
100 spin_unlock(&mm->unused_lock);
101 }
102 return child;
103}
104
105int drm_mm_pre_get(struct drm_mm *mm)
106{
107 struct drm_mm_node *node;
108
109 spin_lock(&mm->unused_lock);
110 while (mm->num_unused < MM_UNUSED_TARGET) {
111 spin_unlock(&mm->unused_lock);
112 node = kmalloc(sizeof(*node), GFP_KERNEL);
113 spin_lock(&mm->unused_lock);
114
115 if (unlikely(node == NULL)) {
116 int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
117 spin_unlock(&mm->unused_lock);
118 return ret;
119 }
120 ++mm->num_unused;
121 list_add_tail(&node->fl_entry, &mm->unused_nodes);
122 }
123 spin_unlock(&mm->unused_lock);
124 return 0;
125}
126EXPORT_SYMBOL(drm_mm_pre_get);
77 127
78static int drm_mm_create_tail_node(struct drm_mm *mm, 128static int drm_mm_create_tail_node(struct drm_mm *mm,
79 unsigned long start, 129 unsigned long start,
80 unsigned long size) 130 unsigned long size, int atomic)
81{ 131{
82 struct drm_mm_node *child; 132 struct drm_mm_node *child;
83 133
84 child = (struct drm_mm_node *) 134 child = drm_mm_kmalloc(mm, atomic);
85 drm_alloc(sizeof(*child), DRM_MEM_MM); 135 if (unlikely(child == NULL))
86 if (!child)
87 return -ENOMEM; 136 return -ENOMEM;
88 137
89 child->free = 1; 138 child->free = 1;
@@ -97,8 +146,7 @@ static int drm_mm_create_tail_node(struct drm_mm *mm,
97 return 0; 146 return 0;
98} 147}
99 148
100 149int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
101int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
102{ 150{
103 struct list_head *tail_node; 151 struct list_head *tail_node;
104 struct drm_mm_node *entry; 152 struct drm_mm_node *entry;
@@ -106,20 +154,21 @@ int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
106 tail_node = mm->ml_entry.prev; 154 tail_node = mm->ml_entry.prev;
107 entry = list_entry(tail_node, struct drm_mm_node, ml_entry); 155 entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
108 if (!entry->free) { 156 if (!entry->free) {
109 return drm_mm_create_tail_node(mm, entry->start + entry->size, size); 157 return drm_mm_create_tail_node(mm, entry->start + entry->size,
158 size, atomic);
110 } 159 }
111 entry->size += size; 160 entry->size += size;
112 return 0; 161 return 0;
113} 162}
114 163
115static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, 164static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
116 unsigned long size) 165 unsigned long size,
166 int atomic)
117{ 167{
118 struct drm_mm_node *child; 168 struct drm_mm_node *child;
119 169
120 child = (struct drm_mm_node *) 170 child = drm_mm_kmalloc(parent->mm, atomic);
121 drm_alloc(sizeof(*child), DRM_MEM_MM); 171 if (unlikely(child == NULL))
122 if (!child)
123 return NULL; 172 return NULL;
124 173
125 INIT_LIST_HEAD(&child->fl_entry); 174 INIT_LIST_HEAD(&child->fl_entry);
@@ -151,8 +200,9 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
151 tmp = parent->start % alignment; 200 tmp = parent->start % alignment;
152 201
153 if (tmp) { 202 if (tmp) {
154 align_splitoff = drm_mm_split_at_start(parent, alignment - tmp); 203 align_splitoff =
155 if (!align_splitoff) 204 drm_mm_split_at_start(parent, alignment - tmp, 0);
205 if (unlikely(align_splitoff == NULL))
156 return NULL; 206 return NULL;
157 } 207 }
158 208
@@ -161,7 +211,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
161 parent->free = 0; 211 parent->free = 0;
162 return parent; 212 return parent;
163 } else { 213 } else {
164 child = drm_mm_split_at_start(parent, size); 214 child = drm_mm_split_at_start(parent, size, 0);
165 } 215 }
166 216
167 if (align_splitoff) 217 if (align_splitoff)
@@ -169,14 +219,49 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
169 219
170 return child; 220 return child;
171} 221}
222
172EXPORT_SYMBOL(drm_mm_get_block); 223EXPORT_SYMBOL(drm_mm_get_block);
173 224
225struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
226 unsigned long size,
227 unsigned alignment)
228{
229
230 struct drm_mm_node *align_splitoff = NULL;
231 struct drm_mm_node *child;
232 unsigned tmp = 0;
233
234 if (alignment)
235 tmp = parent->start % alignment;
236
237 if (tmp) {
238 align_splitoff =
239 drm_mm_split_at_start(parent, alignment - tmp, 1);
240 if (unlikely(align_splitoff == NULL))
241 return NULL;
242 }
243
244 if (parent->size == size) {
245 list_del_init(&parent->fl_entry);
246 parent->free = 0;
247 return parent;
248 } else {
249 child = drm_mm_split_at_start(parent, size, 1);
250 }
251
252 if (align_splitoff)
253 drm_mm_put_block(align_splitoff);
254
255 return child;
256}
257EXPORT_SYMBOL(drm_mm_get_block_atomic);
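
/*
 * Hedged usage note: callers in atomic context are assumed to call
 * drm_mm_pre_get() beforehand so that, if the GFP_ATOMIC allocation fails,
 * drm_mm_kmalloc() can fall back to the pre-filled unused_nodes pool.
 */
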
258
174/* 259/*
175 * Put a block. Merge with the previous and / or next block if they are free. 260 * Put a block. Merge with the previous and / or next block if they are free.
176 * Otherwise add to the free stack. 261 * Otherwise add to the free stack.
177 */ 262 */
178 263
179void drm_mm_put_block(struct drm_mm_node * cur) 264void drm_mm_put_block(struct drm_mm_node *cur)
180{ 265{
181 266
182 struct drm_mm *mm = cur->mm; 267 struct drm_mm *mm = cur->mm;
@@ -188,21 +273,27 @@ void drm_mm_put_block(struct drm_mm_node * cur)
188 int merged = 0; 273 int merged = 0;
189 274
190 if (cur_head->prev != root_head) { 275 if (cur_head->prev != root_head) {
191 prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry); 276 prev_node =
277 list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
192 if (prev_node->free) { 278 if (prev_node->free) {
193 prev_node->size += cur->size; 279 prev_node->size += cur->size;
194 merged = 1; 280 merged = 1;
195 } 281 }
196 } 282 }
197 if (cur_head->next != root_head) { 283 if (cur_head->next != root_head) {
198 next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry); 284 next_node =
285 list_entry(cur_head->next, struct drm_mm_node, ml_entry);
199 if (next_node->free) { 286 if (next_node->free) {
200 if (merged) { 287 if (merged) {
201 prev_node->size += next_node->size; 288 prev_node->size += next_node->size;
202 list_del(&next_node->ml_entry); 289 list_del(&next_node->ml_entry);
203 list_del(&next_node->fl_entry); 290 list_del(&next_node->fl_entry);
204 drm_free(next_node, sizeof(*next_node), 291 if (mm->num_unused < MM_UNUSED_TARGET) {
205 DRM_MEM_MM); 292 list_add(&next_node->fl_entry,
293 &mm->unused_nodes);
294 ++mm->num_unused;
295 } else
296 kfree(next_node);
206 } else { 297 } else {
207 next_node->size += cur->size; 298 next_node->size += cur->size;
208 next_node->start = cur->start; 299 next_node->start = cur->start;
@@ -215,14 +306,19 @@ void drm_mm_put_block(struct drm_mm_node * cur)
215 list_add(&cur->fl_entry, &mm->fl_entry); 306 list_add(&cur->fl_entry, &mm->fl_entry);
216 } else { 307 } else {
217 list_del(&cur->ml_entry); 308 list_del(&cur->ml_entry);
218 drm_free(cur, sizeof(*cur), DRM_MEM_MM); 309 if (mm->num_unused < MM_UNUSED_TARGET) {
310 list_add(&cur->fl_entry, &mm->unused_nodes);
311 ++mm->num_unused;
312 } else
313 kfree(cur);
219 } 314 }
220} 315}
316
221EXPORT_SYMBOL(drm_mm_put_block); 317EXPORT_SYMBOL(drm_mm_put_block);
222 318
223struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, 319struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
224 unsigned long size, 320 unsigned long size,
225 unsigned alignment, int best_match) 321 unsigned alignment, int best_match)
226{ 322{
227 struct list_head *list; 323 struct list_head *list;
228 const struct list_head *free_stack = &mm->fl_entry; 324 const struct list_head *free_stack = &mm->fl_entry;
@@ -247,7 +343,6 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
247 wasted += alignment - tmp; 343 wasted += alignment - tmp;
248 } 344 }
249 345
250
251 if (entry->size >= size + wasted) { 346 if (entry->size >= size + wasted) {
252 if (!best_match) 347 if (!best_match)
253 return entry; 348 return entry;
@@ -260,6 +355,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
260 355
261 return best; 356 return best;
262} 357}
358EXPORT_SYMBOL(drm_mm_search_free);
263 359
264int drm_mm_clean(struct drm_mm * mm) 360int drm_mm_clean(struct drm_mm * mm)
265{ 361{
@@ -267,14 +363,17 @@ int drm_mm_clean(struct drm_mm * mm)
267 363
268 return (head->next->next == head); 364 return (head->next->next == head);
269} 365}
270EXPORT_SYMBOL(drm_mm_search_free); 366EXPORT_SYMBOL(drm_mm_clean);
271 367
272int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) 368int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
273{ 369{
274 INIT_LIST_HEAD(&mm->ml_entry); 370 INIT_LIST_HEAD(&mm->ml_entry);
275 INIT_LIST_HEAD(&mm->fl_entry); 371 INIT_LIST_HEAD(&mm->fl_entry);
372 INIT_LIST_HEAD(&mm->unused_nodes);
373 mm->num_unused = 0;
374 spin_lock_init(&mm->unused_lock);
276 375
277 return drm_mm_create_tail_node(mm, start, size); 376 return drm_mm_create_tail_node(mm, start, size, 0);
278} 377}
279EXPORT_SYMBOL(drm_mm_init); 378EXPORT_SYMBOL(drm_mm_init);
280 379
@@ -282,6 +381,7 @@ void drm_mm_takedown(struct drm_mm * mm)
282{ 381{
283 struct list_head *bnode = mm->fl_entry.next; 382 struct list_head *bnode = mm->fl_entry.next;
284 struct drm_mm_node *entry; 383 struct drm_mm_node *entry;
384 struct drm_mm_node *next;
285 385
286 entry = list_entry(bnode, struct drm_mm_node, fl_entry); 386 entry = list_entry(bnode, struct drm_mm_node, fl_entry);
287 387
@@ -293,7 +393,16 @@ void drm_mm_takedown(struct drm_mm * mm)
293 393
294 list_del(&entry->fl_entry); 394 list_del(&entry->fl_entry);
295 list_del(&entry->ml_entry); 395 list_del(&entry->ml_entry);
396 kfree(entry);
397
398 spin_lock(&mm->unused_lock);
399 list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
400 list_del(&entry->fl_entry);
401 kfree(entry);
402 --mm->num_unused;
403 }
404 spin_unlock(&mm->unused_lock);
296 405
297 drm_free(entry, sizeof(*entry), DRM_MEM_MM); 406 BUG_ON(mm->num_unused != 0);
298} 407}
299EXPORT_SYMBOL(drm_mm_takedown); 408EXPORT_SYMBOL(drm_mm_takedown);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index c9b80fdd463..54f492a488a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -38,6 +38,7 @@
38#include "drm.h" 38#include "drm.h"
39#include "drm_crtc.h" 39#include "drm_crtc.h"
40 40
41#define DRM_MODESET_DEBUG "drm_mode"
41/** 42/**
42 * drm_mode_debug_printmodeline - debug print a mode 43 * drm_mode_debug_printmodeline - debug print a mode
43 * @dev: DRM device 44 * @dev: DRM device
@@ -50,12 +51,13 @@
50 */ 51 */
51void drm_mode_debug_printmodeline(struct drm_display_mode *mode) 52void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
52{ 53{
53 DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n", 54 DRM_DEBUG_MODE(DRM_MODESET_DEBUG,
54 mode->base.id, mode->name, mode->vrefresh, mode->clock, 55 "Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
55 mode->hdisplay, mode->hsync_start, 56 mode->base.id, mode->name, mode->vrefresh, mode->clock,
56 mode->hsync_end, mode->htotal, 57 mode->hdisplay, mode->hsync_start,
57 mode->vdisplay, mode->vsync_start, 58 mode->hsync_end, mode->htotal,
58 mode->vsync_end, mode->vtotal, mode->type, mode->flags); 59 mode->vdisplay, mode->vsync_start,
60 mode->vsync_end, mode->vtotal, mode->type, mode->flags);
59} 61}
60EXPORT_SYMBOL(drm_mode_debug_printmodeline); 62EXPORT_SYMBOL(drm_mode_debug_printmodeline);
61 63
@@ -401,7 +403,9 @@ void drm_mode_prune_invalid(struct drm_device *dev,
401 list_del(&mode->head); 403 list_del(&mode->head);
402 if (verbose) { 404 if (verbose) {
403 drm_mode_debug_printmodeline(mode); 405 drm_mode_debug_printmodeline(mode);
404 DRM_DEBUG("Not using %s mode %d\n", mode->name, mode->status); 406 DRM_DEBUG_MODE(DRM_MODESET_DEBUG,
407 "Not using %s mode %d\n",
408 mode->name, mode->status);
405 } 409 }
406 drm_mode_destroy(dev, mode); 410 drm_mode_destroy(dev, mode);
407 } 411 }
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index b9631e3a1ea..89050684fe0 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -51,7 +51,22 @@ struct idr drm_minors_idr;
51struct class *drm_class; 51struct class *drm_class;
52struct proc_dir_entry *drm_proc_root; 52struct proc_dir_entry *drm_proc_root;
53struct dentry *drm_debugfs_root; 53struct dentry *drm_debugfs_root;
54 54void drm_ut_debug_printk(unsigned int request_level,
55 const char *prefix,
56 const char *function_name,
57 const char *format, ...)
58{
59 va_list args;
60
61 if (drm_debug & request_level) {
62 if (function_name)
63 printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
64 va_start(args, format);
65 vprintk(format, args);
66 va_end(args);
67 }
68}
69EXPORT_SYMBOL(drm_ut_debug_printk);
55static int drm_minor_get_id(struct drm_device *dev, int type) 70static int drm_minor_get_id(struct drm_device *dev, int type)
56{ 71{
57 int new_id; 72 int new_id;
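
drm_ut_debug_printk() above is the shared helper behind the new
DRM_DEBUG_MODE/DRM_DEBUG_DRIVER macros: output is gated on a category bit
in the drm_debug mask and prefixed with a subsystem tag plus the calling
function's name. A hedged user-space sketch of the same bitmask-gated
variadic logger, with stderr standing in for printk:

    #include <stdarg.h>
    #include <stdio.h>

    static unsigned int debug_mask;     /* stands in for the drm_debug parameter */

    static void ut_debug_printk(unsigned int request_level, const char *prefix,
                                const char *function_name, const char *format, ...)
    {
        va_list args;

        if (!(debug_mask & request_level))
            return;                     /* category disabled: print nothing */
        if (function_name)
            fprintf(stderr, "[%s:%s], ", prefix, function_name);
        va_start(args, format);
        vfprintf(stderr, format, args); /* forward the caller's varargs */
        va_end(args);
    }
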
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 8f9372921f8..9987ab88083 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -147,7 +147,7 @@ static ssize_t status_show(struct device *device,
147 enum drm_connector_status status; 147 enum drm_connector_status status;
148 148
149 status = connector->funcs->detect(connector); 149 status = connector->funcs->detect(connector);
150 return snprintf(buf, PAGE_SIZE, "%s", 150 return snprintf(buf, PAGE_SIZE, "%s\n",
151 drm_get_connector_status_name(status)); 151 drm_get_connector_status_name(status));
152} 152}
153 153
@@ -166,7 +166,7 @@ static ssize_t dpms_show(struct device *device,
166 if (ret) 166 if (ret)
167 return 0; 167 return 0;
168 168
169 return snprintf(buf, PAGE_SIZE, "%s", 169 return snprintf(buf, PAGE_SIZE, "%s\n",
170 drm_get_dpms_name((int)dpms_status)); 170 drm_get_dpms_name((int)dpms_status));
171} 171}
172 172
@@ -176,7 +176,7 @@ static ssize_t enabled_show(struct device *device,
176{ 176{
177 struct drm_connector *connector = to_drm_connector(device); 177 struct drm_connector *connector = to_drm_connector(device);
178 178
179 return snprintf(buf, PAGE_SIZE, connector->encoder ? "enabled" : 179 return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" :
180 "disabled"); 180 "disabled");
181} 181}
182 182
@@ -317,6 +317,7 @@ static struct device_attribute connector_attrs_opt1[] = {
317 317
318static struct bin_attribute edid_attr = { 318static struct bin_attribute edid_attr = {
319 .attr.name = "edid", 319 .attr.name = "edid",
320 .attr.mode = 0444,
320 .size = 128, 321 .size = 128,
321 .read = edid_show, 322 .read = edid_show,
322}; 323};
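
The drm_sysfs.c hunks append the trailing newline each show() result was
missing; a sysfs attribute is expected to read back as a single
newline-terminated value. A tiny sketch of the convention, with the
function name and buffer length as stand-ins for the real callback and
PAGE_SIZE:

    #include <stdio.h>

    /* Emit exactly one newline-terminated value, bounded by the buffer. */
    static long show_value(char *buf, size_t buf_len, const char *value)
    {
        return snprintf(buf, buf_len, "%s\n", value);
    }
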
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 53d54455262..1a60626f680 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -33,6 +33,8 @@
33#include "i915_drm.h" 33#include "i915_drm.h"
34#include "i915_drv.h" 34#include "i915_drv.h"
35 35
36#define I915_DRV "i915_drv"
37
36/* Really want an OS-independent resettable timer. Would like to have 38/* Really want an OS-independent resettable timer. Would like to have
37 * this loop run for (eg) 3 sec, but have the timer reset every time 39 * this loop run for (eg) 3 sec, but have the timer reset every time
38 * the head pointer changes, so that EBUSY only happens if the ring 40 * the head pointer changes, so that EBUSY only happens if the ring
@@ -99,7 +101,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
99 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 101 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
100 102
101 I915_WRITE(HWS_PGA, dev_priv->dma_status_page); 103 I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
102 DRM_DEBUG("Enabled hardware status page\n"); 104 DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
103 return 0; 105 return 0;
104} 106}
105 107
@@ -185,7 +187,8 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
185 master_priv->sarea_priv = (drm_i915_sarea_t *) 187 master_priv->sarea_priv = (drm_i915_sarea_t *)
186 ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); 188 ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
187 } else { 189 } else {
188 DRM_DEBUG("sarea not found assuming DRI2 userspace\n"); 190 DRM_DEBUG_DRIVER(I915_DRV,
 191 		"sarea not found, assuming DRI2 userspace\n");
189 } 192 }
190 193
191 if (init->ring_size != 0) { 194 if (init->ring_size != 0) {
@@ -235,7 +238,7 @@ static int i915_dma_resume(struct drm_device * dev)
235{ 238{
236 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 239 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
237 240
238 DRM_DEBUG("%s\n", __func__); 241 DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);
239 242
240 if (dev_priv->ring.map.handle == NULL) { 243 if (dev_priv->ring.map.handle == NULL) {
241 DRM_ERROR("can not ioremap virtual address for" 244 DRM_ERROR("can not ioremap virtual address for"
@@ -248,13 +251,14 @@ static int i915_dma_resume(struct drm_device * dev)
248 DRM_ERROR("Can not find hardware status page\n"); 251 DRM_ERROR("Can not find hardware status page\n");
249 return -EINVAL; 252 return -EINVAL;
250 } 253 }
251 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); 254 DRM_DEBUG_DRIVER(I915_DRV, "hw status page @ %p\n",
255 dev_priv->hw_status_page);
252 256
253 if (dev_priv->status_gfx_addr != 0) 257 if (dev_priv->status_gfx_addr != 0)
254 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); 258 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
255 else 259 else
256 I915_WRITE(HWS_PGA, dev_priv->dma_status_page); 260 I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
257 DRM_DEBUG("Enabled hardware status page\n"); 261 DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
258 262
259 return 0; 263 return 0;
260} 264}
@@ -548,10 +552,10 @@ static int i915_dispatch_flip(struct drm_device * dev)
548 if (!master_priv->sarea_priv) 552 if (!master_priv->sarea_priv)
549 return -EINVAL; 553 return -EINVAL;
550 554
551 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", 555 DRM_DEBUG_DRIVER(I915_DRV, "%s: page=%d pfCurrentPage=%d\n",
552 __func__, 556 __func__,
553 dev_priv->current_page, 557 dev_priv->current_page,
554 master_priv->sarea_priv->pf_current_page); 558 master_priv->sarea_priv->pf_current_page);
555 559
556 i915_kernel_lost_context(dev); 560 i915_kernel_lost_context(dev);
557 561
@@ -629,8 +633,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
629 return -EINVAL; 633 return -EINVAL;
630 } 634 }
631 635
632 DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n", 636 DRM_DEBUG_DRIVER(I915_DRV,
633 batch->start, batch->used, batch->num_cliprects); 637 "i915 batchbuffer, start %x used %d cliprects %d\n",
638 batch->start, batch->used, batch->num_cliprects);
634 639
635 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 640 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
636 641
@@ -678,8 +683,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
678 void *batch_data; 683 void *batch_data;
679 int ret; 684 int ret;
680 685
681 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 686 DRM_DEBUG_DRIVER(I915_DRV,
682 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); 687 "i915 cmdbuffer, buf %p sz %d cliprects %d\n",
688 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
683 689
684 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 690 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
685 691
@@ -734,7 +740,7 @@ static int i915_flip_bufs(struct drm_device *dev, void *data,
734{ 740{
735 int ret; 741 int ret;
736 742
737 DRM_DEBUG("%s\n", __func__); 743 DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);
738 744
739 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 745 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
740 746
@@ -777,7 +783,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
777 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; 783 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
778 break; 784 break;
779 default: 785 default:
780 DRM_DEBUG("Unknown parameter %d\n", param->param); 786 DRM_DEBUG_DRIVER(I915_DRV, "Unknown parameter %d\n",
787 param->param);
781 return -EINVAL; 788 return -EINVAL;
782 } 789 }
783 790
@@ -817,7 +824,8 @@ static int i915_setparam(struct drm_device *dev, void *data,
817 dev_priv->fence_reg_start = param->value; 824 dev_priv->fence_reg_start = param->value;
818 break; 825 break;
819 default: 826 default:
820 DRM_DEBUG("unknown parameter %d\n", param->param); 827 DRM_DEBUG_DRIVER(I915_DRV, "unknown parameter %d\n",
828 param->param);
821 return -EINVAL; 829 return -EINVAL;
822 } 830 }
823 831
@@ -865,9 +873,10 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
865 873
866 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 874 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
867 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); 875 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
868 DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n", 876 DRM_DEBUG_DRIVER(I915_DRV, "load hws HWS_PGA with gfx mem 0x%x\n",
869 dev_priv->status_gfx_addr); 877 dev_priv->status_gfx_addr);
870 DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page); 878 DRM_DEBUG_DRIVER(I915_DRV, "load hws at %p\n",
879 dev_priv->hw_status_page);
871 return 0; 880 return 0;
872} 881}
873 882
@@ -922,7 +931,7 @@ static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size,
922 * Some of the preallocated space is taken by the GTT 931 * Some of the preallocated space is taken by the GTT
923 * and popup. GTT is 1K per MB of aperture size, and popup is 4K. 932 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
924 */ 933 */
925 if (IS_G4X(dev) || IS_IGD(dev)) 934 if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
926 overhead = 4096; 935 overhead = 4096;
927 else 936 else
928 overhead = (*aperture_size / 1024) + 4096; 937 overhead = (*aperture_size / 1024) + 4096;
@@ -987,12 +996,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
987 int fb_bar = IS_I9XX(dev) ? 2 : 0; 996 int fb_bar = IS_I9XX(dev) ? 2 : 0;
988 int ret = 0; 997 int ret = 0;
989 998
990 dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL);
991 if (!dev->devname) {
992 ret = -ENOMEM;
993 goto out;
994 }
995
996 dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & 999 dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
997 0xff000000; 1000 0xff000000;
998 1001
@@ -1006,7 +1009,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
1006 1009
1007 ret = i915_probe_agp(dev, &agp_size, &prealloc_size); 1010 ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
1008 if (ret) 1011 if (ret)
1009 goto kfree_devname; 1012 goto out;
1010 1013
1011 /* Basic memrange allocator for stolen space (aka vram) */ 1014 /* Basic memrange allocator for stolen space (aka vram) */
1012 drm_mm_init(&dev_priv->vram, 0, prealloc_size); 1015 drm_mm_init(&dev_priv->vram, 0, prealloc_size);
@@ -1024,7 +1027,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
1024 1027
1025 ret = i915_gem_init_ringbuffer(dev); 1028 ret = i915_gem_init_ringbuffer(dev);
1026 if (ret) 1029 if (ret)
1027 goto kfree_devname; 1030 goto out;
1028 1031
1029 /* Allow hardware batchbuffers unless told otherwise. 1032 /* Allow hardware batchbuffers unless told otherwise.
1030 */ 1033 */
@@ -1056,8 +1059,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
1056 1059
1057destroy_ringbuffer: 1060destroy_ringbuffer:
1058 i915_gem_cleanup_ringbuffer(dev); 1061 i915_gem_cleanup_ringbuffer(dev);
1059kfree_devname:
1060 kfree(dev->devname);
1061out: 1062out:
1062 return ret; 1063 return ret;
1063} 1064}
@@ -1161,8 +1162,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1161#endif 1162#endif
1162 1163
1163 dev->driver->get_vblank_counter = i915_get_vblank_counter; 1164 dev->driver->get_vblank_counter = i915_get_vblank_counter;
1164 if (IS_GM45(dev)) 1165 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
1166 if (IS_G4X(dev) || IS_IGDNG(dev)) {
1167 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
1165 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 1168 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
1169 }
1166 1170
1167 i915_gem_load(dev); 1171 i915_gem_load(dev);
1168 1172
@@ -1206,7 +1210,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1206 } 1210 }
1207 1211
1208 /* Must be done after probing outputs */ 1212 /* Must be done after probing outputs */
1209 intel_opregion_init(dev, 0); 1213 /* FIXME: verify on IGDNG */
1214 if (!IS_IGDNG(dev))
1215 intel_opregion_init(dev, 0);
1210 1216
1211 return 0; 1217 return 0;
1212 1218
@@ -1240,7 +1246,8 @@ int i915_driver_unload(struct drm_device *dev)
1240 if (dev_priv->regs != NULL) 1246 if (dev_priv->regs != NULL)
1241 iounmap(dev_priv->regs); 1247 iounmap(dev_priv->regs);
1242 1248
1243 intel_opregion_free(dev, 0); 1249 if (!IS_IGDNG(dev))
1250 intel_opregion_free(dev, 0);
1244 1251
1245 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1252 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1246 intel_modeset_cleanup(dev); 1253 intel_modeset_cleanup(dev);
@@ -1264,7 +1271,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1264{ 1271{
1265 struct drm_i915_file_private *i915_file_priv; 1272 struct drm_i915_file_private *i915_file_priv;
1266 1273
1267 DRM_DEBUG("\n"); 1274 DRM_DEBUG_DRIVER(I915_DRV, "\n");
1268 i915_file_priv = (struct drm_i915_file_private *) 1275 i915_file_priv = (struct drm_i915_file_private *)
1269 drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES); 1276 drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
1270 1277
@@ -1273,8 +1280,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1273 1280
1274 file_priv->driver_priv = i915_file_priv; 1281 file_priv->driver_priv = i915_file_priv;
1275 1282
1276 i915_file_priv->mm.last_gem_seqno = 0; 1283 INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
1277 i915_file_priv->mm.last_gem_throttle_seqno = 0;
1278 1284
1279 return 0; 1285 return 0;
1280} 1286}
@@ -1311,6 +1317,7 @@ void i915_driver_lastclose(struct drm_device * dev)
1311void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) 1317void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1312{ 1318{
1313 drm_i915_private_t *dev_priv = dev->dev_private; 1319 drm_i915_private_t *dev_priv = dev->dev_private;
1320 i915_gem_release(dev, file_priv);
1314 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 1321 if (!drm_core_check_feature(dev, DRIVER_MODESET))
1315 i915_mem_release(dev, file_priv, dev_priv->agp_heap); 1322 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
1316} 1323}
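
Besides routing DRM_DEBUG through DRM_DEBUG_DRIVER, the i915_dma.c hunks
set a 24-bit default max_vblank_count and claim the full 32-bit hardware
counter only on G4X/IGDNG. With a counter narrower than 32 bits, deltas
must be reduced modulo the counter width; a sketch of the wrap-safe
subtraction, where the mask is the driver's max_vblank_count:

    #include <stdint.h>

    /* mask is 0xffffff for a 24-bit counter, 0xffffffff for a full one. */
    static uint32_t vblank_delta(uint32_t cur, uint32_t last, uint32_t mask)
    {
        return (cur - last) & mask;     /* unsigned wrap, then truncate */
    }
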
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9b149fe824c..8ef6bcec211 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -126,6 +126,13 @@ struct drm_i915_fence_reg {
126 struct drm_gem_object *obj; 126 struct drm_gem_object *obj;
127}; 127};
128 128
129struct sdvo_device_mapping {
130 u8 dvo_port;
131 u8 slave_addr;
132 u8 dvo_wiring;
133 u8 initialized;
134};
135
129typedef struct drm_i915_private { 136typedef struct drm_i915_private {
130 struct drm_device *dev; 137 struct drm_device *dev;
131 138
@@ -143,6 +150,8 @@ typedef struct drm_i915_private {
143 drm_local_map_t hws_map; 150 drm_local_map_t hws_map;
144 struct drm_gem_object *hws_obj; 151 struct drm_gem_object *hws_obj;
145 152
153 struct resource mch_res;
154
146 unsigned int cpp; 155 unsigned int cpp;
147 int back_offset; 156 int back_offset;
148 int front_offset; 157 int front_offset;
@@ -158,6 +167,11 @@ typedef struct drm_i915_private {
158 /** Cached value of IMR to avoid reads in updating the bitfield */ 167 /** Cached value of IMR to avoid reads in updating the bitfield */
159 u32 irq_mask_reg; 168 u32 irq_mask_reg;
160 u32 pipestat[2]; 169 u32 pipestat[2];
 170 /** split irq regs for graphics and display engine on IGDNG,
171 irq_mask_reg is still used for display irq. */
172 u32 gt_irq_mask_reg;
173 u32 gt_irq_enable_reg;
174 u32 de_irq_enable_reg;
161 175
162 u32 hotplug_supported_mask; 176 u32 hotplug_supported_mask;
163 struct work_struct hotplug_work; 177 struct work_struct hotplug_work;
@@ -180,7 +194,8 @@ typedef struct drm_i915_private {
180 int backlight_duty_cycle; /* restore backlight to this value */ 194 int backlight_duty_cycle; /* restore backlight to this value */
181 bool panel_wants_dither; 195 bool panel_wants_dither;
182 struct drm_display_mode *panel_fixed_mode; 196 struct drm_display_mode *panel_fixed_mode;
183 struct drm_display_mode *vbt_mode; /* if any */ 197 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
198 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
184 199
185 /* Feature bits from the VBIOS */ 200 /* Feature bits from the VBIOS */
186 unsigned int int_tv_support:1; 201 unsigned int int_tv_support:1;
@@ -284,6 +299,13 @@ typedef struct drm_i915_private {
284 u8 saveDACMASK; 299 u8 saveDACMASK;
285 u8 saveCR[37]; 300 u8 saveCR[37];
286 uint64_t saveFENCE[16]; 301 uint64_t saveFENCE[16];
302 u32 saveCURACNTR;
303 u32 saveCURAPOS;
304 u32 saveCURABASE;
305 u32 saveCURBCNTR;
306 u32 saveCURBPOS;
307 u32 saveCURBBASE;
308 u32 saveCURSIZE;
287 309
288 struct { 310 struct {
289 struct drm_mm gtt_space; 311 struct drm_mm gtt_space;
@@ -381,6 +403,7 @@ typedef struct drm_i915_private {
381 /* storage for physical objects */ 403 /* storage for physical objects */
382 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; 404 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
383 } mm; 405 } mm;
406 struct sdvo_device_mapping sdvo_mappings[2];
384} drm_i915_private_t; 407} drm_i915_private_t;
385 408
386/** driver private structure attached to each drm_gem_object */ 409/** driver private structure attached to each drm_gem_object */
@@ -490,13 +513,16 @@ struct drm_i915_gem_request {
490 /** Time at which this request was emitted, in jiffies. */ 513 /** Time at which this request was emitted, in jiffies. */
491 unsigned long emitted_jiffies; 514 unsigned long emitted_jiffies;
492 515
516 /** global list entry for this request */
493 struct list_head list; 517 struct list_head list;
518
519 /** file_priv list entry for this request */
520 struct list_head client_list;
494}; 521};
495 522
496struct drm_i915_file_private { 523struct drm_i915_file_private {
497 struct { 524 struct {
498 uint32_t last_gem_seqno; 525 struct list_head request_list;
499 uint32_t last_gem_throttle_seqno;
500 } mm; 526 } mm;
501}; 527};
502 528
@@ -641,6 +667,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
641void i915_gem_free_all_phys_object(struct drm_device *dev); 667void i915_gem_free_all_phys_object(struct drm_device *dev);
642int i915_gem_object_get_pages(struct drm_gem_object *obj); 668int i915_gem_object_get_pages(struct drm_gem_object *obj);
643void i915_gem_object_put_pages(struct drm_gem_object *obj); 669void i915_gem_object_put_pages(struct drm_gem_object *obj);
670void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
644 671
645/* i915_gem_tiling.c */ 672/* i915_gem_tiling.c */
646void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 673void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -784,7 +811,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
784 (dev)->pci_device == 0x2E02 || \ 811 (dev)->pci_device == 0x2E02 || \
785 (dev)->pci_device == 0x2E12 || \ 812 (dev)->pci_device == 0x2E12 || \
786 (dev)->pci_device == 0x2E22 || \ 813 (dev)->pci_device == 0x2E22 || \
787 (dev)->pci_device == 0x2E32) 814 (dev)->pci_device == 0x2E32 || \
815 (dev)->pci_device == 0x0042 || \
816 (dev)->pci_device == 0x0046)
788 817
789#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \ 818#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \
790 (dev)->pci_device == 0x2A12) 819 (dev)->pci_device == 0x2A12)
@@ -806,20 +835,26 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
806 (dev)->pci_device == 0x29D2 || \ 835 (dev)->pci_device == 0x29D2 || \
807 (IS_IGD(dev))) 836 (IS_IGD(dev)))
808 837
838#define IS_IGDNG_D(dev) ((dev)->pci_device == 0x0042)
839#define IS_IGDNG_M(dev) ((dev)->pci_device == 0x0046)
840#define IS_IGDNG(dev) (IS_IGDNG_D(dev) || IS_IGDNG_M(dev))
841
809#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ 842#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
810 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) 843 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
844 IS_IGDNG(dev))
811 845
812#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ 846#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
813 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \ 847 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
814 IS_IGD(dev)) 848 IS_IGD(dev) || IS_IGDNG_M(dev))
815 849
816#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) 850#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
851 IS_IGDNG(dev))
817/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 852/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
818 * rows, which changed the alignment requirements and fence programming. 853 * rows, which changed the alignment requirements and fence programming.
819 */ 854 */
820#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ 855#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
821 IS_I915GM(dev))) 856 IS_I915GM(dev)))
822#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev)) 857#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
823#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) 858#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
824 859
825#define PRIMARY_RINGBUFFER_SIZE (128*1024) 860#define PRIMARY_RINGBUFFER_SIZE (128*1024)
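
The new IS_IGDNG_D/IS_IGDNG_M/IS_IGDNG macros follow this header's
existing pattern: each platform check reduces to a PCI device ID
comparison, and composite checks (IS_I9XX, IS_MOBILE, ...) are built from
the per-device ones so a new chip is wired up in one place. A standalone
sketch of the pattern; the MY_ prefix and the helper are illustrative,
only the 0x0042/0x0046 IDs come from the hunk above:

    #define MY_IS_IGDNG_D(id) ((id) == 0x0042)
    #define MY_IS_IGDNG_M(id) ((id) == 0x0046)
    #define MY_IS_IGDNG(id)   (MY_IS_IGDNG_D(id) || MY_IS_IGDNG_M(id))

    /* Composite checks build on the per-device macros. */
    static int needs_gfx_hws(unsigned short id)
    {
        return MY_IS_IGDNG(id);
    }
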
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b189b49c760..c0ae6bbbd9b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -349,7 +349,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
349 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; 349 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
350 num_pages = last_data_page - first_data_page + 1; 350 num_pages = last_data_page - first_data_page + 1;
351 351
352 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); 352 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
353 if (user_pages == NULL) 353 if (user_pages == NULL)
354 return -ENOMEM; 354 return -ENOMEM;
355 355
@@ -429,7 +429,7 @@ fail_put_user_pages:
429 SetPageDirty(user_pages[i]); 429 SetPageDirty(user_pages[i]);
430 page_cache_release(user_pages[i]); 430 page_cache_release(user_pages[i]);
431 } 431 }
432 kfree(user_pages); 432 drm_free_large(user_pages);
433 433
434 return ret; 434 return ret;
435} 435}
@@ -649,7 +649,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
649 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; 649 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
650 num_pages = last_data_page - first_data_page + 1; 650 num_pages = last_data_page - first_data_page + 1;
651 651
652 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); 652 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
653 if (user_pages == NULL) 653 if (user_pages == NULL)
654 return -ENOMEM; 654 return -ENOMEM;
655 655
@@ -719,7 +719,7 @@ out_unlock:
719out_unpin_pages: 719out_unpin_pages:
720 for (i = 0; i < pinned_pages; i++) 720 for (i = 0; i < pinned_pages; i++)
721 page_cache_release(user_pages[i]); 721 page_cache_release(user_pages[i]);
722 kfree(user_pages); 722 drm_free_large(user_pages);
723 723
724 return ret; 724 return ret;
725} 725}
@@ -824,7 +824,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
824 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; 824 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
825 num_pages = last_data_page - first_data_page + 1; 825 num_pages = last_data_page - first_data_page + 1;
826 826
827 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); 827 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
828 if (user_pages == NULL) 828 if (user_pages == NULL)
829 return -ENOMEM; 829 return -ENOMEM;
830 830
@@ -902,7 +902,7 @@ fail_unlock:
902fail_put_user_pages: 902fail_put_user_pages:
903 for (i = 0; i < pinned_pages; i++) 903 for (i = 0; i < pinned_pages; i++)
904 page_cache_release(user_pages[i]); 904 page_cache_release(user_pages[i]);
905 kfree(user_pages); 905 drm_free_large(user_pages);
906 906
907 return ret; 907 return ret;
908} 908}
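
The kcalloc-to-drm_calloc_large() conversions above matter because
num_pages derives from a user-supplied size: the request can be far
larger than a contiguous kmalloc comfortably serves, and in the kernel
drm_calloc_large() can fall back to vmalloc for big allocations. A
user-space sketch of the defensive shape of such a helper, with calloc
standing in for the kernel allocators:

    #include <stdlib.h>

    static void *calloc_large(size_t nmemb, size_t size)
    {
        /* Reject multiplications that would overflow size_t. */
        if (size != 0 && nmemb > (size_t)-1 / size)
            return NULL;
        return calloc(nmemb, size);     /* kernel: kmalloc or vmalloc path */
    }
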
@@ -989,10 +989,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
989 return -ENODEV; 989 return -ENODEV;
990 990
991 /* Only handle setting domains to types used by the CPU. */ 991 /* Only handle setting domains to types used by the CPU. */
992 if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 992 if (write_domain & I915_GEM_GPU_DOMAINS)
993 return -EINVAL; 993 return -EINVAL;
994 994
995 if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 995 if (read_domains & I915_GEM_GPU_DOMAINS)
996 return -EINVAL; 996 return -EINVAL;
997 997
998 /* Having something in the write domain implies it's in the read 998 /* Having something in the write domain implies it's in the read
@@ -1145,7 +1145,14 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1145 mutex_unlock(&dev->struct_mutex); 1145 mutex_unlock(&dev->struct_mutex);
1146 return VM_FAULT_SIGBUS; 1146 return VM_FAULT_SIGBUS;
1147 } 1147 }
1148 list_add(&obj_priv->list, &dev_priv->mm.inactive_list); 1148
1149 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1150 if (ret) {
1151 mutex_unlock(&dev->struct_mutex);
1152 return VM_FAULT_SIGBUS;
1153 }
1154
1155 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1149 } 1156 }
1150 1157
1151 /* Need a new fence register? */ 1158 /* Need a new fence register? */
@@ -1375,7 +1382,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1375 mutex_unlock(&dev->struct_mutex); 1382 mutex_unlock(&dev->struct_mutex);
1376 return ret; 1383 return ret;
1377 } 1384 }
1378 list_add(&obj_priv->list, &dev_priv->mm.inactive_list); 1385 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1379 } 1386 }
1380 1387
1381 drm_gem_object_unreference(obj); 1388 drm_gem_object_unreference(obj);
@@ -1408,9 +1415,7 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
1408 } 1415 }
1409 obj_priv->dirty = 0; 1416 obj_priv->dirty = 0;
1410 1417
1411 drm_free(obj_priv->pages, 1418 drm_free_large(obj_priv->pages);
1412 page_count * sizeof(struct page *),
1413 DRM_MEM_DRIVER);
1414 obj_priv->pages = NULL; 1419 obj_priv->pages = NULL;
1415} 1420}
1416 1421
@@ -1476,14 +1481,19 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1476 * Returned sequence numbers are nonzero on success. 1481 * Returned sequence numbers are nonzero on success.
1477 */ 1482 */
1478static uint32_t 1483static uint32_t
1479i915_add_request(struct drm_device *dev, uint32_t flush_domains) 1484i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1485 uint32_t flush_domains)
1480{ 1486{
1481 drm_i915_private_t *dev_priv = dev->dev_private; 1487 drm_i915_private_t *dev_priv = dev->dev_private;
1488 struct drm_i915_file_private *i915_file_priv = NULL;
1482 struct drm_i915_gem_request *request; 1489 struct drm_i915_gem_request *request;
1483 uint32_t seqno; 1490 uint32_t seqno;
1484 int was_empty; 1491 int was_empty;
1485 RING_LOCALS; 1492 RING_LOCALS;
1486 1493
1494 if (file_priv != NULL)
1495 i915_file_priv = file_priv->driver_priv;
1496
1487 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER); 1497 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
1488 if (request == NULL) 1498 if (request == NULL)
1489 return 0; 1499 return 0;
@@ -1510,6 +1520,12 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
1510 request->emitted_jiffies = jiffies; 1520 request->emitted_jiffies = jiffies;
1511 was_empty = list_empty(&dev_priv->mm.request_list); 1521 was_empty = list_empty(&dev_priv->mm.request_list);
1512 list_add_tail(&request->list, &dev_priv->mm.request_list); 1522 list_add_tail(&request->list, &dev_priv->mm.request_list);
1523 if (i915_file_priv) {
1524 list_add_tail(&request->client_list,
1525 &i915_file_priv->mm.request_list);
1526 } else {
1527 INIT_LIST_HEAD(&request->client_list);
1528 }
1513 1529
1514 /* Associate any objects on the flushing list matching the write 1530 /* Associate any objects on the flushing list matching the write
1515 * domain we're flushing with our flush. 1531 * domain we're flushing with our flush.
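
A request is now linked twice: onto the global request_list (retirement
order) and, when the submitting client is known, onto that client's
per-file list used for throttling; an ownerless request gets an empty
client link so a later list_del() stays safe. A sketch of the dual-list
bookkeeping, using plain singly linked lists in place of the kernel's
list_head:

    #include <stddef.h>

    struct request {
        struct request *next_global;    /* retirement order */
        struct request *next_client;    /* per-client throttling */
        unsigned int seqno;
    };

    struct client {
        struct request *requests;
    };

    static void add_request(struct request **global, struct client *client,
                            struct request *req)
    {
        req->next_global = *global;
        *global = req;
        if (client) {
            req->next_client = client->requests;
            client->requests = req;
        } else {
            req->next_client = NULL;    /* no owner: client link stays empty */
        }
    }
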
@@ -1659,6 +1675,7 @@ i915_gem_retire_requests(struct drm_device *dev)
1659 i915_gem_retire_request(dev, request); 1675 i915_gem_retire_request(dev, request);
1660 1676
1661 list_del(&request->list); 1677 list_del(&request->list);
1678 list_del(&request->client_list);
1662 drm_free(request, sizeof(*request), DRM_MEM_DRIVER); 1679 drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
1663 } else 1680 } else
1664 break; 1681 break;
@@ -1697,7 +1714,10 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1697 BUG_ON(seqno == 0); 1714 BUG_ON(seqno == 0);
1698 1715
1699 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { 1716 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1700 ier = I915_READ(IER); 1717 if (IS_IGDNG(dev))
1718 ier = I915_READ(DEIER) | I915_READ(GTIER);
1719 else
1720 ier = I915_READ(IER);
1701 if (!ier) { 1721 if (!ier) {
1702 DRM_ERROR("something (likely vbetool) disabled " 1722 DRM_ERROR("something (likely vbetool) disabled "
1703 "interrupts, re-enabling\n"); 1723 "interrupts, re-enabling\n");
@@ -1749,8 +1769,7 @@ i915_gem_flush(struct drm_device *dev,
1749 if (flush_domains & I915_GEM_DOMAIN_CPU) 1769 if (flush_domains & I915_GEM_DOMAIN_CPU)
1750 drm_agp_chipset_flush(dev); 1770 drm_agp_chipset_flush(dev);
1751 1771
1752 if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU | 1772 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
1753 I915_GEM_DOMAIN_GTT)) {
1754 /* 1773 /*
1755 * read/write caches: 1774 * read/write caches:
1756 * 1775 *
@@ -1972,7 +1991,7 @@ i915_gem_evict_something(struct drm_device *dev)
1972 i915_gem_flush(dev, 1991 i915_gem_flush(dev,
1973 obj->write_domain, 1992 obj->write_domain,
1974 obj->write_domain); 1993 obj->write_domain);
1975 i915_add_request(dev, obj->write_domain); 1994 i915_add_request(dev, NULL, obj->write_domain);
1976 1995
1977 obj = NULL; 1996 obj = NULL;
1978 continue; 1997 continue;
@@ -1986,7 +2005,7 @@ i915_gem_evict_something(struct drm_device *dev)
1986 /* If we didn't do any of the above, there's nothing to be done 2005 /* If we didn't do any of the above, there's nothing to be done
1987 * and we just can't fit it in. 2006 * and we just can't fit it in.
1988 */ 2007 */
1989 return -ENOMEM; 2008 return -ENOSPC;
1990 } 2009 }
1991 return ret; 2010 return ret;
1992} 2011}
@@ -2001,7 +2020,7 @@ i915_gem_evict_everything(struct drm_device *dev)
2001 if (ret != 0) 2020 if (ret != 0)
2002 break; 2021 break;
2003 } 2022 }
2004 if (ret == -ENOMEM) 2023 if (ret == -ENOSPC)
2005 return 0; 2024 return 0;
2006 return ret; 2025 return ret;
2007} 2026}
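
The -ENOMEM to -ENOSPC conversions in this file draw a distinction worth
keeping: -ENOSPC means the GTT aperture is full, so the caller may evict
and retry (as the pin_tries loop later in the file does), while -ENOMEM
still means the kernel itself is out of memory and retrying cannot help.
A toy illustration of keeping the two failure modes apart:

    #include <errno.h>

    static int bind_object(void *alloc, int gtt_has_room)
    {
        if (!alloc)
            return -ENOMEM;             /* allocator failure: do not retry */
        if (!gtt_has_room)
            return -ENOSPC;             /* aperture full: evict and retry */
        return 0;
    }
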
@@ -2024,8 +2043,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
2024 */ 2043 */
2025 page_count = obj->size / PAGE_SIZE; 2044 page_count = obj->size / PAGE_SIZE;
2026 BUG_ON(obj_priv->pages != NULL); 2045 BUG_ON(obj_priv->pages != NULL);
2027 obj_priv->pages = drm_calloc(page_count, sizeof(struct page *), 2046 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
2028 DRM_MEM_DRIVER);
2029 if (obj_priv->pages == NULL) { 2047 if (obj_priv->pages == NULL) {
 2030 DRM_ERROR("Failed to allocate page list\n"); 2048 DRM_ERROR("Failed to allocate page list\n");
2031 obj_priv->pages_refcount--; 2049 obj_priv->pages_refcount--;
@@ -2131,8 +2149,10 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2131 return; 2149 return;
2132 } 2150 }
2133 2151
2134 pitch_val = (obj_priv->stride / 128) - 1; 2152 pitch_val = obj_priv->stride / 128;
2135 WARN_ON(pitch_val & ~0x0000000f); 2153 pitch_val = ffs(pitch_val) - 1;
2154 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2155
2136 val = obj_priv->gtt_offset; 2156 val = obj_priv->gtt_offset;
2137 if (obj_priv->tiling_mode == I915_TILING_Y) 2157 if (obj_priv->tiling_mode == I915_TILING_Y)
2138 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2158 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
@@ -2209,7 +2229,7 @@ try_again:
2209 loff_t offset; 2229 loff_t offset;
2210 2230
2211 if (avail == 0) 2231 if (avail == 0)
2212 return -ENOMEM; 2232 return -ENOSPC;
2213 2233
2214 for (i = dev_priv->fence_reg_start; 2234 for (i = dev_priv->fence_reg_start;
2215 i < dev_priv->num_fence_regs; i++) { 2235 i < dev_priv->num_fence_regs; i++) {
@@ -2242,7 +2262,7 @@ try_again:
2242 i915_gem_flush(dev, 2262 i915_gem_flush(dev,
2243 I915_GEM_GPU_DOMAINS, 2263 I915_GEM_GPU_DOMAINS,
2244 I915_GEM_GPU_DOMAINS); 2264 I915_GEM_GPU_DOMAINS);
2245 seqno = i915_add_request(dev, 2265 seqno = i915_add_request(dev, NULL,
2246 I915_GEM_GPU_DOMAINS); 2266 I915_GEM_GPU_DOMAINS);
2247 if (seqno == 0) 2267 if (seqno == 0)
2248 return -ENOMEM; 2268 return -ENOMEM;
@@ -2254,9 +2274,6 @@ try_again:
2254 goto try_again; 2274 goto try_again;
2255 } 2275 }
2256 2276
2257 BUG_ON(old_obj_priv->active ||
2258 (reg->obj->write_domain & I915_GEM_GPU_DOMAINS));
2259
2260 /* 2277 /*
2261 * Zap this virtual mapping so we can set up a fence again 2278 * Zap this virtual mapping so we can set up a fence again
2262 * for this object next time we need it. 2279 * for this object next time we need it.
@@ -2361,7 +2378,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2361 spin_unlock(&dev_priv->mm.active_list_lock); 2378 spin_unlock(&dev_priv->mm.active_list_lock);
2362 if (lists_empty) { 2379 if (lists_empty) {
2363 DRM_ERROR("GTT full, but LRU list empty\n"); 2380 DRM_ERROR("GTT full, but LRU list empty\n");
2364 return -ENOMEM; 2381 return -ENOSPC;
2365 } 2382 }
2366 2383
2367 ret = i915_gem_evict_something(dev); 2384 ret = i915_gem_evict_something(dev);
@@ -2406,8 +2423,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2406 * wasn't in the GTT, there shouldn't be any way it could have been in 2423 * wasn't in the GTT, there shouldn't be any way it could have been in
2407 * a GPU cache 2424 * a GPU cache
2408 */ 2425 */
2409 BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); 2426 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2410 BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); 2427 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2411 2428
2412 return 0; 2429 return 0;
2413} 2430}
@@ -2424,6 +2441,16 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
2424 if (obj_priv->pages == NULL) 2441 if (obj_priv->pages == NULL)
2425 return; 2442 return;
2426 2443
2444 /* XXX: The 865 in particular appears to be weird in how it handles
2445 * cache flushing. We haven't figured it out, but the
2446 * clflush+agp_chipset_flush doesn't appear to successfully get the
 2447 * data visible to the GPU, while wbinvd + agp_chipset_flush does.
2448 */
2449 if (IS_I865G(obj->dev)) {
2450 wbinvd();
2451 return;
2452 }
2453
2427 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); 2454 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
2428} 2455}
2429 2456
@@ -2439,7 +2466,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2439 2466
2440 /* Queue the GPU write cache flushing we need. */ 2467 /* Queue the GPU write cache flushing we need. */
2441 i915_gem_flush(dev, 0, obj->write_domain); 2468 i915_gem_flush(dev, 0, obj->write_domain);
2442 seqno = i915_add_request(dev, obj->write_domain); 2469 seqno = i915_add_request(dev, NULL, obj->write_domain);
2443 obj->write_domain = 0; 2470 obj->write_domain = 0;
2444 i915_gem_object_move_to_active(obj, seqno); 2471 i915_gem_object_move_to_active(obj, seqno);
2445} 2472}
@@ -3022,20 +3049,12 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
3022 drm_i915_private_t *dev_priv = dev->dev_private; 3049 drm_i915_private_t *dev_priv = dev->dev_private;
3023 int nbox = exec->num_cliprects; 3050 int nbox = exec->num_cliprects;
3024 int i = 0, count; 3051 int i = 0, count;
3025 uint32_t exec_start, exec_len; 3052 uint32_t exec_start, exec_len;
3026 RING_LOCALS; 3053 RING_LOCALS;
3027 3054
3028 exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 3055 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3029 exec_len = (uint32_t) exec->batch_len; 3056 exec_len = (uint32_t) exec->batch_len;
3030 3057
3031 if ((exec_start | exec_len) & 0x7) {
3032 DRM_ERROR("alignment\n");
3033 return -EINVAL;
3034 }
3035
3036 if (!exec_start)
3037 return -EINVAL;
3038
3039 count = nbox ? nbox : 1; 3058 count = nbox ? nbox : 1;
3040 3059
3041 for (i = 0; i < count; i++) { 3060 for (i = 0; i < count; i++) {
@@ -3076,6 +3095,10 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
3076/* Throttle our rendering by waiting until the ring has completed our requests 3095/* Throttle our rendering by waiting until the ring has completed our requests
3077 * emitted over 20 msec ago. 3096 * emitted over 20 msec ago.
3078 * 3097 *
3098 * Note that if we were to use the current jiffies each time around the loop,
3099 * we wouldn't escape the function with any frames outstanding if the time to
3100 * render a frame was over 20ms.
3101 *
3079 * This should get us reasonable parallelism between CPU and GPU but also 3102 * This should get us reasonable parallelism between CPU and GPU but also
3080 * relatively low latency when blocking on a particular request to finish. 3103 * relatively low latency when blocking on a particular request to finish.
3081 */ 3104 */
@@ -3084,15 +3107,25 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3084{ 3107{
3085 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; 3108 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3086 int ret = 0; 3109 int ret = 0;
3087 uint32_t seqno; 3110 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3088 3111
3089 mutex_lock(&dev->struct_mutex); 3112 mutex_lock(&dev->struct_mutex);
3090 seqno = i915_file_priv->mm.last_gem_throttle_seqno; 3113 while (!list_empty(&i915_file_priv->mm.request_list)) {
3091 i915_file_priv->mm.last_gem_throttle_seqno = 3114 struct drm_i915_gem_request *request;
3092 i915_file_priv->mm.last_gem_seqno; 3115
3093 if (seqno) 3116 request = list_first_entry(&i915_file_priv->mm.request_list,
3094 ret = i915_wait_request(dev, seqno); 3117 struct drm_i915_gem_request,
3118 client_list);
3119
3120 if (time_after_eq(request->emitted_jiffies, recent_enough))
3121 break;
3122
3123 ret = i915_wait_request(dev, request->seqno);
3124 if (ret != 0)
3125 break;
3126 }
3095 mutex_unlock(&dev->struct_mutex); 3127 mutex_unlock(&dev->struct_mutex);
3128
3096 return ret; 3129 return ret;
3097} 3130}
3098 3131
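
The rewritten throttle walks the client's request list and waits only for
requests emitted more than 20 ms ago, computing recent_enough once before
the loop; as the added comment notes, re-reading the clock on every
iteration could trap a slow client in the function indefinitely. A
plain-C sketch of the snapshot-then-compare shape, with millisecond
timestamps standing in for jiffies and the actual wait elided:

    #include <stdint.h>

    static void throttle(const uint64_t *emitted_ms, int n, uint64_t now_ms)
    {
        uint64_t recent_enough = now_ms - 20;   /* fixed 20 ms cutoff */
        int i;

        for (i = 0; i < n; i++) {
            if (emitted_ms[i] >= recent_enough)
                break;                  /* young enough: stop waiting */
            /* wait_for_request(emitted_ms[i]) would block here */
        }
    }
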
@@ -3111,7 +3144,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3111 reloc_count += exec_list[i].relocation_count; 3144 reloc_count += exec_list[i].relocation_count;
3112 } 3145 }
3113 3146
3114 *relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER); 3147 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
3115 if (*relocs == NULL) 3148 if (*relocs == NULL)
3116 return -ENOMEM; 3149 return -ENOMEM;
3117 3150
@@ -3125,8 +3158,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3125 exec_list[i].relocation_count * 3158 exec_list[i].relocation_count *
3126 sizeof(**relocs)); 3159 sizeof(**relocs));
3127 if (ret != 0) { 3160 if (ret != 0) {
3128 drm_free(*relocs, reloc_count * sizeof(**relocs), 3161 drm_free_large(*relocs);
3129 DRM_MEM_DRIVER);
3130 *relocs = NULL; 3162 *relocs = NULL;
3131 return -EFAULT; 3163 return -EFAULT;
3132 } 3164 }
@@ -3165,17 +3197,34 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
3165 } 3197 }
3166 3198
3167err: 3199err:
3168 drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER); 3200 drm_free_large(relocs);
3169 3201
3170 return ret; 3202 return ret;
3171} 3203}
3172 3204
3205static int
 3206i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer *exec,
3207 uint64_t exec_offset)
3208{
3209 uint32_t exec_start, exec_len;
3210
3211 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3212 exec_len = (uint32_t) exec->batch_len;
3213
3214 if ((exec_start | exec_len) & 0x7)
3215 return -EINVAL;
3216
3217 if (!exec_start)
3218 return -EINVAL;
3219
3220 return 0;
3221}
3222
3173int 3223int
3174i915_gem_execbuffer(struct drm_device *dev, void *data, 3224i915_gem_execbuffer(struct drm_device *dev, void *data,
3175 struct drm_file *file_priv) 3225 struct drm_file *file_priv)
3176{ 3226{
3177 drm_i915_private_t *dev_priv = dev->dev_private; 3227 drm_i915_private_t *dev_priv = dev->dev_private;
3178 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3179 struct drm_i915_gem_execbuffer *args = data; 3228 struct drm_i915_gem_execbuffer *args = data;
3180 struct drm_i915_gem_exec_object *exec_list = NULL; 3229 struct drm_i915_gem_exec_object *exec_list = NULL;
3181 struct drm_gem_object **object_list = NULL; 3230 struct drm_gem_object **object_list = NULL;
@@ -3198,10 +3247,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3198 return -EINVAL; 3247 return -EINVAL;
3199 } 3248 }
3200 /* Copy in the exec list from userland */ 3249 /* Copy in the exec list from userland */
3201 exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count, 3250 exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
3202 DRM_MEM_DRIVER); 3251 object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
3203 object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
3204 DRM_MEM_DRIVER);
3205 if (exec_list == NULL || object_list == NULL) { 3252 if (exec_list == NULL || object_list == NULL) {
3206 DRM_ERROR("Failed to allocate exec or object list " 3253 DRM_ERROR("Failed to allocate exec or object list "
3207 "for %d buffers\n", 3254 "for %d buffers\n",
@@ -3302,7 +3349,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3302 break; 3349 break;
3303 3350
3304 /* error other than GTT full, or we've already tried again */ 3351 /* error other than GTT full, or we've already tried again */
3305 if (ret != -ENOMEM || pin_tries >= 1) { 3352 if (ret != -ENOSPC || pin_tries >= 1) {
3306 if (ret != -ERESTARTSYS) 3353 if (ret != -ERESTARTSYS)
3307 DRM_ERROR("Failed to pin buffers %d\n", ret); 3354 DRM_ERROR("Failed to pin buffers %d\n", ret);
3308 goto err; 3355 goto err;
@@ -3321,8 +3368,20 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3321 3368
3322 /* Set the pending read domains for the batch buffer to COMMAND */ 3369 /* Set the pending read domains for the batch buffer to COMMAND */
3323 batch_obj = object_list[args->buffer_count-1]; 3370 batch_obj = object_list[args->buffer_count-1];
3324 batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND; 3371 if (batch_obj->pending_write_domain) {
3325 batch_obj->pending_write_domain = 0; 3372 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3373 ret = -EINVAL;
3374 goto err;
3375 }
3376 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
3377
3378 /* Sanity check the batch buffer, prior to moving objects */
3379 exec_offset = exec_list[args->buffer_count - 1].offset;
3380 ret = i915_gem_check_execbuffer (args, exec_offset);
3381 if (ret != 0) {
3382 DRM_ERROR("execbuf with invalid offset/length\n");
3383 goto err;
3384 }
3326 3385
3327 i915_verify_inactive(dev, __FILE__, __LINE__); 3386 i915_verify_inactive(dev, __FILE__, __LINE__);
3328 3387
@@ -3353,7 +3412,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3353 dev->invalidate_domains, 3412 dev->invalidate_domains,
3354 dev->flush_domains); 3413 dev->flush_domains);
3355 if (dev->flush_domains) 3414 if (dev->flush_domains)
3356 (void)i915_add_request(dev, dev->flush_domains); 3415 (void)i915_add_request(dev, file_priv,
3416 dev->flush_domains);
3357 } 3417 }
3358 3418
3359 for (i = 0; i < args->buffer_count; i++) { 3419 for (i = 0; i < args->buffer_count; i++) {
@@ -3371,8 +3431,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3371 } 3431 }
3372#endif 3432#endif
3373 3433
3374 exec_offset = exec_list[args->buffer_count - 1].offset;
3375
3376#if WATCH_EXEC 3434#if WATCH_EXEC
3377 i915_gem_dump_object(batch_obj, 3435 i915_gem_dump_object(batch_obj,
3378 args->batch_len, 3436 args->batch_len,
@@ -3402,9 +3460,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3402 * *some* interrupts representing completion of buffers that we can 3460 * *some* interrupts representing completion of buffers that we can
3403 * wait on when trying to clear up gtt space). 3461 * wait on when trying to clear up gtt space).
3404 */ 3462 */
3405 seqno = i915_add_request(dev, flush_domains); 3463 seqno = i915_add_request(dev, file_priv, flush_domains);
3406 BUG_ON(seqno == 0); 3464 BUG_ON(seqno == 0);
3407 i915_file_priv->mm.last_gem_seqno = seqno;
3408 for (i = 0; i < args->buffer_count; i++) { 3465 for (i = 0; i < args->buffer_count; i++) {
3409 struct drm_gem_object *obj = object_list[i]; 3466 struct drm_gem_object *obj = object_list[i];
3410 3467
@@ -3462,10 +3519,8 @@ err:
3462 } 3519 }
3463 3520
3464pre_mutex_err: 3521pre_mutex_err:
3465 drm_free(object_list, sizeof(*object_list) * args->buffer_count, 3522 drm_free_large(object_list);
3466 DRM_MEM_DRIVER); 3523 drm_free_large(exec_list);
3467 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
3468 DRM_MEM_DRIVER);
3469 drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects, 3524 drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
3470 DRM_MEM_DRIVER); 3525 DRM_MEM_DRIVER);
3471 3526
@@ -3512,8 +3567,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3512 atomic_inc(&dev->pin_count); 3567 atomic_inc(&dev->pin_count);
3513 atomic_add(obj->size, &dev->pin_memory); 3568 atomic_add(obj->size, &dev->pin_memory);
3514 if (!obj_priv->active && 3569 if (!obj_priv->active &&
3515 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | 3570 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
3516 I915_GEM_DOMAIN_GTT)) == 0 &&
3517 !list_empty(&obj_priv->list)) 3571 !list_empty(&obj_priv->list))
3518 list_del_init(&obj_priv->list); 3572 list_del_init(&obj_priv->list);
3519 } 3573 }
@@ -3540,8 +3594,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
3540 */ 3594 */
3541 if (obj_priv->pin_count == 0) { 3595 if (obj_priv->pin_count == 0) {
3542 if (!obj_priv->active && 3596 if (!obj_priv->active &&
3543 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | 3597 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
3544 I915_GEM_DOMAIN_GTT)) == 0)
3545 list_move_tail(&obj_priv->list, 3598 list_move_tail(&obj_priv->list,
3546 &dev_priv->mm.inactive_list); 3599 &dev_priv->mm.inactive_list);
3547 atomic_dec(&dev->pin_count); 3600 atomic_dec(&dev->pin_count);
@@ -3645,15 +3698,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3645 struct drm_gem_object *obj; 3698 struct drm_gem_object *obj;
3646 struct drm_i915_gem_object *obj_priv; 3699 struct drm_i915_gem_object *obj_priv;
3647 3700
3648 mutex_lock(&dev->struct_mutex);
3649 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 3701 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3650 if (obj == NULL) { 3702 if (obj == NULL) {
3651 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n", 3703 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
3652 args->handle); 3704 args->handle);
3653 mutex_unlock(&dev->struct_mutex);
3654 return -EBADF; 3705 return -EBADF;
3655 } 3706 }
3656 3707
3708 mutex_lock(&dev->struct_mutex);
3657 /* Update the active list for the hardware's current position. 3709 /* Update the active list for the hardware's current position.
3658 * Otherwise this only updates on a delayed timer or when irqs are 3710 * Otherwise this only updates on a delayed timer or when irqs are
3659 * actually unmasked, and our working set ends up being larger than 3711 * actually unmasked, and our working set ends up being larger than
@@ -3792,9 +3844,8 @@ i915_gem_idle(struct drm_device *dev)
3792 3844
3793 /* Flush the GPU along with all non-CPU write domains 3845 /* Flush the GPU along with all non-CPU write domains
3794 */ 3846 */
3795 i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), 3847 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
3796 ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); 3848 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
3797 seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
3798 3849
3799 if (seqno == 0) { 3850 if (seqno == 0) {
3800 mutex_unlock(&dev->struct_mutex); 3851 mutex_unlock(&dev->struct_mutex);
@@ -4344,3 +4395,17 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4344 drm_agp_chipset_flush(dev); 4395 drm_agp_chipset_flush(dev);
4345 return 0; 4396 return 0;
4346} 4397}
4398
4399void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
4400{
4401 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
4402
4403 /* Clean up our request list when the client is going away, so that
4404 * later retire_requests won't dereference our soon-to-be-gone
4405 * file_priv.
4406 */
4407 mutex_lock(&dev->struct_mutex);
4408 while (!list_empty(&i915_file_priv->mm.request_list))
4409 list_del_init(i915_file_priv->mm.request_list.next);
4410 mutex_unlock(&dev->struct_mutex);
4411}
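
i915_gem_release() unlinks rather than frees: the requests remain on the
global list and still belong to the retire path, so only the per-client
linkage must die with the client. list_del_init() leaves each node
self-linked, which is why the list_del(&request->client_list) later in
retire stays harmless. A sketch of why self-linking makes a second
deletion safe:

    struct list_head {
        struct list_head *next, *prev;
    };

    static void list_del_init_sketch(struct list_head *e)
    {
        e->prev->next = e->next;        /* unlink from the current list */
        e->next->prev = e->prev;
        e->next = e;                    /* re-point at self, so a second */
        e->prev = e;                    /* deletion just rewrites e itself */
    }
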
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 52a059354e8..9a05cadaa4a 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,6 +25,8 @@
25 * 25 *
26 */ 26 */
27 27
28#include <linux/acpi.h>
29#include <linux/pnp.h>
28#include "linux/string.h" 30#include "linux/string.h"
29#include "linux/bitops.h" 31#include "linux/bitops.h"
30#include "drmP.h" 32#include "drmP.h"
@@ -81,6 +83,143 @@
81 * to match what the GPU expects. 83 * to match what the GPU expects.
82 */ 84 */
83 85
86#define MCHBAR_I915 0x44
87#define MCHBAR_I965 0x48
88#define MCHBAR_SIZE (4*4096)
89
90#define DEVEN_REG 0x54
91#define DEVEN_MCHBAR_EN (1 << 28)
92
93/* Allocate space for the MCH regs if needed, return nonzero on error */
94static int
95intel_alloc_mchbar_resource(struct drm_device *dev)
96{
97 struct pci_dev *bridge_dev;
98 drm_i915_private_t *dev_priv = dev->dev_private;
99 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
100 u32 temp_lo, temp_hi = 0;
101 u64 mchbar_addr;
102 int ret = 0;
103
104 bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
105 if (!bridge_dev) {
106 DRM_DEBUG("no bridge dev?!\n");
107 ret = -ENODEV;
108 goto out;
109 }
110
111 if (IS_I965G(dev))
112 pci_read_config_dword(bridge_dev, reg + 4, &temp_hi);
113 pci_read_config_dword(bridge_dev, reg, &temp_lo);
114 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
115
116 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
117 if (mchbar_addr &&
118 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
119 ret = 0;
120 goto out_put;
121 }
122
123 /* Get some space for it */
124 ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res,
125 MCHBAR_SIZE, MCHBAR_SIZE,
126 PCIBIOS_MIN_MEM,
127 0, pcibios_align_resource,
128 bridge_dev);
129 if (ret) {
130 DRM_DEBUG("failed bus alloc: %d\n", ret);
131 dev_priv->mch_res.start = 0;
132 goto out_put;
133 }
134
135 if (IS_I965G(dev))
136 pci_write_config_dword(bridge_dev, reg + 4,
137 upper_32_bits(dev_priv->mch_res.start));
138
139 pci_write_config_dword(bridge_dev, reg,
140 lower_32_bits(dev_priv->mch_res.start));
141out_put:
142 pci_dev_put(bridge_dev);
143out:
144 return ret;
145}
146
147/* Setup MCHBAR if possible, return true if we should disable it again */
148static bool
149intel_setup_mchbar(struct drm_device *dev)
150{
151 struct pci_dev *bridge_dev;
152 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
153 u32 temp;
154 bool need_disable = false, enabled;
155
156 bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
157 if (!bridge_dev) {
158 DRM_DEBUG("no bridge dev?!\n");
159 goto out;
160 }
161
162 if (IS_I915G(dev) || IS_I915GM(dev)) {
163 pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
164 enabled = !!(temp & DEVEN_MCHBAR_EN);
165 } else {
166 pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
167 enabled = temp & 1;
168 }
169
170 /* If it's already enabled, don't have to do anything */
171 if (enabled)
172 goto out_put;
173
174 if (intel_alloc_mchbar_resource(dev))
175 goto out_put;
176
177 need_disable = true;
178
179 /* Space is allocated or reserved, so enable it. */
180 if (IS_I915G(dev) || IS_I915GM(dev)) {
181 pci_write_config_dword(bridge_dev, DEVEN_REG,
182 temp | DEVEN_MCHBAR_EN);
183 } else {
184 pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
185 pci_write_config_dword(bridge_dev, mchbar_reg, temp | 1);
186 }
187out_put:
188 pci_dev_put(bridge_dev);
189out:
190 return need_disable;
191}
192
193static void
194intel_teardown_mchbar(struct drm_device *dev, bool disable)
195{
196 drm_i915_private_t *dev_priv = dev->dev_private;
197 struct pci_dev *bridge_dev;
198 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
199 u32 temp;
200
201 bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
202 if (!bridge_dev) {
203 DRM_DEBUG("no bridge dev?!\n");
204 return;
205 }
206
207 if (disable) {
208 if (IS_I915G(dev) || IS_I915GM(dev)) {
209 pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
210 temp &= ~DEVEN_MCHBAR_EN;
211 pci_write_config_dword(bridge_dev, DEVEN_REG, temp);
212 } else {
213 pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
214 temp &= ~1;
215 pci_write_config_dword(bridge_dev, mchbar_reg, temp);
216 }
217 }
218
219 if (dev_priv->mch_res.start)
220 release_resource(&dev_priv->mch_res);
221}
222
84/** 223/**
85 * Detects bit 6 swizzling of address lookup between IGD access and CPU 224 * Detects bit 6 swizzling of address lookup between IGD access and CPU
86 * access through main memory. 225 * access through main memory.
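
The MCHBAR helpers above follow a setup/teardown pattern: record whether
the probe had to enable the BAR, poke at it, then restore the original
state. A compressed sketch of that "enable only if needed, undo only what
you enabled" shape; the struct and field are placeholders for the real
PCI config-space accesses:

    #include <stdbool.h>

    struct hw { bool mchbar_enabled; };

    static bool setup(struct hw *hw)
    {
        if (hw->mchbar_enabled)
            return false;               /* already on: nothing to undo */
        hw->mchbar_enabled = true;      /* would allocate + enable the BAR */
        return true;                    /* caller must disable afterwards */
    }

    static void teardown(struct hw *hw, bool disable)
    {
        if (disable)
            hw->mchbar_enabled = false; /* restore the pre-probe state */
    }
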
@@ -91,6 +230,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
91 drm_i915_private_t *dev_priv = dev->dev_private; 230 drm_i915_private_t *dev_priv = dev->dev_private;
92 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 231 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
93 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 232 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
233 bool need_disable;
94 234
95 if (!IS_I9XX(dev)) { 235 if (!IS_I9XX(dev)) {
96 /* As far as we know, the 865 doesn't have these bit 6 236 /* As far as we know, the 865 doesn't have these bit 6
@@ -101,6 +241,9 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
101 } else if (IS_MOBILE(dev)) { 241 } else if (IS_MOBILE(dev)) {
102 uint32_t dcc; 242 uint32_t dcc;
103 243
244 /* Try to make sure MCHBAR is enabled before poking at it */
245 need_disable = intel_setup_mchbar(dev);
246
104 /* On mobile 9xx chipsets, channel interleave by the CPU is 247 /* On mobile 9xx chipsets, channel interleave by the CPU is
105 * determined by DCC. For single-channel, neither the CPU 248 * determined by DCC. For single-channel, neither the CPU
106 * nor the GPU do swizzling. For dual channel interleaved, 249 * nor the GPU do swizzling. For dual channel interleaved,
@@ -140,6 +283,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
140 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 283 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
141 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 284 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
142 } 285 }
286
287 intel_teardown_mchbar(dev, need_disable);
143 } else { 288 } else {
144 /* The 965, G33, and newer, have a very flexible memory 289 /* The 965, G33, and newer, have a very flexible memory
145 * configuration. It will enable dual-channel mode 290 * configuration. It will enable dual-channel mode
@@ -170,6 +315,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
170 } 315 }
171 } 316 }
172 317
318 /* FIXME: check with memory config on IGDNG */
319 if (IS_IGDNG(dev)) {
320 DRM_ERROR("disable tiling on IGDNG...\n");
321 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
322 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
323 }
324
173 dev_priv->mm.bit_6_swizzle_x = swizzle_x; 325 dev_priv->mm.bit_6_swizzle_x = swizzle_x;
174 dev_priv->mm.bit_6_swizzle_y = swizzle_y; 326 dev_priv->mm.bit_6_swizzle_y = swizzle_y;
175} 327}
@@ -213,7 +365,8 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
213 if (tiling_mode == I915_TILING_NONE) 365 if (tiling_mode == I915_TILING_NONE)
214 return true; 366 return true;
215 367
216 if (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) 368 if (!IS_I9XX(dev) ||
369 (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
217 tile_width = 128; 370 tile_width = 128;
218 else 371 else
219 tile_width = 512; 372 tile_width = 512;
@@ -225,11 +378,18 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
225 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) 378 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
226 return false; 379 return false;
227 } else if (IS_I9XX(dev)) { 380 } else if (IS_I9XX(dev)) {
228 if (stride / tile_width > I830_FENCE_MAX_PITCH_VAL || 381 uint32_t pitch_val = ffs(stride / tile_width) - 1;
382
383 /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
384 * instead of 4 (2KB) on 945s.
385 */
386 if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
229 size > (I830_FENCE_MAX_SIZE_VAL << 20)) 387 size > (I830_FENCE_MAX_SIZE_VAL << 20))
230 return false; 388 return false;
231 } else { 389 } else {
232 if (stride / 128 > I830_FENCE_MAX_PITCH_VAL || 390 uint32_t pitch_val = ffs(stride / tile_width) - 1;
391
392 if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
233 size > (I830_FENCE_MAX_SIZE_VAL << 19)) 393 size > (I830_FENCE_MAX_SIZE_VAL << 19))
234 return false; 394 return false;
235 } 395 }
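
A worked instance of the new fence pitch encoding (the helper name is hypothetical; ffs() is the usual find-first-set): a 4096-byte stride with 512-byte tiles gives stride / tile_width = 8, and ffs(8) - 1 = 3, so the pitch field now carries the log2 of the tile count rather than the raw count the old I830_FENCE_MAX_PITCH_VAL test compared.

	#include <strings.h>    /* userspace ffs(); the kernel has its own */

	/* hypothetical helper mirroring the checks above */
	static int fence_pitch_val(int stride, int tile_width)
	{
	        return ffs(stride / tile_width) - 1;    /* 4096 / 512 = 8 -> 3 */
	}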
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 98bb4c878c4..b86b7b7130c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -58,6 +58,47 @@
58 DRM_I915_VBLANK_PIPE_B) 58 DRM_I915_VBLANK_PIPE_B)
59 59
60void 60void
61igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
62{
63 if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
64 dev_priv->gt_irq_mask_reg &= ~mask;
65 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
66 (void) I915_READ(GTIMR);
67 }
68}
69
70static inline void
71igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
72{
73 if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
74 dev_priv->gt_irq_mask_reg |= mask;
75 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
76 (void) I915_READ(GTIMR);
77 }
78}
79
80/* For display hotplug interrupt */
81void
82igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
83{
84 if ((dev_priv->irq_mask_reg & mask) != 0) {
85 dev_priv->irq_mask_reg &= ~mask;
86 I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
87 (void) I915_READ(DEIMR);
88 }
89}
90
91static inline void
92igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
93{
94 if ((dev_priv->irq_mask_reg & mask) != mask) {
95 dev_priv->irq_mask_reg |= mask;
96 I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
97 (void) I915_READ(DEIMR);
98 }
99}
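
Each helper above ends with a throwaway read of the register it just wrote. That is a posting read; isolated, the idiom is (register name as above):

	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);  /* may linger in a write buffer */
	(void) I915_READ(GTIMR);                       /* read back forces it out to hardware */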
100
101void
61i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) 102i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
62{ 103{
63 if ((dev_priv->irq_mask_reg & mask) != 0) { 104 if ((dev_priv->irq_mask_reg & mask) != 0) {
@@ -196,6 +237,47 @@ static void i915_hotplug_work_func(struct work_struct *work)
196 drm_sysfs_hotplug_event(dev); 237 drm_sysfs_hotplug_event(dev);
197} 238}
198 239
240irqreturn_t igdng_irq_handler(struct drm_device *dev)
241{
242 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
243 int ret = IRQ_NONE;
244 u32 de_iir, gt_iir;
245 u32 new_de_iir, new_gt_iir;
246 struct drm_i915_master_private *master_priv;
247
248 de_iir = I915_READ(DEIIR);
249 gt_iir = I915_READ(GTIIR);
250
251 for (;;) {
252 if (de_iir == 0 && gt_iir == 0)
253 break;
254
255 ret = IRQ_HANDLED;
256
257 I915_WRITE(DEIIR, de_iir);
258 new_de_iir = I915_READ(DEIIR);
259 I915_WRITE(GTIIR, gt_iir);
260 new_gt_iir = I915_READ(GTIIR);
261
262 if (dev->primary->master) {
263 master_priv = dev->primary->master->driver_priv;
264 if (master_priv->sarea_priv)
265 master_priv->sarea_priv->last_dispatch =
266 READ_BREADCRUMB(dev_priv);
267 }
268
269 if (gt_iir & GT_USER_INTERRUPT) {
270 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
271 DRM_WAKEUP(&dev_priv->irq_queue);
272 }
273
274 de_iir = new_de_iir;
275 gt_iir = new_gt_iir;
276 }
277
278 return ret;
279}
280
199irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 281irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
200{ 282{
201 struct drm_device *dev = (struct drm_device *) arg; 283 struct drm_device *dev = (struct drm_device *) arg;
@@ -212,6 +294,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
212 294
213 atomic_inc(&dev_priv->irq_received); 295 atomic_inc(&dev_priv->irq_received);
214 296
297 if (IS_IGDNG(dev))
298 return igdng_irq_handler(dev);
299
215 iir = I915_READ(IIR); 300 iir = I915_READ(IIR);
216 301
217 if (IS_I965G(dev)) { 302 if (IS_I965G(dev)) {
@@ -349,8 +434,12 @@ void i915_user_irq_get(struct drm_device *dev)
349 unsigned long irqflags; 434 unsigned long irqflags;
350 435
351 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 436 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
352 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) 437 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
353 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 438 if (IS_IGDNG(dev))
439 igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
440 else
441 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
442 }
354 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 443 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
355} 444}
356 445
@@ -361,8 +450,12 @@ void i915_user_irq_put(struct drm_device *dev)
361 450
362 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 451 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
363 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); 452 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
364 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) 453 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
365 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 454 if (IS_IGDNG(dev))
455 igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
456 else
457 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
458 }
366 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 459 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
367} 460}
368 461
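After igdng_irq_postinstall() below, the user interrupt is enabled in GTIER but masked in GTIMR, so waiters unmask it on demand through this get/put pair. A hedged sketch of a wait path (i915_seqno_passed() is assumed for illustration and is not part of this patch; i915_get_gem_seqno() appears in the handler above):

	i915_user_irq_get(dev);     /* unmasks GT_USER_INTERRUPT on IGDNG */
	ret = wait_event_interruptible(dev_priv->irq_queue,
	                i915_seqno_passed(i915_get_gem_seqno(dev), seqno));
	i915_user_irq_put(dev);     /* re-masks once the last waiter is gone */
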
@@ -455,6 +548,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
455 if (!(pipeconf & PIPEACONF_ENABLE)) 548 if (!(pipeconf & PIPEACONF_ENABLE))
456 return -EINVAL; 549 return -EINVAL;
457 550
551 if (IS_IGDNG(dev))
552 return 0;
553
458 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 554 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
459 if (IS_I965G(dev)) 555 if (IS_I965G(dev))
460 i915_enable_pipestat(dev_priv, pipe, 556 i915_enable_pipestat(dev_priv, pipe,
@@ -474,6 +570,9 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
474 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 570 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
475 unsigned long irqflags; 571 unsigned long irqflags;
476 572
573 if (IS_IGDNG(dev))
574 return;
575
477 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 576 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
478 i915_disable_pipestat(dev_priv, pipe, 577 i915_disable_pipestat(dev_priv, pipe,
479 PIPE_VBLANK_INTERRUPT_ENABLE | 578 PIPE_VBLANK_INTERRUPT_ENABLE |
@@ -484,7 +583,9 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
484void i915_enable_interrupt (struct drm_device *dev) 583void i915_enable_interrupt (struct drm_device *dev)
485{ 584{
486 struct drm_i915_private *dev_priv = dev->dev_private; 585 struct drm_i915_private *dev_priv = dev->dev_private;
487 opregion_enable_asle(dev); 586
587 if (!IS_IGDNG(dev))
588 opregion_enable_asle(dev);
488 dev_priv->irq_enabled = 1; 589 dev_priv->irq_enabled = 1;
489} 590}
490 591
@@ -545,12 +646,65 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
545 646
546/* drm_dma.h hooks 647/* drm_dma.h hooks
547*/ 648*/
649static void igdng_irq_preinstall(struct drm_device *dev)
650{
651 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
652
653 I915_WRITE(HWSTAM, 0xeffe);
654
655 /* XXX hotplug from PCH */
656
657 I915_WRITE(DEIMR, 0xffffffff);
658 I915_WRITE(DEIER, 0x0);
659 (void) I915_READ(DEIER);
660
661 /* and GT */
662 I915_WRITE(GTIMR, 0xffffffff);
663 I915_WRITE(GTIER, 0x0);
664 (void) I915_READ(GTIER);
665}
666
667static int igdng_irq_postinstall(struct drm_device *dev)
668{
669 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
670	/* enable the kinds of interrupts that are always enabled */
671 u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */;
672 u32 render_mask = GT_USER_INTERRUPT;
673
674 dev_priv->irq_mask_reg = ~display_mask;
675 dev_priv->de_irq_enable_reg = display_mask;
676
677	/* should always be able to generate an irq */
678 I915_WRITE(DEIIR, I915_READ(DEIIR));
679 I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
680 I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
681 (void) I915_READ(DEIER);
682
683	/* user interrupt should be enabled, but masked initially */
684 dev_priv->gt_irq_mask_reg = 0xffffffff;
685 dev_priv->gt_irq_enable_reg = render_mask;
686
687 I915_WRITE(GTIIR, I915_READ(GTIIR));
688 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
689 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
690 (void) I915_READ(GTIER);
691
692 return 0;
693}
694
548void i915_driver_irq_preinstall(struct drm_device * dev) 695void i915_driver_irq_preinstall(struct drm_device * dev)
549{ 696{
550 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 697 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
551 698
552 atomic_set(&dev_priv->irq_received, 0); 699 atomic_set(&dev_priv->irq_received, 0);
553 700
701 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
702
703 if (IS_IGDNG(dev)) {
704 igdng_irq_preinstall(dev);
705 return;
706 }
707
554 if (I915_HAS_HOTPLUG(dev)) { 708 if (I915_HAS_HOTPLUG(dev)) {
555 I915_WRITE(PORT_HOTPLUG_EN, 0); 709 I915_WRITE(PORT_HOTPLUG_EN, 0);
556 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 710 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -562,7 +716,6 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
562 I915_WRITE(IMR, 0xffffffff); 716 I915_WRITE(IMR, 0xffffffff);
563 I915_WRITE(IER, 0x0); 717 I915_WRITE(IER, 0x0);
564 (void) I915_READ(IER); 718 (void) I915_READ(IER);
565 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
566} 719}
567 720
568int i915_driver_irq_postinstall(struct drm_device *dev) 721int i915_driver_irq_postinstall(struct drm_device *dev)
@@ -570,9 +723,12 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
570 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 723 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
571 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; 724 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
572 725
726 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
727
573 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 728 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
574 729
575 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 730 if (IS_IGDNG(dev))
731 return igdng_irq_postinstall(dev);
576 732
577 /* Unmask the interrupts that we always want on. */ 733 /* Unmask the interrupts that we always want on. */
578 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; 734 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
@@ -613,11 +769,24 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
613 (void) I915_READ(IER); 769 (void) I915_READ(IER);
614 770
615 opregion_enable_asle(dev); 771 opregion_enable_asle(dev);
616 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
617 772
618 return 0; 773 return 0;
619} 774}
620 775
776static void igdng_irq_uninstall(struct drm_device *dev)
777{
778 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
779 I915_WRITE(HWSTAM, 0xffffffff);
780
781 I915_WRITE(DEIMR, 0xffffffff);
782 I915_WRITE(DEIER, 0x0);
783 I915_WRITE(DEIIR, I915_READ(DEIIR));
784
785 I915_WRITE(GTIMR, 0xffffffff);
786 I915_WRITE(GTIER, 0x0);
787 I915_WRITE(GTIIR, I915_READ(GTIIR));
788}
789
621void i915_driver_irq_uninstall(struct drm_device * dev) 790void i915_driver_irq_uninstall(struct drm_device * dev)
622{ 791{
623 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 792 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -627,6 +796,11 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
627 796
628 dev_priv->vblank_pipe = 0; 797 dev_priv->vblank_pipe = 0;
629 798
799 if (IS_IGDNG(dev)) {
800 igdng_irq_uninstall(dev);
801 return;
802 }
803
630 if (I915_HAS_HOTPLUG(dev)) { 804 if (I915_HAS_HOTPLUG(dev)) {
631 I915_WRITE(PORT_HOTPLUG_EN, 0); 805 I915_WRITE(PORT_HOTPLUG_EN, 0);
632 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 806 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 15da44cf21b..f6237a0b113 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -190,7 +190,8 @@
190#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) 190#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
191#define I830_FENCE_PITCH_SHIFT 4 191#define I830_FENCE_PITCH_SHIFT 4
192#define I830_FENCE_REG_VALID (1<<0) 192#define I830_FENCE_REG_VALID (1<<0)
193#define I830_FENCE_MAX_PITCH_VAL 0x10 193#define I915_FENCE_MAX_PITCH_VAL 0x10
194#define I830_FENCE_MAX_PITCH_VAL 6
194#define I830_FENCE_MAX_SIZE_VAL (1<<8) 195#define I830_FENCE_MAX_SIZE_VAL (1<<8)
195 196
196#define I915_FENCE_START_MASK 0x0ff00000 197#define I915_FENCE_START_MASK 0x0ff00000
@@ -449,6 +450,13 @@
449#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) 450#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
450#define PLL_REF_INPUT_MASK (3 << 13) 451#define PLL_REF_INPUT_MASK (3 << 13)
451#define PLL_LOAD_PULSE_PHASE_SHIFT 9 452#define PLL_LOAD_PULSE_PHASE_SHIFT 9
453/* IGDNG */
454# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
455# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
456# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
457# define DPLL_FPA1_P1_POST_DIV_SHIFT 0
458# define DPLL_FPA1_P1_POST_DIV_MASK 0xff
459
452/* 460/*
453 * Parallel to Serial Load Pulse phase selection. 461 * Parallel to Serial Load Pulse phase selection.
454 * Selects the phase for the 10X DPLL clock for the PCIe 462 * Selects the phase for the 10X DPLL clock for the PCIe
@@ -630,8 +638,11 @@
630/* Hotplug control (945+ only) */ 638/* Hotplug control (945+ only) */
631#define PORT_HOTPLUG_EN 0x61110 639#define PORT_HOTPLUG_EN 0x61110
632#define HDMIB_HOTPLUG_INT_EN (1 << 29) 640#define HDMIB_HOTPLUG_INT_EN (1 << 29)
641#define DPB_HOTPLUG_INT_EN (1 << 29)
633#define HDMIC_HOTPLUG_INT_EN (1 << 28) 642#define HDMIC_HOTPLUG_INT_EN (1 << 28)
643#define DPC_HOTPLUG_INT_EN (1 << 28)
634#define HDMID_HOTPLUG_INT_EN (1 << 27) 644#define HDMID_HOTPLUG_INT_EN (1 << 27)
645#define DPD_HOTPLUG_INT_EN (1 << 27)
635#define SDVOB_HOTPLUG_INT_EN (1 << 26) 646#define SDVOB_HOTPLUG_INT_EN (1 << 26)
636#define SDVOC_HOTPLUG_INT_EN (1 << 25) 647#define SDVOC_HOTPLUG_INT_EN (1 << 25)
637#define TV_HOTPLUG_INT_EN (1 << 18) 648#define TV_HOTPLUG_INT_EN (1 << 18)
@@ -664,8 +675,11 @@
664 675
665#define PORT_HOTPLUG_STAT 0x61114 676#define PORT_HOTPLUG_STAT 0x61114
666#define HDMIB_HOTPLUG_INT_STATUS (1 << 29) 677#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
678#define DPB_HOTPLUG_INT_STATUS (1 << 29)
667#define HDMIC_HOTPLUG_INT_STATUS (1 << 28) 679#define HDMIC_HOTPLUG_INT_STATUS (1 << 28)
680#define DPC_HOTPLUG_INT_STATUS (1 << 28)
668#define HDMID_HOTPLUG_INT_STATUS (1 << 27) 681#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
682#define DPD_HOTPLUG_INT_STATUS (1 << 27)
669#define CRT_HOTPLUG_INT_STATUS (1 << 11) 683#define CRT_HOTPLUG_INT_STATUS (1 << 11)
670#define TV_HOTPLUG_INT_STATUS (1 << 10) 684#define TV_HOTPLUG_INT_STATUS (1 << 10)
671#define CRT_HOTPLUG_MONITOR_MASK (3 << 8) 685#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
@@ -950,15 +964,15 @@
950# define DAC_A_1_3_V (0 << 4) 964# define DAC_A_1_3_V (0 << 4)
951# define DAC_A_1_1_V (1 << 4) 965# define DAC_A_1_1_V (1 << 4)
952# define DAC_A_0_7_V (2 << 4) 966# define DAC_A_0_7_V (2 << 4)
953# define DAC_A_OFF (3 << 4) 967# define DAC_A_MASK (3 << 4)
954# define DAC_B_1_3_V (0 << 2) 968# define DAC_B_1_3_V (0 << 2)
955# define DAC_B_1_1_V (1 << 2) 969# define DAC_B_1_1_V (1 << 2)
956# define DAC_B_0_7_V (2 << 2) 970# define DAC_B_0_7_V (2 << 2)
957# define DAC_B_OFF (3 << 2) 971# define DAC_B_MASK (3 << 2)
958# define DAC_C_1_3_V (0 << 0) 972# define DAC_C_1_3_V (0 << 0)
959# define DAC_C_1_1_V (1 << 0) 973# define DAC_C_1_1_V (1 << 0)
960# define DAC_C_0_7_V (2 << 0) 974# define DAC_C_0_7_V (2 << 0)
961# define DAC_C_OFF (3 << 0) 975# define DAC_C_MASK (3 << 0)
962 976
963/** 977/**
964 * CSC coefficients are stored in a floating point format with 9 bits of 978 * CSC coefficients are stored in a floating point format with 9 bits of
@@ -1327,6 +1341,163 @@
1327#define TV_V_CHROMA_0 0x68400 1341#define TV_V_CHROMA_0 0x68400
1328#define TV_V_CHROMA_42 0x684a8 1342#define TV_V_CHROMA_42 0x684a8
1329 1343
1344/* Display Port */
1345#define DP_B 0x64100
1346#define DP_C 0x64200
1347#define DP_D 0x64300
1348
1349#define DP_PORT_EN (1 << 31)
1350#define DP_PIPEB_SELECT (1 << 30)
1351
1352/* Link training mode - select a suitable mode for each stage */
1353#define DP_LINK_TRAIN_PAT_1 (0 << 28)
1354#define DP_LINK_TRAIN_PAT_2 (1 << 28)
1355#define DP_LINK_TRAIN_PAT_IDLE (2 << 28)
1356#define DP_LINK_TRAIN_OFF (3 << 28)
1357#define DP_LINK_TRAIN_MASK (3 << 28)
1358#define DP_LINK_TRAIN_SHIFT 28
1359
1360/* Signal voltages. These are mostly controlled by the other end */
1361#define DP_VOLTAGE_0_4 (0 << 25)
1362#define DP_VOLTAGE_0_6 (1 << 25)
1363#define DP_VOLTAGE_0_8 (2 << 25)
1364#define DP_VOLTAGE_1_2 (3 << 25)
1365#define DP_VOLTAGE_MASK (7 << 25)
1366#define DP_VOLTAGE_SHIFT 25
1367
1368/* Signal pre-emphasis levels; like the voltages, the other end tells us
1369 * what it wants
1370 */
1371#define DP_PRE_EMPHASIS_0 (0 << 22)
1372#define DP_PRE_EMPHASIS_3_5 (1 << 22)
1373#define DP_PRE_EMPHASIS_6 (2 << 22)
1374#define DP_PRE_EMPHASIS_9_5 (3 << 22)
1375#define DP_PRE_EMPHASIS_MASK (7 << 22)
1376#define DP_PRE_EMPHASIS_SHIFT 22
1377
1378/* How many wires to use. I guess 3 was too hard */
1379#define DP_PORT_WIDTH_1 (0 << 19)
1380#define DP_PORT_WIDTH_2 (1 << 19)
1381#define DP_PORT_WIDTH_4 (3 << 19)
1382#define DP_PORT_WIDTH_MASK (7 << 19)
1383
1384/* Mystic DPCD version 1.1 special mode */
1385#define DP_ENHANCED_FRAMING (1 << 18)
1386
1387/** locked once port is enabled */
1388#define DP_PORT_REVERSAL (1 << 15)
1389
1390/** sends the clock on lane 15 of the PEG for debug */
1391#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
1392
1393#define DP_SCRAMBLING_DISABLE (1 << 12)
1394
1395/** limit RGB values to avoid confusing TVs */
1396#define DP_COLOR_RANGE_16_235 (1 << 8)
1397
1398/** Turn on the audio link */
1399#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
1400
1401/** vs and hs sync polarity */
1402#define DP_SYNC_VS_HIGH (1 << 4)
1403#define DP_SYNC_HS_HIGH (1 << 3)
1404
1405/** A fantasy */
1406#define DP_DETECTED (1 << 2)
1407
1408/** The aux channel provides a way to talk to the
1409 * signal sink for DDC etc. Max packet size supported
1410 * is 20 bytes in each direction, hence the 5 fixed
1411 * data registers.
1412 */
1413#define DPB_AUX_CH_CTL 0x64110
1414#define DPB_AUX_CH_DATA1 0x64114
1415#define DPB_AUX_CH_DATA2 0x64118
1416#define DPB_AUX_CH_DATA3 0x6411c
1417#define DPB_AUX_CH_DATA4 0x64120
1418#define DPB_AUX_CH_DATA5 0x64124
1419
1420#define DPC_AUX_CH_CTL 0x64210
1421#define DPC_AUX_CH_DATA1 0x64214
1422#define DPC_AUX_CH_DATA2 0x64218
1423#define DPC_AUX_CH_DATA3 0x6421c
1424#define DPC_AUX_CH_DATA4 0x64220
1425#define DPC_AUX_CH_DATA5 0x64224
1426
1427#define DPD_AUX_CH_CTL 0x64310
1428#define DPD_AUX_CH_DATA1 0x64314
1429#define DPD_AUX_CH_DATA2 0x64318
1430#define DPD_AUX_CH_DATA3 0x6431c
1431#define DPD_AUX_CH_DATA4 0x64320
1432#define DPD_AUX_CH_DATA5 0x64324
1433
1434#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
1435#define DP_AUX_CH_CTL_DONE (1 << 30)
1436#define DP_AUX_CH_CTL_INTERRUPT (1 << 29)
1437#define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28)
1438#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26)
1439#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26)
1440#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26)
1441#define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26)
1442#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26)
1443#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25)
1444#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
1445#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20
1446#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16)
1447#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16
1448#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15)
1449#define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14)
1450#define DP_AUX_CH_CTL_SYNC_TEST (1 << 13)
1451#define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12)
1452#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
1453#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
1454#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
1455
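
A sketch of how a sender might fold a message into the five 32-bit DATA registers, most significant byte first (the helper and its byte layout are assumptions for illustration; the diff itself does not define them):

	static u32 pack_aux(const u8 *src, int src_bytes)
	{
	        u32 v = 0;
	        int i;

	        /* up to 4 bytes per register; 5 registers cover the 20-byte max */
	        for (i = 0; i < src_bytes && i < 4; i++)
	                v |= ((u32)src[i]) << ((3 - i) * 8);
	        return v;
	}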
1456/*
1457 * Computing GMCH M and N values for the Display Port link
1458 *
1459 * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes
1460 *
1461 * ls_clk (we assume) is the DP link rate (1.62 or 2.7 Gbps)
1462 *
1463 * The GMCH value is used internally
1464 *
1465 * bytes_per_pixel is the number of bytes coming out of the plane,
1466 * which is after the LUTs, so we want the bytes for our color format.
1467 * For our current usage, this is always 3, one byte for R, G and B.
1468 */
1469#define PIPEA_GMCH_DATA_M 0x70050
1470#define PIPEB_GMCH_DATA_M 0x71050
1471
1472/* Transfer unit size for display port, minus 1; default is 0x3f (TU size 64) */
1473#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25)
1474#define PIPE_GMCH_DATA_M_TU_SIZE_SHIFT 25
1475
1476#define PIPE_GMCH_DATA_M_MASK (0xffffff)
1477
1478#define PIPEA_GMCH_DATA_N 0x70054
1479#define PIPEB_GMCH_DATA_N 0x71054
1480#define PIPE_GMCH_DATA_N_MASK (0xffffff)
1481
1482/*
1483 * Computing Link M and N values for the Display Port link
1484 *
1485 * Link M / N = pixel_clock / ls_clk
1486 *
1487 * (the DP spec calls pixel_clock the 'strm_clk')
1488 *
1489 * The Link value is transmitted in the Main Stream
1490 * Attributes and VB-ID.
1491 */
1492
1493#define PIPEA_DP_LINK_M 0x70060
1494#define PIPEB_DP_LINK_M 0x71060
1495#define PIPEA_DP_LINK_M_MASK (0xffffff)
1496
1497#define PIPEA_DP_LINK_N 0x70064
1498#define PIPEB_DP_LINK_N 0x71064
1499#define PIPEA_DP_LINK_N_MASK (0xffffff)
1500
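A worked instance of both ratios with illustrative figures (none taken from this patch): a 1920x1080@60 stream has a dot clock near 148500 kHz; at 3 bytes per pixel over 4 lanes of a 2.7 Gbps link (ls_clk = 270000 kHz), the sketch below yields GMCH M/N = 445500 / 1080000 and Link M/N = 148500 / 270000, both of which fit the 24-bit M and N fields without reduction:

	/* hypothetical helper; kHz in, raw M/N pairs out */
	static void compute_dp_m_n(int dot_khz, int bytes_pp, int link_khz, int lanes,
	                           u32 *data_m, u32 *data_n, u32 *link_m, u32 *link_n)
	{
	        *data_m = dot_khz * bytes_pp;   /* 148500 * 3 = 445500 */
	        *data_n = link_khz * lanes;     /* 270000 * 4 = 1080000 */
	        *link_m = dot_khz;              /* 148500 */
	        *link_n = link_khz;             /* 270000 */
	}
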
1330/* Display & cursor control */ 1501/* Display & cursor control */
1331 1502
1332/* Pipe A */ 1503/* Pipe A */
@@ -1410,9 +1581,25 @@
1410 1581
1411/* Cursor A & B regs */ 1582/* Cursor A & B regs */
1412#define CURACNTR 0x70080 1583#define CURACNTR 0x70080
1584/* Old style CUR*CNTR flags (desktop 8xx) */
1585#define CURSOR_ENABLE 0x80000000
1586#define CURSOR_GAMMA_ENABLE 0x40000000
1587#define CURSOR_STRIDE_MASK 0x30000000
1588#define CURSOR_FORMAT_SHIFT 24
1589#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
1590#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT)
1591#define CURSOR_FORMAT_3C (0x01 << CURSOR_FORMAT_SHIFT)
1592#define CURSOR_FORMAT_4C (0x02 << CURSOR_FORMAT_SHIFT)
1593#define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT)
1594#define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT)
1595/* New style CUR*CNTR flags */
1596#define CURSOR_MODE 0x27
1413#define CURSOR_MODE_DISABLE 0x00 1597#define CURSOR_MODE_DISABLE 0x00
1414#define CURSOR_MODE_64_32B_AX 0x07 1598#define CURSOR_MODE_64_32B_AX 0x07
1415#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) 1599#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
1600#define MCURSOR_PIPE_SELECT (1 << 28)
1601#define MCURSOR_PIPE_A 0x00
1602#define MCURSOR_PIPE_B (1 << 28)
1416#define MCURSOR_GAMMA_ENABLE (1 << 26) 1603#define MCURSOR_GAMMA_ENABLE (1 << 26)
1417#define CURABASE 0x70084 1604#define CURABASE 0x70084
1418#define CURAPOS 0x70088 1605#define CURAPOS 0x70088
@@ -1420,6 +1607,7 @@
1420#define CURSOR_POS_SIGN 0x8000 1607#define CURSOR_POS_SIGN 0x8000
1421#define CURSOR_X_SHIFT 0 1608#define CURSOR_X_SHIFT 0
1422#define CURSOR_Y_SHIFT 16 1609#define CURSOR_Y_SHIFT 16
1610#define CURSIZE 0x700a0
1423#define CURBCNTR 0x700c0 1611#define CURBCNTR 0x700c0
1424#define CURBBASE 0x700c4 1612#define CURBBASE 0x700c4
1425#define CURBPOS 0x700c8 1613#define CURBPOS 0x700c8
@@ -1499,4 +1687,444 @@
1499# define VGA_2X_MODE (1 << 30) 1687# define VGA_2X_MODE (1 << 30)
1500# define VGA_PIPE_B_SELECT (1 << 29) 1688# define VGA_PIPE_B_SELECT (1 << 29)
1501 1689
1690/* IGDNG */
1691
1692#define CPU_VGACNTRL 0x41000
1693
1694#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030
1695#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
1696#define DIGITAL_PORTA_SHORT_PULSE_2MS (0 << 2)
1697#define DIGITAL_PORTA_SHORT_PULSE_4_5MS (1 << 2)
1698#define DIGITAL_PORTA_SHORT_PULSE_6MS (2 << 2)
1699#define DIGITAL_PORTA_SHORT_PULSE_100MS (3 << 2)
1700#define DIGITAL_PORTA_NO_DETECT (0 << 0)
1701#define DIGITAL_PORTA_LONG_PULSE_DETECT_MASK (1 << 1)
1702#define DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK (1 << 0)
1703
1704/* refresh rate hardware control */
1705#define RR_HW_CTL 0x45300
1706#define RR_HW_LOW_POWER_FRAMES_MASK 0xff
1707#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
1708
1709#define FDI_PLL_BIOS_0 0x46000
1710#define FDI_PLL_BIOS_1 0x46004
1711#define FDI_PLL_BIOS_2 0x46008
1712#define DISPLAY_PORT_PLL_BIOS_0 0x4600c
1713#define DISPLAY_PORT_PLL_BIOS_1 0x46010
1714#define DISPLAY_PORT_PLL_BIOS_2 0x46014
1715
1716#define FDI_PLL_FREQ_CTL 0x46030
1717#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
1718#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
1719#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
1720
1721
1722#define PIPEA_DATA_M1 0x60030
1723#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
1724#define TU_SIZE_MASK 0x7e000000
1725#define PIPEA_DATA_M1_OFFSET 0
1726#define PIPEA_DATA_N1 0x60034
1727#define PIPEA_DATA_N1_OFFSET 0
1728
1729#define PIPEA_DATA_M2 0x60038
1730#define PIPEA_DATA_M2_OFFSET 0
1731#define PIPEA_DATA_N2 0x6003c
1732#define PIPEA_DATA_N2_OFFSET 0
1733
1734#define PIPEA_LINK_M1 0x60040
1735#define PIPEA_LINK_M1_OFFSET 0
1736#define PIPEA_LINK_N1 0x60044
1737#define PIPEA_LINK_N1_OFFSET 0
1738
1739#define PIPEA_LINK_M2 0x60048
1740#define PIPEA_LINK_M2_OFFSET 0
1741#define PIPEA_LINK_N2 0x6004c
1742#define PIPEA_LINK_N2_OFFSET 0
1743
1744/* PIPEB timing regs are the same, starting from 0x61000 */
1745
1746#define PIPEB_DATA_M1 0x61030
1747#define PIPEB_DATA_M1_OFFSET 0
1748#define PIPEB_DATA_N1 0x61034
1749#define PIPEB_DATA_N1_OFFSET 0
1750
1751#define PIPEB_DATA_M2 0x61038
1752#define PIPEB_DATA_M2_OFFSET 0
1753#define PIPEB_DATA_N2 0x6103c
1754#define PIPEB_DATA_N2_OFFSET 0
1755
1756#define PIPEB_LINK_M1 0x61040
1757#define PIPEB_LINK_M1_OFFSET 0
1758#define PIPEB_LINK_N1 0x61044
1759#define PIPEB_LINK_N1_OFFSET 0
1760
1761#define PIPEB_LINK_M2 0x61048
1762#define PIPEB_LINK_M2_OFFSET 0
1763#define PIPEB_LINK_N2 0x6104c
1764#define PIPEB_LINK_N2_OFFSET 0
1765
1766/* CPU panel fitter */
1767#define PFA_CTL_1 0x68080
1768#define PFB_CTL_1 0x68880
1769#define PF_ENABLE (1<<31)
1770
1771/* legacy palette */
1772#define LGC_PALETTE_A 0x4a000
1773#define LGC_PALETTE_B 0x4a800
1774
1775/* interrupts */
1776#define DE_MASTER_IRQ_CONTROL (1 << 31)
1777#define DE_SPRITEB_FLIP_DONE (1 << 29)
1778#define DE_SPRITEA_FLIP_DONE (1 << 28)
1779#define DE_PLANEB_FLIP_DONE (1 << 27)
1780#define DE_PLANEA_FLIP_DONE (1 << 26)
1781#define DE_PCU_EVENT (1 << 25)
1782#define DE_GTT_FAULT (1 << 24)
1783#define DE_POISON (1 << 23)
1784#define DE_PERFORM_COUNTER (1 << 22)
1785#define DE_PCH_EVENT (1 << 21)
1786#define DE_AUX_CHANNEL_A (1 << 20)
1787#define DE_DP_A_HOTPLUG (1 << 19)
1788#define DE_GSE (1 << 18)
1789#define DE_PIPEB_VBLANK (1 << 15)
1790#define DE_PIPEB_EVEN_FIELD (1 << 14)
1791#define DE_PIPEB_ODD_FIELD (1 << 13)
1792#define DE_PIPEB_LINE_COMPARE (1 << 12)
1793#define DE_PIPEB_VSYNC (1 << 11)
1794#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
1795#define DE_PIPEA_VBLANK (1 << 7)
1796#define DE_PIPEA_EVEN_FIELD (1 << 6)
1797#define DE_PIPEA_ODD_FIELD (1 << 5)
1798#define DE_PIPEA_LINE_COMPARE (1 << 4)
1799#define DE_PIPEA_VSYNC (1 << 3)
1800#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
1801
1802#define DEISR 0x44000
1803#define DEIMR 0x44004
1804#define DEIIR 0x44008
1805#define DEIER 0x4400c
1806
1807/* GT interrupt */
1808#define GT_SYNC_STATUS (1 << 2)
1809#define GT_USER_INTERRUPT (1 << 0)
1810
1811#define GTISR 0x44010
1812#define GTIMR 0x44014
1813#define GTIIR 0x44018
1814#define GTIER 0x4401c
1815
1816/* PCH */
1817
1818/* south display engine interrupt */
1819#define SDE_CRT_HOTPLUG (1 << 11)
1820#define SDE_PORTD_HOTPLUG (1 << 10)
1821#define SDE_PORTC_HOTPLUG (1 << 9)
1822#define SDE_PORTB_HOTPLUG (1 << 8)
1823#define SDE_SDVOB_HOTPLUG (1 << 6)
1824
1825#define SDEISR 0xc4000
1826#define SDEIMR 0xc4004
1827#define SDEIIR 0xc4008
1828#define SDEIER 0xc400c
1829
1830/* digital port hotplug */
1831#define PCH_PORT_HOTPLUG 0xc4030
1832#define PORTD_HOTPLUG_ENABLE (1 << 20)
1833#define PORTD_PULSE_DURATION_2ms (0)
1834#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
1835#define PORTD_PULSE_DURATION_6ms (2 << 18)
1836#define PORTD_PULSE_DURATION_100ms (3 << 18)
1837#define PORTD_HOTPLUG_NO_DETECT (0)
1838#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
1839#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
1840#define PORTC_HOTPLUG_ENABLE (1 << 12)
1841#define PORTC_PULSE_DURATION_2ms (0)
1842#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
1843#define PORTC_PULSE_DURATION_6ms (2 << 10)
1844#define PORTC_PULSE_DURATION_100ms (3 << 10)
1845#define PORTC_HOTPLUG_NO_DETECT (0)
1846#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
1847#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
1848#define PORTB_HOTPLUG_ENABLE (1 << 4)
1849#define PORTB_PULSE_DURATION_2ms (0)
1850#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
1851#define PORTB_PULSE_DURATION_6ms (2 << 2)
1852#define PORTB_PULSE_DURATION_100ms (3 << 2)
1853#define PORTB_HOTPLUG_NO_DETECT (0)
1854#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
1855#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
1856
1857#define PCH_GPIOA 0xc5010
1858#define PCH_GPIOB 0xc5014
1859#define PCH_GPIOC 0xc5018
1860#define PCH_GPIOD 0xc501c
1861#define PCH_GPIOE 0xc5020
1862#define PCH_GPIOF 0xc5024
1863
1864#define PCH_DPLL_A 0xc6014
1865#define PCH_DPLL_B 0xc6018
1866
1867#define PCH_FPA0 0xc6040
1868#define PCH_FPA1 0xc6044
1869#define PCH_FPB0 0xc6048
1870#define PCH_FPB1 0xc604c
1871
1872#define PCH_DPLL_TEST 0xc606c
1873
1874#define PCH_DREF_CONTROL 0xC6200
1875#define DREF_CONTROL_MASK 0x7fc3
1876#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13)
1877#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13)
1878#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3<<13)
1879#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13)
1880#define DREF_SSC_SOURCE_DISABLE (0<<11)
1881#define DREF_SSC_SOURCE_ENABLE (2<<11)
1882#define DREF_SSC_SOURCE_MASK (2<<11)
1883#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9)
1884#define DREF_NONSPREAD_CK505_ENABLE (1<<9)
1885#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9)
1886#define DREF_NONSPREAD_SOURCE_MASK (2<<9)
1887#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
1888#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
1889#define DREF_SSC4_DOWNSPREAD (0<<6)
1890#define DREF_SSC4_CENTERSPREAD (1<<6)
1891#define DREF_SSC1_DISABLE (0<<1)
1892#define DREF_SSC1_ENABLE (1<<1)
1893#define DREF_SSC4_DISABLE (0)
1894#define DREF_SSC4_ENABLE (1)
1895
1896#define PCH_RAWCLK_FREQ 0xc6204
1897#define FDL_TP1_TIMER_SHIFT 12
1898#define FDL_TP1_TIMER_MASK (3<<12)
1899#define FDL_TP2_TIMER_SHIFT 10
1900#define FDL_TP2_TIMER_MASK (3<<10)
1901#define RAWCLK_FREQ_MASK 0x3ff
1902
1903#define PCH_DPLL_TMR_CFG 0xc6208
1904
1905#define PCH_SSC4_PARMS 0xc6210
1906#define PCH_SSC4_AUX_PARMS 0xc6214
1907
1908/* transcoder */
1909
1910#define TRANS_HTOTAL_A 0xe0000
1911#define TRANS_HTOTAL_SHIFT 16
1912#define TRANS_HACTIVE_SHIFT 0
1913#define TRANS_HBLANK_A 0xe0004
1914#define TRANS_HBLANK_END_SHIFT 16
1915#define TRANS_HBLANK_START_SHIFT 0
1916#define TRANS_HSYNC_A 0xe0008
1917#define TRANS_HSYNC_END_SHIFT 16
1918#define TRANS_HSYNC_START_SHIFT 0
1919#define TRANS_VTOTAL_A 0xe000c
1920#define TRANS_VTOTAL_SHIFT 16
1921#define TRANS_VACTIVE_SHIFT 0
1922#define TRANS_VBLANK_A 0xe0010
1923#define TRANS_VBLANK_END_SHIFT 16
1924#define TRANS_VBLANK_START_SHIFT 0
1925#define TRANS_VSYNC_A 0xe0014
1926#define TRANS_VSYNC_END_SHIFT 16
1927#define TRANS_VSYNC_START_SHIFT 0
1928
1929#define TRANSA_DATA_M1 0xe0030
1930#define TRANSA_DATA_N1 0xe0034
1931#define TRANSA_DATA_M2 0xe0038
1932#define TRANSA_DATA_N2 0xe003c
1933#define TRANSA_DP_LINK_M1 0xe0040
1934#define TRANSA_DP_LINK_N1 0xe0044
1935#define TRANSA_DP_LINK_M2 0xe0048
1936#define TRANSA_DP_LINK_N2 0xe004c
1937
1938#define TRANS_HTOTAL_B 0xe1000
1939#define TRANS_HBLANK_B 0xe1004
1940#define TRANS_HSYNC_B 0xe1008
1941#define TRANS_VTOTAL_B 0xe100c
1942#define TRANS_VBLANK_B 0xe1010
1943#define TRANS_VSYNC_B 0xe1014
1944
1945#define TRANSB_DATA_M1 0xe1030
1946#define TRANSB_DATA_N1 0xe1034
1947#define TRANSB_DATA_M2 0xe1038
1948#define TRANSB_DATA_N2 0xe103c
1949#define TRANSB_DP_LINK_M1 0xe1040
1950#define TRANSB_DP_LINK_N1 0xe1044
1951#define TRANSB_DP_LINK_M2 0xe1048
1952#define TRANSB_DP_LINK_N2 0xe104c
1953
1954#define TRANSACONF 0xf0008
1955#define TRANSBCONF 0xf1008
1956#define TRANS_DISABLE (0<<31)
1957#define TRANS_ENABLE (1<<31)
1958#define TRANS_STATE_MASK (1<<30)
1959#define TRANS_STATE_DISABLE (0<<30)
1960#define TRANS_STATE_ENABLE (1<<30)
1961#define TRANS_FSYNC_DELAY_HB1 (0<<27)
1962#define TRANS_FSYNC_DELAY_HB2 (1<<27)
1963#define TRANS_FSYNC_DELAY_HB3 (2<<27)
1964#define TRANS_FSYNC_DELAY_HB4 (3<<27)
1965#define TRANS_DP_AUDIO_ONLY (1<<26)
1966#define TRANS_DP_VIDEO_AUDIO (0<<26)
1967#define TRANS_PROGRESSIVE (0<<21)
1968#define TRANS_8BPC (0<<5)
1969#define TRANS_10BPC (1<<5)
1970#define TRANS_6BPC (2<<5)
1971#define TRANS_12BPC (3<<5)
1972
1973#define FDI_RXA_CHICKEN 0xc200c
1974#define FDI_RXB_CHICKEN 0xc2010
1975#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1)
1976
1977/* CPU: FDI_TX */
1978#define FDI_TXA_CTL 0x60100
1979#define FDI_TXB_CTL 0x61100
1980#define FDI_TX_DISABLE (0<<31)
1981#define FDI_TX_ENABLE (1<<31)
1982#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
1983#define FDI_LINK_TRAIN_PATTERN_2 (1<<28)
1984#define FDI_LINK_TRAIN_PATTERN_IDLE (2<<28)
1985#define FDI_LINK_TRAIN_NONE (3<<28)
1986#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0<<25)
1987#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1<<25)
1988#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2<<25)
1989#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3<<25)
1990#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22)
1991#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
1992#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
1993#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
1994#define FDI_DP_PORT_WIDTH_X1 (0<<19)
1995#define FDI_DP_PORT_WIDTH_X2 (1<<19)
1996#define FDI_DP_PORT_WIDTH_X3 (2<<19)
1997#define FDI_DP_PORT_WIDTH_X4 (3<<19)
1998#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
1999/* IGDNG: hardwired to 1 */
2000#define FDI_TX_PLL_ENABLE (1<<14)
2001/* both Tx and Rx */
2002#define FDI_SCRAMBLING_ENABLE (0<<7)
2003#define FDI_SCRAMBLING_DISABLE (1<<7)
2004
2005/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
2006#define FDI_RXA_CTL 0xf000c
2007#define FDI_RXB_CTL 0xf100c
2008#define FDI_RX_ENABLE (1<<31)
2009#define FDI_RX_DISABLE (0<<31)
2010/* train, dp width same as FDI_TX */
2011#define FDI_DP_PORT_WIDTH_X8 (7<<19)
2012#define FDI_8BPC (0<<16)
2013#define FDI_10BPC (1<<16)
2014#define FDI_6BPC (2<<16)
2015#define FDI_12BPC (3<<16)
2016#define FDI_LINK_REVERSE_OVERWRITE (1<<15)
2017#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
2018#define FDI_RX_PLL_ENABLE (1<<13)
2019#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
2020#define FDI_FE_ERR_CORRECT_ENABLE (1<<10)
2021#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
2022#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
2023#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
2024#define FDI_SEL_RAWCLK (0<<4)
2025#define FDI_SEL_PCDCLK (1<<4)
2026
2027#define FDI_RXA_MISC 0xf0010
2028#define FDI_RXB_MISC 0xf1010
2029#define FDI_RXA_TUSIZE1 0xf0030
2030#define FDI_RXA_TUSIZE2 0xf0038
2031#define FDI_RXB_TUSIZE1 0xf1030
2032#define FDI_RXB_TUSIZE2 0xf1038
2033
2034/* FDI_RX interrupt register format */
2035#define FDI_RX_INTER_LANE_ALIGN (1<<10)
2036#define FDI_RX_SYMBOL_LOCK (1<<9) /* train 2 */
2037#define FDI_RX_BIT_LOCK (1<<8) /* train 1 */
2038#define FDI_RX_TRAIN_PATTERN_2_FAIL (1<<7)
2039#define FDI_RX_FS_CODE_ERR (1<<6)
2040#define FDI_RX_FE_CODE_ERR (1<<5)
2041#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1<<4)
2042#define FDI_RX_HDCP_LINK_FAIL (1<<3)
2043#define FDI_RX_PIXEL_FIFO_OVERFLOW (1<<2)
2044#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
2045#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
2046
2047#define FDI_RXA_IIR 0xf0014
2048#define FDI_RXA_IMR 0xf0018
2049#define FDI_RXB_IIR 0xf1014
2050#define FDI_RXB_IMR 0xf1018
2051
2052#define FDI_PLL_CTL_1 0xfe000
2053#define FDI_PLL_CTL_2 0xfe004
2054
2055/* CRT */
2056#define PCH_ADPA 0xe1100
2057#define ADPA_TRANS_SELECT_MASK (1<<30)
2058#define ADPA_TRANS_A_SELECT 0
2059#define ADPA_TRANS_B_SELECT (1<<30)
2060#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
2061#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
2062#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
2063#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
2064#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
2065#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
2066#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
2067#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
2068#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
2069#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
2070#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
2071#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
2072#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
2073#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
2074#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
2075#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
2076#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
2077#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
2078#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
2079
2080/* or SDVOB */
2081#define HDMIB 0xe1140
2082#define PORT_ENABLE (1 << 31)
2083#define TRANSCODER_A (0)
2084#define TRANSCODER_B (1 << 30)
2085#define COLOR_FORMAT_8bpc (0)
2086#define COLOR_FORMAT_12bpc (3 << 26)
2087#define SDVOB_HOTPLUG_ENABLE (1 << 23)
2088#define SDVO_ENCODING (0)
2089#define TMDS_ENCODING (2 << 10)
2090#define NULL_PACKET_VSYNC_ENABLE (1 << 9)
2091#define SDVOB_BORDER_ENABLE (1 << 7)
2092#define AUDIO_ENABLE (1 << 6)
2093#define VSYNC_ACTIVE_HIGH (1 << 4)
2094#define HSYNC_ACTIVE_HIGH (1 << 3)
2095#define PORT_DETECTED (1 << 2)
2096
2097#define HDMIC 0xe1150
2098#define HDMID 0xe1160
2099
2100#define PCH_LVDS 0xe1180
2101#define LVDS_DETECTED (1 << 1)
2102
2103#define BLC_PWM_CPU_CTL2 0x48250
2104#define PWM_ENABLE (1 << 31)
2105#define PWM_PIPE_A (0 << 29)
2106#define PWM_PIPE_B (1 << 29)
2107#define BLC_PWM_CPU_CTL 0x48254
2108
2109#define BLC_PWM_PCH_CTL1 0xc8250
2110#define PWM_PCH_ENABLE (1 << 31)
2111#define PWM_POLARITY_ACTIVE_LOW (1 << 29)
2112#define PWM_POLARITY_ACTIVE_HIGH (0 << 29)
2113#define PWM_POLARITY_ACTIVE_LOW2 (1 << 28)
2114#define PWM_POLARITY_ACTIVE_HIGH2 (0 << 28)
2115
2116#define BLC_PWM_PCH_CTL2 0xc8254
2117
2118#define PCH_PP_STATUS 0xc7200
2119#define PCH_PP_CONTROL 0xc7204
2120#define EDP_FORCE_VDD (1 << 3)
2121#define EDP_BLC_ENABLE (1 << 2)
2122#define PANEL_POWER_RESET (1 << 1)
2123#define PANEL_POWER_OFF (0 << 0)
2124#define PANEL_POWER_ON (1 << 0)
2125#define PCH_PP_ON_DELAYS 0xc7208
2126#define EDP_PANEL (1 << 30)
2127#define PCH_PP_OFF_DELAYS 0xc720c
2128#define PCH_PP_DIVISOR 0xc7210
2129
1502#endif /* _I915_REG_H_ */ 2130#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index ce8a21344a7..a98e2831ed3 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -295,6 +295,16 @@ int i915_save_state(struct drm_device *dev)
295 i915_save_palette(dev, PIPE_B); 295 i915_save_palette(dev, PIPE_B);
296 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); 296 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
297 297
298 /* Cursor state */
299 dev_priv->saveCURACNTR = I915_READ(CURACNTR);
300 dev_priv->saveCURAPOS = I915_READ(CURAPOS);
301 dev_priv->saveCURABASE = I915_READ(CURABASE);
302 dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
303 dev_priv->saveCURBPOS = I915_READ(CURBPOS);
304 dev_priv->saveCURBBASE = I915_READ(CURBBASE);
305 if (!IS_I9XX(dev))
306 dev_priv->saveCURSIZE = I915_READ(CURSIZE);
307
298 /* CRT state */ 308 /* CRT state */
299 dev_priv->saveADPA = I915_READ(ADPA); 309 dev_priv->saveADPA = I915_READ(ADPA);
300 310
@@ -480,6 +490,16 @@ int i915_restore_state(struct drm_device *dev)
480 I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); 490 I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
481 I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); 491 I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
482 492
493 /* Cursor state */
494 I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
495 I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
496 I915_WRITE(CURABASE, dev_priv->saveCURABASE);
497 I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
498 I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
499 I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
500 if (!IS_I9XX(dev))
501 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
502
483 /* CRT state */ 503 /* CRT state */
484 I915_WRITE(ADPA, dev_priv->saveADPA); 504 I915_WRITE(ADPA, dev_priv->saveADPA);
485 505
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index fc28e2bbd54..754dd22fdd7 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -30,6 +30,8 @@
30#include "i915_drv.h" 30#include "i915_drv.h"
31#include "intel_bios.h" 31#include "intel_bios.h"
32 32
33#define SLAVE_ADDR1 0x70
34#define SLAVE_ADDR2 0x72
33 35
34static void * 36static void *
35find_section(struct bdb_header *bdb, int section_id) 37find_section(struct bdb_header *bdb, int section_id)
@@ -57,9 +59,43 @@ find_section(struct bdb_header *bdb, int section_id)
57 return NULL; 59 return NULL;
58} 60}
59 61
60/* Try to find panel data */
61static void 62static void
62parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb) 63fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
64 struct lvds_dvo_timing *dvo_timing)
65{
66 panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
67 dvo_timing->hactive_lo;
68 panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
69 ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
70 panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
71 dvo_timing->hsync_pulse_width;
72 panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
73 ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
74
75 panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
76 dvo_timing->vactive_lo;
77 panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
78 dvo_timing->vsync_off;
79 panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
80 dvo_timing->vsync_pulse_width;
81 panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
82 ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
83 panel_fixed_mode->clock = dvo_timing->clock * 10;
84 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
85
86 /* Some VBTs have bogus h/vtotal values */
87 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
88 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
89 if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
90 panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
91
92 drm_mode_set_name(panel_fixed_mode);
93}
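
A worked decode of one field pair (values illustrative): with hactive_hi = 0x05 and hactive_lo = 0x00, hdisplay = (0x05 << 8) | 0x00 = 1280; likewise the DTD clock field is in 10 kHz units, which is why the code multiplies by 10 to get the kHz value drm_display_mode expects. As a standalone sketch (the two-byte struct is a stand-in for the real lvds_dvo_timing layout):

	struct dtd_hactive {
	        unsigned char hactive_lo;
	        unsigned char hactive_hi;
	};

	static int dtd_hdisplay(const struct dtd_hactive *t)
	{
	        return (t->hactive_hi << 8) | t->hactive_lo;    /* 0x05, 0x00 -> 1280 */
	}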
94
95/* Try to find integrated panel data */
96static void
97parse_lfp_panel_data(struct drm_i915_private *dev_priv,
98 struct bdb_header *bdb)
63{ 99{
64 struct bdb_lvds_options *lvds_options; 100 struct bdb_lvds_options *lvds_options;
65 struct bdb_lvds_lfp_data *lvds_lfp_data; 101 struct bdb_lvds_lfp_data *lvds_lfp_data;
@@ -91,38 +127,45 @@ parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
91 panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode), 127 panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode),
92 DRM_MEM_DRIVER); 128 DRM_MEM_DRIVER);
93 129
94 panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) | 130 fill_detail_timing_data(panel_fixed_mode, dvo_timing);
95 dvo_timing->hactive_lo;
96 panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
97 ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
98 panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
99 dvo_timing->hsync_pulse_width;
100 panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
101 ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
102 131
103 panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) | 132 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
104 dvo_timing->vactive_lo;
105 panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
106 dvo_timing->vsync_off;
107 panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
108 dvo_timing->vsync_pulse_width;
109 panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
110 ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
111 panel_fixed_mode->clock = dvo_timing->clock * 10;
112 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
113 133
114 /* Some VBTs have bogus h/vtotal values */ 134 DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
115 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) 135 drm_mode_debug_printmodeline(panel_fixed_mode);
116 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
117 if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
118 panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
119 136
120 drm_mode_set_name(panel_fixed_mode); 137 return;
138}
121 139
122 dev_priv->vbt_mode = panel_fixed_mode; 140/* Try to find sdvo panel data */
141static void
142parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
143 struct bdb_header *bdb)
144{
145 struct bdb_sdvo_lvds_options *sdvo_lvds_options;
146 struct lvds_dvo_timing *dvo_timing;
147 struct drm_display_mode *panel_fixed_mode;
123 148
124 DRM_DEBUG("Found panel mode in BIOS VBT tables:\n"); 149 dev_priv->sdvo_lvds_vbt_mode = NULL;
125 drm_mode_debug_printmodeline(panel_fixed_mode); 150
151 sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
152 if (!sdvo_lvds_options)
153 return;
154
155 dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
156 if (!dvo_timing)
157 return;
158
159 panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode),
160 DRM_MEM_DRIVER);
161
162 if (!panel_fixed_mode)
163 return;
164
165 fill_detail_timing_data(panel_fixed_mode,
166 dvo_timing + sdvo_lvds_options->panel_type);
167
168 dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
126 169
127 return; 170 return;
128} 171}
@@ -152,6 +195,88 @@ parse_general_features(struct drm_i915_private *dev_priv,
152 } 195 }
153} 196}
154 197
198static void
199parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
200 struct bdb_header *bdb)
201{
202 struct sdvo_device_mapping *p_mapping;
203 struct bdb_general_definitions *p_defs;
204 struct child_device_config *p_child;
205 int i, child_device_num, count;
206 u16 block_size, *block_ptr;
207
208 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
209 if (!p_defs) {
210 DRM_DEBUG("No general definition block is found\n");
211 return;
212 }
213	/* Check whether the size of the child device meets expectations.
214	 * If the child device size obtained from the general definition
215	 * block differs from sizeof(struct child_device_config), skip the
216	 * parsing of the SDVO device info.
217	 */
218 if (p_defs->child_dev_size != sizeof(*p_child)) {
219		/* mismatched child dev size; ignore it */
220		DRM_DEBUG("mismatched child device size found. Invalid.\n");
221 return;
222 }
223 /* get the block size of general definitions */
224 block_ptr = (u16 *)((char *)p_defs - 2);
225 block_size = *block_ptr;
226	/* get the number of child devices */
227 child_device_num = (block_size - sizeof(*p_defs)) /
228 sizeof(*p_child);
229 count = 0;
230 for (i = 0; i < child_device_num; i++) {
231 p_child = &(p_defs->devices[i]);
232 if (!p_child->device_type) {
233 /* skip the device block if device type is invalid */
234 continue;
235 }
236 if (p_child->slave_addr != SLAVE_ADDR1 &&
237 p_child->slave_addr != SLAVE_ADDR2) {
238 /*
239 * If the slave address is neither 0x70 nor 0x72,
240 * it is not a SDVO device. Skip it.
241 */
242 continue;
243 }
244 if (p_child->dvo_port != DEVICE_PORT_DVOB &&
245 p_child->dvo_port != DEVICE_PORT_DVOC) {
246 /* skip the incorrect SDVO port */
247 DRM_DEBUG("Incorrect SDVO port. Skip it \n");
248 continue;
249 }
250 DRM_DEBUG("the SDVO device with slave addr %2x is found on "
251 "%s port\n",
252 p_child->slave_addr,
253 (p_child->dvo_port == DEVICE_PORT_DVOB) ?
254 "SDVOB" : "SDVOC");
255 p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
256 if (!p_mapping->initialized) {
257 p_mapping->dvo_port = p_child->dvo_port;
258 p_mapping->slave_addr = p_child->slave_addr;
259 p_mapping->dvo_wiring = p_child->dvo_wiring;
260 p_mapping->initialized = 1;
261 } else {
262 DRM_DEBUG("Maybe one SDVO port is shared by "
263 "two SDVO device.\n");
264 }
265 if (p_child->slave2_addr) {
266 /* Maybe this is a SDVO device with multiple inputs */
267 /* And the mapping info is not added */
268 DRM_DEBUG("there exists the slave2_addr. Maybe this "
269 "is a SDVO device with multiple inputs.\n");
270 }
271 count++;
272 }
273
274 if (!count) {
275 /* No SDVO device info is found */
276 DRM_DEBUG("No SDVO device info is found in VBT\n");
277 }
278 return;
279}
155/** 280/**
156 * intel_init_bios - initialize VBIOS settings & find VBT 281 * intel_init_bios - initialize VBIOS settings & find VBT
157 * @dev: DRM device 282 * @dev: DRM device
@@ -199,8 +324,9 @@ intel_init_bios(struct drm_device *dev)
199 324
200 /* Grab useful general definitions */ 325 /* Grab useful general definitions */
201 parse_general_features(dev_priv, bdb); 326 parse_general_features(dev_priv, bdb);
202 parse_panel_data(dev_priv, bdb); 327 parse_lfp_panel_data(dev_priv, bdb);
203 328 parse_sdvo_panel_data(dev_priv, bdb);
329 parse_sdvo_device_mapping(dev_priv, bdb);
204 pci_unmap_rom(pdev, bios); 330 pci_unmap_rom(pdev, bios);
205 331
206 return 0; 332 return 0;
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index de621aad85b..fe72e1c225d 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -135,6 +135,86 @@ struct bdb_general_features {
135 u8 rsvd11:6; /* finish byte */ 135 u8 rsvd11:6; /* finish byte */
136} __attribute__((packed)); 136} __attribute__((packed));
137 137
138/* pre-915 */
139#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
140#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
141#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
142#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
143
144/* Pre 915 */
145#define DEVICE_TYPE_NONE 0x00
146#define DEVICE_TYPE_CRT 0x01
147#define DEVICE_TYPE_TV 0x09
148#define DEVICE_TYPE_EFP 0x12
149#define DEVICE_TYPE_LFP 0x22
150/* On 915+ */
151#define DEVICE_TYPE_CRT_DPMS 0x6001
152#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
153#define DEVICE_TYPE_TV_COMPOSITE 0x0209
154#define DEVICE_TYPE_TV_MACROVISION 0x0289
155#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
156#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
157#define DEVICE_TYPE_TV_SCART 0x0209
158#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
159#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
160#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
161#define DEVICE_TYPE_EFP_DVI_I 0x6053
162#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
163#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
164#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
165#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
166#define DEVICE_TYPE_LFP_PANELLINK 0x5012
167#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
168#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
169#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
170#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
171
172#define DEVICE_CFG_NONE 0x00
173#define DEVICE_CFG_12BIT_DVOB 0x01
174#define DEVICE_CFG_12BIT_DVOC 0x02
175#define DEVICE_CFG_24BIT_DVOBC 0x09
176#define DEVICE_CFG_24BIT_DVOCB 0x0a
177#define DEVICE_CFG_DUAL_DVOB 0x11
178#define DEVICE_CFG_DUAL_DVOC 0x12
179#define DEVICE_CFG_DUAL_DVOBC 0x13
180#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
181#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
182
183#define DEVICE_WIRE_NONE 0x00
184#define DEVICE_WIRE_DVOB 0x01
185#define DEVICE_WIRE_DVOC 0x02
186#define DEVICE_WIRE_DVOBC 0x03
187#define DEVICE_WIRE_DVOBB 0x05
188#define DEVICE_WIRE_DVOCC 0x06
189#define DEVICE_WIRE_DVOB_MASTER 0x0d
190#define DEVICE_WIRE_DVOC_MASTER 0x0e
191
192#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
193#define DEVICE_PORT_DVOB 0x01
194#define DEVICE_PORT_DVOC 0x02
195
196struct child_device_config {
197 u16 handle;
198 u16 device_type;
199 u8 device_id[10]; /* See DEVICE_TYPE_* above */
200 u16 addin_offset;
201 u8 dvo_port; /* See Device_PORT_* above */
202 u8 i2c_pin;
203 u8 slave_addr;
204 u8 ddc_pin;
205 u16 edid_ptr;
206 u8 dvo_cfg; /* See DEVICE_CFG_* above */
207 u8 dvo2_port;
208 u8 i2c2_pin;
209 u8 slave2_addr;
210 u8 ddc2_pin;
211 u8 capabilities;
212 u8 dvo_wiring;/* See DEVICE_WIRE_* above */
213 u8 dvo2_wiring;
214 u16 extended_type;
215 u8 dvo_function;
216} __attribute__((packed));
217
138struct bdb_general_definitions { 218struct bdb_general_definitions {
139 /* DDC GPIO */ 219 /* DDC GPIO */
140 u8 crt_ddc_gmbus_pin; 220 u8 crt_ddc_gmbus_pin;
@@ -149,14 +229,19 @@ struct bdb_general_definitions {
149 u8 boot_display[2]; 229 u8 boot_display[2];
150 u8 child_dev_size; 230 u8 child_dev_size;
151 231
152 /* device info */ 232 /*
153 u8 tv_or_lvds_info[33]; 233 * Device info:
154 u8 dev1[33]; 234 * If TV is present, it'll be at devices[0].
155 u8 dev2[33]; 235 * LVDS will be next, either devices[0] or [1], if present.
156 u8 dev3[33]; 236 * On some platforms the number of devices is 6, but it could be as
157 u8 dev4[33]; 237 * few as 4 if both TV and LVDS are missing.
158 /* may be another device block here on some platforms */ 238 * The device count depends on the size of the general definition
159}; 239 * block and is obtained with the following formula:
240 * number = (block_size - sizeof(bdb_general_definitions))/
241 * sizeof(child_device_config);
242 */
243 struct child_device_config devices[0];
244} __attribute__((packed));
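The formula in the comment above reduces to a one-line computation. A minimal sketch, assuming block_size has already been read out of the VBT (the helper name is illustrative, not part of this patch):

	static int child_device_num(int block_size)
	{
		return (block_size - sizeof(struct bdb_general_definitions)) /
			sizeof(struct child_device_config);
	}

Note that the packed child_device_config struct is exactly 33 bytes, matching the fixed u8 ...[33] members it replaces, so six entries reproduce the old tv_or_lvds_info/dev1..dev4 layout plus one spare.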
160 245
161struct bdb_lvds_options { 246struct bdb_lvds_options {
162 u8 panel_type; 247 u8 panel_type;
@@ -279,6 +364,23 @@ struct vch_bdb_22 {
279 struct vch_panel_data panels[16]; 364 struct vch_panel_data panels[16];
280} __attribute__((packed)); 365} __attribute__((packed));
281 366
367struct bdb_sdvo_lvds_options {
368 u8 panel_backlight;
369 u8 h40_set_panel_type;
370 u8 panel_type;
371 u8 ssc_clk_freq;
372 u16 als_low_trip;
373 u16 als_high_trip;
374 u8 sclalarcoeff_tab_row_num;
375 u8 sclalarcoeff_tab_row_size;
376 u8 coefficient[8];
377 u8 panel_misc_bits_1;
378 u8 panel_misc_bits_2;
379 u8 panel_misc_bits_3;
380 u8 panel_misc_bits_4;
381} __attribute__((packed));
382
383
282bool intel_init_bios(struct drm_device *dev); 384bool intel_init_bios(struct drm_device *dev);
283 385
284/* 386/*
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 19148c3df63..6de97fc6602 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -37,9 +37,14 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
37{ 37{
38 struct drm_device *dev = encoder->dev; 38 struct drm_device *dev = encoder->dev;
39 struct drm_i915_private *dev_priv = dev->dev_private; 39 struct drm_i915_private *dev_priv = dev->dev_private;
40 u32 temp; 40 u32 temp, reg;
41 41
42 temp = I915_READ(ADPA); 42 if (IS_IGDNG(dev))
43 reg = PCH_ADPA;
44 else
45 reg = ADPA;
46
47 temp = I915_READ(reg);
43 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); 48 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
44 temp |= ADPA_DAC_ENABLE; 49 temp |= ADPA_DAC_ENABLE;
45 50
@@ -58,7 +63,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
58 break; 63 break;
59 } 64 }
60 65
61 I915_WRITE(ADPA, temp); 66 I915_WRITE(reg, temp);
62} 67}
63 68
64static int intel_crt_mode_valid(struct drm_connector *connector, 69static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -101,17 +106,23 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
101 struct drm_i915_private *dev_priv = dev->dev_private; 106 struct drm_i915_private *dev_priv = dev->dev_private;
102 int dpll_md_reg; 107 int dpll_md_reg;
103 u32 adpa, dpll_md; 108 u32 adpa, dpll_md;
109 u32 adpa_reg;
104 110
105 if (intel_crtc->pipe == 0) 111 if (intel_crtc->pipe == 0)
106 dpll_md_reg = DPLL_A_MD; 112 dpll_md_reg = DPLL_A_MD;
107 else 113 else
108 dpll_md_reg = DPLL_B_MD; 114 dpll_md_reg = DPLL_B_MD;
109 115
116 if (IS_IGDNG(dev))
117 adpa_reg = PCH_ADPA;
118 else
119 adpa_reg = ADPA;
120
110 /* 121 /*
111 * Disable separate mode multiplier used when cloning SDVO to CRT 122 * Disable separate mode multiplier used when cloning SDVO to CRT
112 * XXX this needs to be adjusted when we really are cloning 123 * XXX this needs to be adjusted when we really are cloning
113 */ 124 */
114 if (IS_I965G(dev)) { 125 if (IS_I965G(dev) && !IS_IGDNG(dev)) {
115 dpll_md = I915_READ(dpll_md_reg); 126 dpll_md = I915_READ(dpll_md_reg);
116 I915_WRITE(dpll_md_reg, 127 I915_WRITE(dpll_md_reg,
117 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); 128 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -125,13 +136,53 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
125 136
126 if (intel_crtc->pipe == 0) { 137 if (intel_crtc->pipe == 0) {
127 adpa |= ADPA_PIPE_A_SELECT; 138 adpa |= ADPA_PIPE_A_SELECT;
128 I915_WRITE(BCLRPAT_A, 0); 139 if (!IS_IGDNG(dev))
140 I915_WRITE(BCLRPAT_A, 0);
129 } else { 141 } else {
130 adpa |= ADPA_PIPE_B_SELECT; 142 adpa |= ADPA_PIPE_B_SELECT;
131 I915_WRITE(BCLRPAT_B, 0); 143 if (!IS_IGDNG(dev))
144 I915_WRITE(BCLRPAT_B, 0);
132 } 145 }
133 146
134 I915_WRITE(ADPA, adpa); 147 I915_WRITE(adpa_reg, adpa);
148}
149
150static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
151{
152 struct drm_device *dev = connector->dev;
153 struct drm_i915_private *dev_priv = dev->dev_private;
154 u32 adpa, temp;
155 bool ret;
156
157 temp = adpa = I915_READ(PCH_ADPA);
158
159 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
160
161 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
162 ADPA_CRT_HOTPLUG_WARMUP_10MS |
163 ADPA_CRT_HOTPLUG_SAMPLE_4S |
164 ADPA_CRT_HOTPLUG_VOLTAGE_50 | /* default */
165 ADPA_CRT_HOTPLUG_VOLREF_325MV |
166 ADPA_CRT_HOTPLUG_ENABLE |
167 ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
168
169 DRM_DEBUG("pch crt adpa 0x%x\n", adpa);
170 I915_WRITE(PCH_ADPA, adpa);
171
172 /* This delay might not be needed, as it is not specified in the spec... */
173 udelay(1000);
174
175 /* Check the status to see if both blue and green are on now */
176 adpa = I915_READ(PCH_ADPA);
177 if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) ==
178 ADPA_CRT_HOTPLUG_MONITOR_COLOR)
179 ret = true;
180 else
181 ret = false;
182
183 /* restore the original register value */
184 I915_WRITE(PCH_ADPA, temp);
185 return ret;
135} 186}
136 187
137/** 188/**
@@ -148,6 +199,10 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
148 struct drm_i915_private *dev_priv = dev->dev_private; 199 struct drm_i915_private *dev_priv = dev->dev_private;
149 u32 hotplug_en; 200 u32 hotplug_en;
150 int i, tries = 0; 201 int i, tries = 0;
202
203 if (IS_IGDNG(dev))
204 return intel_igdng_crt_detect_hotplug(connector);
205
151 /* 206 /*
152 * On 4 series desktop, CRT detect sequence need to be done twice 207 * On 4 series desktop, CRT detect sequence need to be done twice
153 * to get a reliable result. 208 * to get a reliable result.
@@ -198,9 +253,142 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
198 return intel_ddc_probe(intel_output); 253 return intel_ddc_probe(intel_output);
199} 254}
200 255
256static enum drm_connector_status
257intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
258{
259 struct drm_encoder *encoder = &intel_output->enc;
260 struct drm_device *dev = encoder->dev;
261 struct drm_i915_private *dev_priv = dev->dev_private;
262 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
263 uint32_t pipe = intel_crtc->pipe;
264 uint32_t save_bclrpat;
265 uint32_t save_vtotal;
266 uint32_t vtotal, vactive;
267 uint32_t vsample;
268 uint32_t vblank, vblank_start, vblank_end;
269 uint32_t dsl;
270 uint32_t bclrpat_reg;
271 uint32_t vtotal_reg;
272 uint32_t vblank_reg;
273 uint32_t vsync_reg;
274 uint32_t pipeconf_reg;
275 uint32_t pipe_dsl_reg;
276 uint8_t st00;
277 enum drm_connector_status status;
278
279 if (pipe == 0) {
280 bclrpat_reg = BCLRPAT_A;
281 vtotal_reg = VTOTAL_A;
282 vblank_reg = VBLANK_A;
283 vsync_reg = VSYNC_A;
284 pipeconf_reg = PIPEACONF;
285 pipe_dsl_reg = PIPEADSL;
286 } else {
287 bclrpat_reg = BCLRPAT_B;
288 vtotal_reg = VTOTAL_B;
289 vblank_reg = VBLANK_B;
290 vsync_reg = VSYNC_B;
291 pipeconf_reg = PIPEBCONF;
292 pipe_dsl_reg = PIPEBDSL;
293 }
294
295 save_bclrpat = I915_READ(bclrpat_reg);
296 save_vtotal = I915_READ(vtotal_reg);
297 vblank = I915_READ(vblank_reg);
298
299 vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
300 vactive = (save_vtotal & 0x7ff) + 1;
301
302 vblank_start = (vblank & 0xfff) + 1;
303 vblank_end = ((vblank >> 16) & 0xfff) + 1;
304
305 /* Set the border color to purple. */
306 I915_WRITE(bclrpat_reg, 0x500050);
307
308 if (IS_I9XX(dev)) {
309 uint32_t pipeconf = I915_READ(pipeconf_reg);
310 I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
311 /* Wait for the next vblank to substitute
312 * the border color for the pixel color info */
313 intel_wait_for_vblank(dev);
314 st00 = I915_READ8(VGA_MSR_WRITE);
315 status = ((st00 & (1 << 4)) != 0) ?
316 connector_status_connected :
317 connector_status_disconnected;
318
319 I915_WRITE(pipeconf_reg, pipeconf);
320 } else {
321 bool restore_vblank = false;
322 int count, detect;
323
324 /*
325 * If there isn't any border, add some.
326 * Yes, this will flicker
327 */
328 if (vblank_start <= vactive && vblank_end >= vtotal) {
329 uint32_t vsync = I915_READ(vsync_reg);
330 uint32_t vsync_start = (vsync & 0xffff) + 1;
331
332 vblank_start = vsync_start;
333 I915_WRITE(vblank_reg,
334 (vblank_start - 1) |
335 ((vblank_end - 1) << 16));
336 restore_vblank = true;
337 }
338 /* sample in the vertical border, selecting the larger one */
339 if (vblank_start - vactive >= vtotal - vblank_end)
340 vsample = (vblank_start + vactive) >> 1;
341 else
342 vsample = (vtotal + vblank_end) >> 1;
343
344 /*
345 * Wait for the border to be displayed
346 */
347 while (I915_READ(pipe_dsl_reg) >= vactive)
348 ;
349 while ((dsl = I915_READ(pipe_dsl_reg)) <= vsample)
350 ;
351 /*
352 * Watch ST00 for an entire scanline
353 */
354 detect = 0;
355 count = 0;
356 do {
357 count++;
358 /* Read the ST00 VGA status register */
359 st00 = I915_READ8(VGA_MSR_WRITE);
360 if (st00 & (1 << 4))
361 detect++;
362 } while ((I915_READ(pipe_dsl_reg) == dsl));
363
364 /* restore vblank if necessary */
365 if (restore_vblank)
366 I915_WRITE(vblank_reg, vblank);
367 /*
368 * If more than 3/4 of the scanline detected a monitor,
369 * then it is assumed to be present. This works even on i830,
370 * where there isn't any way to force the border color across
371 * the screen
372 */
373 status = detect * 4 > count * 3 ?
374 connector_status_connected :
375 connector_status_disconnected;
376 }
377
378 /* Restore previous settings */
379 I915_WRITE(bclrpat_reg, save_bclrpat);
380
381 return status;
382}
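The 3/4 threshold above is evaluated entirely in integer math: detect/count > 3/4 is rearranged to detect * 4 > count * 3, so no floating point is needed in the kernel. A standalone restatement of just that predicate (the function name is illustrative):

	static bool monitor_seen(int detect, int count)
	{
		/* true when more than 3/4 of the samples saw the border color */
		return detect * 4 > count * 3;
	}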
383
201static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) 384static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
202{ 385{
203 struct drm_device *dev = connector->dev; 386 struct drm_device *dev = connector->dev;
387 struct intel_output *intel_output = to_intel_output(connector);
388 struct drm_encoder *encoder = &intel_output->enc;
389 struct drm_crtc *crtc;
390 int dpms_mode;
391 enum drm_connector_status status;
204 392
205 if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) { 393 if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
206 if (intel_crt_detect_hotplug(connector)) 394 if (intel_crt_detect_hotplug(connector))
@@ -212,8 +400,20 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
212 if (intel_crt_detect_ddc(connector)) 400 if (intel_crt_detect_ddc(connector))
213 return connector_status_connected; 401 return connector_status_connected;
214 402
215 /* TODO use load detect */ 403 /* for pre-945g platforms use load detect */
216 return connector_status_unknown; 404 if (encoder->crtc && encoder->crtc->enabled) {
405 status = intel_crt_load_detect(encoder->crtc, intel_output);
406 } else {
407 crtc = intel_get_load_detect_pipe(intel_output,
408 NULL, &dpms_mode);
409 if (crtc) {
410 status = intel_crt_load_detect(crtc, intel_output);
411 intel_release_load_detect_pipe(intel_output, dpms_mode);
412 } else
413 status = connector_status_unknown;
414 }
415
416 return status;
217} 417}
218 418
219static void intel_crt_destroy(struct drm_connector *connector) 419static void intel_crt_destroy(struct drm_connector *connector)
@@ -236,11 +436,6 @@ static int intel_crt_set_property(struct drm_connector *connector,
236 struct drm_property *property, 436 struct drm_property *property,
237 uint64_t value) 437 uint64_t value)
238{ 438{
239 struct drm_device *dev = connector->dev;
240
241 if (property == dev->mode_config.dpms_property && connector->encoder)
242 intel_crt_dpms(connector->encoder, (uint32_t)(value & 0xf));
243
244 return 0; 439 return 0;
245} 440}
246 441
@@ -257,6 +452,7 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
257}; 452};
258 453
259static const struct drm_connector_funcs intel_crt_connector_funcs = { 454static const struct drm_connector_funcs intel_crt_connector_funcs = {
455 .dpms = drm_helper_connector_dpms,
260 .detect = intel_crt_detect, 456 .detect = intel_crt_detect,
261 .fill_modes = drm_helper_probe_single_connector_modes, 457 .fill_modes = drm_helper_probe_single_connector_modes,
262 .destroy = intel_crt_destroy, 458 .destroy = intel_crt_destroy,
@@ -282,6 +478,7 @@ void intel_crt_init(struct drm_device *dev)
282{ 478{
283 struct drm_connector *connector; 479 struct drm_connector *connector;
284 struct intel_output *intel_output; 480 struct intel_output *intel_output;
481 u32 i2c_reg;
285 482
286 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); 483 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
287 if (!intel_output) 484 if (!intel_output)
@@ -298,7 +495,11 @@ void intel_crt_init(struct drm_device *dev)
298 &intel_output->enc); 495 &intel_output->enc);
299 496
300 /* Set up the DDC bus. */ 497 /* Set up the DDC bus. */
301 intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A"); 498 if (IS_IGDNG(dev))
499 i2c_reg = PCH_GPIOA;
500 else
501 i2c_reg = GPIOA;
502 intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
302 if (!intel_output->ddc_bus) { 503 if (!intel_output->ddc_bus) {
303 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " 504 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
304 "failed.\n"); 505 "failed.\n");
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3387cf32f38..028f5b66e3d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -137,6 +137,8 @@ struct intel_limit {
137#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7 137#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7
138#define INTEL_LIMIT_IGD_SDVO_DAC 8 138#define INTEL_LIMIT_IGD_SDVO_DAC 8
139#define INTEL_LIMIT_IGD_LVDS 9 139#define INTEL_LIMIT_IGD_LVDS 9
140#define INTEL_LIMIT_IGDNG_SDVO_DAC 10
141#define INTEL_LIMIT_IGDNG_LVDS 11
140 142
141/*The parameter is for SDVO on G4x platform*/ 143/*The parameter is for SDVO on G4x platform*/
142#define G4X_DOT_SDVO_MIN 25000 144#define G4X_DOT_SDVO_MIN 25000
@@ -216,12 +218,43 @@ struct intel_limit {
216#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 218#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
217#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 219#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
218 220
221/* IGDNG */
222/* The clock is calculated using (register_value + 2) for
223 N/M1/M2, so the range values here are (actual_value - 2).
224 */
225#define IGDNG_DOT_MIN 25000
226#define IGDNG_DOT_MAX 350000
227#define IGDNG_VCO_MIN 1760000
228#define IGDNG_VCO_MAX 3510000
229#define IGDNG_N_MIN 1
230#define IGDNG_N_MAX 5
231#define IGDNG_M_MIN 79
232#define IGDNG_M_MAX 118
233#define IGDNG_M1_MIN 12
234#define IGDNG_M1_MAX 23
235#define IGDNG_M2_MIN 5
236#define IGDNG_M2_MAX 9
237#define IGDNG_P_SDVO_DAC_MIN 5
238#define IGDNG_P_SDVO_DAC_MAX 80
239#define IGDNG_P_LVDS_MIN 28
240#define IGDNG_P_LVDS_MAX 112
241#define IGDNG_P1_MIN 1
242#define IGDNG_P1_MAX 8
243#define IGDNG_P2_SDVO_DAC_SLOW 10
244#define IGDNG_P2_SDVO_DAC_FAST 5
245#define IGDNG_P2_LVDS_SLOW 14 /* single channel */
246#define IGDNG_P2_LVDS_FAST 7 /* dual channel */
247#define IGDNG_P2_DOT_LIMIT 225000 /* 225 MHz */
248
219static bool 249static bool
220intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 250intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
221 int target, int refclk, intel_clock_t *best_clock); 251 int target, int refclk, intel_clock_t *best_clock);
222static bool 252static bool
223intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 253intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
224 int target, int refclk, intel_clock_t *best_clock); 254 int target, int refclk, intel_clock_t *best_clock);
255static bool
256intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
257 int target, int refclk, intel_clock_t *best_clock);
225 258
226static const intel_limit_t intel_limits[] = { 259static const intel_limit_t intel_limits[] = {
227 { /* INTEL_LIMIT_I8XX_DVO_DAC */ 260 { /* INTEL_LIMIT_I8XX_DVO_DAC */
@@ -383,9 +416,47 @@ static const intel_limit_t intel_limits[] = {
383 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, 416 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
384 .find_pll = intel_find_best_PLL, 417 .find_pll = intel_find_best_PLL,
385 }, 418 },
386 419 { /* INTEL_LIMIT_IGDNG_SDVO_DAC */
420 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
421 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
422 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
423 .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX },
424 .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX },
425 .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX },
426 .p = { .min = IGDNG_P_SDVO_DAC_MIN, .max = IGDNG_P_SDVO_DAC_MAX },
427 .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX },
428 .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT,
429 .p2_slow = IGDNG_P2_SDVO_DAC_SLOW,
430 .p2_fast = IGDNG_P2_SDVO_DAC_FAST },
431 .find_pll = intel_igdng_find_best_PLL,
432 },
433 { /* INTEL_LIMIT_IGDNG_LVDS */
434 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
435 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
436 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
437 .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX },
438 .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX },
439 .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX },
440 .p = { .min = IGDNG_P_LVDS_MIN, .max = IGDNG_P_LVDS_MAX },
441 .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX },
442 .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT,
443 .p2_slow = IGDNG_P2_LVDS_SLOW,
444 .p2_fast = IGDNG_P2_LVDS_FAST },
445 .find_pll = intel_igdng_find_best_PLL,
446 },
387}; 447};
388 448
449static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc)
450{
451 const intel_limit_t *limit;
452 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
453 limit = &intel_limits[INTEL_LIMIT_IGDNG_LVDS];
454 else
455 limit = &intel_limits[INTEL_LIMIT_IGDNG_SDVO_DAC];
456
457 return limit;
458}
459
389static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) 460static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
390{ 461{
391 struct drm_device *dev = crtc->dev; 462 struct drm_device *dev = crtc->dev;
@@ -418,7 +489,9 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
418 struct drm_device *dev = crtc->dev; 489 struct drm_device *dev = crtc->dev;
419 const intel_limit_t *limit; 490 const intel_limit_t *limit;
420 491
421 if (IS_G4X(dev)) { 492 if (IS_IGDNG(dev))
493 limit = intel_igdng_limit(crtc);
494 else if (IS_G4X(dev)) {
422 limit = intel_g4x_limit(crtc); 495 limit = intel_g4x_limit(crtc);
423 } else if (IS_I9XX(dev) && !IS_IGD(dev)) { 496 } else if (IS_I9XX(dev) && !IS_IGD(dev)) {
424 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 497 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
@@ -630,7 +703,64 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
630 } 703 }
631 } 704 }
632 } 705 }
706 return found;
707}
708
709static bool
710intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
711 int target, int refclk, intel_clock_t *best_clock)
712{
713 struct drm_device *dev = crtc->dev;
714 struct drm_i915_private *dev_priv = dev->dev_private;
715 intel_clock_t clock;
716 int max_n;
717 bool found;
718 int err_most = 47;
719 found = false;
720
721 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
722 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
723 LVDS_CLKB_POWER_UP)
724 clock.p2 = limit->p2.p2_fast;
725 else
726 clock.p2 = limit->p2.p2_slow;
727 } else {
728 if (target < limit->p2.dot_limit)
729 clock.p2 = limit->p2.p2_slow;
730 else
731 clock.p2 = limit->p2.p2_fast;
732 }
733
734 memset(best_clock, 0, sizeof(*best_clock));
735 max_n = limit->n.max;
736 /* based on hardware requirement, prefer smaller n for precision */
737 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
738 /* based on hardware requirement, prefer larger m1, m2, p1 */
739 for (clock.m1 = limit->m1.max;
740 clock.m1 >= limit->m1.min; clock.m1--) {
741 for (clock.m2 = limit->m2.max;
742 clock.m2 >= limit->m2.min; clock.m2--) {
743 for (clock.p1 = limit->p1.max;
744 clock.p1 >= limit->p1.min; clock.p1--) {
745 int this_err;
633 746
747 intel_clock(dev, refclk, &clock);
748 if (!intel_PLL_is_valid(crtc, &clock))
749 continue;
750 this_err = abs((10000 - (target*10000/clock.dot)));
751 if (this_err < err_most) {
752 *best_clock = clock;
753 err_most = this_err;
754 max_n = clock.n;
755 found = true;
756 /* stop at the first match */
757 goto out;
758 }
759 }
760 }
761 }
762 }
763out:
634 return found; 764 return found;
635} 765}
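The error metric used above, abs(10000 - target * 10000 / clock.dot), is the relative deviation of the candidate dot clock from the target scaled by 10000, i.e. in units of 0.01%; with err_most = 47 the first candidate within roughly 0.47% of the target wins and the search exits. A sketch of the metric on its own (the function name is illustrative, not driver code):

	static int dot_clock_err(int target, int dot)
	{
		/* relative error scaled by 10000, i.e. units of 0.01% */
		return abs(10000 - target * 10000 / dot);
	}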
636 766
@@ -785,18 +915,292 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
785 return 0; 915 return 0;
786} 916}
787 917
918static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
919{
920 struct drm_device *dev = crtc->dev;
921 struct drm_i915_private *dev_priv = dev->dev_private;
922 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
923 int pipe = intel_crtc->pipe;
924 int plane = intel_crtc->pipe;
925 int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
926 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
927 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
928 int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
929 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
930 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
931 int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
932 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
933 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
934 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
935 int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
936 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
937 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
938 int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
939 int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
940 int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
941 int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B;
942 int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B;
943 int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B;
944 int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
945 int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
946 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
947 u32 temp;
948 int tries = 5, j;
949
950 /* XXX: When our outputs are all unaware of DPMS modes other than off
951 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
952 */
953 switch (mode) {
954 case DRM_MODE_DPMS_ON:
955 case DRM_MODE_DPMS_STANDBY:
956 case DRM_MODE_DPMS_SUSPEND:
957 DRM_DEBUG("crtc %d dpms on\n", pipe);
958 /* enable PCH DPLL */
959 temp = I915_READ(pch_dpll_reg);
960 if ((temp & DPLL_VCO_ENABLE) == 0) {
961 I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
962 I915_READ(pch_dpll_reg);
963 }
788 964
965 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
966 temp = I915_READ(fdi_rx_reg);
967 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
968 FDI_SEL_PCDCLK |
969 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
970 I915_READ(fdi_rx_reg);
971 udelay(200);
972
973 /* Enable CPU FDI TX PLL, always on for IGDNG */
974 temp = I915_READ(fdi_tx_reg);
975 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
976 I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
977 I915_READ(fdi_tx_reg);
978 udelay(100);
979 }
789 980
790/** 981 /* Enable CPU pipe */
791 * Sets the power management mode of the pipe and plane. 982 temp = I915_READ(pipeconf_reg);
792 * 983 if ((temp & PIPEACONF_ENABLE) == 0) {
793 * This code should probably grow support for turning the cursor off and back 984 I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
794 * on appropriately at the same time as we're turning the pipe off/on. 985 I915_READ(pipeconf_reg);
795 */ 986 udelay(100);
796static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) 987 }
988
989 /* configure and enable CPU plane */
990 temp = I915_READ(dspcntr_reg);
991 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
992 I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
993 /* Flush the plane changes */
994 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
995 }
996
997 /* enable CPU FDI TX and PCH FDI RX */
998 temp = I915_READ(fdi_tx_reg);
999 temp |= FDI_TX_ENABLE;
1000 temp |= FDI_DP_PORT_WIDTH_X4; /* default */
1001 temp &= ~FDI_LINK_TRAIN_NONE;
1002 temp |= FDI_LINK_TRAIN_PATTERN_1;
1003 I915_WRITE(fdi_tx_reg, temp);
1004 I915_READ(fdi_tx_reg);
1005
1006 temp = I915_READ(fdi_rx_reg);
1007 temp &= ~FDI_LINK_TRAIN_NONE;
1008 temp |= FDI_LINK_TRAIN_PATTERN_1;
1009 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
1010 I915_READ(fdi_rx_reg);
1011
1012 udelay(150);
1013
1014 /* Train FDI. */
1015 /* unmask the FDI RX interrupt symbol_lock and bit_lock bits
1016 to read the training result */
1017 temp = I915_READ(fdi_rx_imr_reg);
1018 temp &= ~FDI_RX_SYMBOL_LOCK;
1019 temp &= ~FDI_RX_BIT_LOCK;
1020 I915_WRITE(fdi_rx_imr_reg, temp);
1021 I915_READ(fdi_rx_imr_reg);
1022 udelay(150);
1023
1024 temp = I915_READ(fdi_rx_iir_reg);
1025 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
1026
1027 if ((temp & FDI_RX_BIT_LOCK) == 0) {
1028 for (j = 0; j < tries; j++) {
1029 temp = I915_READ(fdi_rx_iir_reg);
1030 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
1031 if (temp & FDI_RX_BIT_LOCK)
1032 break;
1033 udelay(200);
1034 }
1035 if (j != tries)
1036 I915_WRITE(fdi_rx_iir_reg,
1037 temp | FDI_RX_BIT_LOCK);
1038 else
1039 DRM_DEBUG("train 1 fail\n");
1040 } else {
1041 I915_WRITE(fdi_rx_iir_reg,
1042 temp | FDI_RX_BIT_LOCK);
1043 DRM_DEBUG("train 1 ok 2!\n");
1044 }
1045 temp = I915_READ(fdi_tx_reg);
1046 temp &= ~FDI_LINK_TRAIN_NONE;
1047 temp |= FDI_LINK_TRAIN_PATTERN_2;
1048 I915_WRITE(fdi_tx_reg, temp);
1049
1050 temp = I915_READ(fdi_rx_reg);
1051 temp &= ~FDI_LINK_TRAIN_NONE;
1052 temp |= FDI_LINK_TRAIN_PATTERN_2;
1053 I915_WRITE(fdi_rx_reg, temp);
1054
1055 udelay(150);
1056
1057 temp = I915_READ(fdi_rx_iir_reg);
1058 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
1059
1060 if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
1061 for (j = 0; j < tries; j++) {
1062 temp = I915_READ(fdi_rx_iir_reg);
1063 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
1064 if (temp & FDI_RX_SYMBOL_LOCK)
1065 break;
1066 udelay(200);
1067 }
1068 if (j != tries) {
1069 I915_WRITE(fdi_rx_iir_reg,
1070 temp | FDI_RX_SYMBOL_LOCK);
1071 DRM_DEBUG("train 2 ok 1!\n");
1072 } else
1073 DRM_DEBUG("train 2 fail\n");
1074 } else {
1075 I915_WRITE(fdi_rx_iir_reg, temp | FDI_RX_SYMBOL_LOCK);
1076 DRM_DEBUG("train 2 ok 2!\n");
1077 }
1078 DRM_DEBUG("train done\n");
1079
1080 /* set transcoder timing */
1081 I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
1082 I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg));
1083 I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg));
1084
1085 I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg));
1086 I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
1087 I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
1088
1089 /* enable PCH transcoder */
1090 temp = I915_READ(transconf_reg);
1091 I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
1092 I915_READ(transconf_reg);
1093
1094 while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
1095 ;
1096
1097 /* switch the link to normal (non-training) operation */
1098
1099 temp = I915_READ(fdi_tx_reg);
1100 temp &= ~FDI_LINK_TRAIN_NONE;
1101 I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
1102 FDI_TX_ENHANCE_FRAME_ENABLE);
1103 I915_READ(fdi_tx_reg);
1104
1105 temp = I915_READ(fdi_rx_reg);
1106 temp &= ~FDI_LINK_TRAIN_NONE;
1107 I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE |
1108 FDI_RX_ENHANCE_FRAME_ENABLE);
1109 I915_READ(fdi_rx_reg);
1110
1111 /* wait one idle pattern time */
1112 udelay(100);
1113
1114 intel_crtc_load_lut(crtc);
1115
1116 break;
1117 case DRM_MODE_DPMS_OFF:
1118 DRM_DEBUG("crtc %d dpms off\n", pipe);
1119
1120 /* Disable the VGA plane that we never use */
1121 I915_WRITE(CPU_VGACNTRL, VGA_DISP_DISABLE);
1122
1123 /* Disable display plane */
1124 temp = I915_READ(dspcntr_reg);
1125 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
1126 I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
1127 /* Flush the plane changes */
1128 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1129 I915_READ(dspbase_reg);
1130 }
1131
1132 /* disable cpu pipe, disable after all planes disabled */
1133 temp = I915_READ(pipeconf_reg);
1134 if ((temp & PIPEACONF_ENABLE) != 0) {
1135 I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
1136 I915_READ(pipeconf_reg);
1137 /* wait for cpu pipe off, pipe state */
1138 while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0)
1139 ;
1140 } else
1141 DRM_DEBUG("crtc %d is disabled\n", pipe);
1142
1143 /* IGDNG-A : disable cpu panel fitter ? */
1144 temp = I915_READ(pf_ctl_reg);
1145 if ((temp & PF_ENABLE) != 0) {
1146 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
1147 I915_READ(pf_ctl_reg);
1148 }
1149
1150 /* disable CPU FDI tx and PCH FDI rx */
1151 temp = I915_READ(fdi_tx_reg);
1152 I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE);
1153 I915_READ(fdi_tx_reg);
1154
1155 temp = I915_READ(fdi_rx_reg);
1156 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
1157 I915_READ(fdi_rx_reg);
1158
1159 /* leave train pattern 1 set */
1160 temp = I915_READ(fdi_tx_reg);
1161 temp &= ~FDI_LINK_TRAIN_NONE;
1162 temp |= FDI_LINK_TRAIN_PATTERN_1;
1163 I915_WRITE(fdi_tx_reg, temp);
1164
1165 temp = I915_READ(fdi_rx_reg);
1166 temp &= ~FDI_LINK_TRAIN_NONE;
1167 temp |= FDI_LINK_TRAIN_PATTERN_1;
1168 I915_WRITE(fdi_rx_reg, temp);
1169
1170 /* disable PCH transcoder */
1171 temp = I915_READ(transconf_reg);
1172 if ((temp & TRANS_ENABLE) != 0) {
1173 I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
1174 I915_READ(transconf_reg);
1175 /* wait for PCH transcoder off, transcoder state */
1176 while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0)
1177 ;
1178 }
1179
1180 /* disable PCH DPLL */
1181 temp = I915_READ(pch_dpll_reg);
1182 if ((temp & DPLL_VCO_ENABLE) != 0) {
1183 I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
1184 I915_READ(pch_dpll_reg);
1185 }
1186
1187 temp = I915_READ(fdi_rx_reg);
1188 if ((temp & FDI_RX_PLL_ENABLE) != 0) {
1189 temp &= ~FDI_SEL_PCDCLK;
1190 temp &= ~FDI_RX_PLL_ENABLE;
1191 I915_WRITE(fdi_rx_reg, temp);
1192 I915_READ(fdi_rx_reg);
1193 }
1194
1195 /* Wait for the clocks to turn off. */
1196 udelay(150);
1197 break;
1198 }
1199}
1200
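Both FDI training phases above follow the same shape: write a training pattern, then poll FDI_RX_IIR up to five times for the corresponding lock bit with a 200us delay between reads. A condensed sketch of that poll loop, with the register read abstracted behind a callback since this is an illustration rather than driver code:

	static bool fdi_wait_for_lock(u32 (*read_iir)(void), u32 lock_bit, int tries)
	{
		int j;

		for (j = 0; j < tries; j++) {
			if (read_iir() & lock_bit)
				return true;	/* bit/symbol lock achieved */
			udelay(200);
		}
		return false;	/* this training phase failed */
	}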
1201static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
797{ 1202{
798 struct drm_device *dev = crtc->dev; 1203 struct drm_device *dev = crtc->dev;
799 struct drm_i915_master_private *master_priv;
800 struct drm_i915_private *dev_priv = dev->dev_private; 1204 struct drm_i915_private *dev_priv = dev->dev_private;
801 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1205 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
802 int pipe = intel_crtc->pipe; 1206 int pipe = intel_crtc->pipe;
@@ -805,7 +1209,6 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
805 int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR; 1209 int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR;
806 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; 1210 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
807 u32 temp; 1211 u32 temp;
808 bool enabled;
809 1212
810 /* XXX: When our outputs are all unaware of DPMS modes other than off 1213 /* XXX: When our outputs are all unaware of DPMS modes other than off
811 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 1214 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -890,6 +1293,26 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
890 udelay(150); 1293 udelay(150);
891 break; 1294 break;
892 } 1295 }
1296}
1297
1298/**
1299 * Sets the power management mode of the pipe and plane.
1300 *
1301 * This code should probably grow support for turning the cursor off and back
1302 * on appropriately at the same time as we're turning the pipe off/on.
1303 */
1304static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
1305{
1306 struct drm_device *dev = crtc->dev;
1307 struct drm_i915_master_private *master_priv;
1308 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1309 int pipe = intel_crtc->pipe;
1310 bool enabled;
1311
1312 if (IS_IGDNG(dev))
1313 igdng_crtc_dpms(crtc, mode);
1314 else
1315 i9xx_crtc_dpms(crtc, mode);
893 1316
894 if (!dev->primary->master) 1317 if (!dev->primary->master)
895 return; 1318 return;
@@ -947,6 +1370,12 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
947 struct drm_display_mode *mode, 1370 struct drm_display_mode *mode,
948 struct drm_display_mode *adjusted_mode) 1371 struct drm_display_mode *adjusted_mode)
949{ 1372{
1373 struct drm_device *dev = crtc->dev;
1374 if (IS_IGDNG(dev)) {
1375 /* FDI link clock is fixed at 2.7G */
1376 if (mode->clock * 3 > 27000 * 4)
1377 return MODE_CLOCK_HIGH;
1378 }
950 return true; 1379 return true;
951} 1380}
952 1381
@@ -1030,6 +1459,48 @@ static int intel_panel_fitter_pipe (struct drm_device *dev)
1030 return 1; 1459 return 1;
1031} 1460}
1032 1461
1462struct fdi_m_n {
1463 u32 tu;
1464 u32 gmch_m;
1465 u32 gmch_n;
1466 u32 link_m;
1467 u32 link_n;
1468};
1469
1470static void
1471fdi_reduce_ratio(u32 *num, u32 *den)
1472{
1473 while (*num > 0xffffff || *den > 0xffffff) {
1474 *num >>= 1;
1475 *den >>= 1;
1476 }
1477}
1478
1479#define DATA_N 0x800000
1480#define LINK_N 0x80000
1481
1482static void
1483igdng_compute_m_n(int bytes_per_pixel, int nlanes,
1484 int pixel_clock, int link_clock,
1485 struct fdi_m_n *m_n)
1486{
1487 u64 temp;
1488
1489 m_n->tu = 64; /* default size */
1490
1491 temp = (u64) DATA_N * pixel_clock;
1492 temp = div_u64(temp, link_clock);
1493 m_n->gmch_m = (temp * bytes_per_pixel) / nlanes;
1494 m_n->gmch_n = DATA_N;
1495 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
1496
1497 temp = (u64) LINK_N * pixel_clock;
1498 m_n->link_m = div_u64(temp, link_clock);
1499 m_n->link_n = LINK_N;
1500 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
1501}
1502
1503
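As a rough standalone check of the M/N arithmetic, the same computation can be run in user space with plain 64-bit division in place of the kernel's div_u64. The sample numbers assume a 65 MHz panel dot clock and the 270 MHz FDI lane clock used further down; nothing here is driver code:

	#include <stdint.h>
	#include <stdio.h>

	static void reduce_ratio(uint32_t *num, uint32_t *den)
	{
		/* shift both down until they fit the 24-bit M/N registers */
		while (*num > 0xffffff || *den > 0xffffff) {
			*num >>= 1;
			*den >>= 1;
		}
	}

	int main(void)
	{
		const uint64_t data_n = 0x800000, link_n = 0x80000;
		int bpp = 3, nlanes = 4, pixel_clock = 65000, link_clock = 270000;
		uint32_t gmch_m, gmch_n = data_n, link_m, ln = link_n;

		gmch_m = (uint32_t)(data_n * pixel_clock / link_clock * bpp / nlanes);
		reduce_ratio(&gmch_m, &gmch_n);

		link_m = (uint32_t)(link_n * pixel_clock / link_clock);
		reduce_ratio(&link_m, &ln);

		printf("gmch %u/%u link %u/%u\n", gmch_m, gmch_n, link_m, ln);
		return 0;
	}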
1033static int intel_crtc_mode_set(struct drm_crtc *crtc, 1504static int intel_crtc_mode_set(struct drm_crtc *crtc,
1034 struct drm_display_mode *mode, 1505 struct drm_display_mode *mode,
1035 struct drm_display_mode *adjusted_mode, 1506 struct drm_display_mode *adjusted_mode,
@@ -1063,6 +1534,17 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1063 struct drm_connector *connector; 1534 struct drm_connector *connector;
1064 const intel_limit_t *limit; 1535 const intel_limit_t *limit;
1065 int ret; 1536 int ret;
1537 struct fdi_m_n m_n = {0};
1538 int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1;
1539 int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1;
1540 int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1;
1541 int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1;
1542 int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
1543 int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
1544 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1545 int lvds_reg = LVDS;
1546 u32 temp;
1547 int sdvo_pixel_multiply;
1066 1548
1067 drm_vblank_pre_modeset(dev, pipe); 1549 drm_vblank_pre_modeset(dev, pipe);
1068 1550
@@ -1101,6 +1583,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1101 DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000); 1583 DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000);
1102 } else if (IS_I9XX(dev)) { 1584 } else if (IS_I9XX(dev)) {
1103 refclk = 96000; 1585 refclk = 96000;
1586 if (IS_IGDNG(dev))
1587 refclk = 120000; /* 120 MHz refclk */
1104 } else { 1588 } else {
1105 refclk = 48000; 1589 refclk = 48000;
1106 } 1590 }
@@ -1114,6 +1598,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1114 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); 1598 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
1115 if (!ok) { 1599 if (!ok) {
1116 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 1600 DRM_ERROR("Couldn't find PLL settings for mode!\n");
1601 drm_vblank_post_modeset(dev, pipe);
1117 return -EINVAL; 1602 return -EINVAL;
1118 } 1603 }
1119 1604
@@ -1137,12 +1622,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1137 } 1622 }
1138 } 1623 }
1139 1624
1625 /* FDI link */
1626 if (IS_IGDNG(dev))
1627 igdng_compute_m_n(3, 4, /* lane num 4 */
1628 adjusted_mode->clock,
1629 270000, /* lane clock */
1630 &m_n);
1631
1140 if (IS_IGD(dev)) 1632 if (IS_IGD(dev))
1141 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; 1633 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
1142 else 1634 else
1143 fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 1635 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
1144 1636
1145 dpll = DPLL_VGA_MODE_DIS; 1637 if (!IS_IGDNG(dev))
1638 dpll = DPLL_VGA_MODE_DIS;
1639
1146 if (IS_I9XX(dev)) { 1640 if (IS_I9XX(dev)) {
1147 if (is_lvds) 1641 if (is_lvds)
1148 dpll |= DPLLB_MODE_LVDS; 1642 dpll |= DPLLB_MODE_LVDS;
@@ -1150,17 +1644,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1150 dpll |= DPLLB_MODE_DAC_SERIAL; 1644 dpll |= DPLLB_MODE_DAC_SERIAL;
1151 if (is_sdvo) { 1645 if (is_sdvo) {
1152 dpll |= DPLL_DVO_HIGH_SPEED; 1646 dpll |= DPLL_DVO_HIGH_SPEED;
1153 if (IS_I945G(dev) || IS_I945GM(dev)) { 1647 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
1154 int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 1648 if (IS_I945G(dev) || IS_I945GM(dev))
1155 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 1649 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
1156 } 1650 else if (IS_IGDNG(dev))
1651 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
1157 } 1652 }
1158 1653
1159 /* compute bitmask from p1 value */ 1654 /* compute bitmask from p1 value */
1160 if (IS_IGD(dev)) 1655 if (IS_IGD(dev))
1161 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD; 1656 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
1162 else 1657 else {
1163 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 1658 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1659 /* also FPA1 */
1660 if (IS_IGDNG(dev))
1661 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1662 }
1164 switch (clock.p2) { 1663 switch (clock.p2) {
1165 case 5: 1664 case 5:
1166 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 1665 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
@@ -1175,7 +1674,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1175 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 1674 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1176 break; 1675 break;
1177 } 1676 }
1178 if (IS_I965G(dev)) 1677 if (IS_I965G(dev) && !IS_IGDNG(dev))
1179 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 1678 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
1180 } else { 1679 } else {
1181 if (is_lvds) { 1680 if (is_lvds) {
@@ -1207,10 +1706,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1207 /* Set up the display plane register */ 1706 /* Set up the display plane register */
1208 dspcntr = DISPPLANE_GAMMA_ENABLE; 1707 dspcntr = DISPPLANE_GAMMA_ENABLE;
1209 1708
1210 if (pipe == 0) 1709 /* IGDNG's plane is tied to its pipe; bit 24
1211 dspcntr |= DISPPLANE_SEL_PIPE_A; 1710 enables color space conversion */
1212 else 1711 if (!IS_IGDNG(dev)) {
1213 dspcntr |= DISPPLANE_SEL_PIPE_B; 1712 if (pipe == 0)
1713 dspcntr |= DISPPLANE_SEL_PIPE_A;
1714 else
1715 dspcntr |= DISPPLANE_SEL_PIPE_B;
1716 }
1214 1717
1215 if (pipe == 0 && !IS_I965G(dev)) { 1718 if (pipe == 0 && !IS_I965G(dev)) {
1216 /* Enable pixel doubling when the dot clock is > 90% of the (display) 1719 /* Enable pixel doubling when the dot clock is > 90% of the (display)
@@ -1231,12 +1734,17 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1231 1734
1232 1735
1233 /* Disable the panel fitter if it was on our pipe */ 1736 /* Disable the panel fitter if it was on our pipe */
1234 if (intel_panel_fitter_pipe(dev) == pipe) 1737 if (!IS_IGDNG(dev) && intel_panel_fitter_pipe(dev) == pipe)
1235 I915_WRITE(PFIT_CONTROL, 0); 1738 I915_WRITE(PFIT_CONTROL, 0);
1236 1739
1237 DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 1740 DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
1238 drm_mode_debug_printmodeline(mode); 1741 drm_mode_debug_printmodeline(mode);
1239 1742
1743 /* assign to IGDNG registers */
1744 if (IS_IGDNG(dev)) {
1745 fp_reg = pch_fp_reg;
1746 dpll_reg = pch_dpll_reg;
1747 }
1240 1748
1241 if (dpll & DPLL_VCO_ENABLE) { 1749 if (dpll & DPLL_VCO_ENABLE) {
1242 I915_WRITE(fp_reg, fp); 1750 I915_WRITE(fp_reg, fp);
@@ -1245,13 +1753,33 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1245 udelay(150); 1753 udelay(150);
1246 } 1754 }
1247 1755
1756 if (IS_IGDNG(dev)) {
1757 /* enable PCH clock reference source */
1758 /* XXX need to change the setting for other outputs */
1759 u32 temp;
1760 temp = I915_READ(PCH_DREF_CONTROL);
1761 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
1762 temp |= DREF_NONSPREAD_CK505_ENABLE;
1763 temp &= ~DREF_SSC_SOURCE_MASK;
1764 temp |= DREF_SSC_SOURCE_ENABLE;
1765 temp &= ~DREF_SSC1_ENABLE;
1766 /* if no eDP, disable source output to CPU */
1767 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
1768 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
1769 I915_WRITE(PCH_DREF_CONTROL, temp);
1770 }
1771
1248 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 1772 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
1249 * This is an exception to the general rule that mode_set doesn't turn 1773 * This is an exception to the general rule that mode_set doesn't turn
1250 * things on. 1774 * things on.
1251 */ 1775 */
1252 if (is_lvds) { 1776 if (is_lvds) {
1253 u32 lvds = I915_READ(LVDS); 1777 u32 lvds;
1778
1779 if (IS_IGDNG(dev))
1780 lvds_reg = PCH_LVDS;
1254 1781
1782 lvds = I915_READ(lvds_reg);
1255 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; 1783 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
1256 /* Set the B0-B3 data pairs corresponding to whether we're going to 1784 /* Set the B0-B3 data pairs corresponding to whether we're going to
1257 * set the DPLLs for dual-channel mode or not. 1785 * set the DPLLs for dual-channel mode or not.
@@ -1266,8 +1794,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1266 * panels behave in the two modes. 1794 * panels behave in the two modes.
1267 */ 1795 */
1268 1796
1269 I915_WRITE(LVDS, lvds); 1797 I915_WRITE(lvds_reg, lvds);
1270 I915_READ(LVDS); 1798 I915_READ(lvds_reg);
1271 } 1799 }
1272 1800
1273 I915_WRITE(fp_reg, fp); 1801 I915_WRITE(fp_reg, fp);
@@ -1276,8 +1804,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1276 /* Wait for the clocks to stabilize. */ 1804 /* Wait for the clocks to stabilize. */
1277 udelay(150); 1805 udelay(150);
1278 1806
1279 if (IS_I965G(dev)) { 1807 if (IS_I965G(dev) && !IS_IGDNG(dev)) {
1280 int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 1808 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
1281 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | 1809 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
1282 ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); 1810 ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
1283 } else { 1811 } else {
@@ -1303,9 +1831,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1303 /* pipesrc and dspsize control the size that is scaled from, which should 1831 /* pipesrc and dspsize control the size that is scaled from, which should
1304 * always be the user's requested size. 1832 * always be the user's requested size.
1305 */ 1833 */
1306 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); 1834 if (!IS_IGDNG(dev)) {
1307 I915_WRITE(dsppos_reg, 0); 1835 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
1836 (mode->hdisplay - 1));
1837 I915_WRITE(dsppos_reg, 0);
1838 }
1308 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 1839 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
1840
1841 if (IS_IGDNG(dev)) {
1842 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
1843 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
1844 I915_WRITE(link_m1_reg, m_n.link_m);
1845 I915_WRITE(link_n1_reg, m_n.link_n);
1846
1847 /* enable FDI RX PLL too */
1848 temp = I915_READ(fdi_rx_reg);
1849 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
1850 udelay(200);
1851 }
1852
1309 I915_WRITE(pipeconf_reg, pipeconf); 1853 I915_WRITE(pipeconf_reg, pipeconf);
1310 I915_READ(pipeconf_reg); 1854 I915_READ(pipeconf_reg);
1311 1855
@@ -1315,12 +1859,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1315 1859
1316 /* Flush the plane changes */ 1860 /* Flush the plane changes */
1317 ret = intel_pipe_set_base(crtc, x, y, old_fb); 1861 ret = intel_pipe_set_base(crtc, x, y, old_fb);
1318 if (ret != 0)
1319 return ret;
1320
1321 drm_vblank_post_modeset(dev, pipe); 1862 drm_vblank_post_modeset(dev, pipe);
1322 1863
1323 return 0; 1864 return ret;
1324} 1865}
1325 1866
1326/** Loads the palette/gamma unit for the CRTC with the prepared values */ 1867/** Loads the palette/gamma unit for the CRTC with the prepared values */
@@ -1336,6 +1877,11 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
1336 if (!crtc->enabled) 1877 if (!crtc->enabled)
1337 return; 1878 return;
1338 1879
1880 /* use legacy palette for IGDNG */
1881 if (IS_IGDNG(dev))
1882 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
1883 LGC_PALETTE_B;
1884
1339 for (i = 0; i < 256; i++) { 1885 for (i = 0; i < 256; i++) {
1340 I915_WRITE(palreg + 4 * i, 1886 I915_WRITE(palreg + 4 * i,
1341 (intel_crtc->lut_r[i] << 16) | 1887 (intel_crtc->lut_r[i] << 16) |
@@ -1357,7 +1903,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
1357 int pipe = intel_crtc->pipe; 1903 int pipe = intel_crtc->pipe;
1358 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; 1904 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
1359 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; 1905 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
1360 uint32_t temp; 1906 uint32_t temp = I915_READ(control);
1361 size_t addr; 1907 size_t addr;
1362 int ret; 1908 int ret;
1363 1909
@@ -1366,7 +1912,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
1366 /* if we want to turn off the cursor ignore width and height */ 1912 /* if we want to turn off the cursor ignore width and height */
1367 if (!handle) { 1913 if (!handle) {
1368 DRM_DEBUG("cursor off\n"); 1914 DRM_DEBUG("cursor off\n");
1369 temp = CURSOR_MODE_DISABLE; 1915 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
1916 temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
1917 temp |= CURSOR_MODE_DISABLE;
1918 } else {
1919 temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
1920 }
1370 addr = 0; 1921 addr = 0;
1371 bo = NULL; 1922 bo = NULL;
1372 mutex_lock(&dev->struct_mutex); 1923 mutex_lock(&dev->struct_mutex);
@@ -1409,10 +1960,19 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
1409 addr = obj_priv->phys_obj->handle->busaddr; 1960 addr = obj_priv->phys_obj->handle->busaddr;
1410 } 1961 }
1411 1962
1412 temp = 0; 1963 if (!IS_I9XX(dev))
1413 /* set the pipe for the cursor */ 1964 I915_WRITE(CURSIZE, (height << 12) | width);
1414 temp |= (pipe << 28); 1965
1415 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; 1966 /* Hooray for CUR*CNTR differences */
1967 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
1968 temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
1969 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
1970 temp |= (pipe << 28); /* Connect to correct pipe */
1971 } else {
1972 temp &= ~(CURSOR_FORMAT_MASK);
1973 temp |= CURSOR_ENABLE;
1974 temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
1975 }
1416 1976
1417 finish: 1977 finish:
1418 I915_WRITE(control, temp); 1978 I915_WRITE(control, temp);
@@ -1450,16 +2010,16 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1450 uint32_t adder; 2010 uint32_t adder;
1451 2011
1452 if (x < 0) { 2012 if (x < 0) {
1453 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT); 2013 temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
1454 x = -x; 2014 x = -x;
1455 } 2015 }
1456 if (y < 0) { 2016 if (y < 0) {
1457 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT); 2017 temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
1458 y = -y; 2018 y = -y;
1459 } 2019 }
1460 2020
1461 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT); 2021 temp |= x << CURSOR_X_SHIFT;
1462 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT); 2022 temp |= y << CURSOR_Y_SHIFT;
1463 2023
1464 adder = intel_crtc->cursor_addr; 2024 adder = intel_crtc->cursor_addr;
1465 I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); 2025 I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
@@ -1576,6 +2136,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
1576 } 2136 }
1577 2137
1578 encoder->crtc = crtc; 2138 encoder->crtc = crtc;
2139 intel_output->base.encoder = encoder;
1579 intel_output->load_detect_temp = true; 2140 intel_output->load_detect_temp = true;
1580 2141
1581 intel_crtc = to_intel_crtc(crtc); 2142 intel_crtc = to_intel_crtc(crtc);
@@ -1611,6 +2172,7 @@ void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_
1611 2172
1612 if (intel_output->load_detect_temp) { 2173 if (intel_output->load_detect_temp) {
1613 encoder->crtc = NULL; 2174 encoder->crtc = NULL;
2175 intel_output->base.encoder = NULL;
1614 intel_output->load_detect_temp = false; 2176 intel_output->load_detect_temp = false;
1615 crtc->enabled = drm_helper_crtc_in_use(crtc); 2177 crtc->enabled = drm_helper_crtc_in_use(crtc);
1616 drm_helper_disable_unused_functions(dev); 2178 drm_helper_disable_unused_functions(dev);
@@ -1748,6 +2310,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
1748{ 2310{
1749 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2311 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1750 2312
2313 if (intel_crtc->mode_set.mode)
2314 drm_mode_destroy(crtc->dev, intel_crtc->mode_set.mode);
1751 drm_crtc_cleanup(crtc); 2315 drm_crtc_cleanup(crtc);
1752 kfree(intel_crtc); 2316 kfree(intel_crtc);
1753} 2317}
@@ -1874,7 +2438,24 @@ static void intel_setup_outputs(struct drm_device *dev)
1874 if (IS_MOBILE(dev) && !IS_I830(dev)) 2438 if (IS_MOBILE(dev) && !IS_I830(dev))
1875 intel_lvds_init(dev); 2439 intel_lvds_init(dev);
1876 2440
1877 if (IS_I9XX(dev)) { 2441 if (IS_IGDNG(dev)) {
2442 int found;
2443
2444 if (I915_READ(HDMIB) & PORT_DETECTED) {
2445 /* check SDVOB */
2446 /* found = intel_sdvo_init(dev, HDMIB); */
2447 found = 0;
2448 if (!found)
2449 intel_hdmi_init(dev, HDMIB);
2450 }
2451
2452 if (I915_READ(HDMIC) & PORT_DETECTED)
2453 intel_hdmi_init(dev, HDMIC);
2454
2455 if (I915_READ(HDMID) & PORT_DETECTED)
2456 intel_hdmi_init(dev, HDMID);
2457
2458 } else if (IS_I9XX(dev)) {
1878 int found; 2459 int found;
1879 u32 reg; 2460 u32 reg;
1880 2461
@@ -1898,7 +2479,7 @@ static void intel_setup_outputs(struct drm_device *dev)
1898 } else 2479 } else
1899 intel_dvo_init(dev); 2480 intel_dvo_init(dev);
1900 2481
1901 if (IS_I9XX(dev) && IS_MOBILE(dev)) 2482 if (IS_I9XX(dev) && IS_MOBILE(dev) && !IS_IGDNG(dev))
1902 intel_tv_init(dev); 2483 intel_tv_init(dev);
1903 2484
1904 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 2485 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 8b8d6e65cd3..1ee3007d6ec 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -316,6 +316,7 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
316}; 316};
317 317
318static const struct drm_connector_funcs intel_dvo_connector_funcs = { 318static const struct drm_connector_funcs intel_dvo_connector_funcs = {
319 .dpms = drm_helper_connector_dpms,
319 .save = intel_dvo_save, 320 .save = intel_dvo_save,
320 .restore = intel_dvo_restore, 321 .restore = intel_dvo_restore,
321 .detect = intel_dvo_detect, 322 .detect = intel_dvo_detect,
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index e4652dcdd9b..0ecf6b76a40 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -207,7 +207,7 @@ static int intelfb_set_par(struct fb_info *info)
207 207
208 if (var->pixclock != -1) { 208 if (var->pixclock != -1) {
209 209
210 DRM_ERROR("PIXEL CLCOK SET\n"); 210 DRM_ERROR("PIXEL CLOCK SET\n");
211 return -EINVAL; 211 return -EINVAL;
212 } else { 212 } else {
213 struct drm_crtc *crtc; 213 struct drm_crtc *crtc;
@@ -674,8 +674,12 @@ static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc *
674 par->crtc_ids[0] = crtc->base.id; 674 par->crtc_ids[0] = crtc->base.id;
675 675
676 modeset->num_connectors = conn_count; 676 modeset->num_connectors = conn_count;
677 if (modeset->mode != modeset->crtc->desired_mode) 677 if (modeset->crtc->desired_mode) {
678 modeset->mode = modeset->crtc->desired_mode; 678 if (modeset->mode)
679 drm_mode_destroy(dev, modeset->mode);
680 modeset->mode = drm_mode_duplicate(dev,
681 modeset->crtc->desired_mode);
682 }
679 683
680 par->crtc_count = 1; 684 par->crtc_count = 1;
681 685
@@ -824,8 +828,12 @@ static int intelfb_single_fb_probe(struct drm_device *dev)
824 par->crtc_ids[crtc_count++] = crtc->base.id; 828 par->crtc_ids[crtc_count++] = crtc->base.id;
825 829
826 modeset->num_connectors = conn_count; 830 modeset->num_connectors = conn_count;
827 if (modeset->mode != modeset->crtc->desired_mode) 831 if (modeset->crtc->desired_mode) {
828 modeset->mode = modeset->crtc->desired_mode; 832 if (modeset->mode)
833 drm_mode_destroy(dev, modeset->mode);
834 modeset->mode = drm_mode_duplicate(dev,
835 modeset->crtc->desired_mode);
836 }
829 } 837 }
830 par->crtc_count = crtc_count; 838 par->crtc_count = crtc_count;
831 839
@@ -857,9 +865,15 @@ void intelfb_restore(void)
857 drm_crtc_helper_set_config(&kernelfb_mode); 865 drm_crtc_helper_set_config(&kernelfb_mode);
858} 866}
859 867
868static void intelfb_restore_work_fn(struct work_struct *ignored)
869{
870 intelfb_restore();
871}
872static DECLARE_WORK(intelfb_restore_work, intelfb_restore_work_fn);
873
860static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3) 874static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3)
861{ 875{
862 intelfb_restore(); 876 schedule_work(&intelfb_restore_work);
863} 877}
864 878
865static struct sysrq_key_op sysrq_intelfb_restore_op = { 879static struct sysrq_key_op sysrq_intelfb_restore_op = {
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index d0983bb93a1..4ea2a651b92 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -56,7 +56,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
56 sdvox = SDVO_ENCODING_HDMI | 56 sdvox = SDVO_ENCODING_HDMI |
57 SDVO_BORDER_ENABLE | 57 SDVO_BORDER_ENABLE |
58 SDVO_VSYNC_ACTIVE_HIGH | 58 SDVO_VSYNC_ACTIVE_HIGH |
59 SDVO_HSYNC_ACTIVE_HIGH; 59 SDVO_HSYNC_ACTIVE_HIGH |
60 SDVO_NULL_PACKETS_DURING_VSYNC;
60 61
61 if (hdmi_priv->has_hdmi_sink) 62 if (hdmi_priv->has_hdmi_sink)
62 sdvox |= SDVO_AUDIO_ENABLE; 63 sdvox |= SDVO_AUDIO_ENABLE;
@@ -145,6 +146,22 @@ intel_hdmi_sink_detect(struct drm_connector *connector)
145} 146}
146 147
147static enum drm_connector_status 148static enum drm_connector_status
149igdng_hdmi_detect(struct drm_connector *connector)
150{
151 struct intel_output *intel_output = to_intel_output(connector);
152 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
153
154 /* FIXME hotplug detect */
155
156 hdmi_priv->has_hdmi_sink = false;
157 intel_hdmi_sink_detect(connector);
158 if (hdmi_priv->has_hdmi_sink)
159 return connector_status_connected;
160 else
161 return connector_status_disconnected;
162}
163
164static enum drm_connector_status
148intel_hdmi_detect(struct drm_connector *connector) 165intel_hdmi_detect(struct drm_connector *connector)
149{ 166{
150 struct drm_device *dev = connector->dev; 167 struct drm_device *dev = connector->dev;
@@ -153,6 +170,9 @@ intel_hdmi_detect(struct drm_connector *connector)
153 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 170 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
154 u32 temp, bit; 171 u32 temp, bit;
155 172
173 if (IS_IGDNG(dev))
174 return igdng_hdmi_detect(connector);
175
156 temp = I915_READ(PORT_HOTPLUG_EN); 176 temp = I915_READ(PORT_HOTPLUG_EN);
157 177
158 switch (hdmi_priv->sdvox_reg) { 178 switch (hdmi_priv->sdvox_reg) {
@@ -219,6 +239,7 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
219}; 239};
220 240
221static const struct drm_connector_funcs intel_hdmi_connector_funcs = { 241static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
242 .dpms = drm_helper_connector_dpms,
222 .save = intel_hdmi_save, 243 .save = intel_hdmi_save,
223 .restore = intel_hdmi_restore, 244 .restore = intel_hdmi_restore,
224 .detect = intel_hdmi_detect, 245 .detect = intel_hdmi_detect,
@@ -268,8 +289,17 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
268 /* Set up the DDC bus. */ 289 /* Set up the DDC bus. */
269 if (sdvox_reg == SDVOB) 290 if (sdvox_reg == SDVOB)
270 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); 291 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
271 else 292 else if (sdvox_reg == SDVOC)
272 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); 293 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
294 else if (sdvox_reg == HDMIB)
295 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
296 "HDMIB");
297 else if (sdvox_reg == HDMIC)
298 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
299 "HDMIC");
300 else if (sdvox_reg == HDMID)
301 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
302 "HDMID");
273 303
274 if (!intel_output->ddc_bus) 304 if (!intel_output->ddc_bus)
275 goto err_connector; 305 goto err_connector;
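
The chain above maps each SDVO/HDMI register to the GPIO pair carrying its DDC lines, with the PCH_* pins used on IGDNG. A table-driven equivalent, sketched here from the same register/pin pairs purely as an illustration (not the driver's code), keeps the mapping in one place:

    static const struct {
            u32 reg;
            u32 gpio;
            const char *name;
    } ddc_map[] = {
            { SDVOB, GPIOE,     "HDMIB" },
            { SDVOC, GPIOD,     "HDMIC" },
            { HDMIB, PCH_GPIOE, "HDMIB" },
            { HDMIC, PCH_GPIOD, "HDMIC" },
            { HDMID, PCH_GPIOF, "HDMID" },
    };
    /* then loop over ddc_map, match sdvox_reg, call intel_i2c_create() */
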
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 439a8651499..f073ed8432e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -37,6 +37,8 @@
37#include "i915_drm.h" 37#include "i915_drm.h"
38#include "i915_drv.h" 38#include "i915_drv.h"
39 39
40#define I915_LVDS "i915_lvds"
41
40/** 42/**
41 * Sets the backlight level. 43 * Sets the backlight level.
42 * 44 *
@@ -45,10 +47,15 @@
45static void intel_lvds_set_backlight(struct drm_device *dev, int level) 47static void intel_lvds_set_backlight(struct drm_device *dev, int level)
46{ 48{
47 struct drm_i915_private *dev_priv = dev->dev_private; 49 struct drm_i915_private *dev_priv = dev->dev_private;
48 u32 blc_pwm_ctl; 50 u32 blc_pwm_ctl, reg;
51
52 if (IS_IGDNG(dev))
53 reg = BLC_PWM_CPU_CTL;
54 else
55 reg = BLC_PWM_CTL;
49 56
50 blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; 57 blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK;
51 I915_WRITE(BLC_PWM_CTL, (blc_pwm_ctl | 58 I915_WRITE(reg, (blc_pwm_ctl |
52 (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); 59 (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
53} 60}
54 61
@@ -58,8 +65,14 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
58static u32 intel_lvds_get_max_backlight(struct drm_device *dev) 65static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
59{ 66{
60 struct drm_i915_private *dev_priv = dev->dev_private; 67 struct drm_i915_private *dev_priv = dev->dev_private;
68 u32 reg;
69
70 if (IS_IGDNG(dev))
71 reg = BLC_PWM_PCH_CTL2;
72 else
73 reg = BLC_PWM_CTL;
61 74
62 return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >> 75 return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >>
63 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; 76 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
64} 77}
65 78
@@ -69,23 +82,31 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
69static void intel_lvds_set_power(struct drm_device *dev, bool on) 82static void intel_lvds_set_power(struct drm_device *dev, bool on)
70{ 83{
71 struct drm_i915_private *dev_priv = dev->dev_private; 84 struct drm_i915_private *dev_priv = dev->dev_private;
72 u32 pp_status; 85 u32 pp_status, ctl_reg, status_reg;
86
87 if (IS_IGDNG(dev)) {
88 ctl_reg = PCH_PP_CONTROL;
89 status_reg = PCH_PP_STATUS;
90 } else {
91 ctl_reg = PP_CONTROL;
92 status_reg = PP_STATUS;
93 }
73 94
74 if (on) { 95 if (on) {
75 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | 96 I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
76 POWER_TARGET_ON); 97 POWER_TARGET_ON);
77 do { 98 do {
78 pp_status = I915_READ(PP_STATUS); 99 pp_status = I915_READ(status_reg);
79 } while ((pp_status & PP_ON) == 0); 100 } while ((pp_status & PP_ON) == 0);
80 101
81 intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle); 102 intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
82 } else { 103 } else {
83 intel_lvds_set_backlight(dev, 0); 104 intel_lvds_set_backlight(dev, 0);
84 105
85 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 106 I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
86 ~POWER_TARGET_ON); 107 ~POWER_TARGET_ON);
87 do { 108 do {
88 pp_status = I915_READ(PP_STATUS); 109 pp_status = I915_READ(status_reg);
89 } while (pp_status & PP_ON); 110 } while (pp_status & PP_ON);
90 } 111 }
91} 112}
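
intel_lvds_set_power() now selects the PCH register pair on IGDNG, then spins on the chosen status register until PP_ON appears or clears. As written the loop polls forever; a bounded variant, offered only as a sketch of an alternative and not what the driver does, would look like:

    /* hypothetical bounded poll; needs <linux/delay.h> */
    int timeout = 1000;
    do {
            pp_status = I915_READ(status_reg);
            udelay(10);
    } while ((pp_status & PP_ON) == 0 && --timeout);
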
@@ -106,12 +127,28 @@ static void intel_lvds_save(struct drm_connector *connector)
106{ 127{
107 struct drm_device *dev = connector->dev; 128 struct drm_device *dev = connector->dev;
108 struct drm_i915_private *dev_priv = dev->dev_private; 129 struct drm_i915_private *dev_priv = dev->dev_private;
130 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
131 u32 pwm_ctl_reg;
132
133 if (IS_IGDNG(dev)) {
134 pp_on_reg = PCH_PP_ON_DELAYS;
135 pp_off_reg = PCH_PP_OFF_DELAYS;
136 pp_ctl_reg = PCH_PP_CONTROL;
137 pp_div_reg = PCH_PP_DIVISOR;
138 pwm_ctl_reg = BLC_PWM_CPU_CTL;
139 } else {
140 pp_on_reg = PP_ON_DELAYS;
141 pp_off_reg = PP_OFF_DELAYS;
142 pp_ctl_reg = PP_CONTROL;
143 pp_div_reg = PP_DIVISOR;
144 pwm_ctl_reg = BLC_PWM_CTL;
145 }
109 146
110 dev_priv->savePP_ON = I915_READ(PP_ON_DELAYS); 147 dev_priv->savePP_ON = I915_READ(pp_on_reg);
111 dev_priv->savePP_OFF = I915_READ(PP_OFF_DELAYS); 148 dev_priv->savePP_OFF = I915_READ(pp_off_reg);
112 dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); 149 dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg);
113 dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); 150 dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg);
114 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); 151 dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg);
115 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & 152 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
116 BACKLIGHT_DUTY_CYCLE_MASK); 153 BACKLIGHT_DUTY_CYCLE_MASK);
117 154
@@ -127,12 +164,28 @@ static void intel_lvds_restore(struct drm_connector *connector)
127{ 164{
128 struct drm_device *dev = connector->dev; 165 struct drm_device *dev = connector->dev;
129 struct drm_i915_private *dev_priv = dev->dev_private; 166 struct drm_i915_private *dev_priv = dev->dev_private;
167 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
168 u32 pwm_ctl_reg;
169
170 if (IS_IGDNG(dev)) {
171 pp_on_reg = PCH_PP_ON_DELAYS;
172 pp_off_reg = PCH_PP_OFF_DELAYS;
173 pp_ctl_reg = PCH_PP_CONTROL;
174 pp_div_reg = PCH_PP_DIVISOR;
175 pwm_ctl_reg = BLC_PWM_CPU_CTL;
176 } else {
177 pp_on_reg = PP_ON_DELAYS;
178 pp_off_reg = PP_OFF_DELAYS;
179 pp_ctl_reg = PP_CONTROL;
180 pp_div_reg = PP_DIVISOR;
181 pwm_ctl_reg = BLC_PWM_CTL;
182 }
130 183
131 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); 184 I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL);
132 I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON); 185 I915_WRITE(pp_on_reg, dev_priv->savePP_ON);
133 I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF); 186 I915_WRITE(pp_off_reg, dev_priv->savePP_OFF);
134 I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); 187 I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR);
135 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); 188 I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL);
136 if (dev_priv->savePP_CONTROL & POWER_TARGET_ON) 189 if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
137 intel_lvds_set_power(dev, true); 190 intel_lvds_set_power(dev, true);
138 else 191 else
@@ -216,8 +269,14 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
216{ 269{
217 struct drm_device *dev = encoder->dev; 270 struct drm_device *dev = encoder->dev;
218 struct drm_i915_private *dev_priv = dev->dev_private; 271 struct drm_i915_private *dev_priv = dev->dev_private;
272 u32 reg;
273
274 if (IS_IGDNG(dev))
275 reg = BLC_PWM_CPU_CTL;
276 else
277 reg = BLC_PWM_CTL;
219 278
220 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); 279 dev_priv->saveBLC_PWM_CTL = I915_READ(reg);
221 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & 280 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
222 BACKLIGHT_DUTY_CYCLE_MASK); 281 BACKLIGHT_DUTY_CYCLE_MASK);
223 282
@@ -251,6 +310,10 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
251 * settings. 310 * settings.
252 */ 311 */
253 312
 313 /* FIXME: no panel fitting on IGDNG yet */
314 if (IS_IGDNG(dev))
315 return;
316
254 /* 317 /*
255 * Enable automatic panel scaling so that non-native modes fill the 318 * Enable automatic panel scaling so that non-native modes fill the
256 * screen. Should be enabled before the pipe is enabled, according to 319 * screen. Should be enabled before the pipe is enabled, according to
@@ -343,11 +406,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
343 struct drm_property *property, 406 struct drm_property *property,
344 uint64_t value) 407 uint64_t value)
345{ 408{
346 struct drm_device *dev = connector->dev;
347
348 if (property == dev->mode_config.dpms_property && connector->encoder)
349 intel_lvds_dpms(connector->encoder, (uint32_t)(value & 0xf));
350
351 return 0; 409 return 0;
352} 410}
353 411
@@ -366,6 +424,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
366}; 424};
367 425
368static const struct drm_connector_funcs intel_lvds_connector_funcs = { 426static const struct drm_connector_funcs intel_lvds_connector_funcs = {
427 .dpms = drm_helper_connector_dpms,
369 .save = intel_lvds_save, 428 .save = intel_lvds_save,
370 .restore = intel_lvds_restore, 429 .restore = intel_lvds_restore,
371 .detect = intel_lvds_detect, 430 .detect = intel_lvds_detect,
@@ -386,12 +445,13 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
386 445
387static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) 446static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
388{ 447{
389 DRM_DEBUG("Skipping LVDS initialization for %s\n", id->ident); 448 DRM_DEBUG_KMS(I915_LVDS,
449 "Skipping LVDS initialization for %s\n", id->ident);
390 return 1; 450 return 1;
391} 451}
392 452
393/* These systems claim to have LVDS, but really don't */ 453/* These systems claim to have LVDS, but really don't */
394static const struct dmi_system_id __initdata intel_no_lvds[] = { 454static const struct dmi_system_id intel_no_lvds[] = {
395 { 455 {
396 .callback = intel_no_lvds_dmi_callback, 456 .callback = intel_no_lvds_dmi_callback,
397 .ident = "Apple Mac Mini (Core series)", 457 .ident = "Apple Mac Mini (Core series)",
@@ -424,8 +484,21 @@ static const struct dmi_system_id __initdata intel_no_lvds[] = {
424 DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"), 484 DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"),
425 }, 485 },
426 }, 486 },
427 487 {
428 /* FIXME: add a check for the Aopen Mini PC */ 488 .callback = intel_no_lvds_dmi_callback,
489 .ident = "AOpen Mini PC",
490 .matches = {
491 DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
492 DMI_MATCH(DMI_PRODUCT_NAME, "i965GMx-IF"),
493 },
494 },
495 {
496 .callback = intel_no_lvds_dmi_callback,
497 .ident = "Aopen i945GTt-VFA",
498 .matches = {
499 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
500 },
501 },
429 502
430 { } /* terminating entry */ 503 { } /* terminating entry */
431}; 504};
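
dmi_check_system() walks this table at runtime: an entry matches when all of its .matches fields hit, its .callback then runs, and the function returns the number of matches. That is why a single call is enough to skip init, as later in this patch:

    /* sketch of the consuming side, mirroring intel_lvds_init() below */
    if (dmi_check_system(intel_no_lvds))
            return; /* machine falsely reports LVDS, skip init */

Dropping __initdata from the table follows from the same fact: it is consulted from a regular (non-__init) code path, so it must survive the post-boot discard of init data.
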
@@ -446,12 +519,18 @@ void intel_lvds_init(struct drm_device *dev)
446 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 519 struct drm_display_mode *scan; /* *modes, *bios_mode; */
447 struct drm_crtc *crtc; 520 struct drm_crtc *crtc;
448 u32 lvds; 521 u32 lvds;
449 int pipe; 522 int pipe, gpio = GPIOC;
450 523
451 /* Skip init on machines we know falsely report LVDS */ 524 /* Skip init on machines we know falsely report LVDS */
452 if (dmi_check_system(intel_no_lvds)) 525 if (dmi_check_system(intel_no_lvds))
453 return; 526 return;
454 527
528 if (IS_IGDNG(dev)) {
529 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
530 return;
531 gpio = PCH_GPIOC;
532 }
533
455 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); 534 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
456 if (!intel_output) { 535 if (!intel_output) {
457 return; 536 return;
@@ -486,7 +565,7 @@ void intel_lvds_init(struct drm_device *dev)
486 */ 565 */
487 566
488 /* Set up the DDC bus. */ 567 /* Set up the DDC bus. */
489 intel_output->ddc_bus = intel_i2c_create(dev, GPIOC, "LVDSDDC_C"); 568 intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
490 if (!intel_output->ddc_bus) { 569 if (!intel_output->ddc_bus) {
491 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " 570 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
492 "failed.\n"); 571 "failed.\n");
@@ -511,10 +590,10 @@ void intel_lvds_init(struct drm_device *dev)
511 } 590 }
512 591
513 /* Failed to get EDID, what about VBT? */ 592 /* Failed to get EDID, what about VBT? */
514 if (dev_priv->vbt_mode) { 593 if (dev_priv->lfp_lvds_vbt_mode) {
515 mutex_lock(&dev->mode_config.mutex); 594 mutex_lock(&dev->mode_config.mutex);
516 dev_priv->panel_fixed_mode = 595 dev_priv->panel_fixed_mode =
517 drm_mode_duplicate(dev, dev_priv->vbt_mode); 596 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
518 mutex_unlock(&dev->mode_config.mutex); 597 mutex_unlock(&dev->mode_config.mutex);
519 if (dev_priv->panel_fixed_mode) { 598 if (dev_priv->panel_fixed_mode) {
520 dev_priv->panel_fixed_mode->type |= 599 dev_priv->panel_fixed_mode->type |=
@@ -528,6 +607,11 @@ void intel_lvds_init(struct drm_device *dev)
528 * on. If so, assume that whatever is currently programmed is the 607 * on. If so, assume that whatever is currently programmed is the
529 * correct mode. 608 * correct mode.
530 */ 609 */
610
 611 /* IGDNG: FIXME: if we still failed, don't try the current pipe mode for now */
612 if (IS_IGDNG(dev))
613 goto failed;
614
531 lvds = I915_READ(LVDS); 615 lvds = I915_READ(LVDS);
532 pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; 616 pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
533 crtc = intel_get_crtc_from_pipe(dev, pipe); 617 crtc = intel_get_crtc_from_pipe(dev, pipe);
@@ -546,11 +630,22 @@ void intel_lvds_init(struct drm_device *dev)
546 goto failed; 630 goto failed;
547 631
548out: 632out:
633 if (IS_IGDNG(dev)) {
634 u32 pwm;
635 /* make sure PWM is enabled */
636 pwm = I915_READ(BLC_PWM_CPU_CTL2);
637 pwm |= (PWM_ENABLE | PWM_PIPE_B);
638 I915_WRITE(BLC_PWM_CPU_CTL2, pwm);
639
640 pwm = I915_READ(BLC_PWM_PCH_CTL1);
641 pwm |= PWM_PCH_ENABLE;
642 I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
643 }
549 drm_sysfs_connector_add(connector); 644 drm_sysfs_connector_add(connector);
550 return; 645 return;
551 646
552failed: 647failed:
553 DRM_DEBUG("No LVDS modes found, disabling.\n"); 648 DRM_DEBUG_KMS(I915_LVDS, "No LVDS modes found, disabling.\n");
554 if (intel_output->ddc_bus) 649 if (intel_output->ddc_bus)
555 intel_i2c_destroy(intel_output->ddc_bus); 650 intel_i2c_destroy(intel_output->ddc_bus);
556 drm_connector_cleanup(connector); 651 drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9913651c1e1..9a00adb3a50 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -36,7 +36,7 @@
36#include "intel_sdvo_regs.h" 36#include "intel_sdvo_regs.h"
37 37
38#undef SDVO_DEBUG 38#undef SDVO_DEBUG
39 39#define I915_SDVO "i915_sdvo"
40struct intel_sdvo_priv { 40struct intel_sdvo_priv {
41 struct intel_i2c_chan *i2c_bus; 41 struct intel_i2c_chan *i2c_bus;
42 int slaveaddr; 42 int slaveaddr;
@@ -69,6 +69,10 @@ struct intel_sdvo_priv {
69 * This is set if we treat the device as HDMI, instead of DVI. 69 * This is set if we treat the device as HDMI, instead of DVI.
70 */ 70 */
71 bool is_hdmi; 71 bool is_hdmi;
72 /**
73 * This is set if we detect output of sdvo device as LVDS.
74 */
75 bool is_lvds;
72 76
73 /** 77 /**
74 * Returned SDTV resolutions allowed for the current format, if the 78 * Returned SDTV resolutions allowed for the current format, if the
@@ -273,20 +277,21 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
273 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 277 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
274 int i; 278 int i;
275 279
276 printk(KERN_DEBUG "%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd); 280 DRM_DEBUG_KMS(I915_SDVO, "%s: W: %02X ",
281 SDVO_NAME(sdvo_priv), cmd);
277 for (i = 0; i < args_len; i++) 282 for (i = 0; i < args_len; i++)
278 printk(KERN_DEBUG "%02X ", ((u8 *)args)[i]); 283 DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
279 for (; i < 8; i++) 284 for (; i < 8; i++)
280 printk(KERN_DEBUG " "); 285 DRM_LOG_KMS(" ");
281 for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) { 286 for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) {
282 if (cmd == sdvo_cmd_names[i].cmd) { 287 if (cmd == sdvo_cmd_names[i].cmd) {
283 printk(KERN_DEBUG "(%s)", sdvo_cmd_names[i].name); 288 DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
284 break; 289 break;
285 } 290 }
286 } 291 }
287 if (i == sizeof(sdvo_cmd_names)/ sizeof(sdvo_cmd_names[0])) 292 if (i == sizeof(sdvo_cmd_names)/ sizeof(sdvo_cmd_names[0]))
288 printk(KERN_DEBUG "(%02X)", cmd); 293 DRM_LOG_KMS("(%02X)", cmd);
289 printk(KERN_DEBUG "\n"); 294 DRM_LOG_KMS("\n");
290} 295}
291#else 296#else
292#define intel_sdvo_debug_write(o, c, a, l) 297#define intel_sdvo_debug_write(o, c, a, l)
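
The logging conversion above is more than cosmetic: each printk(KERN_DEBUG ...) starts a new log record, so a hex dump emitted piecewise is split across lines, while DRM_DEBUG_KMS plus DRM_LOG_KMS keep one record going and respect the drm debug mask. The generic continuation idiom, shown only as a sketch:

    /* illustrative: plain printk needs KERN_CONT to continue a line */
    static void dump_bytes(const u8 *buf, int len)
    {
            int i;
            printk(KERN_DEBUG "W:");
            for (i = 0; i < len; i++)
                    printk(KERN_CONT " %02X", buf[i]);
            printk(KERN_CONT "\n");
    }
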
@@ -325,16 +330,16 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
325 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 330 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
326 int i; 331 int i;
327 332
328 printk(KERN_DEBUG "%s: R: ", SDVO_NAME(sdvo_priv)); 333 DRM_DEBUG_KMS(I915_SDVO, "%s: R: ", SDVO_NAME(sdvo_priv));
329 for (i = 0; i < response_len; i++) 334 for (i = 0; i < response_len; i++)
330 printk(KERN_DEBUG "%02X ", ((u8 *)response)[i]); 335 DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
331 for (; i < 8; i++) 336 for (; i < 8; i++)
332 printk(KERN_DEBUG " "); 337 DRM_LOG_KMS(" ");
333 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) 338 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
334 printk(KERN_DEBUG "(%s)", cmd_status_names[status]); 339 DRM_LOG_KMS("(%s)", cmd_status_names[status]);
335 else 340 else
336 printk(KERN_DEBUG "(??? %d)", status); 341 DRM_LOG_KMS("(??? %d)", status);
337 printk(KERN_DEBUG "\n"); 342 DRM_LOG_KMS("\n");
338} 343}
339#else 344#else
340#define intel_sdvo_debug_response(o, r, l, s) 345#define intel_sdvo_debug_response(o, r, l, s)
@@ -1398,10 +1403,8 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
1398static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) 1403static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1399{ 1404{
1400 struct intel_output *intel_output = to_intel_output(connector); 1405 struct intel_output *intel_output = to_intel_output(connector);
1401 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1402 1406
1403 /* set the bus switch and get the modes */ 1407 /* set the bus switch and get the modes */
1404 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
1405 intel_ddc_get_modes(intel_output); 1408 intel_ddc_get_modes(intel_output);
1406 1409
1407#if 0 1410#if 0
@@ -1543,6 +1546,37 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1543 } 1546 }
1544} 1547}
1545 1548
1549static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1550{
1551 struct intel_output *intel_output = to_intel_output(connector);
1552 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1553 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1554
1555 /*
1556 * Attempt to get the mode list from DDC.
1557 * Assume that the preferred modes are
1558 * arranged in priority order.
1559 */
1560 /* set the bus switch and get the modes */
1561 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
1562 intel_ddc_get_modes(intel_output);
1563 if (list_empty(&connector->probed_modes) == false)
1564 return;
1565
1566 /* Fetch modes from VBT */
1567 if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
1568 struct drm_display_mode *newmode;
1569 newmode = drm_mode_duplicate(connector->dev,
1570 dev_priv->sdvo_lvds_vbt_mode);
1571 if (newmode != NULL) {
1572 /* Guarantee the mode is preferred */
1573 newmode->type = (DRM_MODE_TYPE_PREFERRED |
1574 DRM_MODE_TYPE_DRIVER);
1575 drm_mode_probed_add(connector, newmode);
1576 }
1577 }
1578}
1579
1546static int intel_sdvo_get_modes(struct drm_connector *connector) 1580static int intel_sdvo_get_modes(struct drm_connector *connector)
1547{ 1581{
1548 struct intel_output *output = to_intel_output(connector); 1582 struct intel_output *output = to_intel_output(connector);
@@ -1550,6 +1584,8 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
1550 1584
1551 if (sdvo_priv->is_tv) 1585 if (sdvo_priv->is_tv)
1552 intel_sdvo_get_tv_modes(connector); 1586 intel_sdvo_get_tv_modes(connector);
1587 else if (sdvo_priv->is_lvds == true)
1588 intel_sdvo_get_lvds_modes(connector);
1553 else 1589 else
1554 intel_sdvo_get_ddc_modes(connector); 1590 intel_sdvo_get_ddc_modes(connector);
1555 1591
@@ -1564,6 +1600,9 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1564 1600
1565 if (intel_output->i2c_bus) 1601 if (intel_output->i2c_bus)
1566 intel_i2c_destroy(intel_output->i2c_bus); 1602 intel_i2c_destroy(intel_output->i2c_bus);
1603 if (intel_output->ddc_bus)
1604 intel_i2c_destroy(intel_output->ddc_bus);
1605
1567 drm_sysfs_connector_remove(connector); 1606 drm_sysfs_connector_remove(connector);
1568 drm_connector_cleanup(connector); 1607 drm_connector_cleanup(connector);
1569 kfree(intel_output); 1608 kfree(intel_output);
@@ -1578,6 +1617,7 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
1578}; 1617};
1579 1618
1580static const struct drm_connector_funcs intel_sdvo_connector_funcs = { 1619static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
1620 .dpms = drm_helper_connector_dpms,
1581 .save = intel_sdvo_save, 1621 .save = intel_sdvo_save,
1582 .restore = intel_sdvo_restore, 1622 .restore = intel_sdvo_restore,
1583 .detect = intel_sdvo_detect, 1623 .detect = intel_sdvo_detect,
@@ -1660,33 +1700,107 @@ intel_sdvo_get_digital_encoding_mode(struct intel_output *output)
1660 return true; 1700 return true;
1661} 1701}
1662 1702
1703static struct intel_output *
1704intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan)
1705{
1706 struct drm_device *dev = chan->drm_dev;
1707 struct drm_connector *connector;
1708 struct intel_output *intel_output = NULL;
1709
1710 list_for_each_entry(connector,
1711 &dev->mode_config.connector_list, head) {
1712 if (to_intel_output(connector)->ddc_bus == chan) {
1713 intel_output = to_intel_output(connector);
1714 break;
1715 }
1716 }
1717 return intel_output;
1718}
1719
1720static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
1721 struct i2c_msg msgs[], int num)
1722{
1723 struct intel_output *intel_output;
1724 struct intel_sdvo_priv *sdvo_priv;
1725 struct i2c_algo_bit_data *algo_data;
1726 struct i2c_algorithm *algo;
1727
1728 algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
1729 intel_output =
1730 intel_sdvo_chan_to_intel_output(
1731 (struct intel_i2c_chan *)(algo_data->data));
1732 if (intel_output == NULL)
1733 return -EINVAL;
1734
1735 sdvo_priv = intel_output->dev_priv;
1736 algo = (struct i2c_algorithm *)intel_output->i2c_bus->adapter.algo;
1737
1738 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
1739 return algo->master_xfer(i2c_adap, msgs, num);
1740}
1741
1742static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
1743 .master_xfer = intel_sdvo_master_xfer,
1744};
1745
1746static u8
1747intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
1748{
1749 struct drm_i915_private *dev_priv = dev->dev_private;
1750 struct sdvo_device_mapping *my_mapping, *other_mapping;
1751
1752 if (output_device == SDVOB) {
1753 my_mapping = &dev_priv->sdvo_mappings[0];
1754 other_mapping = &dev_priv->sdvo_mappings[1];
1755 } else {
1756 my_mapping = &dev_priv->sdvo_mappings[1];
1757 other_mapping = &dev_priv->sdvo_mappings[0];
1758 }
1759
1760 /* If the BIOS described our SDVO device, take advantage of it. */
1761 if (my_mapping->slave_addr)
1762 return my_mapping->slave_addr;
1763
1764 /* If the BIOS only described a different SDVO device, use the
1765 * address that it isn't using.
1766 */
1767 if (other_mapping->slave_addr) {
1768 if (other_mapping->slave_addr == 0x70)
1769 return 0x72;
1770 else
1771 return 0x70;
1772 }
1773
 1774 /* No SDVO device info was found for either DVO port,
 1775 * so fall back to the mapping assumed before BIOS parsing.
1776 */
1777 if (output_device == SDVOB)
1778 return 0x70;
1779 else
1780 return 0x72;
1781}
1782
1663bool intel_sdvo_init(struct drm_device *dev, int output_device) 1783bool intel_sdvo_init(struct drm_device *dev, int output_device)
1664{ 1784{
1665 struct drm_connector *connector; 1785 struct drm_connector *connector;
1666 struct intel_output *intel_output; 1786 struct intel_output *intel_output;
1667 struct intel_sdvo_priv *sdvo_priv; 1787 struct intel_sdvo_priv *sdvo_priv;
1668 struct intel_i2c_chan *i2cbus = NULL; 1788 struct intel_i2c_chan *i2cbus = NULL;
1789 struct intel_i2c_chan *ddcbus = NULL;
1669 int connector_type; 1790 int connector_type;
1670 u8 ch[0x40]; 1791 u8 ch[0x40];
1671 int i; 1792 int i;
1672 int encoder_type, output_id; 1793 int encoder_type, output_id;
1794 u8 slave_addr;
1673 1795
1674 intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); 1796 intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
1675 if (!intel_output) { 1797 if (!intel_output) {
1676 return false; 1798 return false;
1677 } 1799 }
1678 1800
1679 connector = &intel_output->base;
1680
1681 drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
1682 DRM_MODE_CONNECTOR_Unknown);
1683 drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
1684 sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); 1801 sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
1685 intel_output->type = INTEL_OUTPUT_SDVO; 1802 intel_output->type = INTEL_OUTPUT_SDVO;
1686 1803
1687 connector->interlace_allowed = 0;
1688 connector->doublescan_allowed = 0;
1689
1690 /* setup the DDC bus. */ 1804 /* setup the DDC bus. */
1691 if (output_device == SDVOB) 1805 if (output_device == SDVOB)
1692 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); 1806 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
@@ -1694,32 +1808,47 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1694 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); 1808 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
1695 1809
1696 if (!i2cbus) 1810 if (!i2cbus)
1697 goto err_connector; 1811 goto err_inteloutput;
1698 1812
1813 slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
1699 sdvo_priv->i2c_bus = i2cbus; 1814 sdvo_priv->i2c_bus = i2cbus;
1700 1815
1701 if (output_device == SDVOB) { 1816 if (output_device == SDVOB) {
1702 output_id = 1; 1817 output_id = 1;
1703 sdvo_priv->i2c_bus->slave_addr = 0x38;
1704 } else { 1818 } else {
1705 output_id = 2; 1819 output_id = 2;
1706 sdvo_priv->i2c_bus->slave_addr = 0x39;
1707 } 1820 }
1708 1821 sdvo_priv->i2c_bus->slave_addr = slave_addr >> 1;
1709 sdvo_priv->output_device = output_device; 1822 sdvo_priv->output_device = output_device;
1710 intel_output->i2c_bus = i2cbus; 1823 intel_output->i2c_bus = i2cbus;
1711 intel_output->dev_priv = sdvo_priv; 1824 intel_output->dev_priv = sdvo_priv;
1712 1825
1713
1714 /* Read the regs to test if we can talk to the device */ 1826 /* Read the regs to test if we can talk to the device */
1715 for (i = 0; i < 0x40; i++) { 1827 for (i = 0; i < 0x40; i++) {
1716 if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { 1828 if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) {
1717 DRM_DEBUG("No SDVO device found on SDVO%c\n", 1829 DRM_DEBUG_KMS(I915_SDVO,
1718 output_device == SDVOB ? 'B' : 'C'); 1830 "No SDVO device found on SDVO%c\n",
1831 output_device == SDVOB ? 'B' : 'C');
1719 goto err_i2c; 1832 goto err_i2c;
1720 } 1833 }
1721 } 1834 }
1722 1835
1836 /* setup the DDC bus. */
1837 if (output_device == SDVOB)
1838 ddcbus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
1839 else
1840 ddcbus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
1841
1842 if (ddcbus == NULL)
1843 goto err_i2c;
1844
1845 intel_sdvo_i2c_bit_algo.functionality =
1846 intel_output->i2c_bus->adapter.algo->functionality;
1847 ddcbus->adapter.algo = &intel_sdvo_i2c_bit_algo;
1848 intel_output->ddc_bus = ddcbus;
1849
 1850 /* In the default case, sdvo lvds is false */
1851 sdvo_priv->is_lvds = false;
1723 intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); 1852 intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
1724 1853
1725 if (sdvo_priv->caps.output_flags & 1854 if (sdvo_priv->caps.output_flags &
@@ -1729,7 +1858,6 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1729 else 1858 else
1730 sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; 1859 sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
1731 1860
1732 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1733 encoder_type = DRM_MODE_ENCODER_TMDS; 1861 encoder_type = DRM_MODE_ENCODER_TMDS;
1734 connector_type = DRM_MODE_CONNECTOR_DVID; 1862 connector_type = DRM_MODE_CONNECTOR_DVID;
1735 1863
@@ -1747,7 +1875,6 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1747 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0) 1875 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0)
1748 { 1876 {
1749 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; 1877 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
1750 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1751 encoder_type = DRM_MODE_ENCODER_TVDAC; 1878 encoder_type = DRM_MODE_ENCODER_TVDAC;
1752 connector_type = DRM_MODE_CONNECTOR_SVIDEO; 1879 connector_type = DRM_MODE_CONNECTOR_SVIDEO;
1753 sdvo_priv->is_tv = true; 1880 sdvo_priv->is_tv = true;
@@ -1756,30 +1883,28 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1756 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) 1883 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0)
1757 { 1884 {
1758 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; 1885 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
1759 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1760 encoder_type = DRM_MODE_ENCODER_DAC; 1886 encoder_type = DRM_MODE_ENCODER_DAC;
1761 connector_type = DRM_MODE_CONNECTOR_VGA; 1887 connector_type = DRM_MODE_CONNECTOR_VGA;
1762 } 1888 }
1763 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) 1889 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1)
1764 { 1890 {
1765 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; 1891 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
1766 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1767 encoder_type = DRM_MODE_ENCODER_DAC; 1892 encoder_type = DRM_MODE_ENCODER_DAC;
1768 connector_type = DRM_MODE_CONNECTOR_VGA; 1893 connector_type = DRM_MODE_CONNECTOR_VGA;
1769 } 1894 }
1770 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0) 1895 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0)
1771 { 1896 {
1772 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; 1897 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
1773 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1774 encoder_type = DRM_MODE_ENCODER_LVDS; 1898 encoder_type = DRM_MODE_ENCODER_LVDS;
1775 connector_type = DRM_MODE_CONNECTOR_LVDS; 1899 connector_type = DRM_MODE_CONNECTOR_LVDS;
1900 sdvo_priv->is_lvds = true;
1776 } 1901 }
1777 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1) 1902 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1)
1778 { 1903 {
1779 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; 1904 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
1780 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1781 encoder_type = DRM_MODE_ENCODER_LVDS; 1905 encoder_type = DRM_MODE_ENCODER_LVDS;
1782 connector_type = DRM_MODE_CONNECTOR_LVDS; 1906 connector_type = DRM_MODE_CONNECTOR_LVDS;
1907 sdvo_priv->is_lvds = true;
1783 } 1908 }
1784 else 1909 else
1785 { 1910 {
@@ -1787,17 +1912,25 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1787 1912
1788 sdvo_priv->controlled_output = 0; 1913 sdvo_priv->controlled_output = 0;
1789 memcpy (bytes, &sdvo_priv->caps.output_flags, 2); 1914 memcpy (bytes, &sdvo_priv->caps.output_flags, 2);
1790 DRM_DEBUG("%s: Unknown SDVO output type (0x%02x%02x)\n", 1915 DRM_DEBUG_KMS(I915_SDVO,
1791 SDVO_NAME(sdvo_priv), 1916 "%s: Unknown SDVO output type (0x%02x%02x)\n",
1792 bytes[0], bytes[1]); 1917 SDVO_NAME(sdvo_priv),
1918 bytes[0], bytes[1]);
1793 encoder_type = DRM_MODE_ENCODER_NONE; 1919 encoder_type = DRM_MODE_ENCODER_NONE;
1794 connector_type = DRM_MODE_CONNECTOR_Unknown; 1920 connector_type = DRM_MODE_CONNECTOR_Unknown;
1795 goto err_i2c; 1921 goto err_i2c;
1796 } 1922 }
1797 1923
1924 connector = &intel_output->base;
1925 drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
1926 connector_type);
1927 drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
1928 connector->interlace_allowed = 0;
1929 connector->doublescan_allowed = 0;
1930 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1931
1798 drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type); 1932 drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type);
1799 drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); 1933 drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs);
1800 connector->connector_type = connector_type;
1801 1934
1802 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 1935 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
1803 drm_sysfs_connector_add(connector); 1936 drm_sysfs_connector_add(connector);
@@ -1812,31 +1945,30 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1812 &sdvo_priv->pixel_clock_max); 1945 &sdvo_priv->pixel_clock_max);
1813 1946
1814 1947
1815 DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, " 1948 DRM_DEBUG_KMS(I915_SDVO, "%s device VID/DID: %02X:%02X.%02X, "
1816 "clock range %dMHz - %dMHz, " 1949 "clock range %dMHz - %dMHz, "
1817 "input 1: %c, input 2: %c, " 1950 "input 1: %c, input 2: %c, "
1818 "output 1: %c, output 2: %c\n", 1951 "output 1: %c, output 2: %c\n",
1819 SDVO_NAME(sdvo_priv), 1952 SDVO_NAME(sdvo_priv),
1820 sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id, 1953 sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
1821 sdvo_priv->caps.device_rev_id, 1954 sdvo_priv->caps.device_rev_id,
1822 sdvo_priv->pixel_clock_min / 1000, 1955 sdvo_priv->pixel_clock_min / 1000,
1823 sdvo_priv->pixel_clock_max / 1000, 1956 sdvo_priv->pixel_clock_max / 1000,
1824 (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', 1957 (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
1825 (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', 1958 (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
1826 /* check currently supported outputs */ 1959 /* check currently supported outputs */
1827 sdvo_priv->caps.output_flags & 1960 sdvo_priv->caps.output_flags &
1828 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', 1961 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
1829 sdvo_priv->caps.output_flags & 1962 sdvo_priv->caps.output_flags &
1830 (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); 1963 (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
1831 1964
1832 intel_output->ddc_bus = i2cbus;
1833
1834 return true; 1965 return true;
1835 1966
1836err_i2c: 1967err_i2c:
1968 if (ddcbus != NULL)
1969 intel_i2c_destroy(intel_output->ddc_bus);
1837 intel_i2c_destroy(intel_output->i2c_bus); 1970 intel_i2c_destroy(intel_output->i2c_bus);
1838err_connector: 1971err_inteloutput:
1839 drm_connector_cleanup(connector);
1840 kfree(intel_output); 1972 kfree(intel_output);
1841 1973
1842 return false; 1974 return false;
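
One pattern in this file deserves a note: the new DDC bus never reaches the monitor directly. intel_sdvo_master_xfer() first programs the SDVO control-bus switch, then delegates to the original bit-banging transfer. Wrapping an i2c_algorithm like this is a general technique; a minimal sketch with hypothetical helper names:

    static const struct i2c_algorithm *orig_algo;   /* saved real algorithm */

    static int wrapped_xfer(struct i2c_adapter *adap,
                            struct i2c_msg *msgs, int num)
    {
            select_ddc_bus(adap);           /* hypothetical setup hook */
            return orig_algo->master_xfer(adap, msgs, num);
    }

    static struct i2c_algorithm wrapper_algo = {
            .master_xfer = wrapped_xfer,
    };
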
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d2c32983242..50d7ed70b33 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1392,6 +1392,9 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
1392 tv_ctl &= ~TV_TEST_MODE_MASK; 1392 tv_ctl &= ~TV_TEST_MODE_MASK;
1393 tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; 1393 tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
1394 tv_dac &= ~TVDAC_SENSE_MASK; 1394 tv_dac &= ~TVDAC_SENSE_MASK;
1395 tv_dac &= ~DAC_A_MASK;
1396 tv_dac &= ~DAC_B_MASK;
1397 tv_dac &= ~DAC_C_MASK;
1395 tv_dac |= (TVDAC_STATE_CHG_EN | 1398 tv_dac |= (TVDAC_STATE_CHG_EN |
1396 TVDAC_A_SENSE_CTL | 1399 TVDAC_A_SENSE_CTL |
1397 TVDAC_B_SENSE_CTL | 1400 TVDAC_B_SENSE_CTL |
@@ -1626,6 +1629,7 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
1626}; 1629};
1627 1630
1628static const struct drm_connector_funcs intel_tv_connector_funcs = { 1631static const struct drm_connector_funcs intel_tv_connector_funcs = {
1632 .dpms = drm_helper_connector_dpms,
1629 .save = intel_tv_save, 1633 .save = intel_tv_save,
1630 .restore = intel_tv_restore, 1634 .restore = intel_tv_restore,
1631 .detect = intel_tv_detect, 1635 .detect = intel_tv_detect,
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index bc9d09dfa8e..146f3570af8 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -478,26 +478,27 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
478 478
479 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770)) { 479 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770)) {
480 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 480 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
481 DRM_INFO("Loading RV770 PFP Microcode\n"); 481 DRM_INFO("Loading RV770/RV790 PFP Microcode\n");
482 for (i = 0; i < R700_PFP_UCODE_SIZE; i++) 482 for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
483 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV770_pfp_microcode[i]); 483 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV770_pfp_microcode[i]);
484 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 484 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
485 485
486 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); 486 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
487 DRM_INFO("Loading RV770 CP Microcode\n"); 487 DRM_INFO("Loading RV770/RV790 CP Microcode\n");
488 for (i = 0; i < R700_PM4_UCODE_SIZE; i++) 488 for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
489 RADEON_WRITE(R600_CP_ME_RAM_DATA, RV770_cp_microcode[i]); 489 RADEON_WRITE(R600_CP_ME_RAM_DATA, RV770_cp_microcode[i]);
490 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); 490 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
491 491
492 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV730)) { 492 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV730) ||
493 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)) {
493 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 494 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
494 DRM_INFO("Loading RV730 PFP Microcode\n"); 495 DRM_INFO("Loading RV730/RV740 PFP Microcode\n");
495 for (i = 0; i < R700_PFP_UCODE_SIZE; i++) 496 for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
496 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV730_pfp_microcode[i]); 497 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV730_pfp_microcode[i]);
497 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 498 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
498 499
499 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); 500 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
500 DRM_INFO("Loading RV730 CP Microcode\n"); 501 DRM_INFO("Loading RV730/RV740 CP Microcode\n");
501 for (i = 0; i < R700_PM4_UCODE_SIZE; i++) 502 for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
502 RADEON_WRITE(R600_CP_ME_RAM_DATA, RV730_cp_microcode[i]); 503 RADEON_WRITE(R600_CP_ME_RAM_DATA, RV730_cp_microcode[i]);
503 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); 504 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
@@ -1324,6 +1325,10 @@ static void r700_gfx_init(struct drm_device *dev,
1324 dev_priv->r700_sc_prim_fifo_size = 0xf9; 1325 dev_priv->r700_sc_prim_fifo_size = 0xf9;
1325 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30; 1326 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
1326 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130; 1327 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
1328 if (dev_priv->r600_sx_max_export_pos_size > 16) {
1329 dev_priv->r600_sx_max_export_pos_size -= 16;
1330 dev_priv->r600_sx_max_export_smx_size += 16;
1331 }
1327 break; 1332 break;
1328 case CHIP_RV710: 1333 case CHIP_RV710:
1329 dev_priv->r600_max_pipes = 2; 1334 dev_priv->r600_max_pipes = 2;
@@ -1345,6 +1350,31 @@ static void r700_gfx_init(struct drm_device *dev,
1345 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30; 1350 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
1346 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130; 1351 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
1347 break; 1352 break;
1353 case CHIP_RV740:
1354 dev_priv->r600_max_pipes = 4;
1355 dev_priv->r600_max_tile_pipes = 4;
1356 dev_priv->r600_max_simds = 8;
1357 dev_priv->r600_max_backends = 4;
1358 dev_priv->r600_max_gprs = 256;
1359 dev_priv->r600_max_threads = 248;
1360 dev_priv->r600_max_stack_entries = 512;
1361 dev_priv->r600_max_hw_contexts = 8;
1362 dev_priv->r600_max_gs_threads = 16 * 2;
1363 dev_priv->r600_sx_max_export_size = 256;
1364 dev_priv->r600_sx_max_export_pos_size = 32;
1365 dev_priv->r600_sx_max_export_smx_size = 224;
1366 dev_priv->r600_sq_num_cf_insts = 2;
1367
1368 dev_priv->r700_sx_num_of_sets = 7;
1369 dev_priv->r700_sc_prim_fifo_size = 0x100;
1370 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
1371 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
1372
1373 if (dev_priv->r600_sx_max_export_pos_size > 16) {
1374 dev_priv->r600_sx_max_export_pos_size -= 16;
1375 dev_priv->r600_sx_max_export_smx_size += 16;
1376 }
1377 break;
1348 default: 1378 default:
1349 break; 1379 break;
1350 } 1380 }
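
The quirk added to the RV730 and RV740 blocks moves 16 entries of SX export space from position exports to SMX whenever the position budget exceeds 16. For the RV740 values in this hunk the arithmetic works out as below (a standalone check, not driver code):

    #include <stdio.h>

    int main(void)
    {
            unsigned int pos = 32, smx = 224;   /* RV740 values above */

            if (pos > 16) {
                    pos -= 16;
                    smx += 16;
            }
            printf("pos=%u smx=%u\n", pos, smx);   /* pos=16 smx=240 */
            return 0;
    }
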
@@ -1493,6 +1523,7 @@ static void r700_gfx_init(struct drm_device *dev,
1493 break; 1523 break;
1494 case CHIP_RV730: 1524 case CHIP_RV730:
1495 case CHIP_RV710: 1525 case CHIP_RV710:
1526 case CHIP_RV740:
1496 default: 1527 default:
1497 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4); 1528 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4);
1498 break; 1529 break;
@@ -1569,6 +1600,7 @@ static void r700_gfx_init(struct drm_device *dev,
1569 switch (dev_priv->flags & RADEON_FAMILY_MASK) { 1600 switch (dev_priv->flags & RADEON_FAMILY_MASK) {
1570 case CHIP_RV770: 1601 case CHIP_RV770:
1571 case CHIP_RV730: 1602 case CHIP_RV730:
1603 case CHIP_RV740:
1572 gs_prim_buffer_depth = 384; 1604 gs_prim_buffer_depth = 384;
1573 break; 1605 break;
1574 case CHIP_RV710: 1606 case CHIP_RV710:
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 77a7a4d8465..89c4c44169f 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2109,7 +2109,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master)
2109 2109
2110 /* prebuild the SAREA */ 2110 /* prebuild the SAREA */
2111 sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE); 2111 sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
2112 ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER, 2112 ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK,
2113 &master_priv->sarea); 2113 &master_priv->sarea);
2114 if (ret) { 2114 if (ret) {
2115 DRM_ERROR("SAREA setup failed\n"); 2115 DRM_ERROR("SAREA setup failed\n");
@@ -2185,9 +2185,9 @@ void radeon_commit_ring(drm_radeon_private_t *dev_priv)
2185 2185
2186 /* check if the ring is padded out to 16-dword alignment */ 2186 /* check if the ring is padded out to 16-dword alignment */
2187 2187
2188 tail_aligned = dev_priv->ring.tail & 0xf; 2188 tail_aligned = dev_priv->ring.tail & (RADEON_RING_ALIGN-1);
2189 if (tail_aligned) { 2189 if (tail_aligned) {
2190 int num_p2 = 16 - tail_aligned; 2190 int num_p2 = RADEON_RING_ALIGN - tail_aligned;
2191 2191
2192 ring = dev_priv->ring.start; 2192 ring = dev_priv->ring.start;
2193 /* pad with some CP_PACKET2 */ 2193 /* pad with some CP_PACKET2 */
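
The padding math in radeon_commit_ring() generalizes the hard-coded 16 through the usual power-of-two mask trick: tail & (ALIGN - 1) is the distance past the last boundary, and ALIGN minus that is how many CP_PACKET2 NOPs are needed. A standalone check of the arithmetic:

    #include <stdio.h>

    #define RING_ALIGN 16                   /* mirrors RADEON_RING_ALIGN */

    int main(void)
    {
            unsigned int tail = 21;                         /* example */
            unsigned int partial = tail & (RING_ALIGN - 1); /* 21 & 15 = 5 */

            if (partial)
                    printf("pad %u dwords to reach %u\n",
                           RING_ALIGN - partial,
                           tail + RING_ALIGN - partial);    /* 11, 32 */
            return 0;
    }
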
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 8071d965f14..127d0456f62 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -146,6 +146,7 @@ enum radeon_family {
146 CHIP_RV770, 146 CHIP_RV770,
147 CHIP_RV730, 147 CHIP_RV730,
148 CHIP_RV710, 148 CHIP_RV710,
149 CHIP_RV740,
149 CHIP_LAST, 150 CHIP_LAST,
150}; 151};
151 152
@@ -1964,11 +1965,14 @@ do { \
1964 1965
1965#define RING_LOCALS int write, _nr, _align_nr; unsigned int mask; u32 *ring; 1966#define RING_LOCALS int write, _nr, _align_nr; unsigned int mask; u32 *ring;
1966 1967
1968#define RADEON_RING_ALIGN 16
1969
1967#define BEGIN_RING( n ) do { \ 1970#define BEGIN_RING( n ) do { \
1968 if ( RADEON_VERBOSE ) { \ 1971 if ( RADEON_VERBOSE ) { \
1969 DRM_INFO( "BEGIN_RING( %d )\n", (n)); \ 1972 DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
1970 } \ 1973 } \
1971 _align_nr = (n + 0xf) & ~0xf; \ 1974 _align_nr = RADEON_RING_ALIGN - ((dev_priv->ring.tail + n) & (RADEON_RING_ALIGN-1)); \
1975 _align_nr += n; \
1972 if (dev_priv->ring.space <= (_align_nr * sizeof(u32))) { \ 1976 if (dev_priv->ring.space <= (_align_nr * sizeof(u32))) { \
1973 COMMIT_RING(); \ 1977 COMMIT_RING(); \
1974 radeon_wait_ring( dev_priv, _align_nr * sizeof(u32)); \ 1978 radeon_wait_ring( dev_priv, _align_nr * sizeof(u32)); \
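
BEGIN_RING's reservation also changes meaning. The old (n + 0xf) & ~0xf merely rounded the request up to a multiple of 16; the new form reserves the n command dwords plus exactly the padding COMMIT_RING will append from the post-write tail. A quick numeric check, assuming tail = 5 and n = 7:

    #include <stdio.h>

    #define ALIGN 16

    int main(void)
    {
            unsigned int tail = 5, n = 7;
            unsigned int old_nr = (n + 0xf) & ~0xf;                    /* 16 */
            unsigned int new_nr = ALIGN - ((tail + n) & (ALIGN - 1));  /*  4 */

            new_nr += n;                                               /* 11 */
            printf("old=%u new=%u\n", old_nr, new_nr);
            return 0;
    }

The new count of 11 covers the 7 command dwords plus the 4 NOPs needed to pad the tail (12) up to the next 16-dword boundary, a guarantee the old round-up could not give once the tail itself was unaligned.
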
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 409e00afdd0..327380888b4 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -195,10 +195,8 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
195 default: 195 default:
196 vsg->state = dr_via_sg_init; 196 vsg->state = dr_via_sg_init;
197 } 197 }
198 if (vsg->bounce_buffer) { 198 vfree(vsg->bounce_buffer);
199 vfree(vsg->bounce_buffer); 199 vsg->bounce_buffer = NULL;
200 vsg->bounce_buffer = NULL;
201 }
202 vsg->free_on_sequence = 0; 200 vsg->free_on_sequence = 0;
203} 201}
204 202
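
The via_dmablit cleanup leans on a kernel guarantee: like kfree(), vfree() accepts NULL and does nothing, so the removed check was redundant. The release helper thus reduces to a free-and-poison pair, sketched here:

    static void free_bounce(drm_via_sg_info_t *vsg)
    {
            vfree(vsg->bounce_buffer);      /* no-op when NULL */
            vsg->bounce_buffer = NULL;      /* guards against double free */
    }
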
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 7e67dcb3d4f..7831a0318d3 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -116,9 +116,16 @@ config HID_CYPRESS
116 ---help--- 116 ---help---
117 Support for cypress mouse and barcode readers. 117 Support for cypress mouse and barcode readers.
118 118
119config DRAGONRISE_FF 119config HID_DRAGONRISE
120 tristate "DragonRise Inc. force feedback support" 120 tristate "DragonRise Inc. support" if EMBEDDED
121 depends on USB_HID 121 depends on USB_HID
122 default !EMBEDDED
123 ---help---
 124 Say Y here if you have DragonRise Inc. game controllers.
125
126config DRAGONRISE_FF
127 bool "DragonRise Inc. force feedback support"
128 depends on HID_DRAGONRISE
122 select INPUT_FF_MEMLESS 129 select INPUT_FF_MEMLESS
123 ---help--- 130 ---help---
124 Say Y here if you want to enable force feedback support for DragonRise Inc. 131 Say Y here if you want to enable force feedback support for DragonRise Inc.
@@ -160,7 +167,7 @@ config HID_LOGITECH
160 Support for Logitech devices that are not fully compliant with HID standard. 167 Support for Logitech devices that are not fully compliant with HID standard.
161 168
162config LOGITECH_FF 169config LOGITECH_FF
163 bool "Logitech force feedback" 170 bool "Logitech force feedback support"
164 depends on HID_LOGITECH 171 depends on HID_LOGITECH
165 select INPUT_FF_MEMLESS 172 select INPUT_FF_MEMLESS
166 help 173 help
@@ -176,7 +183,7 @@ config LOGITECH_FF
176 force feedback. 183 force feedback.
177 184
178config LOGIRUMBLEPAD2_FF 185config LOGIRUMBLEPAD2_FF
179 bool "Logitech Rumblepad 2 force feedback" 186 bool "Logitech Rumblepad 2 force feedback support"
180 depends on HID_LOGITECH 187 depends on HID_LOGITECH
181 select INPUT_FF_MEMLESS 188 select INPUT_FF_MEMLESS
182 help 189 help
@@ -211,11 +218,19 @@ config HID_PANTHERLORD
211 ---help--- 218 ---help---
212 Support for PantherLord/GreenAsia based device support. 219 Support for PantherLord/GreenAsia based device support.
213 220
221config HID_PANTHERLORD
222 tristate "Pantherlord support" if EMBEDDED
223 depends on USB_HID
224 default !EMBEDDED
225 ---help---
226 Say Y here if you have a PantherLord/GreenAsia based game controller
227 or adapter.
228
214config PANTHERLORD_FF 229config PANTHERLORD_FF
215 bool "Pantherlord force feedback support" 230 bool "Pantherlord force feedback support"
216 depends on HID_PANTHERLORD 231 depends on HID_PANTHERLORD
217 select INPUT_FF_MEMLESS 232 select INPUT_FF_MEMLESS
218 help 233 ---help---
219 Say Y here if you have a PantherLord/GreenAsia based game controller 234 Say Y here if you have a PantherLord/GreenAsia based game controller
220 or adapter and want to enable force feedback support for it. 235 or adapter and want to enable force feedback support for it.
221 236
@@ -247,15 +262,38 @@ config HID_SUNPLUS
247 ---help--- 262 ---help---
248 Support for Sunplus wireless desktop. 263 Support for Sunplus wireless desktop.
249 264
250config GREENASIA_FF 265config HID_GREENASIA
251 tristate "GreenAsia (Product ID 0x12) force feedback support" 266 tristate "GreenAsia (Product ID 0x12) support" if EMBEDDED
252 depends on USB_HID 267 depends on USB_HID
268 default !EMBEDDED
269 ---help---
270 Say Y here if you have a GreenAsia (Product ID 0x12) based game
271 controller or adapter.
272
273config GREENASIA_FF
274 bool "GreenAsia (Product ID 0x12) force feedback support"
275 depends on HID_GREENASIA
253 select INPUT_FF_MEMLESS 276 select INPUT_FF_MEMLESS
254 ---help--- 277 ---help---
255 Say Y here if you have a GreenAsia (Product ID 0x12) based game controller 278 Say Y here if you have a GreenAsia (Product ID 0x12) based game controller
256 (like MANTA Warrior MM816 and SpeedLink Strike2 SL-6635) or adapter 279 (like MANTA Warrior MM816 and SpeedLink Strike2 SL-6635) or adapter
257 and want to enable force feedback support for it. 280 and want to enable force feedback support for it.
258 281
282config HID_SMARTJOYPLUS
283 tristate "SmartJoy PLUS PS2/USB adapter support" if EMBEDDED
284 depends on USB_HID
285 default !EMBEDDED
286 ---help---
287 Support for SmartJoy PLUS PS2/USB adapter.
288
289config SMARTJOYPLUS_FF
290 bool "SmartJoy PLUS PS2/USB adapter force feedback support"
291 depends on HID_SMARTJOYPLUS
292 select INPUT_FF_MEMLESS
293 ---help---
294 Say Y here if you have a SmartJoy PLUS PS2/USB adapter and want to
295 enable force feedback support for it.
296
259config HID_TOPSEED 297config HID_TOPSEED
260 tristate "TopSeed Cyberlink remote control support" if EMBEDDED 298 tristate "TopSeed Cyberlink remote control support" if EMBEDDED
261 depends on USB_HID 299 depends on USB_HID
@@ -263,21 +301,45 @@ config HID_TOPSEED
263 ---help--- 301 ---help---
264 Say Y if you have a TopSeed Cyberlink remote control. 302 Say Y if you have a TopSeed Cyberlink remote control.
265 303
266config THRUSTMASTER_FF 304config HID_THRUSTMASTER
267 tristate "ThrustMaster devices support" 305 tristate "ThrustMaster devices support" if EMBEDDED
268 depends on USB_HID 306 depends on USB_HID
307 default !EMBEDDED
308 ---help---
309 Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or
310 a THRUSTMASTER Ferrari GT Rumble Wheel.
311
312config THRUSTMASTER_FF
313 bool "ThrustMaster devices force feedback support"
314 depends on HID_THRUSTMASTER
269 select INPUT_FF_MEMLESS 315 select INPUT_FF_MEMLESS
270 help 316 ---help---
271 Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or 317 Say Y here if you have a THRUSTMASTER FireStore Dual Power 2 or
272 a THRUSTMASTER Ferrari GT Rumble Force or Force Feedback Wheel. 318 a THRUSTMASTER Ferrari GT Rumble Force or Force Feedback Wheel and
319 want to enable force feedback support for it.
273 320
274config ZEROPLUS_FF 321config HID_WACOM
275 tristate "Zeroplus based game controller support" 322 tristate "Wacom Bluetooth devices support" if EMBEDDED
323 depends on BT_HIDP
324 default !EMBEDDED
325 ---help---
 326 Support for the Wacom Graphire Bluetooth tablet.
327
328config HID_ZEROPLUS
329 tristate "Zeroplus based game controller support" if EMBEDDED
276 depends on USB_HID 330 depends on USB_HID
277 select INPUT_FF_MEMLESS 331 default !EMBEDDED
278 help 332 ---help---
279 Say Y here if you have a Zeroplus based game controller. 333 Say Y here if you have a Zeroplus based game controller.
280 334
335config ZEROPLUS_FF
336 bool "Zeroplus based game controller force feedback support"
337 depends on HID_ZEROPLUS
338 select INPUT_FF_MEMLESS
339 ---help---
340 Say Y here if you have a Zeroplus based game controller and want
341 to have force feedback support for it.
342
281endmenu 343endmenu
282 344
283endif # HID_SUPPORT 345endif # HID_SUPPORT
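
The HID Kconfig rework applies one template throughout: each driver becomes a tristate HID_FOO symbol (default on, hidden unless EMBEDDED), and force feedback shrinks to a bool FOO_FF carved out of it. On the C side the FF code is then compiled conditionally with an empty inline fallback, as the hid-drff.c and hid-gaff.c hunks below show. The general shape of that idiom, with CONFIG_FOO_FF and foo_ff_init as placeholders rather than symbols from this patch:

    #ifdef CONFIG_FOO_FF
    static int foo_ff_init(struct hid_device *hid)
    {
            /* real force-feedback setup */
            return 0;
    }
    #else
    static inline int foo_ff_init(struct hid_device *hid)
    {
            return 0;       /* compiled out; callers stay unchanged */
    }
    #endif
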
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 1f7cb0fd450..db35151673b 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
22obj-$(CONFIG_HID_CHERRY) += hid-cherry.o 22obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
23obj-$(CONFIG_HID_CHICONY) += hid-chicony.o 23obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
24obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o 24obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o
25obj-$(CONFIG_DRAGONRISE_FF) += hid-drff.o 25obj-$(CONFIG_HID_DRAGONRISE) += hid-drff.o
26obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o 26obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
27obj-$(CONFIG_HID_GYRATION) += hid-gyration.o 27obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
28obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o 28obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
@@ -34,12 +34,14 @@ obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o
34obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o 34obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
35obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o 35obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
36obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o 36obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
37obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
37obj-$(CONFIG_HID_SONY) += hid-sony.o 38obj-$(CONFIG_HID_SONY) += hid-sony.o
38obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o 39obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
39obj-$(CONFIG_GREENASIA_FF) += hid-gaff.o 40obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
40obj-$(CONFIG_THRUSTMASTER_FF) += hid-tmff.o 41obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
41obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o 42obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
42obj-$(CONFIG_ZEROPLUS_FF) += hid-zpff.o 43obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
44obj-$(CONFIG_HID_WACOM) += hid-wacom.o
43 45
44obj-$(CONFIG_USB_HID) += usbhid/ 46obj-$(CONFIG_USB_HID) += usbhid/
45obj-$(CONFIG_USB_MOUSE) += usbhid/ 47obj-$(CONFIG_USB_MOUSE) += usbhid/
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index acbce5745b0..303ccce05bb 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -436,10 +436,6 @@ static const struct hid_device_id apple_devices[] = {
436 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY), 436 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY),
437 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 437 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
438 438
439 /* Apple wireless Mighty Mouse */
440 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 0x030c),
441 .driver_data = APPLE_MIGHTYMOUSE | APPLE_INVERT_HWHEEL },
442
443 { } 439 { }
444}; 440};
445MODULE_DEVICE_TABLE(hid, apple_devices); 441MODULE_DEVICE_TABLE(hid, apple_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 8551693d645..f2c21d5d24e 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1312,6 +1312,8 @@ static const struct hid_device_id hid_blacklist[] = {
1312 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) }, 1312 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
1313 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) }, 1313 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
1314 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, 1314 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
1315 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
1316 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
1315 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, 1317 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
1316 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, 1318 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
1317 1319
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 47ac1a7d66e..04359ed64b8 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -137,6 +137,14 @@ static const struct hid_usage_entry hid_usage_table[] = {
137 {0, 0x44, "BarrelSwitch"}, 137 {0, 0x44, "BarrelSwitch"},
138 {0, 0x45, "Eraser"}, 138 {0, 0x45, "Eraser"},
139 {0, 0x46, "TabletPick"}, 139 {0, 0x46, "TabletPick"},
140 {0, 0x47, "Confidence"},
141 {0, 0x48, "Width"},
142 {0, 0x49, "Height"},
143 {0, 0x51, "ContactID"},
144 {0, 0x52, "InputMode"},
145 {0, 0x53, "DeviceIndex"},
146 {0, 0x54, "ContactCount"},
147 {0, 0x55, "ContactMaximumNumber"},
140 { 15, 0, "PhysicalInterfaceDevice" }, 148 { 15, 0, "PhysicalInterfaceDevice" },
141 {0, 0x00, "Undefined"}, 149 {0, 0x00, "Undefined"},
142 {0, 0x01, "Physical_Interface_Device"}, 150 {0, 0x01, "Physical_Interface_Device"},
@@ -514,9 +522,11 @@ static const char *events[EV_MAX + 1] = {
514 [EV_FF_STATUS] = "ForceFeedbackStatus", 522 [EV_FF_STATUS] = "ForceFeedbackStatus",
515}; 523};
516 524
517static const char *syncs[2] = { 525static const char *syncs[3] = {
518 [SYN_REPORT] = "Report", [SYN_CONFIG] = "Config", 526 [SYN_REPORT] = "Report", [SYN_CONFIG] = "Config",
527 [SYN_MT_REPORT] = "MT Report",
519}; 528};
529
520static const char *keys[KEY_MAX + 1] = { 530static const char *keys[KEY_MAX + 1] = {
521 [KEY_RESERVED] = "Reserved", [KEY_ESC] = "Esc", 531 [KEY_RESERVED] = "Reserved", [KEY_ESC] = "Esc",
522 [KEY_1] = "1", [KEY_2] = "2", 532 [KEY_1] = "1", [KEY_2] = "2",
@@ -734,8 +744,17 @@ static const char *absolutes[ABS_MAX + 1] = {
734 [ABS_HAT2Y] = "Hat2Y", [ABS_HAT3X] = "Hat3X", 744 [ABS_HAT2Y] = "Hat2Y", [ABS_HAT3X] = "Hat3X",
735 [ABS_HAT3Y] = "Hat 3Y", [ABS_PRESSURE] = "Pressure", 745 [ABS_HAT3Y] = "Hat 3Y", [ABS_PRESSURE] = "Pressure",
736 [ABS_DISTANCE] = "Distance", [ABS_TILT_X] = "XTilt", 746 [ABS_DISTANCE] = "Distance", [ABS_TILT_X] = "XTilt",
737 [ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "Tool Width", 747 [ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "ToolWidth",
738 [ABS_VOLUME] = "Volume", [ABS_MISC] = "Misc", 748 [ABS_VOLUME] = "Volume", [ABS_MISC] = "Misc",
749 [ABS_MT_TOUCH_MAJOR] = "MTMajor",
750 [ABS_MT_TOUCH_MINOR] = "MTMinor",
751 [ABS_MT_WIDTH_MAJOR] = "MTMajorW",
752 [ABS_MT_WIDTH_MINOR] = "MTMinorW",
753 [ABS_MT_ORIENTATION] = "MTOrientation",
754 [ABS_MT_POSITION_X] = "MTPositionX",
755 [ABS_MT_POSITION_Y] = "MTPositionY",
756 [ABS_MT_TOOL_TYPE] = "MTToolType",
757 [ABS_MT_BLOB_ID] = "MTBlobID",
739}; 758};
740 759
741static const char *misc[MSC_MAX + 1] = { 760static const char *misc[MSC_MAX + 1] = {
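These string tables exist only so the debug code can turn numeric event codes into readable names when dumping traffic; the lookup they support is a bounds-checked array index, along these lines (an illustrative helper, not a function from this file):

static const char *abs_name(unsigned int code)
{
        /* "absolutes" is the [0 ... ABS_MAX] table extended above;
         * codes with no entry (or out of range) print as "?" */
        if (code > ABS_MAX || !absolutes[code])
                return "?";
        return absolutes[code];
}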
diff --git a/drivers/hid/hid-drff.c b/drivers/hid/hid-drff.c
index 34f3eb65100..a239d20ad7a 100644
--- a/drivers/hid/hid-drff.c
+++ b/drivers/hid/hid-drff.c
@@ -32,6 +32,8 @@
32#include <linux/hid.h> 32#include <linux/hid.h>
33 33
34#include "hid-ids.h" 34#include "hid-ids.h"
35
36#ifdef CONFIG_DRAGONRISE_FF
35#include "usbhid/usbhid.h" 37#include "usbhid/usbhid.h"
36 38
37struct drff_device { 39struct drff_device {
@@ -135,6 +137,12 @@ static int drff_init(struct hid_device *hid)
135 137
136 return 0; 138 return 0;
137} 139}
140#else
141static inline int drff_init(struct hid_device *hid)
142{
143 return 0;
144}
145#endif
138 146
139static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id) 147static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id)
140{ 148{
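The #ifdef/#else pair added here repeats in hid-gaff.c, hid-tmff.c and hid-zpff.c below: when the corresponding force-feedback Kconfig option is off, the init routine collapses to an empty inline stub, so the probe function can call it unconditionally. The shape of the idiom, as a sketch (the setup helper name is hypothetical):

#ifdef CONFIG_DRAGONRISE_FF
static int drff_init(struct hid_device *hid)
{
        /* full build: locate the output report and register a
         * memless force-feedback handler, as in the code above */
        return drff_setup(hid);         /* hypothetical helper */
}
#else
static inline int drff_init(struct hid_device *hid)
{
        return 0;       /* FF compiled out: succeed, do nothing */
}
#endif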
diff --git a/drivers/hid/hid-gaff.c b/drivers/hid/hid-gaff.c
index 510ad3ab8d3..8a11ccddaf2 100644
--- a/drivers/hid/hid-gaff.c
+++ b/drivers/hid/hid-gaff.c
@@ -31,6 +31,8 @@
31#include <linux/usb.h> 31#include <linux/usb.h>
32#include <linux/hid.h> 32#include <linux/hid.h>
33#include "hid-ids.h" 33#include "hid-ids.h"
34
35#ifdef CONFIG_GREENASIA_FF
34#include "usbhid/usbhid.h" 36#include "usbhid/usbhid.h"
35 37
36struct gaff_device { 38struct gaff_device {
@@ -130,6 +132,12 @@ static int gaff_init(struct hid_device *hid)
130 132
131 return 0; 133 return 0;
132} 134}
135#else
136static inline int gaff_init(struct hid_device *hdev)
137{
138 return 0;
139}
140#endif
133 141
134static int ga_probe(struct hid_device *hdev, const struct hid_device_id *id) 142static int ga_probe(struct hid_device *hdev, const struct hid_device_id *id)
135{ 143{
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 4d5ee2bbc62..63010103792 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -414,8 +414,10 @@
414#define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006 414#define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006
415 415
416#define USB_VENDOR_ID_WACOM 0x056a 416#define USB_VENDOR_ID_WACOM 0x056a
417#define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81
417 418
418#define USB_VENDOR_ID_WISEGROUP 0x0925 419#define USB_VENDOR_ID_WISEGROUP 0x0925
420#define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005
419#define USB_DEVICE_ID_1_PHIDGETSERVO_20 0x8101 421#define USB_DEVICE_ID_1_PHIDGETSERVO_20 0x8101
420#define USB_DEVICE_ID_4_PHIDGETSERVO_20 0x8104 422#define USB_DEVICE_ID_4_PHIDGETSERVO_20 0x8104
421#define USB_DEVICE_ID_8_8_4_IF_KIT 0x8201 423#define USB_DEVICE_ID_8_8_4_IF_KIT 0x8201
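The new constants feed the match tables elsewhere in this series; HID_USB_DEVICE() and HID_BLUETOOTH_DEVICE() are shorthand that fill the bus, vendor and product fields of a struct hid_device_id, so an entry built from them is roughly equivalent to:

/* approximate expansion of
 * { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) } */
static const struct hid_device_id example_id = {
        .bus     = BUS_USB,
        .vendor  = USB_VENDOR_ID_WISEGROUP,     /* 0x0925 */
        .product = USB_DEVICE_ID_SMARTJOY_PLUS, /* 0x0005 */
};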
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
index 51aff08e10c..56099709581 100644
--- a/drivers/hid/hid-lgff.c
+++ b/drivers/hid/hid-lgff.c
@@ -50,6 +50,12 @@ static const signed short ff_joystick[] = {
50 -1 50 -1
51}; 51};
52 52
53static const signed short ff_joystick_ac[] = {
54 FF_CONSTANT,
55 FF_AUTOCENTER,
56 -1
57};
58
53static const signed short ff_wheel[] = { 59static const signed short ff_wheel[] = {
54 FF_CONSTANT, 60 FF_CONSTANT,
55 FF_AUTOCENTER, 61 FF_AUTOCENTER,
@@ -60,8 +66,8 @@ static const struct dev_type devices[] = {
60 { 0x046d, 0xc211, ff_rumble }, 66 { 0x046d, 0xc211, ff_rumble },
61 { 0x046d, 0xc219, ff_rumble }, 67 { 0x046d, 0xc219, ff_rumble },
62 { 0x046d, 0xc283, ff_joystick }, 68 { 0x046d, 0xc283, ff_joystick },
63 { 0x046d, 0xc286, ff_joystick }, 69 { 0x046d, 0xc286, ff_joystick_ac },
64 { 0x046d, 0xc294, ff_joystick }, 70 { 0x046d, 0xc294, ff_wheel },
65 { 0x046d, 0xc295, ff_joystick }, 71 { 0x046d, 0xc295, ff_joystick },
66 { 0x046d, 0xca03, ff_wheel }, 72 { 0x046d, 0xca03, ff_wheel },
67}; 73};
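Each devices[] entry pairs a vendor/product ID with a -1-terminated list of FF_* capability bits, so selecting the right list is a linear scan. A minimal sketch of how such a table is consumed (field names assumed from this file's struct dev_type):

static const signed short *find_ff_bits(u16 vendor, u16 product)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(devices); i++)
                if (devices[i].idVendor == vendor &&
                    devices[i].idProduct == product)
                        return devices[i].ff;   /* -1 terminated */
        return NULL;                            /* unknown device */
}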
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index c5b252be9c2..75ed9d2c1a3 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -1,13 +1,8 @@
1/* 1/*
2 * HID driver for some ntrig "special" devices 2 * HID driver for N-Trig touchscreens
3 * 3 *
4 * Copyright (c) 1999 Andreas Gal
5 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
6 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
7 * Copyright (c) 2006-2007 Jiri Kosina
8 * Copyright (c) 2007 Paul Walmsley
9 * Copyright (c) 2008 Jiri Slaby
10 * Copyright (c) 2008 Rafi Rubin 4 * Copyright (c) 2008 Rafi Rubin
5 * Copyright (c) 2009 Stephane Chatty
11 * 6 *
12 */ 7 */
13 8
@@ -29,15 +24,79 @@
29#define nt_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ 24#define nt_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
30 EV_KEY, (c)) 25 EV_KEY, (c))
31 26
27struct ntrig_data {
28 __s32 x, y, id, w, h;
29 char reading_a_point, found_contact_id;
30};
31
32/*
33 * this driver is aimed at two firmware versions in circulation:
34 * - dual pen/finger single touch
35 * - finger multitouch, pen not working
36 */
37
32static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi, 38static int ntrig_input_mapping(struct hid_device *hdev, struct hid_input *hi,
33 struct hid_field *field, struct hid_usage *usage, 39 struct hid_field *field, struct hid_usage *usage,
34 unsigned long **bit, int *max) 40 unsigned long **bit, int *max)
35{ 41{
36 if ((usage->hid & HID_USAGE_PAGE) == HID_UP_DIGITIZER && 42 switch (usage->hid & HID_USAGE_PAGE) {
37 (usage->hid & 0xff) == 0x47) { 43
38 nt_map_key_clear(BTN_TOOL_DOUBLETAP); 44 case HID_UP_GENDESK:
39 return 1; 45 switch (usage->hid) {
46 case HID_GD_X:
47 hid_map_usage(hi, usage, bit, max,
48 EV_ABS, ABS_MT_POSITION_X);
49 input_set_abs_params(hi->input, ABS_X,
50 field->logical_minimum,
51 field->logical_maximum, 0, 0);
52 return 1;
53 case HID_GD_Y:
54 hid_map_usage(hi, usage, bit, max,
55 EV_ABS, ABS_MT_POSITION_Y);
56 input_set_abs_params(hi->input, ABS_Y,
57 field->logical_minimum,
58 field->logical_maximum, 0, 0);
59 return 1;
60 }
61 return 0;
62
63 case HID_UP_DIGITIZER:
64 switch (usage->hid) {
65 /* we do not want to map these for now */
66 case HID_DG_INVERT: /* value is always 0 */
67 case HID_DG_ERASER: /* value is always 0 */
68 case HID_DG_CONTACTID: /* value is useless */
69 case HID_DG_BARRELSWITCH: /* doubtful */
70 case HID_DG_INPUTMODE:
71 case HID_DG_DEVICEINDEX:
72 case HID_DG_CONTACTCOUNT:
73 case HID_DG_CONTACTMAX:
74 return -1;
75
76 /* original mapping by Rafi Rubin */
77 case HID_DG_CONFIDENCE:
78 nt_map_key_clear(BTN_TOOL_DOUBLETAP);
79 return 1;
80
81 /* width/height mapped on TouchMajor/TouchMinor/Orientation */
82 case HID_DG_WIDTH:
83 hid_map_usage(hi, usage, bit, max,
84 EV_ABS, ABS_MT_TOUCH_MAJOR);
85 return 1;
86 case HID_DG_HEIGHT:
87 hid_map_usage(hi, usage, bit, max,
88 EV_ABS, ABS_MT_TOUCH_MINOR);
89 input_set_abs_params(hi->input, ABS_MT_ORIENTATION,
90 0, 1, 0, 0);
91 return 1;
92 }
93 return 0;
94
95 case 0xff000000:
96 /* we do not want to map these: no input-oriented meaning */
97 return -1;
40 } 98 }
99
41 return 0; 100 return 0;
42} 101}
43 102
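The switch above relies on the three-way return convention of .input_mapping callbacks: a negative value drops the usage entirely, 0 defers to the generic hid-input mapping, and a positive value means the driver has mapped it itself. The contract in its smallest form (sketch):

static int example_input_mapping(struct hid_device *hdev,
                                 struct hid_input *hi,
                                 struct hid_field *field,
                                 struct hid_usage *usage,
                                 unsigned long **bit, int *max)
{
        if ((usage->hid & HID_USAGE_PAGE) == 0xff000000)
                return -1;      /* vendor-defined page: ignore usage */
        return 0;               /* everything else: generic mapping */
}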
@@ -51,6 +110,138 @@ static int ntrig_input_mapped(struct hid_device *hdev, struct hid_input *hi,
51 110
52 return 0; 111 return 0;
53} 112}
113
114/*
115 * this function is called upon all reports
116 * so that we can filter contact point information,
117 * decide whether we are in multi or single touch mode
118 * and call input_mt_sync after each point if necessary
119 */
 120static int ntrig_event(struct hid_device *hid, struct hid_field *field,
121 struct hid_usage *usage, __s32 value)
122{
123 struct input_dev *input = field->hidinput->input;
124 struct ntrig_data *nd = hid_get_drvdata(hid);
125
126 if (hid->claimed & HID_CLAIMED_INPUT) {
127 switch (usage->hid) {
128 case HID_GD_X:
129 nd->x = value;
130 nd->reading_a_point = 1;
131 break;
132 case HID_GD_Y:
133 nd->y = value;
134 break;
135 case HID_DG_CONTACTID:
136 nd->id = value;
137 /* we receive this only when in multitouch mode */
138 nd->found_contact_id = 1;
139 break;
140 case HID_DG_WIDTH:
141 nd->w = value;
142 break;
143 case HID_DG_HEIGHT:
144 nd->h = value;
145 /*
146 * when in single touch mode, this is the last
147 * report received in a finger event. We want
148 * to emit a normal (X, Y) position
149 */
 150 if (!nd->found_contact_id) {
151 input_event(input, EV_ABS, ABS_X, nd->x);
152 input_event(input, EV_ABS, ABS_Y, nd->y);
153 }
154 break;
155 case HID_DG_TIPPRESSURE:
156 /*
157 * when in single touch mode, this is the last
158 * report received in a pen event. We want
159 * to emit a normal (X, Y) position
160 */
 161 if (!nd->found_contact_id) {
162 input_event(input, EV_ABS, ABS_X, nd->x);
163 input_event(input, EV_ABS, ABS_Y, nd->y);
164 input_event(input, EV_ABS, ABS_PRESSURE, value);
165 }
166 break;
167 case 0xff000002:
168 /*
169 * we receive this when the device is in multitouch
170 * mode. The first of the three values tagged with
171 * this usage tells if the contact point is real
172 * or a placeholder
173 */
174 if (!nd->reading_a_point || value != 1)
175 break;
176 /* emit a normal (X, Y) for the first point only */
177 if (nd->id == 0) {
178 input_event(input, EV_ABS, ABS_X, nd->x);
179 input_event(input, EV_ABS, ABS_Y, nd->y);
180 }
181 input_event(input, EV_ABS, ABS_MT_POSITION_X, nd->x);
182 input_event(input, EV_ABS, ABS_MT_POSITION_Y, nd->y);
183 if (nd->w > nd->h) {
184 input_event(input, EV_ABS,
185 ABS_MT_ORIENTATION, 1);
186 input_event(input, EV_ABS,
187 ABS_MT_TOUCH_MAJOR, nd->w);
188 input_event(input, EV_ABS,
189 ABS_MT_TOUCH_MINOR, nd->h);
190 } else {
191 input_event(input, EV_ABS,
192 ABS_MT_ORIENTATION, 0);
193 input_event(input, EV_ABS,
194 ABS_MT_TOUCH_MAJOR, nd->h);
195 input_event(input, EV_ABS,
196 ABS_MT_TOUCH_MINOR, nd->w);
197 }
198 input_mt_sync(field->hidinput->input);
199 nd->reading_a_point = 0;
200 nd->found_contact_id = 0;
201 break;
202
203 default:
204 /* fallback to the generic hidinput handling */
205 return 0;
206 }
207 }
208
209 /* we have handled the hidinput part, now remains hiddev */
210 if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
211 hid->hiddev_hid_event(hid, field, usage, value);
212
213 return 1;
214}
215
216static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
217{
218 int ret;
219 struct ntrig_data *nd;
220
221 nd = kmalloc(sizeof(struct ntrig_data), GFP_KERNEL);
222 if (!nd) {
223 dev_err(&hdev->dev, "cannot allocate N-Trig data\n");
224 return -ENOMEM;
225 }
226 nd->reading_a_point = 0;
227 nd->found_contact_id = 0;
228 hid_set_drvdata(hdev, nd);
229
230 ret = hid_parse(hdev);
231 if (!ret)
232 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
233
234 if (ret)
 235 kfree(nd);
236 return ret;
237}
238
239static void ntrig_remove(struct hid_device *hdev)
240{
241 hid_hw_stop(hdev);
242 kfree(hid_get_drvdata(hdev));
243}
244
54static const struct hid_device_id ntrig_devices[] = { 245static const struct hid_device_id ntrig_devices[] = {
55 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN), 246 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN),
56 .driver_data = NTRIG_DUPLICATE_USAGES }, 247 .driver_data = NTRIG_DUPLICATE_USAGES },
@@ -58,11 +249,20 @@ static const struct hid_device_id ntrig_devices[] = {
58}; 249};
59MODULE_DEVICE_TABLE(hid, ntrig_devices); 250MODULE_DEVICE_TABLE(hid, ntrig_devices);
60 251
252static const struct hid_usage_id ntrig_grabbed_usages[] = {
253 { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
254 { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
255};
256
61static struct hid_driver ntrig_driver = { 257static struct hid_driver ntrig_driver = {
62 .name = "ntrig", 258 .name = "ntrig",
63 .id_table = ntrig_devices, 259 .id_table = ntrig_devices,
260 .probe = ntrig_probe,
261 .remove = ntrig_remove,
64 .input_mapping = ntrig_input_mapping, 262 .input_mapping = ntrig_input_mapping,
65 .input_mapped = ntrig_input_mapped, 263 .input_mapped = ntrig_input_mapped,
264 .usage_table = ntrig_grabbed_usages,
265 .event = ntrig_event,
66}; 266};
67 267
68static int ntrig_init(void) 268static int ntrig_init(void)
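For reference, the multitouch stream ntrig_event() emits follows the anonymous-contact MT protocol: each contact's ABS_MT_* axes are sent and closed with input_mt_sync(), while plain ABS_X/ABS_Y track only the first contact for single-touch consumers. Reduced to two hard-coded contacts (a simplified sketch, not the driver's code):

static void emit_two_contacts(struct input_dev *input,
                              int x0, int y0, int x1, int y1)
{
        input_event(input, EV_ABS, ABS_X, x0);  /* legacy pointer */
        input_event(input, EV_ABS, ABS_Y, y0);
        input_event(input, EV_ABS, ABS_MT_POSITION_X, x0);
        input_event(input, EV_ABS, ABS_MT_POSITION_Y, y0);
        input_mt_sync(input);                   /* close contact 0 */
        input_event(input, EV_ABS, ABS_MT_POSITION_X, x1);
        input_event(input, EV_ABS, ABS_MT_POSITION_Y, y1);
        input_mt_sync(input);                   /* close contact 1 */
}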
diff --git a/drivers/hid/hid-sjoy.c b/drivers/hid/hid-sjoy.c
new file mode 100644
index 00000000000..eab169e5c37
--- /dev/null
+++ b/drivers/hid/hid-sjoy.c
@@ -0,0 +1,180 @@
1/*
2 * Force feedback support for SmartJoy PLUS PS2->USB adapter
3 *
4 * Copyright (c) 2009 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
5 *
6 * Based of hid-pl.c and hid-gaff.c
7 * Copyright (c) 2007, 2009 Anssi Hannula <anssi.hannula@gmail.com>
8 * Copyright (c) 2008 Lukasz Lubojanski <lukasz@lubojanski.info>
9 */
10
11/*
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27/* #define DEBUG */
28
29#include <linux/input.h>
30#include <linux/usb.h>
31#include <linux/hid.h>
32#include "hid-ids.h"
33
34#ifdef CONFIG_SMARTJOYPLUS_FF
35#include "usbhid/usbhid.h"
36
37struct sjoyff_device {
38 struct hid_report *report;
39};
40
41static int hid_sjoyff_play(struct input_dev *dev, void *data,
42 struct ff_effect *effect)
43{
44 struct hid_device *hid = input_get_drvdata(dev);
45 struct sjoyff_device *sjoyff = data;
46 u32 left, right;
47
48 left = effect->u.rumble.strong_magnitude;
49 right = effect->u.rumble.weak_magnitude;
50 dev_dbg(&dev->dev, "called with 0x%08x 0x%08x\n", left, right);
51
52 left = left * 0xff / 0xffff;
53 right = (right != 0); /* on/off only */
54
55 sjoyff->report->field[0]->value[1] = right;
56 sjoyff->report->field[0]->value[2] = left;
57 dev_dbg(&dev->dev, "running with 0x%02x 0x%02x\n", left, right);
58 usbhid_submit_report(hid, sjoyff->report, USB_DIR_OUT);
59
60 return 0;
61}
62
63static int sjoyff_init(struct hid_device *hid)
64{
65 struct sjoyff_device *sjoyff;
66 struct hid_report *report;
67 struct hid_input *hidinput = list_entry(hid->inputs.next,
68 struct hid_input, list);
69 struct list_head *report_list =
70 &hid->report_enum[HID_OUTPUT_REPORT].report_list;
71 struct list_head *report_ptr = report_list;
72 struct input_dev *dev;
73 int error;
74
75 if (list_empty(report_list)) {
76 dev_err(&hid->dev, "no output reports found\n");
77 return -ENODEV;
78 }
79
80 report_ptr = report_ptr->next;
81
82 if (report_ptr == report_list) {
83 dev_err(&hid->dev, "required output report is "
84 "missing\n");
85 return -ENODEV;
86 }
87
88 report = list_entry(report_ptr, struct hid_report, list);
89 if (report->maxfield < 1) {
90 dev_err(&hid->dev, "no fields in the report\n");
91 return -ENODEV;
92 }
93
94 if (report->field[0]->report_count < 3) {
95 dev_err(&hid->dev, "not enough values in the field\n");
96 return -ENODEV;
97 }
98
99 sjoyff = kzalloc(sizeof(struct sjoyff_device), GFP_KERNEL);
100 if (!sjoyff)
101 return -ENOMEM;
102
103 dev = hidinput->input;
104
105 set_bit(FF_RUMBLE, dev->ffbit);
106
107 error = input_ff_create_memless(dev, sjoyff, hid_sjoyff_play);
108 if (error) {
109 kfree(sjoyff);
110 return error;
111 }
112
113 sjoyff->report = report;
114 sjoyff->report->field[0]->value[0] = 0x01;
115 sjoyff->report->field[0]->value[1] = 0x00;
116 sjoyff->report->field[0]->value[2] = 0x00;
117 usbhid_submit_report(hid, sjoyff->report, USB_DIR_OUT);
118
119 dev_info(&hid->dev,
120 "Force feedback for SmartJoy PLUS PS2/USB adapter\n");
121
122 return 0;
123}
124#else
125static inline int sjoyff_init(struct hid_device *hid)
126{
127 return 0;
128}
129#endif
130
131static int sjoy_probe(struct hid_device *hdev, const struct hid_device_id *id)
132{
133 int ret;
134
135 ret = hid_parse(hdev);
136 if (ret) {
137 dev_err(&hdev->dev, "parse failed\n");
138 goto err;
139 }
140
141 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
142 if (ret) {
143 dev_err(&hdev->dev, "hw start failed\n");
144 goto err;
145 }
146
147 sjoyff_init(hdev);
148
149 return 0;
150err:
151 return ret;
152}
153
154static const struct hid_device_id sjoy_devices[] = {
155 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
156 { }
157};
158MODULE_DEVICE_TABLE(hid, sjoy_devices);
159
160static struct hid_driver sjoy_driver = {
161 .name = "smartjoyplus",
162 .id_table = sjoy_devices,
163 .probe = sjoy_probe,
164};
165
166static int sjoy_init(void)
167{
168 return hid_register_driver(&sjoy_driver);
169}
170
171static void sjoy_exit(void)
172{
173 hid_unregister_driver(&sjoy_driver);
174}
175
176module_init(sjoy_init);
177module_exit(sjoy_exit);
178MODULE_LICENSE("GPL");
179MODULE_AUTHOR("Jussi Kivilinna");
180
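The scaling in hid_sjoyff_play() adapts the 16-bit ff_effect magnitudes to what the adapter's motors accept: an 8-bit level for the strong motor and a single on/off bit for the weak one. Worked through as a sketch:

/* strong 0xffff -> 0xffff * 0xff / 0xffff = 0xff (full power)
 * strong 0x8000 -> 0x8000 * 0xff / 0xffff = 0x7f (about half)
 * weak   anything nonzero -> 1 (that motor is on/off only)   */
static void scale_rumble(u32 strong, u32 weak, u8 *left, u8 *right)
{
        *left  = strong * 0xff / 0xffff;
        *right = weak != 0;
}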
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index 7c1f7b50330..fcd6ccd02fe 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -33,11 +33,6 @@
33 33
34#include "hid-ids.h" 34#include "hid-ids.h"
35 35
36#include "usbhid/usbhid.h"
37
38/* Usages for thrustmaster devices I know about */
39#define THRUSTMASTER_USAGE_FF (HID_UP_GENDESK | 0xbb)
40
41static const signed short ff_rumble[] = { 36static const signed short ff_rumble[] = {
42 FF_RUMBLE, 37 FF_RUMBLE,
43 -1 38 -1
@@ -48,6 +43,12 @@ static const signed short ff_joystick[] = {
48 -1 43 -1
49}; 44};
50 45
46#ifdef CONFIG_THRUSTMASTER_FF
47#include "usbhid/usbhid.h"
48
49/* Usages for thrustmaster devices I know about */
50#define THRUSTMASTER_USAGE_FF (HID_UP_GENDESK | 0xbb)
51
51struct tmff_device { 52struct tmff_device {
52 struct hid_report *report; 53 struct hid_report *report;
53 struct hid_field *ff_field; 54 struct hid_field *ff_field;
@@ -209,6 +210,12 @@ fail:
209 kfree(tmff); 210 kfree(tmff);
210 return error; 211 return error;
211} 212}
213#else
214static inline int tmff_init(struct hid_device *hid, const signed short *ff_bits)
215{
216 return 0;
217}
218#endif
212 219
213static int tm_probe(struct hid_device *hdev, const struct hid_device_id *id) 220static int tm_probe(struct hid_device *hdev, const struct hid_device_id *id)
214{ 221{
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
new file mode 100644
index 00000000000..1f9237f511e
--- /dev/null
+++ b/drivers/hid/hid-wacom.c
@@ -0,0 +1,260 @@
1/*
2 * Bluetooth Wacom Tablet support
3 *
4 * Copyright (c) 1999 Andreas Gal
5 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
6 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
7 * Copyright (c) 2006-2007 Jiri Kosina
8 * Copyright (c) 2007 Paul Walmsley
9 * Copyright (c) 2008 Jiri Slaby <jirislaby@gmail.com>
10 * Copyright (c) 2006 Andrew Zabolotny <zap@homelink.ru>
11 * Copyright (c) 2009 Bastien Nocera <hadess@hadess.net>
12 */
13
14/*
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the Free
17 * Software Foundation; either version 2 of the License, or (at your option)
18 * any later version.
19 */
20
21#include <linux/device.h>
22#include <linux/hid.h>
23#include <linux/module.h>
24
25#include "hid-ids.h"
26
27struct wacom_data {
28 __u16 tool;
29 unsigned char butstate;
30};
31
32static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
33 u8 *raw_data, int size)
34{
35 struct wacom_data *wdata = hid_get_drvdata(hdev);
36 struct hid_input *hidinput;
37 struct input_dev *input;
38 unsigned char *data = (unsigned char *) raw_data;
39 int tool, x, y, rw;
40
41 if (!(hdev->claimed & HID_CLAIMED_INPUT))
42 return 0;
43
44 tool = 0;
45 hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
46 input = hidinput->input;
47
48 /* Check if this is a tablet report */
49 if (data[0] != 0x03)
50 return 0;
51
52 /* Get X & Y positions */
53 x = le16_to_cpu(*(__le16 *) &data[2]);
54 y = le16_to_cpu(*(__le16 *) &data[4]);
55
56 /* Get current tool identifier */
57 if (data[1] & 0x90) { /* If pen is in the in/active area */
58 switch ((data[1] >> 5) & 3) {
59 case 0: /* Pen */
60 tool = BTN_TOOL_PEN;
61 break;
62
63 case 1: /* Rubber */
64 tool = BTN_TOOL_RUBBER;
65 break;
66
67 case 2: /* Mouse with wheel */
68 case 3: /* Mouse without wheel */
69 tool = BTN_TOOL_MOUSE;
70 break;
71 }
72
73 /* Reset tool if out of active tablet area */
74 if (!(data[1] & 0x10))
75 tool = 0;
76 }
77
78 /* If tool changed, notify input subsystem */
79 if (wdata->tool != tool) {
80 if (wdata->tool) {
81 /* Completely reset old tool state */
82 if (wdata->tool == BTN_TOOL_MOUSE) {
83 input_report_key(input, BTN_LEFT, 0);
84 input_report_key(input, BTN_RIGHT, 0);
85 input_report_key(input, BTN_MIDDLE, 0);
86 input_report_abs(input, ABS_DISTANCE,
87 input->absmax[ABS_DISTANCE]);
88 } else {
89 input_report_key(input, BTN_TOUCH, 0);
90 input_report_key(input, BTN_STYLUS, 0);
91 input_report_key(input, BTN_STYLUS2, 0);
92 input_report_abs(input, ABS_PRESSURE, 0);
93 }
94 input_report_key(input, wdata->tool, 0);
95 input_sync(input);
96 }
97 wdata->tool = tool;
98 if (tool)
99 input_report_key(input, tool, 1);
100 }
101
102 if (tool) {
103 input_report_abs(input, ABS_X, x);
104 input_report_abs(input, ABS_Y, y);
105
106 switch ((data[1] >> 5) & 3) {
107 case 2: /* Mouse with wheel */
108 input_report_key(input, BTN_MIDDLE, data[1] & 0x04);
109 rw = (data[6] & 0x01) ? -1 :
110 (data[6] & 0x02) ? 1 : 0;
111 input_report_rel(input, REL_WHEEL, rw);
112 /* fall through */
113
114 case 3: /* Mouse without wheel */
115 input_report_key(input, BTN_LEFT, data[1] & 0x01);
116 input_report_key(input, BTN_RIGHT, data[1] & 0x02);
117 /* Compute distance between mouse and tablet */
118 rw = 44 - (data[6] >> 2);
119 if (rw < 0)
120 rw = 0;
121 else if (rw > 31)
122 rw = 31;
123 input_report_abs(input, ABS_DISTANCE, rw);
124 break;
125
126 default:
127 input_report_abs(input, ABS_PRESSURE,
128 data[6] | (((__u16) (data[1] & 0x08)) << 5));
129 input_report_key(input, BTN_TOUCH, data[1] & 0x01);
130 input_report_key(input, BTN_STYLUS, data[1] & 0x02);
131 input_report_key(input, BTN_STYLUS2, (tool == BTN_TOOL_PEN) && data[1] & 0x04);
132 break;
133 }
134
135 input_sync(input);
136 }
137
138 /* Report the state of the two buttons at the top of the tablet
139 * as two extra fingerpad keys (buttons 4 & 5). */
140 rw = data[7] & 0x03;
141 if (rw != wdata->butstate) {
142 wdata->butstate = rw;
143 input_report_key(input, BTN_0, rw & 0x02);
144 input_report_key(input, BTN_1, rw & 0x01);
145 input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
146 input_sync(input);
147 }
148
149 return 1;
150}
151
152static int wacom_probe(struct hid_device *hdev,
153 const struct hid_device_id *id)
154{
155 struct hid_input *hidinput;
156 struct input_dev *input;
157 struct wacom_data *wdata;
158 int ret;
159
160 wdata = kzalloc(sizeof(*wdata), GFP_KERNEL);
161 if (wdata == NULL) {
162 dev_err(&hdev->dev, "can't alloc wacom descriptor\n");
163 return -ENOMEM;
164 }
165
166 hid_set_drvdata(hdev, wdata);
167
168 ret = hid_parse(hdev);
169 if (ret) {
170 dev_err(&hdev->dev, "parse failed\n");
171 goto err_free;
172 }
173
174 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
175 if (ret) {
176 dev_err(&hdev->dev, "hw start failed\n");
177 goto err_free;
178 }
179
180 hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
181 input = hidinput->input;
182
183 /* Basics */
184 input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL);
185 input->absbit[0] |= BIT(ABS_X) | BIT(ABS_Y) |
186 BIT(ABS_PRESSURE) | BIT(ABS_DISTANCE);
187 input->relbit[0] |= BIT(REL_WHEEL);
188 set_bit(BTN_TOOL_PEN, input->keybit);
189 set_bit(BTN_TOUCH, input->keybit);
190 set_bit(BTN_STYLUS, input->keybit);
191 set_bit(BTN_STYLUS2, input->keybit);
192 set_bit(BTN_LEFT, input->keybit);
193 set_bit(BTN_RIGHT, input->keybit);
194 set_bit(BTN_MIDDLE, input->keybit);
195
196 /* Pad */
197 input->evbit[0] |= BIT(EV_MSC);
198 input->mscbit[0] |= BIT(MSC_SERIAL);
199
200 /* Distance, rubber and mouse */
201 input->absbit[0] |= BIT(ABS_DISTANCE);
202 set_bit(BTN_TOOL_RUBBER, input->keybit);
203 set_bit(BTN_TOOL_MOUSE, input->keybit);
204
205 input->absmax[ABS_PRESSURE] = 511;
206 input->absmax[ABS_DISTANCE] = 32;
207
208 input->absmax[ABS_X] = 16704;
209 input->absmax[ABS_Y] = 12064;
210 input->absfuzz[ABS_X] = 4;
211 input->absfuzz[ABS_Y] = 4;
212
213 return 0;
214err_free:
215 kfree(wdata);
216 return ret;
217}
218
219static void wacom_remove(struct hid_device *hdev)
220{
221 hid_hw_stop(hdev);
222 kfree(hid_get_drvdata(hdev));
223}
224
225static const struct hid_device_id wacom_devices[] = {
226 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
227
228 { }
229};
230MODULE_DEVICE_TABLE(hid, wacom_devices);
231
232static struct hid_driver wacom_driver = {
233 .name = "wacom",
234 .id_table = wacom_devices,
235 .probe = wacom_probe,
236 .remove = wacom_remove,
237 .raw_event = wacom_raw_event,
238};
239
240static int wacom_init(void)
241{
242 int ret;
243
244 ret = hid_register_driver(&wacom_driver);
245 if (ret)
246 printk(KERN_ERR "can't register wacom driver\n");
 247 else
 248 printk(KERN_INFO "wacom driver registered\n");
 249 return ret;
 250}
 251
 252static void wacom_exit(void)
 253{
 254 hid_unregister_driver(&wacom_driver);
 255}
 256
 257module_init(wacom_init);
 258module_exit(wacom_exit);
 259MODULE_LICENSE("GPL");
 260
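The Graphire decoding above is byte-level work: coordinates are little-endian 16-bit words at offsets 2 and 4, pen pressure is 9 bits (data[6] plus bit 3 of data[1]), and mouse distance derives from data[6], clamped to the 0..31 range the device reports. Condensed into a hypothetical standalone helper (sketch):

static void wacom_decode(const u8 *data, int *x, int *y,
                         int *dist, int *pressure)
{
        *x = data[2] | (data[3] << 8);          /* LE16 at bytes 2..3 */
        *y = data[4] | (data[5] << 8);          /* LE16 at bytes 4..5 */
        *dist = clamp(44 - (data[6] >> 2), 0, 31);
        *pressure = data[6] | ((data[1] & 0x08) << 5);  /* 9th bit */
}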
diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
index 85a198a1853..57f710757bf 100644
--- a/drivers/hid/hid-zpff.c
+++ b/drivers/hid/hid-zpff.c
@@ -27,6 +27,7 @@
27 27
28#include "hid-ids.h" 28#include "hid-ids.h"
29 29
30#ifdef CONFIG_ZEROPLUS_FF
30#include "usbhid/usbhid.h" 31#include "usbhid/usbhid.h"
31 32
32struct zpff_device { 33struct zpff_device {
@@ -108,6 +109,12 @@ static int zpff_init(struct hid_device *hid)
108 109
109 return 0; 110 return 0;
110} 111}
112#else
113static inline int zpff_init(struct hid_device *hid)
114{
115 return 0;
116}
117#endif
111 118
112static int zp_probe(struct hid_device *hdev, const struct hid_device_id *id) 119static int zp_probe(struct hid_device *hdev, const struct hid_device_id *id)
113{ 120{
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 00ccf4b1985..0c6639ea03d 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -349,10 +349,7 @@ int hidraw_connect(struct hid_device *hid)
349 int minor, result; 349 int minor, result;
350 struct hidraw *dev; 350 struct hidraw *dev;
351 351
352 /* TODO currently we accept any HID device. This should later 352 /* we accept any HID device, no matter the applications */
353 * probably be fixed to accept only those devices which provide
354 * non-input applications
355 */
356 353
357 dev = kzalloc(sizeof(struct hidraw), GFP_KERNEL); 354 dev = kzalloc(sizeof(struct hidraw), GFP_KERNEL);
358 if (!dev) 355 if (!dev)
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ac8049b5f1e..76c4bbe9dcc 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1234,12 +1234,11 @@ static int hid_post_reset(struct usb_interface *intf)
1234 struct hid_device *hid = usb_get_intfdata(intf); 1234 struct hid_device *hid = usb_get_intfdata(intf);
1235 struct usbhid_device *usbhid = hid->driver_data; 1235 struct usbhid_device *usbhid = hid->driver_data;
1236 int status; 1236 int status;
1237 1237
1238 spin_lock_irq(&usbhid->lock); 1238 spin_lock_irq(&usbhid->lock);
1239 clear_bit(HID_RESET_PENDING, &usbhid->iofl); 1239 clear_bit(HID_RESET_PENDING, &usbhid->iofl);
1240 spin_unlock_irq(&usbhid->lock); 1240 spin_unlock_irq(&usbhid->lock);
1241 hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0); 1241 hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
1242 /* FIXME: Any more reinitialization needed? */
1243 status = hid_start_in(hid); 1242 status = hid_start_in(hid);
1244 if (status < 0) 1243 if (status < 0)
1245 hid_io_error(hid); 1244 hid_io_error(hid);
@@ -1251,14 +1250,14 @@ static int hid_post_reset(struct usb_interface *intf)
1251int usbhid_get_power(struct hid_device *hid) 1250int usbhid_get_power(struct hid_device *hid)
1252{ 1251{
1253 struct usbhid_device *usbhid = hid->driver_data; 1252 struct usbhid_device *usbhid = hid->driver_data;
1254 1253
1255 return usb_autopm_get_interface(usbhid->intf); 1254 return usb_autopm_get_interface(usbhid->intf);
1256} 1255}
1257 1256
1258void usbhid_put_power(struct hid_device *hid) 1257void usbhid_put_power(struct hid_device *hid)
1259{ 1258{
1260 struct usbhid_device *usbhid = hid->driver_data; 1259 struct usbhid_device *usbhid = hid->driver_data;
1261 1260
1262 usb_autopm_put_interface(usbhid->intf); 1261 usb_autopm_put_interface(usbhid->intf);
1263} 1262}
1264 1263
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index b5e3b285169..a1787fdf5b9 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -182,7 +182,7 @@ static struct platform_driver lm78_isa_driver = {
182 .name = "lm78", 182 .name = "lm78",
183 }, 183 },
184 .probe = lm78_isa_probe, 184 .probe = lm78_isa_probe,
185 .remove = lm78_isa_remove, 185 .remove = __devexit_p(lm78_isa_remove),
186}; 186};
187 187
188 188
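Context for the one-liner: wrapping the remove handler in __devexit_p() lets the function itself carry __devexit, so when hotplug removal support is configured out the function is discarded and the macro evaluates to NULL instead of leaving a dangling reference. The idiom in isolation (sketch):

static int __devinit example_probe(struct platform_device *pdev)
{
        return 0;       /* acquire resources here */
}

static int __devexit example_remove(struct platform_device *pdev)
{
        return 0;       /* release resources here */
}

static struct platform_driver example_driver = {
        .driver = { .name = "example" },
        .probe  = example_probe,
        .remove = __devexit_p(example_remove), /* NULL without hotplug */
};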
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f1c6ca7e285..c8460fa9cfa 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -298,7 +298,7 @@ config I2C_BLACKFIN_TWI
298config I2C_BLACKFIN_TWI_CLK_KHZ 298config I2C_BLACKFIN_TWI_CLK_KHZ
299 int "Blackfin TWI I2C clock (kHz)" 299 int "Blackfin TWI I2C clock (kHz)"
300 depends on I2C_BLACKFIN_TWI 300 depends on I2C_BLACKFIN_TWI
301 range 10 400 301 range 21 400
302 default 50 302 default 50
303 help 303 help
304 The unit of the TWI clock is kHz. 304 The unit of the TWI clock is kHz.
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index fc548b3d002..26d8987e69b 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -104,9 +104,14 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
104 write_MASTER_CTL(iface, 104 write_MASTER_CTL(iface,
105 read_MASTER_CTL(iface) | STOP); 105 read_MASTER_CTL(iface) | STOP);
106 else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && 106 else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
107 iface->cur_msg+1 < iface->msg_num) 107 iface->cur_msg + 1 < iface->msg_num) {
108 write_MASTER_CTL(iface, 108 if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD)
109 read_MASTER_CTL(iface) | RSTART); 109 write_MASTER_CTL(iface,
110 read_MASTER_CTL(iface) | RSTART | MDIR);
111 else
112 write_MASTER_CTL(iface,
113 (read_MASTER_CTL(iface) | RSTART) & ~MDIR);
114 }
110 SSYNC(); 115 SSYNC();
111 /* Clear status */ 116 /* Clear status */
112 write_INT_STAT(iface, XMTSERV); 117 write_INT_STAT(iface, XMTSERV);
@@ -134,9 +139,13 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
134 read_MASTER_CTL(iface) | STOP); 139 read_MASTER_CTL(iface) | STOP);
135 SSYNC(); 140 SSYNC();
136 } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && 141 } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
137 iface->cur_msg+1 < iface->msg_num) { 142 iface->cur_msg + 1 < iface->msg_num) {
138 write_MASTER_CTL(iface, 143 if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD)
139 read_MASTER_CTL(iface) | RSTART); 144 write_MASTER_CTL(iface,
145 read_MASTER_CTL(iface) | RSTART | MDIR);
146 else
147 write_MASTER_CTL(iface,
148 (read_MASTER_CTL(iface) | RSTART) & ~MDIR);
140 SSYNC(); 149 SSYNC();
141 } 150 }
142 /* Clear interrupt source */ 151 /* Clear interrupt source */
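The fix in both the transmit and receive service paths above is the same: on a repeated START the controller's direction bit (MDIR) must already match the next message, so the handler now peeks at the following i2c_msg's I2C_M_RD flag instead of leaving the previous direction in place. The decision reduces to (sketch):

static void set_rstart_dir(struct bfin_twi_iface *iface)
{
        unsigned short ctl = read_MASTER_CTL(iface) | RSTART;

        if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD)
                ctl |= MDIR;            /* next transfer is a read */
        else
                ctl &= ~MDIR;           /* next transfer is a write */
        write_MASTER_CTL(iface, ctl);
}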
@@ -196,8 +205,6 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
196 /* remove restart bit and enable master receive */ 205 /* remove restart bit and enable master receive */
197 write_MASTER_CTL(iface, 206 write_MASTER_CTL(iface,
198 read_MASTER_CTL(iface) & ~RSTART); 207 read_MASTER_CTL(iface) & ~RSTART);
199 write_MASTER_CTL(iface,
200 read_MASTER_CTL(iface) | MEN | MDIR);
201 SSYNC(); 208 SSYNC();
202 } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && 209 } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
203 iface->cur_msg+1 < iface->msg_num) { 210 iface->cur_msg+1 < iface->msg_num) {
@@ -222,18 +229,19 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface)
222 } 229 }
223 230
224 if (iface->pmsg[iface->cur_msg].len <= 255) 231 if (iface->pmsg[iface->cur_msg].len <= 255)
225 write_MASTER_CTL(iface, 232 write_MASTER_CTL(iface,
226 iface->pmsg[iface->cur_msg].len << 6); 233 (read_MASTER_CTL(iface) &
234 (~(0xff << 6))) |
235 (iface->pmsg[iface->cur_msg].len << 6));
227 else { 236 else {
228 write_MASTER_CTL(iface, 0xff << 6); 237 write_MASTER_CTL(iface,
238 (read_MASTER_CTL(iface) |
239 (0xff << 6)));
229 iface->manual_stop = 1; 240 iface->manual_stop = 1;
230 } 241 }
231 /* remove restart bit and enable master receive */ 242 /* remove restart bit and enable master receive */
232 write_MASTER_CTL(iface, 243 write_MASTER_CTL(iface,
233 read_MASTER_CTL(iface) & ~RSTART); 244 read_MASTER_CTL(iface) & ~RSTART);
234 write_MASTER_CTL(iface, read_MASTER_CTL(iface) |
235 MEN | ((iface->read_write == I2C_SMBUS_READ) ?
236 MDIR : 0));
237 SSYNC(); 245 SSYNC();
238 } else { 246 } else {
239 iface->result = 1; 247 iface->result = 1;
@@ -441,6 +449,16 @@ int bfin_twi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
441 } 449 }
442 iface->transPtr = data->block; 450 iface->transPtr = data->block;
443 break; 451 break;
452 case I2C_SMBUS_I2C_BLOCK_DATA:
453 if (read_write == I2C_SMBUS_READ) {
454 iface->readNum = data->block[0];
455 iface->cur_mode = TWI_I2C_MODE_COMBINED;
456 } else {
457 iface->writeNum = data->block[0];
458 iface->cur_mode = TWI_I2C_MODE_STANDARDSUB;
459 }
460 iface->transPtr = (u8 *)&data->block[1];
461 break;
444 default: 462 default:
445 return -1; 463 return -1;
446 } 464 }
@@ -564,7 +582,7 @@ static u32 bfin_twi_functionality(struct i2c_adapter *adap)
564 return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | 582 return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
565 I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | 583 I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
566 I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_PROC_CALL | 584 I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_PROC_CALL |
567 I2C_FUNC_I2C; 585 I2C_FUNC_I2C | I2C_FUNC_SMBUS_I2C_BLOCK;
568} 586}
569 587
570static struct i2c_algorithm bfin_twi_algorithm = { 588static struct i2c_algorithm bfin_twi_algorithm = {
@@ -614,6 +632,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
614 struct i2c_adapter *p_adap; 632 struct i2c_adapter *p_adap;
615 struct resource *res; 633 struct resource *res;
616 int rc; 634 int rc;
635 unsigned int clkhilow;
617 636
618 iface = kzalloc(sizeof(struct bfin_twi_iface), GFP_KERNEL); 637 iface = kzalloc(sizeof(struct bfin_twi_iface), GFP_KERNEL);
619 if (!iface) { 638 if (!iface) {
@@ -675,10 +694,14 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
675 /* Set TWI internal clock as 10MHz */ 694 /* Set TWI internal clock as 10MHz */
676 write_CONTROL(iface, ((get_sclk() / 1024 / 1024 + 5) / 10) & 0x7F); 695 write_CONTROL(iface, ((get_sclk() / 1024 / 1024 + 5) / 10) & 0x7F);
677 696
697 /*
698 * We will not end up with a CLKDIV=0 because no one will specify
699 * 20kHz SCL or less in Kconfig now. (5 * 1024 / 20 = 0x100)
700 */
701 clkhilow = 5 * 1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ;
702
678 /* Set Twi interface clock as specified */ 703 /* Set Twi interface clock as specified */
679 write_CLKDIV(iface, ((5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ) 704 write_CLKDIV(iface, (clkhilow << 8) | clkhilow);
680 << 8) | ((5*1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ)
681 & 0xFF));
682 705
683 /* Enable TWI */ 706 /* Enable TWI */
684 write_CONTROL(iface, read_CONTROL(iface) | TWI_ENA); 707 write_CONTROL(iface, read_CONTROL(iface) | TWI_ENA);
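CLKDIV packs equal SCL high and low counts of the 10 MHz internal clock, 5 * 1024 / f(kHz) each; at 20 kHz that quotient is 256 = 0x100, which overflows the 8-bit field, which is why the Kconfig minimum above moved from 10 to 21 kHz. Worked examples:

/* 100 kHz: 5 * 1024 / 100 = 51  -> CLKDIV = (51 << 8) | 51
 * 400 kHz: 5 * 1024 / 400 = 12  -> CLKDIV = (12 << 8) | 12
 *  20 kHz: 5 * 1024 / 20  = 256 -> no longer fits in 8 bits */
unsigned int clkhilow = 5 * 1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ;
unsigned int clkdiv   = (clkhilow << 8) | clkhilow;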
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index e5193bf7548..3542c6ba98f 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -216,6 +216,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
216 struct ocores_i2c_platform_data *pdata; 216 struct ocores_i2c_platform_data *pdata;
217 struct resource *res, *res2; 217 struct resource *res, *res2;
218 int ret; 218 int ret;
219 int i;
219 220
220 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 221 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
221 if (!res) 222 if (!res)
@@ -271,6 +272,10 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
271 goto add_adapter_failed; 272 goto add_adapter_failed;
272 } 273 }
273 274
275 /* add in known devices to the bus */
276 for (i = 0; i < pdata->num_devices; i++)
277 i2c_new_device(&i2c->adap, pdata->devices + i);
278
274 return 0; 279 return 0;
275 280
276add_adapter_failed: 281add_adapter_failed:
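The new loop expects board code to describe fixed I2C slaves in the platform data. On the board side this looks roughly like the following (device types and addresses are invented for illustration; the non-device fields are assumed from this driver's platform header):

static struct i2c_board_info ocores_i2c_devs[] = {
        { I2C_BOARD_INFO("tsc2003", 0x48) },    /* touchscreen */
        { I2C_BOARD_INFO("ds1337",  0x68) },    /* RTC */
};

static struct ocores_i2c_platform_data myboard_i2c_data = {
        .regstep     = 2,                       /* board-specific */
        .clock_khz   = 50000,                   /* board-specific */
        .devices     = ocores_i2c_devs,
        .num_devices = ARRAY_SIZE(ocores_i2c_devs),
};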
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index ece0125a1ee..c73475dd0fb 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -333,8 +333,18 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
333 333
334 if (cpu_is_omap2430() || cpu_is_omap34xx()) { 334 if (cpu_is_omap2430() || cpu_is_omap34xx()) {
335 335
336 /* HSI2C controller internal clk rate should be 19.2 Mhz */ 336 /*
337 internal_clk = 19200; 337 * HSI2C controller internal clk rate should be 19.2 Mhz for
338 * HS and for all modes on 2430. On 34xx we can use lower rate
339 * to get longer filter period for better noise suppression.
340 * The filter is iclk (fclk for HS) period.
341 */
 342 if (dev->speed > 400 || cpu_is_omap2430())
343 internal_clk = 19200;
344 else if (dev->speed > 100)
345 internal_clk = 9600;
346 else
347 internal_clk = 4000;
338 fclk_rate = clk_get_rate(dev->fclk) / 1000; 348 fclk_rate = clk_get_rate(dev->fclk) / 1000;
339 349
340 /* Compute prescaler divisor */ 350 /* Compute prescaler divisor */
@@ -343,17 +353,28 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
343 353
344 /* If configured for High Speed */ 354 /* If configured for High Speed */
345 if (dev->speed > 400) { 355 if (dev->speed > 400) {
356 unsigned long scl;
357
346 /* For first phase of HS mode */ 358 /* For first phase of HS mode */
347 fsscll = internal_clk / (400 * 2) - 6; 359 scl = internal_clk / 400;
348 fssclh = internal_clk / (400 * 2) - 6; 360 fsscll = scl - (scl / 3) - 7;
361 fssclh = (scl / 3) - 5;
349 362
350 /* For second phase of HS mode */ 363 /* For second phase of HS mode */
351 hsscll = fclk_rate / (dev->speed * 2) - 6; 364 scl = fclk_rate / dev->speed;
352 hssclh = fclk_rate / (dev->speed * 2) - 6; 365 hsscll = scl - (scl / 3) - 7;
366 hssclh = (scl / 3) - 5;
367 } else if (dev->speed > 100) {
368 unsigned long scl;
369
370 /* Fast mode */
371 scl = internal_clk / dev->speed;
372 fsscll = scl - (scl / 3) - 7;
373 fssclh = (scl / 3) - 5;
353 } else { 374 } else {
354 /* To handle F/S modes */ 375 /* Standard mode */
355 fsscll = internal_clk / (dev->speed * 2) - 6; 376 fsscll = internal_clk / (dev->speed * 2) - 7;
356 fssclh = internal_clk / (dev->speed * 2) - 6; 377 fssclh = internal_clk / (dev->speed * 2) - 5;
357 } 378 }
358 scll = (hsscll << OMAP_I2C_SCLL_HSSCLL) | fsscll; 379 scll = (hsscll << OMAP_I2C_SCLL_HSSCLL) | fsscll;
359 sclh = (hssclh << OMAP_I2C_SCLH_HSSCLH) | fssclh; 380 sclh = (hssclh << OMAP_I2C_SCLH_HSSCLH) | fssclh;
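The new formulas skew SCL so the low phase is longer than the high phase, as fast-mode timing requires (the -7 and -5 constants presumably absorb fixed hardware delays). Worked through for fast mode at 400 kHz with the 9.6 MHz internal clock chosen above (sketch):

unsigned long scl    = 9600 / 400;              /* 24 iclk periods per bit */
unsigned long fsscll = scl - (scl / 3) - 7;     /* 24 - 8 - 7 = 9 */
unsigned long fssclh = (scl / 3) - 5;           /* 8 - 5 = 3 */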
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index acc7143d965..035a6c7e59d 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -34,10 +34,24 @@
34#include <linux/err.h> 34#include <linux/err.h>
35#include <linux/clk.h> 35#include <linux/clk.h>
36 36
37#include <mach/hardware.h>
38#include <asm/irq.h> 37#include <asm/irq.h>
39#include <asm/io.h> 38#include <asm/io.h>
40#include <mach/i2c.h> 39#include <plat/i2c.h>
40
41/*
42 * I2C register offsets will be shifted 0 or 1 bit left, depending on
43 * different SoCs
44 */
45#define REG_SHIFT_0 (0 << 0)
46#define REG_SHIFT_1 (1 << 0)
47#define REG_SHIFT(d) ((d) & 0x1)
48
49static const struct platform_device_id i2c_pxa_id_table[] = {
50 { "pxa2xx-i2c", REG_SHIFT_1 },
51 { "pxa3xx-pwri2c", REG_SHIFT_0 },
52 { },
53};
54MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table);
41 55
42/* 56/*
43 * I2C registers and bit definitions 57 * I2C registers and bit definitions
@@ -985,6 +999,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
985 struct pxa_i2c *i2c; 999 struct pxa_i2c *i2c;
986 struct resource *res; 1000 struct resource *res;
987 struct i2c_pxa_platform_data *plat = dev->dev.platform_data; 1001 struct i2c_pxa_platform_data *plat = dev->dev.platform_data;
 1002 const struct platform_device_id *id = platform_get_device_id(dev);
988 int ret; 1003 int ret;
989 int irq; 1004 int irq;
990 1005
@@ -1028,7 +1043,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
1028 ret = -EIO; 1043 ret = -EIO;
1029 goto eremap; 1044 goto eremap;
1030 } 1045 }
1031 i2c->reg_shift = (cpu_is_pxa3xx() && (dev->id == 1)) ? 0 : 1; 1046 i2c->reg_shift = REG_SHIFT(id->driver_data);
1032 1047
1033 i2c->iobase = res->start; 1048 i2c->iobase = res->start;
1034 i2c->iosize = res_len(res); 1049 i2c->iosize = res_len(res);
@@ -1150,6 +1165,7 @@ static struct platform_driver i2c_pxa_driver = {
1150 .name = "pxa2xx-i2c", 1165 .name = "pxa2xx-i2c",
1151 .owner = THIS_MODULE, 1166 .owner = THIS_MODULE,
1152 }, 1167 },
1168 .id_table = i2c_pxa_id_table,
1153}; 1169};
1154 1170
1155static int __init i2c_adap_pxa_init(void) 1171static int __init i2c_adap_pxa_init(void)
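With reg_shift taken from the id table, register addressing reduces to scaling every register offset by that shift: pxa2xx-i2c doubles the offsets (shift 1) while pxa3xx-pwri2c uses them as-is (shift 0). A hypothetical accessor showing the arithmetic (the driver computes this internally):

#define I2C_REG_ADDR(i2c, off) \
        ((i2c)->iobase + ((off) << (i2c)->reg_shift))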
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 1691ef0f1ee..079a312d36f 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -51,6 +51,11 @@ enum s3c24xx_i2c_state {
51 STATE_STOP 51 STATE_STOP
52}; 52};
53 53
54enum s3c24xx_i2c_type {
55 TYPE_S3C2410,
56 TYPE_S3C2440,
57};
58
54struct s3c24xx_i2c { 59struct s3c24xx_i2c {
55 spinlock_t lock; 60 spinlock_t lock;
56 wait_queue_head_t wait; 61 wait_queue_head_t wait;
@@ -88,8 +93,10 @@ struct s3c24xx_i2c {
88static inline int s3c24xx_i2c_is2440(struct s3c24xx_i2c *i2c) 93static inline int s3c24xx_i2c_is2440(struct s3c24xx_i2c *i2c)
89{ 94{
90 struct platform_device *pdev = to_platform_device(i2c->dev); 95 struct platform_device *pdev = to_platform_device(i2c->dev);
96 enum s3c24xx_i2c_type type;
91 97
92 return !strcmp(pdev->name, "s3c2440-i2c"); 98 type = platform_get_device_id(pdev)->driver_data;
99 return type == TYPE_S3C2440;
93} 100}
94 101
95/* s3c24xx_i2c_master_complete 102/* s3c24xx_i2c_master_complete
@@ -969,52 +976,41 @@ static int s3c24xx_i2c_resume(struct platform_device *dev)
969 976
970/* device driver for platform bus bits */ 977/* device driver for platform bus bits */
971 978
972static struct platform_driver s3c2410_i2c_driver = { 979static struct platform_device_id s3c24xx_driver_ids[] = {
973 .probe = s3c24xx_i2c_probe, 980 {
974 .remove = s3c24xx_i2c_remove, 981 .name = "s3c2410-i2c",
975 .suspend_late = s3c24xx_i2c_suspend_late, 982 .driver_data = TYPE_S3C2410,
976 .resume = s3c24xx_i2c_resume, 983 }, {
977 .driver = { 984 .name = "s3c2440-i2c",
978 .owner = THIS_MODULE, 985 .driver_data = TYPE_S3C2440,
979 .name = "s3c2410-i2c", 986 }, { },
980 },
981}; 987};
988MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
982 989
983static struct platform_driver s3c2440_i2c_driver = { 990static struct platform_driver s3c24xx_i2c_driver = {
984 .probe = s3c24xx_i2c_probe, 991 .probe = s3c24xx_i2c_probe,
985 .remove = s3c24xx_i2c_remove, 992 .remove = s3c24xx_i2c_remove,
986 .suspend_late = s3c24xx_i2c_suspend_late, 993 .suspend_late = s3c24xx_i2c_suspend_late,
987 .resume = s3c24xx_i2c_resume, 994 .resume = s3c24xx_i2c_resume,
995 .id_table = s3c24xx_driver_ids,
988 .driver = { 996 .driver = {
989 .owner = THIS_MODULE, 997 .owner = THIS_MODULE,
990 .name = "s3c2440-i2c", 998 .name = "s3c-i2c",
991 }, 999 },
992}; 1000};
993 1001
994static int __init i2c_adap_s3c_init(void) 1002static int __init i2c_adap_s3c_init(void)
995{ 1003{
996 int ret; 1004 return platform_driver_register(&s3c24xx_i2c_driver);
997
998 ret = platform_driver_register(&s3c2410_i2c_driver);
999 if (ret == 0) {
1000 ret = platform_driver_register(&s3c2440_i2c_driver);
1001 if (ret)
1002 platform_driver_unregister(&s3c2410_i2c_driver);
1003 }
1004
1005 return ret;
1006} 1005}
1007subsys_initcall(i2c_adap_s3c_init); 1006subsys_initcall(i2c_adap_s3c_init);
1008 1007
1009static void __exit i2c_adap_s3c_exit(void) 1008static void __exit i2c_adap_s3c_exit(void)
1010{ 1009{
1011 platform_driver_unregister(&s3c2410_i2c_driver); 1010 platform_driver_unregister(&s3c24xx_i2c_driver);
1012 platform_driver_unregister(&s3c2440_i2c_driver);
1013} 1011}
1014module_exit(i2c_adap_s3c_exit); 1012module_exit(i2c_adap_s3c_exit);
1015 1013
1016MODULE_DESCRIPTION("S3C24XX I2C Bus driver"); 1014MODULE_DESCRIPTION("S3C24XX I2C Bus driver");
1017MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); 1015MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
1018MODULE_LICENSE("GPL"); 1016MODULE_LICENSE("GPL");
1019MODULE_ALIAS("platform:s3c2410-i2c");
1020MODULE_ALIAS("platform:s3c2440-i2c");
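The consolidation works because the platform bus also matches a device's name against a driver's id_table and records the entry that hit, which platform_get_device_id() later returns; board code that registers an "s3c2410-i2c" or "s3c2440-i2c" device therefore still binds to the renamed "s3c-i2c" driver. Device side, for illustration:

static struct platform_device myboard_i2c = {
        .name = "s3c2440-i2c",  /* matched via s3c24xx_driver_ids */
        .id   = -1,
};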
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index baa28b73ae4..b9680f50f54 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -396,7 +396,7 @@ static int __devinit calc_CCR(unsigned long scl_hz)
396 signed char cdf, cdfm; 396 signed char cdf, cdfm;
397 int scgd, scgdm, scgds; 397 int scgd, scgdm, scgds;
398 398
399 mclk = clk_get(NULL, "module_clk"); 399 mclk = clk_get(NULL, "peripheral_clk");
400 if (IS_ERR(mclk)) { 400 if (IS_ERR(mclk)) {
401 return PTR_ERR(mclk); 401 return PTR_ERR(mclk);
402 } else { 402 } else {
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index cf06494bb74..9a5d0aaac9d 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -46,7 +46,7 @@ menuconfig IDE
46 SMART parameters from disk drives. 46 SMART parameters from disk drives.
47 47
48 To compile this driver as a module, choose M here: the 48 To compile this driver as a module, choose M here: the
49 module will be called ide-core.ko. 49 module will be called ide-core.
50 50
51 For further information, please read <file:Documentation/ide/ide.txt>. 51 For further information, please read <file:Documentation/ide/ide.txt>.
52 52
diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
index 537da1cde16..e59b6dee9ae 100644
--- a/drivers/ide/alim15x3.c
+++ b/drivers/ide/alim15x3.c
@@ -402,27 +402,23 @@ static u8 ali_cable_detect(ide_hwif_t *hwif)
402 return cbl; 402 return cbl;
403} 403}
404 404
405#if !defined(CONFIG_SPARC64) && !defined(CONFIG_PPC) 405#ifndef CONFIG_SPARC64
406/** 406/**
407 * init_hwif_ali15x3 - Initialize the ALI IDE x86 stuff 407 * init_hwif_ali15x3 - Initialize the ALI IDE x86 stuff
408 * @hwif: interface to configure 408 * @hwif: interface to configure
409 * 409 *
410 * Obtain the IRQ tables for an ALi based IDE solution on the PC 410 * Obtain the IRQ tables for an ALi based IDE solution on the PC
411 * class platforms. This part of the code isn't applicable to the 411 * class platforms. This part of the code isn't applicable to the
412 * Sparc and PowerPC systems. 412 * Sparc systems.
413 */ 413 */
414 414
415static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif) 415static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
416{ 416{
417 struct pci_dev *dev = to_pci_dev(hwif->dev);
418 u8 ideic, inmir; 417 u8 ideic, inmir;
419 s8 irq_routing_table[] = { -1, 9, 3, 10, 4, 5, 7, 6, 418 s8 irq_routing_table[] = { -1, 9, 3, 10, 4, 5, 7, 6,
420 1, 11, 0, 12, 0, 14, 0, 15 }; 419 1, 11, 0, 12, 0, 14, 0, 15 };
421 int irq = -1; 420 int irq = -1;
422 421
423 if (dev->device == PCI_DEVICE_ID_AL_M5229)
424 hwif->irq = hwif->channel ? 15 : 14;
425
426 if (isa_dev) { 422 if (isa_dev) {
427 /* 423 /*
428 * read IDE interface control 424 * read IDE interface control
@@ -455,7 +451,7 @@ static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
455} 451}
456#else 452#else
457#define init_hwif_ali15x3 NULL 453#define init_hwif_ali15x3 NULL
458#endif /* !defined(CONFIG_SPARC64) && !defined(CONFIG_PPC) */ 454#endif /* CONFIG_SPARC64 */
459 455
460/** 456/**
461 * init_dma_ali15x3 - set up DMA on ALi15x3 457 * init_dma_ali15x3 - set up DMA on ALi15x3
diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c
index 403d0e4265d..fc0949a8cfd 100644
--- a/drivers/ide/at91_ide.c
+++ b/drivers/ide/at91_ide.c
@@ -216,6 +216,7 @@ static const struct ide_port_info at91_ide_port_info __initdata = {
216 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA | IDE_HFLAG_SINGLE | 216 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA | IDE_HFLAG_SINGLE |
217 IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_UNMASK_IRQS, 217 IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_UNMASK_IRQS,
218 .pio_mask = ATA_PIO6, 218 .pio_mask = ATA_PIO6,
219 .chipset = ide_generic,
219}; 220};
220 221
221/* 222/*
@@ -246,8 +247,7 @@ irqreturn_t at91_irq_handler(int irq, void *dev_id)
246static int __init at91_ide_probe(struct platform_device *pdev) 247static int __init at91_ide_probe(struct platform_device *pdev)
247{ 248{
248 int ret; 249 int ret;
249 hw_regs_t hw; 250 struct ide_hw hw, *hws[] = { &hw };
250 hw_regs_t *hws[] = { &hw, NULL, NULL, NULL };
251 struct ide_host *host; 251 struct ide_host *host;
252 struct resource *res; 252 struct resource *res;
253 unsigned long tf_base = 0, ctl_base = 0; 253 unsigned long tf_base = 0, ctl_base = 0;
@@ -304,10 +304,9 @@ static int __init at91_ide_probe(struct platform_device *pdev)
304 ide_std_init_ports(&hw, tf_base, ctl_base + 6); 304 ide_std_init_ports(&hw, tf_base, ctl_base + 6);
305 305
306 hw.irq = board->irq_pin; 306 hw.irq = board->irq_pin;
307 hw.chipset = ide_generic;
308 hw.dev = &pdev->dev; 307 hw.dev = &pdev->dev;
309 308
310 host = ide_host_alloc(&at91_ide_port_info, hws); 309 host = ide_host_alloc(&at91_ide_port_info, hws, 1);
311 if (!host) { 310 if (!host) {
312 perr("failed to allocate ide host\n"); 311 perr("failed to allocate ide host\n");
313 return -ENOMEM; 312 return -ENOMEM;
diff --git a/drivers/ide/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c
index 46013644c96..58121bd6c11 100644
--- a/drivers/ide/au1xxx-ide.c
+++ b/drivers/ide/au1xxx-ide.c
@@ -449,7 +449,7 @@ static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
449} 449}
450#endif 450#endif
451 451
452static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif) 452static void auide_setup_ports(struct ide_hw *hw, _auide_hwif *ahwif)
453{ 453{
454 int i; 454 int i;
455 unsigned long *ata_regs = hw->io_ports_array; 455 unsigned long *ata_regs = hw->io_ports_array;
@@ -499,6 +499,7 @@ static const struct ide_port_info au1xxx_port_info = {
499#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA 499#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
500 .mwdma_mask = ATA_MWDMA2, 500 .mwdma_mask = ATA_MWDMA2,
501#endif 501#endif
502 .chipset = ide_au1xxx,
502}; 503};
503 504
504static int au_ide_probe(struct platform_device *dev) 505static int au_ide_probe(struct platform_device *dev)
@@ -507,7 +508,7 @@ static int au_ide_probe(struct platform_device *dev)
507 struct resource *res; 508 struct resource *res;
508 struct ide_host *host; 509 struct ide_host *host;
509 int ret = 0; 510 int ret = 0;
510 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 511 struct ide_hw hw, *hws[] = { &hw };
511 512
512#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) 513#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
513 char *mode = "MWDMA2"; 514 char *mode = "MWDMA2";
@@ -548,9 +549,8 @@ static int au_ide_probe(struct platform_device *dev)
548 auide_setup_ports(&hw, ahwif); 549 auide_setup_ports(&hw, ahwif);
549 hw.irq = ahwif->irq; 550 hw.irq = ahwif->irq;
550 hw.dev = &dev->dev; 551 hw.dev = &dev->dev;
551 hw.chipset = ide_au1xxx;
552 552
553 ret = ide_host_add(&au1xxx_port_info, hws, &host); 553 ret = ide_host_add(&au1xxx_port_info, hws, 1, &host);
554 if (ret) 554 if (ret)
555 goto out; 555 goto out;
556 556
diff --git a/drivers/ide/buddha.c b/drivers/ide/buddha.c
index d028f8864bc..e3c6a591330 100644
--- a/drivers/ide/buddha.c
+++ b/drivers/ide/buddha.c
@@ -121,7 +121,7 @@ static int xsurf_ack_intr(ide_hwif_t *hwif)
121 return 1; 121 return 1;
122} 122}
123 123
124static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base, 124static void __init buddha_setup_ports(struct ide_hw *hw, unsigned long base,
125 unsigned long ctl, unsigned long irq_port, 125 unsigned long ctl, unsigned long irq_port,
126 ide_ack_intr_t *ack_intr) 126 ide_ack_intr_t *ack_intr)
127{ 127{
@@ -139,13 +139,12 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base,
139 139
140 hw->irq = IRQ_AMIGA_PORTS; 140 hw->irq = IRQ_AMIGA_PORTS;
141 hw->ack_intr = ack_intr; 141 hw->ack_intr = ack_intr;
142
143 hw->chipset = ide_generic;
144} 142}
145 143
146static const struct ide_port_info buddha_port_info = { 144static const struct ide_port_info buddha_port_info = {
147 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, 145 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
148 .irq_flags = IRQF_SHARED, 146 .irq_flags = IRQF_SHARED,
147 .chipset = ide_generic,
149}; 148};
150 149
151 /* 150 /*
@@ -161,7 +160,7 @@ static int __init buddha_init(void)
161 160
162 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { 161 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
163 unsigned long board; 162 unsigned long board;
164 hw_regs_t hw[MAX_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL }; 163 struct ide_hw hw[MAX_NUM_HWIFS], *hws[MAX_NUM_HWIFS];
165 164
166 if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) { 165 if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) {
167 buddha_num_hwifs = BUDDHA_NUM_HWIFS; 166 buddha_num_hwifs = BUDDHA_NUM_HWIFS;
@@ -225,7 +224,7 @@ fail_base2:
225 hws[i] = &hw[i]; 224 hws[i] = &hw[i];
226 } 225 }
227 226
228 ide_host_add(&buddha_port_info, hws, NULL); 227 ide_host_add(&buddha_port_info, hws, i, NULL);
229 } 228 }
230 229
231 return 0; 230 return 0;
diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
index 8890276fef7..1683ed5c732 100644
--- a/drivers/ide/cmd640.c
+++ b/drivers/ide/cmd640.c
@@ -708,7 +708,7 @@ static int __init cmd640x_init(void)
708 int second_port_cmd640 = 0, rc; 708 int second_port_cmd640 = 0, rc;
709 const char *bus_type, *port2; 709 const char *bus_type, *port2;
710 u8 b, cfr; 710 u8 b, cfr;
711 hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL }; 711 struct ide_hw hw[2], *hws[2];
712 712
713 if (cmd640_vlb && probe_for_cmd640_vlb()) { 713 if (cmd640_vlb && probe_for_cmd640_vlb()) {
714 bus_type = "VLB"; 714 bus_type = "VLB";
@@ -762,11 +762,9 @@ static int __init cmd640x_init(void)
 
 	ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
 	hw[0].irq = 14;
-	hw[0].chipset = ide_cmd640;
 
 	ide_std_init_ports(&hw[1], 0x170, 0x376);
 	hw[1].irq = 15;
-	hw[1].chipset = ide_cmd640;
 
 	printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x"
 			 "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr);
@@ -824,7 +822,8 @@ static int __init cmd640x_init(void)
 	cmd640_dump_regs();
 #endif
 
-	return ide_host_add(&cmd640_port_info, hws, NULL);
+	return ide_host_add(&cmd640_port_info, hws, second_port_cmd640 ? 2 : 1,
+			    NULL);
 }
 
 module_param_named(probe_vlb, cmd640_vlb, bool, 0);
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
index 87987a7d36c..bd066bb9d61 100644
--- a/drivers/ide/cs5520.c
+++ b/drivers/ide/cs5520.c
@@ -110,7 +110,7 @@ static const struct ide_port_info cyrix_chipset __devinitdata = {
 static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	const struct ide_port_info *d = &cyrix_chipset;
-	hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
+	struct ide_hw hw[2], *hws[] = { NULL, NULL };
 
 	ide_setup_pci_noise(dev, d);
 
@@ -136,7 +136,7 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
 	ide_pci_setup_ports(dev, d, &hw[0], &hws[0]);
 	hw[0].irq = 14;
 
-	return ide_host_add(d, hws, NULL);
+	return ide_host_add(d, hws, 2, NULL);
 }
 
 static const struct pci_device_id cs5520_pci_tbl[] = {
diff --git a/drivers/ide/delkin_cb.c b/drivers/ide/delkin_cb.c
index f153b95619b..1e10eba62ce 100644
--- a/drivers/ide/delkin_cb.c
+++ b/drivers/ide/delkin_cb.c
@@ -68,6 +68,7 @@ static const struct ide_port_info delkin_cb_port_info = {
 			  IDE_HFLAG_NO_DMA,
 	.irq_flags	= IRQF_SHARED,
 	.init_chipset	= delkin_cb_init_chipset,
+	.chipset	= ide_pci,
 };
 
 static int __devinit
@@ -76,7 +77,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
 	struct ide_host *host;
 	unsigned long base;
 	int rc;
-	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+	struct ide_hw hw, *hws[] = { &hw };
 
 	rc = pci_enable_device(dev);
 	if (rc) {
@@ -97,9 +98,8 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
 	ide_std_init_ports(&hw, base + 0x10, base + 0x1e);
 	hw.irq = dev->irq;
 	hw.dev = &dev->dev;
-	hw.chipset = ide_pci;		/* this enables IRQ sharing */
 
-	rc = ide_host_add(&delkin_cb_port_info, hws, &host);
+	rc = ide_host_add(&delkin_cb_port_info, hws, 1, &host);
 	if (rc)
 		goto out_disable;
 
diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c
index 0e2df6755ec..22fa27389c3 100644
--- a/drivers/ide/falconide.c
+++ b/drivers/ide/falconide.c
@@ -111,9 +111,10 @@ static const struct ide_port_info falconide_port_info = {
 	.host_flags	= IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE |
 			  IDE_HFLAG_NO_DMA,
 	.irq_flags	= IRQF_SHARED,
+	.chipset	= ide_generic,
 };
 
-static void __init falconide_setup_ports(hw_regs_t *hw)
+static void __init falconide_setup_ports(struct ide_hw *hw)
 {
 	int i;
 
@@ -128,8 +129,6 @@ static void __init falconide_setup_ports(hw_regs_t *hw)
 
 	hw->irq = IRQ_MFP_IDE;
 	hw->ack_intr = NULL;
-
-	hw->chipset = ide_generic;
 }
 
 /*
@@ -139,7 +138,7 @@ static void __init falconide_setup_ports(hw_regs_t *hw)
 static int __init falconide_init(void)
 {
 	struct ide_host *host;
-	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+	struct ide_hw hw, *hws[] = { &hw };
 	int rc;
 
 	if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE))
@@ -154,7 +153,7 @@ static int __init falconide_init(void)
 
 	falconide_setup_ports(&hw);
 
-	host = ide_host_alloc(&falconide_port_info, hws);
+	host = ide_host_alloc(&falconide_port_info, hws, 1);
 	if (host == NULL) {
 		rc = -ENOMEM;
 		goto err;
diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c
index c7119516c5a..4451a6a5dfe 100644
--- a/drivers/ide/gayle.c
+++ b/drivers/ide/gayle.c
@@ -88,7 +88,7 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif)
 	return 1;
 }
 
-static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
+static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base,
 				     unsigned long ctl, unsigned long irq_port,
 				     ide_ack_intr_t *ack_intr)
 {
@@ -106,14 +106,13 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
 
 	hw->irq = IRQ_AMIGA_PORTS;
 	hw->ack_intr = ack_intr;
-
-	hw->chipset = ide_generic;
 }
 
 static const struct ide_port_info gayle_port_info = {
 	.host_flags	= IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE |
 			  IDE_HFLAG_NO_DMA,
 	.irq_flags	= IRQF_SHARED,
+	.chipset	= ide_generic,
 };
 
 /*
@@ -126,7 +125,7 @@ static int __init gayle_init(void)
 	unsigned long base, ctrlport, irqport;
 	ide_ack_intr_t *ack_intr;
 	int a4000, i, rc;
-	hw_regs_t hw[GAYLE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
+	struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS];
 
 	if (!MACH_IS_AMIGA)
 		return -ENODEV;
@@ -171,7 +170,7 @@ found:
 		hws[i] = &hw[i];
 	}
 
-	rc = ide_host_add(&gayle_port_info, hws, NULL);
+	rc = ide_host_add(&gayle_port_info, hws, i, NULL);
 	if (rc)
 		release_mem_region(res_start, res_n);
 
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 0feb66c720e..7ce68ef6b90 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -138,14 +138,6 @@
 #undef HPT_RESET_STATE_ENGINE
 #undef HPT_DELAY_INTERRUPT
 
-static const char *quirk_drives[] = {
-	"QUANTUM FIREBALLlct08 08",
-	"QUANTUM FIREBALLP KA6.4",
-	"QUANTUM FIREBALLP LM20.4",
-	"QUANTUM FIREBALLP LM20.5",
-	NULL
-};
-
 static const char *bad_ata100_5[] = {
 	"IBM-DTLA-307075",
 	"IBM-DTLA-307060",
@@ -729,27 +721,13 @@ static void hpt3xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
 	hpt3xx_set_mode(drive, XFER_PIO_0 + pio);
 }
 
-static void hpt3xx_quirkproc(ide_drive_t *drive)
-{
-	char *m = (char *)&drive->id[ATA_ID_PROD];
-	const char **list = quirk_drives;
-
-	while (*list)
-		if (strstr(m, *list++)) {
-			drive->quirk_list = 1;
-			return;
-		}
-
-	drive->quirk_list = 0;
-}
-
 static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	struct pci_dev *dev = to_pci_dev(hwif->dev);
 	struct hpt_info *info = hpt3xx_get_info(hwif->dev);
 
-	if (drive->quirk_list == 0)
+	if ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
 		return;
 
 	if (info->chip_type >= HPT370) {
@@ -1404,7 +1382,6 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
 static const struct ide_port_ops hpt3xx_port_ops = {
 	.set_pio_mode		= hpt3xx_set_pio_mode,
 	.set_dma_mode		= hpt3xx_set_mode,
-	.quirkproc		= hpt3xx_quirkproc,
 	.maskproc		= hpt3xx_maskproc,
 	.mdma_filter		= hpt3xx_mdma_filter,
 	.udma_filter		= hpt3xx_udma_filter,
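The hpt366 hunks above retire the driver-private quirk machinery: the per-driver model-string list and the ->quirkproc hook go away, and the drive->quirk_list integer is superseded by a generic IDE_DFLAG_NIEN_QUIRK device flag set by the IDE core during probing. In sketch form, the only thing a host driver still does is test the flag:

	/* before: driver-owned state */
	if (drive->quirk_list == 0)
		return;

	/* after: generic flag in drive->dev_flags, maintained by the core */
	if ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
		return;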
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 36da913cc55..5af3d0ffaf0 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -65,8 +65,6 @@ static struct cardinfo icside_cardinfo_v6_2 = {
 };
 
 struct icside_state {
-	unsigned int channel;
-	unsigned int enabled;
 	void __iomem *irq_port;
 	void __iomem *ioc_base;
 	unsigned int sel;
@@ -116,18 +114,11 @@ static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
 	struct icside_state *state = ec->irq_data;
 	void __iomem *base = state->irq_port;
 
-	state->enabled = 1;
+	writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
+	readb(base + ICS_ARCIN_V6_INTROFFSET_2);
 
-	switch (state->channel) {
-	case 0:
-		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
-		readb(base + ICS_ARCIN_V6_INTROFFSET_2);
-		break;
-	case 1:
-		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
-		readb(base + ICS_ARCIN_V6_INTROFFSET_1);
-		break;
-	}
+	writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
+	readb(base + ICS_ARCIN_V6_INTROFFSET_1);
 }
 
 /* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
@@ -137,8 +128,6 @@ static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
 {
 	struct icside_state *state = ec->irq_data;
 
-	state->enabled = 0;
-
 	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
 	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
 }
@@ -160,44 +149,6 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = {
 	.irqpending	= icside_irqpending_arcin_v6,
 };
 
-/*
- * Handle routing of interrupts.  This is called before
- * we write the command to the drive.
- */
-static void icside_maskproc(ide_drive_t *drive, int mask)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	struct expansion_card *ec = ECARD_DEV(hwif->dev);
-	struct icside_state *state = ecard_get_drvdata(ec);
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	state->channel = hwif->channel;
-
-	if (state->enabled && !mask) {
-		switch (hwif->channel) {
-		case 0:
-			writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
-			readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
-			break;
-		case 1:
-			writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
-			readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
-			break;
-		}
-	} else {
-		readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
-		readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
-	}
-
-	local_irq_restore(flags);
-}
-
-static const struct ide_port_ops icside_v6_no_dma_port_ops = {
-	.maskproc = icside_maskproc,
-};
-
 #ifdef CONFIG_BLK_DEV_IDEDMA_ICS
 /*
  * SG-DMA support.
@@ -275,7 +226,6 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
 
 static const struct ide_port_ops icside_v6_port_ops = {
 	.set_dma_mode	= icside_set_dma_mode,
-	.maskproc	= icside_maskproc,
 };
 
 static void icside_dma_host_set(ide_drive_t *drive, int on)
@@ -320,11 +270,6 @@ static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
 	BUG_ON(dma_channel_active(ec->dma));
 
 	/*
-	 * Ensure that we have the right interrupt routed.
-	 */
-	icside_maskproc(drive, 0);
-
-	/*
 	 * Route the DMA signals to the correct interface.
 	 */
 	writeb(state->sel | hwif->channel, state->ioc_base);
@@ -381,7 +326,7 @@ static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
 	return -EOPNOTSUPP;
 }
 
-static void icside_setup_ports(hw_regs_t *hw, void __iomem *base,
+static void icside_setup_ports(struct ide_hw *hw, void __iomem *base,
 			       struct cardinfo *info, struct expansion_card *ec)
 {
 	unsigned long port = (unsigned long)base + info->dataoffset;
@@ -398,11 +343,11 @@ static void icside_setup_ports(hw_regs_t *hw, void __iomem *base,
 
 	hw->irq = ec->irq;
 	hw->dev = &ec->dev;
-	hw->chipset = ide_acorn;
 }
 
 static const struct ide_port_info icside_v5_port_info = {
 	.host_flags		= IDE_HFLAG_NO_DMA,
+	.chipset		= ide_acorn,
 };
 
 static int __devinit
@@ -410,7 +355,7 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
 {
 	void __iomem *base;
 	struct ide_host *host;
-	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+	struct ide_hw hw, *hws[] = { &hw };
 	int ret;
 
 	base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
@@ -431,7 +376,7 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
 
 	icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec);
 
-	host = ide_host_alloc(&icside_v5_port_info, hws);
+	host = ide_host_alloc(&icside_v5_port_info, hws, 1);
 	if (host == NULL)
 		return -ENODEV;
 
@@ -452,11 +397,11 @@ err_free:
 
 static const struct ide_port_info icside_v6_port_info __initdata = {
 	.init_dma		= icside_dma_off_init,
-	.port_ops		= &icside_v6_no_dma_port_ops,
 	.dma_ops		= &icside_v6_dma_ops,
 	.host_flags		= IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
 	.mwdma_mask		= ATA_MWDMA2,
 	.swdma_mask		= ATA_SWDMA2,
+	.chipset		= ide_acorn,
 };
 
 static int __devinit
@@ -466,7 +411,7 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
 	struct ide_host *host;
 	unsigned int sel = 0;
 	int ret;
-	hw_regs_t hw[2], *hws[] = { &hw[0], &hw[1], NULL, NULL };
+	struct ide_hw hw[2], *hws[] = { &hw[0], &hw[1] };
 	struct ide_port_info d = icside_v6_port_info;
 
 	ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
@@ -506,7 +451,7 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
 	icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec);
 	icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec);
 
-	host = ide_host_alloc(&d, hws);
+	host = ide_host_alloc(&d, hws, 2);
 	if (host == NULL)
 		return -ENODEV;
 
diff --git a/drivers/ide/ide-4drives.c b/drivers/ide/ide-4drives.c
index 78aca75a2c4..979d342c338 100644
--- a/drivers/ide/ide-4drives.c
+++ b/drivers/ide/ide-4drives.c
@@ -25,12 +25,13 @@ static const struct ide_port_info ide_4drives_port_info = {
 	.port_ops		= &ide_4drives_port_ops,
 	.host_flags		= IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA |
 				  IDE_HFLAG_4DRIVES,
+	.chipset		= ide_4drives,
 };
 
 static int __init ide_4drives_init(void)
 {
 	unsigned long base = 0x1f0, ctl = 0x3f6;
-	hw_regs_t hw, *hws[] = { &hw, &hw, NULL, NULL };
+	struct ide_hw hw, *hws[] = { &hw, &hw };
 
 	if (probe_4drives == 0)
 		return -ENODEV;
@@ -52,9 +53,8 @@ static int __init ide_4drives_init(void)
 
 	ide_std_init_ports(&hw, base, ctl);
 	hw.irq = 14;
-	hw.chipset = ide_4drives;
 
-	return ide_host_add(&ide_4drives_port_info, hws, NULL);
+	return ide_host_add(&ide_4drives_port_info, hws, 2, NULL);
 }
 
 module_init(ide_4drives_init);
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 7201b176d75..702ef64a0f1 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -80,34 +80,6 @@ void ide_init_pc(struct ide_atapi_pc *pc)
 EXPORT_SYMBOL_GPL(ide_init_pc);
 
 /*
- * Generate a new packet command request in front of the request queue, before
- * the current request, so that it will be processed immediately, on the next
- * pass through the driver.
- */
-static void ide_queue_pc_head(ide_drive_t *drive, struct gendisk *disk,
-			      struct ide_atapi_pc *pc, struct request *rq)
-{
-	blk_rq_init(NULL, rq);
-	rq->cmd_type = REQ_TYPE_SPECIAL;
-	rq->cmd_flags |= REQ_PREEMPT;
-	rq->buffer = (char *)pc;
-	rq->rq_disk = disk;
-
-	if (pc->req_xfer) {
-		rq->data = pc->buf;
-		rq->data_len = pc->req_xfer;
-	}
-
-	memcpy(rq->cmd, pc->c, 12);
-	if (drive->media == ide_tape)
-		rq->cmd[13] = REQ_IDETAPE_PC1;
-
-	drive->hwif->rq = NULL;
-
-	elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
-}
-
-/*
  * Add a special packet command request to the tail of the request queue,
  * and wait for it to be serviced.
  */
@@ -119,19 +91,21 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
 
 	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
 	rq->cmd_type = REQ_TYPE_SPECIAL;
-	rq->buffer = (char *)pc;
+	rq->special = (char *)pc;
 
 	if (pc->req_xfer) {
-		rq->data = pc->buf;
-		rq->data_len = pc->req_xfer;
+		error = blk_rq_map_kern(drive->queue, rq, pc->buf, pc->req_xfer,
+					GFP_NOIO);
+		if (error)
+			goto put_req;
 	}
 
 	memcpy(rq->cmd, pc->c, 12);
 	if (drive->media == ide_tape)
 		rq->cmd[13] = REQ_IDETAPE_PC1;
 	error = blk_execute_rq(drive->queue, disk, rq, 0);
+put_req:
 	blk_put_request(rq);
-
 	return error;
 }
 EXPORT_SYMBOL_GPL(ide_queue_pc_tail);
@@ -191,20 +165,113 @@ void ide_create_request_sense_cmd(ide_drive_t *drive, struct ide_atapi_pc *pc)
 }
 EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd);
 
+void ide_prep_sense(ide_drive_t *drive, struct request *rq)
+{
+	struct request_sense *sense = &drive->sense_data;
+	struct request *sense_rq = &drive->sense_rq;
+	unsigned int cmd_len, sense_len;
+	int err;
+
+	debug_log("%s: enter\n", __func__);
+
+	switch (drive->media) {
+	case ide_floppy:
+		cmd_len = 255;
+		sense_len = 18;
+		break;
+	case ide_tape:
+		cmd_len = 20;
+		sense_len = 20;
+		break;
+	default:
+		cmd_len = 18;
+		sense_len = 18;
+	}
+
+	BUG_ON(sense_len > sizeof(*sense));
+
+	if (blk_sense_request(rq) || drive->sense_rq_armed)
+		return;
+
+	memset(sense, 0, sizeof(*sense));
+
+	blk_rq_init(rq->q, sense_rq);
+
+	err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
+			      GFP_NOIO);
+	if (unlikely(err)) {
+		if (printk_ratelimit())
+			printk(KERN_WARNING "%s: failed to map sense buffer\n",
+			       drive->name);
+		return;
+	}
+
+	sense_rq->rq_disk = rq->rq_disk;
+	sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
+	sense_rq->cmd[4] = cmd_len;
+	sense_rq->cmd_type = REQ_TYPE_SENSE;
+	sense_rq->cmd_flags |= REQ_PREEMPT;
+
+	if (drive->media == ide_tape)
+		sense_rq->cmd[13] = REQ_IDETAPE_PC1;
+
+	drive->sense_rq_armed = true;
+}
+EXPORT_SYMBOL_GPL(ide_prep_sense);
+
+int ide_queue_sense_rq(ide_drive_t *drive, void *special)
+{
+	/* deferred failure from ide_prep_sense() */
+	if (!drive->sense_rq_armed) {
+		printk(KERN_WARNING "%s: failed queue sense request\n",
+		       drive->name);
+		return -ENOMEM;
+	}
+
+	drive->sense_rq.special = special;
+	drive->sense_rq_armed = false;
+
+	drive->hwif->rq = NULL;
+
+	elv_add_request(drive->queue, &drive->sense_rq,
+			ELEVATOR_INSERT_FRONT, 0);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
+
 /*
  * Called when an error was detected during the last packet command.
- * We queue a request sense packet command in the head of the request list.
+ * We queue a request sense packet command at the head of the request
+ * queue.
  */
-void ide_retry_pc(ide_drive_t *drive, struct gendisk *disk)
+void ide_retry_pc(ide_drive_t *drive)
 {
-	struct request *rq = &drive->request_sense_rq;
+	struct request *failed_rq = drive->hwif->rq;
+	struct request *sense_rq = &drive->sense_rq;
 	struct ide_atapi_pc *pc = &drive->request_sense_pc;
 
 	(void)ide_read_error(drive);
-	ide_create_request_sense_cmd(drive, pc);
+
+	/* init pc from sense_rq */
+	ide_init_pc(pc);
+	memcpy(pc->c, sense_rq->cmd, 12);
+	pc->buf = bio_data(sense_rq->bio);	/* pointer to mapped address */
+	pc->req_xfer = blk_rq_bytes(sense_rq);
+
 	if (drive->media == ide_tape)
-		set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
-	ide_queue_pc_head(drive, disk, pc, rq);
+		drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
+
+	/*
+	 * Push back the failed request and put request sense on top
+	 * of it.  The failed command will be retried after sense data
+	 * is acquired.
+	 */
+	blk_requeue_request(failed_rq->q, failed_rq);
+	drive->hwif->rq = NULL;
+	if (ide_queue_sense_rq(drive, pc)) {
+		blk_start_request(failed_rq);
+		ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
+	}
 }
 EXPORT_SYMBOL_GPL(ide_retry_pc);
 
@@ -246,7 +313,7 @@ int ide_cd_get_xferlen(struct request *rq)
 		return 32768;
 	else if (blk_sense_request(rq) || blk_pc_request(rq) ||
 		 rq->cmd_type == REQ_TYPE_ATA_PC)
-		return rq->data_len;
+		return blk_rq_bytes(rq);
 	else
 		return 0;
 }
@@ -276,7 +343,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 	struct ide_cmd *cmd = &hwif->cmd;
 	struct request *rq = hwif->rq;
 	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
-	xfer_func_t *xferfunc;
 	unsigned int timeout, done;
 	u16 bcount;
 	u8 stat, ireason, dsc = 0;
@@ -303,18 +369,14 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 				    drive->name, rq_data_dir(pc->rq)
 						 ? "write" : "read");
 			pc->flags |= PC_FLAG_DMA_ERROR;
-		} else {
+		} else
 			pc->xferred = pc->req_xfer;
-			if (drive->pc_update_buffers)
-				drive->pc_update_buffers(drive, pc);
-		}
 		debug_log("%s: DMA finished\n", drive->name);
 	}
 
 	/* No more interrupts */
 	if ((stat & ATA_DRQ) == 0) {
 		int uptodate, error;
-		unsigned int done;
 
 		debug_log("Packet command completed, %d bytes transferred\n",
 			  pc->xferred);
@@ -343,7 +405,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 			debug_log("[cmd %x]: check condition\n", rq->cmd[0]);
 
 			/* Retry operation */
-			ide_retry_pc(drive, rq->rq_disk);
+			ide_retry_pc(drive);
 
 			/* queued, but not started */
 			return ide_stopped;
@@ -353,6 +415,12 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 		if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) && (stat & ATA_DSC) == 0)
 			dsc = 1;
 
+		/*
+		 * ->pc_callback() might change rq->data_len for
+		 * residual count, cache total length.
+		 */
+		done = blk_rq_bytes(rq);
+
 		/* Command finished - Call the callback function */
 		uptodate = drive->pc_callback(drive, dsc);
 
@@ -361,7 +429,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 
 		if (blk_special_request(rq)) {
 			rq->errors = 0;
-			done = blk_rq_bytes(rq);
 			error = 0;
 		} else {
 
@@ -370,15 +437,10 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 				rq->errors = -EIO;
 			}
 
-			if (drive->media == ide_tape)
-				done = ide_rq_bytes(rq); /* FIXME */
-			else
-				done = blk_rq_bytes(rq);
-
 			error = uptodate ? 0 : -EIO;
 		}
 
-		ide_complete_rq(drive, error, done);
+		ide_complete_rq(drive, error, blk_rq_bytes(rq));
 		return ide_stopped;
 	}
 
@@ -407,21 +469,11 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 		return ide_do_reset(drive);
 	}
 
-	xferfunc = write ? tp_ops->output_data : tp_ops->input_data;
-
-	if (drive->media == ide_floppy && pc->buf == NULL) {
-		done = min_t(unsigned int, bcount, cmd->nleft);
-		ide_pio_bytes(drive, cmd, write, done);
-	} else if (drive->media == ide_tape && pc->bh) {
-		done = drive->pc_io_buffers(drive, pc, bcount, write);
-	} else {
-		done = min_t(unsigned int, bcount, pc->req_xfer - pc->xferred);
-		xferfunc(drive, NULL, pc->cur_pos, done);
-	}
+	done = min_t(unsigned int, bcount, cmd->nleft);
+	ide_pio_bytes(drive, cmd, write, done);
 
-	/* Update the current position */
+	/* Update transferred byte count */
 	pc->xferred += done;
-	pc->cur_pos += done;
 
 	bcount -= done;
 
@@ -525,7 +577,7 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
 
 	/*
 	 * If necessary schedule the packet transfer to occur 'timeout'
-	 * miliseconds later in ide_delayed_transfer_pc() after the
+	 * milliseconds later in ide_delayed_transfer_pc() after the
 	 * device says it's ready for a packet.
 	 */
 	if (drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) {
@@ -599,7 +651,6 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
 
 	/* We haven't transferred any data yet */
 	pc->xferred = 0;
-	pc->cur_pos = pc->buf;
 
 	valid_tf = IDE_VALID_DEVICE;
 	bcount = ((drive->media == ide_tape) ?
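The net effect of the ide-atapi.c changes is a two-phase sense-handling scheme: ide_prep_sense() runs before a packet command is issued, mapping drive->sense_data into the pre-allocated drive->sense_rq with blk_rq_map_kern() while failure is still harmless, and ide_retry_pc() later requeues the failed command behind that armed sense request on CHECK CONDITION. A rough sketch of the call order from a driver's request handler (hypothetical foo_ name; the real per-media handlers differ in detail):

	static ide_startstop_t foo_do_request(ide_drive_t *drive,
					      struct request *rq, sector_t block)
	{
		struct ide_cmd cmd;

		/* arm drive->sense_rq up front; allocation can fail here
		 * without losing a command that is already in flight */
		ide_prep_sense(drive, rq);

		memset(&cmd, 0, sizeof(cmd));
		cmd.rq = rq;
		/* ... fill in taskfile / sg list as the media type requires ... */
		return ide_issue_pc(drive, &cmd);
	}

	/* in the IRQ handler, on CHECK CONDITION: */
	ide_retry_pc(drive);	/* requeues failed rq under the armed sense_rq */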
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 925eb9e245d..424140c6c40 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -182,7 +182,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
 			(sense->information[2] <<  8) |
 			(sense->information[3]);
 
-		if (drive->queue->hardsect_size == 2048)
+		if (queue_logical_block_size(drive->queue) == 2048)
 			/* device sector size is 2K */
 			sector <<= 2;
 
@@ -206,54 +206,25 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
 	ide_cd_log_error(drive->name, failed_command, sense);
 }
 
-static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
-				      struct request *failed_command)
-{
-	struct cdrom_info *info = drive->driver_data;
-	struct request *rq = &drive->request_sense_rq;
-
-	ide_debug_log(IDE_DBG_SENSE, "enter");
-
-	if (sense == NULL)
-		sense = &info->sense_data;
-
-	/* stuff the sense request in front of our current request */
-	blk_rq_init(NULL, rq);
-	rq->cmd_type = REQ_TYPE_ATA_PC;
-	rq->rq_disk = info->disk;
-
-	rq->data = sense;
-	rq->cmd[0] = GPCMD_REQUEST_SENSE;
-	rq->cmd[4] = 18;
-	rq->data_len = 18;
-
-	rq->cmd_type = REQ_TYPE_SENSE;
-	rq->cmd_flags |= REQ_PREEMPT;
-
-	/* NOTE! Save the failed command in "rq->buffer" */
-	rq->buffer = (void *) failed_command;
-
-	if (failed_command)
-		ide_debug_log(IDE_DBG_SENSE, "failed_cmd: 0x%x",
-			      failed_command->cmd[0]);
-
-	drive->hwif->rq = NULL;
-
-	elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
-}
-
 static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
 {
 	/*
-	 * For REQ_TYPE_SENSE, "rq->buffer" points to the original
-	 * failed request
+	 * For REQ_TYPE_SENSE, "rq->special" points to the original
+	 * failed request.  Also, the sense data should be read
+	 * directly from rq which might be different from the original
+	 * sense buffer if it got copied during mapping.
 	 */
-	struct request *failed = (struct request *)rq->buffer;
-	struct cdrom_info *info = drive->driver_data;
-	void *sense = &info->sense_data;
+	struct request *failed = (struct request *)rq->special;
+	void *sense = bio_data(rq->bio);
 
 	if (failed) {
 		if (failed->sense) {
+			/*
+			 * Sense is always read into drive->sense_data.
+			 * Copy back if the failed request has its
+			 * sense pointer set.
+			 */
+			memcpy(failed->sense, sense, 18);
 			sense = failed->sense;
 			failed->sense_len = rq->sense_len;
 		}
@@ -428,22 +399,13 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 
 	/* if we got a CHECK_CONDITION status, queue a request sense command */
 	if (stat & ATA_ERR)
-		cdrom_queue_request_sense(drive, NULL, NULL);
+		return ide_queue_sense_rq(drive, NULL) ? 2 : 1;
 	return 1;
 
 end_request:
 	if (stat & ATA_ERR) {
-		struct request_queue *q = drive->queue;
-		unsigned long flags;
-
-		spin_lock_irqsave(q->queue_lock, flags);
-		blkdev_dequeue_request(rq);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-
 		hwif->rq = NULL;
-
-		cdrom_queue_request_sense(drive, rq->sense, rq);
-		return 1;
+		return ide_queue_sense_rq(drive, rq) ? 2 : 1;
 	} else
 		return 2;
 }
@@ -503,14 +465,8 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
 	 * and some drives don't send them.  Sigh.
 	 */
 	if (rq->cmd[0] == GPCMD_REQUEST_SENSE &&
-	    cmd->nleft > 0 && cmd->nleft <= 5) {
-		unsigned int ofs = cmd->nbytes - cmd->nleft;
-
-		while (cmd->nleft > 0) {
-			*((u8 *)rq->data + ofs++) = 0;
-			cmd->nleft--;
-		}
-	}
+	    cmd->nleft > 0 && cmd->nleft <= 5)
+		cmd->nleft = 0;
 }
 
 int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
@@ -543,14 +499,18 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
 		rq->cmd_flags |= cmd_flags;
 		rq->timeout = timeout;
 		if (buffer) {
-			rq->data = buffer;
-			rq->data_len = *bufflen;
+			error = blk_rq_map_kern(drive->queue, rq, buffer,
+						*bufflen, GFP_NOIO);
+			if (error) {
+				blk_put_request(rq);
+				return error;
+			}
 		}
 
 		error = blk_execute_rq(drive->queue, info->disk, rq, 0);
 
 		if (buffer)
-			*bufflen = rq->data_len;
+			*bufflen = rq->resid_len;
 
 		flags = rq->cmd_flags;
 		blk_put_request(rq);
@@ -608,7 +568,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 	struct request *rq = hwif->rq;
 	ide_expiry_t *expiry = NULL;
 	int dma_error = 0, dma, thislen, uptodate = 0;
-	int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0, nsectors;
+	int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
 	int sense = blk_sense_request(rq);
 	unsigned int timeout;
 	u16 len;
@@ -738,13 +698,8 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 
 out_end:
 	if (blk_pc_request(rq) && rc == 0) {
-		unsigned int dlen = rq->data_len;
-
-		rq->data_len = 0;
-
-		if (blk_end_request(rq, 0, dlen))
-			BUG();
-
+		rq->resid_len = 0;
+		blk_end_request_all(rq, 0);
 		hwif->rq = NULL;
 	} else {
 		if (sense && uptodate)
@@ -762,21 +717,13 @@ out_end:
 			ide_cd_error_cmd(drive, cmd);
 
 		/* make sure it's fully ended */
-		if (blk_pc_request(rq))
-			nsectors = (rq->data_len + 511) >> 9;
-		else
-			nsectors = rq->hard_nr_sectors;
-
-		if (nsectors == 0)
-			nsectors = 1;
-
 		if (blk_fs_request(rq) == 0) {
-			rq->data_len -= (cmd->nbytes - cmd->nleft);
+			rq->resid_len -= cmd->nbytes - cmd->nleft;
 			if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
-				rq->data_len += cmd->last_xfer_len;
+				rq->resid_len += cmd->last_xfer_len;
 		}
 
-		ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9);
+		ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
 
 		if (sense && rc == 2)
 			ide_error(drive, "request sense failure", stat);
@@ -790,7 +737,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
 	struct request_queue *q = drive->queue;
 	int write = rq_data_dir(rq) == WRITE;
 	unsigned short sectors_per_frame =
-		queue_hardsect_size(q) >> SECTOR_BITS;
+		queue_logical_block_size(q) >> SECTOR_BITS;
 
 	ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, "
 				  "secs_per_frame: %u",
@@ -809,8 +756,8 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
 	}
 
 	/* fs requests *must* be hardware frame aligned */
-	if ((rq->nr_sectors & (sectors_per_frame - 1)) ||
-	    (rq->sector & (sectors_per_frame - 1)))
+	if ((blk_rq_sectors(rq) & (sectors_per_frame - 1)) ||
+	    (blk_rq_pos(rq) & (sectors_per_frame - 1)))
 		return ide_stopped;
 
 	/* use DMA, if possible */
@@ -838,15 +785,10 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
 		drive->dma = 0;
 
 	/* sg request */
-	if (rq->bio || ((rq->cmd_type == REQ_TYPE_ATA_PC) && rq->data_len)) {
+	if (rq->bio) {
 		struct request_queue *q = drive->queue;
+		char *buf = bio_data(rq->bio);
 		unsigned int alignment;
-		char *buf;
-
-		if (rq->bio)
-			buf = bio_data(rq->bio);
-		else
-			buf = rq->data;
 
 		drive->dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
 
@@ -858,7 +800,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
 		 */
 		alignment = queue_dma_alignment(q) | q->dma_pad_mask;
 		if ((unsigned long)buf & alignment
-		    || rq->data_len & q->dma_pad_mask
+		    || blk_rq_bytes(rq) & q->dma_pad_mask
 		    || object_is_on_stack(buf))
 			drive->dma = 0;
 	}
@@ -896,6 +838,9 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
 		goto out_end;
 	}
 
+	/* prepare sense request for this command */
+	ide_prep_sense(drive, rq);
+
 	memset(&cmd, 0, sizeof(cmd));
 
 	if (rq_data_dir(rq))
@@ -903,15 +848,14 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
 
 	cmd.rq = rq;
 
-	if (blk_fs_request(rq) || rq->data_len) {
-		ide_init_sg_cmd(&cmd, blk_fs_request(rq) ? (rq->nr_sectors << 9)
-							 : rq->data_len);
+	if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
+		ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
 		ide_map_sg(drive, &cmd);
 	}
 
 	return ide_issue_pc(drive, &cmd);
 out_end:
-	nsectors = rq->hard_nr_sectors;
+	nsectors = blk_rq_sectors(rq);
 
 	if (nsectors == 0)
 		nsectors = 1;
@@ -1077,8 +1021,8 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
 	/* save a private copy of the TOC capacity for error handling */
 	drive->probed_capacity = toc->capacity * sectors_per_frame;
 
-	blk_queue_hardsect_size(drive->queue,
+	blk_queue_logical_block_size(drive->queue,
 				sectors_per_frame << SECTOR_BITS);
 
 	/* first read just the header, so we know how long the TOC is */
 	stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
@@ -1394,9 +1338,9 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
 /* standard prep_rq_fn that builds 10 byte cmds */
 static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
 {
-	int hard_sect = queue_hardsect_size(q);
-	long block = (long)rq->hard_sector / (hard_sect >> 9);
-	unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9);
+	int hard_sect = queue_logical_block_size(q);
+	long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
+	unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
 
 	memset(rq->cmd, 0, BLK_MAX_CDB);
 
@@ -1599,7 +1543,7 @@ static int ide_cdrom_setup(ide_drive_t *drive)
 
 	nslots = ide_cdrom_probe_capabilities(drive);
 
-	blk_queue_hardsect_size(q, CD_FRAMESIZE);
+	blk_queue_logical_block_size(q, CD_FRAMESIZE);
 
 	if (ide_cdrom_register(drive, nslots)) {
 		printk(KERN_ERR PFX "%s: %s failed to register device with the"
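A second theme in the ide-cd.c hunks: passthrough buffers no longer ride in rq->data / rq->data_len. They are mapped through the block layer with blk_rq_map_kern(), and the residual byte count comes back in rq->resid_len. The resulting pattern, essentially as in ide_cd_queue_pc() above (sketch only, other error paths shortened):

	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	if (buffer) {
		error = blk_rq_map_kern(drive->queue, rq, buffer, *bufflen,
					GFP_NOIO);
		if (error) {
			blk_put_request(rq);
			return error;
		}
	}
	error = blk_execute_rq(drive->queue, info->disk, rq, 0);
	if (buffer)
		*bufflen = rq->resid_len;	/* bytes left untransferred */
	blk_put_request(rq);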
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 1d97101099c..93a3cf1b0f3 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -87,10 +87,6 @@ struct cdrom_info {
 
 	struct atapi_toc *toc;
 
-	/* The result of the last successful request sense command
-	   on this device. */
-	struct request_sense sense_data;
-
 	u8 max_speed;		/* Max speed of the drive. */
 	u8 current_speed;	/* Current speed of the drive. */
 
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 9e47f3529d5..527908ff298 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -155,6 +155,7 @@ static const struct ide_port_info idecs_port_info = {
 	.port_ops	= &idecs_port_ops,
 	.host_flags	= IDE_HFLAG_NO_DMA,
 	.irq_flags	= IRQF_SHARED,
+	.chipset	= ide_pci,
 };
 
 static struct ide_host *idecs_register(unsigned long io, unsigned long ctl,
@@ -163,7 +164,7 @@ static struct ide_host *idecs_register(unsigned long io, unsigned long ctl,
 	struct ide_host *host;
 	ide_hwif_t *hwif;
 	int i, rc;
-	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+	struct ide_hw hw, *hws[] = { &hw };
 
 	if (!request_region(io, 8, DRV_NAME)) {
 		printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
@@ -181,10 +182,9 @@ static struct ide_host *idecs_register(unsigned long io, unsigned long ctl,
 	memset(&hw, 0, sizeof(hw));
 	ide_std_init_ports(&hw, io, ctl);
 	hw.irq = irq;
-	hw.chipset = ide_pci;
 	hw.dev = &handle->dev;
 
-	rc = ide_host_add(&idecs_port_info, hws, &host);
+	rc = ide_host_add(&idecs_port_info, hws, 1, &host);
 	if (rc)
 		goto out_release;
 
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index a9fbe2c3121..6a1de216970 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -82,7 +82,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
 					sector_t block)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	u16 nsectors = (u16)rq->nr_sectors;
+	u16 nsectors = (u16)blk_rq_sectors(rq);
 	u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
 	u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
 	struct ide_cmd cmd;
@@ -90,7 +90,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
 	ide_startstop_t rc;
 
 	if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
-		if (block + rq->nr_sectors > 1ULL << 28)
+		if (block + blk_rq_sectors(rq) > 1ULL << 28)
 			dma = 0;
 		else
 			lba48 = 0;
@@ -195,9 +195,9 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
 
 	ledtrig_ide_activity();
 
-	pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n",
+	pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n",
 		 drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
-		 (unsigned long long)block, rq->nr_sectors,
+		 (unsigned long long)block, blk_rq_sectors(rq),
 		 (unsigned long)rq->buffer);
 
 	if (hwif->rw_disk)
@@ -302,14 +302,12 @@ static const struct drive_list_entry hpa_list[] = {
 	{ NULL, NULL }
 };
 
-static void idedisk_check_hpa(ide_drive_t *drive)
+static u64 ide_disk_hpa_get_native_capacity(ide_drive_t *drive, int lba48)
 {
-	unsigned long long capacity, set_max;
-	int lba48 = ata_id_lba48_enabled(drive->id);
+	u64 capacity, set_max;
 
 	capacity = drive->capacity64;
-
-	set_max = idedisk_read_native_max_address(drive, lba48);
+	set_max = idedisk_read_native_max_address(drive, lba48);
 
 	if (ide_in_drive_list(drive->id, hpa_list)) {
 		/*
@@ -320,9 +318,31 @@ static void idedisk_check_hpa(ide_drive_t *drive)
 		set_max--;
 	}
 
+	return set_max;
+}
+
+static u64 ide_disk_hpa_set_capacity(ide_drive_t *drive, u64 set_max, int lba48)
+{
+	set_max = idedisk_set_max_address(drive, set_max, lba48);
+	if (set_max)
+		drive->capacity64 = set_max;
+
+	return set_max;
+}
+
+static void idedisk_check_hpa(ide_drive_t *drive)
+{
+	u64 capacity, set_max;
+	int lba48 = ata_id_lba48_enabled(drive->id);
+
+	capacity = drive->capacity64;
+	set_max = ide_disk_hpa_get_native_capacity(drive, lba48);
+
 	if (set_max <= capacity)
 		return;
 
+	drive->probed_capacity = set_max;
+
 	printk(KERN_INFO "%s: Host Protected Area detected.\n"
 			 "\tcurrent capacity is %llu sectors (%llu MB)\n"
 			 "\tnative capacity is %llu sectors (%llu MB)\n",
@@ -330,13 +350,13 @@ static void idedisk_check_hpa(ide_drive_t *drive)
 			 capacity, sectors_to_MB(capacity),
 			 set_max, sectors_to_MB(set_max));
 
-	set_max = idedisk_set_max_address(drive, set_max, lba48);
+	if ((drive->dev_flags & IDE_DFLAG_NOHPA) == 0)
+		return;
 
-	if (set_max) {
-		drive->capacity64 = set_max;
+	set_max = ide_disk_hpa_set_capacity(drive, set_max, lba48);
+	if (set_max)
 		printk(KERN_INFO "%s: Host Protected Area disabled.\n",
 		       drive->name);
-	}
 }
 
 static int ide_disk_get_capacity(ide_drive_t *drive)
@@ -358,6 +378,8 @@ static int ide_disk_get_capacity(ide_drive_t *drive)
 		drive->capacity64 = drive->cyl * drive->head * drive->sect;
 	}
 
+	drive->probed_capacity = drive->capacity64;
+
 	if (lba) {
 		drive->dev_flags |= IDE_DFLAG_LBA;
 
@@ -376,7 +398,7 @@ static int ide_disk_get_capacity(ide_drive_t *drive)
376 "%llu sectors (%llu MB)\n", 398 "%llu sectors (%llu MB)\n",
377 drive->name, (unsigned long long)drive->capacity64, 399 drive->name, (unsigned long long)drive->capacity64,
378 sectors_to_MB(drive->capacity64)); 400 sectors_to_MB(drive->capacity64));
379 drive->capacity64 = 1ULL << 28; 401 drive->probed_capacity = drive->capacity64 = 1ULL << 28;
380 } 402 }
381 403
382 if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && 404 if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) &&
@@ -392,6 +414,34 @@ static int ide_disk_get_capacity(ide_drive_t *drive)
 	return 0;
 }
 
+static u64 ide_disk_set_capacity(ide_drive_t *drive, u64 capacity)
+{
+	u64 set = min(capacity, drive->probed_capacity);
+	u16 *id = drive->id;
+	int lba48 = ata_id_lba48_enabled(id);
+
+	if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 ||
+	    ata_id_hpa_enabled(id) == 0)
+		goto out;
+
+	/*
+	 * according to the spec the SET MAX ADDRESS command shall be
+	 * immediately preceded by a READ NATIVE MAX ADDRESS command
+	 */
+	capacity = ide_disk_hpa_get_native_capacity(drive, lba48);
+	if (capacity == 0)
+		goto out;
+
+	set = ide_disk_hpa_set_capacity(drive, set, lba48);
+	if (set) {
+		/* needed for ->resume to disable HPA */
+		drive->dev_flags |= IDE_DFLAG_NOHPA;
+		return set;
+	}
+out:
+	return drive->capacity64;
+}
+
 static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
 {
 	ide_drive_t *drive = q->queuedata;
@@ -411,7 +461,6 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
 	cmd->protocol = ATA_PROT_NODATA;
 
 	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
-	rq->cmd_flags |= REQ_SOFTBARRIER;
 	rq->special = cmd;
 }
 
@@ -429,14 +478,14 @@ static int set_multcount(ide_drive_t *drive, int arg)
 	if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff))
 		return -EINVAL;
 
-	if (drive->special.b.set_multmode)
+	if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
 		return -EBUSY;
 
 	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
 	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 
 	drive->mult_req = arg;
-	drive->special.b.set_multmode = 1;
+	drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
 	error = blk_execute_rq(drive->queue, NULL, rq, 0);
 	blk_put_request(rq);
 
@@ -640,7 +689,7 @@ static void ide_disk_setup(ide_drive_t *drive)
 	}
 
 	printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
-	       q->max_sectors / 2);
+	       queue_max_sectors(q) / 2);
 
 	if (ata_id_is_ssd(id))
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
@@ -741,6 +790,7 @@ static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
 
 const struct ide_disk_ops ide_ata_disk_ops = {
 	.check		= ide_disk_check,
+	.set_capacity	= ide_disk_set_capacity,
 	.get_capacity	= ide_disk_get_capacity,
 	.setup		= ide_disk_setup,
 	.flush		= ide_disk_flush,
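Many ide-disk.c lines above (and their counterparts in ide-cd.c and ide-floppy.c) are the same mechanical conversion: drivers stop peeking at struct request and struct request_queue fields and use the block-layer accessors instead. The mapping, as used throughout this series:

	sector_t pos       = blk_rq_pos(rq);      /* was rq->sector / rq->hard_sector */
	unsigned int nsect = blk_rq_sectors(rq);  /* was rq->nr_sectors / rq->hard_nr_sectors */
	unsigned int bytes = blk_rq_bytes(rq);    /* was rq->data_len */
	unsigned int lbs   = queue_logical_block_size(q); /* was q->hardsect_size */
	unsigned int maxs  = queue_max_sectors(q);        /* was q->max_sectors */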
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index a0b8cab1d9a..219e6fb78dc 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -103,7 +103,7 @@ ide_startstop_t ide_dma_intr(ide_drive_t *drive)
 			ide_finish_cmd(drive, cmd, stat);
 		else
 			ide_complete_rq(drive, 0,
-					cmd->rq->nr_sectors << 9);
+					blk_rq_sectors(cmd->rq) << 9);
 		return ide_stopped;
 	}
 	printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
@@ -347,7 +347,6 @@ u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
 
 	return mode;
 }
-EXPORT_SYMBOL_GPL(ide_find_dma_mode);
 
 static int ide_tune_dma(ide_drive_t *drive)
 {
@@ -510,23 +509,11 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
 	/*
 	 * un-busy drive etc and make sure request is sane
 	 */
-
 	rq = hwif->rq;
-	if (!rq)
-		goto out;
-
-	hwif->rq = NULL;
-
-	rq->errors = 0;
-
-	if (!rq->bio)
-		goto out;
-
-	rq->sector = rq->bio->bi_sector;
-	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
-	rq->hard_cur_sectors = rq->current_nr_sectors;
-	rq->buffer = bio_data(rq->bio);
-out:
+	if (rq) {
+		hwif->rq = NULL;
+		rq->errors = 0;
+	}
 	return ret;
 }
 
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
index 5d5fb961b5c..2b914197961 100644
--- a/drivers/ide/ide-eh.c
+++ b/drivers/ide/ide-eh.c
@@ -52,7 +52,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq,
 	}
 
 	if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
-		drive->special.b.recalibrate = 1;
+		drive->special_flags |= IDE_SFLAG_RECALIBRATE;
 
 	++rq->errors;
 
@@ -268,9 +268,8 @@ static void ide_disk_pre_reset(ide_drive_t *drive)
268{ 268{
269 int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1; 269 int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1;
270 270
271 drive->special.all = 0; 271 drive->special_flags =
272 drive->special.b.set_geometry = legacy; 272 legacy ? (IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE) : 0;
273 drive->special.b.recalibrate = legacy;
274 273
275 drive->mult_count = 0; 274 drive->mult_count = 0;
276 drive->dev_flags &= ~IDE_DFLAG_PARKED; 275 drive->dev_flags &= ~IDE_DFLAG_PARKED;
@@ -280,7 +279,7 @@ static void ide_disk_pre_reset(ide_drive_t *drive)
280 drive->mult_req = 0; 279 drive->mult_req = 0;
281 280
282 if (drive->mult_req != drive->mult_count) 281 if (drive->mult_req != drive->mult_count)
283 drive->special.b.set_multmode = 1; 282 drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
284} 283}
285 284
286static void pre_reset(ide_drive_t *drive) 285static void pre_reset(ide_drive_t *drive)
@@ -408,8 +407,9 @@ static ide_startstop_t do_reset1(ide_drive_t *drive, int do_not_try_atapi)
408 /* more than enough time */ 407 /* more than enough time */
409 udelay(10); 408 udelay(10);
410 /* clear SRST, leave nIEN (unless device is on the quirk list) */ 409 /* clear SRST, leave nIEN (unless device is on the quirk list) */
411 tp_ops->write_devctl(hwif, (drive->quirk_list == 2 ? 0 : ATA_NIEN) | 410 tp_ops->write_devctl(hwif,
412 ATA_DEVCTL_OBS); 411 ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) ? 0 : ATA_NIEN) |
412 ATA_DEVCTL_OBS);
413 /* more than enough time */ 413 /* more than enough time */
414 udelay(10); 414 udelay(10);
415 hwif->poll_timeout = jiffies + WAIT_WORSTCASE; 415 hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 2b4868d95f8..650981758f1 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -134,13 +134,17 @@ static ide_startstop_t ide_floppy_issue_pc(ide_drive_t *drive,
134 drive->pc = pc; 134 drive->pc = pc;
135 135
136 if (pc->retries > IDEFLOPPY_MAX_PC_RETRIES) { 136 if (pc->retries > IDEFLOPPY_MAX_PC_RETRIES) {
137 unsigned int done = blk_rq_bytes(drive->hwif->rq);
138
137 if (!(pc->flags & PC_FLAG_SUPPRESS_ERROR)) 139 if (!(pc->flags & PC_FLAG_SUPPRESS_ERROR))
138 ide_floppy_report_error(floppy, pc); 140 ide_floppy_report_error(floppy, pc);
141
139 /* Giving up */ 142 /* Giving up */
140 pc->error = IDE_DRV_ERROR_GENERAL; 143 pc->error = IDE_DRV_ERROR_GENERAL;
141 144
142 drive->failed_pc = NULL; 145 drive->failed_pc = NULL;
143 drive->pc_callback(drive, 0); 146 drive->pc_callback(drive, 0);
147 ide_complete_rq(drive, -EIO, done);
144 return ide_stopped; 148 return ide_stopped;
145 } 149 }
146 150
@@ -190,7 +194,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
190{ 194{
191 struct ide_disk_obj *floppy = drive->driver_data; 195 struct ide_disk_obj *floppy = drive->driver_data;
192 int block = sector / floppy->bs_factor; 196 int block = sector / floppy->bs_factor;
193 int blocks = rq->nr_sectors / floppy->bs_factor; 197 int blocks = blk_rq_sectors(rq) / floppy->bs_factor;
194 int cmd = rq_data_dir(rq); 198 int cmd = rq_data_dir(rq);
195 199
196 ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks); 200 ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks);
@@ -216,16 +220,14 @@ static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
216 ide_init_pc(pc); 220 ide_init_pc(pc);
217 memcpy(pc->c, rq->cmd, sizeof(pc->c)); 221 memcpy(pc->c, rq->cmd, sizeof(pc->c));
218 pc->rq = rq; 222 pc->rq = rq;
219 if (rq->data_len && rq_data_dir(rq) == WRITE) 223 if (blk_rq_bytes(rq)) {
220 pc->flags |= PC_FLAG_WRITING;
221 pc->buf = rq->data;
222 if (rq->bio)
223 pc->flags |= PC_FLAG_DMA_OK; 224 pc->flags |= PC_FLAG_DMA_OK;
224 /* 225 if (rq_data_dir(rq) == WRITE)
225 * possibly problematic, doesn't look like ide-floppy correctly 226 pc->flags |= PC_FLAG_WRITING;
226 * handled scattered requests if dma fails... 227 }
227 */ 228 /* pio will be performed by ide_pio_bytes() which handles sg fine */
228 pc->req_xfer = pc->buf_size = rq->data_len; 229 pc->buf = NULL;
230 pc->req_xfer = pc->buf_size = blk_rq_bytes(rq);
229} 231}
230 232
231static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, 233static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
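
In the idefloppy_blockpc_cmd() hunk just above, pc->buf no longer caches a flat kernel buffer: it is set to NULL, the transfer length comes from blk_rq_bytes(), and any request with a payload is eligible for DMA, since PIO now goes through the scatterlist-aware ide_pio_bytes() path. A userspace model of the new flag and length derivation, with stubbed types and illustrative flag values:

    #include <stdio.h>

    #define PC_FLAG_DMA_OK   (1 << 0)   /* values illustrative only */
    #define PC_FLAG_WRITING  (1 << 1)
    enum data_dir { DIR_READ, DIR_WRITE };

    struct request  { unsigned int bytes; enum data_dir dir; };
    struct atapi_pc { unsigned int flags, req_xfer, buf_size; void *buf; };

    static void blockpc_cmd(struct atapi_pc *pc, const struct request *rq)
    {
        if (rq->bytes) {                /* any payload: sg handles it */
            pc->flags |= PC_FLAG_DMA_OK;
            if (rq->dir == DIR_WRITE)
                pc->flags |= PC_FLAG_WRITING;
        }
        pc->buf = NULL;                 /* PIO walks the sg list, not a flat buffer */
        pc->req_xfer = pc->buf_size = rq->bytes;
    }

    int main(void)
    {
        struct request rq = { 512, DIR_WRITE };
        struct atapi_pc pc = { 0 };
        blockpc_cmd(&pc, &rq);
        printf("flags=%#x xfer=%u\n", pc.flags, pc.req_xfer);
        return 0;
    }
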
@@ -257,16 +259,16 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
257 goto out_end; 259 goto out_end;
258 } 260 }
259 if (blk_fs_request(rq)) { 261 if (blk_fs_request(rq)) {
260 if (((long)rq->sector % floppy->bs_factor) || 262 if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
261 (rq->nr_sectors % floppy->bs_factor)) { 263 (blk_rq_sectors(rq) % floppy->bs_factor)) {
262 printk(KERN_ERR PFX "%s: unsupported r/w rq size\n", 264 printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
263 drive->name); 265 drive->name);
264 goto out_end; 266 goto out_end;
265 } 267 }
266 pc = &floppy->queued_pc; 268 pc = &floppy->queued_pc;
267 idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block); 269 idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
268 } else if (blk_special_request(rq)) { 270 } else if (blk_special_request(rq) || blk_sense_request(rq)) {
269 pc = (struct ide_atapi_pc *) rq->buffer; 271 pc = (struct ide_atapi_pc *)rq->special;
270 } else if (blk_pc_request(rq)) { 272 } else if (blk_pc_request(rq)) {
271 pc = &floppy->queued_pc; 273 pc = &floppy->queued_pc;
272 idefloppy_blockpc_cmd(floppy, pc, rq); 274 idefloppy_blockpc_cmd(floppy, pc, rq);
@@ -275,6 +277,8 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
275 goto out_end; 277 goto out_end;
276 } 278 }
277 279
280 ide_prep_sense(drive, rq);
281
278 memset(&cmd, 0, sizeof(cmd)); 282 memset(&cmd, 0, sizeof(cmd));
279 283
280 if (rq_data_dir(rq)) 284 if (rq_data_dir(rq))
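
ide_floppy_do_request() now calls ide_prep_sense() before building the command, part of the ATAPI conversion in this merge that arms a request-sense request ahead of time so a failing command can queue it immediately. A generic prepare-then-issue sketch, assuming only that the sense command is REQUEST SENSE (opcode 0x03); the struct is a stand-in:

    #include <stdio.h>
    #include <string.h>

    struct sense_rq { unsigned char cmd[16]; int prepared; };

    /* models the idea behind ide_prep_sense(): set the sense request
     * up before the command starts, not in the failure path */
    static void prep_sense(struct sense_rq *s)
    {
        memset(s, 0, sizeof(*s));
        s->cmd[0] = 0x03;               /* REQUEST SENSE */
        s->prepared = 1;
    }

    int main(void)
    {
        struct sense_rq s;
        prep_sense(&s);                 /* done before the command is issued */
        printf("sense prepared: %d\n", s.prepared);
        return 0;
    }
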
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 4b6b71e2cdf..214119026b3 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -287,6 +287,19 @@ static int ide_gd_media_changed(struct gendisk *disk)
287 return ret; 287 return ret;
288} 288}
289 289
290static unsigned long long ide_gd_set_capacity(struct gendisk *disk,
291 unsigned long long capacity)
292{
293 struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
294 ide_drive_t *drive = idkp->drive;
295 const struct ide_disk_ops *disk_ops = drive->disk_ops;
296
297 if (disk_ops->set_capacity)
298 return disk_ops->set_capacity(drive, capacity);
299
300 return drive->capacity64;
301}
302
290static int ide_gd_revalidate_disk(struct gendisk *disk) 303static int ide_gd_revalidate_disk(struct gendisk *disk)
291{ 304{
292 struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); 305 struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
@@ -315,6 +328,7 @@ static struct block_device_operations ide_gd_ops = {
315 .locked_ioctl = ide_gd_ioctl, 328 .locked_ioctl = ide_gd_ioctl,
316 .getgeo = ide_gd_getgeo, 329 .getgeo = ide_gd_getgeo,
317 .media_changed = ide_gd_media_changed, 330 .media_changed = ide_gd_media_changed,
331 .set_capacity = ide_gd_set_capacity,
318 .revalidate_disk = ide_gd_revalidate_disk 332 .revalidate_disk = ide_gd_revalidate_disk
319}; 333};
320 334
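
The new ->set_capacity hook in block_device_operations lets the block layer ask a driver to unlock extra capacity (the host-protected-area case) when a partition runs past the reported end of the disk; ide-gd forwards it to the media driver's disk_ops when one is provided, otherwise it reports the current capacity64 unchanged. A userspace model of that delegation (types reduced to the fields involved):

    #include <stdio.h>

    typedef unsigned long long u64;

    struct disk_ops { u64 (*set_capacity)(void *drive, u64 cap); };
    struct drive    { u64 capacity64; const struct disk_ops *ops; };

    /* models ide_gd_set_capacity(): delegate if the media driver can resize */
    static u64 gd_set_capacity(struct drive *d, u64 want)
    {
        if (d->ops && d->ops->set_capacity)
            return d->ops->set_capacity(d, want);
        return d->capacity64;   /* unchanged: this driver cannot unlock more */
    }

    int main(void)
    {
        struct drive d = { .capacity64 = 1000, .ops = NULL };
        printf("capacity=%llu\n", gd_set_capacity(&d, 2000));
        return 0;
    }
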
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
index 7812ca0be13..54d7c4685d2 100644
--- a/drivers/ide/ide-generic.c
+++ b/drivers/ide/ide-generic.c
@@ -29,6 +29,7 @@ MODULE_PARM_DESC(probe_mask, "probe mask for legacy ISA IDE ports");
29 29
30static const struct ide_port_info ide_generic_port_info = { 30static const struct ide_port_info ide_generic_port_info = {
31 .host_flags = IDE_HFLAG_NO_DMA, 31 .host_flags = IDE_HFLAG_NO_DMA,
32 .chipset = ide_generic,
32}; 33};
33 34
34#ifdef CONFIG_ARM 35#ifdef CONFIG_ARM
@@ -85,7 +86,7 @@ static void ide_generic_check_pci_legacy_iobases(int *primary, int *secondary)
85 86
86static int __init ide_generic_init(void) 87static int __init ide_generic_init(void)
87{ 88{
88 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 89 struct ide_hw hw, *hws[] = { &hw };
89 unsigned long io_addr; 90 unsigned long io_addr;
90 int i, rc = 0, primary = 0, secondary = 0; 91 int i, rc = 0, primary = 0, secondary = 0;
91 92
@@ -132,9 +133,7 @@ static int __init ide_generic_init(void)
132#else 133#else
133 hw.irq = legacy_irqs[i]; 134 hw.irq = legacy_irqs[i];
134#endif 135#endif
135 hw.chipset = ide_generic; 136 rc = ide_host_add(&ide_generic_port_info, hws, 1, NULL);
136
137 rc = ide_host_add(&ide_generic_port_info, hws, NULL);
138 if (rc) { 137 if (rc) {
139 release_region(io_addr + 0x206, 1); 138 release_region(io_addr + 0x206, 1);
140 release_region(io_addr, 8); 139 release_region(io_addr, 8);
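
Here and in every host driver below, hw_regs_t becomes struct ide_hw, the chipset field migrates from the per-port hw structure into struct ide_port_info, and ide_host_add()/ide_host_alloc() take an explicit port count instead of a fixed array NULL-padded to four entries. A stub model of the new call shape (types cut down to what the change touches; values illustrative):

    #include <stdio.h>

    struct ide_hw        { unsigned long io_addr; int irq; };
    struct ide_port_info { unsigned int host_flags; int chipset; };

    /* models the new signature: callers pass exactly as many ports
     * as they declare, no NULL padding to MAX_HOST_PORTS */
    static int host_add(const struct ide_port_info *d,
                        struct ide_hw **hws, unsigned int n_ports)
    {
        for (unsigned int i = 0; i < n_ports; i++)
            printf("port %u: io=%#lx irq=%d chipset=%d\n",
                   i, hws[i]->io_addr, hws[i]->irq, d->chipset);
        return 0;
    }

    int main(void)
    {
        struct ide_hw hw = { 0x1f0, 14 }, *hws[] = { &hw };
        struct ide_port_info info = { 0, 1 /* stands in for ide_generic */ };
        return host_add(&info, hws, 1);
    }

Moving chipset into the port_info also removes the per-call "hw.chipset = ide_generic" boilerplate visible in the deleted lines of each driver.
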
diff --git a/drivers/ide/ide-h8300.c b/drivers/ide/ide-h8300.c
index c06ebdc4a13..520f42c5445 100644
--- a/drivers/ide/ide-h8300.c
+++ b/drivers/ide/ide-h8300.c
@@ -64,26 +64,26 @@ static const struct ide_tp_ops h8300_tp_ops = {
64 64
65#define H8300_IDE_GAP (2) 65#define H8300_IDE_GAP (2)
66 66
67static inline void hw_setup(hw_regs_t *hw) 67static inline void hw_setup(struct ide_hw *hw)
68{ 68{
69 int i; 69 int i;
70 70
71 memset(hw, 0, sizeof(hw_regs_t)); 71 memset(hw, 0, sizeof(*hw));
72 for (i = 0; i <= 7; i++) 72 for (i = 0; i <= 7; i++)
73 hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i; 73 hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i;
74 hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT; 74 hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT;
75 hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ; 75 hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ;
76 hw->chipset = ide_generic;
77} 76}
78 77
79static const struct ide_port_info h8300_port_info = { 78static const struct ide_port_info h8300_port_info = {
80 .tp_ops = &h8300_tp_ops, 79 .tp_ops = &h8300_tp_ops,
81 .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA, 80 .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA,
81 .chipset = ide_generic,
82}; 82};
83 83
84static int __init h8300_ide_init(void) 84static int __init h8300_ide_init(void)
85{ 85{
86 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 86 struct ide_hw hw, *hws[] = { &hw };
87 87
88 printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n"); 88 printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n");
89 89
@@ -96,7 +96,7 @@ static int __init h8300_ide_init(void)
96 96
97 hw_setup(&hw); 97 hw_setup(&hw);
98 98
99 return ide_host_add(&h8300_port_info, hws, NULL); 99 return ide_host_add(&h8300_port_info, hws, 1, NULL);
100 100
101out_busy: 101out_busy:
102 printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n"); 102 printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n");
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 35dc38d3b2c..272cc38f6db 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -116,9 +116,9 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
116unsigned int ide_rq_bytes(struct request *rq) 116unsigned int ide_rq_bytes(struct request *rq)
117{ 117{
118 if (blk_pc_request(rq)) 118 if (blk_pc_request(rq))
119 return rq->data_len; 119 return blk_rq_bytes(rq);
120 else 120 else
121 return rq->hard_cur_sectors << 9; 121 return blk_rq_cur_sectors(rq) << 9;
122} 122}
123EXPORT_SYMBOL_GPL(ide_rq_bytes); 123EXPORT_SYMBOL_GPL(ide_rq_bytes);
124 124
@@ -133,7 +133,7 @@ int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
133 * and complete the whole request right now 133 * and complete the whole request right now
134 */ 134 */
135 if (blk_noretry_request(rq) && error <= 0) 135 if (blk_noretry_request(rq) && error <= 0)
136 nr_bytes = rq->hard_nr_sectors << 9; 136 nr_bytes = blk_rq_sectors(rq) << 9;
137 137
138 rc = ide_end_rq(drive, rq, error, nr_bytes); 138 rc = ide_end_rq(drive, rq, error, nr_bytes);
139 if (rc == 0) 139 if (rc == 0)
@@ -184,29 +184,42 @@ static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
184 tf->command = ATA_CMD_SET_MULTI; 184 tf->command = ATA_CMD_SET_MULTI;
185} 185}
186 186
187static ide_startstop_t ide_disk_special(ide_drive_t *drive) 187/**
188 * do_special - issue some special commands
189 * @drive: drive the command is for
190 *
191 * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
192 * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
193 */
194
195static ide_startstop_t do_special(ide_drive_t *drive)
188{ 196{
189 special_t *s = &drive->special;
190 struct ide_cmd cmd; 197 struct ide_cmd cmd;
191 198
199#ifdef DEBUG
200 printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__,
201 drive->special_flags);
202#endif
203 if (drive->media != ide_disk) {
204 drive->special_flags = 0;
205 drive->mult_req = 0;
206 return ide_stopped;
207 }
208
192 memset(&cmd, 0, sizeof(cmd)); 209 memset(&cmd, 0, sizeof(cmd));
193 cmd.protocol = ATA_PROT_NODATA; 210 cmd.protocol = ATA_PROT_NODATA;
194 211
195 if (s->b.set_geometry) { 212 if (drive->special_flags & IDE_SFLAG_SET_GEOMETRY) {
196 s->b.set_geometry = 0; 213 drive->special_flags &= ~IDE_SFLAG_SET_GEOMETRY;
197 ide_tf_set_specify_cmd(drive, &cmd.tf); 214 ide_tf_set_specify_cmd(drive, &cmd.tf);
198 } else if (s->b.recalibrate) { 215 } else if (drive->special_flags & IDE_SFLAG_RECALIBRATE) {
199 s->b.recalibrate = 0; 216 drive->special_flags &= ~IDE_SFLAG_RECALIBRATE;
200 ide_tf_set_restore_cmd(drive, &cmd.tf); 217 ide_tf_set_restore_cmd(drive, &cmd.tf);
201 } else if (s->b.set_multmode) { 218 } else if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) {
202 s->b.set_multmode = 0; 219 drive->special_flags &= ~IDE_SFLAG_SET_MULTMODE;
203 ide_tf_set_setmult_cmd(drive, &cmd.tf); 220 ide_tf_set_setmult_cmd(drive, &cmd.tf);
204 } else if (s->all) { 221 } else
205 int special = s->all; 222 BUG();
206 s->all = 0;
207 printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
208 return ide_stopped;
209 }
210 223
211 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; 224 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
212 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; 225 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
@@ -217,45 +230,13 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive)
217 return ide_started; 230 return ide_started;
218} 231}
219 232
220/**
221 * do_special - issue some special commands
222 * @drive: drive the command is for
223 *
224 * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
225 * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
226 *
227 * It used to do much more, but has been scaled back.
228 */
229
230static ide_startstop_t do_special (ide_drive_t *drive)
231{
232 special_t *s = &drive->special;
233
234#ifdef DEBUG
235 printk("%s: do_special: 0x%02x\n", drive->name, s->all);
236#endif
237 if (drive->media == ide_disk)
238 return ide_disk_special(drive);
239
240 s->all = 0;
241 drive->mult_req = 0;
242 return ide_stopped;
243}
244
245void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd) 233void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
246{ 234{
247 ide_hwif_t *hwif = drive->hwif; 235 ide_hwif_t *hwif = drive->hwif;
248 struct scatterlist *sg = hwif->sg_table; 236 struct scatterlist *sg = hwif->sg_table;
249 struct request *rq = cmd->rq; 237 struct request *rq = cmd->rq;
250 238
251 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 239 cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
252 sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
253 cmd->sg_nents = 1;
254 } else if (!rq->bio) {
255 sg_init_one(sg, rq->data, rq->data_len);
256 cmd->sg_nents = 1;
257 } else
258 cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
259} 240}
260EXPORT_SYMBOL_GPL(ide_map_sg); 241EXPORT_SYMBOL_GPL(ide_map_sg);
261 242
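
Two consolidations land in the hunk above: do_special() absorbs the old ide_disk_special(), consuming exactly one pending IDE_SFLAG_* bit per call and BUG()ing if invoked with none set (start_request() only routes here when special_flags != 0), and ide_map_sg() collapses to a single blk_rq_map_sg() call now that every request type carries its payload in a bio. A userspace model of the consume-one-flag dispatch (flag values assumed, as before):

    #include <assert.h>
    #include <stdio.h>

    #define SFLAG_SET_GEOMETRY (1 << 0)   /* bit values assumed */
    #define SFLAG_RECALIBRATE  (1 << 1)
    #define SFLAG_SET_MULTMODE (1 << 2)

    static const char *consume_one(unsigned char *flags)
    {
        if (*flags & SFLAG_SET_GEOMETRY) {
            *flags &= ~SFLAG_SET_GEOMETRY;
            return "ATA_CMD_INIT_DEV_PARAMS";
        }
        if (*flags & SFLAG_RECALIBRATE) {
            *flags &= ~SFLAG_RECALIBRATE;
            return "ATA_CMD_RESTORE";
        }
        if (*flags & SFLAG_SET_MULTMODE) {
            *flags &= ~SFLAG_SET_MULTMODE;
            return "ATA_CMD_SET_MULTI";
        }
        assert(0);      /* models BUG(): callers guarantee a bit is set */
        return NULL;
    }

    int main(void)
    {
        unsigned char f = SFLAG_SET_GEOMETRY | SFLAG_RECALIBRATE;
        while (f)
            printf("issue %s\n", consume_one(&f));
        return 0;
    }

Each flag produces one taskfile command and one pass through the state machine, which is why the loop issues them one at a time rather than batching.
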
@@ -286,7 +267,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
286 267
287 if (cmd) { 268 if (cmd) {
288 if (cmd->protocol == ATA_PROT_PIO) { 269 if (cmd->protocol == ATA_PROT_PIO) {
289 ide_init_sg_cmd(cmd, rq->nr_sectors << 9); 270 ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
290 ide_map_sg(drive, cmd); 271 ide_map_sg(drive, cmd);
291 } 272 }
292 273
@@ -358,7 +339,8 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
358 printk(KERN_ERR "%s: drive not ready for command\n", drive->name); 339 printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
359 return startstop; 340 return startstop;
360 } 341 }
361 if (!drive->special.all) { 342
343 if (drive->special_flags == 0) {
362 struct ide_driver *drv; 344 struct ide_driver *drv;
363 345
364 /* 346 /*
@@ -371,7 +353,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
371 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) 353 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
372 return execute_drive_cmd(drive, rq); 354 return execute_drive_cmd(drive, rq);
373 else if (blk_pm_request(rq)) { 355 else if (blk_pm_request(rq)) {
374 struct request_pm_state *pm = rq->data; 356 struct request_pm_state *pm = rq->special;
375#ifdef DEBUG_PM 357#ifdef DEBUG_PM
376 printk("%s: start_power_step(step: %d)\n", 358 printk("%s: start_power_step(step: %d)\n",
377 drive->name, pm->pm_step); 359 drive->name, pm->pm_step);
@@ -394,7 +376,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
394 376
395 drv = *(struct ide_driver **)rq->rq_disk->private_data; 377 drv = *(struct ide_driver **)rq->rq_disk->private_data;
396 378
397 return drv->do_request(drive, rq, rq->sector); 379 return drv->do_request(drive, rq, blk_rq_pos(rq));
398 } 380 }
399 return do_special(drive); 381 return do_special(drive);
400kill_rq: 382kill_rq:
@@ -484,6 +466,9 @@ void do_ide_request(struct request_queue *q)
484 466
485 spin_unlock_irq(q->queue_lock); 467 spin_unlock_irq(q->queue_lock);
486 468
469 /* HLD do_request() callback might sleep, make sure it's okay */
470 might_sleep();
471
487 if (ide_lock_host(host, hwif)) 472 if (ide_lock_host(host, hwif))
488 goto plug_device_2; 473 goto plug_device_2;
489 474
@@ -491,10 +476,10 @@ void do_ide_request(struct request_queue *q)
491 476
492 if (!ide_lock_port(hwif)) { 477 if (!ide_lock_port(hwif)) {
493 ide_hwif_t *prev_port; 478 ide_hwif_t *prev_port;
479
480 WARN_ON_ONCE(hwif->rq);
494repeat: 481repeat:
495 prev_port = hwif->host->cur_port; 482 prev_port = hwif->host->cur_port;
496 hwif->rq = NULL;
497
498 if (drive->dev_flags & IDE_DFLAG_SLEEPING && 483 if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
499 time_after(drive->sleep, jiffies)) { 484 time_after(drive->sleep, jiffies)) {
500 ide_unlock_port(hwif); 485 ide_unlock_port(hwif);
@@ -503,11 +488,15 @@ repeat:
503 488
504 if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) && 489 if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
505 hwif != prev_port) { 490 hwif != prev_port) {
491 ide_drive_t *cur_dev =
492 prev_port ? prev_port->cur_dev : NULL;
493
506 /* 494 /*
507 * set nIEN for previous port, drives in the 495 * set nIEN for previous port, drives in the
508 * quirk_list may not like intr setups/cleanups 496 * quirk list may not like intr setups/cleanups
509 */ 497 */
510 if (prev_port && prev_port->cur_dev->quirk_list == 0) 498 if (cur_dev &&
499 (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
511 prev_port->tp_ops->write_devctl(prev_port, 500 prev_port->tp_ops->write_devctl(prev_port,
512 ATA_NIEN | 501 ATA_NIEN |
513 ATA_DEVCTL_OBS); 502 ATA_DEVCTL_OBS);
@@ -523,7 +512,9 @@ repeat:
523 * we know that the queue isn't empty, but this can happen 512 * we know that the queue isn't empty, but this can happen
524 * if the q->prep_rq_fn() decides to kill a request 513 * if the q->prep_rq_fn() decides to kill a request
525 */ 514 */
526 rq = elv_next_request(drive->queue); 515 if (!rq)
516 rq = blk_fetch_request(drive->queue);
517
527 spin_unlock_irq(q->queue_lock); 518 spin_unlock_irq(q->queue_lock);
528 spin_lock_irq(&hwif->lock); 519 spin_lock_irq(&hwif->lock);
529 520
@@ -535,7 +526,7 @@ repeat:
535 /* 526 /*
536 * Sanity: don't accept a request that isn't a PM request 527 * Sanity: don't accept a request that isn't a PM request
537 * if we are currently power managed. This is very important as 528 * if we are currently power managed. This is very important as
538 * blk_stop_queue() doesn't prevent the elv_next_request() 529 * blk_stop_queue() doesn't prevent the blk_fetch_request()
539 * above to return us whatever is in the queue. Since we call 530 * above to return us whatever is in the queue. Since we call
540 * ide_do_request() ourselves, we end up taking requests while 531 * ide_do_request() ourselves, we end up taking requests while
541 * the queue is blocked... 532 * the queue is blocked...
@@ -559,8 +550,11 @@ repeat:
559 startstop = start_request(drive, rq); 550 startstop = start_request(drive, rq);
560 spin_lock_irq(&hwif->lock); 551 spin_lock_irq(&hwif->lock);
561 552
562 if (startstop == ide_stopped) 553 if (startstop == ide_stopped) {
554 rq = hwif->rq;
555 hwif->rq = NULL;
563 goto repeat; 556 goto repeat;
557 }
564 } else 558 } else
565 goto plug_device; 559 goto plug_device;
566out: 560out:
@@ -576,18 +570,24 @@ plug_device:
576plug_device_2: 570plug_device_2:
577 spin_lock_irq(q->queue_lock); 571 spin_lock_irq(q->queue_lock);
578 572
573 if (rq)
574 blk_requeue_request(q, rq);
579 if (!elv_queue_empty(q)) 575 if (!elv_queue_empty(q))
580 blk_plug_device(q); 576 blk_plug_device(q);
581} 577}
582 578
583static void ide_plug_device(ide_drive_t *drive) 579static void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
584{ 580{
585 struct request_queue *q = drive->queue; 581 struct request_queue *q = drive->queue;
586 unsigned long flags; 582 unsigned long flags;
587 583
588 spin_lock_irqsave(q->queue_lock, flags); 584 spin_lock_irqsave(q->queue_lock, flags);
585
586 if (rq)
587 blk_requeue_request(q, rq);
589 if (!elv_queue_empty(q)) 588 if (!elv_queue_empty(q))
590 blk_plug_device(q); 589 blk_plug_device(q);
590
591 spin_unlock_irqrestore(q->queue_lock, flags); 591 spin_unlock_irqrestore(q->queue_lock, flags);
592} 592}
593 593
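
ide_plug_device() grows into ide_requeue_and_plug(): the dequeued in-flight request, if any, is put back with blk_requeue_request() under the queue lock before the queue is plugged. This pairs with the switch from elv_next_request() to blk_fetch_request() above, which removes the request from the queue and makes the driver its owner. A userspace model with a pthread mutex standing in for q->queue_lock and a one-slot toy queue:

    #include <pthread.h>
    #include <stdio.h>

    struct queue {
        pthread_mutex_t lock;
        const char *head;       /* toy one-slot queue */
        int plugged;
    };

    /* models ide_requeue_and_plug(): hand the aborted request back,
     * then plug so the queue gets rerun later */
    static void requeue_and_plug(struct queue *q, const char *rq)
    {
        pthread_mutex_lock(&q->lock);
        if (rq)
            q->head = rq;       /* blk_requeue_request(q, rq) */
        if (q->head)
            q->plugged = 1;     /* blk_plug_device(q) */
        pthread_mutex_unlock(&q->lock);
    }

    int main(void)
    {
        struct queue q = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
        requeue_and_plug(&q, "rq0");
        printf("head=%s plugged=%d\n", q.head, q.plugged);
        return 0;
    }
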
@@ -636,6 +636,7 @@ void ide_timer_expiry (unsigned long data)
636 unsigned long flags; 636 unsigned long flags;
637 int wait = -1; 637 int wait = -1;
638 int plug_device = 0; 638 int plug_device = 0;
639 struct request *uninitialized_var(rq_in_flight);
639 640
640 spin_lock_irqsave(&hwif->lock, flags); 641 spin_lock_irqsave(&hwif->lock, flags);
641 642
@@ -696,7 +697,9 @@ void ide_timer_expiry (unsigned long data)
696 } 697 }
697 spin_lock_irq(&hwif->lock); 698 spin_lock_irq(&hwif->lock);
698 enable_irq(hwif->irq); 699 enable_irq(hwif->irq);
699 if (startstop == ide_stopped) { 700 if (startstop == ide_stopped && hwif->polling == 0) {
701 rq_in_flight = hwif->rq;
702 hwif->rq = NULL;
700 ide_unlock_port(hwif); 703 ide_unlock_port(hwif);
701 plug_device = 1; 704 plug_device = 1;
702 } 705 }
@@ -705,7 +708,7 @@ void ide_timer_expiry (unsigned long data)
705 708
706 if (plug_device) { 709 if (plug_device) {
707 ide_unlock_host(hwif->host); 710 ide_unlock_host(hwif->host);
708 ide_plug_device(drive); 711 ide_requeue_and_plug(drive, rq_in_flight);
709 } 712 }
710} 713}
711 714
@@ -791,6 +794,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
791 ide_startstop_t startstop; 794 ide_startstop_t startstop;
792 irqreturn_t irq_ret = IRQ_NONE; 795 irqreturn_t irq_ret = IRQ_NONE;
793 int plug_device = 0; 796 int plug_device = 0;
797 struct request *uninitialized_var(rq_in_flight);
794 798
795 if (host->host_flags & IDE_HFLAG_SERIALIZE) { 799 if (host->host_flags & IDE_HFLAG_SERIALIZE) {
796 if (hwif != host->cur_port) 800 if (hwif != host->cur_port)
@@ -868,8 +872,10 @@ irqreturn_t ide_intr (int irq, void *dev_id)
868 * same irq as is currently being serviced here, and Linux 872 * same irq as is currently being serviced here, and Linux
869 * won't allow another of the same (on any CPU) until we return. 873 * won't allow another of the same (on any CPU) until we return.
870 */ 874 */
871 if (startstop == ide_stopped) { 875 if (startstop == ide_stopped && hwif->polling == 0) {
872 BUG_ON(hwif->handler); 876 BUG_ON(hwif->handler);
877 rq_in_flight = hwif->rq;
878 hwif->rq = NULL;
873 ide_unlock_port(hwif); 879 ide_unlock_port(hwif);
874 plug_device = 1; 880 plug_device = 1;
875 } 881 }
@@ -879,7 +885,7 @@ out:
879out_early: 885out_early:
880 if (plug_device) { 886 if (plug_device) {
881 ide_unlock_host(hwif->host); 887 ide_unlock_host(hwif->host);
882 ide_plug_device(drive); 888 ide_requeue_and_plug(drive, rq_in_flight);
883 } 889 }
884 890
885 return irq_ret; 891 return irq_ret;
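
Both ide_timer_expiry() and ide_intr() now snapshot hwif->rq into rq_in_flight and clear the field before unlocking the port, so exactly one path owns the failed request when it reaches ide_requeue_and_plug(); the hand-back is also skipped while hwif->polling is set, since a polled reset still holds the request. A small model of the snapshot-and-clear handoff:

    #include <stdio.h>

    static const char *hwif_rq = "rq0";   /* models hwif->rq */
    static int polling;                   /* models hwif->polling */

    static const char *take_in_flight(void)
    {
        const char *rq = NULL;
        if (!polling) {         /* don't steal the rq during a polled reset */
            rq = hwif_rq;       /* snapshot ... */
            hwif_rq = NULL;     /* ... and clear: this path owns it now */
        }
        return rq;
    }

    int main(void)
    {
        const char *rq = take_in_flight();
        printf("owned=%s hwif_rq=%s\n",
               rq ? rq : "none", hwif_rq ? hwif_rq : "NULL");
        return 0;
    }
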
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index c1c25ebbaa1..5991b23793f 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -231,7 +231,6 @@ static int generic_drive_reset(ide_drive_t *drive)
231 rq->cmd_type = REQ_TYPE_SPECIAL; 231 rq->cmd_type = REQ_TYPE_SPECIAL;
232 rq->cmd_len = 1; 232 rq->cmd_len = 1;
233 rq->cmd[0] = REQ_DRIVE_RESET; 233 rq->cmd[0] = REQ_DRIVE_RESET;
234 rq->cmd_flags |= REQ_SOFTBARRIER;
235 if (blk_execute_rq(drive->queue, NULL, rq, 1)) 234 if (blk_execute_rq(drive->queue, NULL, rq, 1))
236 ret = rq->errors; 235 ret = rq->errors;
237 blk_put_request(rq); 236 blk_put_request(rq);
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index c19a221b1e1..fa047150a1c 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -206,8 +206,6 @@ EXPORT_SYMBOL_GPL(ide_in_drive_list);
206 206
207/* 207/*
208 * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid. 208 * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid.
209 * We list them here and depend on the device side cable detection for them.
210 *
211 * Some optical devices with the buggy firmwares have the same problem. 209 * Some optical devices with the buggy firmwares have the same problem.
212 */ 210 */
213static const struct drive_list_entry ivb_list[] = { 211static const struct drive_list_entry ivb_list[] = {
@@ -251,10 +249,25 @@ u8 eighty_ninty_three(ide_drive_t *drive)
251 * - force bit13 (80c cable present) check also for !ivb devices 249 * - force bit13 (80c cable present) check also for !ivb devices
252 * (unless the slave device is pre-ATA3) 250 * (unless the slave device is pre-ATA3)
253 */ 251 */
254 if ((id[ATA_ID_HW_CONFIG] & 0x4000) || 252 if (id[ATA_ID_HW_CONFIG] & 0x4000)
255 (ivb && (id[ATA_ID_HW_CONFIG] & 0x2000)))
256 return 1; 253 return 1;
257 254
255 if (ivb) {
256 const char *model = (char *)&id[ATA_ID_PROD];
257
258 if (strstr(model, "TSSTcorp CDDVDW SH-S202")) {
259 /*
260 * These ATAPI devices always report 80c cable
261 * so we have to depend on the host in this case.
262 */
263 if (hwif->cbl == ATA_CBL_PATA80)
264 return 1;
265 } else {
266 /* Depend on the device side cable detection. */
267 if (id[ATA_ID_HW_CONFIG] & 0x2000)
268 return 1;
269 }
270 }
258no_80w: 271no_80w:
259 if (drive->dev_flags & IDE_DFLAG_UDMA33_WARNED) 272 if (drive->dev_flags & IDE_DFLAG_UDMA33_WARNED)
260 return 0; 273 return 0;
@@ -269,6 +282,29 @@ no_80w:
269 return 0; 282 return 0;
270} 283}
271 284
285static const char *nien_quirk_list[] = {
286 "QUANTUM FIREBALLlct08 08",
287 "QUANTUM FIREBALLP KA6.4",
288 "QUANTUM FIREBALLP KA9.1",
289 "QUANTUM FIREBALLP KX13.6",
290 "QUANTUM FIREBALLP KX20.5",
291 "QUANTUM FIREBALLP KX27.3",
292 "QUANTUM FIREBALLP LM20.4",
293 "QUANTUM FIREBALLP LM20.5",
294 NULL
295};
296
297void ide_check_nien_quirk_list(ide_drive_t *drive)
298{
299 const char **list, *m = (char *)&drive->id[ATA_ID_PROD];
300
301 for (list = nien_quirk_list; *list != NULL; list++)
302 if (strstr(m, *list) != NULL) {
303 drive->dev_flags |= IDE_DFLAG_NIEN_QUIRK;
304 return;
305 }
306}
307
272int ide_driveid_update(ide_drive_t *drive) 308int ide_driveid_update(ide_drive_t *drive)
273{ 309{
274 u16 *id; 310 u16 *id;
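
The per-drive quirk_list value that host drivers used to compute is replaced by a generic IDE_DFLAG_NIEN_QUIRK device flag, set at probe time by matching the model string against the table of Quantum Fireball drives that misbehave when nIEN is toggled. A userspace model of the substring match (table truncated for the example):

    #include <stdio.h>
    #include <string.h>

    static const char *nien_quirk_list[] = {
        "QUANTUM FIREBALLlct08 08",
        "QUANTUM FIREBALLP KA6.4",
        NULL
    };

    static int has_nien_quirk(const char *model)
    {
        for (const char **p = nien_quirk_list; *p; p++)
            if (strstr(model, *p))
                return 1;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", has_nien_quirk("QUANTUM FIREBALLP KA6.4"));
        printf("%d\n", has_nien_quirk("ST380011A"));
        return 0;
    }

Centralizing the list in ide-iops.c is what allows the quirk_list == 2 tests in ide-eh.c and ide_config_drive_speed() above to become simple flag checks.
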
@@ -298,7 +334,6 @@ int ide_driveid_update(ide_drive_t *drive)
298 334
299 return 1; 335 return 1;
300out_err: 336out_err:
301 SELECT_MASK(drive, 0);
302 if (rc == 2) 337 if (rc == 2)
303 printk(KERN_ERR "%s: %s: bad status\n", drive->name, __func__); 338 printk(KERN_ERR "%s: %s: bad status\n", drive->name, __func__);
304 kfree(id); 339 kfree(id);
@@ -352,7 +387,7 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
352 387
353 tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES); 388 tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES);
354 389
355 if (drive->quirk_list == 2) 390 if (drive->dev_flags & IDE_DFLAG_NIEN_QUIRK)
356 tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); 391 tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
357 392
358 error = __ide_wait_stat(drive, drive->ready_stat, 393 error = __ide_wait_stat(drive, drive->ready_stat,
diff --git a/drivers/ide/ide-legacy.c b/drivers/ide/ide-legacy.c
index 8c5dcbf2254..b9654a7bb7b 100644
--- a/drivers/ide/ide-legacy.c
+++ b/drivers/ide/ide-legacy.c
@@ -1,7 +1,7 @@
1#include <linux/kernel.h> 1#include <linux/kernel.h>
2#include <linux/ide.h> 2#include <linux/ide.h>
3 3
4static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw, 4static void ide_legacy_init_one(struct ide_hw **hws, struct ide_hw *hw,
5 u8 port_no, const struct ide_port_info *d, 5 u8 port_no, const struct ide_port_info *d,
6 unsigned long config) 6 unsigned long config)
7{ 7{
@@ -33,7 +33,6 @@ static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
33 33
34 ide_std_init_ports(hw, base, ctl); 34 ide_std_init_ports(hw, base, ctl);
35 hw->irq = irq; 35 hw->irq = irq;
36 hw->chipset = d->chipset;
37 hw->config = config; 36 hw->config = config;
38 37
39 hws[port_no] = hw; 38 hws[port_no] = hw;
@@ -41,7 +40,7 @@ static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
41 40
42int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config) 41int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
43{ 42{
44 hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL }; 43 struct ide_hw hw[2], *hws[] = { NULL, NULL };
45 44
46 memset(&hw, 0, sizeof(hw)); 45 memset(&hw, 0, sizeof(hw));
47 46
@@ -53,6 +52,6 @@ int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
53 (d->host_flags & IDE_HFLAG_SINGLE)) 52 (d->host_flags & IDE_HFLAG_SINGLE))
54 return -ENOENT; 53 return -ENOENT;
55 54
56 return ide_host_add(d, hws, NULL); 55 return ide_host_add(d, hws, 2, NULL);
57} 56}
58EXPORT_SYMBOL_GPL(ide_legacy_device_add); 57EXPORT_SYMBOL_GPL(ide_legacy_device_add);
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 56ff8c46c7d..e386a32dc9b 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -31,24 +31,6 @@ void ide_toggle_bounce(ide_drive_t *drive, int on)
31 blk_queue_bounce_limit(drive->queue, addr); 31 blk_queue_bounce_limit(drive->queue, addr);
32} 32}
33 33
34static void ide_dump_opcode(ide_drive_t *drive)
35{
36 struct request *rq = drive->hwif->rq;
37 struct ide_cmd *cmd = NULL;
38
39 if (!rq)
40 return;
41
42 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
43 cmd = rq->special;
44
45 printk(KERN_ERR "ide: failed opcode was: ");
46 if (cmd == NULL)
47 printk(KERN_CONT "unknown\n");
48 else
49 printk(KERN_CONT "0x%02x\n", cmd->tf.command);
50}
51
52u64 ide_get_lba_addr(struct ide_cmd *cmd, int lba48) 34u64 ide_get_lba_addr(struct ide_cmd *cmd, int lba48)
53{ 35{
54 struct ide_taskfile *tf = &cmd->tf; 36 struct ide_taskfile *tf = &cmd->tf;
@@ -91,7 +73,7 @@ static void ide_dump_sector(ide_drive_t *drive)
91 73
92static void ide_dump_ata_error(ide_drive_t *drive, u8 err) 74static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
93{ 75{
94 printk(KERN_ERR "{ "); 76 printk(KERN_CONT "{ ");
95 if (err & ATA_ABORTED) 77 if (err & ATA_ABORTED)
96 printk(KERN_CONT "DriveStatusError "); 78 printk(KERN_CONT "DriveStatusError ");
97 if (err & ATA_ICRC) 79 if (err & ATA_ICRC)
@@ -114,14 +96,14 @@ static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
114 96
115 if (rq) 97 if (rq)
116 printk(KERN_CONT ", sector=%llu", 98 printk(KERN_CONT ", sector=%llu",
117 (unsigned long long)rq->sector); 99 (unsigned long long)blk_rq_pos(rq));
118 } 100 }
119 printk(KERN_CONT "\n"); 101 printk(KERN_CONT "\n");
120} 102}
121 103
122static void ide_dump_atapi_error(ide_drive_t *drive, u8 err) 104static void ide_dump_atapi_error(ide_drive_t *drive, u8 err)
123{ 105{
124 printk(KERN_ERR "{ "); 106 printk(KERN_CONT "{ ");
125 if (err & ATAPI_ILI) 107 if (err & ATAPI_ILI)
126 printk(KERN_CONT "IllegalLengthIndication "); 108 printk(KERN_CONT "IllegalLengthIndication ");
127 if (err & ATAPI_EOM) 109 if (err & ATAPI_EOM)
@@ -179,7 +161,10 @@ u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat)
179 else 161 else
180 ide_dump_atapi_error(drive, err); 162 ide_dump_atapi_error(drive, err);
181 } 163 }
182 ide_dump_opcode(drive); 164
165 printk(KERN_ERR "%s: possibly failed opcode: 0x%02x\n",
166 drive->name, drive->hwif->cmd.tf.command);
167
183 return err; 168 return err;
184} 169}
185EXPORT_SYMBOL(ide_dump_status); 170EXPORT_SYMBOL(ide_dump_status);
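
ide_dump_opcode() is removed; the failed opcode is reported from the cached hwif->cmd instead of being fished out of the request, and the "{ ... }" decoders now open with KERN_CONT so they continue the line ide_dump_status() started rather than beginning a new KERN_ERR record. A trivial model of the continuation convention, with empty strings standing in for the real loglevel prefix macros:

    #include <stdio.h>

    #define KERN_ERR  ""    /* stand-ins: the real macros prefix level codes */
    #define KERN_CONT ""

    int main(void)
    {
        /* one logical error line, emitted piecewise as the kernel does */
        printf(KERN_ERR  "hda: status error: ");
        printf(KERN_CONT "{ ");
        printf(KERN_CONT "DriveStatusError ");
        printf(KERN_CONT "}\n");
        return 0;
    }
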
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 310d03f2b5b..a914023d6d0 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -24,11 +24,8 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
24 start_queue = 1; 24 start_queue = 1;
25 spin_unlock_irq(&hwif->lock); 25 spin_unlock_irq(&hwif->lock);
26 26
27 if (start_queue) { 27 if (start_queue)
28 spin_lock_irq(q->queue_lock); 28 blk_run_queue(q);
29 blk_start_queueing(q);
30 spin_unlock_irq(q->queue_lock);
31 }
32 return; 29 return;
33 } 30 }
34 spin_unlock_irq(&hwif->lock); 31 spin_unlock_irq(&hwif->lock);
diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
index 61111fd2713..39d4e01f5c9 100644
--- a/drivers/ide/ide-pci-generic.c
+++ b/drivers/ide/ide-pci-generic.c
@@ -33,6 +33,16 @@ static int ide_generic_all; /* Set to claim all devices */
33module_param_named(all_generic_ide, ide_generic_all, bool, 0444); 33module_param_named(all_generic_ide, ide_generic_all, bool, 0444);
34MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers."); 34MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers.");
35 35
36static void netcell_quirkproc(ide_drive_t *drive)
37{
38 /* mark words 85-87 as valid */
39 drive->id[ATA_ID_CSF_DEFAULT] |= 0x4000;
40}
41
42static const struct ide_port_ops netcell_port_ops = {
43 .quirkproc = netcell_quirkproc,
44};
45
36#define DECLARE_GENERIC_PCI_DEV(extra_flags) \ 46#define DECLARE_GENERIC_PCI_DEV(extra_flags) \
37 { \ 47 { \
38 .name = DRV_NAME, \ 48 .name = DRV_NAME, \
@@ -74,6 +84,7 @@ static const struct ide_port_info generic_chipsets[] __devinitdata = {
74 84
75 { /* 6: Revolution */ 85 { /* 6: Revolution */
76 .name = DRV_NAME, 86 .name = DRV_NAME,
87 .port_ops = &netcell_port_ops,
77 .host_flags = IDE_HFLAG_CLEAR_SIMPLEX | 88 .host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
78 IDE_HFLAG_TRUST_BIOS_FOR_DMA | 89 IDE_HFLAG_TRUST_BIOS_FOR_DMA |
79 IDE_HFLAG_OFF_BOARD, 90 IDE_HFLAG_OFF_BOARD,
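
The Netcell Revolution gains a quirkproc in the generic PCI driver: its firmware leaves the "words 85-87 valid" signature unset in the identify data, so the quirk forces bit 14 of word 87 (ATA_ID_CSF_DEFAULT) on, after which the standard feature-set checks work. A userspace model of the fixup:

    #include <stdio.h>

    #define ATA_ID_CSF_DEFAULT 87   /* identify word index, as in <linux/ata.h> */

    /* models netcell_quirkproc(): mark words 85-87 as valid
     * (bit 14 set; bit 15 stays clear per the ATA signature rule) */
    static void netcell_quirk(unsigned short *id)
    {
        id[ATA_ID_CSF_DEFAULT] |= 0x4000;
    }

    int main(void)
    {
        unsigned short id[256] = { 0 };
        netcell_quirk(id);
        printf("word87=%#x valid=%d\n", id[ATA_ID_CSF_DEFAULT],
               (id[ATA_ID_CSF_DEFAULT] & 0xc000) == 0x4000);
        return 0;
    }
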
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 0d8a151c0a0..ba1488bd843 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -7,7 +7,6 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
7 ide_hwif_t *hwif = drive->hwif; 7 ide_hwif_t *hwif = drive->hwif;
8 struct request *rq; 8 struct request *rq;
9 struct request_pm_state rqpm; 9 struct request_pm_state rqpm;
10 struct ide_cmd cmd;
11 int ret; 10 int ret;
12 11
13 /* call ACPI _GTM only once */ 12 /* call ACPI _GTM only once */
@@ -15,11 +14,9 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
15 ide_acpi_get_timing(hwif); 14 ide_acpi_get_timing(hwif);
16 15
17 memset(&rqpm, 0, sizeof(rqpm)); 16 memset(&rqpm, 0, sizeof(rqpm));
18 memset(&cmd, 0, sizeof(cmd));
19 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 17 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
20 rq->cmd_type = REQ_TYPE_PM_SUSPEND; 18 rq->cmd_type = REQ_TYPE_PM_SUSPEND;
21 rq->special = &cmd; 19 rq->special = &rqpm;
22 rq->data = &rqpm;
23 rqpm.pm_step = IDE_PM_START_SUSPEND; 20 rqpm.pm_step = IDE_PM_START_SUSPEND;
24 if (mesg.event == PM_EVENT_PRETHAW) 21 if (mesg.event == PM_EVENT_PRETHAW)
25 mesg.event = PM_EVENT_FREEZE; 22 mesg.event = PM_EVENT_FREEZE;
@@ -41,7 +38,6 @@ int generic_ide_resume(struct device *dev)
41 ide_hwif_t *hwif = drive->hwif; 38 ide_hwif_t *hwif = drive->hwif;
42 struct request *rq; 39 struct request *rq;
43 struct request_pm_state rqpm; 40 struct request_pm_state rqpm;
44 struct ide_cmd cmd;
45 int err; 41 int err;
46 42
47 /* call ACPI _PS0 / _STM only once */ 43 /* call ACPI _PS0 / _STM only once */
@@ -53,12 +49,10 @@ int generic_ide_resume(struct device *dev)
53 ide_acpi_exec_tfs(drive); 49 ide_acpi_exec_tfs(drive);
54 50
55 memset(&rqpm, 0, sizeof(rqpm)); 51 memset(&rqpm, 0, sizeof(rqpm));
56 memset(&cmd, 0, sizeof(cmd));
57 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 52 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
58 rq->cmd_type = REQ_TYPE_PM_RESUME; 53 rq->cmd_type = REQ_TYPE_PM_RESUME;
59 rq->cmd_flags |= REQ_PREEMPT; 54 rq->cmd_flags |= REQ_PREEMPT;
60 rq->special = &cmd; 55 rq->special = &rqpm;
61 rq->data = &rqpm;
62 rqpm.pm_step = IDE_PM_START_RESUME; 56 rqpm.pm_step = IDE_PM_START_RESUME;
63 rqpm.pm_state = PM_EVENT_ON; 57 rqpm.pm_state = PM_EVENT_ON;
64 58
@@ -77,7 +71,7 @@ int generic_ide_resume(struct device *dev)
77 71
78void ide_complete_power_step(ide_drive_t *drive, struct request *rq) 72void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
79{ 73{
80 struct request_pm_state *pm = rq->data; 74 struct request_pm_state *pm = rq->special;
81 75
82#ifdef DEBUG_PM 76#ifdef DEBUG_PM
83 printk(KERN_INFO "%s: complete_power_step(step: %d)\n", 77 printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
@@ -107,10 +101,8 @@ void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
107 101
108ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq) 102ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
109{ 103{
110 struct request_pm_state *pm = rq->data; 104 struct request_pm_state *pm = rq->special;
111 struct ide_cmd *cmd = rq->special; 105 struct ide_cmd cmd = { };
112
113 memset(cmd, 0, sizeof(*cmd));
114 106
115 switch (pm->pm_step) { 107 switch (pm->pm_step) {
116 case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */ 108 case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
@@ -123,12 +115,12 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
123 return ide_stopped; 115 return ide_stopped;
124 } 116 }
125 if (ata_id_flush_ext_enabled(drive->id)) 117 if (ata_id_flush_ext_enabled(drive->id))
126 cmd->tf.command = ATA_CMD_FLUSH_EXT; 118 cmd.tf.command = ATA_CMD_FLUSH_EXT;
127 else 119 else
128 cmd->tf.command = ATA_CMD_FLUSH; 120 cmd.tf.command = ATA_CMD_FLUSH;
129 goto out_do_tf; 121 goto out_do_tf;
130 case IDE_PM_STANDBY: /* Suspend step 2 (standby) */ 122 case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
131 cmd->tf.command = ATA_CMD_STANDBYNOW1; 123 cmd.tf.command = ATA_CMD_STANDBYNOW1;
132 goto out_do_tf; 124 goto out_do_tf;
133 case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */ 125 case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
134 ide_set_max_pio(drive); 126 ide_set_max_pio(drive);
@@ -141,7 +133,7 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
141 ide_complete_power_step(drive, rq); 133 ide_complete_power_step(drive, rq);
142 return ide_stopped; 134 return ide_stopped;
143 case IDE_PM_IDLE: /* Resume step 2 (idle) */ 135 case IDE_PM_IDLE: /* Resume step 2 (idle) */
144 cmd->tf.command = ATA_CMD_IDLEIMMEDIATE; 136 cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
145 goto out_do_tf; 137 goto out_do_tf;
146 case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */ 138 case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */
147 /* 139 /*
@@ -163,11 +155,11 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
163 return ide_stopped; 155 return ide_stopped;
164 156
165out_do_tf: 157out_do_tf:
166 cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; 158 cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
167 cmd->valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; 159 cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
168 cmd->protocol = ATA_PROT_NODATA; 160 cmd.protocol = ATA_PROT_NODATA;
169 161
170 return do_rw_taskfile(drive, cmd); 162 return do_rw_taskfile(drive, &cmd);
171} 163}
172 164
173/** 165/**
@@ -181,7 +173,7 @@ out_do_tf:
181void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq) 173void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
182{ 174{
183 struct request_queue *q = drive->queue; 175 struct request_queue *q = drive->queue;
184 struct request_pm_state *pm = rq->data; 176 struct request_pm_state *pm = rq->special;
185 unsigned long flags; 177 unsigned long flags;
186 178
187 ide_complete_power_step(drive, rq); 179 ide_complete_power_step(drive, rq);
@@ -207,7 +199,7 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
207 199
208void ide_check_pm_state(ide_drive_t *drive, struct request *rq) 200void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
209{ 201{
210 struct request_pm_state *pm = rq->data; 202 struct request_pm_state *pm = rq->special;
211 203
212 if (blk_pm_suspend_request(rq) && 204 if (blk_pm_suspend_request(rq) &&
213 pm->pm_step == IDE_PM_START_SUSPEND) 205 pm->pm_step == IDE_PM_START_SUSPEND)
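
With rq->data gone, the PM state travels in rq->special, and the ide_cmd is built on the stack inside ide_start_power_step() rather than being smuggled alongside the request from the suspend/resume entry points. A minimal model of the new suspend-request setup (stub types; blk_get_request() replaced by a local):

    #include <stdio.h>
    #include <string.h>

    struct request  { void *special; int cmd_type; };
    struct pm_state { int pm_step, pm_event; };
    enum { REQ_TYPE_PM_SUSPEND = 1, IDE_PM_START_SUSPEND = 0 }; /* illustrative */

    int main(void)
    {
        struct pm_state rqpm;
        struct request rq;              /* stands in for blk_get_request() */

        memset(&rqpm, 0, sizeof(rqpm));
        rq.cmd_type = REQ_TYPE_PM_SUSPEND;
        rq.special  = &rqpm;            /* was: rq->data = &rqpm */
        rqpm.pm_step = IDE_PM_START_SUSPEND;

        printf("step=%d\n", ((struct pm_state *)rq.special)->pm_step);
        return 0;
    }
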
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index 6e80b774e88..017b1df3b80 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -29,6 +29,7 @@ static struct pnp_device_id idepnp_devices[] = {
29 29
30static const struct ide_port_info ide_pnp_port_info = { 30static const struct ide_port_info ide_pnp_port_info = {
31 .host_flags = IDE_HFLAG_NO_DMA, 31 .host_flags = IDE_HFLAG_NO_DMA,
32 .chipset = ide_generic,
32}; 33};
33 34
34static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) 35static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
@@ -36,7 +37,7 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
36 struct ide_host *host; 37 struct ide_host *host;
37 unsigned long base, ctl; 38 unsigned long base, ctl;
38 int rc; 39 int rc;
39 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 40 struct ide_hw hw, *hws[] = { &hw };
40 41
41 printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n"); 42 printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n");
42 43
@@ -62,9 +63,8 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
62 memset(&hw, 0, sizeof(hw)); 63 memset(&hw, 0, sizeof(hw));
63 ide_std_init_ports(&hw, base, ctl); 64 ide_std_init_ports(&hw, base, ctl);
64 hw.irq = pnp_irq(dev, 0); 65 hw.irq = pnp_irq(dev, 0);
65 hw.chipset = ide_generic;
66 66
67 rc = ide_host_add(&ide_pnp_port_info, hws, &host); 67 rc = ide_host_add(&ide_pnp_port_info, hws, 1, &host);
68 if (rc) 68 if (rc)
69 goto out; 69 goto out;
70 70
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 7f264ed1141..f371b0de314 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -97,7 +97,7 @@ static void ide_disk_init_mult_count(ide_drive_t *drive)
97 drive->mult_req = id[ATA_ID_MULTSECT] & 0xff; 97 drive->mult_req = id[ATA_ID_MULTSECT] & 0xff;
98 98
99 if (drive->mult_req) 99 if (drive->mult_req)
100 drive->special.b.set_multmode = 1; 100 drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
101 } 101 }
102} 102}
103 103
@@ -295,7 +295,7 @@ int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id)
295 295
296 timeout = ((cmd == ATA_CMD_ID_ATA) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2; 296 timeout = ((cmd == ATA_CMD_ID_ATA) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
297 297
298 if (ide_busy_sleep(hwif, timeout, use_altstatus)) 298 if (ide_busy_sleep(drive, timeout, use_altstatus))
299 return 1; 299 return 1;
300 300
301 /* wait for IRQ and ATA_DRQ */ 301 /* wait for IRQ and ATA_DRQ */
@@ -316,8 +316,9 @@ int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id)
316 return rc; 316 return rc;
317} 317}
318 318
319int ide_busy_sleep(ide_hwif_t *hwif, unsigned long timeout, int altstatus) 319int ide_busy_sleep(ide_drive_t *drive, unsigned long timeout, int altstatus)
320{ 320{
321 ide_hwif_t *hwif = drive->hwif;
321 u8 stat; 322 u8 stat;
322 323
323 timeout += jiffies; 324 timeout += jiffies;
@@ -330,6 +331,8 @@ int ide_busy_sleep(ide_hwif_t *hwif, unsigned long timeout, int altstatus)
330 return 0; 331 return 0;
331 } while (time_before(jiffies, timeout)); 332 } while (time_before(jiffies, timeout));
332 333
334 printk(KERN_ERR "%s: timeout in %s\n", drive->name, __func__);
335
333 return 1; /* drive timed-out */ 336 return 1; /* drive timed-out */
334} 337}
335 338
@@ -420,7 +423,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
420 tp_ops->dev_select(drive); 423 tp_ops->dev_select(drive);
421 msleep(50); 424 msleep(50);
422 tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET); 425 tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
423 (void)ide_busy_sleep(hwif, WAIT_WORSTCASE, 0); 426 (void)ide_busy_sleep(drive, WAIT_WORSTCASE, 0);
424 rc = ide_dev_read_id(drive, cmd, id); 427 rc = ide_dev_read_id(drive, cmd, id);
425 } 428 }
426 429
@@ -462,23 +465,8 @@ static u8 probe_for_drive(ide_drive_t *drive)
462 int rc; 465 int rc;
463 u8 cmd; 466 u8 cmd;
464 467
465 /*
466 * In order to keep things simple we have an id
467 * block for all drives at all times. If the device
468 * is pre ATA or refuses ATA/ATAPI identify we
469 * will add faked data to this.
470 *
471 * Also note that 0 everywhere means "can't do X"
472 */
473
474 drive->dev_flags &= ~IDE_DFLAG_ID_READ; 468 drive->dev_flags &= ~IDE_DFLAG_ID_READ;
475 469
476 drive->id = kzalloc(SECTOR_SIZE, GFP_KERNEL);
477 if (drive->id == NULL) {
478 printk(KERN_ERR "ide: out of memory for id data.\n");
479 return 0;
480 }
481
482 m = (char *)&drive->id[ATA_ID_PROD]; 470 m = (char *)&drive->id[ATA_ID_PROD];
483 strcpy(m, "UNKNOWN"); 471 strcpy(m, "UNKNOWN");
484 472
@@ -494,7 +482,7 @@ static u8 probe_for_drive(ide_drive_t *drive)
494 } 482 }
495 483
496 if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) 484 if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
497 goto out_free; 485 return 0;
498 486
499 /* identification failed? */ 487 /* identification failed? */
500 if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) { 488 if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
@@ -518,7 +506,7 @@ static u8 probe_for_drive(ide_drive_t *drive)
518 } 506 }
519 507
520 if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) 508 if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
521 goto out_free; 509 return 0;
522 510
523 /* The drive wasn't being helpful. Add generic info only */ 511 /* The drive wasn't being helpful. Add generic info only */
524 if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) { 512 if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
@@ -532,9 +520,6 @@ static u8 probe_for_drive(ide_drive_t *drive)
532 } 520 }
533 521
534 return 1; 522 return 1;
535out_free:
536 kfree(drive->id);
537 return 0;
538} 523}
539 524
540static void hwif_release_dev(struct device *dev) 525static void hwif_release_dev(struct device *dev)
@@ -699,8 +684,14 @@ static int ide_probe_port(ide_hwif_t *hwif)
699 if (irqd) 684 if (irqd)
700 disable_irq(hwif->irq); 685 disable_irq(hwif->irq);
701 686
702 if (ide_port_wait_ready(hwif) == -EBUSY) 687 rc = ide_port_wait_ready(hwif);
703 printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n", hwif->name); 688 if (rc == -ENODEV) {
689 printk(KERN_INFO "%s: no devices on the port\n", hwif->name);
690 goto out;
691 } else if (rc == -EBUSY)
692 printk(KERN_ERR "%s: not ready before the probe\n", hwif->name);
693 else
694 rc = -ENODEV;
704 695
705 /* 696 /*
706 * Second drive should only exist if first drive was found, 697 * Second drive should only exist if first drive was found,
@@ -711,7 +702,7 @@ static int ide_probe_port(ide_hwif_t *hwif)
711 if (drive->dev_flags & IDE_DFLAG_PRESENT) 702 if (drive->dev_flags & IDE_DFLAG_PRESENT)
712 rc = 0; 703 rc = 0;
713 } 704 }
714 705out:
715 /* 706 /*
716 * Use cached IRQ number. It might be (and is...) changed by probe 707 * Use cached IRQ number. It might be (and is...) changed by probe
717 * code above 708 * code above
@@ -729,6 +720,8 @@ static void ide_port_tune_devices(ide_hwif_t *hwif)
729 int i; 720 int i;
730 721
731 ide_port_for_each_present_dev(i, drive, hwif) { 722 ide_port_for_each_present_dev(i, drive, hwif) {
723 ide_check_nien_quirk_list(drive);
724
732 if (port_ops && port_ops->quirkproc) 725 if (port_ops && port_ops->quirkproc)
733 port_ops->quirkproc(drive); 726 port_ops->quirkproc(drive);
734 } 727 }
@@ -814,8 +807,6 @@ static int ide_port_setup_devices(ide_hwif_t *hwif)
814 if (ide_init_queue(drive)) { 807 if (ide_init_queue(drive)) {
815 printk(KERN_ERR "ide: failed to init %s\n", 808 printk(KERN_ERR "ide: failed to init %s\n",
816 drive->name); 809 drive->name);
817 kfree(drive->id);
818 drive->id = NULL;
819 drive->dev_flags &= ~IDE_DFLAG_PRESENT; 810 drive->dev_flags &= ~IDE_DFLAG_PRESENT;
820 continue; 811 continue;
821 } 812 }
@@ -944,9 +935,6 @@ static void drive_release_dev (struct device *dev)
944 blk_cleanup_queue(drive->queue); 935 blk_cleanup_queue(drive->queue);
945 drive->queue = NULL; 936 drive->queue = NULL;
946 937
947 kfree(drive->id);
948 drive->id = NULL;
949
950 drive->dev_flags &= ~IDE_DFLAG_PRESENT; 938 drive->dev_flags &= ~IDE_DFLAG_PRESENT;
951 939
952 complete(&drive->gendev_rel_comp); 940 complete(&drive->gendev_rel_comp);
@@ -1032,6 +1020,15 @@ static void ide_port_init_devices(ide_hwif_t *hwif)
1032 if (port_ops && port_ops->init_dev) 1020 if (port_ops && port_ops->init_dev)
1033 port_ops->init_dev(drive); 1021 port_ops->init_dev(drive);
1034 } 1022 }
1023
1024 ide_port_for_each_dev(i, drive, hwif) {
1025 /*
1026 * default to PIO Mode 0 before we figure out
1027 * the most suited mode for the attached device
1028 */
1029 if (port_ops && port_ops->set_pio_mode)
1030 port_ops->set_pio_mode(drive, 0);
1031 }
1035} 1032}
1036 1033
1037static void ide_init_port(ide_hwif_t *hwif, unsigned int port, 1034static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
@@ -1039,8 +1036,7 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1039{ 1036{
1040 hwif->channel = port; 1037 hwif->channel = port;
1041 1038
1042 if (d->chipset) 1039 hwif->chipset = d->chipset ? d->chipset : ide_pci;
1043 hwif->chipset = d->chipset;
1044 1040
1045 if (d->init_iops) 1041 if (d->init_iops)
1046 d->init_iops(hwif); 1042 d->init_iops(hwif);
@@ -1121,16 +1117,19 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
1121 1117
1122 ide_port_for_each_dev(i, drive, hwif) { 1118 ide_port_for_each_dev(i, drive, hwif) {
1123 u8 j = (hwif->index * MAX_DRIVES) + i; 1119 u8 j = (hwif->index * MAX_DRIVES) + i;
1120 u16 *saved_id = drive->id;
1124 1121
1125 memset(drive, 0, sizeof(*drive)); 1122 memset(drive, 0, sizeof(*drive));
1123 memset(saved_id, 0, SECTOR_SIZE);
1124 drive->id = saved_id;
1126 1125
1127 drive->media = ide_disk; 1126 drive->media = ide_disk;
1128 drive->select = (i << 4) | ATA_DEVICE_OBS; 1127 drive->select = (i << 4) | ATA_DEVICE_OBS;
1129 drive->hwif = hwif; 1128 drive->hwif = hwif;
1130 drive->ready_stat = ATA_DRDY; 1129 drive->ready_stat = ATA_DRDY;
1131 drive->bad_wstat = BAD_W_STAT; 1130 drive->bad_wstat = BAD_W_STAT;
1132 drive->special.b.recalibrate = 1; 1131 drive->special_flags = IDE_SFLAG_RECALIBRATE |
1133 drive->special.b.set_geometry = 1; 1132 IDE_SFLAG_SET_GEOMETRY;
1134 drive->name[0] = 'h'; 1133 drive->name[0] = 'h';
1135 drive->name[1] = 'd'; 1134 drive->name[1] = 'd';
1136 drive->name[2] = 'a' + j; 1135 drive->name[2] = 'a' + j;
@@ -1165,11 +1164,10 @@ static void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
1165 ide_port_init_devices_data(hwif); 1164 ide_port_init_devices_data(hwif);
1166} 1165}
1167 1166
1168static void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) 1167static void ide_init_port_hw(ide_hwif_t *hwif, struct ide_hw *hw)
1169{ 1168{
1170 memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports)); 1169 memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
1171 hwif->irq = hw->irq; 1170 hwif->irq = hw->irq;
1172 hwif->chipset = hw->chipset;
1173 hwif->dev = hw->dev; 1171 hwif->dev = hw->dev;
1174 hwif->gendev.parent = hw->parent ? hw->parent : hw->dev; 1172 hwif->gendev.parent = hw->parent ? hw->parent : hw->dev;
1175 hwif->ack_intr = hw->ack_intr; 1173 hwif->ack_intr = hw->ack_intr;
@@ -1230,8 +1228,10 @@ static void ide_port_free_devices(ide_hwif_t *hwif)
1230 ide_drive_t *drive; 1228 ide_drive_t *drive;
1231 int i; 1229 int i;
1232 1230
1233 ide_port_for_each_dev(i, drive, hwif) 1231 ide_port_for_each_dev(i, drive, hwif) {
1232 kfree(drive->id);
1234 kfree(drive); 1233 kfree(drive);
1234 }
1235} 1235}
1236 1236
1237static int ide_port_alloc_devices(ide_hwif_t *hwif, int node) 1237static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
@@ -1245,6 +1245,18 @@ static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
1245 if (drive == NULL) 1245 if (drive == NULL)
1246 goto out_nomem; 1246 goto out_nomem;
1247 1247
1248 /*
1249 * In order to keep things simple we have an id
1250 * block for all drives at all times. If the device
1251 * is pre ATA or refuses ATA/ATAPI identify we
1252 * will add faked data to this.
1253 *
1254 * Also note that 0 everywhere means "can't do X"
1255 */
1256 drive->id = kzalloc_node(SECTOR_SIZE, GFP_KERNEL, node);
1257 if (drive->id == NULL)
1258 goto out_nomem;
1259
1248 hwif->devices[i] = drive; 1260 hwif->devices[i] = drive;
1249 } 1261 }
1250 return 0; 1262 return 0;
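
The per-drive identify buffer now lives for the lifetime of the port: it is allocated once, NUMA-locally, in ide_port_alloc_devices(), re-zeroed rather than freed when device data is reinitialized, and released only in ide_port_free_devices(). That removes the kfree()/NULL dance previously scattered through probe_for_drive(), ide_port_setup_devices() and drive_release_dev(). A userspace model of the preserve-across-reset pattern:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define SECTOR_SIZE 512

    struct drive { unsigned short *id; int media; };

    /* models ide_port_init_devices_data(): wipe the drive struct but
     * keep and rezero the long-lived id allocation */
    static void reinit_drive(struct drive *d)
    {
        unsigned short *saved_id = d->id;
        memset(d, 0, sizeof(*d));
        memset(saved_id, 0, SECTOR_SIZE);
        d->id = saved_id;
    }

    int main(void)
    {
        struct drive d = { calloc(1, SECTOR_SIZE), 42 };
        unsigned short *before = d.id;
        reinit_drive(&d);
        printf("same buffer: %d, media reset: %d\n", d.id == before, d.media);
        free(d.id);
        return 0;
    }
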
@@ -1254,7 +1266,8 @@ out_nomem:
1254 return -ENOMEM; 1266 return -ENOMEM;
1255} 1267}
1256 1268
1257struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws) 1269struct ide_host *ide_host_alloc(const struct ide_port_info *d,
1270 struct ide_hw **hws, unsigned int n_ports)
1258{ 1271{
1259 struct ide_host *host; 1272 struct ide_host *host;
1260 struct device *dev = hws[0] ? hws[0]->dev : NULL; 1273 struct device *dev = hws[0] ? hws[0]->dev : NULL;
@@ -1265,7 +1278,7 @@ struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws)
1265 if (host == NULL) 1278 if (host == NULL)
1266 return NULL; 1279 return NULL;
1267 1280
1268 for (i = 0; i < MAX_HOST_PORTS; i++) { 1281 for (i = 0; i < n_ports; i++) {
1269 ide_hwif_t *hwif; 1282 ide_hwif_t *hwif;
1270 int idx; 1283 int idx;
1271 1284
@@ -1285,6 +1298,7 @@ struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws)
1285 if (idx < 0) { 1298 if (idx < 0) {
1286 printk(KERN_ERR "%s: no free slot for interface\n", 1299 printk(KERN_ERR "%s: no free slot for interface\n",
1287 d ? d->name : "ide"); 1300 d ? d->name : "ide");
1301 ide_port_free_devices(hwif);
1288 kfree(hwif); 1302 kfree(hwif);
1289 continue; 1303 continue;
1290 } 1304 }
@@ -1341,7 +1355,7 @@ static void ide_disable_port(ide_hwif_t *hwif)
1341} 1355}
1342 1356
1343int ide_host_register(struct ide_host *host, const struct ide_port_info *d, 1357int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
1344 hw_regs_t **hws) 1358 struct ide_hw **hws)
1345{ 1359{
1346 ide_hwif_t *hwif, *mate = NULL; 1360 ide_hwif_t *hwif, *mate = NULL;
1347 int i, j = 0; 1361 int i, j = 0;
@@ -1435,13 +1449,13 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
1435} 1449}
1436EXPORT_SYMBOL_GPL(ide_host_register); 1450EXPORT_SYMBOL_GPL(ide_host_register);
1437 1451
1438int ide_host_add(const struct ide_port_info *d, hw_regs_t **hws, 1452int ide_host_add(const struct ide_port_info *d, struct ide_hw **hws,
1439 struct ide_host **hostp) 1453 unsigned int n_ports, struct ide_host **hostp)
1440{ 1454{
1441 struct ide_host *host; 1455 struct ide_host *host;
1442 int rc; 1456 int rc;
1443 1457
1444 host = ide_host_alloc(d, hws); 1458 host = ide_host_alloc(d, hws, n_ports);
1445 if (host == NULL) 1459 if (host == NULL)
1446 return -ENOMEM; 1460 return -ENOMEM;
1447 1461
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 3a53e0834cf..4b447a8a49d 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -131,13 +131,6 @@ enum {
131 IDETAPE_DIR_WRITE = (1 << 2), 131 IDETAPE_DIR_WRITE = (1 << 2),
132}; 132};
133 133
134struct idetape_bh {
135 u32 b_size;
136 atomic_t b_count;
137 struct idetape_bh *b_reqnext;
138 char *b_data;
139};
140
141/* Tape door status */ 134/* Tape door status */
142#define DOOR_UNLOCKED 0 135#define DOOR_UNLOCKED 0
143#define DOOR_LOCKED 1 136#define DOOR_LOCKED 1
@@ -219,18 +212,12 @@ typedef struct ide_tape_obj {
219 212
220 /* Data buffer size chosen based on the tape's recommendation */ 213 /* Data buffer size chosen based on the tape's recommendation */
221 int buffer_size; 214 int buffer_size;
222 /* merge buffer */ 215 /* Staging buffer of buffer_size bytes */
223 struct idetape_bh *merge_bh; 216 void *buf;
224 /* size of the merge buffer */ 217 /* The read/write cursor */
225 int merge_bh_size; 218 void *cur;
226 /* pointer to current buffer head within the merge buffer */ 219 /* The number of valid bytes in buf */
227 struct idetape_bh *bh; 220 size_t valid;
228 char *b_data;
229 int b_count;
230
231 int pages_per_buffer;
232 /* Wasted space in each stage */
233 int excess_bh_size;
234 221
235 /* Measures average tape speed */ 222 /* Measures average tape speed */
236 unsigned long avg_time; 223 unsigned long avg_time;
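
The buf/cur/valid triple replaces the whole idetape_bh machinery with one flat staging buffer. A sketch of the invariants the hunks below rely on (field names as in the struct above):

	/* buf .. buf + buffer_size : single kmalloc'ed staging buffer
	 * cur                      : read/write cursor inside buf
	 * valid                    : read  - bytes still unconsumed at cur
	 *                            write - bytes filled in so far      */

	tape->cur += todo;		/* both directions advance the cursor */
	tape->valid -= todo;		/* read side: consume */
	/* ...or, on the write side... */
	tape->valid += todo;		/* write side: fill */
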
@@ -253,18 +240,27 @@ static struct class *idetape_sysfs_class;
253 240
254static void ide_tape_release(struct device *); 241static void ide_tape_release(struct device *);
255 242
256static struct ide_tape_obj *ide_tape_get(struct gendisk *disk) 243static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
244
245static struct ide_tape_obj *ide_tape_get(struct gendisk *disk, bool cdev,
246 unsigned int i)
257{ 247{
258 struct ide_tape_obj *tape = NULL; 248 struct ide_tape_obj *tape = NULL;
259 249
260 mutex_lock(&idetape_ref_mutex); 250 mutex_lock(&idetape_ref_mutex);
261 tape = ide_drv_g(disk, ide_tape_obj); 251
252 if (cdev)
253 tape = idetape_devs[i];
254 else
255 tape = ide_drv_g(disk, ide_tape_obj);
256
262 if (tape) { 257 if (tape) {
263 if (ide_device_get(tape->drive)) 258 if (ide_device_get(tape->drive))
264 tape = NULL; 259 tape = NULL;
265 else 260 else
266 get_device(&tape->dev); 261 get_device(&tape->dev);
267 } 262 }
263
268 mutex_unlock(&idetape_ref_mutex); 264 mutex_unlock(&idetape_ref_mutex);
269 return tape; 265 return tape;
270} 266}
@@ -280,102 +276,6 @@ static void ide_tape_put(struct ide_tape_obj *tape)
280} 276}
281 277
282/* 278/*
283 * The variables below are used for the character device interface. Additional
284 * state variables are defined in our ide_drive_t structure.
285 */
286static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
287
288static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
289{
290 struct ide_tape_obj *tape = NULL;
291
292 mutex_lock(&idetape_ref_mutex);
293 tape = idetape_devs[i];
294 if (tape)
295 get_device(&tape->dev);
296 mutex_unlock(&idetape_ref_mutex);
297 return tape;
298}
299
300static int idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
301 unsigned int bcount)
302{
303 struct idetape_bh *bh = pc->bh;
304 int count;
305
306 while (bcount) {
307 if (bh == NULL)
308 break;
309 count = min(
310 (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
311 bcount);
312 drive->hwif->tp_ops->input_data(drive, NULL, bh->b_data +
313 atomic_read(&bh->b_count), count);
314 bcount -= count;
315 atomic_add(count, &bh->b_count);
316 if (atomic_read(&bh->b_count) == bh->b_size) {
317 bh = bh->b_reqnext;
318 if (bh)
319 atomic_set(&bh->b_count, 0);
320 }
321 }
322
323 pc->bh = bh;
324
325 return bcount;
326}
327
328static int idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
329 unsigned int bcount)
330{
331 struct idetape_bh *bh = pc->bh;
332 int count;
333
334 while (bcount) {
335 if (bh == NULL)
336 break;
337 count = min((unsigned int)pc->b_count, (unsigned int)bcount);
338 drive->hwif->tp_ops->output_data(drive, NULL, pc->b_data, count);
339 bcount -= count;
340 pc->b_data += count;
341 pc->b_count -= count;
342 if (!pc->b_count) {
343 bh = bh->b_reqnext;
344 pc->bh = bh;
345 if (bh) {
346 pc->b_data = bh->b_data;
347 pc->b_count = atomic_read(&bh->b_count);
348 }
349 }
350 }
351
352 return bcount;
353}
354
355static void idetape_update_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc)
356{
357 struct idetape_bh *bh = pc->bh;
358 int count;
359 unsigned int bcount = pc->xferred;
360
361 if (pc->flags & PC_FLAG_WRITING)
362 return;
363 while (bcount) {
364 if (bh == NULL) {
365 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
366 __func__);
367 return;
368 }
369 count = min((unsigned int)bh->b_size, (unsigned int)bcount);
370 atomic_set(&bh->b_count, count);
371 if (atomic_read(&bh->b_count) == bh->b_size)
372 bh = bh->b_reqnext;
373 bcount -= count;
374 }
375 pc->bh = bh;
376}
377
378/*
379 * called on each failed packet command retry to analyze the request sense. We 279 * called on each failed packet command retry to analyze the request sense. We
380 * currently do not utilize this information. 280 * currently do not utilize this information.
381 */ 281 */
@@ -392,12 +292,10 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
392 pc->c[0], tape->sense_key, tape->asc, tape->ascq); 292 pc->c[0], tape->sense_key, tape->asc, tape->ascq);
393 293
394 /* Correct pc->xferred by asking the tape. */ 294 /* Correct pc->xferred by asking the tape. */
395 if (pc->flags & PC_FLAG_DMA_ERROR) { 295 if (pc->flags & PC_FLAG_DMA_ERROR)
396 pc->xferred = pc->req_xfer - 296 pc->xferred = pc->req_xfer -
397 tape->blk_size * 297 tape->blk_size *
398 get_unaligned_be32(&sense[3]); 298 get_unaligned_be32(&sense[3]);
399 idetape_update_buffers(drive, pc);
400 }
401 299
402 /* 300 /*
403 * If error was the result of a zero-length read or write command, 301 * If error was the result of a zero-length read or write command,
@@ -436,29 +334,6 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
436 } 334 }
437} 335}
438 336
439/* Free data buffers completely. */
440static void ide_tape_kfree_buffer(idetape_tape_t *tape)
441{
442 struct idetape_bh *prev_bh, *bh = tape->merge_bh;
443
444 while (bh) {
445 u32 size = bh->b_size;
446
447 while (size) {
448 unsigned int order = fls(size >> PAGE_SHIFT)-1;
449
450 if (bh->b_data)
451 free_pages((unsigned long)bh->b_data, order);
452
453 size &= (order-1);
454 bh->b_data += (1 << order) * PAGE_SIZE;
455 }
456 prev_bh = bh;
457 bh = bh->b_reqnext;
458 kfree(prev_bh);
459 }
460}
461
462static void ide_tape_handle_dsc(ide_drive_t *); 337static void ide_tape_handle_dsc(ide_drive_t *);
463 338
464static int ide_tape_callback(ide_drive_t *drive, int dsc) 339static int ide_tape_callback(ide_drive_t *drive, int dsc)
@@ -496,7 +371,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
496 } 371 }
497 372
498 tape->first_frame += blocks; 373 tape->first_frame += blocks;
499 rq->current_nr_sectors -= blocks; 374 rq->resid_len -= blocks * tape->blk_size;
500 375
501 if (pc->error) { 376 if (pc->error) {
502 uptodate = 0; 377 uptodate = 0;
@@ -513,7 +388,8 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
513 if (readpos[0] & 0x4) { 388 if (readpos[0] & 0x4) {
514 printk(KERN_INFO "ide-tape: Block location is unknown" 389 printk(KERN_INFO "ide-tape: Block location is unknown"
515 "to the tape\n"); 390 "to the tape\n");
516 clear_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags); 391 clear_bit(ilog2(IDE_AFLAG_ADDRESS_VALID),
392 &drive->atapi_flags);
517 uptodate = 0; 393 uptodate = 0;
518 err = IDE_DRV_ERROR_GENERAL; 394 err = IDE_DRV_ERROR_GENERAL;
519 } else { 395 } else {
@@ -522,7 +398,8 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
522 398
523 tape->partition = readpos[1]; 399 tape->partition = readpos[1];
524 tape->first_frame = be32_to_cpup((__be32 *)&readpos[4]); 400 tape->first_frame = be32_to_cpup((__be32 *)&readpos[4]);
525 set_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags); 401 set_bit(ilog2(IDE_AFLAG_ADDRESS_VALID),
402 &drive->atapi_flags);
526 } 403 }
527 } 404 }
528 405
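
The IDE_AFLAG_* constants are bit masks, while set_bit()/clear_bit()/test_bit() take a bit index, hence the ilog2() wrapping that recurs through the rest of this patch. The equivalence, for a single-bit mask (the value shown is illustrative):

	/* with e.g. #define IDE_AFLAG_ADDRESS_VALID (1 << 7) */
	set_bit(ilog2(IDE_AFLAG_ADDRESS_VALID), &drive->atapi_flags);
	/* sets the same bit as the plain, non-atomic form: */
	drive->atapi_flags |= IDE_AFLAG_ADDRESS_VALID;
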
@@ -558,19 +435,6 @@ static void ide_tape_handle_dsc(ide_drive_t *drive)
558 idetape_postpone_request(drive); 435 idetape_postpone_request(drive);
559} 436}
560 437
561static int ide_tape_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
562 unsigned int bcount, int write)
563{
564 unsigned int bleft;
565
566 if (write)
567 bleft = idetape_output_buffers(drive, pc, bcount);
568 else
569 bleft = idetape_input_buffers(drive, pc, bcount);
570
571 return bcount - bleft;
572}
573
574/* 438/*
575 * Packet Command Interface 439 * Packet Command Interface
576 * 440 *
@@ -622,6 +486,8 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
622 486
623 if (pc->retries > IDETAPE_MAX_PC_RETRIES || 487 if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
624 (pc->flags & PC_FLAG_ABORT)) { 488 (pc->flags & PC_FLAG_ABORT)) {
489 unsigned int done = blk_rq_bytes(drive->hwif->rq);
490
625 /* 491 /*
626 * We will "abort" retrying a packet command in case legitimate 492 * We will "abort" retrying a packet command in case legitimate
627 * error code was received (crossing a filemark, or end of the 493 * error code was received (crossing a filemark, or end of the
@@ -641,8 +507,10 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
641 /* Giving up */ 507 /* Giving up */
642 pc->error = IDE_DRV_ERROR_GENERAL; 508 pc->error = IDE_DRV_ERROR_GENERAL;
643 } 509 }
510
644 drive->failed_pc = NULL; 511 drive->failed_pc = NULL;
645 drive->pc_callback(drive, 0); 512 drive->pc_callback(drive, 0);
513 ide_complete_rq(drive, -EIO, done);
646 return ide_stopped; 514 return ide_stopped;
647 } 515 }
648 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]); 516 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
@@ -695,7 +563,7 @@ static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
695 printk(KERN_ERR "ide-tape: %s: I/O error, ", 563 printk(KERN_ERR "ide-tape: %s: I/O error, ",
696 tape->name); 564 tape->name);
697 /* Retry operation */ 565 /* Retry operation */
698 ide_retry_pc(drive, tape->disk); 566 ide_retry_pc(drive);
699 return ide_stopped; 567 return ide_stopped;
700 } 568 }
701 pc->error = 0; 569 pc->error = 0;
@@ -711,27 +579,22 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
711 struct ide_atapi_pc *pc, struct request *rq, 579 struct ide_atapi_pc *pc, struct request *rq,
712 u8 opcode) 580 u8 opcode)
713{ 581{
714 struct idetape_bh *bh = (struct idetape_bh *)rq->special; 582 unsigned int length = blk_rq_sectors(rq);
715 unsigned int length = rq->current_nr_sectors;
716 583
717 ide_init_pc(pc); 584 ide_init_pc(pc);
718 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]); 585 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
719 pc->c[1] = 1; 586 pc->c[1] = 1;
720 pc->bh = bh;
721 pc->buf = NULL; 587 pc->buf = NULL;
722 pc->buf_size = length * tape->blk_size; 588 pc->buf_size = length * tape->blk_size;
723 pc->req_xfer = pc->buf_size; 589 pc->req_xfer = pc->buf_size;
724 if (pc->req_xfer == tape->buffer_size) 590 if (pc->req_xfer == tape->buffer_size)
725 pc->flags |= PC_FLAG_DMA_OK; 591 pc->flags |= PC_FLAG_DMA_OK;
726 592
727 if (opcode == READ_6) { 593 if (opcode == READ_6)
728 pc->c[0] = READ_6; 594 pc->c[0] = READ_6;
729 atomic_set(&bh->b_count, 0); 595 else if (opcode == WRITE_6) {
730 } else if (opcode == WRITE_6) {
731 pc->c[0] = WRITE_6; 596 pc->c[0] = WRITE_6;
732 pc->flags |= PC_FLAG_WRITING; 597 pc->flags |= PC_FLAG_WRITING;
733 pc->b_data = bh->b_data;
734 pc->b_count = atomic_read(&bh->b_count);
735 } 598 }
736 599
737 memcpy(rq->cmd, pc->c, 12); 600 memcpy(rq->cmd, pc->c, 12);
@@ -747,12 +610,10 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
747 struct ide_cmd cmd; 610 struct ide_cmd cmd;
748 u8 stat; 611 u8 stat;
749 612
750 debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %lu," 613 debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %u\n",
751 " current_nr_sectors: %u\n", 614 (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq));
752 (unsigned long long)rq->sector, rq->nr_sectors,
753 rq->current_nr_sectors);
754 615
755 if (!blk_special_request(rq)) { 616 if (!(blk_special_request(rq) || blk_sense_request(rq))) {
756 /* We do not support buffer cache originated requests. */ 617 /* We do not support buffer cache originated requests. */
757 printk(KERN_NOTICE "ide-tape: %s: Unsupported request in " 618 printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
758 "request queue (%d)\n", drive->name, rq->cmd_type); 619 "request queue (%d)\n", drive->name, rq->cmd_type);
@@ -788,15 +649,15 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
788 649
789 if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 && 650 if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 &&
790 (rq->cmd[13] & REQ_IDETAPE_PC2) == 0) 651 (rq->cmd[13] & REQ_IDETAPE_PC2) == 0)
791 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); 652 drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
792 653
793 if (drive->dev_flags & IDE_DFLAG_POST_RESET) { 654 if (drive->dev_flags & IDE_DFLAG_POST_RESET) {
794 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); 655 drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
795 drive->dev_flags &= ~IDE_DFLAG_POST_RESET; 656 drive->dev_flags &= ~IDE_DFLAG_POST_RESET;
796 } 657 }
797 658
798 if (!test_and_clear_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags) && 659 if (!(drive->atapi_flags & IDE_AFLAG_IGNORE_DSC) &&
799 (stat & ATA_DSC) == 0) { 660 !(stat & ATA_DSC)) {
800 if (postponed_rq == NULL) { 661 if (postponed_rq == NULL) {
801 tape->dsc_polling_start = jiffies; 662 tape->dsc_polling_start = jiffies;
802 tape->dsc_poll_freq = tape->best_dsc_rw_freq; 663 tape->dsc_poll_freq = tape->best_dsc_rw_freq;
@@ -816,7 +677,9 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
816 tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW; 677 tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
817 idetape_postpone_request(drive); 678 idetape_postpone_request(drive);
818 return ide_stopped; 679 return ide_stopped;
819 } 680 } else
681 drive->atapi_flags &= ~IDE_AFLAG_IGNORE_DSC;
682
820 if (rq->cmd[13] & REQ_IDETAPE_READ) { 683 if (rq->cmd[13] & REQ_IDETAPE_READ) {
821 pc = &tape->queued_pc; 684 pc = &tape->queued_pc;
822 ide_tape_create_rw_cmd(tape, pc, rq, READ_6); 685 ide_tape_create_rw_cmd(tape, pc, rq, READ_6);
@@ -828,7 +691,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
828 goto out; 691 goto out;
829 } 692 }
830 if (rq->cmd[13] & REQ_IDETAPE_PC1) { 693 if (rq->cmd[13] & REQ_IDETAPE_PC1) {
831 pc = (struct ide_atapi_pc *) rq->buffer; 694 pc = (struct ide_atapi_pc *)rq->special;
832 rq->cmd[13] &= ~(REQ_IDETAPE_PC1); 695 rq->cmd[13] &= ~(REQ_IDETAPE_PC1);
833 rq->cmd[13] |= REQ_IDETAPE_PC2; 696 rq->cmd[13] |= REQ_IDETAPE_PC2;
834 goto out; 697 goto out;
@@ -840,6 +703,9 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
840 BUG(); 703 BUG();
841 704
842out: 705out:
706 /* prepare sense request for this command */
707 ide_prep_sense(drive, rq);
708
843 memset(&cmd, 0, sizeof(cmd)); 709 memset(&cmd, 0, sizeof(cmd));
844 710
845 if (rq_data_dir(rq)) 711 if (rq_data_dir(rq))
@@ -847,167 +713,10 @@ out:
847 713
848 cmd.rq = rq; 714 cmd.rq = rq;
849 715
850 return ide_tape_issue_pc(drive, &cmd, pc); 716 ide_init_sg_cmd(&cmd, pc->req_xfer);
851} 717 ide_map_sg(drive, &cmd);
852
853/*
854 * The function below uses __get_free_pages to allocate a data buffer of size
855 * tape->buffer_size (or a bit more). We attempt to combine sequential pages as
856 * much as possible.
857 *
858 * It returns a pointer to the newly allocated buffer, or NULL in case of
859 * failure.
860 */
861static struct idetape_bh *ide_tape_kmalloc_buffer(idetape_tape_t *tape,
862 int full, int clear)
863{
864 struct idetape_bh *prev_bh, *bh, *merge_bh;
865 int pages = tape->pages_per_buffer;
866 unsigned int order, b_allocd;
867 char *b_data = NULL;
868
869 merge_bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
870 bh = merge_bh;
871 if (bh == NULL)
872 goto abort;
873
874 order = fls(pages) - 1;
875 bh->b_data = (char *) __get_free_pages(GFP_KERNEL, order);
876 if (!bh->b_data)
877 goto abort;
878 b_allocd = (1 << order) * PAGE_SIZE;
879 pages &= (order-1);
880
881 if (clear)
882 memset(bh->b_data, 0, b_allocd);
883 bh->b_reqnext = NULL;
884 bh->b_size = b_allocd;
885 atomic_set(&bh->b_count, full ? bh->b_size : 0);
886
887 while (pages) {
888 order = fls(pages) - 1;
889 b_data = (char *) __get_free_pages(GFP_KERNEL, order);
890 if (!b_data)
891 goto abort;
892 b_allocd = (1 << order) * PAGE_SIZE;
893
894 if (clear)
895 memset(b_data, 0, b_allocd);
896
897 /* newly allocated page frames below buffer header or ...*/
898 if (bh->b_data == b_data + b_allocd) {
899 bh->b_size += b_allocd;
900 bh->b_data -= b_allocd;
901 if (full)
902 atomic_add(b_allocd, &bh->b_count);
903 continue;
904 }
905 /* they are above the header */
906 if (b_data == bh->b_data + bh->b_size) {
907 bh->b_size += b_allocd;
908 if (full)
909 atomic_add(b_allocd, &bh->b_count);
910 continue;
911 }
912 prev_bh = bh;
913 bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
914 if (!bh) {
915 free_pages((unsigned long) b_data, order);
916 goto abort;
917 }
918 bh->b_reqnext = NULL;
919 bh->b_data = b_data;
920 bh->b_size = b_allocd;
921 atomic_set(&bh->b_count, full ? bh->b_size : 0);
922 prev_bh->b_reqnext = bh;
923
924 pages &= (order-1);
925 }
926
927 bh->b_size -= tape->excess_bh_size;
928 if (full)
929 atomic_sub(tape->excess_bh_size, &bh->b_count);
930 return merge_bh;
931abort:
932 ide_tape_kfree_buffer(tape);
933 return NULL;
934}
935
936static int idetape_copy_stage_from_user(idetape_tape_t *tape,
937 const char __user *buf, int n)
938{
939 struct idetape_bh *bh = tape->bh;
940 int count;
941 int ret = 0;
942
943 while (n) {
944 if (bh == NULL) {
945 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
946 __func__);
947 return 1;
948 }
949 count = min((unsigned int)
950 (bh->b_size - atomic_read(&bh->b_count)),
951 (unsigned int)n);
952 if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf,
953 count))
954 ret = 1;
955 n -= count;
956 atomic_add(count, &bh->b_count);
957 buf += count;
958 if (atomic_read(&bh->b_count) == bh->b_size) {
959 bh = bh->b_reqnext;
960 if (bh)
961 atomic_set(&bh->b_count, 0);
962 }
963 }
964 tape->bh = bh;
965 return ret;
966}
967 718
968static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf, 719 return ide_tape_issue_pc(drive, &cmd, pc);
969 int n)
970{
971 struct idetape_bh *bh = tape->bh;
972 int count;
973 int ret = 0;
974
975 while (n) {
976 if (bh == NULL) {
977 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
978 __func__);
979 return 1;
980 }
981 count = min(tape->b_count, n);
982 if (copy_to_user(buf, tape->b_data, count))
983 ret = 1;
984 n -= count;
985 tape->b_data += count;
986 tape->b_count -= count;
987 buf += count;
988 if (!tape->b_count) {
989 bh = bh->b_reqnext;
990 tape->bh = bh;
991 if (bh) {
992 tape->b_data = bh->b_data;
993 tape->b_count = atomic_read(&bh->b_count);
994 }
995 }
996 }
997 return ret;
998}
999
1000static void idetape_init_merge_buffer(idetape_tape_t *tape)
1001{
1002 struct idetape_bh *bh = tape->merge_bh;
1003 tape->bh = tape->merge_bh;
1004
1005 if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
1006 atomic_set(&bh->b_count, 0);
1007 else {
1008 tape->b_data = bh->b_data;
1009 tape->b_count = atomic_read(&bh->b_count);
1010 }
1011} 720}
1012 721
1013/* 722/*
@@ -1030,7 +739,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
1030 int load_attempted = 0; 739 int load_attempted = 0;
1031 740
1032 /* Wait for the tape to become ready */ 741 /* Wait for the tape to become ready */
1033 set_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags); 742 set_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT), &drive->atapi_flags);
1034 timeout += jiffies; 743 timeout += jiffies;
1035 while (time_before(jiffies, timeout)) { 744 while (time_before(jiffies, timeout)) {
1036 if (ide_do_test_unit_ready(drive, disk) == 0) 745 if (ide_do_test_unit_ready(drive, disk) == 0)
@@ -1106,11 +815,11 @@ static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
1106 if (tape->chrdev_dir != IDETAPE_DIR_READ) 815 if (tape->chrdev_dir != IDETAPE_DIR_READ)
1107 return; 816 return;
1108 817
1109 clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags); 818 clear_bit(ilog2(IDE_AFLAG_FILEMARK), &drive->atapi_flags);
1110 tape->merge_bh_size = 0; 819 tape->valid = 0;
1111 if (tape->merge_bh != NULL) { 820 if (tape->buf != NULL) {
1112 ide_tape_kfree_buffer(tape); 821 kfree(tape->buf);
1113 tape->merge_bh = NULL; 822 tape->buf = NULL;
1114 } 823 }
1115 824
1116 tape->chrdev_dir = IDETAPE_DIR_NONE; 825 tape->chrdev_dir = IDETAPE_DIR_NONE;
@@ -1164,36 +873,44 @@ static void ide_tape_discard_merge_buffer(ide_drive_t *drive,
1164 * Generate a read/write request for the block device interface and wait for it 873 * Generate a read/write request for the block device interface and wait for it
1165 * to be serviced. 874 * to be serviced.
1166 */ 875 */
1167static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks, 876static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
1168 struct idetape_bh *bh)
1169{ 877{
1170 idetape_tape_t *tape = drive->driver_data; 878 idetape_tape_t *tape = drive->driver_data;
1171 struct request *rq; 879 struct request *rq;
1172 int ret, errors; 880 int ret;
1173 881
1174 debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd); 882 debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
883 BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
884 BUG_ON(size < 0 || size % tape->blk_size);
1175 885
1176 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 886 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
1177 rq->cmd_type = REQ_TYPE_SPECIAL; 887 rq->cmd_type = REQ_TYPE_SPECIAL;
1178 rq->cmd[13] = cmd; 888 rq->cmd[13] = cmd;
1179 rq->rq_disk = tape->disk; 889 rq->rq_disk = tape->disk;
1180 rq->special = (void *)bh; 890 rq->__sector = tape->first_frame;
1181 rq->sector = tape->first_frame;
1182 rq->nr_sectors = blocks;
1183 rq->current_nr_sectors = blocks;
1184 blk_execute_rq(drive->queue, tape->disk, rq, 0);
1185 891
1186 errors = rq->errors; 892 if (size) {
1187 ret = tape->blk_size * (blocks - rq->current_nr_sectors); 893 ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
1188 blk_put_request(rq); 894 __GFP_WAIT);
895 if (ret)
896 goto out_put;
897 }
1189 898
1190 if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0) 899 blk_execute_rq(drive->queue, tape->disk, rq, 0);
1191 return 0;
1192 900
1193 if (tape->merge_bh) 901 /* calculate the number of transferred bytes and update buffer state */
1194 idetape_init_merge_buffer(tape); 902 size -= rq->resid_len;
1195 if (errors == IDE_DRV_ERROR_GENERAL) 903 tape->cur = tape->buf;
1196 return -EIO; 904 if (cmd == REQ_IDETAPE_READ)
905 tape->valid = size;
906 else
907 tape->valid = 0;
908
909 ret = size;
910 if (rq->errors == IDE_DRV_ERROR_GENERAL)
911 ret = -EIO;
912out_put:
913 blk_put_request(rq);
1197 return ret; 914 return ret;
1198} 915}
1199 916
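
The rewritten helper leans on two block-layer facilities: blk_rq_map_kern() maps the flat staging buffer into the request, and rq->resid_len holds, once blk_execute_rq() returns, the number of bytes not transferred, so size - rq->resid_len is the actual transfer count. The caller-side contract, as used by the chrdev paths below (sketch):

	/* refill the staging buffer with one full-sized read */
	int n = idetape_queue_rw_tail(drive, REQ_IDETAPE_READ,
				      tape->buffer_size);
	if (n <= 0)
		/* 0: nothing transferred (e.g. filemark), <0: error */
		return n;
	/* on return, tape->cur == tape->buf and tape->valid == n */
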
@@ -1230,153 +947,87 @@ static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
1230 pc->flags |= PC_FLAG_WAIT_FOR_DSC; 947 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1231} 948}
1232 949
1233/* Queue up a character device originated write request. */
1234static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
1235{
1236 idetape_tape_t *tape = drive->driver_data;
1237
1238 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
1239
1240 return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
1241 blocks, tape->merge_bh);
1242}
1243
1244static void ide_tape_flush_merge_buffer(ide_drive_t *drive) 950static void ide_tape_flush_merge_buffer(ide_drive_t *drive)
1245{ 951{
1246 idetape_tape_t *tape = drive->driver_data; 952 idetape_tape_t *tape = drive->driver_data;
1247 int blocks, min;
1248 struct idetape_bh *bh;
1249 953
1250 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) { 954 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
1251 printk(KERN_ERR "ide-tape: bug: Trying to empty merge buffer" 955 printk(KERN_ERR "ide-tape: bug: Trying to empty merge buffer"
1252 " but we are not writing.\n"); 956 " but we are not writing.\n");
1253 return; 957 return;
1254 } 958 }
1255 if (tape->merge_bh_size > tape->buffer_size) { 959 if (tape->buf) {
1256 printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n"); 960 size_t aligned = roundup(tape->valid, tape->blk_size);
1257 tape->merge_bh_size = tape->buffer_size; 961
1258 } 962 memset(tape->cur, 0, aligned - tape->valid);
1259 if (tape->merge_bh_size) { 963 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, aligned);
1260 blocks = tape->merge_bh_size / tape->blk_size; 964 kfree(tape->buf);
1261 if (tape->merge_bh_size % tape->blk_size) { 965 tape->buf = NULL;
1262 unsigned int i;
1263
1264 blocks++;
1265 i = tape->blk_size - tape->merge_bh_size %
1266 tape->blk_size;
1267 bh = tape->bh->b_reqnext;
1268 while (bh) {
1269 atomic_set(&bh->b_count, 0);
1270 bh = bh->b_reqnext;
1271 }
1272 bh = tape->bh;
1273 while (i) {
1274 if (bh == NULL) {
1275 printk(KERN_INFO "ide-tape: bug,"
1276 " bh NULL\n");
1277 break;
1278 }
1279 min = min(i, (unsigned int)(bh->b_size -
1280 atomic_read(&bh->b_count)));
1281 memset(bh->b_data + atomic_read(&bh->b_count),
1282 0, min);
1283 atomic_add(min, &bh->b_count);
1284 i -= min;
1285 bh = bh->b_reqnext;
1286 }
1287 }
1288 (void) idetape_add_chrdev_write_request(drive, blocks);
1289 tape->merge_bh_size = 0;
1290 }
1291 if (tape->merge_bh != NULL) {
1292 ide_tape_kfree_buffer(tape);
1293 tape->merge_bh = NULL;
1294 } 966 }
1295 tape->chrdev_dir = IDETAPE_DIR_NONE; 967 tape->chrdev_dir = IDETAPE_DIR_NONE;
1296} 968}
1297 969
1298static int idetape_init_read(ide_drive_t *drive) 970static int idetape_init_rw(ide_drive_t *drive, int dir)
1299{ 971{
1300 idetape_tape_t *tape = drive->driver_data; 972 idetape_tape_t *tape = drive->driver_data;
1301 int bytes_read; 973 int rc;
1302
1303 /* Initialize read operation */
1304 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
1305 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
1306 ide_tape_flush_merge_buffer(drive);
1307 idetape_flush_tape_buffers(drive);
1308 }
1309 if (tape->merge_bh || tape->merge_bh_size) {
1310 printk(KERN_ERR "ide-tape: merge_bh_size should be"
1311 " 0 now\n");
1312 tape->merge_bh_size = 0;
1313 }
1314 tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
1315 if (!tape->merge_bh)
1316 return -ENOMEM;
1317 tape->chrdev_dir = IDETAPE_DIR_READ;
1318 974
1319 /* 975 BUG_ON(dir != IDETAPE_DIR_READ && dir != IDETAPE_DIR_WRITE);
1320 * Issue a read 0 command to ensure that DSC handshake is
1321 * switched from completion mode to buffer available mode.
1322 * No point in issuing this if DSC overlap isn't supported, some
1323 * drives (Seagate STT3401A) will return an error.
1324 */
1325 if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) {
1326 bytes_read = idetape_queue_rw_tail(drive,
1327 REQ_IDETAPE_READ, 0,
1328 tape->merge_bh);
1329 if (bytes_read < 0) {
1330 ide_tape_kfree_buffer(tape);
1331 tape->merge_bh = NULL;
1332 tape->chrdev_dir = IDETAPE_DIR_NONE;
1333 return bytes_read;
1334 }
1335 }
1336 }
1337 976
1338 return 0; 977 if (tape->chrdev_dir == dir)
1339} 978 return 0;
1340 979
1341/* called from idetape_chrdev_read() to service a chrdev read request. */ 980 if (tape->chrdev_dir == IDETAPE_DIR_READ)
1342static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks) 981 ide_tape_discard_merge_buffer(drive, 1);
1343{ 982 else if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
1344 idetape_tape_t *tape = drive->driver_data; 983 ide_tape_flush_merge_buffer(drive);
984 idetape_flush_tape_buffers(drive);
985 }
1345 986
1346 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks); 987 if (tape->buf || tape->valid) {
988 printk(KERN_ERR "ide-tape: valid should be 0 now\n");
989 tape->valid = 0;
990 }
1347 991
1348 /* If we are at a filemark, return a read length of 0 */ 992 tape->buf = kmalloc(tape->buffer_size, GFP_KERNEL);
1349 if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) 993 if (!tape->buf)
1350 return 0; 994 return -ENOMEM;
995 tape->chrdev_dir = dir;
996 tape->cur = tape->buf;
1351 997
1352 idetape_init_read(drive); 998 /*
999 * Issue a 0 rw command to ensure that DSC handshake is
1000 * switched from completion mode to buffer available mode. No
1001 * point in issuing this if DSC overlap isn't supported, some
1002 * drives (Seagate STT3401A) will return an error.
1003 */
1004 if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) {
1005 int cmd = dir == IDETAPE_DIR_READ ? REQ_IDETAPE_READ
1006 : REQ_IDETAPE_WRITE;
1007
1008 rc = idetape_queue_rw_tail(drive, cmd, 0);
1009 if (rc < 0) {
1010 kfree(tape->buf);
1011 tape->buf = NULL;
1012 tape->chrdev_dir = IDETAPE_DIR_NONE;
1013 return rc;
1014 }
1015 }
1353 1016
1354 return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks, 1017 return 0;
1355 tape->merge_bh);
1356} 1018}
1357 1019
1358static void idetape_pad_zeros(ide_drive_t *drive, int bcount) 1020static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
1359{ 1021{
1360 idetape_tape_t *tape = drive->driver_data; 1022 idetape_tape_t *tape = drive->driver_data;
1361 struct idetape_bh *bh; 1023
1362 int blocks; 1024 memset(tape->buf, 0, tape->buffer_size);
1363 1025
1364 while (bcount) { 1026 while (bcount) {
1365 unsigned int count; 1027 unsigned int count = min(tape->buffer_size, bcount);
1366 1028
1367 bh = tape->merge_bh; 1029 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, count);
1368 count = min(tape->buffer_size, bcount);
1369 bcount -= count; 1030 bcount -= count;
1370 blocks = count / tape->blk_size;
1371 while (count) {
1372 atomic_set(&bh->b_count,
1373 min(count, (unsigned int)bh->b_size));
1374 memset(bh->b_data, 0, atomic_read(&bh->b_count));
1375 count -= atomic_read(&bh->b_count);
1376 bh = bh->b_reqnext;
1377 }
1378 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
1379 tape->merge_bh);
1380 } 1031 }
1381} 1032}
1382 1033
@@ -1456,8 +1107,9 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
1456 } 1107 }
1457 1108
1458 if (tape->chrdev_dir == IDETAPE_DIR_READ) { 1109 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
1459 tape->merge_bh_size = 0; 1110 tape->valid = 0;
1460 if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) 1111 if (test_and_clear_bit(ilog2(IDE_AFLAG_FILEMARK),
1112 &drive->atapi_flags))
1461 ++count; 1113 ++count;
1462 ide_tape_discard_merge_buffer(drive, 0); 1114 ide_tape_discard_merge_buffer(drive, 0);
1463 } 1115 }
@@ -1505,61 +1157,56 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
1505{ 1157{
1506 struct ide_tape_obj *tape = file->private_data; 1158 struct ide_tape_obj *tape = file->private_data;
1507 ide_drive_t *drive = tape->drive; 1159 ide_drive_t *drive = tape->drive;
1508 ssize_t bytes_read, temp, actually_read = 0, rc; 1160 size_t done = 0;
1509 ssize_t ret = 0; 1161 ssize_t ret = 0;
1510 u16 ctl = *(u16 *)&tape->caps[12]; 1162 int rc;
1511 1163
1512 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count); 1164 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
1513 1165
1514 if (tape->chrdev_dir != IDETAPE_DIR_READ) { 1166 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
1515 if (test_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags)) 1167 if (test_bit(ilog2(IDE_AFLAG_DETECT_BS), &drive->atapi_flags))
1516 if (count > tape->blk_size && 1168 if (count > tape->blk_size &&
1517 (count % tape->blk_size) == 0) 1169 (count % tape->blk_size) == 0)
1518 tape->user_bs_factor = count / tape->blk_size; 1170 tape->user_bs_factor = count / tape->blk_size;
1519 } 1171 }
1520 rc = idetape_init_read(drive); 1172
1173 rc = idetape_init_rw(drive, IDETAPE_DIR_READ);
1521 if (rc < 0) 1174 if (rc < 0)
1522 return rc; 1175 return rc;
1523 if (count == 0) 1176
1524 return (0); 1177 while (done < count) {
1525 if (tape->merge_bh_size) { 1178 size_t todo;
1526 actually_read = min((unsigned int)(tape->merge_bh_size), 1179
1527 (unsigned int)count); 1180 /* refill if staging buffer is empty */
1528 if (idetape_copy_stage_to_user(tape, buf, actually_read)) 1181 if (!tape->valid) {
1529 ret = -EFAULT; 1182 /* If we are at a filemark, nothing more to read */
1530 buf += actually_read; 1183 if (test_bit(ilog2(IDE_AFLAG_FILEMARK),
1531 tape->merge_bh_size -= actually_read; 1184 &drive->atapi_flags))
1532 count -= actually_read; 1185 break;
1533 } 1186 /* read */
1534 while (count >= tape->buffer_size) { 1187 if (idetape_queue_rw_tail(drive, REQ_IDETAPE_READ,
1535 bytes_read = idetape_add_chrdev_read_request(drive, ctl); 1188 tape->buffer_size) <= 0)
1536 if (bytes_read <= 0) 1189 break;
1537 goto finish; 1190 }
1538 if (idetape_copy_stage_to_user(tape, buf, bytes_read)) 1191
1539 ret = -EFAULT; 1192 /* copy out */
1540 buf += bytes_read; 1193 todo = min_t(size_t, count - done, tape->valid);
1541 count -= bytes_read; 1194 if (copy_to_user(buf + done, tape->cur, todo))
1542 actually_read += bytes_read;
1543 }
1544 if (count) {
1545 bytes_read = idetape_add_chrdev_read_request(drive, ctl);
1546 if (bytes_read <= 0)
1547 goto finish;
1548 temp = min((unsigned long)count, (unsigned long)bytes_read);
1549 if (idetape_copy_stage_to_user(tape, buf, temp))
1550 ret = -EFAULT; 1195 ret = -EFAULT;
1551 actually_read += temp; 1196
1552 tape->merge_bh_size = bytes_read-temp; 1197 tape->cur += todo;
1198 tape->valid -= todo;
1199 done += todo;
1553 } 1200 }
1554finish: 1201
1555 if (!actually_read && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) { 1202 if (!done && test_bit(ilog2(IDE_AFLAG_FILEMARK), &drive->atapi_flags)) {
1556 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name); 1203 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
1557 1204
1558 idetape_space_over_filemarks(drive, MTFSF, 1); 1205 idetape_space_over_filemarks(drive, MTFSF, 1);
1559 return 0; 1206 return 0;
1560 } 1207 }
1561 1208
1562 return ret ? ret : actually_read; 1209 return ret ? ret : done;
1563} 1210}
1564 1211
1565static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf, 1212static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
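
The loop above gives read() the classic tape semantics: the staging buffer is refilled only when empty, a filemark stops the transfer early, and a read() that moved no data spaces over the filemark (MTFSF 1) so the next call starts the following file. Seen from userspace (sketch; consume() is illustrative):

	ssize_t n;

	/* each file on the tape ends with one 0-byte read() */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		consume(buf, n);
	/* n == 0: filemark reached and skipped;
	 * the next read() returns data from the next file */
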
@@ -1567,9 +1214,9 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
1567{ 1214{
1568 struct ide_tape_obj *tape = file->private_data; 1215 struct ide_tape_obj *tape = file->private_data;
1569 ide_drive_t *drive = tape->drive; 1216 ide_drive_t *drive = tape->drive;
1570 ssize_t actually_written = 0; 1217 size_t done = 0;
1571 ssize_t ret = 0; 1218 ssize_t ret = 0;
1572 u16 ctl = *(u16 *)&tape->caps[12]; 1219 int rc;
1573 1220
1574 /* The drive is write protected. */ 1221 /* The drive is write protected. */
1575 if (tape->write_prot) 1222 if (tape->write_prot)
@@ -1578,80 +1225,31 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
1578 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count); 1225 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
1579 1226
1580 /* Initialize write operation */ 1227 /* Initialize write operation */
1581 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) { 1228 rc = idetape_init_rw(drive, IDETAPE_DIR_WRITE);
1582 if (tape->chrdev_dir == IDETAPE_DIR_READ) 1229 if (rc < 0)
1583 ide_tape_discard_merge_buffer(drive, 1); 1230 return rc;
1584 if (tape->merge_bh || tape->merge_bh_size) {
1585 printk(KERN_ERR "ide-tape: merge_bh_size "
1586 "should be 0 now\n");
1587 tape->merge_bh_size = 0;
1588 }
1589 tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
1590 if (!tape->merge_bh)
1591 return -ENOMEM;
1592 tape->chrdev_dir = IDETAPE_DIR_WRITE;
1593 idetape_init_merge_buffer(tape);
1594 1231
1595 /* 1232 while (done < count) {
1596 * Issue a write 0 command to ensure that DSC handshake is 1233 size_t todo;
1597 * switched from completion mode to buffer available mode. No 1234
1598 * point in issuing this if DSC overlap isn't supported, some 1235 /* flush if staging buffer is full */
1599 * drives (Seagate STT3401A) will return an error. 1236 if (tape->valid == tape->buffer_size &&
1600 */ 1237 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
1601 if (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) { 1238 tape->buffer_size) <= 0)
1602 ssize_t retval = idetape_queue_rw_tail(drive, 1239 return rc;
1603 REQ_IDETAPE_WRITE, 0, 1240
1604 tape->merge_bh); 1241 /* copy in */
1605 if (retval < 0) { 1242 todo = min_t(size_t, count - done,
1606 ide_tape_kfree_buffer(tape); 1243 tape->buffer_size - tape->valid);
1607 tape->merge_bh = NULL; 1244 if (copy_from_user(tape->cur, buf + done, todo))
1608 tape->chrdev_dir = IDETAPE_DIR_NONE;
1609 return retval;
1610 }
1611 }
1612 }
1613 if (count == 0)
1614 return (0);
1615 if (tape->merge_bh_size) {
1616 if (tape->merge_bh_size >= tape->buffer_size) {
1617 printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
1618 tape->merge_bh_size = 0;
1619 }
1620 actually_written = min((unsigned int)
1621 (tape->buffer_size - tape->merge_bh_size),
1622 (unsigned int)count);
1623 if (idetape_copy_stage_from_user(tape, buf, actually_written))
1624 ret = -EFAULT;
1625 buf += actually_written;
1626 tape->merge_bh_size += actually_written;
1627 count -= actually_written;
1628
1629 if (tape->merge_bh_size == tape->buffer_size) {
1630 ssize_t retval;
1631 tape->merge_bh_size = 0;
1632 retval = idetape_add_chrdev_write_request(drive, ctl);
1633 if (retval <= 0)
1634 return (retval);
1635 }
1636 }
1637 while (count >= tape->buffer_size) {
1638 ssize_t retval;
1639 if (idetape_copy_stage_from_user(tape, buf, tape->buffer_size))
1640 ret = -EFAULT;
1641 buf += tape->buffer_size;
1642 count -= tape->buffer_size;
1643 retval = idetape_add_chrdev_write_request(drive, ctl);
1644 actually_written += tape->buffer_size;
1645 if (retval <= 0)
1646 return (retval);
1647 }
1648 if (count) {
1649 actually_written += count;
1650 if (idetape_copy_stage_from_user(tape, buf, count))
1651 ret = -EFAULT; 1245 ret = -EFAULT;
1652 tape->merge_bh_size += count; 1246
1247 tape->cur += todo;
1248 tape->valid += todo;
1249 done += todo;
1653 } 1250 }
1654 return ret ? ret : actually_written; 1251
1252 return ret ? ret : done;
1655} 1253}
1656 1254
1657static int idetape_write_filemark(ide_drive_t *drive) 1255static int idetape_write_filemark(ide_drive_t *drive)
@@ -1735,7 +1333,8 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
1735 ide_tape_discard_merge_buffer(drive, 0); 1333 ide_tape_discard_merge_buffer(drive, 0);
1736 retval = ide_do_start_stop(drive, disk, !IDETAPE_LU_LOAD_MASK); 1334 retval = ide_do_start_stop(drive, disk, !IDETAPE_LU_LOAD_MASK);
1737 if (!retval) 1335 if (!retval)
1738 clear_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags); 1336 clear_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT),
1337 &drive->atapi_flags);
1739 return retval; 1338 return retval;
1740 case MTNOP: 1339 case MTNOP:
1741 ide_tape_discard_merge_buffer(drive, 0); 1340 ide_tape_discard_merge_buffer(drive, 0);
@@ -1757,9 +1356,11 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
1757 mt_count % tape->blk_size) 1356 mt_count % tape->blk_size)
1758 return -EIO; 1357 return -EIO;
1759 tape->user_bs_factor = mt_count / tape->blk_size; 1358 tape->user_bs_factor = mt_count / tape->blk_size;
1760 clear_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags); 1359 clear_bit(ilog2(IDE_AFLAG_DETECT_BS),
1360 &drive->atapi_flags);
1761 } else 1361 } else
1762 set_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags); 1362 set_bit(ilog2(IDE_AFLAG_DETECT_BS),
1363 &drive->atapi_flags);
1763 return 0; 1364 return 0;
1764 case MTSEEK: 1365 case MTSEEK:
1765 ide_tape_discard_merge_buffer(drive, 0); 1366 ide_tape_discard_merge_buffer(drive, 0);
@@ -1812,7 +1413,7 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
1812 idetape_flush_tape_buffers(drive); 1413 idetape_flush_tape_buffers(drive);
1813 } 1414 }
1814 if (cmd == MTIOCGET || cmd == MTIOCPOS) { 1415 if (cmd == MTIOCGET || cmd == MTIOCPOS) {
1815 block_offset = tape->merge_bh_size / 1416 block_offset = tape->valid /
1816 (tape->blk_size * tape->user_bs_factor); 1417 (tape->blk_size * tape->user_bs_factor);
1817 position = idetape_read_position(drive); 1418 position = idetape_read_position(drive);
1818 if (position < 0) 1419 if (position < 0)
@@ -1885,7 +1486,7 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
1885 return -ENXIO; 1486 return -ENXIO;
1886 1487
1887 lock_kernel(); 1488 lock_kernel();
1888 tape = ide_tape_chrdev_get(i); 1489 tape = ide_tape_get(NULL, true, i);
1889 if (!tape) { 1490 if (!tape) {
1890 unlock_kernel(); 1491 unlock_kernel();
1891 return -ENXIO; 1492 return -ENXIO;
@@ -1904,20 +1505,20 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
1904 1505
1905 filp->private_data = tape; 1506 filp->private_data = tape;
1906 1507
1907 if (test_and_set_bit(IDE_AFLAG_BUSY, &drive->atapi_flags)) { 1508 if (test_and_set_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags)) {
1908 retval = -EBUSY; 1509 retval = -EBUSY;
1909 goto out_put_tape; 1510 goto out_put_tape;
1910 } 1511 }
1911 1512
1912 retval = idetape_wait_ready(drive, 60 * HZ); 1513 retval = idetape_wait_ready(drive, 60 * HZ);
1913 if (retval) { 1514 if (retval) {
1914 clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags); 1515 clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
1915 printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name); 1516 printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
1916 goto out_put_tape; 1517 goto out_put_tape;
1917 } 1518 }
1918 1519
1919 idetape_read_position(drive); 1520 idetape_read_position(drive);
1920 if (!test_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags)) 1521 if (!test_bit(ilog2(IDE_AFLAG_ADDRESS_VALID), &drive->atapi_flags))
1921 (void)idetape_rewind_tape(drive); 1522 (void)idetape_rewind_tape(drive);
1922 1523
1923 /* Read block size and write protect status from drive. */ 1524 /* Read block size and write protect status from drive. */
@@ -1933,7 +1534,7 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
1933 if (tape->write_prot) { 1534 if (tape->write_prot) {
1934 if ((filp->f_flags & O_ACCMODE) == O_WRONLY || 1535 if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
1935 (filp->f_flags & O_ACCMODE) == O_RDWR) { 1536 (filp->f_flags & O_ACCMODE) == O_RDWR) {
1936 clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags); 1537 clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
1937 retval = -EROFS; 1538 retval = -EROFS;
1938 goto out_put_tape; 1539 goto out_put_tape;
1939 } 1540 }
@@ -1960,12 +1561,12 @@ static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
1960 idetape_tape_t *tape = drive->driver_data; 1561 idetape_tape_t *tape = drive->driver_data;
1961 1562
1962 ide_tape_flush_merge_buffer(drive); 1563 ide_tape_flush_merge_buffer(drive);
1963 tape->merge_bh = ide_tape_kmalloc_buffer(tape, 1, 0); 1564 tape->buf = kmalloc(tape->buffer_size, GFP_KERNEL);
1964 if (tape->merge_bh != NULL) { 1565 if (tape->buf != NULL) {
1965 idetape_pad_zeros(drive, tape->blk_size * 1566 idetape_pad_zeros(drive, tape->blk_size *
1966 (tape->user_bs_factor - 1)); 1567 (tape->user_bs_factor - 1));
1967 ide_tape_kfree_buffer(tape); 1568 kfree(tape->buf);
1968 tape->merge_bh = NULL; 1569 tape->buf = NULL;
1969 } 1570 }
1970 idetape_write_filemark(drive); 1571 idetape_write_filemark(drive);
1971 idetape_flush_tape_buffers(drive); 1572 idetape_flush_tape_buffers(drive);
@@ -1990,15 +1591,17 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
1990 ide_tape_discard_merge_buffer(drive, 1); 1591 ide_tape_discard_merge_buffer(drive, 1);
1991 } 1592 }
1992 1593
1993 if (minor < 128 && test_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags)) 1594 if (minor < 128 && test_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT),
1595 &drive->atapi_flags))
1994 (void) idetape_rewind_tape(drive); 1596 (void) idetape_rewind_tape(drive);
1597
1995 if (tape->chrdev_dir == IDETAPE_DIR_NONE) { 1598 if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
1996 if (tape->door_locked == DOOR_LOCKED) { 1599 if (tape->door_locked == DOOR_LOCKED) {
1997 if (!ide_set_media_lock(drive, tape->disk, 0)) 1600 if (!ide_set_media_lock(drive, tape->disk, 0))
1998 tape->door_locked = DOOR_UNLOCKED; 1601 tape->door_locked = DOOR_UNLOCKED;
1999 } 1602 }
2000 } 1603 }
2001 clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags); 1604 clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags);
2002 ide_tape_put(tape); 1605 ide_tape_put(tape);
2003 unlock_kernel(); 1606 unlock_kernel();
2004 return 0; 1607 return 0;
@@ -2159,8 +1762,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
2159 u16 *ctl = (u16 *)&tape->caps[12]; 1762 u16 *ctl = (u16 *)&tape->caps[12];
2160 1763
2161 drive->pc_callback = ide_tape_callback; 1764 drive->pc_callback = ide_tape_callback;
2162 drive->pc_update_buffers = idetape_update_buffers;
2163 drive->pc_io_buffers = ide_tape_io_buffers;
2164 1765
2165 drive->dev_flags |= IDE_DFLAG_DSC_OVERLAP; 1766 drive->dev_flags |= IDE_DFLAG_DSC_OVERLAP;
2166 1767
@@ -2191,11 +1792,6 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
2191 tape->buffer_size = *ctl * tape->blk_size; 1792 tape->buffer_size = *ctl * tape->blk_size;
2192 } 1793 }
2193 buffer_size = tape->buffer_size; 1794 buffer_size = tape->buffer_size;
2194 tape->pages_per_buffer = buffer_size / PAGE_SIZE;
2195 if (buffer_size % PAGE_SIZE) {
2196 tape->pages_per_buffer++;
2197 tape->excess_bh_size = PAGE_SIZE - buffer_size % PAGE_SIZE;
2198 }
2199 1795
2200 /* select the "best" DSC read/write polling freq */ 1796 /* select the "best" DSC read/write polling freq */
2201 speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]); 1797 speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
@@ -2238,7 +1834,7 @@ static void ide_tape_release(struct device *dev)
2238 ide_drive_t *drive = tape->drive; 1834 ide_drive_t *drive = tape->drive;
2239 struct gendisk *g = tape->disk; 1835 struct gendisk *g = tape->disk;
2240 1836
2241 BUG_ON(tape->merge_bh_size); 1837 BUG_ON(tape->valid);
2242 1838
2243 drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP; 1839 drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
2244 drive->driver_data = NULL; 1840 drive->driver_data = NULL;
@@ -2311,7 +1907,7 @@ static const struct file_operations idetape_fops = {
2311 1907
2312static int idetape_open(struct block_device *bdev, fmode_t mode) 1908static int idetape_open(struct block_device *bdev, fmode_t mode)
2313{ 1909{
2314 struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk); 1910 struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk, false, 0);
2315 1911
2316 if (!tape) 1912 if (!tape)
2317 return -ENXIO; 1913 return -ENXIO;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 4aa6223c11b..75b85a8cd2d 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -98,7 +98,6 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
98 if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) { 98 if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
99 ide_tf_dump(drive->name, cmd); 99 ide_tf_dump(drive->name, cmd);
100 tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); 100 tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
101 SELECT_MASK(drive, 0);
102 101
103 if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) { 102 if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) {
104 u8 data[2] = { cmd->tf.data, cmd->hob.data }; 103 u8 data[2] = { cmd->tf.data, cmd->hob.data };
@@ -166,7 +165,7 @@ static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
166 if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) { 165 if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
167 if (custom && tf->command == ATA_CMD_SET_MULTI) { 166 if (custom && tf->command == ATA_CMD_SET_MULTI) {
168 drive->mult_req = drive->mult_count = 0; 167 drive->mult_req = drive->mult_count = 0;
169 drive->special.b.recalibrate = 1; 168 drive->special_flags |= IDE_SFLAG_RECALIBRATE;
170 (void)ide_dump_status(drive, __func__, stat); 169 (void)ide_dump_status(drive, __func__, stat);
171 return ide_stopped; 170 return ide_stopped;
172 } else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) { 171 } else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
@@ -385,7 +384,7 @@ out_end:
385 if ((cmd->tf_flags & IDE_TFLAG_FS) == 0) 384 if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
386 ide_finish_cmd(drive, cmd, stat); 385 ide_finish_cmd(drive, cmd, stat);
387 else 386 else
388 ide_complete_rq(drive, 0, cmd->rq->nr_sectors << 9); 387 ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9);
389 return ide_stopped; 388 return ide_stopped;
390out_err: 389out_err:
391 ide_error_cmd(drive, cmd); 390 ide_error_cmd(drive, cmd);
@@ -424,7 +423,9 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
424 423
425 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 424 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
426 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 425 rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
427 rq->buffer = buf; 426
427 if (cmd->tf_flags & IDE_TFLAG_WRITE)
428 rq->cmd_flags |= REQ_RW;
428 429
429 /* 430 /*
430 * (ks) We transfer currently only whole sectors. 431 * (ks) We transfer currently only whole sectors.
@@ -432,18 +433,20 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
432 * if we would find a solution to transfer any size. 433 * if we would find a solution to transfer any size.
433 * To support special commands like READ LONG. 434 * To support special commands like READ LONG.
434 */ 435 */
435 rq->hard_nr_sectors = rq->nr_sectors = nsect; 436 if (nsect) {
436 rq->hard_cur_sectors = rq->current_nr_sectors = nsect; 437 error = blk_rq_map_kern(drive->queue, rq, buf,
437 438 nsect * SECTOR_SIZE, __GFP_WAIT);
438 if (cmd->tf_flags & IDE_TFLAG_WRITE) 439 if (error)
439 rq->cmd_flags |= REQ_RW; 440 goto put_req;
441 }
440 442
441 rq->special = cmd; 443 rq->special = cmd;
442 cmd->rq = rq; 444 cmd->rq = rq;
443 445
444 error = blk_execute_rq(drive->queue, NULL, rq, 0); 446 error = blk_execute_rq(drive->queue, NULL, rq, 0);
445 blk_put_request(rq);
446 447
448put_req:
449 blk_put_request(rq);
447 return error; 450 return error;
448} 451}
449 452
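
Same conversion as in ide-tape: rather than aiming rq->buffer at the caller's data, the buffer is formally mapped into the request, letting the block layer handle bouncing and segment setup. Note the ordering: REQ_RW is set before blk_rq_map_kern(), since the mapping helper consults the request's data direction when it has to copy through a bounce buffer, and the new put_req label keeps the request freed on the mapping-failure path. Condensed shape of the result:

	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
	if (cmd->tf_flags & IDE_TFLAG_WRITE)
		rq->cmd_flags |= REQ_RW;	/* direction first */
	if (nsect) {
		error = blk_rq_map_kern(drive->queue, rq, buf,
					nsect * SECTOR_SIZE, __GFP_WAIT);
		if (error)
			goto put_req;		/* don't leak the request */
	}
	error = blk_execute_rq(drive->queue, NULL, rq, 0);
put_req:
	blk_put_request(rq);
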
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 92c9b90931e..16d056939f9 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -211,6 +211,11 @@ static unsigned int ide_noflush;
211module_param_call(noflush, ide_set_dev_param_mask, NULL, &ide_noflush, 0); 211module_param_call(noflush, ide_set_dev_param_mask, NULL, &ide_noflush, 0);
212MODULE_PARM_DESC(noflush, "disable flush requests for a device"); 212MODULE_PARM_DESC(noflush, "disable flush requests for a device");
213 213
214static unsigned int ide_nohpa;
215
216module_param_call(nohpa, ide_set_dev_param_mask, NULL, &ide_nohpa, 0);
217MODULE_PARM_DESC(nohpa, "disable Host Protected Area for a device");
218
214static unsigned int ide_noprobe; 219static unsigned int ide_noprobe;
215 220
216module_param_call(noprobe, ide_set_dev_param_mask, NULL, &ide_noprobe, 0); 221module_param_call(noprobe, ide_set_dev_param_mask, NULL, &ide_noprobe, 0);
@@ -281,6 +286,11 @@ static void ide_dev_apply_params(ide_drive_t *drive, u8 unit)
281 drive->name); 286 drive->name);
282 drive->dev_flags |= IDE_DFLAG_NOFLUSH; 287 drive->dev_flags |= IDE_DFLAG_NOFLUSH;
283 } 288 }
289 if (ide_nohpa & (1 << i)) {
290 printk(KERN_INFO "ide: disabling Host Protected Area for %s\n",
291 drive->name);
292 drive->dev_flags |= IDE_DFLAG_NOHPA;
293 }
284 if (ide_noprobe & (1 << i)) { 294 if (ide_noprobe & (1 << i)) {
285 printk(KERN_INFO "ide: skipping probe for %s\n", drive->name); 295 printk(KERN_INFO "ide: skipping probe for %s\n", drive->name);
286 drive->dev_flags |= IDE_DFLAG_NOPROBE; 296 drive->dev_flags |= IDE_DFLAG_NOPROBE;
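
Like the nodma/noflush/noprobe masks before it, the new parameter is set per device; going by the convention of the other ide-core dev-param masks, the syntax should be interface.device pairs, e.g.:

	ide-core.nohpa=0.0	disable HPA on the first device of the first interface
	ide-core.nohpa=0.1	same, for the second device
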
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index 051b4ab0f35..ee9b55ecc62 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -21,7 +21,7 @@
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/io.h> 22#include <linux/io.h>
23 23
24static void __devinit plat_ide_setup_ports(hw_regs_t *hw, 24static void __devinit plat_ide_setup_ports(struct ide_hw *hw,
25 void __iomem *base, 25 void __iomem *base,
26 void __iomem *ctrl, 26 void __iomem *ctrl,
27 struct pata_platform_info *pdata, 27 struct pata_platform_info *pdata,
@@ -40,12 +40,11 @@ static void __devinit plat_ide_setup_ports(hw_regs_t *hw,
40 hw->io_ports.ctl_addr = (unsigned long)ctrl; 40 hw->io_ports.ctl_addr = (unsigned long)ctrl;
41 41
42 hw->irq = irq; 42 hw->irq = irq;
43
44 hw->chipset = ide_generic;
45} 43}
46 44
47static const struct ide_port_info platform_ide_port_info = { 45static const struct ide_port_info platform_ide_port_info = {
48 .host_flags = IDE_HFLAG_NO_DMA, 46 .host_flags = IDE_HFLAG_NO_DMA,
47 .chipset = ide_generic,
49}; 48};
50 49
51static int __devinit plat_ide_probe(struct platform_device *pdev) 50static int __devinit plat_ide_probe(struct platform_device *pdev)
@@ -55,7 +54,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
55 struct pata_platform_info *pdata; 54 struct pata_platform_info *pdata;
56 struct ide_host *host; 55 struct ide_host *host;
57 int ret = 0, mmio = 0; 56 int ret = 0, mmio = 0;
58 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 57 struct ide_hw hw, *hws[] = { &hw };
59 struct ide_port_info d = platform_ide_port_info; 58 struct ide_port_info d = platform_ide_port_info;
60 59
61 pdata = pdev->dev.platform_data; 60 pdata = pdev->dev.platform_data;
@@ -99,7 +98,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
99 if (mmio) 98 if (mmio)
100 d.host_flags |= IDE_HFLAG_MMIO; 99 d.host_flags |= IDE_HFLAG_MMIO;
101 100
102 ret = ide_host_add(&d, hws, &host); 101 ret = ide_host_add(&d, hws, 1, &host);
103 if (ret) 102 if (ret)
104 goto out; 103 goto out;
105 104
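
The chipset field makes the same move in every host driver this patch touches: out of the per-port struct ide_hw (formerly hw_regs_t) and into struct ide_port_info, so it is declared once with the driver instead of being stamped into each hw instance:

	/* before: per-hw */
	hw.chipset = ide_generic;

	/* after: once, in the port info */
	static const struct ide_port_info d = {
		.host_flags = IDE_HFLAG_NO_DMA,
		.chipset    = ide_generic,
	};
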
diff --git a/drivers/ide/macide.c b/drivers/ide/macide.c
index 4b1718e8328..1447c8c9056 100644
--- a/drivers/ide/macide.c
+++ b/drivers/ide/macide.c
@@ -62,7 +62,7 @@ int macide_ack_intr(ide_hwif_t* hwif)
62 return 0; 62 return 0;
63} 63}
64 64
65static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base, 65static void __init macide_setup_ports(struct ide_hw *hw, unsigned long base,
66 int irq, ide_ack_intr_t *ack_intr) 66 int irq, ide_ack_intr_t *ack_intr)
67{ 67{
68 int i; 68 int i;
@@ -76,13 +76,12 @@ static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base,
76 76
77 hw->irq = irq; 77 hw->irq = irq;
78 hw->ack_intr = ack_intr; 78 hw->ack_intr = ack_intr;
79
80 hw->chipset = ide_generic;
81} 79}
82 80
83static const struct ide_port_info macide_port_info = { 81static const struct ide_port_info macide_port_info = {
84 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, 82 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
85 .irq_flags = IRQF_SHARED, 83 .irq_flags = IRQF_SHARED,
84 .chipset = ide_generic,
86}; 85};
87 86
88static const char *mac_ide_name[] = 87static const char *mac_ide_name[] =
@@ -97,7 +96,7 @@ static int __init macide_init(void)
97 ide_ack_intr_t *ack_intr; 96 ide_ack_intr_t *ack_intr;
98 unsigned long base; 97 unsigned long base;
99 int irq; 98 int irq;
100 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 99 struct ide_hw hw, *hws[] = { &hw };
101 100
102 if (!MACH_IS_MAC) 101 if (!MACH_IS_MAC)
103 return -ENODEV; 102 return -ENODEV;
@@ -127,7 +126,7 @@ static int __init macide_init(void)
127 126
128 macide_setup_ports(&hw, base, irq, ack_intr); 127 macide_setup_ports(&hw, base, irq, ack_intr);
129 128
130 return ide_host_add(&macide_port_info, hws, NULL); 129 return ide_host_add(&macide_port_info, hws, 1, NULL);
131} 130}
132 131
133module_init(macide_init); 132module_init(macide_init);
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index 09d813d313f..3c1dc015215 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -306,6 +306,7 @@ static struct ide_port_info __devinitdata palm_bk3710_port_info = {
306 .host_flags = IDE_HFLAG_MMIO, 306 .host_flags = IDE_HFLAG_MMIO,
307 .pio_mask = ATA_PIO4, 307 .pio_mask = ATA_PIO4,
308 .mwdma_mask = ATA_MWDMA2, 308 .mwdma_mask = ATA_MWDMA2,
309 .chipset = ide_palm3710,
309}; 310};
310 311
311static int __init palm_bk3710_probe(struct platform_device *pdev) 312static int __init palm_bk3710_probe(struct platform_device *pdev)
@@ -315,7 +316,7 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
315 void __iomem *base; 316 void __iomem *base;
316 unsigned long rate, mem_size; 317 unsigned long rate, mem_size;
317 int i, rc; 318 int i, rc;
318 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 319 struct ide_hw hw, *hws[] = { &hw };
319 320
320 clk = clk_get(&pdev->dev, "IDECLK"); 321 clk = clk_get(&pdev->dev, "IDECLK");
321 if (IS_ERR(clk)) 322 if (IS_ERR(clk))
@@ -363,13 +364,12 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
363 (base + IDE_PALM_ATA_PRI_CTL_OFFSET); 364 (base + IDE_PALM_ATA_PRI_CTL_OFFSET);
364 hw.irq = irq->start; 365 hw.irq = irq->start;
365 hw.dev = &pdev->dev; 366 hw.dev = &pdev->dev;
366 hw.chipset = ide_palm3710;
367 367
368 palm_bk3710_port_info.udma_mask = rate < 100000000 ? ATA_UDMA4 : 368 palm_bk3710_port_info.udma_mask = rate < 100000000 ? ATA_UDMA4 :
369 ATA_UDMA5; 369 ATA_UDMA5;
370 370
371 /* Register the IDE interface with Linux */ 371 /* Register the IDE interface with Linux */
372 rc = ide_host_add(&palm_bk3710_port_info, hws, NULL); 372 rc = ide_host_add(&palm_bk3710_port_info, hws, 1, NULL);
373 if (rc) 373 if (rc)
374 goto out; 374 goto out;
375 375
diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
index b68906c3c17..65ba8239e7b 100644
--- a/drivers/ide/pdc202xx_new.c
+++ b/drivers/ide/pdc202xx_new.c
@@ -40,18 +40,6 @@
40#define DBG(fmt, args...) 40#define DBG(fmt, args...)
41#endif 41#endif
42 42
43static const char *pdc_quirk_drives[] = {
44 "QUANTUM FIREBALLlct08 08",
45 "QUANTUM FIREBALLP KA6.4",
46 "QUANTUM FIREBALLP KA9.1",
47 "QUANTUM FIREBALLP LM20.4",
48 "QUANTUM FIREBALLP KX13.6",
49 "QUANTUM FIREBALLP KX20.5",
50 "QUANTUM FIREBALLP KX27.3",
51 "QUANTUM FIREBALLP LM20.5",
52 NULL
53};
54
55static u8 max_dma_rate(struct pci_dev *pdev) 43static u8 max_dma_rate(struct pci_dev *pdev)
56{ 44{
57 u8 mode; 45 u8 mode;
@@ -200,19 +188,6 @@ static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
200 return ATA_CBL_PATA80; 188 return ATA_CBL_PATA80;
201} 189}
202 190
203static void pdcnew_quirkproc(ide_drive_t *drive)
204{
205 const char **list, *m = (char *)&drive->id[ATA_ID_PROD];
206
207 for (list = pdc_quirk_drives; *list != NULL; list++)
208 if (strstr(m, *list) != NULL) {
209 drive->quirk_list = 2;
210 return;
211 }
212
213 drive->quirk_list = 0;
214}
215
216static void pdcnew_reset(ide_drive_t *drive) 191static void pdcnew_reset(ide_drive_t *drive)
217{ 192{
218 /* 193 /*
@@ -473,7 +448,6 @@ static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
473static const struct ide_port_ops pdcnew_port_ops = { 448static const struct ide_port_ops pdcnew_port_ops = {
474 .set_pio_mode = pdcnew_set_pio_mode, 449 .set_pio_mode = pdcnew_set_pio_mode,
475 .set_dma_mode = pdcnew_set_dma_mode, 450 .set_dma_mode = pdcnew_set_dma_mode,
476 .quirkproc = pdcnew_quirkproc,
477 .resetproc = pdcnew_reset, 451 .resetproc = pdcnew_reset,
478 .cable_detect = pdcnew_cable_detect, 452 .cable_detect = pdcnew_cable_detect,
479}; 453};
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index 248a54bd238..b6abf7e52ca 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org> 2 * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
3 * Copyright (C) 2006-2007 MontaVista Software, Inc. 3 * Copyright (C) 2006-2007, 2009 MontaVista Software, Inc.
4 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz 4 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
5 * 5 *
6 * Portions Copyright (C) 1999 Promise Technology, Inc. 6 * Portions Copyright (C) 1999 Promise Technology, Inc.
@@ -23,18 +23,6 @@
23 23
24#define PDC202XX_DEBUG_DRIVE_INFO 0 24#define PDC202XX_DEBUG_DRIVE_INFO 0
25 25
26static const char *pdc_quirk_drives[] = {
27 "QUANTUM FIREBALLlct08 08",
28 "QUANTUM FIREBALLP KA6.4",
29 "QUANTUM FIREBALLP KA9.1",
30 "QUANTUM FIREBALLP LM20.4",
31 "QUANTUM FIREBALLP KX13.6",
32 "QUANTUM FIREBALLP KX20.5",
33 "QUANTUM FIREBALLP KX27.3",
34 "QUANTUM FIREBALLP LM20.5",
35 NULL
36};
37
38static void pdc_old_disable_66MHz_clock(ide_hwif_t *); 26static void pdc_old_disable_66MHz_clock(ide_hwif_t *);
39 27
40static void pdc202xx_set_mode(ide_drive_t *drive, const u8 speed) 28static void pdc202xx_set_mode(ide_drive_t *drive, const u8 speed)
@@ -151,19 +139,6 @@ static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif)
151 outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg); 139 outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg);
152} 140}
153 141
154static void pdc202xx_quirkproc(ide_drive_t *drive)
155{
156 const char **list, *m = (char *)&drive->id[ATA_ID_PROD];
157
158 for (list = pdc_quirk_drives; *list != NULL; list++)
159 if (strstr(m, *list) != NULL) {
160 drive->quirk_list = 2;
161 return;
162 }
163
164 drive->quirk_list = 0;
165}
166
167static void pdc202xx_dma_start(ide_drive_t *drive) 142static void pdc202xx_dma_start(ide_drive_t *drive)
168{ 143{
169 if (drive->current_speed > XFER_UDMA_2) 144 if (drive->current_speed > XFER_UDMA_2)
@@ -177,7 +152,7 @@ static void pdc202xx_dma_start(ide_drive_t *drive)
177 u8 clock = inb(high_16 + 0x11); 152 u8 clock = inb(high_16 + 0x11);
178 153
179 outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11); 154 outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11);
180 word_count = (rq->nr_sectors << 8); 155 word_count = (blk_rq_sectors(rq) << 8);
181 word_count = (rq_data_dir(rq) == READ) ? 156 word_count = (rq_data_dir(rq) == READ) ?
182 word_count | 0x05000000 : 157 word_count | 0x05000000 :
183 word_count | 0x06000000; 158 word_count | 0x06000000;
@@ -203,61 +178,6 @@ static int pdc202xx_dma_end(ide_drive_t *drive)
203 return ide_dma_end(drive); 178 return ide_dma_end(drive);
204} 179}
205 180
206static int pdc202xx_dma_test_irq(ide_drive_t *drive)
207{
208 ide_hwif_t *hwif = drive->hwif;
209 unsigned long high_16 = hwif->extra_base - 16;
210 u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
211 u8 sc1d = inb(high_16 + 0x001d);
212
213 if (hwif->channel) {
214 /* bit7: Error, bit6: Interrupting, bit5: FIFO Full, bit4: FIFO Empty */
215 if ((sc1d & 0x50) == 0x50)
216 goto somebody_else;
217 else if ((sc1d & 0x40) == 0x40)
218 return (dma_stat & 4) == 4;
219 } else {
220 /* bit3: Error, bit2: Interrupting, bit1: FIFO Full, bit0: FIFO Empty */
221 if ((sc1d & 0x05) == 0x05)
222 goto somebody_else;
223 else if ((sc1d & 0x04) == 0x04)
224 return (dma_stat & 4) == 4;
225 }
226somebody_else:
227 return (dma_stat & 4) == 4; /* return 1 if INTR asserted */
228}
229
230static void pdc202xx_reset_host (ide_hwif_t *hwif)
231{
232 unsigned long high_16 = hwif->extra_base - 16;
233 u8 udma_speed_flag = inb(high_16 | 0x001f);
234
235 outb(udma_speed_flag | 0x10, high_16 | 0x001f);
236 mdelay(100);
237 outb(udma_speed_flag & ~0x10, high_16 | 0x001f);
238 mdelay(2000); /* 2 seconds ?! */
239
240 printk(KERN_WARNING "PDC202XX: %s channel reset.\n",
241 hwif->channel ? "Secondary" : "Primary");
242}
243
244static void pdc202xx_reset (ide_drive_t *drive)
245{
246 ide_hwif_t *hwif = drive->hwif;
247 ide_hwif_t *mate = hwif->mate;
248
249 pdc202xx_reset_host(hwif);
250 pdc202xx_reset_host(mate);
251
252 ide_set_max_pio(drive);
253}
254
255static void pdc202xx_dma_lost_irq(ide_drive_t *drive)
256{
257 pdc202xx_reset(drive);
258 ide_dma_lost_irq(drive);
259}
260
261static int init_chipset_pdc202xx(struct pci_dev *dev) 181static int init_chipset_pdc202xx(struct pci_dev *dev)
262{ 182{
263 unsigned long dmabase = pci_resource_start(dev, 4); 183 unsigned long dmabase = pci_resource_start(dev, 4);
@@ -311,38 +231,22 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
311static const struct ide_port_ops pdc20246_port_ops = { 231static const struct ide_port_ops pdc20246_port_ops = {
312 .set_pio_mode = pdc202xx_set_pio_mode, 232 .set_pio_mode = pdc202xx_set_pio_mode,
313 .set_dma_mode = pdc202xx_set_mode, 233 .set_dma_mode = pdc202xx_set_mode,
314 .quirkproc = pdc202xx_quirkproc,
315}; 234};
316 235
317static const struct ide_port_ops pdc2026x_port_ops = { 236static const struct ide_port_ops pdc2026x_port_ops = {
318 .set_pio_mode = pdc202xx_set_pio_mode, 237 .set_pio_mode = pdc202xx_set_pio_mode,
319 .set_dma_mode = pdc202xx_set_mode, 238 .set_dma_mode = pdc202xx_set_mode,
320 .quirkproc = pdc202xx_quirkproc,
321 .resetproc = pdc202xx_reset,
322 .cable_detect = pdc2026x_cable_detect, 239 .cable_detect = pdc2026x_cable_detect,
323}; 240};
324 241
325static const struct ide_dma_ops pdc20246_dma_ops = {
326 .dma_host_set = ide_dma_host_set,
327 .dma_setup = ide_dma_setup,
328 .dma_start = ide_dma_start,
329 .dma_end = ide_dma_end,
330 .dma_test_irq = pdc202xx_dma_test_irq,
331 .dma_lost_irq = pdc202xx_dma_lost_irq,
332 .dma_timer_expiry = ide_dma_sff_timer_expiry,
333 .dma_clear = pdc202xx_reset,
334 .dma_sff_read_status = ide_dma_sff_read_status,
335};
336
337static const struct ide_dma_ops pdc2026x_dma_ops = { 242static const struct ide_dma_ops pdc2026x_dma_ops = {
338 .dma_host_set = ide_dma_host_set, 243 .dma_host_set = ide_dma_host_set,
339 .dma_setup = ide_dma_setup, 244 .dma_setup = ide_dma_setup,
340 .dma_start = pdc202xx_dma_start, 245 .dma_start = pdc202xx_dma_start,
341 .dma_end = pdc202xx_dma_end, 246 .dma_end = pdc202xx_dma_end,
342 .dma_test_irq = pdc202xx_dma_test_irq, 247 .dma_test_irq = ide_dma_test_irq,
343 .dma_lost_irq = pdc202xx_dma_lost_irq, 248 .dma_lost_irq = ide_dma_lost_irq,
344 .dma_timer_expiry = ide_dma_sff_timer_expiry, 249 .dma_timer_expiry = ide_dma_sff_timer_expiry,
345 .dma_clear = pdc202xx_reset,
346 .dma_sff_read_status = ide_dma_sff_read_status, 250 .dma_sff_read_status = ide_dma_sff_read_status,
347}; 251};
348 252
@@ -364,7 +268,7 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
364 .name = DRV_NAME, 268 .name = DRV_NAME,
365 .init_chipset = init_chipset_pdc202xx, 269 .init_chipset = init_chipset_pdc202xx,
366 .port_ops = &pdc20246_port_ops, 270 .port_ops = &pdc20246_port_ops,
367 .dma_ops = &pdc20246_dma_ops, 271 .dma_ops = &sff_dma_ops,
368 .host_flags = IDE_HFLAGS_PDC202XX, 272 .host_flags = IDE_HFLAGS_PDC202XX,
369 .pio_mask = ATA_PIO4, 273 .pio_mask = ATA_PIO4,
370 .mwdma_mask = ATA_MWDMA2, 274 .mwdma_mask = ATA_MWDMA2,
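
Besides dropping the Promise quirk list and the custom reset/test-irq machinery in favor of the generic ide_dma_* helpers, the pdc202xx_old hunk converts a direct rq->nr_sectors read into the block layer's blk_rq_sectors() accessor. A hedged illustration of that idiom; the wrapper function is hypothetical, while blk_rq_sectors() and rq_data_dir() come from <linux/blkdev.h>:

	#include <linux/blkdev.h>

	/* Hypothetical helper mirroring what pdc202xx_dma_start() now
	 * computes: a transfer word count derived from the request's
	 * sector count via the accessor, not struct request internals. */
	static u32 example_word_count(struct request *rq)
	{
		u32 word_count = blk_rq_sectors(rq) << 8;

		return (rq_data_dir(rq) == READ) ? word_count | 0x05000000
						 : word_count | 0x06000000;
	}
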
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index f76e4e6b408..97642a7a79c 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -1023,13 +1023,14 @@ static const struct ide_port_info pmac_port_info = {
1023 * Setup, register & probe an IDE channel driven by this driver, this is 1023 * Setup, register & probe an IDE channel driven by this driver, this is
1024 * called by one of the 2 probe functions (macio or PCI). 1024 * called by one of the 2 probe functions (macio or PCI).
1025 */ 1025 */
1026static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw) 1026static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
1027 struct ide_hw *hw)
1027{ 1028{
1028 struct device_node *np = pmif->node; 1029 struct device_node *np = pmif->node;
1029 const int *bidp; 1030 const int *bidp;
1030 struct ide_host *host; 1031 struct ide_host *host;
1031 ide_hwif_t *hwif; 1032 ide_hwif_t *hwif;
1032 hw_regs_t *hws[] = { hw, NULL, NULL, NULL }; 1033 struct ide_hw *hws[] = { hw };
1033 struct ide_port_info d = pmac_port_info; 1034 struct ide_port_info d = pmac_port_info;
1034 int rc; 1035 int rc;
1035 1036
@@ -1077,7 +1078,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw)
1077 /* Make sure we have sane timings */ 1078 /* Make sure we have sane timings */
1078 sanitize_timings(pmif); 1079 sanitize_timings(pmif);
1079 1080
1080 host = ide_host_alloc(&d, hws); 1081 host = ide_host_alloc(&d, hws, 1);
1081 if (host == NULL) 1082 if (host == NULL)
1082 return -ENOMEM; 1083 return -ENOMEM;
1083 hwif = host->ports[0]; 1084 hwif = host->ports[0];
@@ -1124,7 +1125,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw)
1124 return 0; 1125 return 0;
1125} 1126}
1126 1127
1127static void __devinit pmac_ide_init_ports(hw_regs_t *hw, unsigned long base) 1128static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
1128{ 1129{
1129 int i; 1130 int i;
1130 1131
@@ -1144,7 +1145,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1144 unsigned long regbase; 1145 unsigned long regbase;
1145 pmac_ide_hwif_t *pmif; 1146 pmac_ide_hwif_t *pmif;
1146 int irq, rc; 1147 int irq, rc;
1147 hw_regs_t hw; 1148 struct ide_hw hw;
1148 1149
1149 pmif = kzalloc(sizeof(*pmif), GFP_KERNEL); 1150 pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
1150 if (pmif == NULL) 1151 if (pmif == NULL)
@@ -1268,7 +1269,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1268 void __iomem *base; 1269 void __iomem *base;
1269 unsigned long rbase, rlen; 1270 unsigned long rbase, rlen;
1270 int rc; 1271 int rc;
1271 hw_regs_t hw; 1272 struct ide_hw hw;
1272 1273
1273 np = pci_device_to_OF_node(pdev); 1274 np = pci_device_to_OF_node(pdev);
1274 if (np == NULL) { 1275 if (np == NULL) {
diff --git a/drivers/ide/q40ide.c b/drivers/ide/q40ide.c
index c7934667924..ab49a97023d 100644
--- a/drivers/ide/q40ide.c
+++ b/drivers/ide/q40ide.c
@@ -51,11 +51,11 @@ static int q40ide_default_irq(unsigned long base)
51/* 51/*
52 * Addresses are pretranslated for Q40 ISA access. 52 * Addresses are pretranslated for Q40 ISA access.
53 */ 53 */
54static void q40_ide_setup_ports(hw_regs_t *hw, unsigned long base, 54static void q40_ide_setup_ports(struct ide_hw *hw, unsigned long base,
55 ide_ack_intr_t *ack_intr, 55 ide_ack_intr_t *ack_intr,
56 int irq) 56 int irq)
57{ 57{
58 memset(hw, 0, sizeof(hw_regs_t)); 58 memset(hw, 0, sizeof(*hw));
59 /* BIG FAT WARNING: 59 /* BIG FAT WARNING:
60 assumption: only DATA port is ever used in 16 bit mode */ 60 assumption: only DATA port is ever used in 16 bit mode */
61 hw->io_ports.data_addr = Q40_ISA_IO_W(base); 61 hw->io_ports.data_addr = Q40_ISA_IO_W(base);
@@ -70,8 +70,6 @@ static void q40_ide_setup_ports(hw_regs_t *hw, unsigned long base,
70 70
71 hw->irq = irq; 71 hw->irq = irq;
72 hw->ack_intr = ack_intr; 72 hw->ack_intr = ack_intr;
73
74 hw->chipset = ide_generic;
75} 73}
76 74
77static void q40ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, 75static void q40ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
@@ -119,6 +117,7 @@ static const struct ide_port_info q40ide_port_info = {
119 .tp_ops = &q40ide_tp_ops, 117 .tp_ops = &q40ide_tp_ops,
120 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, 118 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
121 .irq_flags = IRQF_SHARED, 119 .irq_flags = IRQF_SHARED,
120 .chipset = ide_generic,
122}; 121};
123 122
124/* 123/*
@@ -136,7 +135,7 @@ static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={
136static int __init q40ide_init(void) 135static int __init q40ide_init(void)
137{ 136{
138 int i; 137 int i;
139 hw_regs_t hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL }; 138 struct ide_hw hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL };
140 139
141 if (!MACH_IS_Q40) 140 if (!MACH_IS_Q40)
142 return -ENODEV; 141 return -ENODEV;
@@ -163,7 +162,7 @@ static int __init q40ide_init(void)
163 hws[i] = &hw[i]; 162 hws[i] = &hw[i];
164 } 163 }
165 164
166 return ide_host_add(&q40ide_port_info, hws, NULL); 165 return ide_host_add(&q40ide_port_info, hws, Q40IDE_NUM_HWIFS, NULL);
167} 166}
168 167
169module_init(q40ide_init); 168module_init(q40ide_init);
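
q40ide is the one driver in this stretch that registers several ports at once, so it shows the multi-port form of the new call: hws[] is sized to the real port count and that count is passed to ide_host_add(). A compressed sketch, reusing example_port_info from the sketch after the macide diff (EXAMPLE_NUM_HWIFS is a stand-in for Q40IDE_NUM_HWIFS):

	#define EXAMPLE_NUM_HWIFS 2

	static int __init example_multi_init(void)
	{
		struct ide_hw hw[EXAMPLE_NUM_HWIFS], *hws[EXAMPLE_NUM_HWIFS];
		int i;

		for (i = 0; i < EXAMPLE_NUM_HWIFS; i++) {
			memset(&hw[i], 0, sizeof(hw[i]));
			/* ... per-port io_ports/irq setup ... */
			hws[i] = &hw[i];
		}

		return ide_host_add(&example_port_info, hws,
				    EXAMPLE_NUM_HWIFS, NULL);
	}
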
diff --git a/drivers/ide/rapide.c b/drivers/ide/rapide.c
index d5003ca6980..00f54248f41 100644
--- a/drivers/ide/rapide.c
+++ b/drivers/ide/rapide.c
@@ -13,9 +13,10 @@
13 13
14static const struct ide_port_info rapide_port_info = { 14static const struct ide_port_info rapide_port_info = {
15 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, 15 .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
16 .chipset = ide_generic,
16}; 17};
17 18
18static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base, 19static void rapide_setup_ports(struct ide_hw *hw, void __iomem *base,
19 void __iomem *ctrl, unsigned int sz, int irq) 20 void __iomem *ctrl, unsigned int sz, int irq)
20{ 21{
21 unsigned long port = (unsigned long)base; 22 unsigned long port = (unsigned long)base;
@@ -35,7 +36,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
35 void __iomem *base; 36 void __iomem *base;
36 struct ide_host *host; 37 struct ide_host *host;
37 int ret; 38 int ret;
38 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 39 struct ide_hw hw, *hws[] = { &hw };
39 40
40 ret = ecard_request_resources(ec); 41 ret = ecard_request_resources(ec);
41 if (ret) 42 if (ret)
@@ -49,10 +50,9 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
49 50
50 memset(&hw, 0, sizeof(hw)); 51 memset(&hw, 0, sizeof(hw));
51 rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq); 52 rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq);
52 hw.chipset = ide_generic;
53 hw.dev = &ec->dev; 53 hw.dev = &ec->dev;
54 54
55 ret = ide_host_add(&rapide_port_info, hws, &host); 55 ret = ide_host_add(&rapide_port_info, hws, 1, &host);
56 if (ret) 56 if (ret)
57 goto release; 57 goto release;
58 58
diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
index 5be41f25204..1104bb301eb 100644
--- a/drivers/ide/scc_pata.c
+++ b/drivers/ide/scc_pata.c
@@ -559,7 +559,7 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
559{ 559{
560 struct scc_ports *ports = pci_get_drvdata(dev); 560 struct scc_ports *ports = pci_get_drvdata(dev);
561 struct ide_host *host; 561 struct ide_host *host;
562 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 562 struct ide_hw hw, *hws[] = { &hw };
563 int i, rc; 563 int i, rc;
564 564
565 memset(&hw, 0, sizeof(hw)); 565 memset(&hw, 0, sizeof(hw));
@@ -567,9 +567,8 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
567 hw.io_ports_array[i] = ports->dma + 0x20 + i * 4; 567 hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
568 hw.irq = dev->irq; 568 hw.irq = dev->irq;
569 hw.dev = &dev->dev; 569 hw.dev = &dev->dev;
570 hw.chipset = ide_pci;
571 570
572 rc = ide_host_add(d, hws, &host); 571 rc = ide_host_add(d, hws, 1, &host);
573 if (rc) 572 if (rc)
574 return rc; 573 return rc;
575 574
@@ -823,6 +822,7 @@ static const struct ide_port_info scc_chipset __devinitdata = {
823 .host_flags = IDE_HFLAG_SINGLE, 822 .host_flags = IDE_HFLAG_SINGLE,
824 .irq_flags = IRQF_SHARED, 823 .irq_flags = IRQF_SHARED,
825 .pio_mask = ATA_PIO4, 824 .pio_mask = ATA_PIO4,
825 .chipset = ide_pci,
826}; 826};
827 827
828/** 828/**
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 7a3a12d6e63..ab3db61d2ba 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> 2 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
3 * Copyright (C) 1995-1998 Mark Lord 3 * Copyright (C) 1995-1998 Mark Lord
4 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz 4 * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz
5 * 5 *
6 * May be copied or modified under the terms of the GNU General Public License 6 * May be copied or modified under the terms of the GNU General Public License
7 */ 7 */
@@ -301,11 +301,11 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *
301} 301}
302 302
303/** 303/**
304 * ide_hw_configure - configure a hw_regs_t instance 304 * ide_hw_configure - configure a struct ide_hw instance
305 * @dev: PCI device holding interface 305 * @dev: PCI device holding interface
306 * @d: IDE port info 306 * @d: IDE port info
307 * @port: port number 307 * @port: port number
308 * @hw: hw_regs_t instance corresponding to this port 308 * @hw: struct ide_hw instance corresponding to this port
309 * 309 *
310 * Perform the initial set up for the hardware interface structure. This 310 * Perform the initial set up for the hardware interface structure. This
311 * is done per interface port rather than per PCI device. There may be 311 * is done per interface port rather than per PCI device. There may be
@@ -315,7 +315,7 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *
315 */ 315 */
316 316
317static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d, 317static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
318 unsigned int port, hw_regs_t *hw) 318 unsigned int port, struct ide_hw *hw)
319{ 319{
320 unsigned long ctl = 0, base = 0; 320 unsigned long ctl = 0, base = 0;
321 321
@@ -344,7 +344,6 @@ static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
344 344
345 memset(hw, 0, sizeof(*hw)); 345 memset(hw, 0, sizeof(*hw));
346 hw->dev = &dev->dev; 346 hw->dev = &dev->dev;
347 hw->chipset = d->chipset ? d->chipset : ide_pci;
348 ide_std_init_ports(hw, base, ctl | 2); 347 ide_std_init_ports(hw, base, ctl | 2);
349 348
350 return 0; 349 return 0;
@@ -446,8 +445,8 @@ out:
446 * ide_pci_setup_ports - configure ports/devices on PCI IDE 445 * ide_pci_setup_ports - configure ports/devices on PCI IDE
447 * @dev: PCI device 446 * @dev: PCI device
448 * @d: IDE port info 447 * @d: IDE port info
449 * @hw: hw_regs_t instances corresponding to this PCI IDE device 448 * @hw: struct ide_hw instances corresponding to this PCI IDE device
450 * @hws: hw_regs_t pointers table to update 449 * @hws: struct ide_hw pointers table to update
451 * 450 *
452 * Scan the interfaces attached to this device and do any 451 * Scan the interfaces attached to this device and do any
453 * necessary per port setup. Attach the devices and ask the 452 * necessary per port setup. Attach the devices and ask the
@@ -459,7 +458,7 @@ out:
459 */ 458 */
460 459
461void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, 460void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
462 hw_regs_t *hw, hw_regs_t **hws) 461 struct ide_hw *hw, struct ide_hw **hws)
463{ 462{
464 int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port; 463 int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
465 u8 tmp; 464 u8 tmp;
@@ -535,61 +534,15 @@ out:
535 return ret; 534 return ret;
536} 535}
537 536
538int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d,
539 void *priv)
540{
541 struct ide_host *host;
542 hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
543 int ret;
544
545 ret = ide_setup_pci_controller(dev, d, 1);
546 if (ret < 0)
547 goto out;
548
549 ide_pci_setup_ports(dev, d, &hw[0], &hws[0]);
550
551 host = ide_host_alloc(d, hws);
552 if (host == NULL) {
553 ret = -ENOMEM;
554 goto out;
555 }
556
557 host->dev[0] = &dev->dev;
558
559 host->host_priv = priv;
560
561 host->irq_flags = IRQF_SHARED;
562
563 pci_set_drvdata(dev, host);
564
565 ret = do_ide_setup_pci_device(dev, d, 1);
566 if (ret < 0)
567 goto out;
568
569 /* fixup IRQ */
570 if (ide_pci_is_in_compatibility_mode(dev)) {
571 hw[0].irq = pci_get_legacy_ide_irq(dev, 0);
572 hw[1].irq = pci_get_legacy_ide_irq(dev, 1);
573 } else
574 hw[1].irq = hw[0].irq = ret;
575
576 ret = ide_host_register(host, d, hws);
577 if (ret)
578 ide_host_free(host);
579out:
580 return ret;
581}
582EXPORT_SYMBOL_GPL(ide_pci_init_one);
583
584int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, 537int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
585 const struct ide_port_info *d, void *priv) 538 const struct ide_port_info *d, void *priv)
586{ 539{
587 struct pci_dev *pdev[] = { dev1, dev2 }; 540 struct pci_dev *pdev[] = { dev1, dev2 };
588 struct ide_host *host; 541 struct ide_host *host;
589 int ret, i; 542 int ret, i, n_ports = dev2 ? 4 : 2;
590 hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL }; 543 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
591 544
592 for (i = 0; i < 2; i++) { 545 for (i = 0; i < n_ports / 2; i++) {
593 ret = ide_setup_pci_controller(pdev[i], d, !i); 546 ret = ide_setup_pci_controller(pdev[i], d, !i);
594 if (ret < 0) 547 if (ret < 0)
595 goto out; 548 goto out;
@@ -597,23 +550,24 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
597 ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]); 550 ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
598 } 551 }
599 552
600 host = ide_host_alloc(d, hws); 553 host = ide_host_alloc(d, hws, n_ports);
601 if (host == NULL) { 554 if (host == NULL) {
602 ret = -ENOMEM; 555 ret = -ENOMEM;
603 goto out; 556 goto out;
604 } 557 }
605 558
606 host->dev[0] = &dev1->dev; 559 host->dev[0] = &dev1->dev;
607 host->dev[1] = &dev2->dev; 560 if (dev2)
561 host->dev[1] = &dev2->dev;
608 562
609 host->host_priv = priv; 563 host->host_priv = priv;
610
611 host->irq_flags = IRQF_SHARED; 564 host->irq_flags = IRQF_SHARED;
612 565
613 pci_set_drvdata(pdev[0], host); 566 pci_set_drvdata(pdev[0], host);
614 pci_set_drvdata(pdev[1], host); 567 if (dev2)
568 pci_set_drvdata(pdev[1], host);
615 569
616 for (i = 0; i < 2; i++) { 570 for (i = 0; i < n_ports / 2; i++) {
617 ret = do_ide_setup_pci_device(pdev[i], d, !i); 571 ret = do_ide_setup_pci_device(pdev[i], d, !i);
618 572
619 /* 573 /*
@@ -639,6 +593,13 @@ out:
639} 593}
640EXPORT_SYMBOL_GPL(ide_pci_init_two); 594EXPORT_SYMBOL_GPL(ide_pci_init_two);
641 595
596int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d,
597 void *priv)
598{
599 return ide_pci_init_two(dev, NULL, d, priv);
600}
601EXPORT_SYMBOL_GPL(ide_pci_init_one);
602
642void ide_pci_remove(struct pci_dev *dev) 603void ide_pci_remove(struct pci_dev *dev)
643{ 604{
644 struct ide_host *host = pci_get_drvdata(dev); 605 struct ide_host *host = pci_get_drvdata(dev);
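
The setup-pci.c rework deletes the duplicated body of ide_pci_init_one() and reimplements it as a wrapper around ide_pci_init_two(), which now accepts a NULL second device and derives everything from n_ports = dev2 ? 4 : 2. Reduced to its skeleton, the consolidation pattern looks like this (host allocation and error handling elided; the function names are stand-ins):

	#include <linux/pci.h>

	static int example_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
				    void *drvdata)
	{
		struct pci_dev *pdev[] = { dev1, dev2 };
		int n_ports = dev2 ? 4 : 2;	/* two ports per PCI function */
		int i;

		/* loops run once when dev2 == NULL ... */
		for (i = 0; i < n_ports / 2; i++)
			pci_set_drvdata(pdev[i], drvdata);

		/* ... and every direct dev2 access is guarded */
		return 0;
	}

	static int example_init_one(struct pci_dev *dev, void *drvdata)
	{
		return example_init_two(dev, NULL, drvdata);
	}
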
diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c
index e5d2a48a84d..5f37f168f94 100644
--- a/drivers/ide/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
@@ -91,7 +91,7 @@ typedef struct {
91 91
92 92
93static void 93static void
94sgiioc4_init_hwif_ports(hw_regs_t * hw, unsigned long data_port, 94sgiioc4_init_hwif_ports(struct ide_hw *hw, unsigned long data_port,
95 unsigned long ctrl_port, unsigned long irq_port) 95 unsigned long ctrl_port, unsigned long irq_port)
96{ 96{
97 unsigned long reg = data_port; 97 unsigned long reg = data_port;
@@ -546,7 +546,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
546 unsigned long cmd_base, irqport; 546 unsigned long cmd_base, irqport;
547 unsigned long bar0, cmd_phys_base, ctl; 547 unsigned long bar0, cmd_phys_base, ctl;
548 void __iomem *virt_base; 548 void __iomem *virt_base;
549 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 549 struct ide_hw hw, *hws[] = { &hw };
550 int rc; 550 int rc;
551 551
552 /* Get the CmdBlk and CtrlBlk Base Registers */ 552 /* Get the CmdBlk and CtrlBlk Base Registers */
@@ -575,13 +575,12 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
575 memset(&hw, 0, sizeof(hw)); 575 memset(&hw, 0, sizeof(hw));
576 sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport); 576 sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport);
577 hw.irq = dev->irq; 577 hw.irq = dev->irq;
578 hw.chipset = ide_pci;
579 hw.dev = &dev->dev; 578 hw.dev = &dev->dev;
580 579
581 /* Initializing chipset IRQ Registers */ 580 /* Initializing chipset IRQ Registers */
582 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4)); 581 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));
583 582
584 rc = ide_host_add(&sgiioc4_port_info, hws, NULL); 583 rc = ide_host_add(&sgiioc4_port_info, hws, 1, NULL);
585 if (!rc) 584 if (!rc)
586 return 0; 585 return 0;
587 586
diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
index e4973cd1fba..bd82d228608 100644
--- a/drivers/ide/siimage.c
+++ b/drivers/ide/siimage.c
@@ -451,8 +451,8 @@ static int sil_sata_reset_poll(ide_drive_t *drive)
451static void sil_sata_pre_reset(ide_drive_t *drive) 451static void sil_sata_pre_reset(ide_drive_t *drive)
452{ 452{
453 if (drive->media == ide_disk) { 453 if (drive->media == ide_disk) {
454 drive->special.b.set_geometry = 0; 454 drive->special_flags &=
455 drive->special.b.recalibrate = 0; 455 ~(IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE);
456 } 456 }
457} 457}
458 458
diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
index b0a46062533..0924abff52f 100644
--- a/drivers/ide/sl82c105.c
+++ b/drivers/ide/sl82c105.c
@@ -10,7 +10,7 @@
10 * with the timing registers setup. 10 * with the timing registers setup.
11 * -- Benjamin Herrenschmidt (01/11/03) benh@kernel.crashing.org 11 * -- Benjamin Herrenschmidt (01/11/03) benh@kernel.crashing.org
12 * 12 *
13 * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com> 13 * Copyright (C) 2006-2007,2009 MontaVista Software, Inc. <source@mvista.com>
14 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz 14 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
15 */ 15 */
16 16
@@ -146,14 +146,15 @@ static void sl82c105_dma_lost_irq(ide_drive_t *drive)
146 u32 val, mask = hwif->channel ? CTRL_IDE_IRQB : CTRL_IDE_IRQA; 146 u32 val, mask = hwif->channel ? CTRL_IDE_IRQB : CTRL_IDE_IRQA;
147 u8 dma_cmd; 147 u8 dma_cmd;
148 148
149 printk("sl82c105: lost IRQ, resetting host\n"); 149 printk(KERN_WARNING "sl82c105: lost IRQ, resetting host\n");
150 150
151 /* 151 /*
152 * Check the raw interrupt from the drive. 152 * Check the raw interrupt from the drive.
153 */ 153 */
154 pci_read_config_dword(dev, 0x40, &val); 154 pci_read_config_dword(dev, 0x40, &val);
155 if (val & mask) 155 if (val & mask)
156 printk("sl82c105: drive was requesting IRQ, but host lost it\n"); 156 printk(KERN_INFO "sl82c105: drive was requesting IRQ, "
157 "but host lost it\n");
157 158
158 /* 159 /*
159 * Was DMA enabled? If so, disable it - we're resetting the 160 * Was DMA enabled? If so, disable it - we're resetting the
@@ -162,7 +163,7 @@ static void sl82c105_dma_lost_irq(ide_drive_t *drive)
162 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); 163 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
163 if (dma_cmd & 1) { 164 if (dma_cmd & 1) {
164 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD); 165 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
165 printk("sl82c105: DMA was enabled\n"); 166 printk(KERN_INFO "sl82c105: DMA was enabled\n");
166 } 167 }
167 168
168 sl82c105_reset_host(dev); 169 sl82c105_reset_host(dev);
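
The sl82c105 hunks are log hygiene: each bare printk() gains an explicit KERN_* level so the messages participate in console loglevel filtering instead of falling back to the kernel's default message level. The severity choices follow the usual convention:

	printk(KERN_WARNING "sl82c105: lost IRQ, resetting host\n"); /* fault */
	printk(KERN_INFO "sl82c105: DMA was enabled\n");             /* state note */
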
diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
index b4cf42dc8a6..05a93d6baec 100644
--- a/drivers/ide/tc86c001.c
+++ b/drivers/ide/tc86c001.c
@@ -112,7 +112,7 @@ static void tc86c001_dma_start(ide_drive_t *drive)
112 ide_hwif_t *hwif = drive->hwif; 112 ide_hwif_t *hwif = drive->hwif;
113 unsigned long sc_base = hwif->config_data; 113 unsigned long sc_base = hwif->config_data;
114 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04); 114 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
115 unsigned long nsectors = hwif->rq->nr_sectors; 115 unsigned long nsectors = blk_rq_sectors(hwif->rq);
116 116
117 /* 117 /*
118 * We have to manually load the sector count and size into 118 * We have to manually load the sector count and size into
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
index e33d764e294..ea89fddeed9 100644
--- a/drivers/ide/tx4938ide.c
+++ b/drivers/ide/tx4938ide.c
@@ -130,8 +130,7 @@ static const struct ide_port_info tx4938ide_port_info __initdata = {
130 130
131static int __init tx4938ide_probe(struct platform_device *pdev) 131static int __init tx4938ide_probe(struct platform_device *pdev)
132{ 132{
133 hw_regs_t hw; 133 struct ide_hw hw, *hws[] = { &hw };
134 hw_regs_t *hws[] = { &hw, NULL, NULL, NULL };
135 struct ide_host *host; 134 struct ide_host *host;
136 struct resource *res; 135 struct resource *res;
137 struct tx4938ide_platform_info *pdata = pdev->dev.platform_data; 136 struct tx4938ide_platform_info *pdata = pdev->dev.platform_data;
@@ -183,7 +182,7 @@ static int __init tx4938ide_probe(struct platform_device *pdev)
183 tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, 0); 182 tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, 0);
184 else 183 else
185 d.port_ops = NULL; 184 d.port_ops = NULL;
186 ret = ide_host_add(&d, hws, &host); 185 ret = ide_host_add(&d, hws, 1, &host);
187 if (!ret) 186 if (!ret)
188 platform_set_drvdata(pdev, host); 187 platform_set_drvdata(pdev, host);
189 return ret; 188 return ret;
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 564422d2397..64b58ecc3f0 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -307,7 +307,7 @@ static int tx4939ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
307 tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ? 307 tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ?
308 TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1); 308 TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1);
309 309
310 tx4939ide_writew(cmd->rq->nr_sectors, base, TX4939IDE_Sec_Cnt); 310 tx4939ide_writew(blk_rq_sectors(cmd->rq), base, TX4939IDE_Sec_Cnt);
311 311
312 return 0; 312 return 0;
313} 313}
@@ -537,8 +537,7 @@ static const struct ide_port_info tx4939ide_port_info __initdata = {
537 537
538static int __init tx4939ide_probe(struct platform_device *pdev) 538static int __init tx4939ide_probe(struct platform_device *pdev)
539{ 539{
540 hw_regs_t hw; 540 struct ide_hw hw, *hws[] = { &hw };
541 hw_regs_t *hws[] = { &hw, NULL, NULL, NULL };
542 struct ide_host *host; 541 struct ide_host *host;
543 struct resource *res; 542 struct resource *res;
544 int irq, ret; 543 int irq, ret;
@@ -581,7 +580,7 @@ static int __init tx4939ide_probe(struct platform_device *pdev)
581 hw.dev = &pdev->dev; 580 hw.dev = &pdev->dev;
582 581
583 pr_info("TX4939 IDE interface (base %#lx, irq %d)\n", mapbase, irq); 582 pr_info("TX4939 IDE interface (base %#lx, irq %d)\n", mapbase, irq);
584 host = ide_host_alloc(&tx4939ide_port_info, hws); 583 host = ide_host_alloc(&tx4939ide_port_info, hws, 1);
585 if (!host) 584 if (!host)
586 return -ENOMEM; 585 return -ENOMEM;
587 /* use extra_base for base address of the all registers */ 586 /* use extra_base for base address of the all registers */
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index 3ff7231e485..028de26a25f 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -67,6 +67,7 @@ static struct via_isa_bridge {
67 u8 udma_mask; 67 u8 udma_mask;
68 u8 flags; 68 u8 flags;
69} via_isa_bridges[] = { 69} via_isa_bridges[] = {
70 { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
70 { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 71 { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
71 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 72 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
72 { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 73 { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
@@ -474,6 +475,7 @@ static const struct pci_device_id via_pci_tbl[] = {
474 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), 0 }, 475 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), 0 },
475 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), 0 }, 476 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), 0 },
476 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_CX700_IDE), 0 }, 477 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_CX700_IDE), 0 },
478 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_VX855_IDE), 0 },
477 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6410), 1 }, 479 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6410), 1 },
478 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), 1 }, 480 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), 1 },
479 { 0, }, 481 { 0, },
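
Enabling the VX855 IDE function touches the two tables a new VIA bridge always needs: the driver-private via_isa_bridges[] array (name, ISA-bridge ID, supported revision range, UDMA mask, quirk flags) and the via_pci_tbl[] used for PCI probe matching, where the trailing 0/1 appears to index the driver's chipset variants, judging from the existing entries:

	{ "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
	...
	{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_VX855_IDE), 0 },
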
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index bf740394d70..949c97ff57e 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -41,6 +41,10 @@ static int debug;
41module_param_named(debug, debug, uint, 0644); 41module_param_named(debug, debug, uint, 0644);
42MODULE_PARM_DESC(debug, "Enable debug printks in this driver"); 42MODULE_PARM_DESC(debug, "Enable debug printks in this driver");
43 43
44static int forceload;
45module_param_named(forceload, forceload, uint, 0644);
 46MODULE_PARM_DESC(forceload, "Enable driver testing on unvalidated i5000");
47
44#define dprintk(fmt, arg...) \ 48#define dprintk(fmt, arg...) \
45 do { if (debug) printk(KERN_INFO I7300_PRINT fmt, ##arg); } while (0) 49 do { if (debug) printk(KERN_INFO I7300_PRINT fmt, ##arg); } while (0)
46 50
@@ -552,7 +556,7 @@ static int __init i7300_idle_init(void)
552 cpus_clear(idle_cpumask); 556 cpus_clear(idle_cpumask);
553 total_us = 0; 557 total_us = 0;
554 558
555 if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev)) 559 if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
556 return -ENODEV; 560 return -ENODEV;
557 561
558 if (i7300_idle_thrt_save()) 562 if (i7300_idle_thrt_save())
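
module_param_named() decouples the parameter name visible on the command line and in sysfs from the backing C variable, and MODULE_PARM_DESC() must be keyed to that external name, which is why the hunk above pairs forceload with forceload in both macros. A minimal sketch of the pattern:

	#include <linux/module.h>

	static int forceload;	/* 0 = validated platforms only */
	module_param_named(forceload, forceload, uint, 0644);
	MODULE_PARM_DESC(forceload, "Enable driver testing on unvalidated i5000");
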
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 823a6297a1a..2cd00b5b45b 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -1789,12 +1789,13 @@ static int dv1394_open(struct inode *inode, struct file *file)
1789 } else { 1789 } else {
1790 /* look up the card by ID */ 1790 /* look up the card by ID */
1791 unsigned long flags; 1791 unsigned long flags;
1792 int idx = ieee1394_file_to_instance(file);
1792 1793
1793 spin_lock_irqsave(&dv1394_cards_lock, flags); 1794 spin_lock_irqsave(&dv1394_cards_lock, flags);
1794 if (!list_empty(&dv1394_cards)) { 1795 if (!list_empty(&dv1394_cards)) {
1795 struct video_card *p; 1796 struct video_card *p;
1796 list_for_each_entry(p, &dv1394_cards, list) { 1797 list_for_each_entry(p, &dv1394_cards, list) {
1797 if ((p->id) == ieee1394_file_to_instance(file)) { 1798 if ((p->id) == idx) {
1798 video = p; 1799 video = p;
1799 break; 1800 break;
1800 } 1801 }
@@ -1803,7 +1804,7 @@ static int dv1394_open(struct inode *inode, struct file *file)
1803 spin_unlock_irqrestore(&dv1394_cards_lock, flags); 1804 spin_unlock_irqrestore(&dv1394_cards_lock, flags);
1804 1805
1805 if (!video) { 1806 if (!video) {
1806 debug_printk("dv1394: OHCI card %d not found", ieee1394_file_to_instance(file)); 1807 debug_printk("dv1394: OHCI card %d not found", idx);
1807 return -ENODEV; 1808 return -ENODEV;
1808 } 1809 }
1809 1810
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h
index 21d50f73a21..28b9f58bafd 100644
--- a/drivers/ieee1394/ieee1394_core.h
+++ b/drivers/ieee1394/ieee1394_core.h
@@ -5,6 +5,7 @@
5#include <linux/fs.h> 5#include <linux/fs.h>
6#include <linux/list.h> 6#include <linux/list.h>
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/cdev.h>
8#include <asm/atomic.h> 9#include <asm/atomic.h>
9 10
10#include "hosts.h" 11#include "hosts.h"
@@ -155,7 +156,10 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
155 */ 156 */
156static inline unsigned char ieee1394_file_to_instance(struct file *file) 157static inline unsigned char ieee1394_file_to_instance(struct file *file)
157{ 158{
158 return file->f_path.dentry->d_inode->i_cindex; 159 int idx = cdev_index(file->f_path.dentry->d_inode);
160 if (idx < 0)
161 idx = 0;
162 return idx;
159} 163}
160 164
161extern int hpsb_disable_irm; 165extern int hpsb_disable_irm;
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index bb17cce3cb5..f5c45b194f5 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -133,7 +133,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
133 struct c2_qp *qp; 133 struct c2_qp *qp;
134 int is_recv = 0; 134 int is_recv = 0;
135 135
136 ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq); 136 ce = c2_mq_consume(&cq->mq);
137 if (!ce) { 137 if (!ce) {
138 return -EAGAIN; 138 return -EAGAIN;
139 } 139 }
@@ -146,7 +146,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
146 while ((qp = 146 while ((qp =
147 (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) { 147 (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
148 c2_mq_free(&cq->mq); 148 c2_mq_free(&cq->mq);
149 ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq); 149 ce = c2_mq_consume(&cq->mq);
150 if (!ce) 150 if (!ce)
151 return -EAGAIN; 151 return -EAGAIN;
152 } 152 }
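
The amso1100 change is purely stylistic: judging from the casts it removes, c2_mq_consume() must return void *, and C converts void * to any object-pointer type implicitly, so the casts were noise. The same cleanup is applied to ehca_poll_eq() further down. Shape of the idiom:

	struct c2wr_ce *ce;

	ce = c2_mq_consume(&cq->mq);	/* implicit void * conversion */
	if (!ce)
		return -EAGAIN;
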
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index ff9be1a1310..32e3b1461d8 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -176,7 +176,7 @@ struct t3_send_wr {
176 struct t3_sge sgl[T3_MAX_SGE]; /* 4+ */ 176 struct t3_sge sgl[T3_MAX_SGE]; /* 4+ */
177}; 177};
178 178
179#define T3_MAX_FASTREG_DEPTH 24 179#define T3_MAX_FASTREG_DEPTH 10
180#define T3_MAX_FASTREG_FRAG 10 180#define T3_MAX_FASTREG_FRAG 10
181 181
182struct t3_fastreg_wr { 182struct t3_fastreg_wr {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 160ef482712..e2a63214008 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -40,6 +40,7 @@
40#include <linux/spinlock.h> 40#include <linux/spinlock.h>
41#include <linux/ethtool.h> 41#include <linux/ethtool.h>
42#include <linux/rtnetlink.h> 42#include <linux/rtnetlink.h>
43#include <linux/inetdevice.h>
43 44
44#include <asm/io.h> 45#include <asm/io.h>
45#include <asm/irq.h> 46#include <asm/irq.h>
@@ -1152,12 +1153,39 @@ static int iwch_query_device(struct ib_device *ibdev,
1152static int iwch_query_port(struct ib_device *ibdev, 1153static int iwch_query_port(struct ib_device *ibdev,
1153 u8 port, struct ib_port_attr *props) 1154 u8 port, struct ib_port_attr *props)
1154{ 1155{
1156 struct iwch_dev *dev;
1157 struct net_device *netdev;
1158 struct in_device *inetdev;
1159
1155 PDBG("%s ibdev %p\n", __func__, ibdev); 1160 PDBG("%s ibdev %p\n", __func__, ibdev);
1156 1161
1162 dev = to_iwch_dev(ibdev);
1163 netdev = dev->rdev.port_info.lldevs[port-1];
1164
1157 memset(props, 0, sizeof(struct ib_port_attr)); 1165 memset(props, 0, sizeof(struct ib_port_attr));
1158 props->max_mtu = IB_MTU_4096; 1166 props->max_mtu = IB_MTU_4096;
1159 props->active_mtu = IB_MTU_2048; 1167 if (netdev->mtu >= 4096)
1160 props->state = IB_PORT_ACTIVE; 1168 props->active_mtu = IB_MTU_4096;
1169 else if (netdev->mtu >= 2048)
1170 props->active_mtu = IB_MTU_2048;
1171 else if (netdev->mtu >= 1024)
1172 props->active_mtu = IB_MTU_1024;
1173 else if (netdev->mtu >= 512)
1174 props->active_mtu = IB_MTU_512;
1175 else
1176 props->active_mtu = IB_MTU_256;
1177
1178 if (!netif_carrier_ok(netdev))
1179 props->state = IB_PORT_DOWN;
1180 else {
1181 inetdev = in_dev_get(netdev);
1182 if (inetdev->ifa_list)
1183 props->state = IB_PORT_ACTIVE;
1184 else
1185 props->state = IB_PORT_INIT;
1186 in_dev_put(inetdev);
1187 }
1188
1161 props->port_cap_flags = 1189 props->port_cap_flags =
1162 IB_PORT_CM_SUP | 1190 IB_PORT_CM_SUP |
1163 IB_PORT_SNMP_TUNNEL_SUP | 1191 IB_PORT_SNMP_TUNNEL_SUP |
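
Instead of hard-coding IB_PORT_ACTIVE and a 2048-byte MTU, iwch_query_port() now derives both attributes from the underlying Ethernet device: active_mtu becomes the largest IB MTU enum not exceeding netdev->mtu, and the port state folds in the carrier bit plus whether an IPv4 address is configured. The MTU mapping, extracted as a standalone helper (enum values from <rdma/ib_verbs.h>; the helper name is hypothetical):

	#include <rdma/ib_verbs.h>
	#include <linux/netdevice.h>

	/* Largest IB MTU that fits within the netdev MTU; same result as
	 * the if/else ladder in the patch. */
	static enum ib_mtu example_ib_mtu(const struct net_device *netdev)
	{
		if (netdev->mtu >= 4096)
			return IB_MTU_4096;
		if (netdev->mtu >= 2048)
			return IB_MTU_2048;
		if (netdev->mtu >= 1024)
			return IB_MTU_1024;
		if (netdev->mtu >= 512)
			return IB_MTU_512;
		return IB_MTU_256;
	}
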
diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
index 1798e6466bd..689c35786dd 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
@@ -165,7 +165,6 @@ struct hcp_modify_qp_control_block {
165#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM( 7, 7) 165#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM( 7, 7)
166#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM( 8, 8) 166#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM( 8, 8)
167#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9, 9) 167#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9, 9)
168#define MQPCB_QP_STATE EHCA_BMASK_IBM(24, 31)
169#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11, 11) 168#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11, 11)
170#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12, 12) 169#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12, 12)
171#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13, 13) 170#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13, 13)
@@ -176,60 +175,33 @@ struct hcp_modify_qp_control_block {
176#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18, 18) 175#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18, 18)
177#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19, 19) 176#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19, 19)
178#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20, 20) 177#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20, 20)
179#define MQPCB_PATH_MTU EHCA_BMASK_IBM(24, 31)
180#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21, 21) 178#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21, 21)
181#define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24, 31)
182#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22, 22) 179#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22, 22)
183#define MQPCB_DLID EHCA_BMASK_IBM(16, 31)
184#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23, 23) 180#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23, 23)
185#define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29, 31)
186#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24, 24) 181#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24, 24)
187#define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25, 31)
188#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25, 25) 182#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25, 25)
189#define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24, 31)
190#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26, 26) 183#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26, 26)
191#define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24, 31)
192#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27, 27) 184#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27, 27)
193#define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24, 31)
194#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28, 28) 185#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28, 28)
195#define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12, 31)
196#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30, 30) 186#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30, 30)
197#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31, 31) 187#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31, 31)
198#define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28, 31)
199#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32, 32) 188#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32, 32)
200#define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31, 31)
201#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33, 33) 189#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33, 33)
202#define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
203#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34, 34) 190#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34, 34)
204#define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27, 31)
205#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35, 35) 191#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35, 35)
206#define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24, 31)
207#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36, 36) 192#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36, 36)
208#define MQPCB_DLID_AL EHCA_BMASK_IBM(16, 31)
209#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37, 37) 193#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37, 37)
210#define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
211#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38, 38) 194#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38, 38)
212#define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25, 31)
213#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39, 39) 195#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39, 39)
214#define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24, 31)
215#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40, 40) 196#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40, 40)
216#define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24, 31)
217#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41, 41) 197#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41, 41)
218#define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24, 31)
219#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42, 42) 198#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42, 42)
220#define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12, 31)
221#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44, 44) 199#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44, 44)
222#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45, 45) 200#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45, 45)
223#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
224#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46, 46) 201#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46, 46)
225#define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
226#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47, 47) 202#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47, 47)
227#define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31, 31)
228#define MQPCB_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
229#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48, 48) 203#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48, 48)
230#define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31, 31)
231#define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49, 49) 204#define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49, 49)
232#define MQPCB_CURR_SRQ_LIMIT EHCA_BMASK_IBM(16, 31)
233#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50, 50) 205#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50, 50)
234#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51, 51) 206#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51, 51)
235 207
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 99bcbd7ffb0..4b89b791be6 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -479,13 +479,13 @@ void ehca_tasklet_neq(unsigned long data)
479 struct ehca_eqe *eqe; 479 struct ehca_eqe *eqe;
480 u64 ret; 480 u64 ret;
481 481
482 eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq); 482 eqe = ehca_poll_eq(shca, &shca->neq);
483 483
484 while (eqe) { 484 while (eqe) {
485 if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry)) 485 if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
486 parse_ec(shca, eqe->entry); 486 parse_ec(shca, eqe->entry);
487 487
488 eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq); 488 eqe = ehca_poll_eq(shca, &shca->neq);
489 } 489 }
490 490
491 ret = hipz_h_reset_event(shca->ipz_hca_handle, 491 ret = hipz_h_reset_event(shca->ipz_hca_handle,
@@ -572,8 +572,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
572 eqe_cnt = 0; 572 eqe_cnt = 0;
573 do { 573 do {
574 u32 token; 574 u32 token;
575 eqe_cache[eqe_cnt].eqe = 575 eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
576 (struct ehca_eqe *)ehca_poll_eq(shca, eq);
577 if (!eqe_cache[eqe_cnt].eqe) 576 if (!eqe_cache[eqe_cnt].eqe)
578 break; 577 break;
579 eqe_value = eqe_cache[eqe_cnt].eqe->entry; 578 eqe_value = eqe_cache[eqe_cnt].eqe->entry;
@@ -637,7 +636,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
637 goto unlock_irq_spinlock; 636 goto unlock_irq_spinlock;
638 do { 637 do {
639 struct ehca_eqe *eqe; 638 struct ehca_eqe *eqe;
640 eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq); 639 eqe = ehca_poll_eq(shca, &shca->eq);
641 if (!eqe) 640 if (!eqe)
642 break; 641 break;
643 process_eqe(shca, eqe); 642 process_eqe(shca, eqe);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 368311ce332..85905ab9391 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
52#include "ehca_tools.h" 52#include "ehca_tools.h"
53#include "hcp_if.h" 53#include "hcp_if.h"
54 54
55#define HCAD_VERSION "0026" 55#define HCAD_VERSION "0027"
56 56
57MODULE_LICENSE("Dual BSD/GPL"); 57MODULE_LICENSE("Dual BSD/GPL");
58MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); 58MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 00c10815971..0338f1fabe8 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -461,7 +461,7 @@ static struct ehca_qp *internal_create_qp(
461 ib_device); 461 ib_device);
462 struct ib_ucontext *context = NULL; 462 struct ib_ucontext *context = NULL;
463 u64 h_ret; 463 u64 h_ret;
464 int is_llqp = 0, has_srq = 0; 464 int is_llqp = 0, has_srq = 0, is_user = 0;
465 int qp_type, max_send_sge, max_recv_sge, ret; 465 int qp_type, max_send_sge, max_recv_sge, ret;
466 466
467 /* h_call's out parameters */ 467 /* h_call's out parameters */
@@ -609,9 +609,6 @@ static struct ehca_qp *internal_create_qp(
609 } 609 }
610 } 610 }
611 611
612 if (pd->uobject && udata)
613 context = pd->uobject->context;
614
615 my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL); 612 my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
616 if (!my_qp) { 613 if (!my_qp) {
617 ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd); 614 ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
@@ -619,6 +616,11 @@ static struct ehca_qp *internal_create_qp(
619 return ERR_PTR(-ENOMEM); 616 return ERR_PTR(-ENOMEM);
620 } 617 }
621 618
619 if (pd->uobject && udata) {
620 is_user = 1;
621 context = pd->uobject->context;
622 }
623
622 atomic_set(&my_qp->nr_events, 0); 624 atomic_set(&my_qp->nr_events, 0);
623 init_waitqueue_head(&my_qp->wait_completion); 625 init_waitqueue_head(&my_qp->wait_completion);
624 spin_lock_init(&my_qp->spinlock_s); 626 spin_lock_init(&my_qp->spinlock_s);
@@ -707,7 +709,7 @@ static struct ehca_qp *internal_create_qp(
707 (parms.squeue.is_small || parms.rqueue.is_small); 709 (parms.squeue.is_small || parms.rqueue.is_small);
708 } 710 }
709 711
710 h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms); 712 h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
711 if (h_ret != H_SUCCESS) { 713 if (h_ret != H_SUCCESS) {
712 ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli", 714 ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
713 h_ret); 715 h_ret);
@@ -769,18 +771,20 @@ static struct ehca_qp *internal_create_qp(
769 goto create_qp_exit2; 771 goto create_qp_exit2;
770 } 772 }
771 773
772 my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length / 774 if (!is_user) {
773 my_qp->ipz_squeue.qe_size; 775 my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
774 my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries * 776 my_qp->ipz_squeue.qe_size;
775 sizeof(struct ehca_qmap_entry)); 777 my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
776 if (!my_qp->sq_map.map) { 778 sizeof(struct ehca_qmap_entry));
777 ehca_err(pd->device, "Couldn't allocate squeue " 779 if (!my_qp->sq_map.map) {
778 "map ret=%i", ret); 780 ehca_err(pd->device, "Couldn't allocate squeue "
779 goto create_qp_exit3; 781 "map ret=%i", ret);
782 goto create_qp_exit3;
783 }
784 INIT_LIST_HEAD(&my_qp->sq_err_node);
785 /* to avoid the generation of bogus flush CQEs */
786 reset_queue_map(&my_qp->sq_map);
780 } 787 }
781 INIT_LIST_HEAD(&my_qp->sq_err_node);
782 /* to avoid the generation of bogus flush CQEs */
783 reset_queue_map(&my_qp->sq_map);
784 } 788 }
785 789
786 if (HAS_RQ(my_qp)) { 790 if (HAS_RQ(my_qp)) {
@@ -792,20 +796,21 @@ static struct ehca_qp *internal_create_qp(
792 "and pages ret=%i", ret); 796 "and pages ret=%i", ret);
793 goto create_qp_exit4; 797 goto create_qp_exit4;
794 } 798 }
795 799 if (!is_user) {
796 my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length / 800 my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
797 my_qp->ipz_rqueue.qe_size; 801 my_qp->ipz_rqueue.qe_size;
798 my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries * 802 my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
799 sizeof(struct ehca_qmap_entry)); 803 sizeof(struct ehca_qmap_entry));
800 if (!my_qp->rq_map.map) { 804 if (!my_qp->rq_map.map) {
 801 ehca_err(pd->device, "Couldn't allocate squeue " 805 ehca_err(pd->device, "Couldn't allocate rqueue "
802 "map ret=%i", ret); 806 "map ret=%i", ret);
803 goto create_qp_exit5; 807 goto create_qp_exit5;
808 }
809 INIT_LIST_HEAD(&my_qp->rq_err_node);
810 /* to avoid the generation of bogus flush CQEs */
811 reset_queue_map(&my_qp->rq_map);
804 } 812 }
805 INIT_LIST_HEAD(&my_qp->rq_err_node); 813 } else if (init_attr->srq && !is_user) {
806 /* to avoid the generation of bogus flush CQEs */
807 reset_queue_map(&my_qp->rq_map);
808 } else if (init_attr->srq) {
809 /* this is a base QP, use the queue map of the SRQ */ 814 /* this is a base QP, use the queue map of the SRQ */
810 my_qp->rq_map = my_srq->rq_map; 815 my_qp->rq_map = my_srq->rq_map;
811 INIT_LIST_HEAD(&my_qp->rq_err_node); 816 INIT_LIST_HEAD(&my_qp->rq_err_node);
@@ -918,7 +923,7 @@ create_qp_exit7:
918 kfree(my_qp->mod_qp_parm); 923 kfree(my_qp->mod_qp_parm);
919 924
920create_qp_exit6: 925create_qp_exit6:
921 if (HAS_RQ(my_qp)) 926 if (HAS_RQ(my_qp) && !is_user)
922 vfree(my_qp->rq_map.map); 927 vfree(my_qp->rq_map.map);
923 928
924create_qp_exit5: 929create_qp_exit5:
@@ -926,7 +931,7 @@ create_qp_exit5:
926 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); 931 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
927 932
928create_qp_exit4: 933create_qp_exit4:
929 if (HAS_SQ(my_qp)) 934 if (HAS_SQ(my_qp) && !is_user)
930 vfree(my_qp->sq_map.map); 935 vfree(my_qp->sq_map.map);
931 936
932create_qp_exit3: 937create_qp_exit3:
@@ -1244,6 +1249,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
1244 u64 update_mask; 1249 u64 update_mask;
1245 u64 h_ret; 1250 u64 h_ret;
1246 int bad_wqe_cnt = 0; 1251 int bad_wqe_cnt = 0;
1252 int is_user = 0;
1247 int squeue_locked = 0; 1253 int squeue_locked = 0;
1248 unsigned long flags = 0; 1254 unsigned long flags = 0;
1249 1255
@@ -1266,6 +1272,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
1266 ret = ehca2ib_return_code(h_ret); 1272 ret = ehca2ib_return_code(h_ret);
1267 goto modify_qp_exit1; 1273 goto modify_qp_exit1;
1268 } 1274 }
1275 if (ibqp->uobject)
1276 is_user = 1;
1269 1277
1270 qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state); 1278 qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
1271 1279
@@ -1728,7 +1736,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
1728 goto modify_qp_exit2; 1736 goto modify_qp_exit2;
1729 } 1737 }
1730 } 1738 }
1731 if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) { 1739 if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
1740 && !is_user) {
1732 ret = check_for_left_cqes(my_qp, shca); 1741 ret = check_for_left_cqes(my_qp, shca);
1733 if (ret) 1742 if (ret)
1734 goto modify_qp_exit2; 1743 goto modify_qp_exit2;
@@ -1738,16 +1747,17 @@ static int internal_modify_qp(struct ib_qp *ibqp,
1738 ipz_qeit_reset(&my_qp->ipz_rqueue); 1747 ipz_qeit_reset(&my_qp->ipz_rqueue);
1739 ipz_qeit_reset(&my_qp->ipz_squeue); 1748 ipz_qeit_reset(&my_qp->ipz_squeue);
1740 1749
1741 if (qp_cur_state == IB_QPS_ERR) { 1750 if (qp_cur_state == IB_QPS_ERR && !is_user) {
1742 del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node); 1751 del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
1743 1752
1744 if (HAS_RQ(my_qp)) 1753 if (HAS_RQ(my_qp))
1745 del_from_err_list(my_qp->recv_cq, 1754 del_from_err_list(my_qp->recv_cq,
1746 &my_qp->rq_err_node); 1755 &my_qp->rq_err_node);
1747 } 1756 }
1748 reset_queue_map(&my_qp->sq_map); 1757 if (!is_user)
1758 reset_queue_map(&my_qp->sq_map);
1749 1759
1750 if (HAS_RQ(my_qp)) 1760 if (HAS_RQ(my_qp) && !is_user)
1751 reset_queue_map(&my_qp->rq_map); 1761 reset_queue_map(&my_qp->rq_map);
1752 } 1762 }
1753 1763
@@ -1952,19 +1962,13 @@ int ehca_query_qp(struct ib_qp *qp,
1952 qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size; 1962 qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
1953 qp_attr->dest_qp_num = qpcb->dest_qp_nr; 1963 qp_attr->dest_qp_num = qpcb->dest_qp_nr;
1954 1964
1955 qp_attr->pkey_index = 1965 qp_attr->pkey_index = qpcb->prim_p_key_idx;
1956 EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx); 1966 qp_attr->port_num = qpcb->prim_phys_port;
1957
1958 qp_attr->port_num =
1959 EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port);
1960
1961 qp_attr->timeout = qpcb->timeout; 1967 qp_attr->timeout = qpcb->timeout;
1962 qp_attr->retry_cnt = qpcb->retry_count; 1968 qp_attr->retry_cnt = qpcb->retry_count;
1963 qp_attr->rnr_retry = qpcb->rnr_retry_count; 1969 qp_attr->rnr_retry = qpcb->rnr_retry_count;
1964 1970
1965 qp_attr->alt_pkey_index = 1971 qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
1966 EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx);
1967
1968 qp_attr->alt_port_num = qpcb->alt_phys_port; 1972 qp_attr->alt_port_num = qpcb->alt_phys_port;
1969 qp_attr->alt_timeout = qpcb->timeout_al; 1973 qp_attr->alt_timeout = qpcb->timeout_al;
1970 1974
@@ -2051,8 +2055,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
2051 update_mask |= 2055 update_mask |=
2052 EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1) 2056 EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
2053 | EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1); 2057 | EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
2054 mqpcb->curr_srq_limit = 2058 mqpcb->curr_srq_limit = attr->srq_limit;
2055 EHCA_BMASK_SET(MQPCB_CURR_SRQ_LIMIT, attr->srq_limit);
2056 mqpcb->qp_aff_asyn_ev_log_reg = 2059 mqpcb->qp_aff_asyn_ev_log_reg =
2057 EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1); 2060 EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
2058 } 2061 }
@@ -2115,8 +2118,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
2115 2118
2116 srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1; 2119 srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
2117 srq_attr->max_sge = 3; 2120 srq_attr->max_sge = 3;
2118 srq_attr->srq_limit = EHCA_BMASK_GET( 2121 srq_attr->srq_limit = qpcb->curr_srq_limit;
2119 MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);
2120 2122
2121 if (ehca_debug_level >= 2) 2123 if (ehca_debug_level >= 2)
2122 ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num); 2124 ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
@@ -2138,10 +2140,12 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
2138 int ret; 2140 int ret;
2139 u64 h_ret; 2141 u64 h_ret;
2140 u8 port_num; 2142 u8 port_num;
2143 int is_user = 0;
2141 enum ib_qp_type qp_type; 2144 enum ib_qp_type qp_type;
2142 unsigned long flags; 2145 unsigned long flags;
2143 2146
2144 if (uobject) { 2147 if (uobject) {
2148 is_user = 1;
2145 if (my_qp->mm_count_galpa || 2149 if (my_qp->mm_count_galpa ||
2146 my_qp->mm_count_rqueue || my_qp->mm_count_squeue) { 2150 my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
2147 ehca_err(dev, "Resources still referenced in " 2151 ehca_err(dev, "Resources still referenced in "
@@ -2168,10 +2172,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
2168 * SRQs will never get into an error list and do not have a recv_cq, 2172 * SRQs will never get into an error list and do not have a recv_cq,
2169 * so we need to skip them here. 2173 * so we need to skip them here.
2170 */ 2174 */
2171 if (HAS_RQ(my_qp) && !IS_SRQ(my_qp)) 2175 if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
2172 del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node); 2176 del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
2173 2177
2174 if (HAS_SQ(my_qp)) 2178 if (HAS_SQ(my_qp) && !is_user)
2175 del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node); 2179 del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
2176 2180
2177 /* now wait until all pending events have completed */ 2181 /* now wait until all pending events have completed */
@@ -2209,13 +2213,13 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
2209 2213
2210 if (HAS_RQ(my_qp)) { 2214 if (HAS_RQ(my_qp)) {
2211 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); 2215 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
2212 2216 if (!is_user)
2213 vfree(my_qp->rq_map.map); 2217 vfree(my_qp->rq_map.map);
2214 } 2218 }
2215 if (HAS_SQ(my_qp)) { 2219 if (HAS_SQ(my_qp)) {
2216 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); 2220 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
2217 2221 if (!is_user)
2218 vfree(my_qp->sq_map.map); 2222 vfree(my_qp->sq_map.map);
2219 } 2223 }
2220 kmem_cache_free(qp_cache, my_qp); 2224 kmem_cache_free(qp_cache, my_qp);
2221 atomic_dec(&shca->num_qps); 2225 atomic_dec(&shca->num_qps);
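
The ehca_qp hunks above all apply one rule: the software queue maps exist only so the kernel can synthesize flush CQEs, and userspace QPs never need them, so every map allocation, reset, error-list operation and free is now gated on is_user. A minimal sketch of that pattern, with illustrative names (struct qmap and its helpers are stand-ins, not the driver's types):

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <linux/vmalloc.h>

    /* stand-ins for struct ehca_qmap_entry and its reset helper */
    struct qmap_entry { u32 wr_id; };

    struct qmap {
        unsigned int entries;
        struct qmap_entry *map;
    };

    static void reset_qmap(struct qmap *q) { /* clears per-entry state */ }

    static int alloc_qmap(struct qmap *q, unsigned int entries, int is_user)
    {
        if (is_user)
            return 0;       /* userspace QPs never get a kernel-side map */

        q->entries = entries;
        q->map = vmalloc(entries * sizeof(*q->map));
        if (!q->map)
            return -ENOMEM;
        reset_qmap(q);      /* avoid generating bogus flush CQEs */
        return 0;
    }

    static void free_qmap(struct qmap *q, int is_user)
    {
        if (!is_user)
            vfree(q->map);  /* vfree(NULL) is a harmless no-op */
    }
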
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index d0ab0c0d5e9..4d5dc3304d4 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -284,7 +284,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
284 param->act_pages = (u32)outs[4]; 284 param->act_pages = (u32)outs[4];
285 285
286 if (ret == H_SUCCESS) 286 if (ret == H_SUCCESS)
287 hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]); 287 hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
288 288
289 if (ret == H_NOT_ENOUGH_RESOURCES) 289 if (ret == H_NOT_ENOUGH_RESOURCES)
290 ehca_gen_err("Not enough resources. ret=%lli", ret); 290 ehca_gen_err("Not enough resources. ret=%lli", ret);
@@ -293,7 +293,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
293} 293}
294 294
295u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle, 295u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
296 struct ehca_alloc_qp_parms *parms) 296 struct ehca_alloc_qp_parms *parms, int is_user)
297{ 297{
298 u64 ret; 298 u64 ret;
299 u64 allocate_controls, max_r10_reg, r11, r12; 299 u64 allocate_controls, max_r10_reg, r11, r12;
@@ -359,7 +359,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
359 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]); 359 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
360 360
361 if (ret == H_SUCCESS) 361 if (ret == H_SUCCESS)
362 hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]); 362 hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
363 363
364 if (ret == H_NOT_ENOUGH_RESOURCES) 364 if (ret == H_NOT_ENOUGH_RESOURCES)
365 ehca_gen_err("Not enough resources. ret=%lli", ret); 365 ehca_gen_err("Not enough resources. ret=%lli", ret);
diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/infiniband/hw/ehca/hcp_if.h
index 2c3c6e0ea5c..39c1c3618ec 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.h
+++ b/drivers/infiniband/hw/ehca/hcp_if.h
@@ -78,7 +78,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
78 * initialize resources, create empty QPPTs (2 rings). 78 * initialize resources, create empty QPPTs (2 rings).
79 */ 79 */
80u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle, 80u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
81 struct ehca_alloc_qp_parms *parms); 81 struct ehca_alloc_qp_parms *parms, int is_user);
82 82
83u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle, 83u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
84 const u8 port_id, 84 const u8 port_id,
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c
index 214821095cb..b3e0e72e8a7 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.c
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.c
@@ -54,12 +54,15 @@ int hcall_unmap_page(u64 mapaddr)
54 return 0; 54 return 0;
55} 55}
56 56
57int hcp_galpas_ctor(struct h_galpas *galpas, 57int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
58 u64 paddr_kernel, u64 paddr_user) 58 u64 paddr_kernel, u64 paddr_user)
59{ 59{
60 int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle); 60 if (!is_user) {
61 if (ret) 61 int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
62 return ret; 62 if (ret)
63 return ret;
64 } else
65 galpas->kernel.fw_handle = 0;
63 66
64 galpas->user.fw_handle = paddr_user; 67 galpas->user.fw_handle = paddr_user;
65 68
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/infiniband/hw/ehca/hcp_phyp.h
index 5305c2a3ed9..204227d5303 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.h
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.h
@@ -78,7 +78,7 @@ static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
78 *(volatile u64 __force *)addr = value; 78 *(volatile u64 __force *)addr = value;
79} 79}
80 80
81int hcp_galpas_ctor(struct h_galpas *galpas, 81int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
82 u64 paddr_kernel, u64 paddr_user); 82 u64 paddr_kernel, u64 paddr_user);
83 83
84int hcp_galpas_dtor(struct h_galpas *galpas); 84int hcp_galpas_dtor(struct h_galpas *galpas);
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index c3a32846543..1227c593627 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -220,10 +220,13 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
220 queue->small_page = NULL; 220 queue->small_page = NULL;
221 221
222 /* allocate queue page pointers */ 222 /* allocate queue page pointers */
223 queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *)); 223 queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
224 if (!queue->queue_pages) { 224 if (!queue->queue_pages) {
225 ehca_gen_err("Couldn't allocate queue page list"); 225 queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
226 return 0; 226 if (!queue->queue_pages) {
227 ehca_gen_err("Couldn't allocate queue page list");
228 return 0;
229 }
227 } 230 }
228 memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *)); 231 memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
229 232
@@ -240,7 +243,10 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
240ipz_queue_ctor_exit0: 243ipz_queue_ctor_exit0:
241 ehca_gen_err("Couldn't alloc pages queue=%p " 244 ehca_gen_err("Couldn't alloc pages queue=%p "
242 "nr_of_pages=%x", queue, nr_of_pages); 245 "nr_of_pages=%x", queue, nr_of_pages);
243 vfree(queue->queue_pages); 246 if (is_vmalloc_addr(queue->queue_pages))
247 vfree(queue->queue_pages);
248 else
249 kfree(queue->queue_pages);
244 250
245 return 0; 251 return 0;
246} 252}
@@ -262,7 +268,10 @@ int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
262 free_page((unsigned long)queue->queue_pages[i]); 268 free_page((unsigned long)queue->queue_pages[i]);
263 } 269 }
264 270
265 vfree(queue->queue_pages); 271 if (is_vmalloc_addr(queue->queue_pages))
272 vfree(queue->queue_pages);
273 else
274 kfree(queue->queue_pages);
266 275
267 return 1; 276 return 1;
268} 277}
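
The ipz_queue_ctor()/ipz_queue_dtor() change above is the standard kmalloc-first, vmalloc-fallback allocation pattern, with is_vmalloc_addr() choosing the matching free on the way out. A minimal self-contained sketch of the same idea:

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void **alloc_page_list(unsigned int nr_of_pages)
    {
        /* prefer the cheap, physically contiguous allocation ... */
        void **pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);

        /* ... and fall back to vmalloc() for very large page lists */
        if (!pages)
            pages = vmalloc(nr_of_pages * sizeof(void *));
        return pages;
    }

    static void free_page_list(void **pages)
    {
        /* is_vmalloc_addr() tells us which allocator produced it */
        if (is_vmalloc_addr(pages))
            vfree(pages);
        else
            kfree(pages);
    }
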
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 20724aee76f..c4a02648c8a 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1585,12 +1585,16 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1585 break; 1585 break;
1586 1586
1587 case IB_WR_LOCAL_INV: 1587 case IB_WR_LOCAL_INV:
1588 ctrl->srcrb_flags |=
1589 cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
1588 set_local_inv_seg(wqe, wr->ex.invalidate_rkey); 1590 set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
1589 wqe += sizeof (struct mlx4_wqe_local_inval_seg); 1591 wqe += sizeof (struct mlx4_wqe_local_inval_seg);
1590 size += sizeof (struct mlx4_wqe_local_inval_seg) / 16; 1592 size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
1591 break; 1593 break;
1592 1594
1593 case IB_WR_FAST_REG_MR: 1595 case IB_WR_FAST_REG_MR:
1596 ctrl->srcrb_flags |=
1597 cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
1594 set_fmr_seg(wqe, wr); 1598 set_fmr_seg(wqe, wr);
1595 wqe += sizeof (struct mlx4_wqe_fmr_seg); 1599 wqe += sizeof (struct mlx4_wqe_fmr_seg);
1596 size += sizeof (struct mlx4_wqe_fmr_seg) / 16; 1600 size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 6d55f9d748f..8c2ed994d54 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1059,7 +1059,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
1059 MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET); 1059 MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET);
1060 if (mthca_is_memfree(dev)) 1060 if (mthca_is_memfree(dev))
1061 dev_lim->reserved_mtts = ALIGN((1 << (field >> 4)) * sizeof(u64), 1061 dev_lim->reserved_mtts = ALIGN((1 << (field >> 4)) * sizeof(u64),
1062 MTHCA_MTT_SEG_SIZE) / MTHCA_MTT_SEG_SIZE; 1062 dev->limits.mtt_seg_size) / dev->limits.mtt_seg_size;
1063 else 1063 else
1064 dev_lim->reserved_mtts = 1 << (field >> 4); 1064 dev_lim->reserved_mtts = 1 << (field >> 4);
1065 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET); 1065 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 252590116df..9ef611f6dd3 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -159,6 +159,7 @@ struct mthca_limits {
159 int reserved_eqs; 159 int reserved_eqs;
160 int num_mpts; 160 int num_mpts;
161 int num_mtt_segs; 161 int num_mtt_segs;
162 int mtt_seg_size;
162 int fmr_reserved_mtts; 163 int fmr_reserved_mtts;
163 int reserved_mtts; 164 int reserved_mtts;
164 int reserved_mrws; 165 int reserved_mrws;
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 28f0e0c40d7..90e4e450a12 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -641,9 +641,11 @@ static void mthca_free_irqs(struct mthca_dev *dev)
641 if (dev->eq_table.have_irq) 641 if (dev->eq_table.have_irq)
642 free_irq(dev->pdev->irq, dev); 642 free_irq(dev->pdev->irq, dev);
643 for (i = 0; i < MTHCA_NUM_EQ; ++i) 643 for (i = 0; i < MTHCA_NUM_EQ; ++i)
644 if (dev->eq_table.eq[i].have_irq) 644 if (dev->eq_table.eq[i].have_irq) {
645 free_irq(dev->eq_table.eq[i].msi_x_vector, 645 free_irq(dev->eq_table.eq[i].msi_x_vector,
646 dev->eq_table.eq + i); 646 dev->eq_table.eq + i);
647 dev->eq_table.eq[i].have_irq = 0;
648 }
647} 649}
648 650
649static int mthca_map_reg(struct mthca_dev *dev, 651static int mthca_map_reg(struct mthca_dev *dev,
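
Clearing have_irq right after free_irq() above makes mthca_free_irqs() safe to call twice, for instance once from an error path and again from the remove path. A sketch of that idempotent-teardown pattern, with a stand-in struct:

    #include <linux/interrupt.h>

    struct eq_stub {
        int have_irq;
        int msi_x_vector;
    };

    static void release_vector(struct eq_stub *eq)
    {
        if (eq->have_irq) {
            free_irq(eq->msi_x_vector, eq);
            eq->have_irq = 0;   /* a second call becomes a no-op */
        }
    }
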
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 1d83cf7caf3..13da9f1d24c 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -125,6 +125,10 @@ module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
125MODULE_PARM_DESC(fmr_reserved_mtts, 125MODULE_PARM_DESC(fmr_reserved_mtts,
126 "number of memory translation table segments reserved for FMR"); 126 "number of memory translation table segments reserved for FMR");
127 127
128static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
129module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
130MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
131
128static char mthca_version[] __devinitdata = 132static char mthca_version[] __devinitdata =
129 DRV_NAME ": Mellanox InfiniBand HCA driver v" 133 DRV_NAME ": Mellanox InfiniBand HCA driver v"
130 DRV_VERSION " (" DRV_RELDATE ")\n"; 134 DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -162,6 +166,7 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
162 int err; 166 int err;
163 u8 status; 167 u8 status;
164 168
169 mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;
165 err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status); 170 err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
166 if (err) { 171 if (err) {
167 mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n"); 172 mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
@@ -460,11 +465,11 @@ static int mthca_init_icm(struct mthca_dev *mdev,
460 } 465 }
461 466
462 /* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */ 467 /* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */
463 mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * MTHCA_MTT_SEG_SIZE, 468 mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size,
464 dma_get_cache_alignment()) / MTHCA_MTT_SEG_SIZE; 469 dma_get_cache_alignment()) / mdev->limits.mtt_seg_size;
465 470
466 mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base, 471 mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
467 MTHCA_MTT_SEG_SIZE, 472 mdev->limits.mtt_seg_size,
468 mdev->limits.num_mtt_segs, 473 mdev->limits.num_mtt_segs,
469 mdev->limits.reserved_mtts, 474 mdev->limits.reserved_mtts,
470 1, 0); 475 1, 0);
@@ -1315,6 +1320,12 @@ static void __init mthca_validate_profile(void)
1315 printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n", 1320 printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n",
1316 hca_profile.fmr_reserved_mtts); 1321 hca_profile.fmr_reserved_mtts);
1317 } 1322 }
1323
1324 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
1325 printk(KERN_WARNING PFX "bad log_mtts_per_seg (%d). Using default - %d\n",
1326 log_mtts_per_seg, ilog2(MTHCA_MTT_SEG_SIZE / 8));
1327 log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
1328 }
1318} 1329}
1319 1330
1320static int __init mthca_init(void) 1331static int __init mthca_init(void)
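
The mthca changes replace the compile-time MTHCA_MTT_SEG_SIZE with a per-device limits.mtt_seg_size derived from a new module parameter. A sketch of the arithmetic, assuming the historical 64-byte segment (8 entries of 8 bytes each) as the default; the names here are illustrative:

    #include <linux/kernel.h>
    #include <linux/log2.h>
    #include <linux/moduleparam.h>

    #define DEFAULT_MTT_SEG_SIZE 64  /* assumed legacy segment: 8 x 8-byte MTTs */

    static int log_mtts_per_seg = ilog2(DEFAULT_MTT_SEG_SIZE / 8);
    module_param(log_mtts_per_seg, int, 0444);
    MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");

    static int mtt_seg_size_bytes(void)
    {
        /* each MTT entry is a u64, hence the "* 8" */
        if (log_mtts_per_seg < 1 || log_mtts_per_seg > 5)
            log_mtts_per_seg = ilog2(DEFAULT_MTT_SEG_SIZE / 8);
        return (1 << log_mtts_per_seg) * 8;
    }
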
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 882e6b73591..d606edf1085 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -220,7 +220,7 @@ static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
220 220
221 mtt->buddy = buddy; 221 mtt->buddy = buddy;
222 mtt->order = 0; 222 mtt->order = 0;
223 for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1) 223 for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1)
224 ++mtt->order; 224 ++mtt->order;
225 225
226 mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy); 226 mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
@@ -267,7 +267,7 @@ static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
267 267
268 while (list_len > 0) { 268 while (list_len > 0) {
269 mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base + 269 mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
270 mtt->first_seg * MTHCA_MTT_SEG_SIZE + 270 mtt->first_seg * dev->limits.mtt_seg_size +
271 start_index * 8); 271 start_index * 8);
272 mtt_entry[1] = 0; 272 mtt_entry[1] = 0;
273 for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i) 273 for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
@@ -326,7 +326,7 @@ static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
326 u64 __iomem *mtts; 326 u64 __iomem *mtts;
327 int i; 327 int i;
328 328
329 mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * MTHCA_MTT_SEG_SIZE + 329 mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size +
330 start_index * sizeof (u64); 330 start_index * sizeof (u64);
331 for (i = 0; i < list_len; ++i) 331 for (i = 0; i < list_len; ++i)
332 mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT), 332 mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT),
@@ -345,10 +345,10 @@ static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
345 /* For Arbel, all MTTs must fit in the same page. */ 345 /* For Arbel, all MTTs must fit in the same page. */
346 BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE); 346 BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE);
347 /* Require full segments */ 347 /* Require full segments */
348 BUG_ON(s % MTHCA_MTT_SEG_SIZE); 348 BUG_ON(s % dev->limits.mtt_seg_size);
349 349
350 mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg + 350 mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg +
351 s / MTHCA_MTT_SEG_SIZE, &dma_handle); 351 s / dev->limits.mtt_seg_size, &dma_handle);
352 352
353 BUG_ON(!mtts); 353 BUG_ON(!mtts);
354 354
@@ -479,7 +479,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
479 if (mr->mtt) 479 if (mr->mtt)
480 mpt_entry->mtt_seg = 480 mpt_entry->mtt_seg =
481 cpu_to_be64(dev->mr_table.mtt_base + 481 cpu_to_be64(dev->mr_table.mtt_base +
482 mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE); 482 mr->mtt->first_seg * dev->limits.mtt_seg_size);
483 483
484 if (0) { 484 if (0) {
485 mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey); 485 mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
@@ -626,7 +626,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
626 goto err_out_table; 626 goto err_out_table;
627 } 627 }
628 628
629 mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE; 629 mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size;
630 630
631 if (mthca_is_memfree(dev)) { 631 if (mthca_is_memfree(dev)) {
632 mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table, 632 mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
@@ -908,7 +908,7 @@ int mthca_init_mr_table(struct mthca_dev *dev)
908 dev->mr_table.mtt_base); 908 dev->mr_table.mtt_base);
909 909
910 dev->mr_table.tavor_fmr.mtt_base = 910 dev->mr_table.tavor_fmr.mtt_base =
911 ioremap(addr, mtts * MTHCA_MTT_SEG_SIZE); 911 ioremap(addr, mtts * dev->limits.mtt_seg_size);
912 if (!dev->mr_table.tavor_fmr.mtt_base) { 912 if (!dev->mr_table.tavor_fmr.mtt_base) {
913 mthca_warn(dev, "MTT ioremap for FMR failed.\n"); 913 mthca_warn(dev, "MTT ioremap for FMR failed.\n");
914 err = -ENOMEM; 914 err = -ENOMEM;
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index d168c254061..8edb28a9a0e 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -94,7 +94,7 @@ s64 mthca_make_profile(struct mthca_dev *dev,
94 profile[MTHCA_RES_RDB].size = MTHCA_RDB_ENTRY_SIZE; 94 profile[MTHCA_RES_RDB].size = MTHCA_RDB_ENTRY_SIZE;
95 profile[MTHCA_RES_MCG].size = MTHCA_MGM_ENTRY_SIZE; 95 profile[MTHCA_RES_MCG].size = MTHCA_MGM_ENTRY_SIZE;
96 profile[MTHCA_RES_MPT].size = dev_lim->mpt_entry_sz; 96 profile[MTHCA_RES_MPT].size = dev_lim->mpt_entry_sz;
97 profile[MTHCA_RES_MTT].size = MTHCA_MTT_SEG_SIZE; 97 profile[MTHCA_RES_MTT].size = dev->limits.mtt_seg_size;
98 profile[MTHCA_RES_UAR].size = dev_lim->uar_scratch_entry_sz; 98 profile[MTHCA_RES_UAR].size = dev_lim->uar_scratch_entry_sz;
99 profile[MTHCA_RES_UDAV].size = MTHCA_AV_SIZE; 99 profile[MTHCA_RES_UDAV].size = MTHCA_AV_SIZE;
100 profile[MTHCA_RES_UARC].size = request->uarc_size; 100 profile[MTHCA_RES_UARC].size = request->uarc_size;
@@ -232,7 +232,7 @@ s64 mthca_make_profile(struct mthca_dev *dev,
232 dev->limits.num_mtt_segs = profile[i].num; 232 dev->limits.num_mtt_segs = profile[i].num;
233 dev->mr_table.mtt_base = profile[i].start; 233 dev->mr_table.mtt_base = profile[i].start;
234 init_hca->mtt_base = profile[i].start; 234 init_hca->mtt_base = profile[i].start;
235 init_hca->mtt_seg_sz = ffs(MTHCA_MTT_SEG_SIZE) - 7; 235 init_hca->mtt_seg_sz = ffs(dev->limits.mtt_seg_size) - 7;
236 break; 236 break;
237 case MTHCA_RES_UAR: 237 case MTHCA_RES_UAR:
238 dev->limits.num_uars = profile[i].num; 238 dev->limits.num_uars = profile[i].num;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index b832a7b814a..4a84d02ece0 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -667,7 +667,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
667 i = 0; 667 i = 0;
668 while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000) 668 while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
669 mdelay(1); 669 mdelay(1);
670 if (i >= 10000) { 670 if (i > 10000) {
671 nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n"); 671 nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n");
672 return 0; 672 return 0;
673 } 673 }
@@ -675,7 +675,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
675 i = 0; 675 i = 0;
676 while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000) 676 while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
677 mdelay(1); 677 mdelay(1);
678 if (i >= 10000) { 678 if (i > 10000) {
679 printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n", 679 printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
680 nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS)); 680 nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
681 return 0; 681 return 0;
@@ -701,7 +701,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
701 i = 0; 701 i = 0;
702 while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000) 702 while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
703 mdelay(1); 703 mdelay(1);
704 if (i >= 10000) { 704 if (i > 10000) {
705 nes_debug(NES_DBG_INIT, "Did not see port soft reset done.\n"); 705 nes_debug(NES_DBG_INIT, "Did not see port soft reset done.\n");
706 return 0; 706 return 0;
707 } 707 }
@@ -711,7 +711,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
711 while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0) 711 while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
712 & 0x0000000f)) != 0x0000000f) && i++ < 5000) 712 & 0x0000000f)) != 0x0000000f) && i++ < 5000)
713 mdelay(1); 713 mdelay(1);
714 if (i >= 5000) { 714 if (i > 5000) {
715 nes_debug(NES_DBG_INIT, "Serdes 0 not ready, status=%x\n", u32temp); 715 nes_debug(NES_DBG_INIT, "Serdes 0 not ready, status=%x\n", u32temp);
716 return 0; 716 return 0;
717 } 717 }
@@ -722,7 +722,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
722 while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1) 722 while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
723 & 0x0000000f)) != 0x0000000f) && i++ < 5000) 723 & 0x0000000f)) != 0x0000000f) && i++ < 5000)
724 mdelay(1); 724 mdelay(1);
725 if (i >= 5000) { 725 if (i > 5000) {
726 nes_debug(NES_DBG_INIT, "Serdes 1 not ready, status=%x\n", u32temp); 726 nes_debug(NES_DBG_INIT, "Serdes 1 not ready, status=%x\n", u32temp);
727 return 0; 727 return 0;
728 } 728 }
@@ -792,7 +792,7 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
792 while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0) 792 while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
793 & 0x0000000f)) != 0x0000000f) && i++ < 5000) 793 & 0x0000000f)) != 0x0000000f) && i++ < 5000)
794 mdelay(1); 794 mdelay(1);
795 if (i >= 5000) { 795 if (i > 5000) {
796 nes_debug(NES_DBG_PHY, "Init: serdes 0 not ready, status=%x\n", u32temp); 796 nes_debug(NES_DBG_PHY, "Init: serdes 0 not ready, status=%x\n", u32temp);
797 return 1; 797 return 1;
798 } 798 }
@@ -815,7 +815,7 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
815 while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1) 815 while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
816 & 0x0000000f)) != 0x0000000f) && (i++ < 5000)) 816 & 0x0000000f)) != 0x0000000f) && (i++ < 5000))
817 mdelay(1); 817 mdelay(1);
818 if (i >= 5000) { 818 if (i > 5000) {
819 printk("%s: Init: serdes 1 not ready, status=%x\n", __func__, u32temp); 819 printk("%s: Init: serdes 1 not ready, status=%x\n", __func__, u32temp);
820 /* return 1; */ 820 /* return 1; */
821 } 821 }
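
The nes changes above fix an off-by-one in the timeout tests: with "while (!done && i++ < N)", i only exceeds N on a genuine timeout, while i == N can also mean the device became ready on the very last poll. A small standalone program demonstrating the difference (illustrative, not driver code):

    #include <stdio.h>

    #define LIMIT 10000

    /* models "while (not_ready && i++ < LIMIT) mdelay(1);" */
    static int poll(int ready_after)
    {
        int i = 0, polls = 0;

        while (polls < ready_after && i++ < LIMIT)
            polls++;            /* stands in for mdelay(1) plus a re-read */

        return i;
    }

    int main(void)
    {
        int last = poll(LIMIT);          /* device ready on the final poll */
        int never = poll(LIMIT + 1);     /* device never ready in time */

        printf("ready on last poll: i=%d, 'i >= LIMIT' says %s, 'i > LIMIT' says %s\n",
               last, last >= LIMIT ? "timeout" : "ok",
               last > LIMIT ? "timeout" : "ok");
        printf("genuine timeout:    i=%d\n", never);
        return 0;
    }
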
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 75223f50de5..0ba6ec87629 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -257,11 +257,8 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
257{ 257{
258 struct iscsi_iser_task *iser_task = task->dd_data; 258 struct iscsi_iser_task *iser_task = task->dd_data;
259 259
260 /* 260 /* mgmt tasks do not need special cleanup */
261 * mgmt tasks do not need special cleanup and we do not 261 if (!task->sc)
262 * allocate anything in the init task callout
263 */
264 if (!task->sc || task->state == ISCSI_TASK_PENDING)
265 return; 262 return;
266 263
267 if (iser_task->status == ISER_TASK_STATUS_STARTED) { 264 if (iser_task->status == ISER_TASK_STATUS_STARTED) {
@@ -517,7 +514,8 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
517} 514}
518 515
519static struct iscsi_endpoint * 516static struct iscsi_endpoint *
520iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking) 517iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
518 int non_blocking)
521{ 519{
522 int err; 520 int err;
523 struct iser_conn *ib_conn; 521 struct iser_conn *ib_conn;
diff --git a/drivers/input/input.c b/drivers/input/input.c
index e54e002665b..5d445f48789 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -42,6 +42,7 @@ static unsigned int input_abs_bypass_init_data[] __initdata = {
42 ABS_MT_POSITION_Y, 42 ABS_MT_POSITION_Y,
43 ABS_MT_TOOL_TYPE, 43 ABS_MT_TOOL_TYPE,
44 ABS_MT_BLOB_ID, 44 ABS_MT_BLOB_ID,
45 ABS_MT_TRACKING_ID,
45 0 46 0
46}; 47};
47static unsigned long input_abs_bypass[BITS_TO_LONGS(ABS_CNT)]; 48static unsigned long input_abs_bypass[BITS_TO_LONGS(ABS_CNT)];
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 5c0a631d145..06f46fcc077 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -232,7 +232,7 @@ config INPUT_GPIO_ROTARY_ENCODER
232 depends on GPIOLIB && GENERIC_GPIO 232 depends on GPIOLIB && GENERIC_GPIO
233 help 233 help
234 Say Y here to add support for rotary encoders connected to GPIO lines. 234 Say Y here to add support for rotary encoders connected to GPIO lines.
235 Check file:Documentation/incput/rotary_encoder.txt for more 235 Check file:Documentation/input/rotary-encoder.txt for more
236 information. 236 information.
237 237
238 To compile this driver as a module, choose M here: the 238 To compile this driver as a module, choose M here: the
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index da3c3a5d268..c4b3fbd1a80 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -192,7 +192,7 @@ config SERIO_RAW
192 192
193config SERIO_XILINX_XPS_PS2 193config SERIO_XILINX_XPS_PS2
194 tristate "Xilinx XPS PS/2 Controller Support" 194 tristate "Xilinx XPS PS/2 Controller Support"
195 depends on PPC 195 depends on PPC || MICROBLAZE
196 help 196 help
197 This driver supports XPS PS/2 IP from the Xilinx EDK on 197 This driver supports XPS PS/2 IP from the Xilinx EDK on
198 PowerPC platform. 198 PowerPC platform.
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index e29cdc13a19..89b394183a7 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -107,7 +107,7 @@ static void amba_kmi_close(struct serio *io)
107 clk_disable(kmi->clk); 107 clk_disable(kmi->clk);
108} 108}
109 109
110static int amba_kmi_probe(struct amba_device *dev, void *id) 110static int amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
111{ 111{
112 struct amba_kmi_port *kmi; 112 struct amba_kmi_port *kmi;
113 struct serio *io; 113 struct serio *io;
@@ -135,7 +135,7 @@ static int amba_kmi_probe(struct amba_device *dev, void *id)
135 io->dev.parent = &dev->dev; 135 io->dev.parent = &dev->dev;
136 136
137 kmi->io = io; 137 kmi->io = io;
138 kmi->base = ioremap(dev->res.start, KMI_SIZE); 138 kmi->base = ioremap(dev->res.start, resource_size(&dev->res));
139 if (!kmi->base) { 139 if (!kmi->base) {
140 ret = -ENOMEM; 140 ret = -ENOMEM;
141 goto out; 141 goto out;
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 67248c31e19..be5bbbb8ae4 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -210,7 +210,7 @@ int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
210 timeout = wait_event_timeout(ps2dev->wait, 210 timeout = wait_event_timeout(ps2dev->wait,
211 !(ps2dev->flags & PS2_FLAG_CMD1), timeout); 211 !(ps2dev->flags & PS2_FLAG_CMD1), timeout);
212 212
213 if (ps2dev->cmdcnt && timeout > 0) { 213 if (ps2dev->cmdcnt && !(ps2dev->flags & PS2_FLAG_CMD1)) {
214 214
215 timeout = ps2_adjust_timeout(ps2dev, command, timeout); 215 timeout = ps2_adjust_timeout(ps2dev, command, timeout);
216 wait_event_timeout(ps2dev->wait, 216 wait_event_timeout(ps2dev->wait,
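
The libps2 change replaces a "timeout > 0" success test with a re-check of the completion flag: wait_event_timeout() in kernels of this era returned the remaining jiffies and could legitimately return 0 even when the condition came true on the final tick, so the leftover timeout is not a reliable success indicator. The excerpt below restates the fixed code with explanatory comments added:

        /* Wait for the first response byte; the interrupt handler clears
         * PS2_FLAG_CMD1 as soon as it arrives. */
        timeout = wait_event_timeout(ps2dev->wait,
                                     !(ps2dev->flags & PS2_FLAG_CMD1), timeout);

        /* Judge success by the flag itself, not by the remaining time:
         * the condition may have become true exactly as the timer expired. */
        if (ps2dev->cmdcnt && !(ps2dev->flags & PS2_FLAG_CMD1)) {
                timeout = ps2_adjust_timeout(ps2dev, command, timeout);
                wait_event_timeout(ps2dev->wait,
                                   !(ps2dev->flags & PS2_FLAG_CMD), timeout);
        }
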
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index f100c7f4c1d..6954f550010 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -419,7 +419,7 @@ static int ucb1400_ts_remove(struct platform_device *dev)
419#ifdef CONFIG_PM 419#ifdef CONFIG_PM
420static int ucb1400_ts_resume(struct platform_device *dev) 420static int ucb1400_ts_resume(struct platform_device *dev)
421{ 421{
422 struct ucb1400_ts *ucb = platform_get_drvdata(dev); 422 struct ucb1400_ts *ucb = dev->dev.platform_data;
423 423
424 if (ucb->ts_task) { 424 if (ucb->ts_task) {
425 /* 425 /*
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 7d97d54588d..77e9fdda059 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -183,7 +183,7 @@ int cf_command(int drvid, int mode,
183 (mode != 1) ? "" : " 0 ", 183 (mode != 1) ? "" : " 0 ",
184 (mode != 1) ? "" : fwd_nr); 184 (mode != 1) ? "" : fwd_nr);
185 185
186 retval = divert_if.ll_cmd(&cs->ics); /* excute command */ 186 retval = divert_if.ll_cmd(&cs->ics); /* execute command */
187 187
188 if (!retval) 188 if (!retval)
189 { cs->prev = NULL; 189 { cs->prev = NULL;
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index c12cd2f9425..77ee2867c8b 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -506,7 +506,7 @@ tone_off:
506 break; 506 break;
507 } 507 }
508 dsp->cmx_delay = (*((int *)data)) << 3; 508 dsp->cmx_delay = (*((int *)data)) << 3;
509 /* miliseconds to samples */ 509 /* milliseconds to samples */
510 if (dsp->cmx_delay >= (CMX_BUFF_HALF>>1)) 510 if (dsp->cmx_delay >= (CMX_BUFF_HALF>>1))
511 /* clip to half of maximum usable buffer 511 /* clip to half of maximum usable buffer
512 (half of half buffer) */ 512 (half of half buffer) */
diff --git a/drivers/leds/leds-h1940.c b/drivers/leds/leds-h1940.c
index 1aa46a390a0..173d104d9ff 100644
--- a/drivers/leds/leds-h1940.c
+++ b/drivers/leds/leds-h1940.c
@@ -16,6 +16,8 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/ctype.h> 17#include <linux/ctype.h>
18#include <linux/leds.h> 18#include <linux/leds.h>
19#include <linux/gpio.h>
20
19#include <mach/regs-gpio.h> 21#include <mach/regs-gpio.h>
20#include <mach/hardware.h> 22#include <mach/hardware.h>
21#include <mach/h1940-latch.h> 23#include <mach/h1940-latch.h>
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index aa2e7ae0cda..aa7acf3b922 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/leds.h> 17#include <linux/leds.h>
18#include <linux/gpio.h>
18 19
19#include <mach/hardware.h> 20#include <mach/hardware.h>
20#include <mach/regs-gpio.h> 21#include <mach/regs-gpio.h>
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index a3d3cbab359..0aaa0597a62 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -1,6 +1,6 @@
1config LGUEST 1config LGUEST
2 tristate "Linux hypervisor example code" 2 tristate "Linux hypervisor example code"
3 depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX 3 depends on X86_32 && EXPERIMENTAL && EVENTFD
4 select HVC_DRIVER 4 select HVC_DRIVER
5 ---help--- 5 ---help---
6 This is a very simple module which allows you to run 6 This is a very simple module which allows you to run
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 4845fb3cf74..a6974e9b8eb 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -95,7 +95,7 @@ static __init int map_switcher(void)
95 * array of struct pages. It increments that pointer, but we don't 95 * array of struct pages. It increments that pointer, but we don't
96 * care. */ 96 * care. */
97 pagep = switcher_page; 97 pagep = switcher_page;
98 err = map_vm_area(switcher_vma, PAGE_KERNEL, &pagep); 98 err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
99 if (err) { 99 if (err) {
100 printk("lguest: map_vm_area failed: %i\n", err); 100 printk("lguest: map_vm_area failed: %i\n", err);
101 goto free_vma; 101 goto free_vma;
@@ -188,6 +188,9 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
188{ 188{
189 /* We stop running once the Guest is dead. */ 189 /* We stop running once the Guest is dead. */
190 while (!cpu->lg->dead) { 190 while (!cpu->lg->dead) {
191 unsigned int irq;
192 bool more;
193
191 /* First we run any hypercalls the Guest wants done. */ 194 /* First we run any hypercalls the Guest wants done. */
192 if (cpu->hcall) 195 if (cpu->hcall)
193 do_hypercalls(cpu); 196 do_hypercalls(cpu);
@@ -195,23 +198,23 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
195 /* It's possible the Guest did a NOTIFY hypercall to the 198 /* It's possible the Guest did a NOTIFY hypercall to the
196 * Launcher, in which case we return from the read() now. */ 199 * Launcher, in which case we return from the read() now. */
197 if (cpu->pending_notify) { 200 if (cpu->pending_notify) {
198 if (put_user(cpu->pending_notify, user)) 201 if (!send_notify_to_eventfd(cpu)) {
199 return -EFAULT; 202 if (put_user(cpu->pending_notify, user))
200 return sizeof(cpu->pending_notify); 203 return -EFAULT;
204 return sizeof(cpu->pending_notify);
205 }
201 } 206 }
202 207
203 /* Check for signals */ 208 /* Check for signals */
204 if (signal_pending(current)) 209 if (signal_pending(current))
205 return -ERESTARTSYS; 210 return -ERESTARTSYS;
206 211
207 /* If Waker set break_out, return to Launcher. */
208 if (cpu->break_out)
209 return -EAGAIN;
210
211 /* Check if there are any interrupts which can be delivered now: 212 /* Check if there are any interrupts which can be delivered now:
212 * if so, this sets up the hander to be executed when we next 213 * if so, this sets up the hander to be executed when we next
213 * run the Guest. */ 214 * run the Guest. */
214 maybe_do_interrupt(cpu); 215 irq = interrupt_pending(cpu, &more);
216 if (irq < LGUEST_IRQS)
217 try_deliver_interrupt(cpu, irq, more);
215 218
216 /* All long-lived kernel loops need to check with this horrible 219 /* All long-lived kernel loops need to check with this horrible
217 * thing called the freezer. If the Host is trying to suspend, 220 * thing called the freezer. If the Host is trying to suspend,
@@ -224,10 +227,15 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
224 break; 227 break;
225 228
226 /* If the Guest asked to be stopped, we sleep. The Guest's 229 /* If the Guest asked to be stopped, we sleep. The Guest's
227 * clock timer or LHREQ_BREAK from the Waker will wake us. */ 230 * clock timer will wake us. */
228 if (cpu->halted) { 231 if (cpu->halted) {
229 set_current_state(TASK_INTERRUPTIBLE); 232 set_current_state(TASK_INTERRUPTIBLE);
230 schedule(); 233 /* Just before we sleep, make sure no interrupt snuck in
234 * which we should be doing. */
235 if (interrupt_pending(cpu, &more) < LGUEST_IRQS)
236 set_current_state(TASK_RUNNING);
237 else
238 schedule();
231 continue; 239 continue;
232 } 240 }
233 241
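
The halt path above closes a lost-wakeup window with the classic prepare-to-sleep ordering; it relies on set_interrupt() (later in this series) calling wake_up_process() only after the pending bit is set. The excerpt below spells out the ordering with comments added:

        /* 1. Mark ourselves asleep BEFORE the final condition check ... */
        set_current_state(TASK_INTERRUPTIBLE);

        /* 2. ... so an interrupt raised after this point cannot be lost:
         * either the check below sees its pending bit, or its
         * wake_up_process() pulls us straight out of schedule(). */
        if (interrupt_pending(cpu, &more) < LGUEST_IRQS)
                set_current_state(TASK_RUNNING);  /* work arrived; don't sleep */
        else
                schedule();                       /* now safe to sleep */
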
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 54d66f05fef..c29ffa19cb7 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -37,6 +37,10 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
37 /* This call does nothing, except by breaking out of the Guest 37 /* This call does nothing, except by breaking out of the Guest
38 * it makes us process all the asynchronous hypercalls. */ 38 * it makes us process all the asynchronous hypercalls. */
39 break; 39 break;
40 case LHCALL_SEND_INTERRUPTS:
41 /* This call does nothing too, but by breaking out of the Guest
42 * it makes us process any pending interrupts. */
43 break;
40 case LHCALL_LGUEST_INIT: 44 case LHCALL_LGUEST_INIT:
41 /* You can't get here unless you're already initialized. Don't 45 /* You can't get here unless you're already initialized. Don't
42 * do that. */ 46 * do that. */
@@ -73,11 +77,21 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
73 guest_set_stack(cpu, args->arg1, args->arg2, args->arg3); 77 guest_set_stack(cpu, args->arg1, args->arg2, args->arg3);
74 break; 78 break;
75 case LHCALL_SET_PTE: 79 case LHCALL_SET_PTE:
80#ifdef CONFIG_X86_PAE
81 guest_set_pte(cpu, args->arg1, args->arg2,
82 __pte(args->arg3 | (u64)args->arg4 << 32));
83#else
76 guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3)); 84 guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3));
85#endif
86 break;
87 case LHCALL_SET_PGD:
88 guest_set_pgd(cpu->lg, args->arg1, args->arg2);
77 break; 89 break;
90#ifdef CONFIG_X86_PAE
78 case LHCALL_SET_PMD: 91 case LHCALL_SET_PMD:
79 guest_set_pmd(cpu->lg, args->arg1, args->arg2); 92 guest_set_pmd(cpu->lg, args->arg1, args->arg2);
80 break; 93 break;
94#endif
81 case LHCALL_SET_CLOCKEVENT: 95 case LHCALL_SET_CLOCKEVENT:
82 guest_set_clockevent(cpu, args->arg1); 96 guest_set_clockevent(cpu, args->arg1);
83 break; 97 break;
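
In the LHCALL_SET_PTE case above, a PAE pte no longer fits in a single 32-bit hypercall argument, so the Guest passes it as two halves that the Host reassembles with "arg3 | (u64)arg4 << 32". A minimal sketch of that marshalling in portable C:

    #include <stdint.h>

    static inline void split_pte(uint64_t pte, uint32_t *lo, uint32_t *hi)
    {
        *lo = (uint32_t)pte;
        *hi = (uint32_t)(pte >> 32);
    }

    static inline uint64_t join_pte(uint32_t lo, uint32_t hi)
    {
        /* the shift binds tighter than |, matching the hypercall code */
        return (uint64_t)lo | ((uint64_t)hi << 32);
    }
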
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 6e99adbe194..0e9067b0d50 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -128,30 +128,39 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
128/*H:205 128/*H:205
129 * Virtual Interrupts. 129 * Virtual Interrupts.
130 * 130 *
131 * maybe_do_interrupt() gets called before every entry to the Guest, to see if 131 * interrupt_pending() returns the first pending interrupt which isn't blocked
132 * we should divert the Guest to running an interrupt handler. */ 132 * by the Guest. It is called before every entry to the Guest, and just before
133void maybe_do_interrupt(struct lg_cpu *cpu) 133 * we go to sleep when the Guest has halted itself. */
134unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more)
134{ 135{
135 unsigned int irq; 136 unsigned int irq;
136 DECLARE_BITMAP(blk, LGUEST_IRQS); 137 DECLARE_BITMAP(blk, LGUEST_IRQS);
137 struct desc_struct *idt;
138 138
139 /* If the Guest hasn't even initialized yet, we can do nothing. */ 139 /* If the Guest hasn't even initialized yet, we can do nothing. */
140 if (!cpu->lg->lguest_data) 140 if (!cpu->lg->lguest_data)
141 return; 141 return LGUEST_IRQS;
142 142
143 /* Take our "irqs_pending" array and remove any interrupts the Guest 143 /* Take our "irqs_pending" array and remove any interrupts the Guest
144 * wants blocked: the result ends up in "blk". */ 144 * wants blocked: the result ends up in "blk". */
145 if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts, 145 if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts,
146 sizeof(blk))) 146 sizeof(blk)))
147 return; 147 return LGUEST_IRQS;
148 bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS); 148 bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS);
149 149
150 /* Find the first interrupt. */ 150 /* Find the first interrupt. */
151 irq = find_first_bit(blk, LGUEST_IRQS); 151 irq = find_first_bit(blk, LGUEST_IRQS);
152 /* None? Nothing to do */ 152 *more = find_next_bit(blk, LGUEST_IRQS, irq+1);
153 if (irq >= LGUEST_IRQS) 153
154 return; 154 return irq;
155}
156
157/* This actually diverts the Guest to running an interrupt handler, once an
158 * interrupt has been identified by interrupt_pending(). */
159void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
160{
161 struct desc_struct *idt;
162
163 BUG_ON(irq >= LGUEST_IRQS);
155 164
156 /* They may be in the middle of an iret, where they asked us never to 165 /* They may be in the middle of an iret, where they asked us never to
157 * deliver interrupts. */ 166 * deliver interrupts. */
@@ -170,8 +179,12 @@ void maybe_do_interrupt(struct lg_cpu *cpu)
170 u32 irq_enabled; 179 u32 irq_enabled;
171 if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled)) 180 if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled))
172 irq_enabled = 0; 181 irq_enabled = 0;
173 if (!irq_enabled) 182 if (!irq_enabled) {
183 /* Make sure they know an IRQ is pending. */
184 put_user(X86_EFLAGS_IF,
185 &cpu->lg->lguest_data->irq_pending);
174 return; 186 return;
187 }
175 } 188 }
176 189
177 /* Look at the IDT entry the Guest gave us for this interrupt. The 190 /* Look at the IDT entry the Guest gave us for this interrupt. The
@@ -194,6 +207,25 @@ void maybe_do_interrupt(struct lg_cpu *cpu)
194 * here is a compromise which means at least it gets updated every 207 * here is a compromise which means at least it gets updated every
195 * timer interrupt. */ 208 * timer interrupt. */
196 write_timestamp(cpu); 209 write_timestamp(cpu);
210
211 /* If there are no other interrupts we want to deliver, clear
212 * the pending flag. */
213 if (!more)
214 put_user(0, &cpu->lg->lguest_data->irq_pending);
215}
216
217/* And this is the routine when we want to set an interrupt for the Guest. */
218void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
219{
220 /* Next time the Guest runs, the core code will see if it can deliver
221 * this interrupt. */
222 set_bit(irq, cpu->irqs_pending);
223
224 /* Make sure it sees it; it might be asleep (eg. halted), or
225 * running the Guest right now, in which case kick_process()
226 * will knock it out. */
227 if (!wake_up_process(cpu->tsk))
228 kick_process(cpu->tsk);
197} 229}
198/*:*/ 230/*:*/
199 231
@@ -510,10 +542,7 @@ static enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
510 struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt); 542 struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt);
511 543
512 /* Remember the first interrupt is the timer interrupt. */ 544 /* Remember the first interrupt is the timer interrupt. */
513 set_bit(0, cpu->irqs_pending); 545 set_interrupt(cpu, 0);
514 /* If the Guest is actually stopped, we need to wake it up. */
515 if (cpu->halted)
516 wake_up_process(cpu->tsk);
517 return HRTIMER_NORESTART; 546 return HRTIMER_NORESTART;
518} 547}
519 548
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index af92a176697..d4e8979735c 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -49,7 +49,7 @@ struct lg_cpu {
49 u32 cr2; 49 u32 cr2;
50 int ts; 50 int ts;
51 u32 esp1; 51 u32 esp1;
52 u8 ss1; 52 u16 ss1;
53 53
54 /* Bitmap of what has changed: see CHANGED_* above. */ 54 /* Bitmap of what has changed: see CHANGED_* above. */
55 int changed; 55 int changed;
@@ -71,9 +71,7 @@ struct lg_cpu {
71 /* Virtual clock device */ 71 /* Virtual clock device */
72 struct hrtimer hrt; 72 struct hrtimer hrt;
73 73
74 /* Do we need to stop what we're doing and return to userspace? */ 74 /* Did the Guest tell us to halt? */
75 int break_out;
76 wait_queue_head_t break_wq;
77 int halted; 75 int halted;
78 76
79 /* Pending virtual interrupts */ 77 /* Pending virtual interrupts */
@@ -82,6 +80,16 @@ struct lg_cpu {
82 struct lg_cpu_arch arch; 80 struct lg_cpu_arch arch;
83}; 81};
84 82
83struct lg_eventfd {
84 unsigned long addr;
85 struct file *event;
86};
87
88struct lg_eventfd_map {
89 unsigned int num;
90 struct lg_eventfd map[];
91};
92
85/* The private info the thread maintains about the guest. */ 93/* The private info the thread maintains about the guest. */
86struct lguest 94struct lguest
87{ 95{
@@ -102,6 +110,8 @@ struct lguest
102 unsigned int stack_pages; 110 unsigned int stack_pages;
103 u32 tsc_khz; 111 u32 tsc_khz;
104 112
113 struct lg_eventfd_map *eventfds;
114
105 /* Dead? */ 115 /* Dead? */
106 const char *dead; 116 const char *dead;
107}; 117};
@@ -137,9 +147,13 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
137 * in the kernel. */ 147 * in the kernel. */
138#define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK) 148#define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK)
139#define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT) 149#define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT)
150#define pmd_flags(x) (pmd_val(x) & ~PAGE_MASK)
151#define pmd_pfn(x) (pmd_val(x) >> PAGE_SHIFT)
140 152
141/* interrupts_and_traps.c: */ 153/* interrupts_and_traps.c: */
142void maybe_do_interrupt(struct lg_cpu *cpu); 154unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more);
155void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more);
156void set_interrupt(struct lg_cpu *cpu, unsigned int irq);
143bool deliver_trap(struct lg_cpu *cpu, unsigned int num); 157bool deliver_trap(struct lg_cpu *cpu, unsigned int num);
144void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i, 158void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i,
145 u32 low, u32 hi); 159 u32 low, u32 hi);
@@ -150,6 +164,7 @@ void setup_default_idt_entries(struct lguest_ro_state *state,
150void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, 164void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
151 const unsigned long *def); 165 const unsigned long *def);
152void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta); 166void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
167bool send_notify_to_eventfd(struct lg_cpu *cpu);
153void init_clockdev(struct lg_cpu *cpu); 168void init_clockdev(struct lg_cpu *cpu);
154bool check_syscall_vector(struct lguest *lg); 169bool check_syscall_vector(struct lguest *lg);
155int init_interrupts(void); 170int init_interrupts(void);
@@ -168,7 +183,10 @@ void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt);
168int init_guest_pagetable(struct lguest *lg); 183int init_guest_pagetable(struct lguest *lg);
169void free_guest_pagetable(struct lguest *lg); 184void free_guest_pagetable(struct lguest *lg);
170void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable); 185void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable);
186void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 i);
187#ifdef CONFIG_X86_PAE
171void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i); 188void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i);
189#endif
172void guest_pagetable_clear_all(struct lg_cpu *cpu); 190void guest_pagetable_clear_all(struct lg_cpu *cpu);
173void guest_pagetable_flush_user(struct lg_cpu *cpu); 191void guest_pagetable_flush_user(struct lg_cpu *cpu);
174void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir, 192void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index df44d962626..e082cdac88b 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -228,7 +228,8 @@ extern void lguest_setup_irq(unsigned int irq);
228 * function. */ 228 * function. */
229static struct virtqueue *lg_find_vq(struct virtio_device *vdev, 229static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
230 unsigned index, 230 unsigned index,
231 void (*callback)(struct virtqueue *vq)) 231 void (*callback)(struct virtqueue *vq),
232 const char *name)
232{ 233{
233 struct lguest_device *ldev = to_lgdev(vdev); 234 struct lguest_device *ldev = to_lgdev(vdev);
234 struct lguest_vq_info *lvq; 235 struct lguest_vq_info *lvq;
@@ -263,7 +264,7 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
263 /* OK, tell virtio_ring.c to set up a virtqueue now we know its size 264 /* OK, tell virtio_ring.c to set up a virtqueue now we know its size
264 * and we've got a pointer to its pages. */ 265 * and we've got a pointer to its pages. */
265 vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, 266 vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN,
266 vdev, lvq->pages, lg_notify, callback); 267 vdev, lvq->pages, lg_notify, callback, name);
267 if (!vq) { 268 if (!vq) {
268 err = -ENOMEM; 269 err = -ENOMEM;
269 goto unmap; 270 goto unmap;
@@ -312,6 +313,38 @@ static void lg_del_vq(struct virtqueue *vq)
312 kfree(lvq); 313 kfree(lvq);
313} 314}
314 315
316static void lg_del_vqs(struct virtio_device *vdev)
317{
318 struct virtqueue *vq, *n;
319
320 list_for_each_entry_safe(vq, n, &vdev->vqs, list)
321 lg_del_vq(vq);
322}
323
324static int lg_find_vqs(struct virtio_device *vdev, unsigned nvqs,
325 struct virtqueue *vqs[],
326 vq_callback_t *callbacks[],
327 const char *names[])
328{
329 struct lguest_device *ldev = to_lgdev(vdev);
330 int i;
331
332 /* We must have this many virtqueues. */
333 if (nvqs > ldev->desc->num_vq)
334 return -ENOENT;
335
336 for (i = 0; i < nvqs; ++i) {
337 vqs[i] = lg_find_vq(vdev, i, callbacks[i], names[i]);
338 if (IS_ERR(vqs[i]))
339 goto error;
340 }
341 return 0;
342
343error:
344 lg_del_vqs(vdev);
345 return PTR_ERR(vqs[i]);
346}
347
315/* The ops structure which hooks everything together. */ 348/* The ops structure which hooks everything together. */
316static struct virtio_config_ops lguest_config_ops = { 349static struct virtio_config_ops lguest_config_ops = {
317 .get_features = lg_get_features, 350 .get_features = lg_get_features,
@@ -321,8 +354,8 @@ static struct virtio_config_ops lguest_config_ops = {
321 .get_status = lg_get_status, 354 .get_status = lg_get_status,
322 .set_status = lg_set_status, 355 .set_status = lg_set_status,
323 .reset = lg_reset, 356 .reset = lg_reset,
324 .find_vq = lg_find_vq, 357 .find_vqs = lg_find_vqs,
325 .del_vq = lg_del_vq, 358 .del_vqs = lg_del_vqs,
326}; 359};
327 360
328/* The root device for the lguest virtio devices. This makes them appear as 361/* The root device for the lguest virtio devices. This makes them appear as
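
The lguest_config_ops conversion above moves from per-queue find_vq/del_vq to the batched find_vqs/del_vqs interface, where the transport unwinds everything it created if any queue fails. A sketch of the driver-side usage for a hypothetical two-queue device (rx_done/tx_done are assumed callbacks, not part of this patch):

    #include <linux/virtio.h>
    #include <linux/virtio_config.h>

    static void rx_done(struct virtqueue *vq);  /* hypothetical */
    static void tx_done(struct virtqueue *vq);  /* hypothetical */

    static int setup_queues(struct virtio_device *vdev,
                            struct virtqueue **rx, struct virtqueue **tx)
    {
        struct virtqueue *vqs[2];
        vq_callback_t *callbacks[] = { rx_done, tx_done };
        const char *names[] = { "rx", "tx" };
        int err;

        /* one call sets up both queues; on error the transport has
         * already torn down whatever it managed to create */
        err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
        if (err)
            return err;
        *rx = vqs[0];
        *tx = vqs[1];
        return 0;
    }
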
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index b8ee103eed5..32e29712105 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -7,32 +7,83 @@
7#include <linux/miscdevice.h> 7#include <linux/miscdevice.h>
8#include <linux/fs.h> 8#include <linux/fs.h>
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/eventfd.h>
11#include <linux/file.h>
10#include "lg.h" 12#include "lg.h"
11 13
12/*L:055 When something happens, the Waker process needs a way to stop the 14bool send_notify_to_eventfd(struct lg_cpu *cpu)
13 * kernel running the Guest and return to the Launcher. So the Waker writes
14 * LHREQ_BREAK and the value "1" to /dev/lguest to do this. Once the Launcher
15 * has done whatever needs attention, it writes LHREQ_BREAK and "0" to release
16 * the Waker. */
17static int break_guest_out(struct lg_cpu *cpu, const unsigned long __user*input)
18{ 15{
19 unsigned long on; 16 unsigned int i;
17 struct lg_eventfd_map *map;
18
19 /* lg->eventfds is RCU-protected */
20 rcu_read_lock();
21 map = rcu_dereference(cpu->lg->eventfds);
22 for (i = 0; i < map->num; i++) {
23 if (map->map[i].addr == cpu->pending_notify) {
24 eventfd_signal(map->map[i].event, 1);
25 cpu->pending_notify = 0;
26 break;
27 }
28 }
29 rcu_read_unlock();
30 return cpu->pending_notify == 0;
31}
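
send_notify_to_eventfd() returns true only when some registered eventfd matched cpu->pending_notify and absorbed the notification; otherwise the caller still has to hand it to the Launcher the slow way. On the far end of that eventfd, the Launcher just read()s an 8-byte counter. A userspace sketch (hypothetical Launcher-side code, not part of this patch):

	/* Wait for the Guest to hit a registered notification address. */
	#include <stdint.h>
	#include <unistd.h>

	static int wait_for_notify(int efd)
	{
		uint64_t count;

		/* The eventfd counter accumulates, so one read() may
		 * cover several eventfd_signal() calls from the host. */
		if (read(efd, &count, sizeof(count)) != sizeof(count))
			return -1;
		return 0;
	}
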
20 32
21 /* Fetch whether they're turning break on or off. */ 33static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
22 if (get_user(on, input) != 0) 34{
23 return -EFAULT; 35 struct lg_eventfd_map *new, *old = lg->eventfds;
24 36
25 if (on) { 37 if (!addr)
26 cpu->break_out = 1; 38 return -EINVAL;
27 /* Pop it out of the Guest (may be running on different CPU) */ 39
28 wake_up_process(cpu->tsk); 40 /* Replace the old array with the new one, carefully: others can
29 /* Wait for them to reset it */ 41 * be accessing it at the same time */
30 return wait_event_interruptible(cpu->break_wq, !cpu->break_out); 42 new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1),
31 } else { 43 GFP_KERNEL);
32 cpu->break_out = 0; 44 if (!new)
33 wake_up(&cpu->break_wq); 45 return -ENOMEM;
34 return 0; 46
47 /* First make identical copy. */
48 memcpy(new->map, old->map, sizeof(old->map[0]) * old->num);
49 new->num = old->num;
50
51 /* Now append new entry. */
52 new->map[new->num].addr = addr;
53 new->map[new->num].event = eventfd_fget(fd);
54 if (IS_ERR(new->map[new->num].event)) {
55 kfree(new);
56 return PTR_ERR(new->map[new->num].event);
35 } 57 }
58 new->num++;
59
60 /* Now put new one in place. */
61 rcu_assign_pointer(lg->eventfds, new);
62
 63	/* We're not in a big hurry. Wait until no one is looking at the
 64	 * old version, then delete it. */
65 synchronize_rcu();
66 kfree(old);
67
68 return 0;
69}
70
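
add_eventfd() is the textbook RCU copy-update idiom: readers walk the array locklessly between rcu_read_lock() and rcu_read_unlock() (as send_notify_to_eventfd() does above), while the writer, serialized by lguest_lock, builds a complete replacement, publishes it atomically with rcu_assign_pointer(), and frees the old copy only after synchronize_rcu() guarantees no reader can still see it. The same shape reduced to its bones (a sketch assuming <linux/slab.h> and <linux/rcupdate.h>; "struct items" and items_append() are hypothetical, and the caller is assumed to hold the update-side mutex):

	struct items {
		unsigned int num;
		int data[];		/* flexible array, like lg_eventfd_map */
	};

	static int items_append(struct items **headp, int value)
	{
		struct items *old = *headp;	/* update side is locked */
		struct items *new;

		new = kmalloc(sizeof(*new) + sizeof(int) * (old->num + 1),
			      GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		memcpy(new->data, old->data, sizeof(int) * old->num);
		new->data[old->num] = value;
		new->num = old->num + 1;

		rcu_assign_pointer(*headp, new);	/* publish */
		synchronize_rcu();		/* wait out all readers */
		kfree(old);
		return 0;
	}
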
71static int attach_eventfd(struct lguest *lg, const unsigned long __user *input)
72{
73 unsigned long addr, fd;
74 int err;
75
76 if (get_user(addr, input) != 0)
77 return -EFAULT;
78 input++;
79 if (get_user(fd, input) != 0)
80 return -EFAULT;
81
82 mutex_lock(&lguest_lock);
83 err = add_eventfd(lg, addr, fd);
84 mutex_unlock(&lguest_lock);
85
 86	return err;
36} 87}
37 88
38/*L:050 Sending an interrupt is done by writing LHREQ_IRQ and an interrupt 89/*L:050 Sending an interrupt is done by writing LHREQ_IRQ and an interrupt
@@ -45,9 +96,8 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
45 return -EFAULT; 96 return -EFAULT;
46 if (irq >= LGUEST_IRQS) 97 if (irq >= LGUEST_IRQS)
47 return -EINVAL; 98 return -EINVAL;
48 /* Next time the Guest runs, the core code will see if it can deliver 99
49 * this interrupt. */ 100 set_interrupt(cpu, irq);
50 set_bit(irq, cpu->irqs_pending);
51 return 0; 101 return 0;
52} 102}
53 103
@@ -126,9 +176,6 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
126 * address. */ 176 * address. */
127 lguest_arch_setup_regs(cpu, start_ip); 177 lguest_arch_setup_regs(cpu, start_ip);
128 178
129 /* Initialize the queue for the Waker to wait on */
130 init_waitqueue_head(&cpu->break_wq);
131
132 /* We keep a pointer to the Launcher task (ie. current task) for when 179 /* We keep a pointer to the Launcher task (ie. current task) for when
133 * other Guests want to wake this one (eg. console input). */ 180 * other Guests want to wake this one (eg. console input). */
134 cpu->tsk = current; 181 cpu->tsk = current;
@@ -185,6 +232,13 @@ static int initialize(struct file *file, const unsigned long __user *input)
185 goto unlock; 232 goto unlock;
186 } 233 }
187 234
235 lg->eventfds = kmalloc(sizeof(*lg->eventfds), GFP_KERNEL);
236 if (!lg->eventfds) {
237 err = -ENOMEM;
238 goto free_lg;
239 }
240 lg->eventfds->num = 0;
241
188 /* Populate the easy fields of our "struct lguest" */ 242 /* Populate the easy fields of our "struct lguest" */
189 lg->mem_base = (void __user *)args[0]; 243 lg->mem_base = (void __user *)args[0];
190 lg->pfn_limit = args[1]; 244 lg->pfn_limit = args[1];
@@ -192,7 +246,7 @@ static int initialize(struct file *file, const unsigned long __user *input)
192 /* This is the first cpu (cpu 0) and it will start booting at args[2] */ 246 /* This is the first cpu (cpu 0) and it will start booting at args[2] */
193 err = lg_cpu_start(&lg->cpus[0], 0, args[2]); 247 err = lg_cpu_start(&lg->cpus[0], 0, args[2]);
194 if (err) 248 if (err)
195 goto release_guest; 249 goto free_eventfds;
196 250
197 /* Initialize the Guest's shadow page tables, using the toplevel 251 /* Initialize the Guest's shadow page tables, using the toplevel
198 * address the Launcher gave us. This allocates memory, so can fail. */ 252 * address the Launcher gave us. This allocates memory, so can fail. */
@@ -211,7 +265,9 @@ static int initialize(struct file *file, const unsigned long __user *input)
211free_regs: 265free_regs:
212 /* FIXME: This should be in free_vcpu */ 266 /* FIXME: This should be in free_vcpu */
213 free_page(lg->cpus[0].regs_page); 267 free_page(lg->cpus[0].regs_page);
214release_guest: 268free_eventfds:
269 kfree(lg->eventfds);
270free_lg:
215 kfree(lg); 271 kfree(lg);
216unlock: 272unlock:
217 mutex_unlock(&lguest_lock); 273 mutex_unlock(&lguest_lock);
@@ -252,11 +308,6 @@ static ssize_t write(struct file *file, const char __user *in,
252 /* Once the Guest is dead, you can only read() why it died. */ 308 /* Once the Guest is dead, you can only read() why it died. */
253 if (lg->dead) 309 if (lg->dead)
254 return -ENOENT; 310 return -ENOENT;
255
256 /* If you're not the task which owns the Guest, all you can do
257 * is break the Launcher out of running the Guest. */
258 if (current != cpu->tsk && req != LHREQ_BREAK)
259 return -EPERM;
260 } 311 }
261 312
262 switch (req) { 313 switch (req) {
@@ -264,8 +315,8 @@ static ssize_t write(struct file *file, const char __user *in,
264 return initialize(file, input); 315 return initialize(file, input);
265 case LHREQ_IRQ: 316 case LHREQ_IRQ:
266 return user_send_irq(cpu, input); 317 return user_send_irq(cpu, input);
267 case LHREQ_BREAK: 318 case LHREQ_EVENTFD:
268 return break_guest_out(cpu, input); 319 return attach_eventfd(lg, input);
269 default: 320 default:
270 return -EINVAL; 321 return -EINVAL;
271 } 322 }
@@ -303,6 +354,12 @@ static int close(struct inode *inode, struct file *file)
303 * the Launcher's memory management structure. */ 354 * the Launcher's memory management structure. */
304 mmput(lg->cpus[i].mm); 355 mmput(lg->cpus[i].mm);
305 } 356 }
357
358 /* Release any eventfds they registered. */
359 for (i = 0; i < lg->eventfds->num; i++)
360 fput(lg->eventfds->map[i].event);
361 kfree(lg->eventfds);
362
306 /* If lg->dead doesn't contain an error code it will be NULL or a 363 /* If lg->dead doesn't contain an error code it will be NULL or a
307 * kmalloc()ed string, either of which is ok to hand to kfree(). */ 364 * kmalloc()ed string, either of which is ok to hand to kfree(). */
308 if (!IS_ERR(lg->dead)) 365 if (!IS_ERR(lg->dead))
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index a059cf9980f..a6fe1abda24 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -53,6 +53,17 @@
53 * page. */ 53 * page. */
54#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1) 54#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
55 55
56/* For PAE we need the PMD index as well. We use the last 2MB, so we
57 * will need the last pmd entry of the last pmd page. */
58#ifdef CONFIG_X86_PAE
59#define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1)
60#define RESERVE_MEM 2U
61#define CHECK_GPGD_MASK _PAGE_PRESENT
62#else
63#define RESERVE_MEM 4U
64#define CHECK_GPGD_MASK _PAGE_TABLE
65#endif
66
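
The 2 versus 4 in RESERVE_MEM falls straight out of the paging geometry: without PAE one top-level entry maps 1024 PTEs of 4096 bytes each, so reserving the last PGD slot for the Switcher costs the Guest 4MB of virtual space; with PAE a PMD entry maps only 512 PTEs, and only the last PMD entry of the last PMD page is reserved. A comment-style recap of that arithmetic (a sketch using the standard 32-bit x86 constants):

	/*
	 * non-PAE: last pgd entry = 1024 PTEs * 4096 bytes = 4MB reserved
	 * PAE:     last pmd entry =  512 PTEs * 4096 bytes = 2MB reserved
	 */
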
56/* We actually need a separate PTE page for each CPU. Remember that after the 67/* We actually need a separate PTE page for each CPU. Remember that after the
57 * Switcher code itself comes two pages for each CPU, and we don't want this 68 * Switcher code itself comes two pages for each CPU, and we don't want this
58 * CPU's guest to see the pages of any other CPU. */ 69 * CPU's guest to see the pages of any other CPU. */
@@ -73,24 +84,59 @@ static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
73{ 84{
74 unsigned int index = pgd_index(vaddr); 85 unsigned int index = pgd_index(vaddr);
75 86
87#ifndef CONFIG_X86_PAE
76 /* We kill any Guest trying to touch the Switcher addresses. */ 88 /* We kill any Guest trying to touch the Switcher addresses. */
77 if (index >= SWITCHER_PGD_INDEX) { 89 if (index >= SWITCHER_PGD_INDEX) {
78 kill_guest(cpu, "attempt to access switcher pages"); 90 kill_guest(cpu, "attempt to access switcher pages");
79 index = 0; 91 index = 0;
80 } 92 }
93#endif
81 /* Return a pointer index'th pgd entry for the i'th page table. */ 94 /* Return a pointer index'th pgd entry for the i'th page table. */
82 return &cpu->lg->pgdirs[i].pgdir[index]; 95 return &cpu->lg->pgdirs[i].pgdir[index];
83} 96}
84 97
98#ifdef CONFIG_X86_PAE
99/* This routine then takes the PGD entry given above, which contains the
100 * address of the PMD page. It then returns a pointer to the PMD entry for the
101 * given address. */
102static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
103{
104 unsigned int index = pmd_index(vaddr);
105 pmd_t *page;
106
107 /* We kill any Guest trying to touch the Switcher addresses. */
108 if (pgd_index(vaddr) == SWITCHER_PGD_INDEX &&
109 index >= SWITCHER_PMD_INDEX) {
110 kill_guest(cpu, "attempt to access switcher pages");
111 index = 0;
112 }
113
114 /* You should never call this if the PGD entry wasn't valid */
115 BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
116 page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
117
118 return &page[index];
119}
120#endif
121
85/* This routine then takes the page directory entry returned above, which 122/* This routine then takes the page directory entry returned above, which
86 * contains the address of the page table entry (PTE) page. It then returns a 123 * contains the address of the page table entry (PTE) page. It then returns a
87 * pointer to the PTE entry for the given address. */ 124 * pointer to the PTE entry for the given address. */
88static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr) 125static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
89{ 126{
127#ifdef CONFIG_X86_PAE
128 pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
129 pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);
130
131 /* You should never call this if the PMD entry wasn't valid */
132 BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
133#else
90 pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT); 134 pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
91 /* You should never call this if the PGD entry wasn't valid */ 135 /* You should never call this if the PGD entry wasn't valid */
92 BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT)); 136 BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
93 return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE]; 137#endif
138
139 return &page[pte_index(vaddr)];
94} 140}
95 141
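
pte_index() is simply the old open-coded "(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE", with the bonus that it stays correct when PTRS_PER_PTE differs between the PAE (512) and non-PAE (1024) builds. Worked through once (a sketch, standard x86 constants assumed):

	/*
	 * pte_index(vaddr) == (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)
	 *
	 * e.g. vaddr = 0x00403267:
	 *   vaddr >> 12             = 0x403   (page number 1027)
	 *   0x403 & 0x1ff (PAE)     = 0x003   -> pte slot 3 of its table
	 *   0x403 & 0x3ff (non-PAE) = 0x003   -> same slot here too
	 */
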
96/* These two functions just like the above two, except they access the Guest 142/* These two functions just like the above two, except they access the Guest
@@ -101,12 +147,32 @@ static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
101 return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t); 147 return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
102} 148}
103 149
104static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr) 150#ifdef CONFIG_X86_PAE
151static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
152{
153 unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
154 BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
155 return gpage + pmd_index(vaddr) * sizeof(pmd_t);
156}
157
158static unsigned long gpte_addr(struct lg_cpu *cpu,
159 pmd_t gpmd, unsigned long vaddr)
160{
161 unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;
162
163 BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
164 return gpage + pte_index(vaddr) * sizeof(pte_t);
165}
166#else
167static unsigned long gpte_addr(struct lg_cpu *cpu,
168 pgd_t gpgd, unsigned long vaddr)
105{ 169{
106 unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT; 170 unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
171
107 BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT)); 172 BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
108 return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t); 173 return gpage + pte_index(vaddr) * sizeof(pte_t);
109} 174}
175#endif
110/*:*/ 176/*:*/
111 177
112/*M:014 get_pfn is slow: we could probably try to grab batches of pages here as 178/*M:014 get_pfn is slow: we could probably try to grab batches of pages here as
@@ -171,7 +237,7 @@ static void release_pte(pte_t pte)
171 /* Remember that get_user_pages_fast() took a reference to the page, in 237 /* Remember that get_user_pages_fast() took a reference to the page, in
172 * get_pfn()? We have to put it back now. */ 238 * get_pfn()? We have to put it back now. */
173 if (pte_flags(pte) & _PAGE_PRESENT) 239 if (pte_flags(pte) & _PAGE_PRESENT)
174 put_page(pfn_to_page(pte_pfn(pte))); 240 put_page(pte_page(pte));
175} 241}
176/*:*/ 242/*:*/
177 243
@@ -184,11 +250,20 @@ static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
184 250
185static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd) 251static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
186{ 252{
187 if ((pgd_flags(gpgd) & ~_PAGE_TABLE) || 253 if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
188 (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) 254 (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
189 kill_guest(cpu, "bad page directory entry"); 255 kill_guest(cpu, "bad page directory entry");
190} 256}
191 257
258#ifdef CONFIG_X86_PAE
259static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
260{
261 if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
262 (pmd_pfn(gpmd) >= cpu->lg->pfn_limit))
263 kill_guest(cpu, "bad page middle directory entry");
264}
265#endif
266
192/*H:330 267/*H:330
193 * (i) Looking up a page table entry when the Guest faults. 268 * (i) Looking up a page table entry when the Guest faults.
194 * 269 *
@@ -207,6 +282,11 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
207 pte_t gpte; 282 pte_t gpte;
208 pte_t *spte; 283 pte_t *spte;
209 284
285#ifdef CONFIG_X86_PAE
286 pmd_t *spmd;
287 pmd_t gpmd;
288#endif
289
210 /* First step: get the top-level Guest page table entry. */ 290 /* First step: get the top-level Guest page table entry. */
211 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); 291 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
212 /* Toplevel not present? We can't map it in. */ 292 /* Toplevel not present? We can't map it in. */
@@ -228,12 +308,45 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
228 check_gpgd(cpu, gpgd); 308 check_gpgd(cpu, gpgd);
229 /* And we copy the flags to the shadow PGD entry. The page 309 /* And we copy the flags to the shadow PGD entry. The page
230 * number in the shadow PGD is the page we just allocated. */ 310 * number in the shadow PGD is the page we just allocated. */
231 *spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd)); 311 set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd)));
232 } 312 }
233 313
314#ifdef CONFIG_X86_PAE
315 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
 316	/* Middle level not present? We can't map it in. */
317 if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
318 return false;
319
320 /* Now look at the matching shadow entry. */
321 spmd = spmd_addr(cpu, *spgd, vaddr);
322
323 if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
324 /* No shadow entry: allocate a new shadow PTE page. */
325 unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
326
327 /* This is not really the Guest's fault, but killing it is
328 * simple for this corner case. */
329 if (!ptepage) {
330 kill_guest(cpu, "out of memory allocating pte page");
331 return false;
332 }
333
334 /* We check that the Guest pmd is OK. */
335 check_gpmd(cpu, gpmd);
336
337 /* And we copy the flags to the shadow PMD entry. The page
338 * number in the shadow PMD is the page we just allocated. */
339 native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd)));
340 }
341
342 /* OK, now we look at the lower level in the Guest page table: keep its
343 * address, because we might update it later. */
344 gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
345#else
234 /* OK, now we look at the lower level in the Guest page table: keep its 346 /* OK, now we look at the lower level in the Guest page table: keep its
235 * address, because we might update it later. */ 347 * address, because we might update it later. */
236 gpte_ptr = gpte_addr(gpgd, vaddr); 348 gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
349#endif
237 gpte = lgread(cpu, gpte_ptr, pte_t); 350 gpte = lgread(cpu, gpte_ptr, pte_t);
238 351
239 /* If this page isn't in the Guest page tables, we can't page it in. */ 352 /* If this page isn't in the Guest page tables, we can't page it in. */
@@ -259,7 +372,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
259 gpte = pte_mkdirty(gpte); 372 gpte = pte_mkdirty(gpte);
260 373
261 /* Get the pointer to the shadow PTE entry we're going to set. */ 374 /* Get the pointer to the shadow PTE entry we're going to set. */
262 spte = spte_addr(*spgd, vaddr); 375 spte = spte_addr(cpu, *spgd, vaddr);
263 /* If there was a valid shadow PTE entry here before, we release it. 376 /* If there was a valid shadow PTE entry here before, we release it.
264 * This can happen with a write to a previously read-only entry. */ 377 * This can happen with a write to a previously read-only entry. */
265 release_pte(*spte); 378 release_pte(*spte);
@@ -273,7 +386,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
273 * table entry, even if the Guest says it's writable. That way 386 * table entry, even if the Guest says it's writable. That way
274 * we will come back here when a write does actually occur, so 387 * we will come back here when a write does actually occur, so
275 * we can update the Guest's _PAGE_DIRTY flag. */ 388 * we can update the Guest's _PAGE_DIRTY flag. */
276 *spte = gpte_to_spte(cpu, pte_wrprotect(gpte), 0); 389 native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
277 390
278 /* Finally, we write the Guest PTE entry back: we've set the 391 /* Finally, we write the Guest PTE entry back: we've set the
279 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */ 392 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
@@ -301,14 +414,23 @@ static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
301 pgd_t *spgd; 414 pgd_t *spgd;
302 unsigned long flags; 415 unsigned long flags;
303 416
417#ifdef CONFIG_X86_PAE
418 pmd_t *spmd;
419#endif
304 /* Look at the current top level entry: is it present? */ 420 /* Look at the current top level entry: is it present? */
305 spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr); 421 spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
306 if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) 422 if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
307 return false; 423 return false;
308 424
425#ifdef CONFIG_X86_PAE
426 spmd = spmd_addr(cpu, *spgd, vaddr);
427 if (!(pmd_flags(*spmd) & _PAGE_PRESENT))
428 return false;
429#endif
430
309 /* Check the flags on the pte entry itself: it must be present and 431 /* Check the flags on the pte entry itself: it must be present and
310 * writable. */ 432 * writable. */
311 flags = pte_flags(*(spte_addr(*spgd, vaddr))); 433 flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr)));
312 434
313 return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW); 435 return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
314} 436}
@@ -322,8 +444,43 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
322 kill_guest(cpu, "bad stack page %#lx", vaddr); 444 kill_guest(cpu, "bad stack page %#lx", vaddr);
323} 445}
324 446
447#ifdef CONFIG_X86_PAE
448static void release_pmd(pmd_t *spmd)
449{
450 /* If the entry's not present, there's nothing to release. */
451 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
452 unsigned int i;
453 pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
454 /* For each entry in the page, we might need to release it. */
455 for (i = 0; i < PTRS_PER_PTE; i++)
456 release_pte(ptepage[i]);
457 /* Now we can free the page of PTEs */
458 free_page((long)ptepage);
459 /* And zero out the PMD entry so we never release it twice. */
460 native_set_pmd(spmd, __pmd(0));
461 }
462}
463
464static void release_pgd(pgd_t *spgd)
465{
466 /* If the entry's not present, there's nothing to release. */
467 if (pgd_flags(*spgd) & _PAGE_PRESENT) {
468 unsigned int i;
469 pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
470
471 for (i = 0; i < PTRS_PER_PMD; i++)
472 release_pmd(&pmdpage[i]);
473
474 /* Now we can free the page of PMDs */
475 free_page((long)pmdpage);
476 /* And zero out the PGD entry so we never release it twice. */
477 set_pgd(spgd, __pgd(0));
478 }
479}
480
481#else /* !CONFIG_X86_PAE */
325/*H:450 If we chase down the release_pgd() code, it looks like this: */ 482/*H:450 If we chase down the release_pgd() code, it looks like this: */
326static void release_pgd(struct lguest *lg, pgd_t *spgd) 483static void release_pgd(pgd_t *spgd)
327{ 484{
328 /* If the entry's not present, there's nothing to release. */ 485 /* If the entry's not present, there's nothing to release. */
329 if (pgd_flags(*spgd) & _PAGE_PRESENT) { 486 if (pgd_flags(*spgd) & _PAGE_PRESENT) {
@@ -341,7 +498,7 @@ static void release_pgd(struct lguest *lg, pgd_t *spgd)
341 *spgd = __pgd(0); 498 *spgd = __pgd(0);
342 } 499 }
343} 500}
344 501#endif
345/*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings() 502/*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings()
346 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page. 503 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
347 * It simply releases every PTE page from 0 up to the Guest's kernel address. */ 504 * It simply releases every PTE page from 0 up to the Guest's kernel address. */
@@ -350,7 +507,7 @@ static void flush_user_mappings(struct lguest *lg, int idx)
350 unsigned int i; 507 unsigned int i;
351 /* Release every pgd entry up to the kernel's address. */ 508 /* Release every pgd entry up to the kernel's address. */
352 for (i = 0; i < pgd_index(lg->kernel_address); i++) 509 for (i = 0; i < pgd_index(lg->kernel_address); i++)
353 release_pgd(lg, lg->pgdirs[idx].pgdir + i); 510 release_pgd(lg->pgdirs[idx].pgdir + i);
354} 511}
355 512
356/*H:440 (v) Flushing (throwing away) page tables, 513/*H:440 (v) Flushing (throwing away) page tables,
@@ -369,7 +526,9 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
369{ 526{
370 pgd_t gpgd; 527 pgd_t gpgd;
371 pte_t gpte; 528 pte_t gpte;
372 529#ifdef CONFIG_X86_PAE
530 pmd_t gpmd;
531#endif
373 /* First step: get the top-level Guest page table entry. */ 532 /* First step: get the top-level Guest page table entry. */
374 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); 533 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
375 /* Toplevel not present? We can't map it in. */ 534 /* Toplevel not present? We can't map it in. */
@@ -378,7 +537,14 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
378 return -1UL; 537 return -1UL;
379 } 538 }
380 539
381 gpte = lgread(cpu, gpte_addr(gpgd, vaddr), pte_t); 540#ifdef CONFIG_X86_PAE
541 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
542 if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
543 kill_guest(cpu, "Bad address %#lx", vaddr);
544 gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
545#else
546 gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
547#endif
382 if (!(pte_flags(gpte) & _PAGE_PRESENT)) 548 if (!(pte_flags(gpte) & _PAGE_PRESENT))
383 kill_guest(cpu, "Bad address %#lx", vaddr); 549 kill_guest(cpu, "Bad address %#lx", vaddr);
384 550
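
Under PAE the address decomposition guest_pa() performs has three levels instead of two. A sketch of the split (field widths follow the standard 32-bit PAE layout; this is background, not code from the patch):

	/*
	 * 32-bit PAE virtual address:
	 *
	 *   bits 31-30  pgd index   (4 entries, 1GB each)
	 *   bits 29-21  pmd index   (512 entries, 2MB each)
	 *   bits 20-12  pte index   (512 entries, 4KB each)
	 *   bits 11-0   offset within the page
	 *
	 * so the PAE walk above reads a pgd_t, a pmd_t, then a pte_t,
	 * where the non-PAE walk reads just the pgd_t and pte_t.
	 */
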
@@ -405,6 +571,9 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
405 int *blank_pgdir) 571 int *blank_pgdir)
406{ 572{
407 unsigned int next; 573 unsigned int next;
574#ifdef CONFIG_X86_PAE
575 pmd_t *pmd_table;
576#endif
408 577
409 /* We pick one entry at random to throw out. Choosing the Least 578 /* We pick one entry at random to throw out. Choosing the Least
410 * Recently Used might be better, but this is easy. */ 579 * Recently Used might be better, but this is easy. */
@@ -416,10 +585,27 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
416 /* If the allocation fails, just keep using the one we have */ 585 /* If the allocation fails, just keep using the one we have */
417 if (!cpu->lg->pgdirs[next].pgdir) 586 if (!cpu->lg->pgdirs[next].pgdir)
418 next = cpu->cpu_pgd; 587 next = cpu->cpu_pgd;
419 else 588 else {
420 /* This is a blank page, so there are no kernel 589#ifdef CONFIG_X86_PAE
421 * mappings: caller must map the stack! */ 590 /* In PAE mode, allocate a pmd page and populate the
591 * last pgd entry. */
592 pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
593 if (!pmd_table) {
594 free_page((long)cpu->lg->pgdirs[next].pgdir);
595 set_pgd(cpu->lg->pgdirs[next].pgdir, __pgd(0));
596 next = cpu->cpu_pgd;
597 } else {
598 set_pgd(cpu->lg->pgdirs[next].pgdir +
599 SWITCHER_PGD_INDEX,
600 __pgd(__pa(pmd_table) | _PAGE_PRESENT));
601 /* This is a blank page, so there are no kernel
602 * mappings: caller must map the stack! */
603 *blank_pgdir = 1;
604 }
605#else
422 *blank_pgdir = 1; 606 *blank_pgdir = 1;
607#endif
608 }
423 } 609 }
424 /* Record which Guest toplevel this shadows. */ 610 /* Record which Guest toplevel this shadows. */
425 cpu->lg->pgdirs[next].gpgdir = gpgdir; 611 cpu->lg->pgdirs[next].gpgdir = gpgdir;
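
The PAE branch in new_pgdir() needs two fresh pages, the pgd page and its Switcher pmd page, and it either gets both or rolls back: if the pmd allocation fails, the just-allocated pgdir page is freed and "next" falls back to the pgd already in use, mirroring what the non-PAE path does when the first allocation fails. The pattern, stripped down (a sketch):

	pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
	if (pgdir) {
		pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
		if (!pmd_table) {
			/* Roll back; keep using the current pgd. */
			free_page((long)pgdir);
			pgdir = NULL;
		}
	}
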
@@ -431,7 +617,7 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
431 617
432/*H:430 (iv) Switching page tables 618/*H:430 (iv) Switching page tables
433 * 619 *
434 * Now we've seen all the page table setting and manipulation, let's see what 620 * Now we've seen all the page table setting and manipulation, let's see
435 * what happens when the Guest changes page tables (ie. changes the top-level 621 * what happens when the Guest changes page tables (ie. changes the top-level
436 * pgdir). This occurs on almost every context switch. */ 622 * pgdir). This occurs on almost every context switch. */
437void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable) 623void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
@@ -460,10 +646,25 @@ static void release_all_pagetables(struct lguest *lg)
460 646
461 /* Every shadow pagetable this Guest has */ 647 /* Every shadow pagetable this Guest has */
462 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) 648 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
463 if (lg->pgdirs[i].pgdir) 649 if (lg->pgdirs[i].pgdir) {
650#ifdef CONFIG_X86_PAE
651 pgd_t *spgd;
652 pmd_t *pmdpage;
653 unsigned int k;
654
655 /* Get the last pmd page. */
656 spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX;
657 pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
658
659 /* And release the pmd entries of that pmd page,
660 * except for the switcher pmd. */
661 for (k = 0; k < SWITCHER_PMD_INDEX; k++)
662 release_pmd(&pmdpage[k]);
663#endif
464 /* Every PGD entry except the Switcher at the top */ 664 /* Every PGD entry except the Switcher at the top */
465 for (j = 0; j < SWITCHER_PGD_INDEX; j++) 665 for (j = 0; j < SWITCHER_PGD_INDEX; j++)
466 release_pgd(lg, lg->pgdirs[i].pgdir + j); 666 release_pgd(lg->pgdirs[i].pgdir + j);
667 }
467} 668}
468 669
469/* We also throw away everything when a Guest tells us it's changed a kernel 670/* We also throw away everything when a Guest tells us it's changed a kernel
@@ -504,24 +705,37 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
504{ 705{
505 /* Look up the matching shadow page directory entry. */ 706 /* Look up the matching shadow page directory entry. */
506 pgd_t *spgd = spgd_addr(cpu, idx, vaddr); 707 pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
708#ifdef CONFIG_X86_PAE
709 pmd_t *spmd;
710#endif
507 711
508 /* If the top level isn't present, there's no entry to update. */ 712 /* If the top level isn't present, there's no entry to update. */
509 if (pgd_flags(*spgd) & _PAGE_PRESENT) { 713 if (pgd_flags(*spgd) & _PAGE_PRESENT) {
510 /* Otherwise, we start by releasing the existing entry. */ 714#ifdef CONFIG_X86_PAE
511 pte_t *spte = spte_addr(*spgd, vaddr); 715 spmd = spmd_addr(cpu, *spgd, vaddr);
512 release_pte(*spte); 716 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
513 717#endif
514 /* If they're setting this entry as dirty or accessed, we might 718 /* Otherwise, we start by releasing
515 * as well put that entry they've given us in now. This shaves 719 * the existing entry. */
516 * 10% off a copy-on-write micro-benchmark. */ 720 pte_t *spte = spte_addr(cpu, *spgd, vaddr);
517 if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) { 721 release_pte(*spte);
518 check_gpte(cpu, gpte); 722
519 *spte = gpte_to_spte(cpu, gpte, 723 /* If they're setting this entry as dirty or accessed,
520 pte_flags(gpte) & _PAGE_DIRTY); 724 * we might as well put that entry they've given us
521 } else 725 * in now. This shaves 10% off a
522 /* Otherwise kill it and we can demand_page() it in 726 * copy-on-write micro-benchmark. */
523 * later. */ 727 if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
524 *spte = __pte(0); 728 check_gpte(cpu, gpte);
729 native_set_pte(spte,
730 gpte_to_spte(cpu, gpte,
731 pte_flags(gpte) & _PAGE_DIRTY));
732 } else
733 /* Otherwise kill it and we can demand_page()
734 * it in later. */
735 native_set_pte(spte, __pte(0));
736#ifdef CONFIG_X86_PAE
737 }
738#endif
525 } 739 }
526} 740}
527 741
@@ -568,12 +782,10 @@ void guest_set_pte(struct lg_cpu *cpu,
568 * 782 *
569 * So with that in mind here's our code to update a (top-level) PGD entry: 783
570 */ 784 */
571void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx) 785void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
572{ 786{
573 int pgdir; 787 int pgdir;
574 788
575 /* The kernel seems to try to initialize this early on: we ignore its
576 * attempts to map over the Switcher. */
577 if (idx >= SWITCHER_PGD_INDEX) 789 if (idx >= SWITCHER_PGD_INDEX)
578 return; 790 return;
579 791
@@ -581,8 +793,14 @@ void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
581 pgdir = find_pgdir(lg, gpgdir); 793 pgdir = find_pgdir(lg, gpgdir);
582 if (pgdir < ARRAY_SIZE(lg->pgdirs)) 794 if (pgdir < ARRAY_SIZE(lg->pgdirs))
583 /* ... throw it away. */ 795 /* ... throw it away. */
584 release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx); 796 release_pgd(lg->pgdirs[pgdir].pgdir + idx);
585} 797}
798#ifdef CONFIG_X86_PAE
799void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
800{
801 guest_pagetable_clear_all(&lg->cpus[0]);
802}
803#endif
586 804
587/* Once we know how much memory we have we can construct simple identity 805/* Once we know how much memory we have we can construct simple identity
588 * (which set virtual == physical) and linear mappings 806 * (which set virtual == physical) and linear mappings
@@ -596,8 +814,16 @@ static unsigned long setup_pagetables(struct lguest *lg,
596{ 814{
597 pgd_t __user *pgdir; 815 pgd_t __user *pgdir;
598 pte_t __user *linear; 816 pte_t __user *linear;
599 unsigned int mapped_pages, i, linear_pages, phys_linear;
600 unsigned long mem_base = (unsigned long)lg->mem_base; 817 unsigned long mem_base = (unsigned long)lg->mem_base;
818 unsigned int mapped_pages, i, linear_pages;
819#ifdef CONFIG_X86_PAE
820 pmd_t __user *pmds;
821 unsigned int j;
822 pgd_t pgd;
823 pmd_t pmd;
824#else
825 unsigned int phys_linear;
826#endif
601 827
602 /* We have mapped_pages frames to map, so we need 828 /* We have mapped_pages frames to map, so we need
603 * linear_pages page tables to map them. */ 829 * linear_pages page tables to map them. */
@@ -610,6 +836,9 @@ static unsigned long setup_pagetables(struct lguest *lg,
610 /* Now we use the next linear_pages pages as pte pages */ 836 /* Now we use the next linear_pages pages as pte pages */
611 linear = (void *)pgdir - linear_pages * PAGE_SIZE; 837 linear = (void *)pgdir - linear_pages * PAGE_SIZE;
612 838
839#ifdef CONFIG_X86_PAE
840 pmds = (void *)linear - PAGE_SIZE;
841#endif
613 /* Linear mapping is easy: put every page's address into the 842 /* Linear mapping is easy: put every page's address into the
614 * mapping in order. */ 843 * mapping in order. */
615 for (i = 0; i < mapped_pages; i++) { 844 for (i = 0; i < mapped_pages; i++) {
@@ -621,6 +850,22 @@ static unsigned long setup_pagetables(struct lguest *lg,
621 850
622 /* The top level points to the linear page table pages above. 851 /* The top level points to the linear page table pages above.
623 * We setup the identity and linear mappings here. */ 852 * We setup the identity and linear mappings here. */
853#ifdef CONFIG_X86_PAE
854 for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD;
855 i += PTRS_PER_PTE, j++) {
856 native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i)
857 - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
858
859 if (copy_to_user(&pmds[j], &pmd, sizeof(pmd)) != 0)
860 return -EFAULT;
861 }
862
863 set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT));
864 if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0)
865 return -EFAULT;
866 if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0)
867 return -EFAULT;
868#else
624 phys_linear = (unsigned long)linear - mem_base; 869 phys_linear = (unsigned long)linear - mem_base;
625 for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) { 870 for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) {
626 pgd_t pgd; 871 pgd_t pgd;
@@ -633,6 +878,7 @@ static unsigned long setup_pagetables(struct lguest *lg,
633 &pgd, sizeof(pgd))) 878 &pgd, sizeof(pgd)))
634 return -EFAULT; 879 return -EFAULT;
635 } 880 }
881#endif
636 882
637 /* We return the top level (guest-physical) address: remember where 883 /* We return the top level (guest-physical) address: remember where
638 * this is. */ 884 * this is. */
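
Since PAE leaves only four top-level slots, the boot mapping built above stores one shared pmd page and points both pgdir[0] and pgdir[3] at it: entry 0 gives the identity mapping of low memory, and entry 3 repeats the same mapping at 0xC0000000, where a PAGE_OFFSET kernel expects to find itself. A picture of the result (a sketch; layout illustrative):

	/*
	 *   pgdir[0] --+
	 *   pgdir[3] --+--> pmds page --> linear pte pages --> guest RAM
	 *
	 * Two pgd slots aliasing one pmd page is what makes the identity
	 * map and the PAGE_OFFSET map views of the same physical pages.
	 */
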
@@ -648,7 +894,10 @@ int init_guest_pagetable(struct lguest *lg)
648 u64 mem; 894 u64 mem;
649 u32 initrd_size; 895 u32 initrd_size;
650 struct boot_params __user *boot = (struct boot_params *)lg->mem_base; 896 struct boot_params __user *boot = (struct boot_params *)lg->mem_base;
651 897#ifdef CONFIG_X86_PAE
898 pgd_t *pgd;
899 pmd_t *pmd_table;
900#endif
652 /* Get the Guest memory size and the ramdisk size from the boot header 901 /* Get the Guest memory size and the ramdisk size from the boot header
653 * located at lg->mem_base (Guest address 0). */ 902 * located at lg->mem_base (Guest address 0). */
654 if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem)) 903 if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem))
@@ -663,6 +912,15 @@ int init_guest_pagetable(struct lguest *lg)
663 lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL); 912 lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
664 if (!lg->pgdirs[0].pgdir) 913 if (!lg->pgdirs[0].pgdir)
665 return -ENOMEM; 914 return -ENOMEM;
915#ifdef CONFIG_X86_PAE
916 pgd = lg->pgdirs[0].pgdir;
917 pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL);
918 if (!pmd_table)
919 return -ENOMEM;
920
921 set_pgd(pgd + SWITCHER_PGD_INDEX,
922 __pgd(__pa(pmd_table) | _PAGE_PRESENT));
923#endif
666 lg->cpus[0].cpu_pgd = 0; 924 lg->cpus[0].cpu_pgd = 0;
667 return 0; 925 return 0;
668} 926}
@@ -672,17 +930,24 @@ void page_table_guest_data_init(struct lg_cpu *cpu)
672{ 930{
673 /* We get the kernel address: above this is all kernel memory. */ 931 /* We get the kernel address: above this is all kernel memory. */
674 if (get_user(cpu->lg->kernel_address, 932 if (get_user(cpu->lg->kernel_address,
675 &cpu->lg->lguest_data->kernel_address) 933 &cpu->lg->lguest_data->kernel_address)
676 /* We tell the Guest that it can't use the top 4MB of virtual 934 /* We tell the Guest that it can't use the top 2 or 4 MB
677 * addresses used by the Switcher. */ 935 * of virtual addresses used by the Switcher. */
678 || put_user(4U*1024*1024, &cpu->lg->lguest_data->reserve_mem) 936 || put_user(RESERVE_MEM * 1024 * 1024,
679 || put_user(cpu->lg->pgdirs[0].gpgdir, &cpu->lg->lguest_data->pgdir)) 937 &cpu->lg->lguest_data->reserve_mem)
938 || put_user(cpu->lg->pgdirs[0].gpgdir,
939 &cpu->lg->lguest_data->pgdir))
680 kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); 940 kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
681 941
682 /* In flush_user_mappings() we loop from 0 to 942 /* In flush_user_mappings() we loop from 0 to
683 * "pgd_index(lg->kernel_address)". This assumes it won't hit the 943 * "pgd_index(lg->kernel_address)". This assumes it won't hit the
684 * Switcher mappings, so check that now. */ 944 * Switcher mappings, so check that now. */
945#ifdef CONFIG_X86_PAE
946 if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX &&
947 pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX)
948#else
685 if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX) 949 if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
950#endif
686 kill_guest(cpu, "bad kernel address %#lx", 951 kill_guest(cpu, "bad kernel address %#lx",
687 cpu->lg->kernel_address); 952 cpu->lg->kernel_address);
688} 953}
@@ -708,16 +973,30 @@ void free_guest_pagetable(struct lguest *lg)
708void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) 973void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
709{ 974{
710 pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages); 975 pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
711 pgd_t switcher_pgd;
712 pte_t regs_pte; 976 pte_t regs_pte;
713 unsigned long pfn; 977 unsigned long pfn;
714 978
979#ifdef CONFIG_X86_PAE
980 pmd_t switcher_pmd;
981 pmd_t *pmd_table;
982
983 native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >>
984 PAGE_SHIFT, PAGE_KERNEL_EXEC));
985
986 pmd_table = __va(pgd_pfn(cpu->lg->
987 pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX])
988 << PAGE_SHIFT);
989 native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
990#else
991 pgd_t switcher_pgd;
992
715 /* Make the last PGD entry for this Guest point to the Switcher's PTE 993 /* Make the last PGD entry for this Guest point to the Switcher's PTE
716 * page for this CPU (with appropriate flags). */ 994 * page for this CPU (with appropriate flags). */
717 switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL); 995 switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC);
718 996
719 cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd; 997 cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
720 998
999#endif
721 /* We also change the Switcher PTE page. When we're running the Guest, 1000 /* We also change the Switcher PTE page. When we're running the Guest,
722 * we want the Guest's "regs" page to appear where the first Switcher 1001 * we want the Guest's "regs" page to appear where the first Switcher
723 * page for this CPU is. This is an optimization: when the Switcher 1002 * page for this CPU is. This is an optimization: when the Switcher
@@ -726,8 +1005,9 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
726 * page is already mapped there, we don't have to copy them out 1005 * page is already mapped there, we don't have to copy them out
727 * again. */ 1006 * again. */
728 pfn = __pa(cpu->regs_page) >> PAGE_SHIFT; 1007 pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
729 regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL)); 1008 native_set_pte(&regs_pte, pfn_pte(pfn, PAGE_KERNEL));
730 switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte; 1009 native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)],
1010 regs_pte);
731} 1011}
732/*:*/ 1012/*:*/
733 1013
@@ -752,21 +1032,21 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
752 1032
753 /* The first entries are easy: they map the Switcher code. */ 1033 /* The first entries are easy: they map the Switcher code. */
754 for (i = 0; i < pages; i++) { 1034 for (i = 0; i < pages; i++) {
755 pte[i] = mk_pte(switcher_page[i], 1035 native_set_pte(&pte[i], mk_pte(switcher_page[i],
756 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)); 1036 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
757 } 1037 }
758 1038
759 /* The only other thing we map is this CPU's pair of pages. */ 1039 /* The only other thing we map is this CPU's pair of pages. */
760 i = pages + cpu*2; 1040 i = pages + cpu*2;
761 1041
762 /* First page (Guest registers) is writable from the Guest */ 1042 /* First page (Guest registers) is writable from the Guest */
763 pte[i] = pfn_pte(page_to_pfn(switcher_page[i]), 1043 native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
764 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)); 1044 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));
765 1045
766 /* The second page contains the "struct lguest_ro_state", and is 1046 /* The second page contains the "struct lguest_ro_state", and is
767 * read-only. */ 1047 * read-only. */
768 pte[i+1] = pfn_pte(page_to_pfn(switcher_page[i+1]), 1048 native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
769 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)); 1049 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
770} 1050}
771 1051
772/* We've made it through the page table code. Perhaps our tired brains are 1052/* We've made it through the page table code. Perhaps our tired brains are
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index 7ede64ffeef..482ed5a1875 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -150,7 +150,7 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
150{ 150{
151 /* We assume the Guest has the same number of GDT entries as the 151 /* We assume the Guest has the same number of GDT entries as the
152 * Host, otherwise we'd have to dynamically allocate the Guest GDT. */ 152 * Host, otherwise we'd have to dynamically allocate the Guest GDT. */
153 if (num > ARRAY_SIZE(cpu->arch.gdt)) 153 if (num >= ARRAY_SIZE(cpu->arch.gdt))
154 kill_guest(cpu, "too many gdt entries %i", num); 154 kill_guest(cpu, "too many gdt entries %i", num);
155 155
156 /* Set it up, then fix it. */ 156 /* Set it up, then fix it. */
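
The one-character change above is an off-by-one fix: valid indices into cpu->arch.gdt run from 0 to ARRAY_SIZE(cpu->arch.gdt) - 1, so the old "num > ARRAY_SIZE(...)" test let num equal to the array size slip through, and the following store wrote one descriptor past the end of the GDT. In general form (a sketch):

	/* For any array T a[N], the reject test must be "idx >= N":
	 * "idx > N" accepts idx == N, and a[N] is out of bounds. */
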
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 1a83910f674..eaf722fe309 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -358,6 +358,16 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
358 if (emulate_insn(cpu)) 358 if (emulate_insn(cpu))
359 return; 359 return;
360 } 360 }
361 /* If KVM is active, the vmcall instruction triggers a
362 * General Protection Fault. Normally it triggers an
363 * invalid opcode fault (6): */
364 case 6:
365 /* We need to check if ring == GUEST_PL and
366 * faulting instruction == vmcall. */
367 if (is_hypercall(cpu)) {
368 rewrite_hypercall(cpu);
369 return;
370 }
361 break; 371 break;
362 case 14: /* We've intercepted a Page Fault. */ 372 case 14: /* We've intercepted a Page Fault. */
363 /* The Guest accessed a virtual address that wasn't mapped. 373 /* The Guest accessed a virtual address that wasn't mapped.
@@ -403,15 +413,6 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
403 * up the pointer now to indicate a hypercall is pending. */ 413 * up the pointer now to indicate a hypercall is pending. */
404 cpu->hcall = (struct hcall_args *)cpu->regs; 414 cpu->hcall = (struct hcall_args *)cpu->regs;
405 return; 415 return;
406 case 6:
407 /* kvm hypercalls trigger an invalid opcode fault (6).
408 * We need to check if ring == GUEST_PL and
409 * faulting instruction == vmcall. */
410 if (is_hypercall(cpu)) {
411 rewrite_hypercall(cpu);
412 return;
413 }
414 break;
415 } 416 }
416 417
417 /* We didn't handle the trap, so it needs to go to the Guest. */ 418 /* We didn't handle the trap, so it needs to go to the Guest. */
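
Moving case 6 up so it directly follows the General Protection case is the point of this hunk: when the Guest runs under KVM, its "vmcall" raises a GP fault (trap 13) rather than the invalid-opcode fault (trap 6) it raises on bare hardware, so once emulate_insn() declines a trap 13 we fall through into the very same hypercall check. The resulting control flow, in outline (a sketch):

	/*
	 *   case 13:                      GP fault
	 *       if (emulate_insn(cpu))    in-guest I/O emulation?
	 *           return;
	 *       (fall through)
	 *   case 6:                       invalid opcode
	 *       if (is_hypercall(cpu)) {  ring GUEST_PL, insn == vmcall?
	 *           rewrite_hypercall(cpu);
	 *           return;
	 *       }
	 *       break;                    reflect the trap to the Guest
	 */
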
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 47c68bc75a1..3319c2fec28 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -232,7 +232,7 @@ static struct page *read_sb_page(mddev_t *mddev, long offset,
232 target = rdev->sb_start + offset + index * (PAGE_SIZE/512); 232 target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
233 233
234 if (sync_page_io(rdev->bdev, target, 234 if (sync_page_io(rdev->bdev, target,
235 roundup(size, bdev_hardsect_size(rdev->bdev)), 235 roundup(size, bdev_logical_block_size(rdev->bdev)),
236 page, READ)) { 236 page, READ)) {
237 page->index = index; 237 page->index = index;
238 attach_page_buffers(page, NULL); /* so that free_buffer will 238 attach_page_buffers(page, NULL); /* so that free_buffer will
@@ -287,7 +287,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
287 int size = PAGE_SIZE; 287 int size = PAGE_SIZE;
288 if (page->index == bitmap->file_pages-1) 288 if (page->index == bitmap->file_pages-1)
289 size = roundup(bitmap->last_page_size, 289 size = roundup(bitmap->last_page_size,
290 bdev_hardsect_size(rdev->bdev)); 290 bdev_logical_block_size(rdev->bdev));
291 /* Just make sure we aren't corrupting data or 291 /* Just make sure we aren't corrupting data or
292 * metadata 292 * metadata
293 */ 293 */
@@ -1097,14 +1097,12 @@ void bitmap_daemon_work(struct bitmap *bitmap)
1097 } 1097 }
1098 bitmap->allclean = 1; 1098 bitmap->allclean = 1;
1099 1099
1100 spin_lock_irqsave(&bitmap->lock, flags);
1100 for (j = 0; j < bitmap->chunks; j++) { 1101 for (j = 0; j < bitmap->chunks; j++) {
1101 bitmap_counter_t *bmc; 1102 bitmap_counter_t *bmc;
1102 spin_lock_irqsave(&bitmap->lock, flags); 1103 if (!bitmap->filemap)
1103 if (!bitmap->filemap) {
1104 /* error or shutdown */ 1104 /* error or shutdown */
1105 spin_unlock_irqrestore(&bitmap->lock, flags);
1106 break; 1105 break;
1107 }
1108 1106
1109 page = filemap_get_page(bitmap, j); 1107 page = filemap_get_page(bitmap, j);
1110 1108
@@ -1121,6 +1119,8 @@ void bitmap_daemon_work(struct bitmap *bitmap)
1121 write_page(bitmap, page, 0); 1119 write_page(bitmap, page, 0);
1122 bitmap->allclean = 0; 1120 bitmap->allclean = 0;
1123 } 1121 }
1122 spin_lock_irqsave(&bitmap->lock, flags);
1123 j |= (PAGE_BITS - 1);
1124 continue; 1124 continue;
1125 } 1125 }
1126 1126
@@ -1181,9 +1181,10 @@ void bitmap_daemon_work(struct bitmap *bitmap)
1181 ext2_clear_bit(file_page_offset(j), paddr); 1181 ext2_clear_bit(file_page_offset(j), paddr);
1182 kunmap_atomic(paddr, KM_USER0); 1182 kunmap_atomic(paddr, KM_USER0);
1183 } 1183 }
1184 } 1184 } else
1185 spin_unlock_irqrestore(&bitmap->lock, flags); 1185 j |= PAGE_COUNTER_MASK;
1186 } 1186 }
1187 spin_unlock_irqrestore(&bitmap->lock, flags);
1187 1188
1188 /* now sync the final page */ 1189 /* now sync the final page */
1189 if (lastpage != NULL) { 1190 if (lastpage != NULL) {
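
The locking rework takes bitmap->lock once around the whole chunk loop instead of once per counter; whenever the code must sleep (write_page()) it drops the lock, does the work, retakes it, and then advances j to the last counter of the current page ("j |= (PAGE_BITS - 1)" or "j |= PAGE_COUNTER_MASK") so the for-loop increment resumes at the first counter of the next page rather than rescanning one whose state it no longer holds. The skip idiom in isolation (a sketch; handle_page() and COUNTERS_PER_PAGE are hypothetical stand-ins for the real helpers and masks):

	for (j = 0; j < chunks; j++) {
		if (handle_page(j)) {
			/* This whole page is done: jump j to its last
			 * counter; j++ then lands on the next page. */
			j |= COUNTERS_PER_PAGE - 1;
			continue;
		}
		/* otherwise fall through and process counter j */
	}
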
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index a2e26c24214..75d8081a904 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -178,7 +178,7 @@ static int set_chunk_size(struct dm_exception_store *store,
178 } 178 }
179 179
180 /* Validate the chunk size against the device block size */ 180 /* Validate the chunk size against the device block size */
181 if (chunk_size_ulong % (bdev_hardsect_size(store->cow->bdev) >> 9)) { 181 if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
182 *error = "Chunk size is not a multiple of device blocksize"; 182 *error = "Chunk size is not a multiple of device blocksize";
183 return -EINVAL; 183 return -EINVAL;
184 } 184 }
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index be233bc4d91..6fa8ccf91c7 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -413,7 +413,8 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
413 * Buffer holds both header and bitset. 413 * Buffer holds both header and bitset.
414 */ 414 */
415 buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + 415 buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
416 bitset_size, ti->limits.hardsect_size); 416 bitset_size,
417 ti->limits.logical_block_size);
417 418
418 if (buf_size > dev->bdev->bd_inode->i_size) { 419 if (buf_size > dev->bdev->bd_inode->i_size) {
419 DMWARN("log device %s too small: need %llu bytes", 420 DMWARN("log device %s too small: need %llu bytes",
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index e75c6dd76a9..2662a41337e 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -282,7 +282,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
282 */ 282 */
283 if (!ps->store->chunk_size) { 283 if (!ps->store->chunk_size) {
284 ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS, 284 ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
285 bdev_hardsect_size(ps->store->cow->bdev) >> 9); 285 bdev_logical_block_size(ps->store->cow->bdev) >> 9);
286 ps->store->chunk_mask = ps->store->chunk_size - 1; 286 ps->store->chunk_mask = ps->store->chunk_size - 1;
287 ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1; 287 ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
288 chunk_size_supplied = 0; 288 chunk_size_supplied = 0;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 429b50b975d..e9a73bb242b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -108,7 +108,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
108 lhs->max_hw_segments = 108 lhs->max_hw_segments =
109 min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments); 109 min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
110 110
111 lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size); 111 lhs->logical_block_size = max(lhs->logical_block_size,
112 rhs->logical_block_size);
112 113
113 lhs->max_segment_size = 114 lhs->max_segment_size =
114 min_not_zero(lhs->max_segment_size, rhs->max_segment_size); 115 min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
@@ -509,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
509 * combine_restrictions_low() 510 * combine_restrictions_low()
510 */ 511 */
511 rs->max_sectors = 512 rs->max_sectors =
512 min_not_zero(rs->max_sectors, q->max_sectors); 513 min_not_zero(rs->max_sectors, queue_max_sectors(q));
513 514
514 /* 515 /*
515 * Check if merge fn is supported. 516 * Check if merge fn is supported.
@@ -524,24 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
524 525
525 rs->max_phys_segments = 526 rs->max_phys_segments =
526 min_not_zero(rs->max_phys_segments, 527 min_not_zero(rs->max_phys_segments,
527 q->max_phys_segments); 528 queue_max_phys_segments(q));
528 529
529 rs->max_hw_segments = 530 rs->max_hw_segments =
530 min_not_zero(rs->max_hw_segments, q->max_hw_segments); 531 min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
531 532
532 rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size); 533 rs->logical_block_size = max(rs->logical_block_size,
534 queue_logical_block_size(q));
533 535
534 rs->max_segment_size = 536 rs->max_segment_size =
535 min_not_zero(rs->max_segment_size, q->max_segment_size); 537 min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
536 538
537 rs->max_hw_sectors = 539 rs->max_hw_sectors =
538 min_not_zero(rs->max_hw_sectors, q->max_hw_sectors); 540 min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
539 541
540 rs->seg_boundary_mask = 542 rs->seg_boundary_mask =
541 min_not_zero(rs->seg_boundary_mask, 543 min_not_zero(rs->seg_boundary_mask,
542 q->seg_boundary_mask); 544 queue_segment_boundary(q));
543 545
544 rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn); 546 rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
545 547
546 rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 548 rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
547} 549}
@@ -683,8 +685,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
683 rs->max_phys_segments = MAX_PHYS_SEGMENTS; 685 rs->max_phys_segments = MAX_PHYS_SEGMENTS;
684 if (!rs->max_hw_segments) 686 if (!rs->max_hw_segments)
685 rs->max_hw_segments = MAX_HW_SEGMENTS; 687 rs->max_hw_segments = MAX_HW_SEGMENTS;
686 if (!rs->hardsect_size) 688 if (!rs->logical_block_size)
687 rs->hardsect_size = 1 << SECTOR_SHIFT; 689 rs->logical_block_size = 1 << SECTOR_SHIFT;
688 if (!rs->max_segment_size) 690 if (!rs->max_segment_size)
689 rs->max_segment_size = MAX_SEGMENT_SIZE; 691 rs->max_segment_size = MAX_SEGMENT_SIZE;
690 if (!rs->seg_boundary_mask) 692 if (!rs->seg_boundary_mask)
@@ -912,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
912 * restrictions. 914 * restrictions.
913 */ 915 */
914 blk_queue_max_sectors(q, t->limits.max_sectors); 916 blk_queue_max_sectors(q, t->limits.max_sectors);
915 q->max_phys_segments = t->limits.max_phys_segments; 917 blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
916 q->max_hw_segments = t->limits.max_hw_segments; 918 blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
917 q->hardsect_size = t->limits.hardsect_size; 919 blk_queue_logical_block_size(q, t->limits.logical_block_size);
918 q->max_segment_size = t->limits.max_segment_size; 920 blk_queue_max_segment_size(q, t->limits.max_segment_size);
919 q->max_hw_sectors = t->limits.max_hw_sectors; 921 blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
920 q->seg_boundary_mask = t->limits.seg_boundary_mask; 922 blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
921 q->bounce_pfn = t->limits.bounce_pfn; 923 blk_queue_bounce_limit(q, t->limits.bounce_pfn);
922 924
923 if (t->limits.no_cluster) 925 if (t->limits.no_cluster)
924 queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); 926 queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
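
Every change in this file is the same mechanical substitution: open-coded pokes at struct request_queue fields become queue_*() accessors on the read side and blk_queue_*() setters on the write side (with hardsect_size renamed to logical_block_size throughout), so the block layer can keep derived limits consistent instead of trusting each driver to. The before/after shape (a sketch; q is a struct request_queue *):

	/* before: raw field access */
	max = q->max_sectors;
	q->hardsect_size = size;

	/* after: accessor and setter */
	max = queue_max_sectors(q);
	blk_queue_logical_block_size(q, size);
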
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 424f7b048c3..3fd8b1e6548 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -20,7 +20,8 @@
20#include <linux/idr.h> 20#include <linux/idr.h>
21#include <linux/hdreg.h> 21#include <linux/hdreg.h>
22#include <linux/blktrace_api.h> 22#include <linux/blktrace_api.h>
23#include <trace/block.h> 23
24#include <trace/events/block.h>
24 25
25#define DM_MSG_PREFIX "core" 26#define DM_MSG_PREFIX "core"
26 27
@@ -53,8 +54,6 @@ struct dm_target_io {
53 union map_info info; 54 union map_info info;
54}; 55};
55 56
56DEFINE_TRACE(block_bio_complete);
57
58/* 57/*
59 * For request-based dm. 58 * For request-based dm.
60 * One of these is allocated per request. 59 * One of these is allocated per request.
@@ -656,8 +655,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
656 /* the bio has been remapped so dispatch it */ 655 /* the bio has been remapped so dispatch it */
657 656
658 trace_block_remap(bdev_get_queue(clone->bi_bdev), clone, 657 trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
659 tio->io->bio->bi_bdev->bd_dev, 658 tio->io->bio->bi_bdev->bd_dev, sector);
660 clone->bi_sector, sector);
661 659
662 generic_make_request(clone); 660 generic_make_request(clone);
663 } else if (r < 0 || r == DM_MAPIO_REQUEUE) { 661 } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 7a36e38393a..64f1f3e046e 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -146,7 +146,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
146 * a one page request is never in violation. 146 * a one page request is never in violation.
147 */ 147 */
148 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 148 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
149 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 149 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
150 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 150 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
151 151
152 disk->num_sectors = rdev->sectors; 152 disk->num_sectors = rdev->sectors;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fccc8343a25..20f6ac33834 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1202,7 +1202,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1202 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1202 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1203 1203
1204 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; 1204 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1205 bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; 1205 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1206 if (rdev->sb_size & bmask) 1206 if (rdev->sb_size & bmask)
1207 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1207 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1208 1208
@@ -1375,6 +1375,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1375 1375
1376 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 1376 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1377 sb->size = cpu_to_le64(mddev->dev_sectors); 1377 sb->size = cpu_to_le64(mddev->dev_sectors);
1378 sb->chunksize = cpu_to_le32(mddev->chunk_size >> 9);
1379 sb->level = cpu_to_le32(mddev->level);
1380 sb->layout = cpu_to_le32(mddev->layout);
1378 1381
1379 if (mddev->bitmap && mddev->bitmap_file == NULL) { 1382 if (mddev->bitmap && mddev->bitmap_file == NULL) {
1380 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); 1383 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
@@ -3303,7 +3306,9 @@ static ssize_t
3303action_show(mddev_t *mddev, char *page) 3306action_show(mddev_t *mddev, char *page)
3304{ 3307{
3305 char *type = "idle"; 3308 char *type = "idle";
3306 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3309 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3310 type = "frozen";
3311 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3307 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) { 3312 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
3308 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 3313 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3309 type = "reshape"; 3314 type = "reshape";
@@ -3326,7 +3331,12 @@ action_store(mddev_t *mddev, const char *page, size_t len)
3326 if (!mddev->pers || !mddev->pers->sync_request) 3331 if (!mddev->pers || !mddev->pers->sync_request)
3327 return -EINVAL; 3332 return -EINVAL;
3328 3333
3329 if (cmd_match(page, "idle")) { 3334 if (cmd_match(page, "frozen"))
3335 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3336 else
3337 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3338
3339 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
3330 if (mddev->sync_thread) { 3340 if (mddev->sync_thread) {
3331 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 3341 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3332 md_unregister_thread(mddev->sync_thread); 3342 md_unregister_thread(mddev->sync_thread);
@@ -3680,7 +3690,7 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
3680 if (strict_blocks_to_sectors(buf, &sectors) < 0) 3690 if (strict_blocks_to_sectors(buf, &sectors) < 0)
3681 return -EINVAL; 3691 return -EINVAL;
3682 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) 3692 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
3683 return -EINVAL; 3693 return -E2BIG;
3684 3694
3685 mddev->external_size = 1; 3695 mddev->external_size = 1;
3686 } 3696 }
@@ -5557,7 +5567,7 @@ static struct block_device_operations md_fops =
5557 .owner = THIS_MODULE, 5567 .owner = THIS_MODULE,
5558 .open = md_open, 5568 .open = md_open,
5559 .release = md_release, 5569 .release = md_release,
5560 .locked_ioctl = md_ioctl, 5570 .ioctl = md_ioctl,
5561 .getgeo = md_getgeo, 5571 .getgeo = md_getgeo,
5562 .media_changed = md_media_changed, 5572 .media_changed = md_media_changed,
5563 .revalidate_disk= md_revalidate, 5573 .revalidate_disk= md_revalidate,
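The md_fops change above is part of the BKL push-down: a .locked_ioctl method ran under the big kernel lock, while a plain .ioctl handler is entered lock-free and must serialize on its own. A minimal sketch of the new registration follows; example_ioctl is a hypothetical placeholder, not md's real md_ioctl.

#include <linux/blkdev.h>
#include <linux/module.h>

/* Hedged sketch: register an ioctl handler that runs without the BKL.
 * The handler takes whatever driver lock it needs itself. */
static int example_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;		/* acquire a driver mutex here if needed */
}

static struct block_device_operations example_fops = {
	.owner = THIS_MODULE,
	.ioctl = example_ioctl,	/* was .locked_ioctl */
};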
@@ -6352,12 +6362,13 @@ void md_do_sync(mddev_t *mddev)
6352 6362
6353 skipped = 0; 6363 skipped = 0;
6354 6364
6355 if ((mddev->curr_resync > mddev->curr_resync_completed && 6365 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
6356 (mddev->curr_resync - mddev->curr_resync_completed) 6366 ((mddev->curr_resync > mddev->curr_resync_completed &&
6357 > (max_sectors >> 4)) || 6367 (mddev->curr_resync - mddev->curr_resync_completed)
6358 (j - mddev->curr_resync_completed)*2 6368 > (max_sectors >> 4)) ||
6359 >= mddev->resync_max - mddev->curr_resync_completed 6369 (j - mddev->curr_resync_completed)*2
6360 ) { 6370 >= mddev->resync_max - mddev->curr_resync_completed
6371 )) {
6361 /* time to update curr_resync_completed */ 6372 /* time to update curr_resync_completed */
6362 blk_unplug(mddev->queue); 6373 blk_unplug(mddev->queue);
6363 wait_event(mddev->recovery_wait, 6374 wait_event(mddev->recovery_wait,
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 41ced0cbe82..4ee31aa13c4 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -303,7 +303,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
303 * merge_bvec_fn will be involved in multipath.) 303 * merge_bvec_fn will be involved in multipath.)
304 */ 304 */
305 if (q->merge_bvec_fn && 305 if (q->merge_bvec_fn &&
306 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 306 queue_max_sectors(q) > (PAGE_SIZE>>9))
307 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 307 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
308 308
309 conf->working_disks++; 309 conf->working_disks++;
@@ -467,7 +467,7 @@ static int multipath_run (mddev_t *mddev)
467 * violating it, not that we ever expect a device with 467 * violating it, not that we ever expect a device with
468 * a merge_bvec_fn to be involved in multipath */ 468 * a merge_bvec_fn to be involved in multipath */
469 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 469 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
470 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 470 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
471 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 471 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
472 472
473 if (!test_bit(Faulty, &rdev->flags)) 473 if (!test_bit(Faulty, &rdev->flags))
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c08d7559be5..925507e7d67 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -144,7 +144,7 @@ static int create_strip_zones (mddev_t *mddev)
144 */ 144 */
145 145
146 if (rdev1->bdev->bd_disk->queue->merge_bvec_fn && 146 if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
147 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 147 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
148 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 148 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
149 149
150 if (!smallest || (rdev1->sectors < smallest->sectors)) 150 if (!smallest || (rdev1->sectors < smallest->sectors))
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 36df9109cde..e23758b4a34 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1130,7 +1130,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1130 * a one page request is never in violation. 1130 * a one page request is never in violation.
1131 */ 1131 */
1132 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 1132 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1133 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 1133 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
1134 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 1134 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
1135 1135
1136 p->head_position = 0; 1136 p->head_position = 0;
@@ -1996,7 +1996,7 @@ static int run(mddev_t *mddev)
1996 * a one page request is never in violation. 1996 * a one page request is never in violation.
1997 */ 1997 */
1998 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 1998 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1999 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 1999 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
2000 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 2000 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
2001 2001
2002 disk->head_position = 0; 2002 disk->head_position = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 499620afb44..750550c1166 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1158,8 +1158,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1158 * a one page request is never in violation. 1158 * a one page request is never in violation.
1159 */ 1159 */
1160 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 1160 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1161 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 1161 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
1162 mddev->queue->max_sectors = (PAGE_SIZE>>9); 1162 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
1163 1163
1164 p->head_position = 0; 1164 p->head_position = 0;
1165 rdev->raid_disk = mirror; 1165 rdev->raid_disk = mirror;
@@ -2145,8 +2145,8 @@ static int run(mddev_t *mddev)
2145 * a one page request is never in violation. 2145 * a one page request is never in violation.
2146 */ 2146 */
2147 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 2147 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
2148 mddev->queue->max_sectors > (PAGE_SIZE>>9)) 2148 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
2149 mddev->queue->max_sectors = (PAGE_SIZE>>9); 2149 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
2150 2150
2151 disk->head_position = 0; 2151 disk->head_position = 0;
2152 } 2152 }
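The linear, multipath, raid0, raid1, and raid10 hunks above all repeat one idiom, so a compact sketch may help: the current limit is read through the queue_max_sectors() accessor rather than the raw field, and lowered through the setter (raid10 had previously even assigned mddev->queue->max_sectors directly).

#include <linux/blkdev.h>

/* Hedged sketch of the recurring clamp: if a member device has a
 * merge_bvec_fn, restrict requests to one page so it is never violated. */
static void clamp_to_one_page(struct request_queue *q)
{
	if (queue_max_sectors(q) > (PAGE_SIZE >> 9))
		blk_queue_max_sectors(q, PAGE_SIZE >> 9);
}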
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4616bc3a6e7..bef87669823 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -362,7 +362,7 @@ static void raid5_unplug_device(struct request_queue *q);
362 362
363static struct stripe_head * 363static struct stripe_head *
364get_active_stripe(raid5_conf_t *conf, sector_t sector, 364get_active_stripe(raid5_conf_t *conf, sector_t sector,
365 int previous, int noblock) 365 int previous, int noblock, int noquiesce)
366{ 366{
367 struct stripe_head *sh; 367 struct stripe_head *sh;
368 368
@@ -372,7 +372,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
372 372
373 do { 373 do {
374 wait_event_lock_irq(conf->wait_for_stripe, 374 wait_event_lock_irq(conf->wait_for_stripe,
375 conf->quiesce == 0, 375 conf->quiesce == 0 || noquiesce,
376 conf->device_lock, /* nothing */); 376 conf->device_lock, /* nothing */);
377 sh = __find_stripe(conf, sector, conf->generation - previous); 377 sh = __find_stripe(conf, sector, conf->generation - previous);
378 if (!sh) { 378 if (!sh) {
@@ -2671,7 +2671,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
2671 sector_t bn = compute_blocknr(sh, i, 1); 2671 sector_t bn = compute_blocknr(sh, i, 1);
2672 sector_t s = raid5_compute_sector(conf, bn, 0, 2672 sector_t s = raid5_compute_sector(conf, bn, 0,
2673 &dd_idx, NULL); 2673 &dd_idx, NULL);
2674 sh2 = get_active_stripe(conf, s, 0, 1); 2674 sh2 = get_active_stripe(conf, s, 0, 1, 1);
2675 if (sh2 == NULL) 2675 if (sh2 == NULL)
2676 /* so far only the early blocks of this stripe 2676 /* so far only the early blocks of this stripe
2677 * have been requested. When later blocks 2677 * have been requested. When later blocks
@@ -2944,7 +2944,7 @@ static bool handle_stripe5(struct stripe_head *sh)
2944 /* Finish reconstruct operations initiated by the expansion process */ 2944 /* Finish reconstruct operations initiated by the expansion process */
2945 if (sh->reconstruct_state == reconstruct_state_result) { 2945 if (sh->reconstruct_state == reconstruct_state_result) {
2946 struct stripe_head *sh2 2946 struct stripe_head *sh2
2947 = get_active_stripe(conf, sh->sector, 1, 1); 2947 = get_active_stripe(conf, sh->sector, 1, 1, 1);
2948 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { 2948 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
2949 /* sh cannot be written until sh2 has been read. 2949 /* sh cannot be written until sh2 has been read.
2950 * so arrange for sh to be delayed a little 2950 * so arrange for sh to be delayed a little
@@ -3189,7 +3189,7 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
3189 3189
3190 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { 3190 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
3191 struct stripe_head *sh2 3191 struct stripe_head *sh2
3192 = get_active_stripe(conf, sh->sector, 1, 1); 3192 = get_active_stripe(conf, sh->sector, 1, 1, 1);
3193 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { 3193 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
3194 /* sh cannot be written until sh2 has been read. 3194 /* sh cannot be written until sh2 has been read.
3195 * so arrange for sh to be delayed a little 3195 * so arrange for sh to be delayed a little
@@ -3288,7 +3288,7 @@ static void unplug_slaves(mddev_t *mddev)
3288 int i; 3288 int i;
3289 3289
3290 rcu_read_lock(); 3290 rcu_read_lock();
3291 for (i=0; i<mddev->raid_disks; i++) { 3291 for (i = 0; i < conf->raid_disks; i++) {
3292 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3292 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
3293 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 3293 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
3294 struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 3294 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
@@ -3463,10 +3463,10 @@ static int bio_fits_rdev(struct bio *bi)
3463{ 3463{
3464 struct request_queue *q = bdev_get_queue(bi->bi_bdev); 3464 struct request_queue *q = bdev_get_queue(bi->bi_bdev);
3465 3465
3466 if ((bi->bi_size>>9) > q->max_sectors) 3466 if ((bi->bi_size>>9) > queue_max_sectors(q))
3467 return 0; 3467 return 0;
3468 blk_recount_segments(q, bi); 3468 blk_recount_segments(q, bi);
3469 if (bi->bi_phys_segments > q->max_phys_segments) 3469 if (bi->bi_phys_segments > queue_max_phys_segments(q))
3470 return 0; 3470 return 0;
3471 3471
3472 if (q->merge_bvec_fn) 3472 if (q->merge_bvec_fn)
@@ -3675,7 +3675,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
3675 (unsigned long long)logical_sector); 3675 (unsigned long long)logical_sector);
3676 3676
3677 sh = get_active_stripe(conf, new_sector, previous, 3677 sh = get_active_stripe(conf, new_sector, previous,
3678 (bi->bi_rw&RWA_MASK)); 3678 (bi->bi_rw&RWA_MASK), 0);
3679 if (sh) { 3679 if (sh) {
3680 if (unlikely(previous)) { 3680 if (unlikely(previous)) {
3681 /* expansion might have moved on while waiting for a 3681 /* expansion might have moved on while waiting for a
@@ -3811,13 +3811,13 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
3811 safepos = conf->reshape_safe; 3811 safepos = conf->reshape_safe;
3812 sector_div(safepos, data_disks); 3812 sector_div(safepos, data_disks);
3813 if (mddev->delta_disks < 0) { 3813 if (mddev->delta_disks < 0) {
3814 writepos -= reshape_sectors; 3814 writepos -= min_t(sector_t, reshape_sectors, writepos);
3815 readpos += reshape_sectors; 3815 readpos += reshape_sectors;
3816 safepos += reshape_sectors; 3816 safepos += reshape_sectors;
3817 } else { 3817 } else {
3818 writepos += reshape_sectors; 3818 writepos += reshape_sectors;
3819 readpos -= reshape_sectors; 3819 readpos -= min_t(sector_t, reshape_sectors, readpos);
3820 safepos -= reshape_sectors; 3820 safepos -= min_t(sector_t, reshape_sectors, safepos);
3821 } 3821 }
3822 3822
3823 /* 'writepos' is the most advanced device address we might write. 3823 /* 'writepos' is the most advanced device address we might write.
@@ -3873,7 +3873,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
3873 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { 3873 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
3874 int j; 3874 int j;
3875 int skipped = 0; 3875 int skipped = 0;
3876 sh = get_active_stripe(conf, stripe_addr+i, 0, 0); 3876 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
3877 set_bit(STRIPE_EXPANDING, &sh->state); 3877 set_bit(STRIPE_EXPANDING, &sh->state);
3878 atomic_inc(&conf->reshape_stripes); 3878 atomic_inc(&conf->reshape_stripes);
3879 /* If any of this stripe is beyond the end of the old 3879 /* If any of this stripe is beyond the end of the old
@@ -3916,13 +3916,13 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
3916 raid5_compute_sector(conf, stripe_addr*(new_data_disks), 3916 raid5_compute_sector(conf, stripe_addr*(new_data_disks),
3917 1, &dd_idx, NULL); 3917 1, &dd_idx, NULL);
3918 last_sector = 3918 last_sector =
3919 raid5_compute_sector(conf, ((stripe_addr+conf->chunk_size/512) 3919 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
3920 *(new_data_disks) - 1), 3920 *(new_data_disks) - 1),
3921 1, &dd_idx, NULL); 3921 1, &dd_idx, NULL);
3922 if (last_sector >= mddev->dev_sectors) 3922 if (last_sector >= mddev->dev_sectors)
3923 last_sector = mddev->dev_sectors - 1; 3923 last_sector = mddev->dev_sectors - 1;
3924 while (first_sector <= last_sector) { 3924 while (first_sector <= last_sector) {
3925 sh = get_active_stripe(conf, first_sector, 1, 0); 3925 sh = get_active_stripe(conf, first_sector, 1, 0, 1);
3926 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3926 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3927 set_bit(STRIPE_HANDLE, &sh->state); 3927 set_bit(STRIPE_HANDLE, &sh->state);
3928 release_stripe(sh); 3928 release_stripe(sh);
@@ -4022,9 +4022,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
4022 4022
4023 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 4023 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
4024 4024
4025 sh = get_active_stripe(conf, sector_nr, 0, 1); 4025 sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
4026 if (sh == NULL) { 4026 if (sh == NULL) {
4027 sh = get_active_stripe(conf, sector_nr, 0, 0); 4027 sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
4028 /* make sure we don't swamp the stripe cache if someone else 4028 /* make sure we don't swamp the stripe cache if someone else
4029 * is trying to get access 4029 * is trying to get access
4030 */ 4030 */
@@ -4034,7 +4034,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
4034 * We don't need to check the 'failed' flag as when that gets set, 4034 * We don't need to check the 'failed' flag as when that gets set,
4035 * recovery aborts. 4035 * recovery aborts.
4036 */ 4036 */
4037 for (i=0; i<mddev->raid_disks; i++) 4037 for (i = 0; i < conf->raid_disks; i++)
4038 if (conf->disks[i].rdev == NULL) 4038 if (conf->disks[i].rdev == NULL)
4039 still_degraded = 1; 4039 still_degraded = 1;
4040 4040
@@ -4086,7 +4086,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
4086 /* already done this stripe */ 4086 /* already done this stripe */
4087 continue; 4087 continue;
4088 4088
4089 sh = get_active_stripe(conf, sector, 0, 1); 4089 sh = get_active_stripe(conf, sector, 0, 1, 0);
4090 4090
4091 if (!sh) { 4091 if (!sh) {
4092 /* failed to get a stripe - must wait */ 4092 /* failed to get a stripe - must wait */
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 60955a70d88..1bb66e1ed5a 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -216,7 +216,7 @@ config DVB_USB_TTUSB2
216 help 216 help
217 Say Y here to support the Pinnacle 400e DVB-S USB2.0 receiver. The 217 Say Y here to support the Pinnacle 400e DVB-S USB2.0 receiver. The
218 firmware protocol used by this module is similar to the one used by the 218 firmware protocol used by this module is similar to the one used by the
219 old ttusb-driver - that's why the module is called dvb-usb-ttusb2.ko. 219 old ttusb-driver - that's why the module is called dvb-usb-ttusb2.
220 220
221config DVB_USB_DTT200U 221config DVB_USB_DTT200U
222 tristate "WideView WT-200U and WT-220U (pen) DVB-T USB2.0 support (Yakumo/Hama/Typhoon/Yuan)" 222 tristate "WideView WT-200U and WT-220U (pen) DVB-T USB2.0 support (Yakumo/Hama/Typhoon/Yuan)"
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 9d48da2fb01..57835f5715f 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -758,10 +758,14 @@ config VIDEO_MX1
758 ---help--- 758 ---help---
759 This is a v4l2 driver for the i.MX1/i.MXL CMOS Sensor Interface 759 This is a v4l2 driver for the i.MX1/i.MXL CMOS Sensor Interface
760 760
761config MX3_VIDEO
762 bool
763
761config VIDEO_MX3 764config VIDEO_MX3
762 tristate "i.MX3x Camera Sensor Interface driver" 765 tristate "i.MX3x Camera Sensor Interface driver"
763 depends on VIDEO_DEV && MX3_IPU && SOC_CAMERA 766 depends on VIDEO_DEV && MX3_IPU && SOC_CAMERA
764 select VIDEOBUF_DMA_CONTIG 767 select VIDEOBUF_DMA_CONTIG
768 select MX3_VIDEO
765 ---help--- 769 ---help---
766 This is a v4l2 driver for the i.MX3x Camera Sensor Interface 770 This is a v4l2 driver for the i.MX3x Camera Sensor Interface
767 771
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index 3e6ffee8dfe..ccd47f57f42 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -181,7 +181,7 @@ static int hdpvr_submit_buffers(struct hdpvr_device *dev)
181 buff_list); 181 buff_list);
182 if (buf->status != BUFSTAT_AVAILABLE) { 182 if (buf->status != BUFSTAT_AVAILABLE) {
183 v4l2_err(&dev->v4l2_dev, 183 v4l2_err(&dev->v4l2_dev,
184 "buffer not marked as availbale\n"); 184 "buffer not marked as available\n");
185 ret = -EFAULT; 185 ret = -EFAULT;
186 goto err; 186 goto err;
187 } 187 }
diff --git a/drivers/media/video/ivtv/ivtv-queue.c b/drivers/media/video/ivtv/ivtv-queue.c
index ff7b7deded4..7fde36e6d22 100644
--- a/drivers/media/video/ivtv/ivtv-queue.c
+++ b/drivers/media/video/ivtv/ivtv-queue.c
@@ -230,7 +230,8 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
230 return -ENOMEM; 230 return -ENOMEM;
231 } 231 }
232 if (ivtv_might_use_dma(s)) { 232 if (ivtv_might_use_dma(s)) {
233 s->sg_handle = pci_map_single(itv->pdev, s->sg_dma, sizeof(struct ivtv_sg_element), s->dma); 233 s->sg_handle = pci_map_single(itv->pdev, s->sg_dma,
234 sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
234 ivtv_stream_sync_for_cpu(s); 235 ivtv_stream_sync_for_cpu(s);
235 } 236 }
236 237
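The ivtv fix above corrects a real bug, not just line wrapping: the last argument of pci_map_single() is a PCI_DMA_* direction constant, and the driver had been passing its private s->dma field instead. A hedged sketch of the correct call shape, with pdev, buf, and len as placeholders:

#include <linux/pci.h>

/* Hedged sketch: map a CPU buffer for device reads. The fourth
 * argument must be a direction constant such as PCI_DMA_TODEVICE. */
static int example_map(struct pci_dev *pdev, void *buf, size_t len,
		       dma_addr_t *handle)
{
	*handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, *handle))
		return -ENOMEM;	/* mapping failed; do not use *handle */
	return 0;
}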
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index de143deb06f..7847bbc1440 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -672,15 +672,14 @@ try_again:
672 msb->req_sg); 672 msb->req_sg);
673 673
674 if (!msb->seg_count) { 674 if (!msb->seg_count) {
675 chunk = __blk_end_request(msb->block_req, -ENOMEM, 675 chunk = __blk_end_request_cur(msb->block_req, -ENOMEM);
676 blk_rq_cur_bytes(msb->block_req));
677 continue; 676 continue;
678 } 677 }
679 678
680 t_sec = msb->block_req->sector << 9; 679 t_sec = blk_rq_pos(msb->block_req) << 9;
681 sector_div(t_sec, msb->page_size); 680 sector_div(t_sec, msb->page_size);
682 681
683 count = msb->block_req->nr_sectors << 9; 682 count = blk_rq_bytes(msb->block_req);
684 count /= msb->page_size; 683 count /= msb->page_size;
685 684
686 param.system = msb->system; 685 param.system = msb->system;
@@ -705,8 +704,8 @@ try_again:
705 return 0; 704 return 0;
706 } 705 }
707 706
708 dev_dbg(&card->dev, "elv_next\n"); 707 dev_dbg(&card->dev, "blk_fetch\n");
709 msb->block_req = elv_next_request(msb->queue); 708 msb->block_req = blk_fetch_request(msb->queue);
710 if (!msb->block_req) { 709 if (!msb->block_req) {
711 dev_dbg(&card->dev, "issue end\n"); 710 dev_dbg(&card->dev, "issue end\n");
712 return -EAGAIN; 711 return -EAGAIN;
@@ -745,7 +744,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
745 t_len *= msb->page_size; 744 t_len *= msb->page_size;
746 } 745 }
747 } else 746 } else
748 t_len = msb->block_req->nr_sectors << 9; 747 t_len = blk_rq_bytes(msb->block_req);
749 748
750 dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error); 749 dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error);
751 750
@@ -825,8 +824,8 @@ static void mspro_block_submit_req(struct request_queue *q)
825 return; 824 return;
826 825
827 if (msb->eject) { 826 if (msb->eject) {
828 while ((req = elv_next_request(q)) != NULL) 827 while ((req = blk_fetch_request(q)) != NULL)
829 __blk_end_request(req, -ENODEV, blk_rq_bytes(req)); 828 __blk_end_request_all(req, -ENODEV);
830 829
831 return; 830 return;
832 } 831 }
@@ -1243,7 +1242,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
1243 1242
1244 sprintf(msb->disk->disk_name, "mspblk%d", disk_id); 1243 sprintf(msb->disk->disk_name, "mspblk%d", disk_id);
1245 1244
1246 blk_queue_hardsect_size(msb->queue, msb->page_size); 1245 blk_queue_logical_block_size(msb->queue, msb->page_size);
1247 1246
1248 capacity = be16_to_cpu(sys_info->user_block_count); 1247 capacity = be16_to_cpu(sys_info->user_block_count);
1249 capacity *= be16_to_cpu(sys_info->block_size); 1248 capacity *= be16_to_cpu(sys_info->block_size);
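The mspro_block hunks above track the block-layer request API rework: elv_next_request() becomes blk_fetch_request(), which dequeues the request as it returns it, and the open-coded ->sector / ->nr_sectors arithmetic moves to the blk_rq_pos()/blk_rq_bytes() accessors. A minimal sketch of the eject-path drain loop under those assumptions:

#include <linux/blkdev.h>
#include <linux/kernel.h>

/* Hedged sketch: drain a queue when the media is gone, failing each
 * request in full. Caller holds the queue lock, as in a request_fn. */
static void example_drain_queue(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		pr_debug("req at sector %llu, %u bytes\n",
			 (unsigned long long)blk_rq_pos(req),
			 blk_rq_bytes(req));
		__blk_end_request_all(req, -ENODEV);
	}
}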
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
index 693e4b51135..fa9249b4971 100644
--- a/drivers/message/fusion/lsi/mpi_history.txt
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -130,7 +130,7 @@ mpi_ioc.h
130 * 08-08-01 01.02.01 Original release for v1.2 work. 130 * 08-08-01 01.02.01 Original release for v1.2 work.
131 * New format for FWVersion and ProductId in 131 * New format for FWVersion and ProductId in
132 * MSG_IOC_FACTS_REPLY and MPI_FW_HEADER. 132 * MSG_IOC_FACTS_REPLY and MPI_FW_HEADER.
133 * 08-31-01 01.02.02 Addded event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and 133 * 08-31-01 01.02.02 Added event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and
134 * related structure and defines. 134 * related structure and defines.
135 * Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED. 135 * Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED.
136 * Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE. 136 * Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE.
@@ -190,7 +190,7 @@ mpi_ioc.h
190 * 10-11-06 01.05.12 Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED. 190 * 10-11-06 01.05.12 Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED.
191 * Added MaxInitiators field to PortFacts reply. 191 * Added MaxInitiators field to PortFacts reply.
192 * Added SAS Device Status Change ReasonCode for 192 * Added SAS Device Status Change ReasonCode for
193 * asynchronous notificaiton. 193 * asynchronous notification.
194 * Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event 194 * Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event
195 * data structure. 195 * data structure.
196 * Added new ImageType values for FWDownload and FWUpload 196 * Added new ImageType values for FWDownload and FWUpload
@@ -623,7 +623,7 @@ mpi_fc.h
623 * 11-02-00 01.01.01 Original release for post 1.0 work 623 * 11-02-00 01.01.01 Original release for post 1.0 work
624 * 12-04-00 01.01.02 Added messages for Common Transport Send and 624 * 12-04-00 01.01.02 Added messages for Common Transport Send and
625 * Primitive Send. 625 * Primitive Send.
626 * 01-09-01 01.01.03 Modifed some of the new flags to have an MPI prefix 626 * 01-09-01 01.01.03 Modified some of the new flags to have an MPI prefix
627 * and modified the FcPrimitiveSend flags. 627 * and modified the FcPrimitiveSend flags.
628 * 01-25-01 01.01.04 Move InitiatorIndex in LinkServiceRsp reply to a larger 628 * 01-25-01 01.01.04 Move InitiatorIndex in LinkServiceRsp reply to a larger
629 * field. 629 * field.
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5d496a99e03..0df065275cd 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -146,7 +146,6 @@ static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
146static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS]; 146static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
147static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS]; 147static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
148 148
149static DECLARE_WAIT_QUEUE_HEAD(mpt_waitq);
150 149
151/* 150/*
152 * Driver Callback Index's 151 * Driver Callback Index's
@@ -159,7 +158,8 @@ static u8 last_drv_idx;
159 * Forward protos... 158 * Forward protos...
160 */ 159 */
161static irqreturn_t mpt_interrupt(int irq, void *bus_id); 160static irqreturn_t mpt_interrupt(int irq, void *bus_id);
162static int mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply); 161static int mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
162 MPT_FRAME_HDR *reply);
163static int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, 163static int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes,
164 u32 *req, int replyBytes, u16 *u16reply, int maxwait, 164 u32 *req, int replyBytes, u16 *u16reply, int maxwait,
165 int sleepFlag); 165 int sleepFlag);
@@ -190,9 +190,9 @@ static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
190static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum); 190static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
191static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc); 191static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
192static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc); 192static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
193static void mpt_timer_expired(unsigned long data);
194static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc); 193static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc);
195static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch); 194static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch,
195 int sleepFlag);
196static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp); 196static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
197static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag); 197static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
198static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init); 198static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
@@ -207,8 +207,8 @@ static int procmpt_iocinfo_read(char *buf, char **start, off_t offset,
207#endif 207#endif
208static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc); 208static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc);
209 209
210//int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag); 210static int ProcessEventNotification(MPT_ADAPTER *ioc,
211static int ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply, int *evHandlers); 211 EventNotificationReply_t *evReply, int *evHandlers);
212static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf); 212static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
213static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info); 213static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
214static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info); 214static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info);
@@ -277,6 +277,56 @@ mpt_get_cb_idx(MPT_DRIVER_CLASS dclass)
277} 277}
278 278
279/** 279/**
280 * mpt_is_discovery_complete - determine if discovery has completed
 281 * @ioc: per adapter instance
282 *
283 * Returns 1 when discovery completed, else zero.
284 */
285static int
286mpt_is_discovery_complete(MPT_ADAPTER *ioc)
287{
288 ConfigExtendedPageHeader_t hdr;
289 CONFIGPARMS cfg;
290 SasIOUnitPage0_t *buffer;
291 dma_addr_t dma_handle;
292 int rc = 0;
293
294 memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
295 memset(&cfg, 0, sizeof(CONFIGPARMS));
296 hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
297 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
298 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
299 cfg.cfghdr.ehdr = &hdr;
300 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
301
302 if ((mpt_config(ioc, &cfg)))
303 goto out;
304 if (!hdr.ExtPageLength)
305 goto out;
306
307 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
308 &dma_handle);
309 if (!buffer)
310 goto out;
311
312 cfg.physAddr = dma_handle;
313 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
314
315 if ((mpt_config(ioc, &cfg)))
316 goto out_free_consistent;
317
318 if (!(buffer->PhyData[0].PortFlags &
319 MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS))
320 rc = 1;
321
322 out_free_consistent:
323 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
324 buffer, dma_handle);
325 out:
326 return rc;
327}
328
329/**
280 * mpt_fault_reset_work - work performed on workq after ioc fault 330 * mpt_fault_reset_work - work performed on workq after ioc fault
281 * @work: input argument, used to derive ioc 331 * @work: input argument, used to derive ioc
282 * 332 *
@@ -290,7 +340,7 @@ mpt_fault_reset_work(struct work_struct *work)
290 int rc; 340 int rc;
291 unsigned long flags; 341 unsigned long flags;
292 342
293 if (ioc->diagPending || !ioc->active) 343 if (ioc->ioc_reset_in_progress || !ioc->active)
294 goto out; 344 goto out;
295 345
296 ioc_raw_state = mpt_GetIocState(ioc, 0); 346 ioc_raw_state = mpt_GetIocState(ioc, 0);
@@ -307,6 +357,12 @@ mpt_fault_reset_work(struct work_struct *work)
307 printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after " 357 printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
308 "reset (%04xh)\n", ioc->name, ioc_raw_state & 358 "reset (%04xh)\n", ioc->name, ioc_raw_state &
309 MPI_DOORBELL_DATA_MASK); 359 MPI_DOORBELL_DATA_MASK);
360 } else if (ioc->bus_type == SAS && ioc->sas_discovery_quiesce_io) {
361 if ((mpt_is_discovery_complete(ioc))) {
362 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "clearing "
363 "discovery_quiesce_io flag\n", ioc->name));
364 ioc->sas_discovery_quiesce_io = 0;
365 }
310 } 366 }
311 367
312 out: 368 out:
@@ -317,11 +373,11 @@ mpt_fault_reset_work(struct work_struct *work)
317 ioc = ioc->alt_ioc; 373 ioc = ioc->alt_ioc;
318 374
319 /* rearm the timer */ 375 /* rearm the timer */
320 spin_lock_irqsave(&ioc->fault_reset_work_lock, flags); 376 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
321 if (ioc->reset_work_q) 377 if (ioc->reset_work_q)
322 queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work, 378 queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
323 msecs_to_jiffies(MPT_POLLING_INTERVAL)); 379 msecs_to_jiffies(MPT_POLLING_INTERVAL));
324 spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags); 380 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
325} 381}
326 382
327 383
@@ -501,9 +557,9 @@ mpt_interrupt(int irq, void *bus_id)
501 557
502/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 558/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
503/** 559/**
504 * mpt_base_reply - MPT base driver's callback routine 560 * mptbase_reply - MPT base driver's callback routine
505 * @ioc: Pointer to MPT_ADAPTER structure 561 * @ioc: Pointer to MPT_ADAPTER structure
506 * @mf: Pointer to original MPT request frame 562 * @req: Pointer to original MPT request frame
507 * @reply: Pointer to MPT reply frame (NULL if TurboReply) 563 * @reply: Pointer to MPT reply frame (NULL if TurboReply)
508 * 564 *
509 * MPT base driver's callback routine; all base driver 565 * MPT base driver's callback routine; all base driver
@@ -514,122 +570,49 @@ mpt_interrupt(int irq, void *bus_id)
514 * should be freed, or 0 if it shouldn't. 570 * should be freed, or 0 if it shouldn't.
515 */ 571 */
516static int 572static int
517mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply) 573mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
518{ 574{
575 EventNotificationReply_t *pEventReply;
576 u8 event;
577 int evHandlers;
519 int freereq = 1; 578 int freereq = 1;
520 u8 func;
521 579
522 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply() called\n", ioc->name)); 580 switch (reply->u.hdr.Function) {
523#ifdef CONFIG_FUSION_LOGGING 581 case MPI_FUNCTION_EVENT_NOTIFICATION:
524 if ((ioc->debug_level & MPT_DEBUG_MSG_FRAME) && 582 pEventReply = (EventNotificationReply_t *)reply;
525 !(reply->u.hdr.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) { 583 evHandlers = 0;
526 dmfprintk(ioc, printk(MYIOC_s_INFO_FMT ": Original request frame (@%p) header\n", 584 ProcessEventNotification(ioc, pEventReply, &evHandlers);
527 ioc->name, mf)); 585 event = le32_to_cpu(pEventReply->Event) & 0xFF;
528 DBG_DUMP_REQUEST_FRAME_HDR(ioc, (u32 *)mf); 586 if (pEventReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)
529 }
530#endif
531
532 func = reply->u.hdr.Function;
533 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply, Function=%02Xh\n",
534 ioc->name, func));
535
536 if (func == MPI_FUNCTION_EVENT_NOTIFICATION) {
537 EventNotificationReply_t *pEvReply = (EventNotificationReply_t *) reply;
538 int evHandlers = 0;
539 int results;
540
541 results = ProcessEventNotification(ioc, pEvReply, &evHandlers);
542 if (results != evHandlers) {
543 /* CHECKME! Any special handling needed here? */
544 devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "Called %d event handlers, sum results = %d\n",
545 ioc->name, evHandlers, results));
546 }
547
548 /*
549 * Hmmm... It seems that EventNotificationReply is an exception
550 * to the rule of one reply per request.
551 */
552 if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) {
553 freereq = 0; 587 freereq = 0;
554 } else { 588 if (event != MPI_EVENT_EVENT_CHANGE)
555 devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n", 589 break;
556 ioc->name, pEvReply)); 590 case MPI_FUNCTION_CONFIG:
557 } 591 case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
558 592 ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
559#ifdef CONFIG_PROC_FS 593 if (reply) {
560// LogEvent(ioc, pEvReply); 594 ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
561#endif 595 memcpy(ioc->mptbase_cmds.reply, reply,
562 596 min(MPT_DEFAULT_FRAME_SIZE,
563 } else if (func == MPI_FUNCTION_EVENT_ACK) { 597 4 * reply->u.reply.MsgLength));
564 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply, EventAck reply received\n",
565 ioc->name));
566 } else if (func == MPI_FUNCTION_CONFIG) {
567 CONFIGPARMS *pCfg;
568 unsigned long flags;
569
570 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "config_complete (mf=%p,mr=%p)\n",
571 ioc->name, mf, reply));
572
573 pCfg = * ((CONFIGPARMS **)((u8 *) mf + ioc->req_sz - sizeof(void *)));
574
575 if (pCfg) {
576 /* disable timer and remove from linked list */
577 del_timer(&pCfg->timer);
578
579 spin_lock_irqsave(&ioc->FreeQlock, flags);
580 list_del(&pCfg->linkage);
581 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
582
583 /*
584 * If IOC Status is SUCCESS, save the header
585 * and set the status code to GOOD.
586 */
587 pCfg->status = MPT_CONFIG_ERROR;
588 if (reply) {
589 ConfigReply_t *pReply = (ConfigReply_t *)reply;
590 u16 status;
591
592 status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
593 dcprintk(ioc, printk(MYIOC_s_NOTE_FMT " IOCStatus=%04xh, IOCLogInfo=%08xh\n",
594 ioc->name, status, le32_to_cpu(pReply->IOCLogInfo)));
595
596 pCfg->status = status;
597 if (status == MPI_IOCSTATUS_SUCCESS) {
598 if ((pReply->Header.PageType &
599 MPI_CONFIG_PAGETYPE_MASK) ==
600 MPI_CONFIG_PAGETYPE_EXTENDED) {
601 pCfg->cfghdr.ehdr->ExtPageLength =
602 le16_to_cpu(pReply->ExtPageLength);
603 pCfg->cfghdr.ehdr->ExtPageType =
604 pReply->ExtPageType;
605 }
606 pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
607
608 /* If this is a regular header, save PageLength. */
609 /* LMP Do this better so not using a reserved field! */
610 pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
611 pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
612 pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
613 }
614 }
615
616 /*
617 * Wake up the original calling thread
618 */
619 pCfg->wait_done = 1;
620 wake_up(&mpt_waitq);
621 } 598 }
622 } else if (func == MPI_FUNCTION_SAS_IO_UNIT_CONTROL) { 599 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
623 /* we should be always getting a reply frame */ 600 ioc->mptbase_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
624 memcpy(ioc->persist_reply_frame, reply, 601 complete(&ioc->mptbase_cmds.done);
625 min(MPT_DEFAULT_FRAME_SIZE, 602 } else
626 4*reply->u.reply.MsgLength)); 603 freereq = 0;
627 del_timer(&ioc->persist_timer); 604 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_FREE_MF)
628 ioc->persist_wait_done = 1; 605 freereq = 1;
629 wake_up(&mpt_waitq); 606 break;
630 } else { 607 case MPI_FUNCTION_EVENT_ACK:
631 printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n", 608 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
632 ioc->name, func); 609 "EventAck reply received\n", ioc->name));
610 break;
611 default:
612 printk(MYIOC_s_ERR_FMT
613 "Unexpected msg function (=%02Xh) reply received!\n",
614 ioc->name, reply->u.hdr.Function);
615 break;
633 } 616 }
634 617
635 /* 618 /*
@@ -988,17 +971,21 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
988 971
989 /* Put Request back on FreeQ! */ 972 /* Put Request back on FreeQ! */
990 spin_lock_irqsave(&ioc->FreeQlock, flags); 973 spin_lock_irqsave(&ioc->FreeQlock, flags);
991 mf->u.frame.linkage.arg1 = 0xdeadbeaf; /* signature to know if this mf is freed */ 974 if (cpu_to_le32(mf->u.frame.linkage.arg1) == 0xdeadbeaf)
975 goto out;
976 /* signature to know if this mf is freed */
977 mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf);
992 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ); 978 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
993#ifdef MFCNT 979#ifdef MFCNT
994 ioc->mfcnt--; 980 ioc->mfcnt--;
995#endif 981#endif
982 out:
996 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 983 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
997} 984}
998 985
999/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 986/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1000/** 987/**
1001 * mpt_add_sge - Place a simple SGE at address pAddr. 988 * mpt_add_sge - Place a simple 32 bit SGE at address pAddr.
1002 * @pAddr: virtual address for SGE 989 * @pAddr: virtual address for SGE
1003 * @flagslength: SGE flags and data transfer length 990 * @flagslength: SGE flags and data transfer length
1004 * @dma_addr: Physical address 991 * @dma_addr: Physical address
@@ -1006,23 +993,116 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
1006 * This routine places a MPT request frame back on the MPT adapter's 993 * This routine places a MPT request frame back on the MPT adapter's
1007 * FreeQ. 994 * FreeQ.
1008 */ 995 */
1009void 996static void
1010mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr) 997mpt_add_sge(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
1011{ 998{
1012 if (sizeof(dma_addr_t) == sizeof(u64)) { 999 SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
1013 SGESimple64_t *pSge = (SGESimple64_t *) pAddr; 1000 pSge->FlagsLength = cpu_to_le32(flagslength);
1001 pSge->Address = cpu_to_le32(dma_addr);
1002}
1003
1004/**
1005 * mpt_add_sge_64bit - Place a simple 64 bit SGE at address pAddr.
1006 * @pAddr: virtual address for SGE
1007 * @flagslength: SGE flags and data transfer length
1008 * @dma_addr: Physical address
1009 *
 1010 * This routine writes a simple 64 bit SGE (flags/length word plus a
 1011 * split high/low address) at the given virtual address.
1012 **/
1013static void
1014mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
1015{
1016 SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
1017 pSge->Address.Low = cpu_to_le32
1018 (lower_32_bits((unsigned long)(dma_addr)));
1019 pSge->Address.High = cpu_to_le32
1020 (upper_32_bits((unsigned long)dma_addr));
1021 pSge->FlagsLength = cpu_to_le32
1022 ((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
1023}
1024
1025/**
1026 * mpt_add_sge_64bit_1078 - Place a simple 64 bit SGE at address pAddr (1078 workaround).
1027 * @pAddr: virtual address for SGE
1028 * @flagslength: SGE flags and data transfer length
1029 * @dma_addr: Physical address
1030 *
 1031 * This routine writes a simple 64 bit SGE at pAddr, applying the
 1032 * LSI 1078 P0M2 errata workaround for the 36GB address region.
1033 **/
1034static void
1035mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
1036{
1037 SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
1038 u32 tmp;
1039
1040 pSge->Address.Low = cpu_to_le32
1041 (lower_32_bits((unsigned long)(dma_addr)));
1042 tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
1043
1044 /*
1045 * 1078 errata workaround for the 36GB limitation
1046 */
1047 if ((((u64)dma_addr + MPI_SGE_LENGTH(flagslength)) >> 32) == 9) {
1048 flagslength |=
1049 MPI_SGE_SET_FLAGS(MPI_SGE_FLAGS_LOCAL_ADDRESS);
1050 tmp |= (1<<31);
1051 if (mpt_debug_level & MPT_DEBUG_36GB_MEM)
1052 printk(KERN_DEBUG "1078 P0M2 addressing for "
1053 "addr = 0x%llx len = %d\n",
1054 (unsigned long long)dma_addr,
1055 MPI_SGE_LENGTH(flagslength));
1056 }
1057
1058 pSge->Address.High = cpu_to_le32(tmp);
1059 pSge->FlagsLength = cpu_to_le32(
1060 (flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
1061}
1062
1063/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1064/**
1065 * mpt_add_chain - Place a 32 bit chain SGE at address pAddr.
1066 * @pAddr: virtual address for SGE
1067 * @next: nextChainOffset value (u32's)
1068 * @length: length of next SGL segment
1069 * @dma_addr: Physical address
1070 *
1071 */
1072static void
1073mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
1074{
1075 SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
1076 pChain->Length = cpu_to_le16(length);
1077 pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1078 pChain->NextChainOffset = next;
1079 pChain->Address = cpu_to_le32(dma_addr);
1080}
1081
1082/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1083/**
1084 * mpt_add_chain_64bit - Place a 64 bit chain SGE at address pAddr.
1085 * @pAddr: virtual address for SGE
1086 * @next: nextChainOffset value (u32's)
1087 * @length: length of next SGL segment
1088 * @dma_addr: Physical address
1089 *
1090 */
1091static void
1092mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
1093{
1094 SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
1014 u32 tmp = dma_addr & 0xFFFFFFFF; 1095 u32 tmp = dma_addr & 0xFFFFFFFF;
1015 1096
1016 pSge->FlagsLength = cpu_to_le32(flagslength); 1097 pChain->Length = cpu_to_le16(length);
1017 pSge->Address.Low = cpu_to_le32(tmp); 1098 pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
1018 tmp = (u32) ((u64)dma_addr >> 32); 1099 MPI_SGE_FLAGS_64_BIT_ADDRESSING);
1019 pSge->Address.High = cpu_to_le32(tmp);
1020 1100
1021 } else { 1101 pChain->NextChainOffset = next;
1022 SGESimple32_t *pSge = (SGESimple32_t *) pAddr; 1102
1023 pSge->FlagsLength = cpu_to_le32(flagslength); 1103 pChain->Address.Low = cpu_to_le32(tmp);
1024 pSge->Address = cpu_to_le32(dma_addr); 1104 tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
1025 } 1105 pChain->Address.High = cpu_to_le32(tmp);
1026} 1106}
1027 1107
1028/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1108/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
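The helpers above replace the old sizeof(dma_addr_t) branch inside mpt_add_sge() with per-adapter function pointers (ioc->add_sge, ioc->add_chain), selected once at attach and resume time and called through on the hot path. A hedged, self-contained miniature of that dispatch; struct sge_ops stands in for the relevant MPT_ADAPTER fields.

#include <linux/types.h>

/* Hedged miniature: pick the SGE writer once, based on the negotiated
 * DMA mask and the 1078 errata, then call through the pointer. */
typedef void (*add_sge_fn)(void *pAddr, u32 flagslength, dma_addr_t dma_addr);

struct sge_ops {
	add_sge_fn add_sge;	/* stand-in for ioc->add_sge */
	int sg_addr_size;	/* 4 or 8 byte SGE addresses */
};

static void example_setup_sge_ops(struct sge_ops *ops, bool dma64,
				  bool is_1078)
{
	if (dma64) {
		ops->add_sge = is_1078 ? mpt_add_sge_64bit_1078
				       : mpt_add_sge_64bit;
		ops->sg_addr_size = 8;
	} else {
		ops->add_sge = mpt_add_sge;
		ops->sg_addr_size = 4;
	}
}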
@@ -1225,7 +1305,7 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
1225 } 1305 }
1226 flags_length = flags_length << MPI_SGE_FLAGS_SHIFT; 1306 flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
1227 flags_length |= ioc->HostPageBuffer_sz; 1307 flags_length |= ioc->HostPageBuffer_sz;
1228 mpt_add_sge(psge, flags_length, ioc->HostPageBuffer_dma); 1308 ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
1229 ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE; 1309 ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
1230 1310
1231return 0; 1311return 0;
@@ -1534,21 +1614,42 @@ mpt_mapresources(MPT_ADAPTER *ioc)
1534 1614
1535 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); 1615 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
1536 1616
1537 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) 1617 if (sizeof(dma_addr_t) > 4) {
1538 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 1618 const uint64_t required_mask = dma_get_required_mask
1539 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT 1619 (&pdev->dev);
1540 ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", 1620 if (required_mask > DMA_BIT_MASK(32)
1541 ioc->name)); 1621 && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
1542 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) 1622 && !pci_set_consistent_dma_mask(pdev,
1543 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { 1623 DMA_BIT_MASK(64))) {
1544 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT 1624 ioc->dma_mask = DMA_BIT_MASK(64);
1545 ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", 1625 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
1546 ioc->name)); 1626 ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
1627 ioc->name));
1628 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1629 && !pci_set_consistent_dma_mask(pdev,
1630 DMA_BIT_MASK(32))) {
1631 ioc->dma_mask = DMA_BIT_MASK(32);
1632 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
1633 ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
1634 ioc->name));
1635 } else {
1636 printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
1637 ioc->name, pci_name(pdev));
1638 return r;
1639 }
1547 } else { 1640 } else {
1548 printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n", 1641 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1549 ioc->name, pci_name(pdev)); 1642 && !pci_set_consistent_dma_mask(pdev,
1550 pci_release_selected_regions(pdev, ioc->bars); 1643 DMA_BIT_MASK(32))) {
1551 return r; 1644 ioc->dma_mask = DMA_BIT_MASK(32);
1645 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
1646 ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
1647 ioc->name));
1648 } else {
1649 printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
1650 ioc->name, pci_name(pdev));
1651 return r;
1652 }
1552 } 1653 }
1553 1654
1554 mem_phys = msize = 0; 1655 mem_phys = msize = 0;
@@ -1632,6 +1733,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1632 1733
1633 ioc->id = mpt_ids++; 1734 ioc->id = mpt_ids++;
1634 sprintf(ioc->name, "ioc%d", ioc->id); 1735 sprintf(ioc->name, "ioc%d", ioc->id);
1736 dinitprintk(ioc, printk(KERN_WARNING MYNAM ": mpt_adapter_install\n"));
1635 1737
1636 /* 1738 /*
1637 * set initial debug level 1739 * set initial debug level
@@ -1650,14 +1752,36 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1650 return r; 1752 return r;
1651 } 1753 }
1652 1754
1755 /*
1756 * Setting up proper handlers for scatter gather handling
1757 */
1758 if (ioc->dma_mask == DMA_BIT_MASK(64)) {
1759 if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
1760 ioc->add_sge = &mpt_add_sge_64bit_1078;
1761 else
1762 ioc->add_sge = &mpt_add_sge_64bit;
1763 ioc->add_chain = &mpt_add_chain_64bit;
1764 ioc->sg_addr_size = 8;
1765 } else {
1766 ioc->add_sge = &mpt_add_sge;
1767 ioc->add_chain = &mpt_add_chain;
1768 ioc->sg_addr_size = 4;
1769 }
1770 ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
1771
1653 ioc->alloc_total = sizeof(MPT_ADAPTER); 1772 ioc->alloc_total = sizeof(MPT_ADAPTER);
1654 ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */ 1773 ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */
1655 ioc->reply_sz = MPT_REPLY_FRAME_SIZE; 1774 ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
1656 1775
1657 ioc->pcidev = pdev; 1776 ioc->pcidev = pdev;
1658 ioc->diagPending = 0; 1777
1659 spin_lock_init(&ioc->diagLock); 1778 spin_lock_init(&ioc->taskmgmt_lock);
1660 spin_lock_init(&ioc->initializing_hba_lock); 1779 mutex_init(&ioc->internal_cmds.mutex);
1780 init_completion(&ioc->internal_cmds.done);
1781 mutex_init(&ioc->mptbase_cmds.mutex);
1782 init_completion(&ioc->mptbase_cmds.done);
1783 mutex_init(&ioc->taskmgmt_cmds.mutex);
1784 init_completion(&ioc->taskmgmt_cmds.done);
1661 1785
1662 /* Initialize the event logging. 1786 /* Initialize the event logging.
1663 */ 1787 */
@@ -1670,16 +1794,13 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1670 ioc->mfcnt = 0; 1794 ioc->mfcnt = 0;
1671#endif 1795#endif
1672 1796
1797 ioc->sh = NULL;
1673 ioc->cached_fw = NULL; 1798 ioc->cached_fw = NULL;
1674 1799
 1675 /* Initialize SCSI Config Data structure 1800 /* Initialize SCSI Config Data structure
1676 */ 1801 */
1677 memset(&ioc->spi_data, 0, sizeof(SpiCfgData)); 1802 memset(&ioc->spi_data, 0, sizeof(SpiCfgData));
1678 1803
1679 /* Initialize the running configQ head.
1680 */
1681 INIT_LIST_HEAD(&ioc->configQ);
1682
1683 /* Initialize the fc rport list head. 1804 /* Initialize the fc rport list head.
1684 */ 1805 */
1685 INIT_LIST_HEAD(&ioc->fc_rports); 1806 INIT_LIST_HEAD(&ioc->fc_rports);
@@ -1690,9 +1811,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1690 1811
1691 /* Initialize workqueue */ 1812 /* Initialize workqueue */
1692 INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work); 1813 INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
1693 spin_lock_init(&ioc->fault_reset_work_lock);
1694 1814
1695 snprintf(ioc->reset_work_q_name, sizeof(ioc->reset_work_q_name), 1815 snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
1696 "mpt_poll_%d", ioc->id); 1816 "mpt_poll_%d", ioc->id);
1697 ioc->reset_work_q = 1817 ioc->reset_work_q =
1698 create_singlethread_workqueue(ioc->reset_work_q_name); 1818 create_singlethread_workqueue(ioc->reset_work_q_name);
@@ -1767,11 +1887,14 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1767 case MPI_MANUFACTPAGE_DEVID_SAS1064: 1887 case MPI_MANUFACTPAGE_DEVID_SAS1064:
1768 case MPI_MANUFACTPAGE_DEVID_SAS1068: 1888 case MPI_MANUFACTPAGE_DEVID_SAS1068:
1769 ioc->errata_flag_1064 = 1; 1889 ioc->errata_flag_1064 = 1;
1890 ioc->bus_type = SAS;
1891 break;
1770 1892
1771 case MPI_MANUFACTPAGE_DEVID_SAS1064E: 1893 case MPI_MANUFACTPAGE_DEVID_SAS1064E:
1772 case MPI_MANUFACTPAGE_DEVID_SAS1068E: 1894 case MPI_MANUFACTPAGE_DEVID_SAS1068E:
1773 case MPI_MANUFACTPAGE_DEVID_SAS1078: 1895 case MPI_MANUFACTPAGE_DEVID_SAS1078:
1774 ioc->bus_type = SAS; 1896 ioc->bus_type = SAS;
1897 break;
1775 } 1898 }
1776 1899
1777 1900
@@ -1813,6 +1936,11 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1813 */ 1936 */
1814 mpt_detect_bound_ports(ioc, pdev); 1937 mpt_detect_bound_ports(ioc, pdev);
1815 1938
1939 INIT_LIST_HEAD(&ioc->fw_event_list);
1940 spin_lock_init(&ioc->fw_event_lock);
1941 snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
1942 ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name);
1943
1816 if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP, 1944 if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
1817 CAN_SLEEP)) != 0){ 1945 CAN_SLEEP)) != 0){
1818 printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n", 1946 printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n",
@@ -1885,13 +2013,18 @@ mpt_detach(struct pci_dev *pdev)
1885 /* 2013 /*
1886 * Stop polling ioc for fault condition 2014 * Stop polling ioc for fault condition
1887 */ 2015 */
1888 spin_lock_irqsave(&ioc->fault_reset_work_lock, flags); 2016 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
1889 wq = ioc->reset_work_q; 2017 wq = ioc->reset_work_q;
1890 ioc->reset_work_q = NULL; 2018 ioc->reset_work_q = NULL;
1891 spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags); 2019 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
1892 cancel_delayed_work(&ioc->fault_reset_work); 2020 cancel_delayed_work(&ioc->fault_reset_work);
1893 destroy_workqueue(wq); 2021 destroy_workqueue(wq);
1894 2022
2023 spin_lock_irqsave(&ioc->fw_event_lock, flags);
2024 wq = ioc->fw_event_q;
2025 ioc->fw_event_q = NULL;
2026 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
2027 destroy_workqueue(wq);
1895 2028
1896 sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name); 2029 sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
1897 remove_proc_entry(pname, NULL); 2030 remove_proc_entry(pname, NULL);
@@ -1994,6 +2127,21 @@ mpt_resume(struct pci_dev *pdev)
1994 if (err) 2127 if (err)
1995 return err; 2128 return err;
1996 2129
2130 if (ioc->dma_mask == DMA_BIT_MASK(64)) {
2131 if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
2132 ioc->add_sge = &mpt_add_sge_64bit_1078;
2133 else
2134 ioc->add_sge = &mpt_add_sge_64bit;
2135 ioc->add_chain = &mpt_add_chain_64bit;
2136 ioc->sg_addr_size = 8;
2137 } else {
2138
2139 ioc->add_sge = &mpt_add_sge;
2140 ioc->add_chain = &mpt_add_chain;
2141 ioc->sg_addr_size = 4;
2142 }
2143 ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
2144
1997 printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n", 2145 printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n",
1998 ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT), 2146 ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT),
1999 CHIPREG_READ32(&ioc->chip->Doorbell)); 2147 CHIPREG_READ32(&ioc->chip->Doorbell));
@@ -2091,12 +2239,16 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2091 ioc->active = 0; 2239 ioc->active = 0;
2092 2240
2093 if (ioc->alt_ioc) { 2241 if (ioc->alt_ioc) {
2094 if (ioc->alt_ioc->active) 2242 if (ioc->alt_ioc->active ||
2243 reason == MPT_HOSTEVENT_IOC_RECOVER) {
2095 reset_alt_ioc_active = 1; 2244 reset_alt_ioc_active = 1;
2096 2245 /* Disable alt-IOC's reply interrupts
2097 /* Disable alt-IOC's reply interrupts (and FreeQ) for a bit ... */ 2246 * (and FreeQ) for a bit
2098 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, 0xFFFFFFFF); 2247 **/
2099 ioc->alt_ioc->active = 0; 2248 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
2249 0xFFFFFFFF);
2250 ioc->alt_ioc->active = 0;
2251 }
2100 } 2252 }
2101 2253
2102 hard = 1; 2254 hard = 1;
@@ -2117,9 +2269,11 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2117 } 2269 }
2118 2270
2119 } else { 2271 } else {
2120 printk(MYIOC_s_WARN_FMT "NOT READY!\n", ioc->name); 2272 printk(MYIOC_s_WARN_FMT
2273 "NOT READY WARNING!\n", ioc->name);
2121 } 2274 }
2122 return -1; 2275 ret = -1;
2276 goto out;
2123 } 2277 }
2124 2278
2125 /* hard_reset_done = 0 if a soft reset was performed 2279 /* hard_reset_done = 0 if a soft reset was performed
@@ -2129,7 +2283,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2129 if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0) 2283 if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0)
2130 alt_ioc_ready = 1; 2284 alt_ioc_ready = 1;
2131 else 2285 else
2132 printk(MYIOC_s_WARN_FMT "alt_ioc not ready!\n", ioc->alt_ioc->name); 2286 printk(MYIOC_s_WARN_FMT
2287 ": alt-ioc Not ready WARNING!\n",
2288 ioc->alt_ioc->name);
2133 } 2289 }
2134 2290
2135 for (ii=0; ii<5; ii++) { 2291 for (ii=0; ii<5; ii++) {
@@ -2150,7 +2306,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2150 if (alt_ioc_ready) { 2306 if (alt_ioc_ready) {
2151 if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) { 2307 if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) {
2152 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2308 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2153 "Initial Alt IocFacts failed rc=%x\n", ioc->name, rc)); 2309 "Initial Alt IocFacts failed rc=%x\n",
2310 ioc->name, rc));
2154 /* Retry - alt IOC was initialized once 2311 /* Retry - alt IOC was initialized once
2155 */ 2312 */
2156 rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason); 2313 rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason);
@@ -2194,16 +2351,20 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2194 IRQF_SHARED, ioc->name, ioc); 2351 IRQF_SHARED, ioc->name, ioc);
2195 if (rc < 0) { 2352 if (rc < 0) {
2196 printk(MYIOC_s_ERR_FMT "Unable to allocate " 2353 printk(MYIOC_s_ERR_FMT "Unable to allocate "
2197 "interrupt %d!\n", ioc->name, ioc->pcidev->irq); 2354 "interrupt %d!\n",
2355 ioc->name, ioc->pcidev->irq);
2198 if (ioc->msi_enable) 2356 if (ioc->msi_enable)
2199 pci_disable_msi(ioc->pcidev); 2357 pci_disable_msi(ioc->pcidev);
2200 return -EBUSY; 2358 ret = -EBUSY;
2359 goto out;
2201 } 2360 }
2202 irq_allocated = 1; 2361 irq_allocated = 1;
2203 ioc->pci_irq = ioc->pcidev->irq; 2362 ioc->pci_irq = ioc->pcidev->irq;
2204 pci_set_master(ioc->pcidev); /* ?? */ 2363 pci_set_master(ioc->pcidev); /* ?? */
2205 dprintk(ioc, printk(MYIOC_s_INFO_FMT "installed at interrupt " 2364 pci_set_drvdata(ioc->pcidev, ioc);
2206 "%d\n", ioc->name, ioc->pcidev->irq)); 2365 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
2366 "installed at interrupt %d\n", ioc->name,
2367 ioc->pcidev->irq));
2207 } 2368 }
2208 } 2369 }
2209 2370
@@ -2212,17 +2373,22 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2212 * init as upper addresses are needed for init. 2373 * init as upper addresses are needed for init.
2213 * If fails, continue with alt-ioc processing 2374 * If fails, continue with alt-ioc processing
2214 */ 2375 */
2376 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "PrimeIocFifos\n",
2377 ioc->name));
2215 if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0)) 2378 if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0))
2216 ret = -3; 2379 ret = -3;
2217 2380
2218 /* May need to check/upload firmware & data here! 2381 /* May need to check/upload firmware & data here!
2219 * If fails, continue with alt-ioc processing 2382 * If fails, continue with alt-ioc processing
2220 */ 2383 */
2384 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "SendIocInit\n",
2385 ioc->name));
2221 if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0)) 2386 if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0))
2222 ret = -4; 2387 ret = -4;
2223// NEW! 2388// NEW!
2224 if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) { 2389 if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) {
2225 printk(MYIOC_s_WARN_FMT ": alt_ioc (%d) FIFO mgmt alloc!\n", 2390 printk(MYIOC_s_WARN_FMT
2391 ": alt-ioc (%d) FIFO mgmt alloc WARNING!\n",
2226 ioc->alt_ioc->name, rc); 2392 ioc->alt_ioc->name, rc);
2227 alt_ioc_ready = 0; 2393 alt_ioc_ready = 0;
2228 reset_alt_ioc_active = 0; 2394 reset_alt_ioc_active = 0;
@@ -2232,8 +2398,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2232 if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) { 2398 if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) {
2233 alt_ioc_ready = 0; 2399 alt_ioc_ready = 0;
2234 reset_alt_ioc_active = 0; 2400 reset_alt_ioc_active = 0;
2235 printk(MYIOC_s_WARN_FMT "alt_ioc (%d) init failure!\n", 2401 printk(MYIOC_s_WARN_FMT
2236 ioc->alt_ioc->name, rc); 2402 ": alt-ioc: (%d) init failure WARNING!\n",
2403 ioc->alt_ioc->name, rc);
2237 } 2404 }
2238 } 2405 }
2239 2406
@@ -2269,28 +2436,36 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2269 } 2436 }
2270 } 2437 }
2271 2438
2439 /* Enable MPT base driver management of EventNotification
2440 * and EventAck handling.
2441 */
2442 if ((ret == 0) && (!ioc->facts.EventState)) {
2443 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
2444 "SendEventNotification\n",
2445 ioc->name));
2446 ret = SendEventNotification(ioc, 1, sleepFlag); /* 1=Enable */
2447 }
2448
2449 if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
2450 rc = SendEventNotification(ioc->alt_ioc, 1, sleepFlag);
2451
2272 if (ret == 0) { 2452 if (ret == 0) {
2273 /* Enable! (reply interrupt) */ 2453 /* Enable! (reply interrupt) */
2274 CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM); 2454 CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
2275 ioc->active = 1; 2455 ioc->active = 1;
2276 } 2456 }
2277 2457 if (rc == 0) { /* alt ioc */
2278 if (reset_alt_ioc_active && ioc->alt_ioc) { 2458 if (reset_alt_ioc_active && ioc->alt_ioc) {
2279 /* (re)Enable alt-IOC! (reply interrupt) */ 2459 /* (re)Enable alt-IOC! (reply interrupt) */
2280 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "alt_ioc reply irq re-enabled\n", 2460 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "alt-ioc"
2281 ioc->alt_ioc->name)); 2461 "reply irq re-enabled\n",
2282 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, MPI_HIM_DIM); 2462 ioc->alt_ioc->name));
2283 ioc->alt_ioc->active = 1; 2463 CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
2464 MPI_HIM_DIM);
2465 ioc->alt_ioc->active = 1;
2466 }
2284 } 2467 }
2285 2468
2286 /* Enable MPT base driver management of EventNotification
2287 * and EventAck handling.
2288 */
2289 if ((ret == 0) && (!ioc->facts.EventState))
2290 (void) SendEventNotification(ioc, 1); /* 1=Enable EventNotification */
2291
2292 if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
2293 (void) SendEventNotification(ioc->alt_ioc, 1); /* 1=Enable EventNotification */
2294 2469
2295 /* Add additional "reason" check before call to GetLanConfigPages 2470 /* Add additional "reason" check before call to GetLanConfigPages
2296 * (combined with GetIoUnitPage2 call). This prevents a somewhat 2471 * (combined with GetIoUnitPage2 call). This prevents a somewhat
@@ -2306,8 +2481,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2306 mutex_init(&ioc->raid_data.inactive_list_mutex); 2481 mutex_init(&ioc->raid_data.inactive_list_mutex);
2307 INIT_LIST_HEAD(&ioc->raid_data.inactive_list); 2482 INIT_LIST_HEAD(&ioc->raid_data.inactive_list);
2308 2483
2309 if (ioc->bus_type == SAS) { 2484 switch (ioc->bus_type) {
2310 2485
2486 case SAS:
2311 /* clear persistency table */ 2487 /* clear persistency table */
2312 if(ioc->facts.IOCExceptions & 2488 if(ioc->facts.IOCExceptions &
2313 MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) { 2489 MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
@@ -2321,8 +2497,15 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2321 */ 2497 */
2322 mpt_findImVolumes(ioc); 2498 mpt_findImVolumes(ioc);
2323 2499
2324 } else if (ioc->bus_type == FC) { 2500 /* Check, and possibly reset, the coalescing value
2325 if ((ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) && 2501 */
2502 mpt_read_ioc_pg_1(ioc);
2503
2504 break;
2505
2506 case FC:
2507 if ((ioc->pfacts[0].ProtocolFlags &
2508 MPI_PORTFACTS_PROTOCOL_LAN) &&
2326 (ioc->lan_cnfg_page0.Header.PageLength == 0)) { 2509 (ioc->lan_cnfg_page0.Header.PageLength == 0)) {
2327 /* 2510 /*
2328 * Pre-fetch the ports LAN MAC address! 2511 * Pre-fetch the ports LAN MAC address!
@@ -2331,11 +2514,14 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2331 (void) GetLanConfigPages(ioc); 2514 (void) GetLanConfigPages(ioc);
2332 a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow; 2515 a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
2333 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2516 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2334 "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n", 2517 "LanAddr = %02X:%02X:%02X"
2335 ioc->name, a[5], a[4], a[3], a[2], a[1], a[0])); 2518 ":%02X:%02X:%02X\n",
2336 2519 ioc->name, a[5], a[4],
2520 a[3], a[2], a[1], a[0]));
2337 } 2521 }
2338 } else { 2522 break;
2523
2524 case SPI:
2339 /* Get NVRAM and adapter maximums from SPP 0 and 2 2525 /* Get NVRAM and adapter maximums from SPP 0 and 2
2340 */ 2526 */
2341 mpt_GetScsiPortSettings(ioc, 0); 2527 mpt_GetScsiPortSettings(ioc, 0);
@@ -2354,6 +2540,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
2354 mpt_read_ioc_pg_1(ioc); 2540 mpt_read_ioc_pg_1(ioc);
2355 2541
2356 mpt_read_ioc_pg_4(ioc); 2542 mpt_read_ioc_pg_4(ioc);
2543
2544 break;
2357 } 2545 }
2358 2546
2359 GetIoUnitPage2(ioc); 2547 GetIoUnitPage2(ioc);
@@ -2435,16 +2623,20 @@ mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
2435 if (_pcidev == peer) { 2623 if (_pcidev == peer) {
2436 /* Paranoia checks */ 2624 /* Paranoia checks */
2437 if (ioc->alt_ioc != NULL) { 2625 if (ioc->alt_ioc != NULL) {
2438 printk(MYIOC_s_WARN_FMT "Oops, already bound to %s!\n", 2626 printk(MYIOC_s_WARN_FMT
2439 ioc->name, ioc->alt_ioc->name); 2627 "Oops, already bound (%s <==> %s)!\n",
2628 ioc->name, ioc->name, ioc->alt_ioc->name);
2440 break; 2629 break;
2441 } else if (ioc_srch->alt_ioc != NULL) { 2630 } else if (ioc_srch->alt_ioc != NULL) {
2442 printk(MYIOC_s_WARN_FMT "Oops, already bound to %s!\n", 2631 printk(MYIOC_s_WARN_FMT
2443 ioc_srch->name, ioc_srch->alt_ioc->name); 2632 "Oops, already bound (%s <==> %s)!\n",
2633 ioc_srch->name, ioc_srch->name,
2634 ioc_srch->alt_ioc->name);
2444 break; 2635 break;
2445 } 2636 }
2446 dprintk(ioc, printk(MYIOC_s_INFO_FMT "FOUND! binding to %s\n", 2637 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2447 ioc->name, ioc_srch->name)); 2638 "FOUND! binding %s <==> %s\n",
2639 ioc->name, ioc->name, ioc_srch->name));
2448 ioc_srch->alt_ioc = ioc; 2640 ioc_srch->alt_ioc = ioc;
2449 ioc->alt_ioc = ioc_srch; 2641 ioc->alt_ioc = ioc_srch;
2450 } 2642 }
@@ -2464,8 +2656,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
2464 int ret; 2656 int ret;
2465 2657
2466 if (ioc->cached_fw != NULL) { 2658 if (ioc->cached_fw != NULL) {
2467 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Pushing FW onto " 2659 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2468 "adapter\n", __func__, ioc->name)); 2660 "%s: Pushing FW onto adapter\n", __func__, ioc->name));
2469 if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *) 2661 if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
2470 ioc->cached_fw, CAN_SLEEP)) < 0) { 2662 ioc->cached_fw, CAN_SLEEP)) < 0) {
2471 printk(MYIOC_s_WARN_FMT 2663 printk(MYIOC_s_WARN_FMT
@@ -2474,11 +2666,30 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
2474 } 2666 }
2475 } 2667 }
2476 2668
2669 /*
	2670	 * Put the controller into ready state (if it's not already)
2671 */
2672 if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) {
2673 if (!SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET,
2674 CAN_SLEEP)) {
2675 if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY)
2676 printk(MYIOC_s_ERR_FMT "%s: IOC msg unit "
2677 "reset failed to put ioc in ready state!\n",
2678 ioc->name, __func__);
2679 } else
2680 printk(MYIOC_s_ERR_FMT "%s: IOC msg unit reset "
2681 "failed!\n", ioc->name, __func__);
2682 }
2683
2684
2477 /* Disable adapter interrupts! */ 2685 /* Disable adapter interrupts! */
2686 synchronize_irq(ioc->pcidev->irq);
2478 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF); 2687 CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
2479 ioc->active = 0; 2688 ioc->active = 0;
2689
2480 /* Clear any lingering interrupt */ 2690 /* Clear any lingering interrupt */
2481 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 2691 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
2692 CHIPREG_READ32(&ioc->chip->IntStatus);
2482 2693
2483 if (ioc->alloc != NULL) { 2694 if (ioc->alloc != NULL) {
2484 sz = ioc->alloc_sz; 2695 sz = ioc->alloc_sz;
@@ -2538,19 +2749,22 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
2538 if((ret = mpt_host_page_access_control(ioc, 2749 if((ret = mpt_host_page_access_control(ioc,
2539 MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) { 2750 MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) {
2540 printk(MYIOC_s_ERR_FMT 2751 printk(MYIOC_s_ERR_FMT
2541 "host page buffers free failed (%d)!\n", 2752 ": %s: host page buffers free failed (%d)!\n",
2542 ioc->name, ret); 2753 ioc->name, __func__, ret);
2543 } 2754 }
2544 dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "HostPageBuffer free @ %p, sz=%d bytes\n", 2755 dexitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2545 ioc->name, ioc->HostPageBuffer, ioc->HostPageBuffer_sz)); 2756 "HostPageBuffer free @ %p, sz=%d bytes\n",
2757 ioc->name, ioc->HostPageBuffer,
2758 ioc->HostPageBuffer_sz));
2546 pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz, 2759 pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
2547 ioc->HostPageBuffer, ioc->HostPageBuffer_dma); 2760 ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
2548 ioc->HostPageBuffer = NULL; 2761 ioc->HostPageBuffer = NULL;
2549 ioc->HostPageBuffer_sz = 0; 2762 ioc->HostPageBuffer_sz = 0;
2550 ioc->alloc_total -= ioc->HostPageBuffer_sz; 2763 ioc->alloc_total -= ioc->HostPageBuffer_sz;
2551 } 2764 }
2552}
2553 2765
2766 pci_set_drvdata(ioc->pcidev, NULL);
2767}
2554/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2768/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2555/** 2769/**
2556 * mpt_adapter_dispose - Free all resources associated with an MPT adapter 2770 * mpt_adapter_dispose - Free all resources associated with an MPT adapter
@@ -2690,8 +2904,12 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
2690 } 2904 }
2691 2905
2692 /* Is it already READY? */ 2906 /* Is it already READY? */
2693 if (!statefault && (ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY) 2907 if (!statefault &&
2908 ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)) {
2909 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
2910 "IOC is in READY state\n", ioc->name));
2694 return 0; 2911 return 0;
2912 }
2695 2913
2696 /* 2914 /*
2697 * Check to see if IOC is in FAULT state. 2915 * Check to see if IOC is in FAULT state.
@@ -2764,8 +2982,9 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
2764 2982
2765 ii++; cntdn--; 2983 ii++; cntdn--;
2766 if (!cntdn) { 2984 if (!cntdn) {
2767 printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n", 2985 printk(MYIOC_s_ERR_FMT
2768 ioc->name, (int)((ii+5)/HZ)); 2986 "Wait IOC_READY state (0x%x) timeout(%d)!\n",
2987 ioc->name, ioc_state, (int)((ii+5)/HZ));
2769 return -ETIME; 2988 return -ETIME;
2770 } 2989 }
2771 2990
@@ -2778,9 +2997,8 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
2778 } 2997 }
2779 2998
2780 if (statefault < 3) { 2999 if (statefault < 3) {
2781 printk(MYIOC_s_INFO_FMT "Recovered from %s\n", 3000 printk(MYIOC_s_INFO_FMT "Recovered from %s\n", ioc->name,
2782 ioc->name, 3001 statefault == 1 ? "stuck handshake" : "IOC FAULT");
2783 statefault==1 ? "stuck handshake" : "IOC FAULT");
2784 } 3002 }
2785 3003
2786 return hard_reset_done; 3004 return hard_reset_done;
@@ -2833,8 +3051,9 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2833 3051
2834 /* IOC *must* NOT be in RESET state! */ 3052 /* IOC *must* NOT be in RESET state! */
2835 if (ioc->last_state == MPI_IOC_STATE_RESET) { 3053 if (ioc->last_state == MPI_IOC_STATE_RESET) {
2836 printk(MYIOC_s_ERR_FMT "Can't get IOCFacts NOT READY! (%08x)\n", 3054 printk(KERN_ERR MYNAM
2837 ioc->name, ioc->last_state ); 3055 ": ERROR - Can't get IOCFacts, %s NOT READY! (%08x)\n",
3056 ioc->name, ioc->last_state);
2838 return -44; 3057 return -44;
2839 } 3058 }
2840 3059
@@ -2896,7 +3115,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2896 * Old: u16{Major(4),Minor(4),SubMinor(8)} 3115 * Old: u16{Major(4),Minor(4),SubMinor(8)}
2897 * New: u32{Major(8),Minor(8),Unit(8),Dev(8)} 3116 * New: u32{Major(8),Minor(8),Unit(8),Dev(8)}
2898 */ 3117 */
2899 if (facts->MsgVersion < 0x0102) { 3118 if (facts->MsgVersion < MPI_VERSION_01_02) {
2900 /* 3119 /*
2901 * Handle old FC f/w style, convert to new... 3120 * Handle old FC f/w style, convert to new...
2902 */ 3121 */
@@ -2908,9 +3127,11 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2908 facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word); 3127 facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word);
2909 3128
2910 facts->ProductID = le16_to_cpu(facts->ProductID); 3129 facts->ProductID = le16_to_cpu(facts->ProductID);
3130
2911 if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK) 3131 if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
2912 > MPI_FW_HEADER_PID_PROD_TARGET_SCSI) 3132 > MPI_FW_HEADER_PID_PROD_TARGET_SCSI)
2913 ioc->ir_firmware = 1; 3133 ioc->ir_firmware = 1;
3134
2914 facts->CurrentHostMfaHighAddr = 3135 facts->CurrentHostMfaHighAddr =
2915 le32_to_cpu(facts->CurrentHostMfaHighAddr); 3136 le32_to_cpu(facts->CurrentHostMfaHighAddr);
2916 facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits); 3137 facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits);
@@ -2926,7 +3147,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2926 * to 14 in MPI-1.01.0x. 3147 * to 14 in MPI-1.01.0x.
2927 */ 3148 */
2928 if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 && 3149 if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 &&
2929 facts->MsgVersion > 0x0100) { 3150 facts->MsgVersion > MPI_VERSION_01_00) {
2930 facts->FWImageSize = le32_to_cpu(facts->FWImageSize); 3151 facts->FWImageSize = le32_to_cpu(facts->FWImageSize);
2931 } 3152 }
2932 3153
@@ -3108,6 +3329,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
3108 3329
3109 ioc_init.MaxDevices = (U8)ioc->devices_per_bus; 3330 ioc_init.MaxDevices = (U8)ioc->devices_per_bus;
3110 ioc_init.MaxBuses = (U8)ioc->number_of_buses; 3331 ioc_init.MaxBuses = (U8)ioc->number_of_buses;
3332
3111 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n", 3333 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n",
3112 ioc->name, ioc->facts.MsgVersion)); 3334 ioc->name, ioc->facts.MsgVersion));
3113 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) { 3335 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) {
@@ -3122,7 +3344,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
3122 } 3344 }
3123 ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */ 3345 ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */
3124 3346
3125 if (sizeof(dma_addr_t) == sizeof(u64)) { 3347 if (ioc->sg_addr_size == sizeof(u64)) {
3126 /* Save the upper 32-bits of the request 3348 /* Save the upper 32-bits of the request
3127 * (reply) and sense buffers. 3349 * (reply) and sense buffers.
3128 */ 3350 */
@@ -3325,11 +3547,10 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
3325 FWUpload_t *prequest; 3547 FWUpload_t *prequest;
3326 FWUploadReply_t *preply; 3548 FWUploadReply_t *preply;
3327 FWUploadTCSGE_t *ptcsge; 3549 FWUploadTCSGE_t *ptcsge;
3328 int sgeoffset;
3329 u32 flagsLength; 3550 u32 flagsLength;
3330 int ii, sz, reply_sz; 3551 int ii, sz, reply_sz;
3331 int cmdStatus; 3552 int cmdStatus;
3332 3553 int request_size;
3333 /* If the image size is 0, we are done. 3554 /* If the image size is 0, we are done.
3334 */ 3555 */
3335 if ((sz = ioc->facts.FWImageSize) == 0) 3556 if ((sz = ioc->facts.FWImageSize) == 0)
@@ -3364,42 +3585,41 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
3364 ptcsge->ImageSize = cpu_to_le32(sz); 3585 ptcsge->ImageSize = cpu_to_le32(sz);
3365 ptcsge++; 3586 ptcsge++;
3366 3587
3367 sgeoffset = sizeof(FWUpload_t) - sizeof(SGE_MPI_UNION) + sizeof(FWUploadTCSGE_t);
3368
3369 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz; 3588 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz;
3370 mpt_add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma); 3589 ioc->add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma);
3371 3590 request_size = offsetof(FWUpload_t, SGL) + sizeof(FWUploadTCSGE_t) +
3372 sgeoffset += sizeof(u32) + sizeof(dma_addr_t); 3591 ioc->SGE_size;
3373 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": Sending FW Upload (req @ %p) sgeoffset=%d \n", 3592 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending FW Upload "
3374 ioc->name, prequest, sgeoffset)); 3593 " (req @ %p) fw_size=%d mf_request_size=%d\n", ioc->name, prequest,
3594 ioc->facts.FWImageSize, request_size));
3375 DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest); 3595 DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest);
3376 3596
3377 ii = mpt_handshake_req_reply_wait(ioc, sgeoffset, (u32*)prequest, 3597 ii = mpt_handshake_req_reply_wait(ioc, request_size, (u32 *)prequest,
3378 reply_sz, (u16*)preply, 65 /*seconds*/, sleepFlag); 3598 reply_sz, (u16 *)preply, 65 /*seconds*/, sleepFlag);
3379 3599
3380 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Upload completed rc=%x \n", ioc->name, ii)); 3600 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Upload completed "
3601 "rc=%x \n", ioc->name, ii));
3381 3602
3382 cmdStatus = -EFAULT; 3603 cmdStatus = -EFAULT;
3383 if (ii == 0) { 3604 if (ii == 0) {
3384 /* Handshake transfer was complete and successful. 3605 /* Handshake transfer was complete and successful.
3385 * Check the Reply Frame. 3606 * Check the Reply Frame.
3386 */ 3607 */
3387 int status, transfer_sz; 3608 int status;
3388 status = le16_to_cpu(preply->IOCStatus); 3609 status = le16_to_cpu(preply->IOCStatus) &
3389 if (status == MPI_IOCSTATUS_SUCCESS) { 3610 MPI_IOCSTATUS_MASK;
3390 transfer_sz = le32_to_cpu(preply->ActualImageSize); 3611 if (status == MPI_IOCSTATUS_SUCCESS &&
3391 if (transfer_sz == sz) 3612 ioc->facts.FWImageSize ==
3613 le32_to_cpu(preply->ActualImageSize))
3392 cmdStatus = 0; 3614 cmdStatus = 0;
3393 }
3394 } 3615 }
3395 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n", 3616 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n",
3396 ioc->name, cmdStatus)); 3617 ioc->name, cmdStatus));
3397 3618
3398 3619
3399 if (cmdStatus) { 3620 if (cmdStatus) {
3400 3621 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed, "
3401 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": fw upload failed, freeing image \n", 3622 "freeing image \n", ioc->name));
3402 ioc->name));
3403 mpt_free_fw_memory(ioc); 3623 mpt_free_fw_memory(ioc);
3404 } 3624 }
3405 kfree(prequest); 3625 kfree(prequest);
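
Note: the upload hunk replaces the old sizeof() arithmetic with an offsetof()-based request size: the bytes up to the SGL, plus the transaction-context SGE, plus one simple SGE whose width now depends on the negotiated DMA mask rather than on sizeof(dma_addr_t). A self-contained demonstration of the idea, with stand-in structs (FWUpload_t and FWUploadTCSGE_t themselves are defined in the MPI headers):

	/* Stand-alone demo; struct request stands in for FWUpload_t. */
	#include <stddef.h>
	#include <stdio.h>

	struct tcsge {			/* stand-in for FWUploadTCSGE_t */
		unsigned int words[2];
	};

	struct request {
		unsigned int header[6];	/* fixed message header */
		unsigned char SGL[];	/* scatter-gather list starts here */
	};

	int main(void)
	{
		size_t sge_size = 4 + 8;	/* FlagsLength word + 64-bit address */
		size_t total = offsetof(struct request, SGL) +
			       sizeof(struct tcsge) + sge_size;

		printf("request size = %zu bytes\n", total);	/* 44 here */
		return 0;
	}
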
@@ -3723,6 +3943,10 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3723 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 3943 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
3724 3944
3725 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) { 3945 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
3946
3947 if (!ignore)
3948 return 0;
3949
3726 drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset " 3950 drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset "
3727 "address=%p\n", ioc->name, __func__, 3951 "address=%p\n", ioc->name, __func__,
3728 &ioc->chip->Doorbell, &ioc->chip->Reset_1078)); 3952 &ioc->chip->Doorbell, &ioc->chip->Reset_1078));
@@ -3740,6 +3964,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3740 "looking for READY STATE: doorbell=%x" 3964 "looking for READY STATE: doorbell=%x"
3741 " count=%d\n", 3965 " count=%d\n",
3742 ioc->name, doorbell, count)); 3966 ioc->name, doorbell, count));
3967
3743 if (doorbell == MPI_IOC_STATE_READY) { 3968 if (doorbell == MPI_IOC_STATE_READY) {
3744 return 1; 3969 return 1;
3745 } 3970 }
@@ -3890,6 +4115,10 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3890 doorbell = CHIPREG_READ32(&ioc->chip->Doorbell); 4115 doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
3891 doorbell &= MPI_IOC_STATE_MASK; 4116 doorbell &= MPI_IOC_STATE_MASK;
3892 4117
4118 drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4119 "looking for READY STATE: doorbell=%x"
4120 " count=%d\n", ioc->name, doorbell, count));
4121
3893 if (doorbell == MPI_IOC_STATE_READY) { 4122 if (doorbell == MPI_IOC_STATE_READY) {
3894 break; 4123 break;
3895 } 4124 }
@@ -3901,6 +4130,11 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3901 mdelay (1000); 4130 mdelay (1000);
3902 } 4131 }
3903 } 4132 }
4133
4134 if (doorbell != MPI_IOC_STATE_READY)
4135 printk(MYIOC_s_ERR_FMT "Failed to come READY "
	4136	 			"after reset! IocState=%x\n", ioc->name,
4137 doorbell);
3904 } 4138 }
3905 } 4139 }
3906 4140
@@ -4019,8 +4253,9 @@ SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag)
4019 if (sleepFlag != CAN_SLEEP) 4253 if (sleepFlag != CAN_SLEEP)
4020 count *= 10; 4254 count *= 10;
4021 4255
4022 printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n", 4256 printk(MYIOC_s_ERR_FMT
4023 ioc->name, (int)((count+5)/HZ)); 4257 "Wait IOC_READY state (0x%x) timeout(%d)!\n",
4258 ioc->name, state, (int)((count+5)/HZ));
4024 return -ETIME; 4259 return -ETIME;
4025 } 4260 }
4026 4261
@@ -4090,24 +4325,29 @@ initChainBuffers(MPT_ADAPTER *ioc)
4090 * num_sge = num sge in request frame + last chain buffer 4325 * num_sge = num sge in request frame + last chain buffer
4091 * scale = num sge per chain buffer if no chain element 4326 * scale = num sge per chain buffer if no chain element
4092 */ 4327 */
4093 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); 4328 scale = ioc->req_sz / ioc->SGE_size;
4094 if (sizeof(dma_addr_t) == sizeof(u64)) 4329 if (ioc->sg_addr_size == sizeof(u64))
4095 num_sge = scale + (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32)); 4330 num_sge = scale + (ioc->req_sz - 60) / ioc->SGE_size;
4096 else 4331 else
4097 num_sge = 1+ scale + (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32)); 4332 num_sge = 1 + scale + (ioc->req_sz - 64) / ioc->SGE_size;
4098 4333
4099 if (sizeof(dma_addr_t) == sizeof(u64)) { 4334 if (ioc->sg_addr_size == sizeof(u64)) {
4100 numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + 4335 numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale +
4101 (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32)); 4336 (ioc->req_sz - 60) / ioc->SGE_size;
4102 } else { 4337 } else {
4103 numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + 4338 numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) +
4104 (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32)); 4339 scale + (ioc->req_sz - 64) / ioc->SGE_size;
4105 } 4340 }
4106 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n", 4341 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n",
4107 ioc->name, num_sge, numSGE)); 4342 ioc->name, num_sge, numSGE));
4108 4343
4109 if ( numSGE > MPT_SCSI_SG_DEPTH ) 4344 if (ioc->bus_type == FC) {
4110 numSGE = MPT_SCSI_SG_DEPTH; 4345 if (numSGE > MPT_SCSI_FC_SG_DEPTH)
4346 numSGE = MPT_SCSI_FC_SG_DEPTH;
4347 } else {
4348 if (numSGE > MPT_SCSI_SG_DEPTH)
4349 numSGE = MPT_SCSI_SG_DEPTH;
4350 }
4111 4351
4112 num_chain = 1; 4352 num_chain = 1;
4113 while (numSGE - num_sge > 0) { 4353 while (numSGE - num_sge > 0) {
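
Note: plugging concrete numbers into the reworked arithmetic makes the chain sizing easier to follow. Assuming a 128-byte request frame, 12-byte 64-bit SGEs and MaxChainDepth = 8 (all illustrative values):

	#include <stdio.h>

	int main(void)
	{
		int req_sz = 128, SGE_size = 12, MaxChainDepth = 8;

		int scale   = req_sz / SGE_size;		/* 10 SGEs per chain buffer */
		int num_sge = scale + (req_sz - 60) / SGE_size;	/* 15: frame + last chain */
		int numSGE  = (scale - 1) * (MaxChainDepth - 1) +
			      scale + (req_sz - 60) / SGE_size;	/* 78 across all chains */

		printf("scale=%d num_sge=%d numSGE=%d\n", scale, num_sge, numSGE);
		return 0;
	}

With those inputs scale = 10, num_sge = 15 and numSGE = 78, which is then clamped to MPT_SCSI_FC_SG_DEPTH or MPT_SCSI_SG_DEPTH per bus type as in the hunk above.
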
@@ -4161,12 +4401,42 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
4161 dma_addr_t alloc_dma; 4401 dma_addr_t alloc_dma;
4162 u8 *mem; 4402 u8 *mem;
4163 int i, reply_sz, sz, total_size, num_chain; 4403 int i, reply_sz, sz, total_size, num_chain;
4404 u64 dma_mask;
4405
4406 dma_mask = 0;
4164 4407
4165 /* Prime reply FIFO... */ 4408 /* Prime reply FIFO... */
4166 4409
4167 if (ioc->reply_frames == NULL) { 4410 if (ioc->reply_frames == NULL) {
4168 if ( (num_chain = initChainBuffers(ioc)) < 0) 4411 if ( (num_chain = initChainBuffers(ioc)) < 0)
4169 return -1; 4412 return -1;
4413 /*
4414 * 1078 errata workaround for the 36GB limitation
4415 */
4416 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 &&
4417 ioc->dma_mask > DMA_35BIT_MASK) {
4418 if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32))
4419 && !pci_set_consistent_dma_mask(ioc->pcidev,
4420 DMA_BIT_MASK(32))) {
4421 dma_mask = DMA_35BIT_MASK;
4422 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4423 "setting 35 bit addressing for "
4424 "Request/Reply/Chain and Sense Buffers\n",
4425 ioc->name));
4426 } else {
	4427				/* Resetting DMA mask to 64 bit */
4428 pci_set_dma_mask(ioc->pcidev,
4429 DMA_BIT_MASK(64));
4430 pci_set_consistent_dma_mask(ioc->pcidev,
4431 DMA_BIT_MASK(64));
4432
4433 printk(MYIOC_s_ERR_FMT
4434 "failed setting 35 bit addressing for "
4435 "Request/Reply/Chain and Sense Buffers\n",
4436 ioc->name);
4437 return -1;
4438 }
4439 }
4170 4440
4171 total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth); 4441 total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth);
4172 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n", 4442 dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n",
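
Note: the SAS1078 errata limits where the request/reply/chain and sense buffers may live, so the hunk above narrows the DMA mask to 32 bits just for these allocations (recording DMA_35BIT_MASK as a marker) and restores the wide mask once they are placed. The pattern in isolation, as a sketch against this era's PCI DMA API:

	/* Sketch: narrow the mask, allocate, restore. */
	static void *alloc_low_buffer(struct pci_dev *pdev, size_t sz,
				      dma_addr_t *dma)
	{
		void *buf;

		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
		    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
			return NULL;		/* cannot honor the errata */

		buf = pci_alloc_consistent(pdev, sz, dma);

		/* restore full addressing for normal I/O */
		pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		return buf;
	}
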
@@ -4305,9 +4575,16 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
4305 alloc_dma += ioc->reply_sz; 4575 alloc_dma += ioc->reply_sz;
4306 } 4576 }
4307 4577
4578 if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev,
4579 ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev,
4580 ioc->dma_mask))
4581 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4582 "restoring 64 bit addressing\n", ioc->name));
4583
4308 return 0; 4584 return 0;
4309 4585
4310out_fail: 4586out_fail:
4587
4311 if (ioc->alloc != NULL) { 4588 if (ioc->alloc != NULL) {
4312 sz = ioc->alloc_sz; 4589 sz = ioc->alloc_sz;
4313 pci_free_consistent(ioc->pcidev, 4590 pci_free_consistent(ioc->pcidev,
@@ -4324,6 +4601,13 @@ out_fail:
4324 ioc->sense_buf_pool, ioc->sense_buf_pool_dma); 4601 ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
4325 ioc->sense_buf_pool = NULL; 4602 ioc->sense_buf_pool = NULL;
4326 } 4603 }
4604
4605 if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev,
4606 DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev,
4607 DMA_BIT_MASK(64)))
4608 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4609 "restoring 64 bit addressing\n", ioc->name));
4610
4327 return -1; 4611 return -1;
4328} 4612}
4329 4613
@@ -4759,7 +5043,14 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
4759 SasIoUnitControlReply_t *sasIoUnitCntrReply; 5043 SasIoUnitControlReply_t *sasIoUnitCntrReply;
4760 MPT_FRAME_HDR *mf = NULL; 5044 MPT_FRAME_HDR *mf = NULL;
4761 MPIHeader_t *mpi_hdr; 5045 MPIHeader_t *mpi_hdr;
5046 int ret = 0;
5047 unsigned long timeleft;
5048
5049 mutex_lock(&ioc->mptbase_cmds.mutex);
4762 5050
5051 /* init the internal cmd struct */
5052 memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
5053 INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
4763 5054
	4764	 /* ensure garbage is not sent to fw */	 5055	 /* ensure garbage is not sent to fw */
4765 switch(persist_opcode) { 5056 switch(persist_opcode) {
@@ -4769,17 +5060,19 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
4769 break; 5060 break;
4770 5061
4771 default: 5062 default:
4772 return -1; 5063 ret = -1;
4773 break; 5064 goto out;
4774 } 5065 }
4775 5066
4776 printk("%s: persist_opcode=%x\n",__func__, persist_opcode); 5067 printk(KERN_DEBUG "%s: persist_opcode=%x\n",
5068 __func__, persist_opcode);
4777 5069
4778 /* Get a MF for this command. 5070 /* Get a MF for this command.
4779 */ 5071 */
4780 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 5072 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
4781 printk("%s: no msg frames!\n",__func__); 5073 printk(KERN_DEBUG "%s: no msg frames!\n", __func__);
4782 return -1; 5074 ret = -1;
5075 goto out;
4783 } 5076 }
4784 5077
4785 mpi_hdr = (MPIHeader_t *) mf; 5078 mpi_hdr = (MPIHeader_t *) mf;
@@ -4789,27 +5082,42 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
4789 sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext; 5082 sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext;
4790 sasIoUnitCntrReq->Operation = persist_opcode; 5083 sasIoUnitCntrReq->Operation = persist_opcode;
4791 5084
4792 init_timer(&ioc->persist_timer);
4793 ioc->persist_timer.data = (unsigned long) ioc;
4794 ioc->persist_timer.function = mpt_timer_expired;
4795 ioc->persist_timer.expires = jiffies + HZ*10 /* 10 sec */;
4796 ioc->persist_wait_done=0;
4797 add_timer(&ioc->persist_timer);
4798 mpt_put_msg_frame(mpt_base_index, ioc, mf); 5085 mpt_put_msg_frame(mpt_base_index, ioc, mf);
4799 wait_event(mpt_waitq, ioc->persist_wait_done); 5086 timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done, 10*HZ);
5087 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
5088 ret = -ETIME;
5089 printk(KERN_DEBUG "%s: failed\n", __func__);
5090 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
5091 goto out;
5092 if (!timeleft) {
5093 printk(KERN_DEBUG "%s: Issuing Reset from %s!!\n",
5094 ioc->name, __func__);
5095 mpt_HardResetHandler(ioc, CAN_SLEEP);
5096 mpt_free_msg_frame(ioc, mf);
5097 }
5098 goto out;
5099 }
5100
5101 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
5102 ret = -1;
5103 goto out;
5104 }
4800 5105
4801 sasIoUnitCntrReply = 5106 sasIoUnitCntrReply =
4802 (SasIoUnitControlReply_t *)ioc->persist_reply_frame; 5107 (SasIoUnitControlReply_t *)ioc->mptbase_cmds.reply;
4803 if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) { 5108 if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
4804 printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n", 5109 printk(KERN_DEBUG "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
4805 __func__, 5110 __func__, sasIoUnitCntrReply->IOCStatus,
4806 sasIoUnitCntrReply->IOCStatus,
4807 sasIoUnitCntrReply->IOCLogInfo); 5111 sasIoUnitCntrReply->IOCLogInfo);
4808 return -1; 5112 printk(KERN_DEBUG "%s: failed\n", __func__);
4809 } 5113 ret = -1;
5114 } else
5115 printk(KERN_DEBUG "%s: success\n", __func__);
5116 out:
4810 5117
4811 printk("%s: success\n",__func__); 5118 CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
4812 return 0; 5119 mutex_unlock(&ioc->mptbase_cmds.mutex);
5120 return ret;
4813} 5121}
4814 5122
4815/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5123/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
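
Note: the persist-operation rewrite is one instance of a pattern this patch applies throughout: the private timer plus global mpt_waitq is replaced by the serialized ioc->mptbase_cmds structure, a completion with a timeout, and escalation to a hard reset when the firmware never answers. The skeleton, reduced to its moving parts (the message-frame body is elided):

	/* Sketch: the serialized internal-command discipline. */
	static int issue_internal_cmd(MPT_ADAPTER *ioc)
	{
		unsigned long timeleft;
		int ret = 0;

		mutex_lock(&ioc->mptbase_cmds.mutex);
		memset(ioc->mptbase_cmds.reply, 0, MPT_DEFAULT_FRAME_SIZE);
		INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)

		/* ... build and post the message frame here ... */

		timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done,
						       10 * HZ);
		if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
			ret = -ETIME;
			/* no reply at all: the firmware is wedged, reset it */
			if (!timeleft)
				mpt_HardResetHandler(ioc, CAN_SLEEP);
		}

		CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
		mutex_unlock(&ioc->mptbase_cmds.mutex);
		return ret;
	}
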
@@ -5394,17 +5702,20 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
5394 * -ENOMEM if pci_alloc failed 5702 * -ENOMEM if pci_alloc failed
5395 **/ 5703 **/
5396int 5704int
5397mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk) 5705mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num,
5706 RaidPhysDiskPage0_t *phys_disk)
5398{ 5707{
5399 CONFIGPARMS cfg; 5708 CONFIGPARMS cfg;
5400 ConfigPageHeader_t hdr; 5709 ConfigPageHeader_t hdr;
5401 dma_addr_t dma_handle; 5710 dma_addr_t dma_handle;
5402 pRaidPhysDiskPage0_t buffer = NULL; 5711 pRaidPhysDiskPage0_t buffer = NULL;
5403 int rc; 5712 int rc;
5404 5713
5405 memset(&cfg, 0 , sizeof(CONFIGPARMS)); 5714 memset(&cfg, 0 , sizeof(CONFIGPARMS));
5406 memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); 5715 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
5716 memset(phys_disk, 0, sizeof(RaidPhysDiskPage0_t));
5407 5717
5718 hdr.PageVersion = MPI_RAIDPHYSDISKPAGE0_PAGEVERSION;
5408 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK; 5719 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
5409 cfg.cfghdr.hdr = &hdr; 5720 cfg.cfghdr.hdr = &hdr;
5410 cfg.physAddr = -1; 5721 cfg.physAddr = -1;
@@ -5451,6 +5762,161 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t
5451} 5762}
5452 5763
5453/** 5764/**
	 5765 * mpt_raid_phys_disk_get_num_paths - returns the number of paths associated with this phys disk
	 5766 * @ioc: Pointer to an Adapter Structure
	 5767 * @phys_disk_num: io unit unique phys disk num generated by the ioc
	 5768 *
	 5769 * Return:
	 5770 * the number of paths
5771 **/
5772int
5773mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
5774{
5775 CONFIGPARMS cfg;
5776 ConfigPageHeader_t hdr;
5777 dma_addr_t dma_handle;
5778 pRaidPhysDiskPage1_t buffer = NULL;
5779 int rc;
5780
5781 memset(&cfg, 0 , sizeof(CONFIGPARMS));
5782 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
5783
5784 hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
5785 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
5786 hdr.PageNumber = 1;
5787 cfg.cfghdr.hdr = &hdr;
5788 cfg.physAddr = -1;
5789 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5790
5791 if (mpt_config(ioc, &cfg) != 0) {
5792 rc = 0;
5793 goto out;
5794 }
5795
5796 if (!hdr.PageLength) {
5797 rc = 0;
5798 goto out;
5799 }
5800
5801 buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
5802 &dma_handle);
5803
5804 if (!buffer) {
5805 rc = 0;
5806 goto out;
5807 }
5808
5809 cfg.physAddr = dma_handle;
5810 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
5811 cfg.pageAddr = phys_disk_num;
5812
5813 if (mpt_config(ioc, &cfg) != 0) {
5814 rc = 0;
5815 goto out;
5816 }
5817
5818 rc = buffer->NumPhysDiskPaths;
5819 out:
5820
5821 if (buffer)
5822 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
5823 dma_handle);
5824
5825 return rc;
5826}
5827EXPORT_SYMBOL(mpt_raid_phys_disk_get_num_paths);
5828
5829/**
5830 * mpt_raid_phys_disk_pg1 - returns phys disk page 1
	 5831 * @ioc: Pointer to an Adapter Structure
5832 * @phys_disk_num: io unit unique phys disk num generated by the ioc
5833 * @phys_disk: requested payload data returned
5834 *
5835 * Return:
5836 * 0 on success
5837 * -EFAULT if read of config page header fails or data pointer not NULL
5838 * -ENOMEM if pci_alloc failed
5839 **/
5840int
5841mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
5842 RaidPhysDiskPage1_t *phys_disk)
5843{
5844 CONFIGPARMS cfg;
5845 ConfigPageHeader_t hdr;
5846 dma_addr_t dma_handle;
5847 pRaidPhysDiskPage1_t buffer = NULL;
5848 int rc;
5849 int i;
5850 __le64 sas_address;
5851
5852 memset(&cfg, 0 , sizeof(CONFIGPARMS));
5853 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
5854 rc = 0;
5855
5856 hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
5857 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
5858 hdr.PageNumber = 1;
5859 cfg.cfghdr.hdr = &hdr;
5860 cfg.physAddr = -1;
5861 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
5862
5863 if (mpt_config(ioc, &cfg) != 0) {
5864 rc = -EFAULT;
5865 goto out;
5866 }
5867
5868 if (!hdr.PageLength) {
5869 rc = -EFAULT;
5870 goto out;
5871 }
5872
5873 buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
5874 &dma_handle);
5875
5876 if (!buffer) {
5877 rc = -ENOMEM;
5878 goto out;
5879 }
5880
5881 cfg.physAddr = dma_handle;
5882 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
5883 cfg.pageAddr = phys_disk_num;
5884
5885 if (mpt_config(ioc, &cfg) != 0) {
5886 rc = -EFAULT;
5887 goto out;
5888 }
5889
5890 phys_disk->NumPhysDiskPaths = buffer->NumPhysDiskPaths;
5891 phys_disk->PhysDiskNum = phys_disk_num;
5892 for (i = 0; i < phys_disk->NumPhysDiskPaths; i++) {
5893 phys_disk->Path[i].PhysDiskID = buffer->Path[i].PhysDiskID;
5894 phys_disk->Path[i].PhysDiskBus = buffer->Path[i].PhysDiskBus;
5895 phys_disk->Path[i].OwnerIdentifier =
5896 buffer->Path[i].OwnerIdentifier;
5897 phys_disk->Path[i].Flags = le16_to_cpu(buffer->Path[i].Flags);
5898 memcpy(&sas_address, &buffer->Path[i].WWID, sizeof(__le64));
5899 sas_address = le64_to_cpu(sas_address);
5900 memcpy(&phys_disk->Path[i].WWID, &sas_address, sizeof(__le64));
5901 memcpy(&sas_address,
5902 &buffer->Path[i].OwnerWWID, sizeof(__le64));
5903 sas_address = le64_to_cpu(sas_address);
5904 memcpy(&phys_disk->Path[i].OwnerWWID,
5905 &sas_address, sizeof(__le64));
5906 }
5907
5908 out:
5909
5910 if (buffer)
5911 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
5912 dma_handle);
5913
5914 return rc;
5915}
5916EXPORT_SYMBOL(mpt_raid_phys_disk_pg1);
5917
5918
5919/**
5454 * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes 5920 * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes
	5455	 * @ioc: Pointer to an Adapter Structure	 5921	 * @ioc: Pointer to an Adapter Structure
5456 * 5922 *
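
Note: a consumer of the two new exports would first ask how many paths the physical disk exposes, then fetch page 1 for the per-path details. A hedged usage sketch; the buffer sizing via offsetof() over the variable-length Path[] tail is an assumption about the MPI page layout:

	/* Sketch: enumerating multipath info for one RAID physical disk. */
	static void show_disk_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
	{
		RaidPhysDiskPage1_t *pg1;
		int paths, i;

		paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);
		if (paths <= 0)
			return;

		/* Path[] is a variable-length tail, so size the buffer by paths */
		pg1 = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
			      paths * sizeof(pg1->Path[0]), GFP_KERNEL);
		if (!pg1)
			return;

		if (mpt_raid_phys_disk_pg1(ioc, phys_disk_num, pg1) == 0)
			for (i = 0; i < pg1->NumPhysDiskPaths; i++)
				printk(KERN_DEBUG "path %d: id=%d bus=%d flags=0x%x\n",
				       i, pg1->Path[i].PhysDiskID,
				       pg1->Path[i].PhysDiskBus,
				       pg1->Path[i].Flags);
		kfree(pg1);
	}
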
@@ -5775,30 +6241,28 @@ mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
5775 * SendEventNotification - Send EventNotification (on or off) request to adapter 6241 * SendEventNotification - Send EventNotification (on or off) request to adapter
5776 * @ioc: Pointer to MPT_ADAPTER structure 6242 * @ioc: Pointer to MPT_ADAPTER structure
5777 * @EvSwitch: Event switch flags 6243 * @EvSwitch: Event switch flags
6244 * @sleepFlag: Specifies whether the process can sleep
5778 */ 6245 */
5779static int 6246static int
5780SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch) 6247SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch, int sleepFlag)
5781{ 6248{
5782 EventNotification_t *evnp; 6249 EventNotification_t evn;
6250 MPIDefaultReply_t reply_buf;
5783 6251
5784 evnp = (EventNotification_t *) mpt_get_msg_frame(mpt_base_index, ioc); 6252 memset(&evn, 0, sizeof(EventNotification_t));
5785 if (evnp == NULL) { 6253 memset(&reply_buf, 0, sizeof(MPIDefaultReply_t));
5786 devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n",
5787 ioc->name));
5788 return 0;
5789 }
5790 memset(evnp, 0, sizeof(*evnp));
5791
5792 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending EventNotification (%d) request %p\n", ioc->name, EvSwitch, evnp));
5793 6254
5794 evnp->Function = MPI_FUNCTION_EVENT_NOTIFICATION; 6255 evn.Function = MPI_FUNCTION_EVENT_NOTIFICATION;
5795 evnp->ChainOffset = 0; 6256 evn.Switch = EvSwitch;
5796 evnp->MsgFlags = 0; 6257 evn.MsgContext = cpu_to_le32(mpt_base_index << 16);
5797 evnp->Switch = EvSwitch;
5798 6258
5799 mpt_put_msg_frame(mpt_base_index, ioc, (MPT_FRAME_HDR *)evnp); 6259 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6260 "Sending EventNotification (%d) request %p\n",
6261 ioc->name, EvSwitch, &evn));
5800 6262
5801 return 0; 6263 return mpt_handshake_req_reply_wait(ioc, sizeof(EventNotification_t),
6264 (u32 *)&evn, sizeof(MPIDefaultReply_t), (u16 *)&reply_buf, 30,
6265 sleepFlag);
5802} 6266}
5803 6267
5804/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6268/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
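
Note: SendEventNotification now goes through the doorbell handshake with on-stack request and reply buffers instead of consuming a message frame, so it can run before the reply FIFOs are usable and can report a real status to the caller. Distilled to its essentials:

	/* Sketch: on-stack request/reply through the doorbell handshake. */
	static int send_event_enable(MPT_ADAPTER *ioc, int sleepFlag)
	{
		EventNotification_t req;
		MPIDefaultReply_t reply;

		memset(&req, 0, sizeof(req));
		memset(&reply, 0, sizeof(reply));
		req.Function = MPI_FUNCTION_EVENT_NOTIFICATION;
		req.Switch = 1;					/* enable events */
		req.MsgContext = cpu_to_le32(mpt_base_index << 16);

		/* 30 second timeout; returns 0 on success */
		return mpt_handshake_req_reply_wait(ioc, sizeof(req), (u32 *)&req,
				sizeof(reply), (u16 *)&reply, 30, sleepFlag);
	}
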
@@ -5814,7 +6278,7 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
5814 6278
5815 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 6279 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
5816 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", 6280 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
5817 ioc->name,__func__)); 6281 ioc->name, __func__));
5818 return -1; 6282 return -1;
5819 } 6283 }
5820 6284
@@ -5851,12 +6315,19 @@ int
5851mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) 6315mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5852{ 6316{
5853 Config_t *pReq; 6317 Config_t *pReq;
6318 ConfigReply_t *pReply;
5854 ConfigExtendedPageHeader_t *pExtHdr = NULL; 6319 ConfigExtendedPageHeader_t *pExtHdr = NULL;
5855 MPT_FRAME_HDR *mf; 6320 MPT_FRAME_HDR *mf;
5856 unsigned long flags; 6321 int ii;
5857 int ii, rc;
5858 int flagsLength; 6322 int flagsLength;
5859 int in_isr; 6323 long timeout;
6324 int ret;
6325 u8 page_type = 0, extend_page;
6326 unsigned long timeleft;
6327 unsigned long flags;
6328 int in_isr;
6329 u8 issue_hard_reset = 0;
6330 u8 retry_count = 0;
5860 6331
5861 /* Prevent calling wait_event() (below), if caller happens 6332 /* Prevent calling wait_event() (below), if caller happens
5862 * to be in ISR context, because that is fatal! 6333 * to be in ISR context, because that is fatal!
@@ -5866,15 +6337,43 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5866 dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n", 6337 dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n",
5867 ioc->name)); 6338 ioc->name));
5868 return -EPERM; 6339 return -EPERM;
6340 }
6341
6342 /* don't send a config page during diag reset */
6343 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6344 if (ioc->ioc_reset_in_progress) {
6345 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6346 "%s: busy with host reset\n", ioc->name, __func__));
6347 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6348 return -EBUSY;
6349 }
6350 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6351
6352 /* don't send if no chance of success */
6353 if (!ioc->active ||
6354 mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_OPERATIONAL) {
6355 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6356 "%s: ioc not operational, %d, %xh\n",
6357 ioc->name, __func__, ioc->active,
6358 mpt_GetIocState(ioc, 0)));
6359 return -EFAULT;
5869 } 6360 }
5870 6361
6362 retry_config:
6363 mutex_lock(&ioc->mptbase_cmds.mutex);
6364 /* init the internal cmd struct */
6365 memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
6366 INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
6367
5871 /* Get and Populate a free Frame 6368 /* Get and Populate a free Frame
5872 */ 6369 */
5873 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 6370 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
5874 dcprintk(ioc, printk(MYIOC_s_WARN_FMT "mpt_config: no msg frames!\n", 6371 dcprintk(ioc, printk(MYIOC_s_WARN_FMT
5875 ioc->name)); 6372 "mpt_config: no msg frames!\n", ioc->name));
5876 return -EAGAIN; 6373 ret = -EAGAIN;
6374 goto out;
5877 } 6375 }
6376
5878 pReq = (Config_t *)mf; 6377 pReq = (Config_t *)mf;
5879 pReq->Action = pCfg->action; 6378 pReq->Action = pCfg->action;
5880 pReq->Reserved = 0; 6379 pReq->Reserved = 0;
@@ -5900,7 +6399,9 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5900 pReq->ExtPageType = pExtHdr->ExtPageType; 6399 pReq->ExtPageType = pExtHdr->ExtPageType;
5901 pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; 6400 pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
5902 6401
5903 /* Page Length must be treated as a reserved field for the extended header. */ 6402 /* Page Length must be treated as a reserved field for the
6403 * extended header.
6404 */
5904 pReq->Header.PageLength = 0; 6405 pReq->Header.PageLength = 0;
5905 } 6406 }
5906 6407
@@ -5913,78 +6414,91 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
5913 else 6414 else
5914 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; 6415 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
5915 6416
5916 if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) { 6417 if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
6418 MPI_CONFIG_PAGETYPE_EXTENDED) {
5917 flagsLength |= pExtHdr->ExtPageLength * 4; 6419 flagsLength |= pExtHdr->ExtPageLength * 4;
5918 6420 page_type = pReq->ExtPageType;
5919 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Config request type %d, page %d and action %d\n", 6421 extend_page = 1;
5920 ioc->name, pReq->ExtPageType, pReq->Header.PageNumber, pReq->Action)); 6422 } else {
5921 }
5922 else {
5923 flagsLength |= pCfg->cfghdr.hdr->PageLength * 4; 6423 flagsLength |= pCfg->cfghdr.hdr->PageLength * 4;
5924 6424 page_type = pReq->Header.PageType;
5925 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Config request type %d, page %d and action %d\n", 6425 extend_page = 0;
5926 ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action));
5927 } 6426 }
5928 6427
5929 mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr); 6428 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT
5930 6429 "Sending Config request type 0x%x, page 0x%x and action %d\n",
5931 /* Append pCfg pointer to end of mf 6430 ioc->name, page_type, pReq->Header.PageNumber, pReq->Action));
5932 */
5933 *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg;
5934
5935 /* Initalize the timer
5936 */
5937 init_timer_on_stack(&pCfg->timer);
5938 pCfg->timer.data = (unsigned long) ioc;
5939 pCfg->timer.function = mpt_timer_expired;
5940 pCfg->wait_done = 0;
5941
5942 /* Set the timer; ensure 10 second minimum */
5943 if (pCfg->timeout < 10)
5944 pCfg->timer.expires = jiffies + HZ*10;
5945 else
5946 pCfg->timer.expires = jiffies + HZ*pCfg->timeout;
5947
5948 /* Add to end of Q, set timer and then issue this command */
5949 spin_lock_irqsave(&ioc->FreeQlock, flags);
5950 list_add_tail(&pCfg->linkage, &ioc->configQ);
5951 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
5952 6431
5953 add_timer(&pCfg->timer); 6432 ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr);
6433 timeout = (pCfg->timeout < 15) ? HZ*15 : HZ*pCfg->timeout;
5954 mpt_put_msg_frame(mpt_base_index, ioc, mf); 6434 mpt_put_msg_frame(mpt_base_index, ioc, mf);
5955 wait_event(mpt_waitq, pCfg->wait_done); 6435 timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done,
6436 timeout);
6437 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
6438 ret = -ETIME;
6439 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6440 "Failed Sending Config request type 0x%x, page 0x%x,"
6441 " action %d, status %xh, time left %ld\n\n",
6442 ioc->name, page_type, pReq->Header.PageNumber,
6443 pReq->Action, ioc->mptbase_cmds.status, timeleft));
6444 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
6445 goto out;
6446 if (!timeleft)
6447 issue_hard_reset = 1;
6448 goto out;
6449 }
5956 6450
5957 /* mf has been freed - do not access */ 6451 if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
6452 ret = -1;
6453 goto out;
6454 }
6455 pReply = (ConfigReply_t *)ioc->mptbase_cmds.reply;
6456 ret = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
6457 if (ret == MPI_IOCSTATUS_SUCCESS) {
6458 if (extend_page) {
6459 pCfg->cfghdr.ehdr->ExtPageLength =
6460 le16_to_cpu(pReply->ExtPageLength);
6461 pCfg->cfghdr.ehdr->ExtPageType =
6462 pReply->ExtPageType;
6463 }
6464 pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
6465 pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
6466 pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
6467 pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
5958 6468
5959 rc = pCfg->status; 6469 }
5960 6470
5961 return rc; 6471 if (retry_count)
5962} 6472 printk(MYIOC_s_INFO_FMT "Retry completed "
6473 "ret=0x%x timeleft=%ld\n",
6474 ioc->name, ret, timeleft);
5963 6475
5964/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6476 dcprintk(ioc, printk(KERN_DEBUG "IOCStatus=%04xh, IOCLogInfo=%08xh\n",
5965/** 6477 ret, le32_to_cpu(pReply->IOCLogInfo)));
5966 * mpt_timer_expired - Callback for timer process.
5967 * Used only internal config functionality.
5968 * @data: Pointer to MPT_SCSI_HOST recast as an unsigned long
5969 */
5970static void
5971mpt_timer_expired(unsigned long data)
5972{
5973 MPT_ADAPTER *ioc = (MPT_ADAPTER *) data;
5974
5975 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_timer_expired! \n", ioc->name));
5976 6478
5977 /* Perform a FW reload */ 6479out:
5978 if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0)
5979 printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", ioc->name);
5980 6480
5981 /* No more processing. 6481 CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
5982 * Hard reset clean-up will wake up 6482 mutex_unlock(&ioc->mptbase_cmds.mutex);
5983 * process and free all resources. 6483 if (issue_hard_reset) {
5984 */ 6484 issue_hard_reset = 0;
5985 dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_timer_expired complete!\n", ioc->name)); 6485 printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
6486 ioc->name, __func__);
6487 mpt_HardResetHandler(ioc, CAN_SLEEP);
6488 mpt_free_msg_frame(ioc, mf);
6489 /* attempt one retry for a timed out command */
6490 if (!retry_count) {
6491 printk(MYIOC_s_INFO_FMT
6492 "Attempting Retry Config request"
6493 " type 0x%x, page 0x%x,"
6494 " action %d\n", ioc->name, page_type,
6495 pCfg->cfghdr.hdr->PageNumber, pCfg->action);
6496 retry_count++;
6497 goto retry_config;
6498 }
6499 }
6500 return ret;
5986 6501
5987 return;
5988} 6502}
5989 6503
5990/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6504/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5998,41 +6512,34 @@ mpt_timer_expired(unsigned long data)
5998static int 6512static int
5999mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 6513mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
6000{ 6514{
6001 CONFIGPARMS *pCfg; 6515 switch (reset_phase) {
6002 unsigned long flags; 6516 case MPT_IOC_SETUP_RESET:
6003 6517 ioc->taskmgmt_quiesce_io = 1;
6004 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT 6518 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6005 ": IOC %s_reset routed to MPT base driver!\n", 6519 "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
6006 ioc->name, reset_phase==MPT_IOC_SETUP_RESET ? "setup" : ( 6520 break;
6007 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post"))); 6521 case MPT_IOC_PRE_RESET:
6008 6522 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6009 if (reset_phase == MPT_IOC_SETUP_RESET) { 6523 "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
6010 ; 6524 break;
6011 } else if (reset_phase == MPT_IOC_PRE_RESET) { 6525 case MPT_IOC_POST_RESET:
6012 /* If the internal config Q is not empty - 6526 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6013 * delete timer. MF resources will be freed when 6527 "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
6014 * the FIFO's are primed. 6528/* wake up mptbase_cmds */
6015 */ 6529 if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
6016 spin_lock_irqsave(&ioc->FreeQlock, flags); 6530 ioc->mptbase_cmds.status |=
6017 list_for_each_entry(pCfg, &ioc->configQ, linkage) 6531 MPT_MGMT_STATUS_DID_IOCRESET;
6018 del_timer(&pCfg->timer); 6532 complete(&ioc->mptbase_cmds.done);
6019 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
6020
6021 } else {
6022 CONFIGPARMS *pNext;
6023
6024 /* Search the configQ for internal commands.
6025 * Flush the Q, and wake up all suspended threads.
6026 */
6027 spin_lock_irqsave(&ioc->FreeQlock, flags);
6028 list_for_each_entry_safe(pCfg, pNext, &ioc->configQ, linkage) {
6029 list_del(&pCfg->linkage);
6030
6031 pCfg->status = MPT_CONFIG_ERROR;
6032 pCfg->wait_done = 1;
6033 wake_up(&mpt_waitq);
6034 } 6533 }
6035 spin_unlock_irqrestore(&ioc->FreeQlock, flags); 6534/* wake up taskmgmt_cmds */
6535 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
6536 ioc->taskmgmt_cmds.status |=
6537 MPT_MGMT_STATUS_DID_IOCRESET;
6538 complete(&ioc->taskmgmt_cmds.done);
6539 }
6540 break;
6541 default:
6542 break;
6036 } 6543 }
6037 6544
6038 return 1; /* currently means nothing really */ 6545 return 1; /* currently means nothing really */
@@ -6344,6 +6851,59 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
6344 6851
6345 *size = y; 6852 *size = y;
6346} 6853}
6854/**
	 6855 * mpt_set_taskmgmt_in_progress_flag - set flags associated with task management
6856 * @ioc: Pointer to MPT_ADAPTER structure
6857 *
6858 * Returns 0 for SUCCESS or -1 if FAILED.
6859 *
	 6860 * If -1 is returned, it was not possible to set the flags
6861 **/
6862int
6863mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
6864{
6865 unsigned long flags;
6866 int retval;
6867
6868 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6869 if (ioc->ioc_reset_in_progress || ioc->taskmgmt_in_progress ||
6870 (ioc->alt_ioc && ioc->alt_ioc->taskmgmt_in_progress)) {
6871 retval = -1;
6872 goto out;
6873 }
6874 retval = 0;
6875 ioc->taskmgmt_in_progress = 1;
6876 ioc->taskmgmt_quiesce_io = 1;
6877 if (ioc->alt_ioc) {
6878 ioc->alt_ioc->taskmgmt_in_progress = 1;
6879 ioc->alt_ioc->taskmgmt_quiesce_io = 1;
6880 }
6881 out:
6882 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6883 return retval;
6884}
6885EXPORT_SYMBOL(mpt_set_taskmgmt_in_progress_flag);
6886
6887/**
	 6888 * mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task management
6889 * @ioc: Pointer to MPT_ADAPTER structure
6890 *
6891 **/
6892void
6893mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
6894{
6895 unsigned long flags;
6896
6897 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6898 ioc->taskmgmt_in_progress = 0;
6899 ioc->taskmgmt_quiesce_io = 0;
6900 if (ioc->alt_ioc) {
6901 ioc->alt_ioc->taskmgmt_in_progress = 0;
6902 ioc->alt_ioc->taskmgmt_quiesce_io = 0;
6903 }
6904 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6905}
6906EXPORT_SYMBOL(mpt_clear_taskmgmt_in_progress_flag);
6347 6907
6348 6908
6349/** 6909/**
@@ -6397,7 +6957,9 @@ int
6397mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) 6957mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
6398{ 6958{
6399 int rc; 6959 int rc;
6960 u8 cb_idx;
6400 unsigned long flags; 6961 unsigned long flags;
6962 unsigned long time_count;
6401 6963
6402 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler Entered!\n", ioc->name)); 6964 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler Entered!\n", ioc->name));
6403#ifdef MFCNT 6965#ifdef MFCNT
@@ -6410,14 +6972,15 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
6410 /* Reset the adapter. Prevent more than 1 call to 6972 /* Reset the adapter. Prevent more than 1 call to
6411 * mpt_do_ioc_recovery at any instant in time. 6973 * mpt_do_ioc_recovery at any instant in time.
6412 */ 6974 */
6413 spin_lock_irqsave(&ioc->diagLock, flags); 6975 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6414 if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)){ 6976 if (ioc->ioc_reset_in_progress) {
6415 spin_unlock_irqrestore(&ioc->diagLock, flags); 6977 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6416 return 0; 6978 return 0;
6417 } else {
6418 ioc->diagPending = 1;
6419 } 6979 }
6420 spin_unlock_irqrestore(&ioc->diagLock, flags); 6980 ioc->ioc_reset_in_progress = 1;
6981 if (ioc->alt_ioc)
6982 ioc->alt_ioc->ioc_reset_in_progress = 1;
6983 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6421 6984
6422 /* FIXME: If do_ioc_recovery fails, repeat.... 6985 /* FIXME: If do_ioc_recovery fails, repeat....
6423 */ 6986 */
@@ -6427,47 +6990,57 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
6427 * Prevents timeouts occurring during a diagnostic reset...very bad. 6990 * Prevents timeouts occurring during a diagnostic reset...very bad.
6428 * For all other protocol drivers, this is a no-op. 6991 * For all other protocol drivers, this is a no-op.
6429 */ 6992 */
6430 { 6993 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
6431 u8 cb_idx; 6994 if (MptResetHandlers[cb_idx]) {
6432 int r = 0; 6995 mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
6433 6996 if (ioc->alt_ioc)
6434 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { 6997 mpt_signal_reset(cb_idx, ioc->alt_ioc,
6435 if (MptResetHandlers[cb_idx]) { 6998 MPT_IOC_SETUP_RESET);
6436 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling IOC reset_setup handler #%d\n",
6437 ioc->name, cb_idx));
6438 r += mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
6439 if (ioc->alt_ioc) {
6440 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling alt-%s setup reset handler #%d\n",
6441 ioc->name, ioc->alt_ioc->name, cb_idx));
6442 r += mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_SETUP_RESET);
6443 }
6444 }
6445 } 6999 }
6446 } 7000 }
6447 7001
6448 if ((rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag)) != 0) { 7002 time_count = jiffies;
6449 printk(MYIOC_s_WARN_FMT "Cannot recover rc = %d!\n", ioc->name, rc); 7003 rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag);
7004 if (rc != 0) {
7005 printk(KERN_WARNING MYNAM
7006 ": WARNING - (%d) Cannot recover %s\n", rc, ioc->name);
7007 } else {
7008 if (ioc->hard_resets < -1)
7009 ioc->hard_resets++;
6450 } 7010 }
6451 ioc->reload_fw = 0;
6452 if (ioc->alt_ioc)
6453 ioc->alt_ioc->reload_fw = 0;
6454 7011
6455 spin_lock_irqsave(&ioc->diagLock, flags); 7012 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
6456 ioc->diagPending = 0; 7013 ioc->ioc_reset_in_progress = 0;
6457 if (ioc->alt_ioc) 7014 ioc->taskmgmt_quiesce_io = 0;
6458 ioc->alt_ioc->diagPending = 0; 7015 ioc->taskmgmt_in_progress = 0;
6459 spin_unlock_irqrestore(&ioc->diagLock, flags); 7016 if (ioc->alt_ioc) {
7017 ioc->alt_ioc->ioc_reset_in_progress = 0;
7018 ioc->alt_ioc->taskmgmt_quiesce_io = 0;
7019 ioc->alt_ioc->taskmgmt_in_progress = 0;
7020 }
7021 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
6460 7022
6461 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler rc = %d!\n", ioc->name, rc)); 7023 dtmprintk(ioc,
7024 printk(MYIOC_s_DEBUG_FMT
7025 "HardResetHandler: completed (%d seconds): %s\n", ioc->name,
7026 jiffies_to_msecs(jiffies - time_count)/1000, ((rc == 0) ?
7027 "SUCCESS" : "FAILED")));
6462 7028
6463 return rc; 7029 return rc;
6464} 7030}
6465 7031
6466/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 7032#ifdef CONFIG_FUSION_LOGGING
6467static void 7033static void
6468EventDescriptionStr(u8 event, u32 evData0, char *evStr) 7034mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
6469{ 7035{
6470 char *ds = NULL; 7036 char *ds = NULL;
7037 u32 evData0;
7038 int ii;
7039 u8 event;
7040 char *evStr = ioc->evStr;
7041
7042 event = le32_to_cpu(pEventReply->Event) & 0xFF;
7043 evData0 = le32_to_cpu(pEventReply->Data[0]);
6471 7044
6472 switch(event) { 7045 switch(event) {
6473 case MPI_EVENT_NONE: 7046 case MPI_EVENT_NONE:
@@ -6501,9 +7074,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6501 if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP) 7074 if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP)
6502 ds = "Loop State(LIP) Change"; 7075 ds = "Loop State(LIP) Change";
6503 else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE) 7076 else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE)
6504 ds = "Loop State(LPE) Change"; /* ??? */ 7077 ds = "Loop State(LPE) Change";
6505 else 7078 else
6506 ds = "Loop State(LPB) Change"; /* ??? */ 7079 ds = "Loop State(LPB) Change";
6507 break; 7080 break;
6508 case MPI_EVENT_LOGOUT: 7081 case MPI_EVENT_LOGOUT:
6509 ds = "Logout"; 7082 ds = "Logout";
@@ -6703,28 +7276,65 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6703 } 7276 }
6704 case MPI_EVENT_IR2: 7277 case MPI_EVENT_IR2:
6705 { 7278 {
7279 u8 id = (u8)(evData0);
7280 u8 channel = (u8)(evData0 >> 8);
7281 u8 phys_num = (u8)(evData0 >> 24);
6706 u8 ReasonCode = (u8)(evData0 >> 16); 7282 u8 ReasonCode = (u8)(evData0 >> 16);
7283
6707 switch (ReasonCode) { 7284 switch (ReasonCode) {
6708 case MPI_EVENT_IR2_RC_LD_STATE_CHANGED: 7285 case MPI_EVENT_IR2_RC_LD_STATE_CHANGED:
6709 ds = "IR2: LD State Changed"; 7286 snprintf(evStr, EVENT_DESCR_STR_SZ,
7287 "IR2: LD State Changed: "
7288 "id=%d channel=%d phys_num=%d",
7289 id, channel, phys_num);
6710 break; 7290 break;
6711 case MPI_EVENT_IR2_RC_PD_STATE_CHANGED: 7291 case MPI_EVENT_IR2_RC_PD_STATE_CHANGED:
6712 ds = "IR2: PD State Changed"; 7292 snprintf(evStr, EVENT_DESCR_STR_SZ,
7293 "IR2: PD State Changed "
7294 "id=%d channel=%d phys_num=%d",
7295 id, channel, phys_num);
6713 break; 7296 break;
6714 case MPI_EVENT_IR2_RC_BAD_BLOCK_TABLE_FULL: 7297 case MPI_EVENT_IR2_RC_BAD_BLOCK_TABLE_FULL:
6715 ds = "IR2: Bad Block Table Full"; 7298 snprintf(evStr, EVENT_DESCR_STR_SZ,
7299 "IR2: Bad Block Table Full: "
7300 "id=%d channel=%d phys_num=%d",
7301 id, channel, phys_num);
6716 break; 7302 break;
6717 case MPI_EVENT_IR2_RC_PD_INSERTED: 7303 case MPI_EVENT_IR2_RC_PD_INSERTED:
6718 ds = "IR2: PD Inserted"; 7304 snprintf(evStr, EVENT_DESCR_STR_SZ,
7305 "IR2: PD Inserted: "
7306 "id=%d channel=%d phys_num=%d",
7307 id, channel, phys_num);
6719 break; 7308 break;
6720 case MPI_EVENT_IR2_RC_PD_REMOVED: 7309 case MPI_EVENT_IR2_RC_PD_REMOVED:
6721 ds = "IR2: PD Removed"; 7310 snprintf(evStr, EVENT_DESCR_STR_SZ,
7311 "IR2: PD Removed: "
7312 "id=%d channel=%d phys_num=%d",
7313 id, channel, phys_num);
6722 break; 7314 break;
6723 case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED: 7315 case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
6724 ds = "IR2: Foreign CFG Detected"; 7316 snprintf(evStr, EVENT_DESCR_STR_SZ,
7317 "IR2: Foreign CFG Detected: "
7318 "id=%d channel=%d phys_num=%d",
7319 id, channel, phys_num);
6725 break; 7320 break;
6726 case MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR: 7321 case MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR:
6727 ds = "IR2: Rebuild Medium Error"; 7322 snprintf(evStr, EVENT_DESCR_STR_SZ,
7323 "IR2: Rebuild Medium Error: "
7324 "id=%d channel=%d phys_num=%d",
7325 id, channel, phys_num);
7326 break;
7327 case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
7328 snprintf(evStr, EVENT_DESCR_STR_SZ,
7329 "IR2: Dual Port Added: "
7330 "id=%d channel=%d phys_num=%d",
7331 id, channel, phys_num);
7332 break;
7333 case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
7334 snprintf(evStr, EVENT_DESCR_STR_SZ,
7335 "IR2: Dual Port Removed: "
7336 "id=%d channel=%d phys_num=%d",
7337 id, channel, phys_num);
6728 break; 7338 break;
6729 default: 7339 default:
6730 ds = "IR2"; 7340 ds = "IR2";
@@ -6760,13 +7370,18 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6760 case MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE: 7370 case MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
6761 { 7371 {
6762 u8 reason = (u8)(evData0); 7372 u8 reason = (u8)(evData0);
6763 u8 port_num = (u8)(evData0 >> 8);
6764 u16 handle = le16_to_cpu(evData0 >> 16);
6765 7373
6766 snprintf(evStr, EVENT_DESCR_STR_SZ, 7374 switch (reason) {
6767 "SAS Initiator Device Status Change: reason=0x%02x " 7375 case MPI_EVENT_SAS_INIT_RC_ADDED:
6768 "port=%d handle=0x%04x", 7376 ds = "SAS Initiator Status Change: Added";
6769 reason, port_num, handle); 7377 break;
7378 case MPI_EVENT_SAS_INIT_RC_REMOVED:
7379 ds = "SAS Initiator Status Change: Deleted";
7380 break;
7381 default:
7382 ds = "SAS Initiator Status Change";
7383 break;
7384 }
6770 break; 7385 break;
6771 } 7386 }
6772 7387
@@ -6814,6 +7429,24 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6814 break; 7429 break;
6815 } 7430 }
6816 7431
7432 case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
7433 {
7434 u8 reason = (u8)(evData0);
7435
7436 switch (reason) {
7437 case MPI_EVENT_SAS_EXP_RC_ADDED:
7438 ds = "Expander Status Change: Added";
7439 break;
7440 case MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING:
7441 ds = "Expander Status Change: Deleted";
7442 break;
7443 default:
7444 ds = "Expander Status Change";
7445 break;
7446 }
7447 break;
7448 }
7449
6817 /* 7450 /*
6818 * MPT base "custom" events may be added here... 7451 * MPT base "custom" events may be added here...
6819 */ 7452 */
@@ -6823,8 +7456,20 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
6823 } 7456 }
6824 if (ds) 7457 if (ds)
6825 strncpy(evStr, ds, EVENT_DESCR_STR_SZ); 7458 strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
6826}
6827 7459
7460
7461 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
7462 "MPT event:(%02Xh) : %s\n",
7463 ioc->name, event, evStr));
7464
7465 devtverboseprintk(ioc, printk(KERN_DEBUG MYNAM
7466 ": Event data:\n"));
7467 for (ii = 0; ii < le16_to_cpu(pEventReply->EventDataLength); ii++)
7468 devtverboseprintk(ioc, printk(" %08x",
7469 le32_to_cpu(pEventReply->Data[ii])));
7470 devtverboseprintk(ioc, printk(KERN_DEBUG "\n"));
7471}
7472#endif
6828/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 7473/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6829/** 7474/**
6830 * ProcessEventNotification - Route EventNotificationReply to all event handlers 7475 * ProcessEventNotification - Route EventNotificationReply to all event handlers
@@ -6841,37 +7486,24 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
6841{ 7486{
6842 u16 evDataLen; 7487 u16 evDataLen;
6843 u32 evData0 = 0; 7488 u32 evData0 = 0;
6844// u32 evCtx;
6845 int ii; 7489 int ii;
6846 u8 cb_idx; 7490 u8 cb_idx;
6847 int r = 0; 7491 int r = 0;
6848 int handlers = 0; 7492 int handlers = 0;
6849 char evStr[EVENT_DESCR_STR_SZ];
6850 u8 event; 7493 u8 event;
6851 7494
6852 /* 7495 /*
6853 * Do platform normalization of values 7496 * Do platform normalization of values
6854 */ 7497 */
6855 event = le32_to_cpu(pEventReply->Event) & 0xFF; 7498 event = le32_to_cpu(pEventReply->Event) & 0xFF;
6856// evCtx = le32_to_cpu(pEventReply->EventContext);
6857 evDataLen = le16_to_cpu(pEventReply->EventDataLength); 7499 evDataLen = le16_to_cpu(pEventReply->EventDataLength);
6858 if (evDataLen) { 7500 if (evDataLen) {
6859 evData0 = le32_to_cpu(pEventReply->Data[0]); 7501 evData0 = le32_to_cpu(pEventReply->Data[0]);
6860 } 7502 }
6861 7503
6862 EventDescriptionStr(event, evData0, evStr);
6863 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event:(%02Xh) : %s\n",
6864 ioc->name,
6865 event,
6866 evStr));
6867
6868#ifdef CONFIG_FUSION_LOGGING 7504#ifdef CONFIG_FUSION_LOGGING
6869 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT 7505 if (evDataLen)
6870 ": Event data:\n", ioc->name)); 7506 mpt_display_event_info(ioc, pEventReply);
6871 for (ii = 0; ii < evDataLen; ii++)
6872 devtverboseprintk(ioc, printk(" %08x",
6873 le32_to_cpu(pEventReply->Data[ii])));
6874 devtverboseprintk(ioc, printk("\n"));
6875#endif 7507#endif
6876 7508
6877 /* 7509 /*
@@ -6926,8 +7558,9 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
6926 */ 7558 */
6927 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { 7559 for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
6928 if (MptEvHandlers[cb_idx]) { 7560 if (MptEvHandlers[cb_idx]) {
6929 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Routing Event to event handler #%d\n", 7561 devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
6930 ioc->name, cb_idx)); 7562 "Routing Event to event handler #%d\n",
7563 ioc->name, cb_idx));
6931 r += (*(MptEvHandlers[cb_idx]))(ioc, pEventReply); 7564 r += (*(MptEvHandlers[cb_idx]))(ioc, pEventReply);
6932 handlers++; 7565 handlers++;
6933 } 7566 }
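The routing loop above walks MptEvHandlers from the highest callback index down, so every registered protocol driver sees every event. A sketch of the consumer side, assuming the usual mpt_register()/mpt_event_register() pairing; my_reply_cb is hypothetical, and the handler signature mirrors the call made in the loop:

	static u8 my_id;

	static int example_event_handler(MPT_ADAPTER *ioc,
					 EventNotificationReply_t *pEvReply)
	{
		/* inspect le32_to_cpu(pEvReply->Event) & 0xFF, etc. */
		return 1;	/* return values are summed by ProcessEventNotification */
	}

	static int __init example_init(void)
	{
		my_id = mpt_register(my_reply_cb, MPTSCSIH_DRIVER); /* my_reply_cb: hypothetical */
		mpt_event_register(my_id, example_event_handler);
		return 0;
	}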
@@ -7011,8 +7644,6 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
7011 switch (info) { 7644 switch (info) {
7012 case 0x00010000: 7645 case 0x00010000:
7013 desc = "bug! MID not found"; 7646 desc = "bug! MID not found";
7014 if (ioc->reload_fw == 0)
7015 ioc->reload_fw++;
7016 break; 7647 break;
7017 7648
7018 case 0x00020000: 7649 case 0x00020000:
@@ -7613,7 +8244,6 @@ EXPORT_SYMBOL(mpt_get_msg_frame);
7613EXPORT_SYMBOL(mpt_put_msg_frame); 8244EXPORT_SYMBOL(mpt_put_msg_frame);
7614EXPORT_SYMBOL(mpt_put_msg_frame_hi_pri); 8245EXPORT_SYMBOL(mpt_put_msg_frame_hi_pri);
7615EXPORT_SYMBOL(mpt_free_msg_frame); 8246EXPORT_SYMBOL(mpt_free_msg_frame);
7616EXPORT_SYMBOL(mpt_add_sge);
7617EXPORT_SYMBOL(mpt_send_handshake_request); 8247EXPORT_SYMBOL(mpt_send_handshake_request);
7618EXPORT_SYMBOL(mpt_verify_adapter); 8248EXPORT_SYMBOL(mpt_verify_adapter);
7619EXPORT_SYMBOL(mpt_GetIocState); 8249EXPORT_SYMBOL(mpt_GetIocState);
@@ -7650,7 +8280,7 @@ fusion_init(void)
7650 /* Register ourselves (mptbase) in order to facilitate 8280 /* Register ourselves (mptbase) in order to facilitate
7651 * EventNotification handling. 8281 * EventNotification handling.
7652 */ 8282 */
7653 mpt_base_index = mpt_register(mpt_base_reply, MPTBASE_DRIVER); 8283 mpt_base_index = mpt_register(mptbase_reply, MPTBASE_DRIVER);
7654 8284
7655 /* Register for hard reset handling callbacks. 8285 /* Register for hard reset handling callbacks.
7656 */ 8286 */
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index b3e981d2a50..1c8514dc31c 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
76#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR 76#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
77#endif 77#endif
78 78
79#define MPT_LINUX_VERSION_COMMON "3.04.07" 79#define MPT_LINUX_VERSION_COMMON "3.04.10"
80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.07" 80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.09"
81#define WHAT_MAGIC_STRING "@" "(" "#" ")" 81#define WHAT_MAGIC_STRING "@" "(" "#" ")"
82 82
83#define show_mptmod_ver(s,ver) \ 83#define show_mptmod_ver(s,ver) \
@@ -104,6 +104,7 @@
104#endif 104#endif
105 105
106#define MPT_NAME_LENGTH 32 106#define MPT_NAME_LENGTH 32
107#define MPT_KOBJ_NAME_LEN 20
107 108
108#define MPT_PROCFS_MPTBASEDIR "mpt" 109#define MPT_PROCFS_MPTBASEDIR "mpt"
109 /* chg it to "driver/fusion" ? */ 110 /* chg it to "driver/fusion" ? */
@@ -134,6 +135,7 @@
134 135
135#define MPT_COALESCING_TIMEOUT 0x10 136#define MPT_COALESCING_TIMEOUT 0x10
136 137
138
137/* 139/*
138 * SCSI transfer rate defines. 140 * SCSI transfer rate defines.
139 */ 141 */
@@ -161,10 +163,10 @@
161/* 163/*
162 * Set the MAX_SGE value based on user input. 164 * Set the MAX_SGE value based on user input.
163 */ 165 */
164#ifdef CONFIG_FUSION_MAX_SGE 166#ifdef CONFIG_FUSION_MAX_SGE
165#if CONFIG_FUSION_MAX_SGE < 16 167#if CONFIG_FUSION_MAX_SGE < 16
166#define MPT_SCSI_SG_DEPTH 16 168#define MPT_SCSI_SG_DEPTH 16
167#elif CONFIG_FUSION_MAX_SGE > 128 169#elif CONFIG_FUSION_MAX_SGE > 128
168#define MPT_SCSI_SG_DEPTH 128 170#define MPT_SCSI_SG_DEPTH 128
169#else 171#else
170#define MPT_SCSI_SG_DEPTH CONFIG_FUSION_MAX_SGE 172#define MPT_SCSI_SG_DEPTH CONFIG_FUSION_MAX_SGE
@@ -173,6 +175,18 @@
173#define MPT_SCSI_SG_DEPTH 40 175#define MPT_SCSI_SG_DEPTH 40
174#endif 176#endif
175 177
178#ifdef CONFIG_FUSION_MAX_FC_SGE
179#if CONFIG_FUSION_MAX_FC_SGE < 16
180#define MPT_SCSI_FC_SG_DEPTH 16
181#elif CONFIG_FUSION_MAX_FC_SGE > 256
182#define MPT_SCSI_FC_SG_DEPTH 256
183#else
184#define MPT_SCSI_FC_SG_DEPTH CONFIG_FUSION_MAX_FC_SGE
185#endif
186#else
187#define MPT_SCSI_FC_SG_DEPTH 40
188#endif
189
176/* debug print string length used for events and iocstatus */ 190/* debug print string length used for events and iocstatus */
177# define EVENT_DESCR_STR_SZ 100 191# define EVENT_DESCR_STR_SZ 100
178 192
@@ -431,38 +445,36 @@ do { \
431 * IOCTL structure and associated defines 445 * IOCTL structure and associated defines
432 */ 446 */
433 447
434#define MPT_IOCTL_STATUS_DID_IOCRESET 0x01 /* IOC Reset occurred on the current*/
435#define MPT_IOCTL_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */
436#define MPT_IOCTL_STATUS_TIMER_ACTIVE 0x04 /* The timer is running */
437#define MPT_IOCTL_STATUS_SENSE_VALID 0x08 /* Sense data is valid */
438#define MPT_IOCTL_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */
439#define MPT_IOCTL_STATUS_TMTIMER_ACTIVE 0x20 /* The TM timer is running */
440#define MPT_IOCTL_STATUS_TM_FAILED 0x40 /* User TM request failed */
441
442#define MPTCTL_RESET_OK 0x01 /* Issue Bus Reset */ 448#define MPTCTL_RESET_OK 0x01 /* Issue Bus Reset */
443 449
444typedef struct _MPT_IOCTL { 450#define MPT_MGMT_STATUS_RF_VALID 0x01 /* The Reply Frame is VALID */
445 struct _MPT_ADAPTER *ioc; 451#define MPT_MGMT_STATUS_COMMAND_GOOD 0x02 /* Command Status GOOD */
446 u8 ReplyFrame[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */ 452#define MPT_MGMT_STATUS_PENDING 0x04 /* command is pending */
447 u8 sense[MPT_SENSE_BUFFER_ALLOC]; 453#define MPT_MGMT_STATUS_DID_IOCRESET 0x08 /* IOC Reset occurred
448 int wait_done; /* wake-up value for this ioc */ 454 on the current*/
449 u8 rsvd; 455#define MPT_MGMT_STATUS_SENSE_VALID 0x10 /* valid sense info */
450 u8 status; /* current command status */ 456#define MPT_MGMT_STATUS_TIMER_ACTIVE 0x20 /* obsolete */
451 u8 reset; /* 1 if bus reset allowed */ 457#define MPT_MGMT_STATUS_FREE_MF 0x40 /* free the mf from
452 u8 id; /* target for reset */ 458 complete routine */
453 struct mutex ioctl_mutex; 459
454} MPT_IOCTL; 460#define INITIALIZE_MGMT_STATUS(status) \
455 461 status = MPT_MGMT_STATUS_PENDING;
456#define MPT_SAS_MGMT_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */ 462#define CLEAR_MGMT_STATUS(status) \
457#define MPT_SAS_MGMT_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */ 463 status = 0;
458#define MPT_SAS_MGMT_STATUS_TM_FAILED 0x40 /* User TM request failed */ 464#define CLEAR_MGMT_PENDING_STATUS(status) \
459 465 status &= ~MPT_MGMT_STATUS_PENDING;
460typedef struct _MPT_SAS_MGMT { 466#define SET_MGMT_MSG_CONTEXT(msg_context, value) \
467 msg_context = value;
468
469typedef struct _MPT_MGMT {
461 struct mutex mutex; 470 struct mutex mutex;
462 struct completion done; 471 struct completion done;
463 u8 reply[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */ 472 u8 reply[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */
473 u8 sense[MPT_SENSE_BUFFER_ALLOC];
464 u8 status; /* current command status */ 474 u8 status; /* current command status */
465} MPT_SAS_MGMT; 475 int completion_code;
476 u32 msg_context;
477} MPT_MGMT;
466 478
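The MPT_MGMT structure above replaces several ad-hoc wait/wake field pairs with one completion-based pattern that the rest of this patch uses for config pages, ioctls and task management alike. A minimal sketch of that lifecycle, assuming a hypothetical post_frame() and process_reply(); the macros, status bits and fields are the ones defined above:

	static int example_mgmt_cycle(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
	{
		/* mark the command pending before posting it */
		INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)

		post_frame(ioc, mf);	/* hypothetical: queue the request */

		/* the reply handler or IOC-reset path calls complete(&...done) */
		wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, 30 * HZ);

		if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD))
			return -1;	/* timed out, failed, or DID_IOCRESET */

		if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)
			process_reply(ioc->taskmgmt_cmds.reply);	/* hypothetical */

		CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
		return 0;
	}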
467/* 479/*
468 * Event Structure and define 480 * Event Structure and define
@@ -564,6 +576,10 @@ struct mptfc_rport_info
564 u8 flags; 576 u8 flags;
565}; 577};
566 578
579typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr);
580typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length,
581 dma_addr_t dma_addr);
582
567/* 583/*
568 * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS 584 * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
569 */ 585 */
@@ -573,6 +589,10 @@ typedef struct _MPT_ADAPTER
573 int pci_irq; /* This irq */ 589 int pci_irq; /* This irq */
574 char name[MPT_NAME_LENGTH]; /* "iocN" */ 590 char name[MPT_NAME_LENGTH]; /* "iocN" */
575 char prod_name[MPT_NAME_LENGTH]; /* "LSIFC9x9" */ 591 char prod_name[MPT_NAME_LENGTH]; /* "LSIFC9x9" */
592#ifdef CONFIG_FUSION_LOGGING
593 /* used in mpt_display_event_info */
594 char evStr[EVENT_DESCR_STR_SZ];
595#endif
576 char board_name[16]; 596 char board_name[16];
577 char board_assembly[16]; 597 char board_assembly[16];
578 char board_tracer[16]; 598 char board_tracer[16];
@@ -600,6 +620,10 @@ typedef struct _MPT_ADAPTER
600 int reply_depth; /* Num Allocated reply frames */ 620 int reply_depth; /* Num Allocated reply frames */
601 int reply_sz; /* Reply frame size */ 621 int reply_sz; /* Reply frame size */
602 int num_chain; /* Number of chain buffers */ 622 int num_chain; /* Number of chain buffers */
623 MPT_ADD_SGE add_sge; /* Pointer to add_sge
624 function */
625 MPT_ADD_CHAIN add_chain; /* Pointer to add_chain
626 function */
603 /* Pool of buffers for chaining. ReqToChain 627 /* Pool of buffers for chaining. ReqToChain
604 * and ChainToChain track index of chain buffers. 628 * and ChainToChain track index of chain buffers.
605 * ChainBuffer (DMA) virt/phys addresses. 629 * ChainBuffer (DMA) virt/phys addresses.
@@ -640,11 +664,8 @@ typedef struct _MPT_ADAPTER
640 RaidCfgData raid_data; /* Raid config. data */ 664 RaidCfgData raid_data; /* Raid config. data */
641 SasCfgData sas_data; /* Sas config. data */ 665 SasCfgData sas_data; /* Sas config. data */
642 FcCfgData fc_data; /* Fc config. data */ 666 FcCfgData fc_data; /* Fc config. data */
643 MPT_IOCTL *ioctl; /* ioctl data pointer */
644 struct proc_dir_entry *ioc_dentry; 667 struct proc_dir_entry *ioc_dentry;
645 struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */ 668 struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */
646 spinlock_t diagLock; /* diagnostic reset lock */
647 int diagPending;
648 u32 biosVersion; /* BIOS version from IO Unit Page 2 */ 669 u32 biosVersion; /* BIOS version from IO Unit Page 2 */
649 int eventTypes; /* Event logging parameters */ 670 int eventTypes; /* Event logging parameters */
650 int eventContext; /* Next event context */ 671 int eventContext; /* Next event context */
@@ -652,7 +673,6 @@ typedef struct _MPT_ADAPTER
652 struct _mpt_ioctl_events *events; /* pointer to event log */ 673 struct _mpt_ioctl_events *events; /* pointer to event log */
653 u8 *cached_fw; /* Pointer to FW */ 674 u8 *cached_fw; /* Pointer to FW */
654 dma_addr_t cached_fw_dma; 675 dma_addr_t cached_fw_dma;
655 struct list_head configQ; /* linked list of config. requests */
656 int hs_reply_idx; 676 int hs_reply_idx;
657#ifndef MFCNT 677#ifndef MFCNT
658 u32 pad0; 678 u32 pad0;
@@ -665,9 +685,6 @@ typedef struct _MPT_ADAPTER
665 IOCFactsReply_t facts; 685 IOCFactsReply_t facts;
666 PortFactsReply_t pfacts[2]; 686 PortFactsReply_t pfacts[2];
667 FCPortPage0_t fc_port_page0[2]; 687 FCPortPage0_t fc_port_page0[2];
668 struct timer_list persist_timer; /* persist table timer */
669 int persist_wait_done; /* persist completion flag */
670 u8 persist_reply_frame[MPT_DEFAULT_FRAME_SIZE]; /* persist reply */
671 LANPage0_t lan_cnfg_page0; 688 LANPage0_t lan_cnfg_page0;
672 LANPage1_t lan_cnfg_page1; 689 LANPage1_t lan_cnfg_page1;
673 690
@@ -682,23 +699,44 @@ typedef struct _MPT_ADAPTER
682 int aen_event_read_flag; /* flag to indicate event log was read*/ 699 int aen_event_read_flag; /* flag to indicate event log was read*/
683 u8 FirstWhoInit; 700 u8 FirstWhoInit;
684 u8 upload_fw; /* If set, do a fw upload */ 701 u8 upload_fw; /* If set, do a fw upload */
685 u8 reload_fw; /* Force a FW Reload on next reset */
686 u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */ 702 u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */
687 u8 pad1[4]; 703 u8 pad1[4];
688 u8 DoneCtx; 704 u8 DoneCtx;
689 u8 TaskCtx; 705 u8 TaskCtx;
690 u8 InternalCtx; 706 u8 InternalCtx;
691 spinlock_t initializing_hba_lock;
692 int initializing_hba_lock_flag;
693 struct list_head list; 707 struct list_head list;
694 struct net_device *netdev; 708 struct net_device *netdev;
695 struct list_head sas_topology; 709 struct list_head sas_topology;
696 struct mutex sas_topology_mutex; 710 struct mutex sas_topology_mutex;
711
712 struct workqueue_struct *fw_event_q;
713 struct list_head fw_event_list;
714 spinlock_t fw_event_lock;
715 u8 fw_events_off; /* if '1', then ignore events */
716 char fw_event_q_name[MPT_KOBJ_NAME_LEN];
717
697 struct mutex sas_discovery_mutex; 718 struct mutex sas_discovery_mutex;
698 u8 sas_discovery_runtime; 719 u8 sas_discovery_runtime;
699 u8 sas_discovery_ignore_events; 720 u8 sas_discovery_ignore_events;
721
722 /* port_info object for the host */
723 struct mptsas_portinfo *hba_port_info;
724 u64 hba_port_sas_addr;
725 u16 hba_port_num_phy;
726 struct list_head sas_device_info_list;
727 struct mutex sas_device_info_mutex;
728 u8 old_sas_discovery_protocal;
729 u8 sas_discovery_quiesce_io;
700 int sas_index; /* index referencing */ 730 int sas_index; /* index referencing */
701 MPT_SAS_MGMT sas_mgmt; 731 MPT_MGMT sas_mgmt;
732 MPT_MGMT mptbase_cmds; /* for sending config pages */
733 MPT_MGMT internal_cmds;
734 MPT_MGMT taskmgmt_cmds;
735 MPT_MGMT ioctl_cmds;
736 spinlock_t taskmgmt_lock; /* diagnostic reset lock */
737 int taskmgmt_in_progress;
738 u8 taskmgmt_quiesce_io;
739 u8 ioc_reset_in_progress;
702 struct work_struct sas_persist_task; 740 struct work_struct sas_persist_task;
703 741
704 struct work_struct fc_setup_reset_work; 742 struct work_struct fc_setup_reset_work;
@@ -707,15 +745,27 @@ typedef struct _MPT_ADAPTER
707 u8 fc_link_speed[2]; 745 u8 fc_link_speed[2];
708 spinlock_t fc_rescan_work_lock; 746 spinlock_t fc_rescan_work_lock;
709 struct work_struct fc_rescan_work; 747 struct work_struct fc_rescan_work;
710 char fc_rescan_work_q_name[20]; 748 char fc_rescan_work_q_name[MPT_KOBJ_NAME_LEN];
711 struct workqueue_struct *fc_rescan_work_q; 749 struct workqueue_struct *fc_rescan_work_q;
750
751 /* driver forced bus resets count */
752 unsigned long hard_resets;
753 /* fw/external bus resets count */
754 unsigned long soft_resets;
755 /* cmd timeouts */
756 unsigned long timeouts;
757
712 struct scsi_cmnd **ScsiLookup; 758 struct scsi_cmnd **ScsiLookup;
713 spinlock_t scsi_lookup_lock; 759 spinlock_t scsi_lookup_lock;
714 760 u64 dma_mask;
715 char reset_work_q_name[20]; 761 u32 broadcast_aen_busy;
762 char reset_work_q_name[MPT_KOBJ_NAME_LEN];
716 struct workqueue_struct *reset_work_q; 763 struct workqueue_struct *reset_work_q;
717 struct delayed_work fault_reset_work; 764 struct delayed_work fault_reset_work;
718 spinlock_t fault_reset_work_lock; 765
766 u8 sg_addr_size;
767 u8 in_rescan;
768 u8 SGE_size;
719 769
720} MPT_ADAPTER; 770} MPT_ADAPTER;
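With add_sge, sg_addr_size and SGE_size now living in MPT_ADAPTER, the scatter-gather element writer is selected per adapter at run time instead of being fixed at compile time by sizeof(dma_addr_t). A sketch of how an init path might wire this up; the two writer stubs are illustrative, only the field names come from this header:

	static void add_sge_32(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
	{ /* write a 32-bit simple SGE at pAddr */ }

	static void add_sge_64(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
	{ /* write a 64-bit simple SGE at pAddr */ }

	static void example_pick_sge_writer(MPT_ADAPTER *ioc, int use_64bit_dma)
	{
		if (use_64bit_dma) {
			ioc->add_sge = add_sge_64;
			ioc->sg_addr_size = sizeof(u64);
		} else {
			ioc->add_sge = add_sge_32;
			ioc->sg_addr_size = sizeof(u32);
		}
		/* one simple SGE = FlagsLength word + address */
		ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
	}

This is also why mptctl.c below divides by ioc->SGE_size where it used to hard-code sizeof(dma_addr_t) + sizeof(u32).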
721 771
@@ -753,13 +803,14 @@ typedef struct _mpt_sge {
753 dma_addr_t Address; 803 dma_addr_t Address;
754} MptSge_t; 804} MptSge_t;
755 805
756#define mpt_addr_size() \
757 ((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SGE_FLAGS_64_BIT_ADDRESSING : \
758 MPI_SGE_FLAGS_32_BIT_ADDRESSING)
759 806
760#define mpt_msg_flags() \ 807#define mpt_msg_flags(ioc) \
761 ((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \ 808 (ioc->sg_addr_size == sizeof(u64)) ? \
762 MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32) 809 MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \
810 MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32
811
812#define MPT_SGE_FLAGS_64_BIT_ADDRESSING \
813 (MPI_SGE_FLAGS_64_BIT_ADDRESSING << MPI_SGE_FLAGS_SHIFT)
763 814
764/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 815/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
765/* 816/*
@@ -835,22 +886,14 @@ typedef struct _MPT_SCSI_HOST {
835 /* Pool of memory for holding SCpnts before doing 886 /* Pool of memory for holding SCpnts before doing
836 * OS callbacks. freeQ is the free pool. 887 * OS callbacks. freeQ is the free pool.
837 */ 888 */
838 u8 tmPending;
839 u8 resetPending;
840 u8 negoNvram; /* DV disabled, nego NVRAM */ 889 u8 negoNvram; /* DV disabled, nego NVRAM */
841 u8 pad1; 890 u8 pad1;
842 u8 tmState;
843 u8 rsvd[2]; 891 u8 rsvd[2];
844 MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */ 892 MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */
845 struct scsi_cmnd *abortSCpnt; 893 struct scsi_cmnd *abortSCpnt;
846 MPT_LOCAL_REPLY localReply; /* internal cmd reply struct */ 894 MPT_LOCAL_REPLY localReply; /* internal cmd reply struct */
847 unsigned long hard_resets; /* driver forced bus resets count */
848 unsigned long soft_resets; /* fw/external bus resets count */
849 unsigned long timeouts; /* cmd timeouts */
850 ushort sel_timeout[MPT_MAX_FC_DEVICES]; 895 ushort sel_timeout[MPT_MAX_FC_DEVICES];
851 char *info_kbuf; 896 char *info_kbuf;
852 wait_queue_head_t scandv_waitq;
853 int scandv_wait_done;
854 long last_queue_full; 897 long last_queue_full;
855 u16 tm_iocstatus; 898 u16 tm_iocstatus;
856 u16 spi_pending; 899 u16 spi_pending;
@@ -870,21 +913,16 @@ struct scsi_cmnd;
870 * Generic structure passed to the base mpt_config function. 913 * Generic structure passed to the base mpt_config function.
871 */ 914 */
872typedef struct _x_config_parms { 915typedef struct _x_config_parms {
873 struct list_head linkage; /* linked list */
874 struct timer_list timer; /* timer function for this request */
875 union { 916 union {
876 ConfigExtendedPageHeader_t *ehdr; 917 ConfigExtendedPageHeader_t *ehdr;
877 ConfigPageHeader_t *hdr; 918 ConfigPageHeader_t *hdr;
878 } cfghdr; 919 } cfghdr;
879 dma_addr_t physAddr; 920 dma_addr_t physAddr;
880 int wait_done; /* wait for this request */
881 u32 pageAddr; /* properly formatted */ 921 u32 pageAddr; /* properly formatted */
922 u16 status;
882 u8 action; 923 u8 action;
883 u8 dir; 924 u8 dir;
884 u8 timeout; /* seconds */ 925 u8 timeout; /* seconds */
885 u8 pad1;
886 u16 status;
887 u16 pad2;
888} CONFIGPARMS; 926} CONFIGPARMS;
889 927
890/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 928/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -909,7 +947,6 @@ extern MPT_FRAME_HDR *mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc);
909extern void mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); 947extern void mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
910extern void mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); 948extern void mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
911extern void mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); 949extern void mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
912extern void mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr);
913 950
914extern int mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag); 951extern int mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag);
915extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp); 952extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp);
@@ -922,6 +959,12 @@ extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
922extern int mpt_findImVolumes(MPT_ADAPTER *ioc); 959extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
923extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode); 960extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
924extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk); 961extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk);
962extern int mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
963 pRaidPhysDiskPage1_t phys_disk);
964extern int mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc,
965 u8 phys_disk_num);
966extern int mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
967extern void mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
925extern void mpt_halt_firmware(MPT_ADAPTER *ioc); 968extern void mpt_halt_firmware(MPT_ADAPTER *ioc);
926 969
927 970
@@ -959,7 +1002,6 @@ extern int mpt_fwfault_debug;
959#define MPT_SGE_FLAGS_END_OF_BUFFER (0x40000000) 1002#define MPT_SGE_FLAGS_END_OF_BUFFER (0x40000000)
960#define MPT_SGE_FLAGS_LOCAL_ADDRESS (0x08000000) 1003#define MPT_SGE_FLAGS_LOCAL_ADDRESS (0x08000000)
961#define MPT_SGE_FLAGS_DIRECTION (0x04000000) 1004#define MPT_SGE_FLAGS_DIRECTION (0x04000000)
962#define MPT_SGE_FLAGS_ADDRESSING (mpt_addr_size() << MPI_SGE_FLAGS_SHIFT)
963#define MPT_SGE_FLAGS_END_OF_LIST (0x01000000) 1005#define MPT_SGE_FLAGS_END_OF_LIST (0x01000000)
964 1006
965#define MPT_SGE_FLAGS_TRANSACTION_ELEMENT (0x00000000) 1007#define MPT_SGE_FLAGS_TRANSACTION_ELEMENT (0x00000000)
@@ -972,14 +1014,12 @@ extern int mpt_fwfault_debug;
972 MPT_SGE_FLAGS_END_OF_BUFFER | \ 1014 MPT_SGE_FLAGS_END_OF_BUFFER | \
973 MPT_SGE_FLAGS_END_OF_LIST | \ 1015 MPT_SGE_FLAGS_END_OF_LIST | \
974 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \ 1016 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
975 MPT_SGE_FLAGS_ADDRESSING | \
976 MPT_TRANSFER_IOC_TO_HOST) 1017 MPT_TRANSFER_IOC_TO_HOST)
977#define MPT_SGE_FLAGS_SSIMPLE_WRITE \ 1018#define MPT_SGE_FLAGS_SSIMPLE_WRITE \
978 (MPT_SGE_FLAGS_LAST_ELEMENT | \ 1019 (MPT_SGE_FLAGS_LAST_ELEMENT | \
979 MPT_SGE_FLAGS_END_OF_BUFFER | \ 1020 MPT_SGE_FLAGS_END_OF_BUFFER | \
980 MPT_SGE_FLAGS_END_OF_LIST | \ 1021 MPT_SGE_FLAGS_END_OF_LIST | \
981 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \ 1022 MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
982 MPT_SGE_FLAGS_ADDRESSING | \
983 MPT_TRANSFER_HOST_TO_IOC) 1023 MPT_TRANSFER_HOST_TO_IOC)
984 1024
985/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1025/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index c63817117c0..9b2e2198aee 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -84,6 +84,7 @@ MODULE_VERSION(my_VERSION);
84/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 84/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
85 85
86static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS; 86static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS;
87static u8 mptctl_taskmgmt_id = MPT_MAX_PROTOCOL_DRIVERS;
87 88
88static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait ); 89static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait );
89 90
@@ -127,10 +128,7 @@ static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags
127 struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); 128 struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
128static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, 129static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
129 struct buflist *buflist, MPT_ADAPTER *ioc); 130 struct buflist *buflist, MPT_ADAPTER *ioc);
130static void mptctl_timeout_expired (MPT_IOCTL *ioctl); 131static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function);
131static int mptctl_bus_reset(MPT_IOCTL *ioctl);
132static int mptctl_set_tm_flags(MPT_SCSI_HOST *hd);
133static void mptctl_free_tm_flags(MPT_ADAPTER *ioc);
134 132
135/* 133/*
136 * Reset Handler cleanup function 134 * Reset Handler cleanup function
@@ -183,10 +181,10 @@ mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock)
183 int rc = 0; 181 int rc = 0;
184 182
185 if (nonblock) { 183 if (nonblock) {
186 if (!mutex_trylock(&ioc->ioctl->ioctl_mutex)) 184 if (!mutex_trylock(&ioc->ioctl_cmds.mutex))
187 rc = -EAGAIN; 185 rc = -EAGAIN;
188 } else { 186 } else {
189 if (mutex_lock_interruptible(&ioc->ioctl->ioctl_mutex)) 187 if (mutex_lock_interruptible(&ioc->ioctl_cmds.mutex))
190 rc = -ERESTARTSYS; 188 rc = -ERESTARTSYS;
191 } 189 }
192 return rc; 190 return rc;
@@ -202,99 +200,78 @@ mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock)
202static int 200static int
203mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) 201mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
204{ 202{
205 char *sense_data; 203 char *sense_data;
206 int sz, req_index; 204 int req_index;
207 u16 iocStatus; 205 int sz;
208 u8 cmd;
209 206
210 if (req) 207 if (!req)
211 cmd = req->u.hdr.Function; 208 return 0;
212 else
213 return 1;
214 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tcompleting mpi function (0x%02X), req=%p, "
215 "reply=%p\n", ioc->name, req->u.hdr.Function, req, reply));
216
217 if (ioc->ioctl) {
218
219 if (reply==NULL) {
220
221 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_reply() NULL Reply "
222 "Function=%x!\n", ioc->name, cmd));
223 209
224 ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; 210 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "completing mpi function "
225 ioc->ioctl->reset &= ~MPTCTL_RESET_OK; 211 "(0x%02X), req=%p, reply=%p\n", ioc->name, req->u.hdr.Function,
212 req, reply));
226 213
227 /* We are done, issue wake up 214 /*
228 */ 215 * Handling continuation of the same reply. Processing the first
229 ioc->ioctl->wait_done = 1; 216 * reply, and eating the other replies that come later.
230 wake_up (&mptctl_wait); 217 */
231 return 1; 218 if (ioc->ioctl_cmds.msg_context != req->u.hdr.MsgContext)
219 goto out_continuation;
232 220
233 } 221 ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
234 222
235 /* Copy the reply frame (which must exist 223 if (!reply)
236 * for non-SCSI I/O) to the IOC structure. 224 goto out;
237 */
238 memcpy(ioc->ioctl->ReplyFrame, reply,
239 min(ioc->reply_sz, 4*reply->u.reply.MsgLength));
240 ioc->ioctl->status |= MPT_IOCTL_STATUS_RF_VALID;
241 225
242 /* Set the command status to GOOD if IOC Status is GOOD 226 ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
243 * OR if SCSI I/O cmd and data underrun or recovered error. 227 sz = min(ioc->reply_sz, 4*reply->u.reply.MsgLength);
244 */ 228 memcpy(ioc->ioctl_cmds.reply, reply, sz);
245 iocStatus = le16_to_cpu(reply->u.reply.IOCStatus) & MPI_IOCSTATUS_MASK;
246 if (iocStatus == MPI_IOCSTATUS_SUCCESS)
247 ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
248
249 if (iocStatus || reply->u.reply.IOCLogInfo)
250 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tiocstatus (0x%04X), "
251 "loginfo (0x%08X)\n", ioc->name,
252 iocStatus,
253 le32_to_cpu(reply->u.reply.IOCLogInfo)));
254
255 if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) ||
256 (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
257
258 if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState)
259 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
260 "\tscsi_status (0x%02x), scsi_state (0x%02x), "
261 "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name,
262 reply->u.sreply.SCSIStatus,
263 reply->u.sreply.SCSIState,
264 le16_to_cpu(reply->u.sreply.TaskTag),
265 le32_to_cpu(reply->u.sreply.TransferCount)));
266
267 ioc->ioctl->reset &= ~MPTCTL_RESET_OK;
268
269 if ((iocStatus == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN) ||
270 (iocStatus == MPI_IOCSTATUS_SCSI_RECOVERED_ERROR)) {
271 ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
272 }
273 }
274 229
275 /* Copy the sense data - if present 230 if (reply->u.reply.IOCStatus || reply->u.reply.IOCLogInfo)
276 */ 231 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
277 if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) && 232 "iocstatus (0x%04X), loginfo (0x%08X)\n", ioc->name,
278 (reply->u.sreply.SCSIState & 233 le16_to_cpu(reply->u.reply.IOCStatus),
279 MPI_SCSI_STATE_AUTOSENSE_VALID)){ 234 le32_to_cpu(reply->u.reply.IOCLogInfo)));
235
236 if ((req->u.hdr.Function == MPI_FUNCTION_SCSI_IO_REQUEST) ||
237 (req->u.hdr.Function ==
238 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
239
240 if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState)
241 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
242 "scsi_status (0x%02x), scsi_state (0x%02x), "
243 "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name,
244 reply->u.sreply.SCSIStatus,
245 reply->u.sreply.SCSIState,
246 le16_to_cpu(reply->u.sreply.TaskTag),
247 le32_to_cpu(reply->u.sreply.TransferCount)));
248
249 if (reply->u.sreply.SCSIState &
250 MPI_SCSI_STATE_AUTOSENSE_VALID) {
280 sz = req->u.scsireq.SenseBufferLength; 251 sz = req->u.scsireq.SenseBufferLength;
281 req_index = 252 req_index =
282 le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx); 253 le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
283 sense_data = 254 sense_data = ((u8 *)ioc->sense_buf_pool +
284 ((u8 *)ioc->sense_buf_pool +
285 (req_index * MPT_SENSE_BUFFER_ALLOC)); 255 (req_index * MPT_SENSE_BUFFER_ALLOC));
286 memcpy(ioc->ioctl->sense, sense_data, sz); 256 memcpy(ioc->ioctl_cmds.sense, sense_data, sz);
287 ioc->ioctl->status |= MPT_IOCTL_STATUS_SENSE_VALID; 257 ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_SENSE_VALID;
288 } 258 }
259 }
289 260
290 if (cmd == MPI_FUNCTION_SCSI_TASK_MGMT) 261 out:
291 mptctl_free_tm_flags(ioc); 262 /* We are done, issue wake up
292 263 */
293 /* We are done, issue wake up 264 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
294 */ 265 if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT)
295 ioc->ioctl->wait_done = 1; 266 mpt_clear_taskmgmt_in_progress_flag(ioc);
296 wake_up (&mptctl_wait); 267 ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
268 complete(&ioc->ioctl_cmds.done);
297 } 269 }
270
271 out_continuation:
272 if (reply && (reply->u.reply.MsgFlags &
273 MPI_MSGFLAGS_CONTINUATION_REPLY))
274 return 0;
298 return 1; 275 return 1;
299} 276}
300 277
@@ -304,30 +281,66 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
304 * Expecting an interrupt, however timed out. 281 * Expecting an interrupt, however timed out.
305 * 282 *
306 */ 283 */
307static void mptctl_timeout_expired (MPT_IOCTL *ioctl) 284static void
285mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
308{ 286{
309 int rc = 1; 287 unsigned long flags;
310 288
311 if (ioctl == NULL) 289 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
312 return; 290 ioc->name, __func__));
313 dctlprintk(ioctl->ioc,
314 printk(MYIOC_s_DEBUG_FMT ": Timeout Expired! Host %d\n",
315 ioctl->ioc->name, ioctl->ioc->id));
316 291
317 ioctl->wait_done = 0; 292 if (mpt_fwfault_debug)
318 if (ioctl->reset & MPTCTL_RESET_OK) 293 mpt_halt_firmware(ioc);
319 rc = mptctl_bus_reset(ioctl);
320 294
321 if (rc) { 295 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
322 /* Issue a reset for this device. 296 if (ioc->ioc_reset_in_progress) {
323 * The IOC is not responding. 297 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
324 */ 298 CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
325 dctlprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n", 299 mpt_free_msg_frame(ioc, mf);
326 ioctl->ioc->name)); 300 return;
327 mpt_HardResetHandler(ioctl->ioc, CAN_SLEEP);
328 } 301 }
329 return; 302 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
303
330 304
305 if (!mptctl_bus_reset(ioc, mf->u.hdr.Function))
306 return;
307
308 /* Issue a reset for this device.
309 * The IOC is not responding.
310 */
311 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n",
312 ioc->name));
313 CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
314 mpt_HardResetHandler(ioc, CAN_SLEEP);
315 mpt_free_msg_frame(ioc, mf);
316}
317
318static int
319mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
320{
321 if (!mf)
322 return 0;
323
324 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
325 "TaskMgmt completed (mf=%p, mr=%p)\n",
326 ioc->name, mf, mr));
327
328 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
329
330 if (!mr)
331 goto out;
332
333 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
334 memcpy(ioc->taskmgmt_cmds.reply, mr,
335 min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
336 out:
337 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
338 mpt_clear_taskmgmt_in_progress_flag(ioc);
339 ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
340 complete(&ioc->taskmgmt_cmds.done);
341 return 1;
342 }
343 return 0;
331} 344}
332 345
333/* mptctl_bus_reset 346/* mptctl_bus_reset
@@ -335,133 +348,150 @@ static void mptctl_timeout_expired (MPT_IOCTL *ioctl)
335 * Bus reset code. 348 * Bus reset code.
336 * 349 *
337 */ 350 */
338static int mptctl_bus_reset(MPT_IOCTL *ioctl) 351static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
339{ 352{
340 MPT_FRAME_HDR *mf; 353 MPT_FRAME_HDR *mf;
341 SCSITaskMgmt_t *pScsiTm; 354 SCSITaskMgmt_t *pScsiTm;
342 MPT_SCSI_HOST *hd; 355 SCSITaskMgmtReply_t *pScsiTmReply;
343 int ii; 356 int ii;
344 int retval=0; 357 int retval;
345 358 unsigned long timeout;
346 359 unsigned long time_count;
347 ioctl->reset &= ~MPTCTL_RESET_OK; 360 u16 iocstatus;
348 361
349 if (ioctl->ioc->sh == NULL) 362 /* bus reset is only good for SCSI IO, RAID PASSTHRU */
363 if ((function != MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) &&
364 (function != MPI_FUNCTION_SCSI_IO_REQUEST)) {
365 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
366 "TaskMgmt, not SCSI_IO!!\n", ioc->name));
350 return -EPERM; 367 return -EPERM;
368 }
351 369
352 hd = shost_priv(ioctl->ioc->sh); 370 mutex_lock(&ioc->taskmgmt_cmds.mutex);
353 if (hd == NULL) 371 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
372 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
354 return -EPERM; 373 return -EPERM;
374 }
355 375
356 /* Single threading .... 376 retval = 0;
357 */
358 if (mptctl_set_tm_flags(hd) != 0)
359 return -EPERM;
360 377
361 /* Send request 378 /* Send request
362 */ 379 */
363 if ((mf = mpt_get_msg_frame(mptctl_id, ioctl->ioc)) == NULL) { 380 mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc);
364 dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt, no msg frames!!\n", 381 if (mf == NULL) {
365 ioctl->ioc->name)); 382 dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
366 383 "TaskMgmt, no msg frames!!\n", ioc->name));
367 mptctl_free_tm_flags(ioctl->ioc); 384 mpt_clear_taskmgmt_in_progress_flag(ioc);
368 return -ENOMEM; 385 retval = -ENOMEM;
386 goto mptctl_bus_reset_done;
369 } 387 }
370 388
371 dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n", 389 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
372 ioctl->ioc->name, mf)); 390 ioc->name, mf));
373 391
374 pScsiTm = (SCSITaskMgmt_t *) mf; 392 pScsiTm = (SCSITaskMgmt_t *) mf;
375 pScsiTm->TargetID = ioctl->id; 393 memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
376 pScsiTm->Bus = hd->port; /* 0 */
377 pScsiTm->ChainOffset = 0;
378 pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; 394 pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
379 pScsiTm->Reserved = 0;
380 pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; 395 pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
381 pScsiTm->Reserved1 = 0;
382 pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION; 396 pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
383 397 pScsiTm->TargetID = 0;
398 pScsiTm->Bus = 0;
399 pScsiTm->ChainOffset = 0;
400 pScsiTm->Reserved = 0;
401 pScsiTm->Reserved1 = 0;
402 pScsiTm->TaskMsgContext = 0;
384 for (ii= 0; ii < 8; ii++) 403 for (ii= 0; ii < 8; ii++)
385 pScsiTm->LUN[ii] = 0; 404 pScsiTm->LUN[ii] = 0;
386
387 for (ii=0; ii < 7; ii++) 405 for (ii=0; ii < 7; ii++)
388 pScsiTm->Reserved2[ii] = 0; 406 pScsiTm->Reserved2[ii] = 0;
389 407
390 pScsiTm->TaskMsgContext = 0; 408 switch (ioc->bus_type) {
391 dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT 409 case FC:
392 "mptctl_bus_reset: issued.\n", ioctl->ioc->name)); 410 timeout = 40;
393 411 break;
394 DBG_DUMP_TM_REQUEST_FRAME(ioctl->ioc, (u32 *)mf); 412 case SAS:
413 timeout = 30;
414 break;
415 case SPI:
416 default:
417 timeout = 2;
418 break;
419 }
395 420
396 ioctl->wait_done=0; 421 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
422 "TaskMgmt type=%d timeout=%ld\n",
423 ioc->name, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, timeout));
397 424
398 if ((ioctl->ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && 425 INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
399 (ioctl->ioc->facts.MsgVersion >= MPI_VERSION_01_05)) 426 CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
400 mpt_put_msg_frame_hi_pri(mptctl_id, ioctl->ioc, mf); 427 time_count = jiffies;
428 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
429 (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
430 mpt_put_msg_frame_hi_pri(mptctl_taskmgmt_id, ioc, mf);
401 else { 431 else {
402 retval = mpt_send_handshake_request(mptctl_id, ioctl->ioc, 432 retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc,
403 sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP); 433 sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP);
404 if (retval != 0) { 434 if (retval != 0) {
405 dfailprintk(ioctl->ioc, printk(MYIOC_s_ERR_FMT "_send_handshake FAILED!" 435 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
406 " (hd %p, ioc %p, mf %p) \n", hd->ioc->name, hd, 436 "TaskMgmt send_handshake FAILED!"
407 hd->ioc, mf)); 437 " (ioc %p, mf %p, rc=%d) \n", ioc->name,
438 ioc, mf, retval));
439 mpt_clear_taskmgmt_in_progress_flag(ioc);
408 goto mptctl_bus_reset_done; 440 goto mptctl_bus_reset_done;
409 } 441 }
410 } 442 }
411 443
412 /* Now wait for the command to complete */ 444 /* Now wait for the command to complete */
413 ii = wait_event_timeout(mptctl_wait, 445 ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ);
414 ioctl->wait_done == 1, 446 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
415 HZ*5 /* 5 second timeout */); 447 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
448 "TaskMgmt failed\n", ioc->name));
449 mpt_free_msg_frame(ioc, mf);
450 mpt_clear_taskmgmt_in_progress_flag(ioc);
451 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
452 retval = 0;
453 else
454 retval = -1; /* return failure */
455 goto mptctl_bus_reset_done;
456 }
416 457
417 if(ii <=0 && (ioctl->wait_done != 1 )) { 458 if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
418 mpt_free_msg_frame(hd->ioc, mf); 459 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
419 ioctl->wait_done = 0; 460 "TaskMgmt failed\n", ioc->name));
461 retval = -1; /* return failure */
462 goto mptctl_bus_reset_done;
463 }
464
465 pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
466 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
467 "TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, "
468 "iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, "
469 "term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus,
470 pScsiTmReply->TargetID, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
471 le16_to_cpu(pScsiTmReply->IOCStatus),
472 le32_to_cpu(pScsiTmReply->IOCLogInfo),
473 pScsiTmReply->ResponseCode,
474 le32_to_cpu(pScsiTmReply->TerminationCount)));
475
476 iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
477
478 if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
479 iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED ||
480 iocstatus == MPI_IOCSTATUS_SUCCESS)
481 retval = 0;
482 else {
483 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
484 "TaskMgmt failed\n", ioc->name));
420 retval = -1; /* return failure */ 485 retval = -1; /* return failure */
421 } 486 }
422 487
423mptctl_bus_reset_done:
424 488
425 mptctl_free_tm_flags(ioctl->ioc); 489 mptctl_bus_reset_done:
490 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
491 CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
426 return retval; 492 return retval;
427} 493}
428 494
429static int
430mptctl_set_tm_flags(MPT_SCSI_HOST *hd) {
431 unsigned long flags;
432
433 spin_lock_irqsave(&hd->ioc->FreeQlock, flags);
434
435 if (hd->tmState == TM_STATE_NONE) {
436 hd->tmState = TM_STATE_IN_PROGRESS;
437 hd->tmPending = 1;
438 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
439 } else {
440 spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
441 return -EBUSY;
442 }
443
444 return 0;
445}
446
447static void
448mptctl_free_tm_flags(MPT_ADAPTER *ioc)
449{
450 MPT_SCSI_HOST * hd;
451 unsigned long flags;
452
453 hd = shost_priv(ioc->sh);
454 if (hd == NULL)
455 return;
456
457 spin_lock_irqsave(&ioc->FreeQlock, flags);
458
459 hd->tmState = TM_STATE_NONE;
460 hd->tmPending = 0;
461 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
462
463 return;
464}
465 495
466/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 496/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
467/* mptctl_ioc_reset 497/* mptctl_ioc_reset
@@ -473,22 +503,23 @@ mptctl_free_tm_flags(MPT_ADAPTER *ioc)
473static int 503static int
474mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 504mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
475{ 505{
476 MPT_IOCTL *ioctl = ioc->ioctl;
477 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOC %s_reset routed to IOCTL driver!\n", ioc->name,
478 reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
479 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
480
481 if(ioctl == NULL)
482 return 1;
483
484 switch(reset_phase) { 506 switch(reset_phase) {
485 case MPT_IOC_SETUP_RESET: 507 case MPT_IOC_SETUP_RESET:
486 ioctl->status |= MPT_IOCTL_STATUS_DID_IOCRESET; 508 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
509 "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
510 break;
511 case MPT_IOC_PRE_RESET:
512 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
513 "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
487 break; 514 break;
488 case MPT_IOC_POST_RESET: 515 case MPT_IOC_POST_RESET:
489 ioctl->status &= ~MPT_IOCTL_STATUS_DID_IOCRESET; 516 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
517 "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
518 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
519 ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_DID_IOCRESET;
520 complete(&ioc->ioctl_cmds.done);
521 }
490 break; 522 break;
491 case MPT_IOC_PRE_RESET:
492 default: 523 default:
493 break; 524 break;
494 } 525 }
@@ -642,7 +673,7 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
642 else 673 else
643 ret = -EINVAL; 674 ret = -EINVAL;
644 675
645 mutex_unlock(&iocp->ioctl->ioctl_mutex); 676 mutex_unlock(&iocp->ioctl_cmds.mutex);
646 677
647 return ret; 678 return ret;
648} 679}
@@ -758,6 +789,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
758 int sge_offset = 0; 789 int sge_offset = 0;
759 u16 iocstat; 790 u16 iocstat;
760 pFWDownloadReply_t ReplyMsg = NULL; 791 pFWDownloadReply_t ReplyMsg = NULL;
792 unsigned long timeleft;
761 793
762 if (mpt_verify_adapter(ioc, &iocp) < 0) { 794 if (mpt_verify_adapter(ioc, &iocp) < 0) {
763 printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n", 795 printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n",
@@ -841,8 +873,9 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
841 * 96 8 873 * 96 8
842 * 64 4 874 * 64 4
843 */ 875 */
844 maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t)) 876 maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) -
845 / (sizeof(dma_addr_t) + sizeof(u32)); 877 sizeof(FWDownloadTCSGE_t))
878 / iocp->SGE_size;
846 if (numfrags > maxfrags) { 879 if (numfrags > maxfrags) {
847 ret = -EMLINK; 880 ret = -EMLINK;
848 goto fwdl_out; 881 goto fwdl_out;
@@ -870,7 +903,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
870 if (nib == 0 || nib == 3) { 903 if (nib == 0 || nib == 3) {
871 ; 904 ;
872 } else if (sgIn->Address) { 905 } else if (sgIn->Address) {
873 mpt_add_sge(sgOut, sgIn->FlagsLength, sgIn->Address); 906 iocp->add_sge(sgOut, sgIn->FlagsLength, sgIn->Address);
874 n++; 907 n++;
875 if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) { 908 if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) {
876 printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - " 909 printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - "
@@ -882,7 +915,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
882 } 915 }
883 sgIn++; 916 sgIn++;
884 bl++; 917 bl++;
885 sgOut += (sizeof(dma_addr_t) + sizeof(u32)); 918 sgOut += iocp->SGE_size;
886 } 919 }
887 920
888 DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags); 921 DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags);
@@ -891,16 +924,30 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
891 * Finally, perform firmware download. 924 * Finally, perform firmware download.
892 */ 925 */
893 ReplyMsg = NULL; 926 ReplyMsg = NULL;
927 SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, dlmsg->MsgContext);
928 INITIALIZE_MGMT_STATUS(iocp->ioctl_cmds.status)
894 mpt_put_msg_frame(mptctl_id, iocp, mf); 929 mpt_put_msg_frame(mptctl_id, iocp, mf);
895 930
896 /* Now wait for the command to complete */ 931 /* Now wait for the command to complete */
897 ret = wait_event_timeout(mptctl_wait, 932retry_wait:
898 iocp->ioctl->wait_done == 1, 933 timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done, HZ*60);
899 HZ*60); 934 if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
935 ret = -ETIME;
936 printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
937 if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
938 mpt_free_msg_frame(iocp, mf);
939 goto fwdl_out;
940 }
941 if (!timeleft)
942 mptctl_timeout_expired(iocp, mf);
943 else
944 goto retry_wait;
945 goto fwdl_out;
946 }
900 947
901 if(ret <=0 && (iocp->ioctl->wait_done != 1 )) { 948 if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
902 /* Now we need to reset the board */ 949 printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
903 mptctl_timeout_expired(iocp->ioctl); 950 mpt_free_msg_frame(iocp, mf);
904 ret = -ENODATA; 951 ret = -ENODATA;
905 goto fwdl_out; 952 goto fwdl_out;
906 } 953 }
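
The retry_wait loop above distinguishes four outcomes of the completion wait: a good reply, an IOC reset that already invalidated the frame, a genuine timeout, and a spurious early wakeup that simply re-enters the wait. A compilable sketch of that decision tree, with a stubbed wait in place of wait_for_completion_timeout():

/* Sketch of the retry-wait decision tree; wait_done_timeout() is a stub
 * standing in for wait_for_completion_timeout(). */
#include <stdio.h>

#define STATUS_COMMAND_GOOD 0x01
#define STATUS_DID_IOCRESET 0x02

/* stub: pretend the reply arrives with time to spare */
static unsigned long wait_done_timeout(unsigned int *status, unsigned long tmo)
{
	*status |= STATUS_COMMAND_GOOD;
	return tmo - 1;                 /* >0 means time was left */
}

static int wait_for_reply(unsigned int *status, unsigned long tmo)
{
	unsigned long timeleft;

retry_wait:
	timeleft = wait_done_timeout(status, tmo);
	if (!(*status & STATUS_COMMAND_GOOD)) {
		if (*status & STATUS_DID_IOCRESET)
			return -1;      /* reset ran; frame already freed */
		if (!timeleft)
			return -2;      /* genuine timeout; escalate      */
		goto retry_wait;        /* woken early; wait again        */
	}
	return 0;
}

int main(void)
{
	unsigned int status = 0;

	printf("rc=%d\n", wait_for_reply(&status, 60));
	return 0;
}
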
@@ -908,7 +955,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
908 if (sgl) 955 if (sgl)
909 kfree_sgl(sgl, sgl_dma, buflist, iocp); 956 kfree_sgl(sgl, sgl_dma, buflist, iocp);
910 957
911 ReplyMsg = (pFWDownloadReply_t)iocp->ioctl->ReplyFrame; 958 ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply;
912 iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK; 959 iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK;
913 if (iocstat == MPI_IOCSTATUS_SUCCESS) { 960 if (iocstat == MPI_IOCSTATUS_SUCCESS) {
914 printk(MYIOC_s_INFO_FMT "F/W update successful!\n", iocp->name); 961 printk(MYIOC_s_INFO_FMT "F/W update successful!\n", iocp->name);
@@ -932,6 +979,9 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
932 return 0; 979 return 0;
933 980
934fwdl_out: 981fwdl_out:
982
983 CLEAR_MGMT_STATUS(iocp->ioctl_cmds.status);
984 SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, 0);
935 kfree_sgl(sgl, sgl_dma, buflist, iocp); 985 kfree_sgl(sgl, sgl_dma, buflist, iocp);
936 return ret; 986 return ret;
937} 987}
@@ -1003,7 +1053,7 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
1003 * 1053 *
1004 */ 1054 */
1005 sgl = sglbuf; 1055 sgl = sglbuf;
1006 sg_spill = ((ioc->req_sz - sge_offset)/(sizeof(dma_addr_t) + sizeof(u32))) - 1; 1056 sg_spill = ((ioc->req_sz - sge_offset)/ioc->SGE_size) - 1;
1007 while (bytes_allocd < bytes) { 1057 while (bytes_allocd < bytes) {
1008 this_alloc = min(alloc_sz, bytes-bytes_allocd); 1058 this_alloc = min(alloc_sz, bytes-bytes_allocd);
1009 buflist[buflist_ent].len = this_alloc; 1059 buflist[buflist_ent].len = this_alloc;
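
Both capacity formulas now divide by the per-adapter SGE width instead of the open-coded sizeof(dma_addr_t) + sizeof(u32), so the math follows the negotiated SGE format rather than the kernel's pointer size. A worked example under assumed sizes (128-byte request frame, 12-byte 64-bit SGE, 12-byte message header, 16-byte transaction-context SGE; all illustrative, not queried values):

/* Worked example of the frame-capacity math; every size here is an
 * assumed, illustrative value. */
#include <stdio.h>

int main(void)
{
	unsigned int req_sz     = 128; /* assumed ioc->req_sz            */
	unsigned int sge_size   = 12;  /* assumed ioc->SGE_size (64-bit) */
	unsigned int hdr_sz     = 12;  /* assumed sizeof(MPIHeader_t)    */
	unsigned int tcsge_sz   = 16;  /* assumed FWDownloadTCSGE_t size */
	unsigned int sge_offset = hdr_sz + tcsge_sz;

	unsigned int maxfrags = (req_sz - hdr_sz - tcsge_sz) / sge_size;
	unsigned int sg_spill = (req_sz - sge_offset) / sge_size - 1;

	printf("maxfrags=%u sg_spill=%u\n", maxfrags, sg_spill); /* 8, 7 */
	return 0;
}
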
@@ -1024,8 +1074,9 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
1024 dma_addr_t dma_addr; 1074 dma_addr_t dma_addr;
1025 1075
1026 bytes_allocd += this_alloc; 1076 bytes_allocd += this_alloc;
1027 sgl->FlagsLength = (0x10000000|MPT_SGE_FLAGS_ADDRESSING|sgdir|this_alloc); 1077 sgl->FlagsLength = (0x10000000|sgdir|this_alloc);
1028 dma_addr = pci_map_single(ioc->pcidev, buflist[buflist_ent].kptr, this_alloc, dir); 1078 dma_addr = pci_map_single(ioc->pcidev,
1079 buflist[buflist_ent].kptr, this_alloc, dir);
1029 sgl->Address = dma_addr; 1080 sgl->Address = dma_addr;
1030 1081
1031 fragcnt++; 1082 fragcnt++;
@@ -1771,7 +1822,10 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1771 int msgContext; 1822 int msgContext;
1772 u16 req_idx; 1823 u16 req_idx;
1773 ulong timeout; 1824 ulong timeout;
1825 unsigned long timeleft;
1774 struct scsi_device *sdev; 1826 struct scsi_device *sdev;
1827 unsigned long flags;
1828 u8 function;
1775 1829
1776 /* bufIn and bufOut are used for user to kernel space transfers 1830 /* bufIn and bufOut are used for user to kernel space transfers
1777 */ 1831 */
@@ -1784,24 +1838,23 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1784 __FILE__, __LINE__, iocnum); 1838 __FILE__, __LINE__, iocnum);
1785 return -ENODEV; 1839 return -ENODEV;
1786 } 1840 }
1787 if (!ioc->ioctl) { 1841
1788 printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - " 1842 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
1789 "No memory available during driver init.\n", 1843 if (ioc->ioc_reset_in_progress) {
1790 __FILE__, __LINE__); 1844 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
1791 return -ENOMEM;
1792 } else if (ioc->ioctl->status & MPT_IOCTL_STATUS_DID_IOCRESET) {
1793 printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - " 1845 printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - "
1794 "Busy with IOC Reset \n", __FILE__, __LINE__); 1846 "Busy with diagnostic reset\n", __FILE__, __LINE__);
1795 return -EBUSY; 1847 return -EBUSY;
1796 } 1848 }
1849 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
1797 1850
1798 /* Verify that the final request frame will not be too large. 1851 /* Verify that the final request frame will not be too large.
1799 */ 1852 */
1800 sz = karg.dataSgeOffset * 4; 1853 sz = karg.dataSgeOffset * 4;
1801 if (karg.dataInSize > 0) 1854 if (karg.dataInSize > 0)
1802 sz += sizeof(dma_addr_t) + sizeof(u32); 1855 sz += ioc->SGE_size;
1803 if (karg.dataOutSize > 0) 1856 if (karg.dataOutSize > 0)
1804 sz += sizeof(dma_addr_t) + sizeof(u32); 1857 sz += ioc->SGE_size;
1805 1858
1806 if (sz > ioc->req_sz) { 1859 if (sz > ioc->req_sz) {
1807 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 1860 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
@@ -1827,10 +1880,12 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1827 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 1880 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
1828 "Unable to read MF from mpt_ioctl_command struct @ %p\n", 1881 "Unable to read MF from mpt_ioctl_command struct @ %p\n",
1829 ioc->name, __FILE__, __LINE__, mfPtr); 1882 ioc->name, __FILE__, __LINE__, mfPtr);
1883 function = -1;
1830 rc = -EFAULT; 1884 rc = -EFAULT;
1831 goto done_free_mem; 1885 goto done_free_mem;
1832 } 1886 }
1833 hdr->MsgContext = cpu_to_le32(msgContext); 1887 hdr->MsgContext = cpu_to_le32(msgContext);
1888 function = hdr->Function;
1834 1889
1835 1890
1836 /* Verify that this request is allowed. 1891 /* Verify that this request is allowed.
@@ -1838,7 +1893,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1838 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n", 1893 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n",
1839 ioc->name, hdr->Function, mf)); 1894 ioc->name, hdr->Function, mf));
1840 1895
1841 switch (hdr->Function) { 1896 switch (function) {
1842 case MPI_FUNCTION_IOC_FACTS: 1897 case MPI_FUNCTION_IOC_FACTS:
1843 case MPI_FUNCTION_PORT_FACTS: 1898 case MPI_FUNCTION_PORT_FACTS:
1844 karg.dataOutSize = karg.dataInSize = 0; 1899 karg.dataOutSize = karg.dataInSize = 0;
@@ -1893,7 +1948,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1893 } 1948 }
1894 1949
1895 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; 1950 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
1896 pScsiReq->MsgFlags |= mpt_msg_flags(); 1951 pScsiReq->MsgFlags |= mpt_msg_flags(ioc);
1897 1952
1898 1953
1899 /* verify that app has not requested 1954 /* verify that app has not requested
@@ -1935,8 +1990,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1935 pScsiReq->Control = cpu_to_le32(scsidir | qtag); 1990 pScsiReq->Control = cpu_to_le32(scsidir | qtag);
1936 pScsiReq->DataLength = cpu_to_le32(dataSize); 1991 pScsiReq->DataLength = cpu_to_le32(dataSize);
1937 1992
1938 ioc->ioctl->reset = MPTCTL_RESET_OK;
1939 ioc->ioctl->id = pScsiReq->TargetID;
1940 1993
1941 } else { 1994 } else {
1942 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 1995 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
@@ -1979,7 +2032,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
1979 int dataSize; 2032 int dataSize;
1980 2033
1981 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; 2034 pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
1982 pScsiReq->MsgFlags |= mpt_msg_flags(); 2035 pScsiReq->MsgFlags |= mpt_msg_flags(ioc);
1983 2036
1984 2037
1985 /* verify that app has not requested 2038 /* verify that app has not requested
@@ -2014,8 +2067,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2014 pScsiReq->Control = cpu_to_le32(scsidir | qtag); 2067 pScsiReq->Control = cpu_to_le32(scsidir | qtag);
2015 pScsiReq->DataLength = cpu_to_le32(dataSize); 2068 pScsiReq->DataLength = cpu_to_le32(dataSize);
2016 2069
2017 ioc->ioctl->reset = MPTCTL_RESET_OK;
2018 ioc->ioctl->id = pScsiReq->TargetID;
2019 } else { 2070 } else {
2020 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 2071 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
2021 "SCSI driver is not loaded. \n", 2072 "SCSI driver is not loaded. \n",
@@ -2026,20 +2077,17 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2026 break; 2077 break;
2027 2078
2028 case MPI_FUNCTION_SCSI_TASK_MGMT: 2079 case MPI_FUNCTION_SCSI_TASK_MGMT:
2029 { 2080 {
2030 MPT_SCSI_HOST *hd = NULL; 2081 SCSITaskMgmt_t *pScsiTm;
2031 if ((ioc->sh == NULL) || ((hd = shost_priv(ioc->sh)) == NULL)) { 2082 pScsiTm = (SCSITaskMgmt_t *)mf;
2032 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 2083 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2033 "SCSI driver not loaded or SCSI host not found. \n", 2084 "\tTaskType=0x%x MsgFlags=0x%x "
2034 ioc->name, __FILE__, __LINE__); 2085 "TaskMsgContext=0x%x id=%d channel=%d\n",
2035 rc = -EFAULT; 2086 ioc->name, pScsiTm->TaskType, pScsiTm->MsgFlags,
2036 goto done_free_mem; 2087 le32_to_cpu(pScsiTm->TaskMsgContext),
2037 } else if (mptctl_set_tm_flags(hd) != 0) { 2088 pScsiTm->TargetID, pScsiTm->Bus));
2038 rc = -EPERM;
2039 goto done_free_mem;
2040 }
2041 }
2042 break; 2089 break;
2090 }
2043 2091
2044 case MPI_FUNCTION_IOC_INIT: 2092 case MPI_FUNCTION_IOC_INIT:
2045 { 2093 {
@@ -2123,8 +2171,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2123 if (karg.dataInSize > 0) { 2171 if (karg.dataInSize > 0) {
2124 flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT | 2172 flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2125 MPI_SGE_FLAGS_END_OF_BUFFER | 2173 MPI_SGE_FLAGS_END_OF_BUFFER |
2126 MPI_SGE_FLAGS_DIRECTION | 2174 MPI_SGE_FLAGS_DIRECTION)
2127 mpt_addr_size() )
2128 << MPI_SGE_FLAGS_SHIFT; 2175 << MPI_SGE_FLAGS_SHIFT;
2129 } else { 2176 } else {
2130 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE; 2177 flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
@@ -2141,8 +2188,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2141 /* Set up this SGE. 2188 /* Set up this SGE.
2142 * Copy to MF and to sglbuf 2189 * Copy to MF and to sglbuf
2143 */ 2190 */
2144 mpt_add_sge(psge, flagsLength, dma_addr_out); 2191 ioc->add_sge(psge, flagsLength, dma_addr_out);
2145 psge += (sizeof(u32) + sizeof(dma_addr_t)); 2192 psge += ioc->SGE_size;
2146 2193
2147 /* Copy user data to kernel space. 2194 /* Copy user data to kernel space.
2148 */ 2195 */
@@ -2175,18 +2222,25 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2175 /* Set up this SGE 2222 /* Set up this SGE
2176 * Copy to MF and to sglbuf 2223 * Copy to MF and to sglbuf
2177 */ 2224 */
2178 mpt_add_sge(psge, flagsLength, dma_addr_in); 2225 ioc->add_sge(psge, flagsLength, dma_addr_in);
2179 } 2226 }
2180 } 2227 }
2181 } else { 2228 } else {
2182 /* Add a NULL SGE 2229 /* Add a NULL SGE
2183 */ 2230 */
2184 mpt_add_sge(psge, flagsLength, (dma_addr_t) -1); 2231 ioc->add_sge(psge, flagsLength, (dma_addr_t) -1);
2185 } 2232 }
2186 2233
2187 ioc->ioctl->wait_done = 0; 2234 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, hdr->MsgContext);
2235 INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
2188 if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) { 2236 if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
2189 2237
2238 mutex_lock(&ioc->taskmgmt_cmds.mutex);
2239 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
2240 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2241 goto done_free_mem;
2242 }
2243
2190 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf); 2244 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
2191 2245
2192 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && 2246 if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
@@ -2197,10 +2251,11 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2197 sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP); 2251 sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP);
2198 if (rc != 0) { 2252 if (rc != 0) {
2199 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2253 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2200 "_send_handshake FAILED! (ioc %p, mf %p)\n", 2254 "send_handshake FAILED! (ioc %p, mf %p)\n",
2201 ioc->name, ioc, mf)); 2255 ioc->name, ioc, mf));
2202 mptctl_free_tm_flags(ioc); 2256 mpt_clear_taskmgmt_in_progress_flag(ioc);
2203 rc = -ENODATA; 2257 rc = -ENODATA;
2258 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2204 goto done_free_mem; 2259 goto done_free_mem;
2205 } 2260 }
2206 } 2261 }
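
For MPI_FUNCTION_SCSI_TASK_MGMT the new path takes taskmgmt_cmds.mutex, then the IOC-wide in-progress flag, and unwinds both in reverse order when the doorbell handshake fails. A sketch of that acquire/undo shape with stand-in lock and flag helpers:

/* Shape of the acquire/undo sequence around the task-management
 * handshake; the lock and flag helpers are stand-ins. */
#include <stdio.h>

static int in_progress;

static int take_flag(void)  { if (in_progress) return -1; in_progress = 1; return 0; }
static void drop_flag(void) { in_progress = 0; }
static void lock(void)      { /* mutex_lock(&ioc->taskmgmt_cmds.mutex)   */ }
static void unlock(void)    { /* mutex_unlock(&ioc->taskmgmt_cmds.mutex) */ }
static int send_handshake(void) { return -1; /* pretend it fails */ }

static int issue_tm(void)
{
	int rc;

	lock();
	if (take_flag() != 0) {
		unlock();
		return -1;              /* another TM is already running */
	}
	rc = send_handshake();
	if (rc != 0) {
		drop_flag();            /* undo the flag first...        */
		unlock();               /* ...then release the mutex     */
		return rc;
	}
	/* on success: wait for completion, reply handler drops the flag */
	unlock();
	return 0;
}

int main(void)
{
	printf("rc=%d\n", issue_tm());
	return 0;
}
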
@@ -2210,36 +2265,47 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2210 2265
2211 /* Now wait for the command to complete */ 2266 /* Now wait for the command to complete */
2212 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT; 2267 timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT;
2213 timeout = wait_event_timeout(mptctl_wait, 2268retry_wait:
2214 ioc->ioctl->wait_done == 1, 2269 timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
2215 HZ*timeout); 2270 HZ*timeout);
2216 2271 if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
2217 if(timeout <=0 && (ioc->ioctl->wait_done != 1 )) { 2272 rc = -ETIME;
2218 /* Now we need to reset the board */ 2273 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "%s: TIMED OUT!\n",
2219 2274 ioc->name, __func__));
2220 if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) 2275 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
2221 mptctl_free_tm_flags(ioc); 2276 if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
2222 2277 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2223 mptctl_timeout_expired(ioc->ioctl); 2278 goto done_free_mem;
2224 rc = -ENODATA; 2279 }
2280 if (!timeleft) {
2281 if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
2282 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2283 mptctl_timeout_expired(ioc, mf);
2284 mf = NULL;
2285 } else
2286 goto retry_wait;
2225 goto done_free_mem; 2287 goto done_free_mem;
2226 } 2288 }
2227 2289
2290 if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
2291 mutex_unlock(&ioc->taskmgmt_cmds.mutex);
2292
2293
2228 mf = NULL; 2294 mf = NULL;
2229 2295
2230 /* If a valid reply frame, copy to the user. 2296 /* If a valid reply frame, copy to the user.
2231 * Offset 2: reply length in U32's 2297 * Offset 2: reply length in U32's
2232 */ 2298 */
2233 if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) { 2299 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) {
2234 if (karg.maxReplyBytes < ioc->reply_sz) { 2300 if (karg.maxReplyBytes < ioc->reply_sz) {
2235 sz = min(karg.maxReplyBytes, 4*ioc->ioctl->ReplyFrame[2]); 2301 sz = min(karg.maxReplyBytes,
2302 4*ioc->ioctl_cmds.reply[2]);
2236 } else { 2303 } else {
2237 sz = min(ioc->reply_sz, 4*ioc->ioctl->ReplyFrame[2]); 2304 sz = min(ioc->reply_sz, 4*ioc->ioctl_cmds.reply[2]);
2238 } 2305 }
2239
2240 if (sz > 0) { 2306 if (sz > 0) {
2241 if (copy_to_user(karg.replyFrameBufPtr, 2307 if (copy_to_user(karg.replyFrameBufPtr,
2242 &ioc->ioctl->ReplyFrame, sz)){ 2308 ioc->ioctl_cmds.reply, sz)){
2243 printk(MYIOC_s_ERR_FMT 2309 printk(MYIOC_s_ERR_FMT
2244 "%s@%d::mptctl_do_mpt_command - " 2310 "%s@%d::mptctl_do_mpt_command - "
2245 "Unable to write out reply frame %p\n", 2311 "Unable to write out reply frame %p\n",
@@ -2252,10 +2318,11 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2252 2318
2253 /* If valid sense data, copy to user. 2319 /* If valid sense data, copy to user.
2254 */ 2320 */
2255 if (ioc->ioctl->status & MPT_IOCTL_STATUS_SENSE_VALID) { 2321 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_SENSE_VALID) {
2256 sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE); 2322 sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE);
2257 if (sz > 0) { 2323 if (sz > 0) {
2258 if (copy_to_user(karg.senseDataPtr, ioc->ioctl->sense, sz)) { 2324 if (copy_to_user(karg.senseDataPtr,
2325 ioc->ioctl_cmds.sense, sz)) {
2259 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " 2326 printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
2260 "Unable to write sense data to user %p\n", 2327 "Unable to write sense data to user %p\n",
2261 ioc->name, __FILE__, __LINE__, 2328 ioc->name, __FILE__, __LINE__,
@@ -2269,7 +2336,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2269 /* If the overall status is _GOOD and data in, copy data 2336 /* If the overall status is _GOOD and data in, copy data
2270 * to user. 2337 * to user.
2271 */ 2338 */
2272 if ((ioc->ioctl->status & MPT_IOCTL_STATUS_COMMAND_GOOD) && 2339 if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD) &&
2273 (karg.dataInSize > 0) && (bufIn.kptr)) { 2340 (karg.dataInSize > 0) && (bufIn.kptr)) {
2274 2341
2275 if (copy_to_user(karg.dataInBufPtr, 2342 if (copy_to_user(karg.dataInBufPtr,
@@ -2284,9 +2351,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
2284 2351
2285done_free_mem: 2352done_free_mem:
2286 2353
2287 ioc->ioctl->status &= ~(MPT_IOCTL_STATUS_COMMAND_GOOD | 2354 CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
2288 MPT_IOCTL_STATUS_SENSE_VALID | 2355 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
2289 MPT_IOCTL_STATUS_RF_VALID );
2290 2356
2291 /* Free the allocated memory. 2357 /* Free the allocated memory.
2292 */ 2358 */
@@ -2336,6 +2402,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2336 ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; 2402 ToolboxIstwiReadWriteRequest_t *IstwiRWRequest;
2337 MPT_FRAME_HDR *mf = NULL; 2403 MPT_FRAME_HDR *mf = NULL;
2338 MPIHeader_t *mpi_hdr; 2404 MPIHeader_t *mpi_hdr;
2405 unsigned long timeleft;
2406 int retval;
2339 2407
2340 /* Reset long to int. Should affect IA64 and SPARC only 2408 /* Reset long to int. Should affect IA64 and SPARC only
2341 */ 2409 */
@@ -2466,9 +2534,9 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2466 MPT_SCSI_HOST *hd = shost_priv(ioc->sh); 2534 MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
2467 2535
2468 if (hd && (cim_rev == 1)) { 2536 if (hd && (cim_rev == 1)) {
2469 karg.hard_resets = hd->hard_resets; 2537 karg.hard_resets = ioc->hard_resets;
2470 karg.soft_resets = hd->soft_resets; 2538 karg.soft_resets = ioc->soft_resets;
2471 karg.timeouts = hd->timeouts; 2539 karg.timeouts = ioc->timeouts;
2472 } 2540 }
2473 } 2541 }
2474 2542
@@ -2476,8 +2544,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2476 * Gather ISTWI(Industry Standard Two Wire Interface) Data 2544 * Gather ISTWI(Industry Standard Two Wire Interface) Data
2477 */ 2545 */
2478 if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { 2546 if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
2479 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", 2547 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
2480 ioc->name,__func__)); 2548 "%s, no msg frames!!\n", ioc->name, __func__));
2481 goto out; 2549 goto out;
2482 } 2550 }
2483 2551
@@ -2498,22 +2566,29 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2498 pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); 2566 pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma);
2499 if (!pbuf) 2567 if (!pbuf)
2500 goto out; 2568 goto out;
2501 mpt_add_sge((char *)&IstwiRWRequest->SGL, 2569 ioc->add_sge((char *)&IstwiRWRequest->SGL,
2502 (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma); 2570 (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma);
2503 2571
2504 ioc->ioctl->wait_done = 0; 2572 retval = 0;
2573 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context,
2574 IstwiRWRequest->MsgContext);
2575 INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
2505 mpt_put_msg_frame(mptctl_id, ioc, mf); 2576 mpt_put_msg_frame(mptctl_id, ioc, mf);
2506 2577
2507 rc = wait_event_timeout(mptctl_wait, 2578retry_wait:
2508 ioc->ioctl->wait_done == 1, 2579 timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
2509 HZ*MPT_IOCTL_DEFAULT_TIMEOUT /* 10 sec */); 2580 HZ*MPT_IOCTL_DEFAULT_TIMEOUT);
2510 2581 if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
2511 if(rc <=0 && (ioc->ioctl->wait_done != 1 )) { 2582 retval = -ETIME;
2512 /* 2583 printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__);
2513 * Now we need to reset the board 2584 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
2514 */ 2585 mpt_free_msg_frame(ioc, mf);
2515 mpt_free_msg_frame(ioc, mf); 2586 goto out;
2516 mptctl_timeout_expired(ioc->ioctl); 2587 }
2588 if (!timeleft)
2589 mptctl_timeout_expired(ioc, mf);
2590 else
2591 goto retry_wait;
2517 goto out; 2592 goto out;
2518 } 2593 }
2519 2594
@@ -2526,10 +2601,13 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2526 * bays have drives in them 2601 * bays have drives in them
2527 * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3) 2602 * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3)
2528 */ 2603 */
2529 if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) 2604 if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)
2530 karg.rsvd = *(u32 *)pbuf; 2605 karg.rsvd = *(u32 *)pbuf;
2531 2606
2532 out: 2607 out:
2608 CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
2609 SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
2610
2533 if (pbuf) 2611 if (pbuf)
2534 pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); 2612 pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma);
2535 2613
@@ -2753,7 +2831,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
2753 2831
2754 ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); 2832 ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);
2755 2833
2756 mutex_unlock(&iocp->ioctl->ioctl_mutex); 2834 mutex_unlock(&iocp->ioctl_cmds.mutex);
2757 2835
2758 return ret; 2836 return ret;
2759} 2837}
@@ -2807,7 +2885,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd,
2807 */ 2885 */
2808 ret = mptctl_do_mpt_command (karg, &uarg->MF); 2886 ret = mptctl_do_mpt_command (karg, &uarg->MF);
2809 2887
2810 mutex_unlock(&iocp->ioctl->ioctl_mutex); 2888 mutex_unlock(&iocp->ioctl_cmds.mutex);
2811 2889
2812 return ret; 2890 return ret;
2813} 2891}
@@ -2859,21 +2937,10 @@ static long compat_mpctl_ioctl(struct file *f, unsigned int cmd, unsigned long a
2859static int 2937static int
2860mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2938mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2861{ 2939{
2862 MPT_IOCTL *mem;
2863 MPT_ADAPTER *ioc = pci_get_drvdata(pdev); 2940 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
2864 2941
2865 /* 2942 mutex_init(&ioc->ioctl_cmds.mutex);
2866 * Allocate and inite a MPT_IOCTL structure 2943 init_completion(&ioc->ioctl_cmds.done);
2867 */
2868 mem = kzalloc(sizeof(MPT_IOCTL), GFP_KERNEL);
2869 if (!mem) {
2870 mptctl_remove(pdev);
2871 return -ENOMEM;
2872 }
2873
2874 ioc->ioctl = mem;
2875 ioc->ioctl->ioc = ioc;
2876 mutex_init(&ioc->ioctl->ioctl_mutex);
2877 return 0; 2944 return 0;
2878} 2945}
2879 2946
@@ -2887,9 +2954,6 @@ mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2887static void 2954static void
2888mptctl_remove(struct pci_dev *pdev) 2955mptctl_remove(struct pci_dev *pdev)
2889{ 2956{
2890 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
2891
2892 kfree ( ioc->ioctl );
2893} 2957}
2894 2958
2895static struct mpt_pci_driver mptctl_driver = { 2959static struct mpt_pci_driver mptctl_driver = {
@@ -2929,6 +2993,7 @@ static int __init mptctl_init(void)
2929 goto out_fail; 2993 goto out_fail;
2930 } 2994 }
2931 2995
2996 mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER);
2932 mpt_reset_register(mptctl_id, mptctl_ioc_reset); 2997 mpt_reset_register(mptctl_id, mptctl_ioc_reset);
2933 mpt_event_register(mptctl_id, mptctl_event_process); 2998 mpt_event_register(mptctl_id, mptctl_event_process);
2934 2999
@@ -2953,6 +3018,7 @@ static void mptctl_exit(void)
2953 3018
2954 /* De-register callback handler from base module */ 3019 /* De-register callback handler from base module */
2955 mpt_deregister(mptctl_id); 3020 mpt_deregister(mptctl_id);
3021 mpt_deregister(mptctl_taskmgmt_id);
2956 3022
2957 mpt_device_driver_deregister(MPTCTL_DRIVER); 3023 mpt_device_driver_deregister(MPTCTL_DRIVER);
2958 3024
diff --git a/drivers/message/fusion/mptdebug.h b/drivers/message/fusion/mptdebug.h
index 510b9f49209..28e47887928 100644
--- a/drivers/message/fusion/mptdebug.h
+++ b/drivers/message/fusion/mptdebug.h
@@ -58,6 +58,7 @@
58#define MPT_DEBUG_FC 0x00080000 58#define MPT_DEBUG_FC 0x00080000
59#define MPT_DEBUG_SAS 0x00100000 59#define MPT_DEBUG_SAS 0x00100000
60#define MPT_DEBUG_SAS_WIDE 0x00200000 60#define MPT_DEBUG_SAS_WIDE 0x00200000
61#define MPT_DEBUG_36GB_MEM 0x00400000
61 62
62/* 63/*
63 * CONFIG_FUSION_LOGGING - enabled in Kconfig 64 * CONFIG_FUSION_LOGGING - enabled in Kconfig
@@ -135,6 +136,8 @@
135#define dsaswideprintk(IOC, CMD) \ 136#define dsaswideprintk(IOC, CMD) \
136 MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE) 137 MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE)
137 138
139#define d36memprintk(IOC, CMD) \
140 MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_36GB_MEM)
138 141
139 142
140/* 143/*
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index c3c24fdf9fb..e61df133a59 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1251,17 +1251,15 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1251 * A slightly different algorithm is required for 1251 * A slightly different algorithm is required for
1252 * 64bit SGEs. 1252 * 64bit SGEs.
1253 */ 1253 */
1254 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); 1254 scale = ioc->req_sz/ioc->SGE_size;
1255 if (sizeof(dma_addr_t) == sizeof(u64)) { 1255 if (ioc->sg_addr_size == sizeof(u64)) {
1256 numSGE = (scale - 1) * 1256 numSGE = (scale - 1) *
1257 (ioc->facts.MaxChainDepth-1) + scale + 1257 (ioc->facts.MaxChainDepth-1) + scale +
1258 (ioc->req_sz - 60) / (sizeof(dma_addr_t) + 1258 (ioc->req_sz - 60) / ioc->SGE_size;
1259 sizeof(u32));
1260 } else { 1259 } else {
1261 numSGE = 1 + (scale - 1) * 1260 numSGE = 1 + (scale - 1) *
1262 (ioc->facts.MaxChainDepth-1) + scale + 1261 (ioc->facts.MaxChainDepth-1) + scale +
1263 (ioc->req_sz - 64) / (sizeof(dma_addr_t) + 1262 (ioc->req_sz - 64) / ioc->SGE_size;
1264 sizeof(u32));
1265 } 1263 }
1266 1264
1267 if (numSGE < sh->sg_tablesize) { 1265 if (numSGE < sh->sg_tablesize) {
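
With the per-adapter SGE width in place, the 64-bit branch of the sg_tablesize bound evaluates as follows under assumed values (req_sz = 128, SGE_size = 12, MaxChainDepth = 16; illustrative numbers only):

/* Worked example of the 64-bit-SGE sg_tablesize bound; req_sz, SGE_size
 * and MaxChainDepth are assumed, illustrative values. */
#include <stdio.h>

int main(void)
{
	unsigned int req_sz = 128, sge_size = 12, max_chain_depth = 16;

	unsigned int scale  = req_sz / sge_size;                  /* 10  */
	unsigned int numSGE = (scale - 1) * (max_chain_depth - 1)
			      + scale + (req_sz - 60) / sge_size; /* 150 */

	printf("scale=%u numSGE=%u\n", scale, numSGE);
	return 0;
}
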
@@ -1292,9 +1290,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1292 1290
1293 /* Clear the TM flags 1291 /* Clear the TM flags
1294 */ 1292 */
1295 hd->tmPending = 0;
1296 hd->tmState = TM_STATE_NONE;
1297 hd->resetPending = 0;
1298 hd->abortSCpnt = NULL; 1293 hd->abortSCpnt = NULL;
1299 1294
1300 /* Clear the pointer used to store 1295 /* Clear the pointer used to store
@@ -1312,8 +1307,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1312 hd->timer.data = (unsigned long) hd; 1307 hd->timer.data = (unsigned long) hd;
1313 hd->timer.function = mptscsih_timer_expired; 1308 hd->timer.function = mptscsih_timer_expired;
1314 1309
1315 init_waitqueue_head(&hd->scandv_waitq);
1316 hd->scandv_wait_done = 0;
1317 hd->last_queue_full = 0; 1310 hd->last_queue_full = 0;
1318 1311
1319 sh->transportt = mptfc_transport_template; 1312 sh->transportt = mptfc_transport_template;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index a9019f081b9..20e0b447e8e 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -93,8 +93,37 @@ static u8 mptsasDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
93static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS; 93static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
94static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */ 94static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */
95static u8 mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS; 95static u8 mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS;
96 96static u8 mptsasDeviceResetCtx = MPT_MAX_PROTOCOL_DRIVERS;
97static void mptsas_hotplug_work(struct work_struct *work); 97
98static void mptsas_firmware_event_work(struct work_struct *work);
99static void mptsas_send_sas_event(struct fw_event_work *fw_event);
100static void mptsas_send_raid_event(struct fw_event_work *fw_event);
101static void mptsas_send_ir2_event(struct fw_event_work *fw_event);
102static void mptsas_parse_device_info(struct sas_identify *identify,
103 struct mptsas_devinfo *device_info);
104static inline void mptsas_set_rphy(MPT_ADAPTER *ioc,
105 struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy);
106static struct mptsas_phyinfo *mptsas_find_phyinfo_by_sas_address
107 (MPT_ADAPTER *ioc, u64 sas_address);
108static int mptsas_sas_device_pg0(MPT_ADAPTER *ioc,
109 struct mptsas_devinfo *device_info, u32 form, u32 form_specific);
110static int mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc,
111 struct mptsas_enclosure *enclosure, u32 form, u32 form_specific);
112static int mptsas_add_end_device(MPT_ADAPTER *ioc,
113 struct mptsas_phyinfo *phy_info);
114static void mptsas_del_end_device(MPT_ADAPTER *ioc,
115 struct mptsas_phyinfo *phy_info);
116static void mptsas_send_link_status_event(struct fw_event_work *fw_event);
117static struct mptsas_portinfo *mptsas_find_portinfo_by_sas_address
118 (MPT_ADAPTER *ioc, u64 sas_address);
119static void mptsas_expander_delete(MPT_ADAPTER *ioc,
120 struct mptsas_portinfo *port_info, u8 force);
121static void mptsas_send_expander_event(struct fw_event_work *fw_event);
122static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
123static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
124static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
125static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
126static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
98 127
99static void mptsas_print_phy_data(MPT_ADAPTER *ioc, 128static void mptsas_print_phy_data(MPT_ADAPTER *ioc,
100 MPI_SAS_IO_UNIT0_PHY_DATA *phy_data) 129 MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
@@ -218,30 +247,125 @@ static void mptsas_print_expander_pg1(MPT_ADAPTER *ioc, SasExpanderPage1_t *pg1)
218 le16_to_cpu(pg1->AttachedDevHandle))); 247 le16_to_cpu(pg1->AttachedDevHandle)));
219} 248}
220 249
221static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy) 250/* inhibit sas firmware event handling */
251static void
252mptsas_fw_event_off(MPT_ADAPTER *ioc)
222{ 253{
223 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); 254 unsigned long flags;
224 return ((MPT_SCSI_HOST *)shost->hostdata)->ioc; 255
256 spin_lock_irqsave(&ioc->fw_event_lock, flags);
257 ioc->fw_events_off = 1;
258 ioc->sas_discovery_quiesce_io = 0;
259 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
260
225} 261}
226 262
227static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy) 263/* enable sas firmware event handling */
264static void
265mptsas_fw_event_on(MPT_ADAPTER *ioc)
228{ 266{
229 struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent); 267 unsigned long flags;
230 return ((MPT_SCSI_HOST *)shost->hostdata)->ioc; 268
269 spin_lock_irqsave(&ioc->fw_event_lock, flags);
270 ioc->fw_events_off = 0;
271 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
231} 272}
232 273
233static struct mptsas_portinfo * 274/* queue a sas firmware event */
234mptsas_get_hba_portinfo(MPT_ADAPTER *ioc) 275static void
276mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
277 unsigned long delay)
235{ 278{
236 struct list_head *head = &ioc->sas_topology; 279 unsigned long flags;
237 struct mptsas_portinfo *pi = NULL; 280
281 spin_lock_irqsave(&ioc->fw_event_lock, flags);
282 list_add_tail(&fw_event->list, &ioc->fw_event_list);
283 INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work);
284 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)\n",
285 ioc->name, __func__, fw_event));
286 queue_delayed_work(ioc->fw_event_q, &fw_event->work,
287 delay);
288 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
289}
290
291/* requeue a sas firmware event */
292static void
293mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
294 unsigned long delay)
295{
296 unsigned long flags;
297 spin_lock_irqsave(&ioc->fw_event_lock, flags);
298 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task "
299 "(fw_event=0x%p)\n", ioc->name, __func__, fw_event));
300 fw_event->retries++;
301 queue_delayed_work(ioc->fw_event_q, &fw_event->work,
302 msecs_to_jiffies(delay));
303 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
304}
305
306/* free memory associated with a sas firmware event */
307static void
308mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event)
309{
310 unsigned long flags;
311
312 spin_lock_irqsave(&ioc->fw_event_lock, flags);
313 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: kfree (fw_event=0x%p)\n",
314 ioc->name, __func__, fw_event));
315 list_del(&fw_event->list);
316 kfree(fw_event);
317 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
318}
319
320/* walk the firmware event queue, and either stop or wait for
321 * outstanding events to complete */
322static void
323mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
324{
325 struct fw_event_work *fw_event, *next;
326 struct mptsas_target_reset_event *target_reset_list, *n;
327 u8 flush_q;
328 MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
329
330 /* flush the target_reset_list */
331 if (!list_empty(&hd->target_reset_list)) {
332 list_for_each_entry_safe(target_reset_list, n,
333 &hd->target_reset_list, list) {
334 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
335 "%s: removing target reset for id=%d\n",
336 ioc->name, __func__,
337 target_reset_list->sas_event_data.TargetID));
338 list_del(&target_reset_list->list);
339 kfree(target_reset_list);
340 }
341 }
342
343 if (list_empty(&ioc->fw_event_list) ||
344 !ioc->fw_event_q || in_interrupt())
345 return;
238 346
239 /* always the first entry on sas_topology list */ 347 flush_q = 0;
348 list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
349 if (cancel_delayed_work(&fw_event->work))
350 mptsas_free_fw_event(ioc, fw_event);
351 else
352 flush_q = 1;
353 }
354 if (flush_q)
355 flush_workqueue(ioc->fw_event_q);
356}
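
mptsas_cleanup_fw_event_q above leans on the usual delayed-work rule: a work item whose timer has not fired can be cancelled and freed on the spot, while one already executing must be flushed so it completes on its own. A small model of that cancel-or-flush decision:

/* Model of the cancel-or-flush decision; struct work stands in for the
 * kernel's delayed work item. */
#include <stdio.h>

struct work { int queued; };            /* 1 = timer not yet fired */

static int cancel_work(struct work *w)
{
	if (w->queued) {
		w->queued = 0;
		return 1;               /* dequeued: safe to free now */
	}
	return 0;                       /* already executing          */
}

static int drain(struct work *list, int n)
{
	int flush_q = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (cancel_work(&list[i]))
			continue;       /* freed immediately in the driver */
		flush_q = 1;            /* running work must be flushed    */
	}
	return flush_q;                 /* caller calls flush_workqueue()  */
}

int main(void)
{
	struct work w[2] = { { .queued = 1 }, { .queued = 0 } };

	printf("need flush=%d\n", drain(w, 2));
	return 0;
}
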
240 357
241 if (!list_empty(head))
242 pi = list_entry(head->next, struct mptsas_portinfo, list);
243 358
244 return pi; 359static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy)
360{
361 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
362 return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
363}
364
365static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy)
366{
367 struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
368 return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
245} 369}
246 370
247/* 371/*
@@ -265,6 +389,38 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
265 return rc; 389 return rc;
266} 390}
267 391
392/**
393 * mptsas_find_portinfo_by_sas_address - find port info by SAS address
394 * @ioc: Pointer to MPT_ADAPTER structure
395 * @sas_address: SAS address of the port being looked up
396 *
397 * This function should be called with the sas_topology_mutex already held
398 *
399 **/
400static struct mptsas_portinfo *
401mptsas_find_portinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
402{
403 struct mptsas_portinfo *port_info, *rc = NULL;
404 int i;
405
406 if (sas_address >= ioc->hba_port_sas_addr &&
407 sas_address < (ioc->hba_port_sas_addr +
408 ioc->hba_port_num_phy))
409 return ioc->hba_port_info;
410
411 mutex_lock(&ioc->sas_topology_mutex);
412 list_for_each_entry(port_info, &ioc->sas_topology, list)
413 for (i = 0; i < port_info->num_phys; i++)
414 if (port_info->phy_info[i].identify.sas_address ==
415 sas_address) {
416 rc = port_info;
417 goto out;
418 }
419 out:
420 mutex_unlock(&ioc->sas_topology_mutex);
421 return rc;
422}
423
268/* 424/*
269 * Returns true if there is a scsi end device 425 * Returns true if there is a scsi end device
270 */ 426 */
@@ -308,6 +464,7 @@ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_detai
308 if(phy_info->port_details != port_details) 464 if(phy_info->port_details != port_details)
309 continue; 465 continue;
310 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo)); 466 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
467 mptsas_set_rphy(ioc, phy_info, NULL);
311 phy_info->port_details = NULL; 468 phy_info->port_details = NULL;
312 } 469 }
313 kfree(port_details); 470 kfree(port_details);
@@ -379,6 +536,285 @@ starget)
379 phy_info->port_details->starget = starget; 536 phy_info->port_details->starget = starget;
380} 537}
381 538
539/**
540 * mptsas_add_device_component - add a device to the device info list
541 * @ioc: Pointer to MPT_ADAPTER structure
542 * @channel: fw mapped channel
543 * @id: fw mapped id
544 * @sas_address: SAS address of the attached device
545 * @device_info: bitfield describing the attached device
546 *
547 **/
548static void
549mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id,
550 u64 sas_address, u32 device_info, u16 slot, u64 enclosure_logical_id)
551{
552 struct mptsas_device_info *sas_info, *next;
553 struct scsi_device *sdev;
554 struct scsi_target *starget;
555 struct sas_rphy *rphy;
556
557 /*
558 * Delete all matching devices out of the list
559 */
560 mutex_lock(&ioc->sas_device_info_mutex);
561 list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
562 list) {
563 if (!sas_info->is_logical_volume &&
564 (sas_info->sas_address == sas_address ||
565 (sas_info->fw.channel == channel &&
566 sas_info->fw.id == id))) {
567 list_del(&sas_info->list);
568 kfree(sas_info);
569 }
570 }
571
572 sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
573 if (!sas_info)
574 goto out;
575
576 /*
577 * Set Firmware mapping
578 */
579 sas_info->fw.id = id;
580 sas_info->fw.channel = channel;
581
582 sas_info->sas_address = sas_address;
583 sas_info->device_info = device_info;
584 sas_info->slot = slot;
585 sas_info->enclosure_logical_id = enclosure_logical_id;
586 INIT_LIST_HEAD(&sas_info->list);
587 list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
588
589 /*
590 * Set OS mapping
591 */
592 shost_for_each_device(sdev, ioc->sh) {
593 starget = scsi_target(sdev);
594 rphy = dev_to_rphy(starget->dev.parent);
595 if (rphy->identify.sas_address == sas_address) {
596 sas_info->os.id = starget->id;
597 sas_info->os.channel = starget->channel;
598 }
599 }
600
601 out:
602 mutex_unlock(&ioc->sas_device_info_mutex);
603 return;
604}
605
606/**
607 * mptsas_add_device_component_by_fw - add a device by firmware mapping
608 * @ioc: Pointer to MPT_ADAPTER structure
609 * @channel: fw mapped channel
610 * @id: fw mapped id
611 *
612 **/
613static void
614mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id)
615{
616 struct mptsas_devinfo sas_device;
617 struct mptsas_enclosure enclosure_info;
618 int rc;
619
620 rc = mptsas_sas_device_pg0(ioc, &sas_device,
621 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
622 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
623 (channel << 8) + id);
624 if (rc)
625 return;
626
627 memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
628 mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
629 (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
630 MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
631 sas_device.handle_enclosure);
632
633 mptsas_add_device_component(ioc, sas_device.channel,
634 sas_device.id, sas_device.sas_address, sas_device.device_info,
635 sas_device.slot, enclosure_info.enclosure_logical_id);
636}
637
638/**
639 * mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding each individual device to the list
640 * @ioc: Pointer to MPT_ADAPTER structure
641 * @starget: scsi_target of the RAID volume being handled
642 *
643 *
644 **/
645static void
646mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
647 struct scsi_target *starget)
648{
649 CONFIGPARMS cfg;
650 ConfigPageHeader_t hdr;
651 dma_addr_t dma_handle;
652 pRaidVolumePage0_t buffer = NULL;
653 int i;
654 RaidPhysDiskPage0_t phys_disk;
655 struct mptsas_device_info *sas_info, *next;
656
657 memset(&cfg, 0 , sizeof(CONFIGPARMS));
658 memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
659 hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
660 /* assumes that all volumes are on channel 0 */
661 cfg.pageAddr = starget->id;
662 cfg.cfghdr.hdr = &hdr;
663 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
664 cfg.timeout = 10;
665
666 if (mpt_config(ioc, &cfg) != 0)
667 goto out;
668
669 if (!hdr.PageLength)
670 goto out;
671
672 buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
673 &dma_handle);
674
675 if (!buffer)
676 goto out;
677
678 cfg.physAddr = dma_handle;
679 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
680
681 if (mpt_config(ioc, &cfg) != 0)
682 goto out;
683
684 if (!buffer->NumPhysDisks)
685 goto out;
686
687 /*
688 * Adding entry for hidden components
689 */
690 for (i = 0; i < buffer->NumPhysDisks; i++) {
691
692 if (mpt_raid_phys_disk_pg0(ioc,
693 buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
694 continue;
695
696 mptsas_add_device_component_by_fw(ioc, phys_disk.PhysDiskBus,
697 phys_disk.PhysDiskID);
698
699 mutex_lock(&ioc->sas_device_info_mutex);
700 list_for_each_entry(sas_info, &ioc->sas_device_info_list,
701 list) {
702 if (!sas_info->is_logical_volume &&
703 (sas_info->fw.channel == phys_disk.PhysDiskBus &&
704 sas_info->fw.id == phys_disk.PhysDiskID)) {
705 sas_info->is_hidden_raid_component = 1;
706 sas_info->volume_id = starget->id;
707 }
708 }
709 mutex_unlock(&ioc->sas_device_info_mutex);
710
711 }
712
713 /*
714 * Delete all matching devices out of the list
715 */
716 mutex_lock(&ioc->sas_device_info_mutex);
717 list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
718 list) {
719 if (sas_info->is_logical_volume && sas_info->fw.id ==
720 starget->id) {
721 list_del(&sas_info->list);
722 kfree(sas_info);
723 }
724 }
725
726 sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
727 if (sas_info) {
728 sas_info->fw.id = starget->id;
729 sas_info->os.id = starget->id;
730 sas_info->os.channel = starget->channel;
731 sas_info->is_logical_volume = 1;
732 INIT_LIST_HEAD(&sas_info->list);
733 list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
734 }
735 mutex_unlock(&ioc->sas_device_info_mutex);
736
737 out:
738 if (buffer)
739 pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
740 dma_handle);
741}
742
743/**
744 * mptsas_add_device_component_starget - add a device by scsi_target
745 * @ioc: Pointer to MPT_ADAPTER structure
746 * @starget: scsi_target of the attached device
747 *
748 **/
749static void
750mptsas_add_device_component_starget(MPT_ADAPTER *ioc,
751 struct scsi_target *starget)
752{
753 VirtTarget *vtarget;
754 struct sas_rphy *rphy;
755 struct mptsas_phyinfo *phy_info = NULL;
756 struct mptsas_enclosure enclosure_info;
757
758 rphy = dev_to_rphy(starget->dev.parent);
759 vtarget = starget->hostdata;
760 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
761 rphy->identify.sas_address);
762 if (!phy_info)
763 return;
764
765 memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
766 mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
767 (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
768 MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
769 phy_info->attached.handle_enclosure);
770
771 mptsas_add_device_component(ioc, phy_info->attached.channel,
772 phy_info->attached.id, phy_info->attached.sas_address,
773 phy_info->attached.device_info,
774 phy_info->attached.slot, enclosure_info.enclosure_logical_id);
775}
776
777/**
778 * mptsas_del_device_component_by_os - once a device has been removed, mark its list entry as cached
779 * @ioc: Pointer to MPT_ADAPTER structure
780 * @channel: os mapped channel
781 * @id: os mapped id
782 *
783 **/
784static void
785mptsas_del_device_component_by_os(MPT_ADAPTER *ioc, u8 channel, u8 id)
786{
787 struct mptsas_device_info *sas_info, *next;
788
789 /*
790 * Set is_cached flag
791 */
792 list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
793 list) {
794 if (sas_info->os.channel == channel && sas_info->os.id == id)
795 sas_info->is_cached = 1;
796 }
797}
798
799/**
800 * mptsas_del_device_components - Cleaning the list
801 * @ioc: Pointer to MPT_ADAPTER structure
802 *
803 **/
804static void
805mptsas_del_device_components(MPT_ADAPTER *ioc)
806{
807 struct mptsas_device_info *sas_info, *next;
808
809 mutex_lock(&ioc->sas_device_info_mutex);
810 list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
811 list) {
812 list_del(&sas_info->list);
813 kfree(sas_info);
814 }
815 mutex_unlock(&ioc->sas_device_info_mutex);
816}
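
Every mutation of sas_device_info_list happens under sas_device_info_mutex, and the delete paths use the _safe iterator because entries are freed mid-walk. A standalone sketch of that delete-while-iterating idiom, with a plain singly linked list standing in for the kernel's list_head:

/* Sketch of the delete-while-iterating idiom behind
 * list_for_each_entry_safe(); a plain singly linked list stands in
 * for the kernel's list_head. */
#include <stdio.h>
#include <stdlib.h>

struct dev_info {
	int id;
	struct dev_info *next;
};

/* keep the successor alive across the free -- the whole point of the
 * _safe iterator */
static void del_matching(struct dev_info **head, int id)
{
	struct dev_info *cur = *head, *next, **link = head;

	for (; cur; cur = next) {
		next = cur->next;
		if (cur->id == id) {
			*link = next;
			free(cur);
		} else {
			link = &cur->next;
		}
	}
}

int main(void)
{
	struct dev_info *head = NULL;
	int id;

	for (id = 1; id <= 3; id++) {
		struct dev_info *n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->id = id;
		n->next = head;
		head = n;
	}
	del_matching(&head, 2);
	for (struct dev_info *p = head; p; p = p->next)
		printf("%d ", p->id);   /* prints: 3 1 */
	printf("\n");
	return 0;
}
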
817
382 818
383/* 819/*
384 * mptsas_setup_wide_ports 820 * mptsas_setup_wide_ports
@@ -434,8 +870,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
434 * Forming a port 870 * Forming a port
435 */ 871 */
436 if (!port_details) { 872 if (!port_details) {
437 port_details = kzalloc(sizeof(*port_details), 873 port_details = kzalloc(sizeof(struct
438 GFP_KERNEL); 874 mptsas_portinfo_details), GFP_KERNEL);
439 if (!port_details) 875 if (!port_details)
440 goto out; 876 goto out;
441 port_details->num_phys = 1; 877 port_details->num_phys = 1;
@@ -523,15 +959,62 @@ mptsas_find_vtarget(MPT_ADAPTER *ioc, u8 channel, u8 id)
523 VirtTarget *vtarget = NULL; 959 VirtTarget *vtarget = NULL;
524 960
525 shost_for_each_device(sdev, ioc->sh) { 961 shost_for_each_device(sdev, ioc->sh) {
526 if ((vdevice = sdev->hostdata) == NULL) 962 vdevice = sdev->hostdata;
963 if ((vdevice == NULL) ||
964 (vdevice->vtarget == NULL))
965 continue;
966 if ((vdevice->vtarget->tflags &
967 MPT_TARGET_FLAGS_RAID_COMPONENT ||
968 vdevice->vtarget->raidVolume))
527 continue; 969 continue;
528 if (vdevice->vtarget->id == id && 970 if (vdevice->vtarget->id == id &&
529 vdevice->vtarget->channel == channel) 971 vdevice->vtarget->channel == channel)
530 vtarget = vdevice->vtarget; 972 vtarget = vdevice->vtarget;
531 } 973 }
532 return vtarget; 974 return vtarget;
533} 975}
534 976
977static void
978mptsas_queue_device_delete(MPT_ADAPTER *ioc,
979 MpiEventDataSasDeviceStatusChange_t *sas_event_data)
980{
981 struct fw_event_work *fw_event;
982 int sz;
983
984 sz = offsetof(struct fw_event_work, event_data) +
985 sizeof(MpiEventDataSasDeviceStatusChange_t);
986 fw_event = kzalloc(sz, GFP_ATOMIC);
987 if (!fw_event) {
988 printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
989 ioc->name, __func__, __LINE__);
990 return;
991 }
992 memcpy(fw_event->event_data, sas_event_data,
993 sizeof(MpiEventDataSasDeviceStatusChange_t));
994 fw_event->event = MPI_EVENT_SAS_DEVICE_STATUS_CHANGE;
995 fw_event->ioc = ioc;
996 mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
997}
998
999static void
1000mptsas_queue_rescan(MPT_ADAPTER *ioc)
1001{
1002 struct fw_event_work *fw_event;
1003 int sz;
1004
1005 sz = offsetof(struct fw_event_work, event_data);
1006 fw_event = kzalloc(sz, GFP_ATOMIC);
1007 if (!fw_event) {
1008 printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
1009 ioc->name, __func__, __LINE__);
1010 return;
1011 }
1012 fw_event->event = -1;
1013 fw_event->ioc = ioc;
1014 mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
1015}
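
Both queue helpers size the allocation as offsetof(struct fw_event_work, event_data) plus the payload, the standard flexible-array pattern for carrying a variable-sized event behind a fixed header. A compilable sketch with a stand-in struct:

/* Sketch of the variable-sized event allocation; struct fw_event is a
 * stand-in ending in a flexible array member. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fw_event {
	int event;
	unsigned char event_data[];     /* payload lives past the header */
};

static struct fw_event *make_event(const void *data, size_t len, int type)
{
	size_t sz = offsetof(struct fw_event, event_data) + len;
	struct fw_event *ev = calloc(1, sz);

	if (!ev)
		return NULL;            /* the driver warns and bails out */
	ev->event = type;
	memcpy(ev->event_data, data, len);
	return ev;                      /* then queued with a short delay */
}

int main(void)
{
	unsigned char payload[8] = { 0xde, 0xad };
	struct fw_event *ev = make_event(payload, sizeof(payload), 15);

	if (!ev)
		return 1;
	printf("event=%d byte0=0x%02x\n", ev->event, ev->event_data[0]);
	free(ev);
	return 0;
}
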
1016
1017
535/** 1018/**
536 * mptsas_target_reset 1019 * mptsas_target_reset
537 * 1020 *
@@ -550,13 +1033,21 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
550{ 1033{
551 MPT_FRAME_HDR *mf; 1034 MPT_FRAME_HDR *mf;
552 SCSITaskMgmt_t *pScsiTm; 1035 SCSITaskMgmt_t *pScsiTm;
553 1036 if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0)
554 if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
555 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames @%d!!\n",
556 ioc->name,__func__, __LINE__));
557 return 0; 1037 return 0;
1038
1039
1040 mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
1041 if (mf == NULL) {
1042 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
1043 "%s, no msg frames @%d!!\n", ioc->name,
1044 __func__, __LINE__));
1045 goto out_fail;
558 } 1046 }
559 1047
1048 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
1049 ioc->name, mf));
1050
560 /* Format the Request 1051 /* Format the Request
561 */ 1052 */
562 pScsiTm = (SCSITaskMgmt_t *) mf; 1053 pScsiTm = (SCSITaskMgmt_t *) mf;
@@ -569,9 +1060,18 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
569 1060
570 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf); 1061 DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
571 1062
572 mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf); 1063 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1064 "TaskMgmt type=%d (sas device delete) fw_channel = %d fw_id = %d)\n",
1065 ioc->name, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, channel, id));
1066
1067 mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
573 1068
574 return 1; 1069 return 1;
1070
1071 out_fail:
1072
1073 mpt_clear_taskmgmt_in_progress_flag(ioc);
1074 return 0;
575} 1075}
576 1076
577/** 1077/**
@@ -602,11 +1102,12 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
602 1102
603 vtarget->deleted = 1; /* block IO */ 1103 vtarget->deleted = 1; /* block IO */
604 1104
605 target_reset_list = kzalloc(sizeof(*target_reset_list), 1105 target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event),
606 GFP_ATOMIC); 1106 GFP_ATOMIC);
607 if (!target_reset_list) { 1107 if (!target_reset_list) {
608 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n", 1108 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
609 ioc->name,__func__, __LINE__)); 1109 "%s, failed to allocate mem @%d..!!\n",
1110 ioc->name, __func__, __LINE__));
610 return; 1111 return;
611 } 1112 }
612 1113
@@ -614,84 +1115,101 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
614 sizeof(*sas_event_data)); 1115 sizeof(*sas_event_data));
615 list_add_tail(&target_reset_list->list, &hd->target_reset_list); 1116 list_add_tail(&target_reset_list->list, &hd->target_reset_list);
616 1117
617 if (hd->resetPending) 1118 target_reset_list->time_count = jiffies;
618 return;
619 1119
620 if (mptsas_target_reset(ioc, channel, id)) { 1120 if (mptsas_target_reset(ioc, channel, id)) {
621 target_reset_list->target_reset_issued = 1; 1121 target_reset_list->target_reset_issued = 1;
622 hd->resetPending = 1;
623 } 1122 }
624} 1123}
625 1124
626/** 1125/**
627 * mptsas_dev_reset_complete 1126 * mptsas_taskmgmt_complete - complete SAS task management function
628 * 1127 * @ioc: Pointer to MPT_ADAPTER structure
629 * Completion for TARGET_RESET after NOT_RESPONDING_EVENT,
630 * enable work queue to finish off removing device from upper layers.
631 * then send next TARGET_RESET in the queue.
632 *
633 * @ioc
634 * 1128 *
1129 * Completion for TARGET_RESET after NOT_RESPONDING_EVENT, enable work
1130 * queue to finish off removing device from upper layers. then send next
1131 * TARGET_RESET in the queue.
635 **/ 1132 **/
636static void 1133static int
637mptsas_dev_reset_complete(MPT_ADAPTER *ioc) 1134mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
638{ 1135{
639 MPT_SCSI_HOST *hd = shost_priv(ioc->sh); 1136 MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
640 struct list_head *head = &hd->target_reset_list; 1137 struct list_head *head = &hd->target_reset_list;
641 struct mptsas_target_reset_event *target_reset_list;
642 struct mptsas_hotplug_event *ev;
643 EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
644 u8 id, channel; 1138 u8 id, channel;
645 __le64 sas_address; 1139 struct mptsas_target_reset_event *target_reset_list;
1140 SCSITaskMgmtReply_t *pScsiTmReply;
1141
1142 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed: "
1143 "(mf = %p, mr = %p)\n", ioc->name, mf, mr));
1144
1145 pScsiTmReply = (SCSITaskMgmtReply_t *)mr;
1146 if (pScsiTmReply) {
1147 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1148 "\tTaskMgmt completed: fw_channel = %d, fw_id = %d,\n"
1149 "\ttask_type = 0x%02X, iocstatus = 0x%04X "
1150 "loginfo = 0x%08X,\n\tresponse_code = 0x%02X, "
1151 "term_cmnds = %d\n", ioc->name,
1152 pScsiTmReply->Bus, pScsiTmReply->TargetID,
1153 pScsiTmReply->TaskType,
1154 le16_to_cpu(pScsiTmReply->IOCStatus),
1155 le32_to_cpu(pScsiTmReply->IOCLogInfo),
1156 pScsiTmReply->ResponseCode,
1157 le32_to_cpu(pScsiTmReply->TerminationCount)));
1158
1159 if (pScsiTmReply->ResponseCode)
1160 mptscsih_taskmgmt_response_code(ioc,
1161 pScsiTmReply->ResponseCode);
1162 }
1163
1164 if (pScsiTmReply && (pScsiTmReply->TaskType ==
1165 MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK || pScsiTmReply->TaskType ==
1166 MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET)) {
1167 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
1168 ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
1169 memcpy(ioc->taskmgmt_cmds.reply, mr,
1170 min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
1171 if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
1172 ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
1173 complete(&ioc->taskmgmt_cmds.done);
1174 return 1;
1175 }
1176 return 0;
1177 }
1178
1179 mpt_clear_taskmgmt_in_progress_flag(ioc);
646 1180
647 if (list_empty(head)) 1181 if (list_empty(head))
648 return; 1182 return 1;
649 1183
650 target_reset_list = list_entry(head->next, struct mptsas_target_reset_event, list); 1184 target_reset_list = list_entry(head->next,
1185 struct mptsas_target_reset_event, list);
651 1186
652 sas_event_data = &target_reset_list->sas_event_data; 1187 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
653 id = sas_event_data->TargetID; 1188 "TaskMgmt: completed (%d seconds)\n",
654 channel = sas_event_data->Bus; 1189 ioc->name, jiffies_to_msecs(jiffies -
655 hd->resetPending = 0; 1190 target_reset_list->time_count)/1000));
1191
1192 id = pScsiTmReply->TargetID;
1193 channel = pScsiTmReply->Bus;
1194 target_reset_list->time_count = jiffies;
656 1195
657 /* 1196 /*
658 * retry target reset 1197 * retry target reset
659 */ 1198 */
660 if (!target_reset_list->target_reset_issued) { 1199 if (!target_reset_list->target_reset_issued) {
661 if (mptsas_target_reset(ioc, channel, id)) { 1200 if (mptsas_target_reset(ioc, channel, id))
662 target_reset_list->target_reset_issued = 1; 1201 target_reset_list->target_reset_issued = 1;
663 hd->resetPending = 1; 1202 return 1;
664 }
665 return;
666 } 1203 }
667 1204
668 /* 1205 /*
669 * enable work queue to remove device from upper layers 1206 * enable work queue to remove device from upper layers
670 */ 1207 */
671 list_del(&target_reset_list->list); 1208 list_del(&target_reset_list->list);
1209 if ((mptsas_find_vtarget(ioc, channel, id)) && !ioc->fw_events_off)
1210 mptsas_queue_device_delete(ioc,
1211 &target_reset_list->sas_event_data);
672 1212
673 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
674 if (!ev) {
675 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
676 ioc->name,__func__, __LINE__));
677 return;
678 }
679
680 INIT_WORK(&ev->work, mptsas_hotplug_work);
681 ev->ioc = ioc;
682 ev->handle = le16_to_cpu(sas_event_data->DevHandle);
683 ev->parent_handle =
684 le16_to_cpu(sas_event_data->ParentDevHandle);
685 ev->channel = channel;
686 ev->id =id;
687 ev->phy_id = sas_event_data->PhyNum;
688 memcpy(&sas_address, &sas_event_data->SASAddress,
689 sizeof(__le64));
690 ev->sas_address = le64_to_cpu(sas_address);
691 ev->device_info = le32_to_cpu(sas_event_data->DeviceInfo);
692 ev->event_type = MPTSAS_DEL_DEVICE;
693 schedule_work(&ev->work);
694 kfree(target_reset_list);
695 1213
696 /* 1214 /*
697 * issue target reset to next device in the queue 1215 * issue target reset to next device in the queue
@@ -699,34 +1217,19 @@ mptsas_dev_reset_complete(MPT_ADAPTER *ioc)
699 1217
700 head = &hd->target_reset_list; 1218 head = &hd->target_reset_list;
701 if (list_empty(head)) 1219 if (list_empty(head))
702 return; 1220 return 1;
703 1221
704 target_reset_list = list_entry(head->next, struct mptsas_target_reset_event, 1222 target_reset_list = list_entry(head->next, struct mptsas_target_reset_event,
705 list); 1223 list);
706 1224
707 sas_event_data = &target_reset_list->sas_event_data; 1225 id = target_reset_list->sas_event_data.TargetID;
708 id = sas_event_data->TargetID; 1226 channel = target_reset_list->sas_event_data.Bus;
709 channel = sas_event_data->Bus; 1227 target_reset_list->time_count = jiffies;
710 1228
711 if (mptsas_target_reset(ioc, channel, id)) { 1229 if (mptsas_target_reset(ioc, channel, id))
712 target_reset_list->target_reset_issued = 1; 1230 target_reset_list->target_reset_issued = 1;
713 hd->resetPending = 1;
714 }
715}
716 1231
717/** 1232 return 1;
718 * mptsas_taskmgmt_complete
719 *
720 * @ioc
721 * @mf
722 * @mr
723 *
724 **/
725static int
726mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
727{
728 mptsas_dev_reset_complete(ioc);
729 return mptscsih_taskmgmt_complete(ioc, mf, mr);
730} 1233}
731 1234
732/** 1235/**
@@ -740,37 +1243,59 @@ static int
740mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 1243mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
741{ 1244{
742 MPT_SCSI_HOST *hd; 1245 MPT_SCSI_HOST *hd;
743 struct mptsas_target_reset_event *target_reset_list, *n;
744 int rc; 1246 int rc;
745 1247
746 rc = mptscsih_ioc_reset(ioc, reset_phase); 1248 rc = mptscsih_ioc_reset(ioc, reset_phase);
1249 if ((ioc->bus_type != SAS) || (!rc))
1250 return rc;
747 1251
748 if (ioc->bus_type != SAS)
749 goto out;
750
751 if (reset_phase != MPT_IOC_POST_RESET)
752 goto out;
753
754 if (!ioc->sh || !ioc->sh->hostdata)
755 goto out;
756 hd = shost_priv(ioc->sh); 1252 hd = shost_priv(ioc->sh);
757 if (!hd->ioc) 1253 if (!hd->ioc)
758 goto out; 1254 goto out;
759 1255
760 if (list_empty(&hd->target_reset_list)) 1256 switch (reset_phase) {
761 goto out; 1257 case MPT_IOC_SETUP_RESET:
762 1258 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
763 /* flush the target_reset_list */ 1259 "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
764 list_for_each_entry_safe(target_reset_list, n, 1260 mptsas_fw_event_off(ioc);
765 &hd->target_reset_list, list) { 1261 break;
766 list_del(&target_reset_list->list); 1262 case MPT_IOC_PRE_RESET:
767 kfree(target_reset_list); 1263 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1264 "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
1265 break;
1266 case MPT_IOC_POST_RESET:
1267 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1268 "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
1269 if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
1270 ioc->sas_mgmt.status |= MPT_MGMT_STATUS_DID_IOCRESET;
1271 complete(&ioc->sas_mgmt.done);
1272 }
1273 mptsas_cleanup_fw_event_q(ioc);
1274 mptsas_queue_rescan(ioc);
1275 mptsas_fw_event_on(ioc);
1276 break;
1277 default:
1278 break;
768 } 1279 }
769 1280
770 out: 1281 out:
771 return rc; 1282 return rc;
772} 1283}
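For orientation, the three phases handled above arrive in a fixed order from the MPT base driver during a host reset; the sequence sketched below is an assumption inferred from the switch cases, not quoted from mptbase:

    /*
     * Assumed callback order during a host reset, matching the phases
     * handled in mptsas_ioc_reset() above:
     *
     *   mptsas_ioc_reset(ioc, MPT_IOC_SETUP_RESET);  firmware events off
     *       ... diagnostic reset of the IOC ...
     *   mptsas_ioc_reset(ioc, MPT_IOC_PRE_RESET);    last chance to quiesce
     *   mptsas_ioc_reset(ioc, MPT_IOC_POST_RESET);   complete pending sas_mgmt
     *                                                waiters, flush the event
     *                                                queue, queue a rescan,
     *                                                firmware events back on
     */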
773 1284
1285
1286/**
 1287 * enum device_state - TEST UNIT READY (TUR) result while probing a device
 1288 * @DEVICE_RETRY: need to retry the TUR
 1289 * @DEVICE_ERROR: TUR returned an error, don't add the device
1290 * @DEVICE_READY: device can be added
1291 *
1292 */
 1293enum device_state {
1294 DEVICE_RETRY,
1295 DEVICE_ERROR,
1296 DEVICE_READY,
1297};
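A minimal sketch of a consumer loop for these states, assuming a hypothetical mptsas_test_unit_ready() helper that returns one of the enum values (the actual probing path is not part of this hunk; msleep() is from linux/delay.h):

    static int mptsas_wait_device_ready(MPT_ADAPTER *ioc, u8 channel, u8 id)
    {
    	int retries;

    	for (retries = 0; retries < 10; retries++) {
    		switch (mptsas_test_unit_ready(ioc, channel, id)) {
    		case DEVICE_READY:
    			return 0;		/* safe to add the device */
    		case DEVICE_ERROR:
    			return -ENODEV;		/* don't add the device */
    		case DEVICE_RETRY:
    			msleep(100);		/* transient; try again */
    			break;
    		}
    	}
    	return -ETIMEDOUT;
    }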
1298
774static int 1299static int
775mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure, 1300mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
776 u32 form, u32 form_specific) 1301 u32 form, u32 form_specific)
@@ -836,15 +1361,308 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
836 return error; 1361 return error;
837} 1362}
838 1363
1364/**
1365 * mptsas_add_end_device - report a new end device to sas transport layer
1366 * @ioc: Pointer to MPT_ADAPTER structure
 1367 * @phy_info: describes the attached device
 1368 *
 1369 * Returns 0 on success; a non-zero value identifies the failing step
1370 *
1371 **/
1372static int
1373mptsas_add_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
1374{
1375 struct sas_rphy *rphy;
1376 struct sas_port *port;
1377 struct sas_identify identify;
1378 char *ds = NULL;
1379 u8 fw_id;
1380
1381 if (!phy_info) {
1382 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1383 "%s: exit at line=%d\n", ioc->name,
1384 __func__, __LINE__));
1385 return 1;
1386 }
1387
1388 fw_id = phy_info->attached.id;
1389
1390 if (mptsas_get_rphy(phy_info)) {
1391 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1392 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1393 __func__, fw_id, __LINE__));
1394 return 2;
1395 }
1396
1397 port = mptsas_get_port(phy_info);
1398 if (!port) {
1399 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1400 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1401 __func__, fw_id, __LINE__));
1402 return 3;
1403 }
1404
1405 if (phy_info->attached.device_info &
1406 MPI_SAS_DEVICE_INFO_SSP_TARGET)
1407 ds = "ssp";
1408 if (phy_info->attached.device_info &
1409 MPI_SAS_DEVICE_INFO_STP_TARGET)
1410 ds = "stp";
1411 if (phy_info->attached.device_info &
1412 MPI_SAS_DEVICE_INFO_SATA_DEVICE)
1413 ds = "sata";
1414
1415 printk(MYIOC_s_INFO_FMT "attaching %s device: fw_channel %d, fw_id %d,"
1416 " phy %d, sas_addr 0x%llx\n", ioc->name, ds,
1417 phy_info->attached.channel, phy_info->attached.id,
1418 phy_info->attached.phy_id, (unsigned long long)
1419 phy_info->attached.sas_address);
1420
1421 mptsas_parse_device_info(&identify, &phy_info->attached);
1422 rphy = sas_end_device_alloc(port);
1423 if (!rphy) {
1424 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1425 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1426 __func__, fw_id, __LINE__));
1427 return 5; /* non-fatal: an rphy can be added later */
1428 }
1429
1430 rphy->identify = identify;
1431 if (sas_rphy_add(rphy)) {
1432 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1433 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1434 __func__, fw_id, __LINE__));
1435 sas_rphy_free(rphy);
1436 return 6;
1437 }
1438 mptsas_set_rphy(ioc, phy_info, rphy);
1439 return 0;
1440}
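The non-zero return codes above identify the failing step for debugging; the caller in this patch (see mptsas_probe_devices() further down) simply ignores the value. An illustrative call pattern, with the optional logging being an addition for this sketch:

    	/* Only report the device once: skip phys that already own an rphy. */
    	if (!mptsas_get_rphy(phy_info) &&
    	    mptsas_add_end_device(ioc, phy_info))
    		dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
    		    "%s: fw_id=%d not added\n", ioc->name, __func__,
    		    phy_info->attached.id));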
1441
1442/**
1443 * mptsas_del_end_device - report a deleted end device to sas transport layer
1444 * @ioc: Pointer to MPT_ADAPTER structure
 1445 * @phy_info: describes the attached device
1446 *
1447 **/
1448static void
1449mptsas_del_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
1450{
1451 struct sas_rphy *rphy;
1452 struct sas_port *port;
1453 struct mptsas_portinfo *port_info;
1454 struct mptsas_phyinfo *phy_info_parent;
1455 int i;
1456 char *ds = NULL;
1457 u8 fw_id;
1458 u64 sas_address;
1459
1460 if (!phy_info)
1461 return;
1462
1463 fw_id = phy_info->attached.id;
1464 sas_address = phy_info->attached.sas_address;
1465
1466 if (!phy_info->port_details) {
1467 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1468 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1469 __func__, fw_id, __LINE__));
1470 return;
1471 }
1472 rphy = mptsas_get_rphy(phy_info);
1473 if (!rphy) {
1474 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1475 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1476 __func__, fw_id, __LINE__));
1477 return;
1478 }
1479
1480 if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SSP_INITIATOR
1481 || phy_info->attached.device_info
1482 & MPI_SAS_DEVICE_INFO_SMP_INITIATOR
1483 || phy_info->attached.device_info
1484 & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
1485 ds = "initiator";
1486 if (phy_info->attached.device_info &
1487 MPI_SAS_DEVICE_INFO_SSP_TARGET)
1488 ds = "ssp";
1489 if (phy_info->attached.device_info &
1490 MPI_SAS_DEVICE_INFO_STP_TARGET)
1491 ds = "stp";
1492 if (phy_info->attached.device_info &
1493 MPI_SAS_DEVICE_INFO_SATA_DEVICE)
1494 ds = "sata";
1495
1496 dev_printk(KERN_DEBUG, &rphy->dev, MYIOC_s_FMT
1497 "removing %s device: fw_channel %d, fw_id %d, phy %d,"
1498 "sas_addr 0x%llx\n", ioc->name, ds, phy_info->attached.channel,
1499 phy_info->attached.id, phy_info->attached.phy_id,
1500 (unsigned long long) sas_address);
1501
1502 port = mptsas_get_port(phy_info);
1503 if (!port) {
1504 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
1505 "%s: fw_id=%d exit at line=%d\n", ioc->name,
1506 __func__, fw_id, __LINE__));
1507 return;
1508 }
1509 port_info = phy_info->portinfo;
1510 phy_info_parent = port_info->phy_info;
1511 for (i = 0; i < port_info->num_phys; i++, phy_info_parent++) {
1512 if (!phy_info_parent->phy)
1513 continue;
1514 if (phy_info_parent->attached.sas_address !=
1515 sas_address)
1516 continue;
1517 dev_printk(KERN_DEBUG, &phy_info_parent->phy->dev,
1518 MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n",
1519 ioc->name, phy_info_parent->phy_id,
1520 phy_info_parent->phy);
1521 sas_port_delete_phy(port, phy_info_parent->phy);
1522 }
1523
1524 dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
1525 "delete port %d, sas_addr (0x%llx)\n", ioc->name,
1526 port->port_identifier, (unsigned long long)sas_address);
1527 sas_port_delete(port);
1528 mptsas_set_port(ioc, phy_info, NULL);
1529 mptsas_port_delete(ioc, phy_info->port_details);
1530}
1531
1532struct mptsas_phyinfo *
1533mptsas_refreshing_device_handles(MPT_ADAPTER *ioc,
1534 struct mptsas_devinfo *sas_device)
1535{
1536 struct mptsas_phyinfo *phy_info;
1537 struct mptsas_portinfo *port_info;
1538 int i;
1539
1540 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
1541 sas_device->sas_address);
1542 if (!phy_info)
1543 goto out;
1544 port_info = phy_info->portinfo;
1545 if (!port_info)
1546 goto out;
1547 mutex_lock(&ioc->sas_topology_mutex);
1548 for (i = 0; i < port_info->num_phys; i++) {
1549 if (port_info->phy_info[i].attached.sas_address !=
1550 sas_device->sas_address)
1551 continue;
1552 port_info->phy_info[i].attached.channel = sas_device->channel;
1553 port_info->phy_info[i].attached.id = sas_device->id;
1554 port_info->phy_info[i].attached.sas_address =
1555 sas_device->sas_address;
1556 port_info->phy_info[i].attached.handle = sas_device->handle;
1557 port_info->phy_info[i].attached.handle_parent =
1558 sas_device->handle_parent;
1559 port_info->phy_info[i].attached.handle_enclosure =
1560 sas_device->handle_enclosure;
1561 }
1562 mutex_unlock(&ioc->sas_topology_mutex);
1563 out:
1564 return phy_info;
1565}
1566
1567/**
1568 * mptsas_firmware_event_work - work thread for processing fw events
1569 * @work: work queue payload containing info describing the event
1570 * Context: user
1571 *
1572 */
1573static void
1574mptsas_firmware_event_work(struct work_struct *work)
1575{
1576 struct fw_event_work *fw_event =
1577 container_of(work, struct fw_event_work, work.work);
1578 MPT_ADAPTER *ioc = fw_event->ioc;
1579
1580 /* special rescan topology handling */
1581 if (fw_event->event == -1) {
1582 if (ioc->in_rescan) {
1583 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
1584 "%s: rescan ignored as it is in progress\n",
1585 ioc->name, __func__));
1586 return;
1587 }
1588 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: rescan after "
1589 "reset\n", ioc->name, __func__));
1590 ioc->in_rescan = 1;
1591 mptsas_not_responding_devices(ioc);
1592 mptsas_scan_sas_topology(ioc);
1593 ioc->in_rescan = 0;
1594 mptsas_free_fw_event(ioc, fw_event);
1595 return;
1596 }
1597
1598 /* events handling turned off during host reset */
1599 if (ioc->fw_events_off) {
1600 mptsas_free_fw_event(ioc, fw_event);
1601 return;
1602 }
1603
1604 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: fw_event=(0x%p), "
1605 "event = (0x%02x)\n", ioc->name, __func__, fw_event,
1606 (fw_event->event & 0xFF)));
1607
1608 switch (fw_event->event) {
1609 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
1610 mptsas_send_sas_event(fw_event);
1611 break;
1612 case MPI_EVENT_INTEGRATED_RAID:
1613 mptsas_send_raid_event(fw_event);
1614 break;
1615 case MPI_EVENT_IR2:
1616 mptsas_send_ir2_event(fw_event);
1617 break;
1618 case MPI_EVENT_PERSISTENT_TABLE_FULL:
1619 mptbase_sas_persist_operation(ioc,
1620 MPI_SAS_OP_CLEAR_NOT_PRESENT);
1621 mptsas_free_fw_event(ioc, fw_event);
1622 break;
1623 case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
1624 mptsas_broadcast_primative_work(fw_event);
1625 break;
1626 case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
1627 mptsas_send_expander_event(fw_event);
1628 break;
1629 case MPI_EVENT_SAS_PHY_LINK_STATUS:
1630 mptsas_send_link_status_event(fw_event);
1631 break;
1632 case MPI_EVENT_QUEUE_FULL:
1633 mptsas_handle_queue_full_event(fw_event);
1634 break;
1635 }
1636}
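The dispatcher above is fed from interrupt-time event handling. Below is a minimal producer sketch for the special event == -1 rescan marker; the fw_event_q workqueue field and the fw_event_work layout are assumptions consistent with the container_of() and field accesses in this patch:

    static void mptsas_queue_rescan_sketch(MPT_ADAPTER *ioc)
    {
    	struct fw_event_work *fw_event;

    	fw_event = kzalloc(sizeof(*fw_event), GFP_ATOMIC);
    	if (!fw_event)
    		return;
    	fw_event->event = -1;	/* handled as "rescan after reset" above */
    	fw_event->ioc = ioc;
    	INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work);
    	queue_delayed_work(ioc->fw_event_q, &fw_event->work,
    	    msecs_to_jiffies(1000));
    }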
1637
1638
1639
839static int 1640static int
840mptsas_slave_configure(struct scsi_device *sdev) 1641mptsas_slave_configure(struct scsi_device *sdev)
841{ 1642{
1643 struct Scsi_Host *host = sdev->host;
1644 MPT_SCSI_HOST *hd = shost_priv(host);
1645 MPT_ADAPTER *ioc = hd->ioc;
1646 VirtDevice *vdevice = sdev->hostdata;
842 1647
843 if (sdev->channel == MPTSAS_RAID_CHANNEL) 1648 if (vdevice->vtarget->deleted) {
1649 sdev_printk(KERN_INFO, sdev, "clearing deleted flag\n");
1650 vdevice->vtarget->deleted = 0;
1651 }
1652
1653 /*
1654 * RAID volumes placed beyond the last expected port.
 1655 * Skip sending SAS mode pages in that case.
1656 */
1657 if (sdev->channel == MPTSAS_RAID_CHANNEL) {
1658 mptsas_add_device_component_starget_ir(ioc, scsi_target(sdev));
844 goto out; 1659 goto out;
1660 }
845 1661
846 sas_read_port_mode_page(sdev); 1662 sas_read_port_mode_page(sdev);
847 1663
1664 mptsas_add_device_component_starget(ioc, scsi_target(sdev));
1665
848 out: 1666 out:
849 return mptscsih_slave_configure(sdev); 1667 return mptscsih_slave_configure(sdev);
850} 1668}
@@ -875,9 +1693,18 @@ mptsas_target_alloc(struct scsi_target *starget)
875 * RAID volumes placed beyond the last expected port. 1693 * RAID volumes placed beyond the last expected port.
876 */ 1694 */
877 if (starget->channel == MPTSAS_RAID_CHANNEL) { 1695 if (starget->channel == MPTSAS_RAID_CHANNEL) {
878 for (i=0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) 1696 if (!ioc->raid_data.pIocPg2) {
879 if (id == ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID) 1697 kfree(vtarget);
880 channel = ioc->raid_data.pIocPg2->RaidVolume[i].VolumeBus; 1698 return -ENXIO;
1699 }
1700 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
1701 if (id == ioc->raid_data.pIocPg2->
1702 RaidVolume[i].VolumeID) {
1703 channel = ioc->raid_data.pIocPg2->
1704 RaidVolume[i].VolumeBus;
1705 }
1706 }
1707 vtarget->raidVolume = 1;
881 goto out; 1708 goto out;
882 } 1709 }
883 1710
@@ -926,11 +1753,18 @@ mptsas_target_destroy(struct scsi_target *starget)
926 struct sas_rphy *rphy; 1753 struct sas_rphy *rphy;
927 struct mptsas_portinfo *p; 1754 struct mptsas_portinfo *p;
928 int i; 1755 int i;
929 MPT_ADAPTER *ioc = hd->ioc; 1756 MPT_ADAPTER *ioc = hd->ioc;
1757 VirtTarget *vtarget;
930 1758
931 if (!starget->hostdata) 1759 if (!starget->hostdata)
932 return; 1760 return;
933 1761
1762 vtarget = starget->hostdata;
1763
1764 mptsas_del_device_component_by_os(ioc, starget->channel,
1765 starget->id);
1766
1767
934 if (starget->channel == MPTSAS_RAID_CHANNEL) 1768 if (starget->channel == MPTSAS_RAID_CHANNEL)
935 goto out; 1769 goto out;
936 1770
@@ -940,12 +1774,21 @@ mptsas_target_destroy(struct scsi_target *starget)
940 if (p->phy_info[i].attached.sas_address != 1774 if (p->phy_info[i].attached.sas_address !=
941 rphy->identify.sas_address) 1775 rphy->identify.sas_address)
942 continue; 1776 continue;
1777
1778 starget_printk(KERN_INFO, starget, MYIOC_s_FMT
1779 "delete device: fw_channel %d, fw_id %d, phy %d, "
1780 "sas_addr 0x%llx\n", ioc->name,
1781 p->phy_info[i].attached.channel,
1782 p->phy_info[i].attached.id,
1783 p->phy_info[i].attached.phy_id, (unsigned long long)
1784 p->phy_info[i].attached.sas_address);
1785
943 mptsas_set_starget(&p->phy_info[i], NULL); 1786 mptsas_set_starget(&p->phy_info[i], NULL);
944 goto out;
945 } 1787 }
946 } 1788 }
947 1789
948 out: 1790 out:
1791 vtarget->starget = NULL;
949 kfree(starget->hostdata); 1792 kfree(starget->hostdata);
950 starget->hostdata = NULL; 1793 starget->hostdata = NULL;
951} 1794}
@@ -1008,6 +1851,8 @@ mptsas_slave_alloc(struct scsi_device *sdev)
1008static int 1851static int
1009mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) 1852mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1010{ 1853{
1854 MPT_SCSI_HOST *hd;
1855 MPT_ADAPTER *ioc;
1011 VirtDevice *vdevice = SCpnt->device->hostdata; 1856 VirtDevice *vdevice = SCpnt->device->hostdata;
1012 1857
1013 if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) { 1858 if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) {
@@ -1016,6 +1861,12 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1016 return 0; 1861 return 0;
1017 } 1862 }
1018 1863
1864 hd = shost_priv(SCpnt->device->host);
1865 ioc = hd->ioc;
1866
1867 if (ioc->sas_discovery_quiesce_io)
1868 return SCSI_MLQUEUE_HOST_BUSY;
1869
1019// scsi_print_command(SCpnt); 1870// scsi_print_command(SCpnt);
1020 1871
1021 return mptscsih_qcmd(SCpnt,done); 1872 return mptscsih_qcmd(SCpnt,done);
@@ -1114,14 +1965,19 @@ static int mptsas_get_linkerrors(struct sas_phy *phy)
1114static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, 1965static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
1115 MPT_FRAME_HDR *reply) 1966 MPT_FRAME_HDR *reply)
1116{ 1967{
1117 ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_COMMAND_GOOD; 1968 ioc->sas_mgmt.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
1118 if (reply != NULL) { 1969 if (reply != NULL) {
1119 ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_RF_VALID; 1970 ioc->sas_mgmt.status |= MPT_MGMT_STATUS_RF_VALID;
1120 memcpy(ioc->sas_mgmt.reply, reply, 1971 memcpy(ioc->sas_mgmt.reply, reply,
1121 min(ioc->reply_sz, 4 * reply->u.reply.MsgLength)); 1972 min(ioc->reply_sz, 4 * reply->u.reply.MsgLength));
1122 } 1973 }
1123 complete(&ioc->sas_mgmt.done); 1974
1124 return 1; 1975 if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
1976 ioc->sas_mgmt.status &= ~MPT_MGMT_STATUS_PENDING;
1977 complete(&ioc->sas_mgmt.done);
1978 return 1;
1979 }
1980 return 0;
1125} 1981}
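The MPT_MGMT_STATUS_PENDING test above is one half of a handshake: the issue side sets the bit before sending and drops it when done, so a late reply arriving after a timeout cannot complete() a stale waiter. Condensed from mptsas_phy_reset() below:

    	INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)	/* sets PENDING */
    	mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
    	timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
    	    10 * HZ);
    	/* ... inspect ioc->sas_mgmt.status on success ... */
    	CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)	/* drops PENDING; a late
    						   reply now returns 0 */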
1126 1982
1127static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset) 1983static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
@@ -1160,6 +2016,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
1160 MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET; 2016 MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET;
1161 req->PhyNum = phy->identify.phy_identifier; 2017 req->PhyNum = phy->identify.phy_identifier;
1162 2018
2019 INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
1163 mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); 2020 mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
1164 2021
1165 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 2022 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
@@ -1174,7 +2031,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
1174 2031
1175 /* a reply frame is expected */ 2032 /* a reply frame is expected */
1176 if ((ioc->sas_mgmt.status & 2033 if ((ioc->sas_mgmt.status &
1177 MPT_IOCTL_STATUS_RF_VALID) == 0) { 2034 MPT_MGMT_STATUS_RF_VALID) == 0) {
1178 error = -ENXIO; 2035 error = -ENXIO;
1179 goto out_unlock; 2036 goto out_unlock;
1180 } 2037 }
@@ -1191,6 +2048,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
1191 error = 0; 2048 error = 0;
1192 2049
1193 out_unlock: 2050 out_unlock:
2051 CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
1194 mutex_unlock(&ioc->sas_mgmt.mutex); 2052 mutex_unlock(&ioc->sas_mgmt.mutex);
1195 out: 2053 out:
1196 return error; 2054 return error;
@@ -1277,8 +2135,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1277 /* do we need to support multiple segments? */ 2135 /* do we need to support multiple segments? */
1278 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 2136 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1279 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n", 2137 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
1280 ioc->name, __func__, req->bio->bi_vcnt, req->data_len, 2138 ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
1281 rsp->bio->bi_vcnt, rsp->data_len); 2139 rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1282 return -EINVAL; 2140 return -EINVAL;
1283 } 2141 }
1284 2142
@@ -1295,7 +2153,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1295 smpreq = (SmpPassthroughRequest_t *)mf; 2153 smpreq = (SmpPassthroughRequest_t *)mf;
1296 memset(smpreq, 0, sizeof(*smpreq)); 2154 memset(smpreq, 0, sizeof(*smpreq));
1297 2155
1298 smpreq->RequestDataLength = cpu_to_le16(req->data_len - 4); 2156 smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
1299 smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH; 2157 smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
1300 2158
1301 if (rphy) 2159 if (rphy)
@@ -1304,7 +2162,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1304 struct mptsas_portinfo *port_info; 2162 struct mptsas_portinfo *port_info;
1305 2163
1306 mutex_lock(&ioc->sas_topology_mutex); 2164 mutex_lock(&ioc->sas_topology_mutex);
1307 port_info = mptsas_get_hba_portinfo(ioc); 2165 port_info = ioc->hba_port_info;
1308 if (port_info && port_info->phy_info) 2166 if (port_info && port_info->phy_info)
1309 sas_address = 2167 sas_address =
1310 port_info->phy_info[0].phy->identify.sas_address; 2168 port_info->phy_info[0].phy->identify.sas_address;
@@ -1319,26 +2177,32 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1319 /* request */ 2177 /* request */
1320 flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT | 2178 flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1321 MPI_SGE_FLAGS_END_OF_BUFFER | 2179 MPI_SGE_FLAGS_END_OF_BUFFER |
1322 MPI_SGE_FLAGS_DIRECTION | 2180 MPI_SGE_FLAGS_DIRECTION)
1323 mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT; 2181 << MPI_SGE_FLAGS_SHIFT;
1324 flagsLength |= (req->data_len - 4); 2182 flagsLength |= (blk_rq_bytes(req) - 4);
1325 2183
1326 dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), 2184 dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
1327 req->data_len, PCI_DMA_BIDIRECTIONAL); 2185 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
1328 if (!dma_addr_out) 2186 if (!dma_addr_out)
1329 goto put_mf; 2187 goto put_mf;
1330 mpt_add_sge(psge, flagsLength, dma_addr_out); 2188 ioc->add_sge(psge, flagsLength, dma_addr_out);
1331 psge += (sizeof(u32) + sizeof(dma_addr_t)); 2189 psge += ioc->SGE_size;
1332 2190
1333 /* response */ 2191 /* response */
1334 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; 2192 flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1335 flagsLength |= rsp->data_len + 4; 2193 MPI_SGE_FLAGS_SYSTEM_ADDRESS |
2194 MPI_SGE_FLAGS_IOC_TO_HOST |
2195 MPI_SGE_FLAGS_END_OF_BUFFER;
2196
2197 flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
2198 flagsLength |= blk_rq_bytes(rsp) + 4;
1336 dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), 2199 dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
1337 rsp->data_len, PCI_DMA_BIDIRECTIONAL); 2200 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
1338 if (!dma_addr_in) 2201 if (!dma_addr_in)
1339 goto unmap; 2202 goto unmap;
1340 mpt_add_sge(psge, flagsLength, dma_addr_in); 2203 ioc->add_sge(psge, flagsLength, dma_addr_in);
1341 2204
2205 INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
1342 mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); 2206 mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
1343 2207
1344 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ); 2208 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
@@ -1351,30 +2215,32 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1351 } 2215 }
1352 mf = NULL; 2216 mf = NULL;
1353 2217
1354 if (ioc->sas_mgmt.status & MPT_IOCTL_STATUS_RF_VALID) { 2218 if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
1355 SmpPassthroughReply_t *smprep; 2219 SmpPassthroughReply_t *smprep;
1356 2220
1357 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; 2221 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
1358 memcpy(req->sense, smprep, sizeof(*smprep)); 2222 memcpy(req->sense, smprep, sizeof(*smprep));
1359 req->sense_len = sizeof(*smprep); 2223 req->sense_len = sizeof(*smprep);
1360 req->data_len = 0; 2224 req->resid_len = 0;
1361 rsp->data_len -= smprep->ResponseDataLength; 2225 rsp->resid_len -= smprep->ResponseDataLength;
1362 } else { 2226 } else {
1363 printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n", 2227 printk(MYIOC_s_ERR_FMT
2228 "%s: smp passthru reply failed to be returned\n",
1364 ioc->name, __func__); 2229 ioc->name, __func__);
1365 ret = -ENXIO; 2230 ret = -ENXIO;
1366 } 2231 }
1367unmap: 2232unmap:
1368 if (dma_addr_out) 2233 if (dma_addr_out)
1369 pci_unmap_single(ioc->pcidev, dma_addr_out, req->data_len, 2234 pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req),
1370 PCI_DMA_BIDIRECTIONAL); 2235 PCI_DMA_BIDIRECTIONAL);
1371 if (dma_addr_in) 2236 if (dma_addr_in)
1372 pci_unmap_single(ioc->pcidev, dma_addr_in, rsp->data_len, 2237 pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp),
1373 PCI_DMA_BIDIRECTIONAL); 2238 PCI_DMA_BIDIRECTIONAL);
1374put_mf: 2239put_mf:
1375 if (mf) 2240 if (mf)
1376 mpt_free_msg_frame(ioc, mf); 2241 mpt_free_msg_frame(ioc, mf);
1377out_unlock: 2242out_unlock:
2243 CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
1378 mutex_unlock(&ioc->sas_mgmt.mutex); 2244 mutex_unlock(&ioc->sas_mgmt.mutex);
1379out: 2245out:
1380 return ret; 2246 return ret;
@@ -1438,7 +2304,7 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
1438 2304
1439 port_info->num_phys = buffer->NumPhys; 2305 port_info->num_phys = buffer->NumPhys;
1440 port_info->phy_info = kcalloc(port_info->num_phys, 2306 port_info->phy_info = kcalloc(port_info->num_phys,
1441 sizeof(*port_info->phy_info),GFP_KERNEL); 2307 sizeof(struct mptsas_phyinfo), GFP_KERNEL);
1442 if (!port_info->phy_info) { 2308 if (!port_info->phy_info) {
1443 error = -ENOMEM; 2309 error = -ENOMEM;
1444 goto out_free_consistent; 2310 goto out_free_consistent;
@@ -1600,10 +2466,6 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
1600 __le64 sas_address; 2466 __le64 sas_address;
1601 int error=0; 2467 int error=0;
1602 2468
1603 if (ioc->sas_discovery_runtime &&
1604 mptsas_is_end_device(device_info))
1605 goto out;
1606
1607 hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION; 2469 hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
1608 hdr.ExtPageLength = 0; 2470 hdr.ExtPageLength = 0;
1609 hdr.PageNumber = 0; 2471 hdr.PageNumber = 0;
@@ -1644,6 +2506,7 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
1644 2506
1645 mptsas_print_device_pg0(ioc, buffer); 2507 mptsas_print_device_pg0(ioc, buffer);
1646 2508
2509 memset(device_info, 0, sizeof(struct mptsas_devinfo));
1647 device_info->handle = le16_to_cpu(buffer->DevHandle); 2510 device_info->handle = le16_to_cpu(buffer->DevHandle);
1648 device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle); 2511 device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle);
1649 device_info->handle_enclosure = 2512 device_info->handle_enclosure =
@@ -1675,7 +2538,9 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
1675 SasExpanderPage0_t *buffer; 2538 SasExpanderPage0_t *buffer;
1676 dma_addr_t dma_handle; 2539 dma_addr_t dma_handle;
1677 int i, error; 2540 int i, error;
2541 __le64 sas_address;
1678 2542
2543 memset(port_info, 0, sizeof(struct mptsas_portinfo));
1679 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION; 2544 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
1680 hdr.ExtPageLength = 0; 2545 hdr.ExtPageLength = 0;
1681 hdr.PageNumber = 0; 2546 hdr.PageNumber = 0;
@@ -1721,18 +2586,23 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
1721 } 2586 }
1722 2587
1723 /* save config data */ 2588 /* save config data */
1724 port_info->num_phys = buffer->NumPhys; 2589 port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1;
1725 port_info->phy_info = kcalloc(port_info->num_phys, 2590 port_info->phy_info = kcalloc(port_info->num_phys,
1726 sizeof(*port_info->phy_info),GFP_KERNEL); 2591 sizeof(struct mptsas_phyinfo), GFP_KERNEL);
1727 if (!port_info->phy_info) { 2592 if (!port_info->phy_info) {
1728 error = -ENOMEM; 2593 error = -ENOMEM;
1729 goto out_free_consistent; 2594 goto out_free_consistent;
1730 } 2595 }
1731 2596
2597 memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
1732 for (i = 0; i < port_info->num_phys; i++) { 2598 for (i = 0; i < port_info->num_phys; i++) {
1733 port_info->phy_info[i].portinfo = port_info; 2599 port_info->phy_info[i].portinfo = port_info;
1734 port_info->phy_info[i].handle = 2600 port_info->phy_info[i].handle =
1735 le16_to_cpu(buffer->DevHandle); 2601 le16_to_cpu(buffer->DevHandle);
2602 port_info->phy_info[i].identify.sas_address =
2603 le64_to_cpu(sas_address);
2604 port_info->phy_info[i].identify.handle_parent =
2605 le16_to_cpu(buffer->ParentDevHandle);
1736 } 2606 }
1737 2607
1738 out_free_consistent: 2608 out_free_consistent:
@@ -1752,11 +2622,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
1752 dma_addr_t dma_handle; 2622 dma_addr_t dma_handle;
1753 int error=0; 2623 int error=0;
1754 2624
1755 if (ioc->sas_discovery_runtime && 2625 hdr.PageVersion = MPI_SASEXPANDER1_PAGEVERSION;
1756 mptsas_is_end_device(&phy_info->attached))
1757 goto out;
1758
1759 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
1760 hdr.ExtPageLength = 0; 2626 hdr.ExtPageLength = 0;
1761 hdr.PageNumber = 1; 2627 hdr.PageNumber = 1;
1762 hdr.Reserved1 = 0; 2628 hdr.Reserved1 = 0;
@@ -1791,6 +2657,12 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
1791 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; 2657 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
1792 2658
1793 error = mpt_config(ioc, &cfg); 2659 error = mpt_config(ioc, &cfg);
2660
2661 if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
2662 error = -ENODEV;
2663 goto out;
2664 }
2665
1794 if (error) 2666 if (error)
1795 goto out_free_consistent; 2667 goto out_free_consistent;
1796 2668
@@ -2010,16 +2882,21 @@ static int mptsas_probe_one_phy(struct device *dev,
2010 goto out; 2882 goto out;
2011 } 2883 }
2012 mptsas_set_port(ioc, phy_info, port); 2884 mptsas_set_port(ioc, phy_info, port);
2013 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT 2885 devtprintk(ioc, dev_printk(KERN_DEBUG, &port->dev,
2014 "sas_port_alloc: port=%p dev=%p port_id=%d\n", 2886 MYIOC_s_FMT "add port %d, sas_addr (0x%llx)\n",
2015 ioc->name, port, dev, port->port_identifier)); 2887 ioc->name, port->port_identifier,
2888 (unsigned long long)phy_info->
2889 attached.sas_address));
2016 } 2890 }
2017 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_port_add_phy: phy_id=%d\n", 2891 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
2018 ioc->name, phy_info->phy_id)); 2892 "sas_port_add_phy: phy_id=%d\n",
2893 ioc->name, phy_info->phy_id));
2019 sas_port_add_phy(port, phy_info->phy); 2894 sas_port_add_phy(port, phy_info->phy);
2020 phy_info->sas_port_add_phy = 0; 2895 phy_info->sas_port_add_phy = 0;
2896 devtprintk(ioc, dev_printk(KERN_DEBUG, &phy_info->phy->dev,
2897 MYIOC_s_FMT "add phy %d, phy-obj (0x%p)\n", ioc->name,
2898 phy_info->phy_id, phy_info->phy));
2021 } 2899 }
2022
2023 if (!mptsas_get_rphy(phy_info) && port && !port->rphy) { 2900 if (!mptsas_get_rphy(phy_info) && port && !port->rphy) {
2024 2901
2025 struct sas_rphy *rphy; 2902 struct sas_rphy *rphy;
@@ -2032,18 +2909,17 @@ static int mptsas_probe_one_phy(struct device *dev,
2032 * the adding/removing of devices that occur 2909 * the adding/removing of devices that occur
2033 * after start of day. 2910 * after start of day.
2034 */ 2911 */
2035 if (ioc->sas_discovery_runtime && 2912 if (mptsas_is_end_device(&phy_info->attached) &&
2036 mptsas_is_end_device(&phy_info->attached)) 2913 phy_info->attached.handle_parent) {
2037 goto out; 2914 goto out;
2915 }
2038 2916
2039 mptsas_parse_device_info(&identify, &phy_info->attached); 2917 mptsas_parse_device_info(&identify, &phy_info->attached);
2040 if (scsi_is_host_device(parent)) { 2918 if (scsi_is_host_device(parent)) {
2041 struct mptsas_portinfo *port_info; 2919 struct mptsas_portinfo *port_info;
2042 int i; 2920 int i;
2043 2921
2044 mutex_lock(&ioc->sas_topology_mutex); 2922 port_info = ioc->hba_port_info;
2045 port_info = mptsas_get_hba_portinfo(ioc);
2046 mutex_unlock(&ioc->sas_topology_mutex);
2047 2923
2048 for (i = 0; i < port_info->num_phys; i++) 2924 for (i = 0; i < port_info->num_phys; i++)
2049 if (port_info->phy_info[i].identify.sas_address == 2925 if (port_info->phy_info[i].identify.sas_address ==
@@ -2102,7 +2978,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
2102 struct mptsas_portinfo *port_info, *hba; 2978 struct mptsas_portinfo *port_info, *hba;
2103 int error = -ENOMEM, i; 2979 int error = -ENOMEM, i;
2104 2980
2105 hba = kzalloc(sizeof(*port_info), GFP_KERNEL); 2981 hba = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
2106 if (! hba) 2982 if (! hba)
2107 goto out; 2983 goto out;
2108 2984
@@ -2112,9 +2988,10 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
2112 2988
2113 mptsas_sas_io_unit_pg1(ioc); 2989 mptsas_sas_io_unit_pg1(ioc);
2114 mutex_lock(&ioc->sas_topology_mutex); 2990 mutex_lock(&ioc->sas_topology_mutex);
2115 port_info = mptsas_get_hba_portinfo(ioc); 2991 port_info = ioc->hba_port_info;
2116 if (!port_info) { 2992 if (!port_info) {
2117 port_info = hba; 2993 ioc->hba_port_info = port_info = hba;
2994 ioc->hba_port_num_phy = port_info->num_phys;
2118 list_add_tail(&port_info->list, &ioc->sas_topology); 2995 list_add_tail(&port_info->list, &ioc->sas_topology);
2119 } else { 2996 } else {
2120 for (i = 0; i < hba->num_phys; i++) { 2997 for (i = 0; i < hba->num_phys; i++) {
@@ -2130,15 +3007,22 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
2130 hba = NULL; 3007 hba = NULL;
2131 } 3008 }
2132 mutex_unlock(&ioc->sas_topology_mutex); 3009 mutex_unlock(&ioc->sas_topology_mutex);
3010#if defined(CPQ_CIM)
3011 ioc->num_ports = port_info->num_phys;
3012#endif
2133 for (i = 0; i < port_info->num_phys; i++) { 3013 for (i = 0; i < port_info->num_phys; i++) {
2134 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i], 3014 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
2135 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER << 3015 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
2136 MPI_SAS_PHY_PGAD_FORM_SHIFT), i); 3016 MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
2137 3017 port_info->phy_info[i].identify.handle =
3018 port_info->phy_info[i].handle;
2138 mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify, 3019 mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
2139 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 3020 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
2140 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 3021 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2141 port_info->phy_info[i].handle); 3022 port_info->phy_info[i].identify.handle);
3023 if (!ioc->hba_port_sas_addr)
3024 ioc->hba_port_sas_addr =
3025 port_info->phy_info[i].identify.sas_address;
2142 port_info->phy_info[i].identify.phy_id = 3026 port_info->phy_info[i].identify.phy_id =
2143 port_info->phy_info[i].phy_id = i; 3027 port_info->phy_info[i].phy_id = i;
2144 if (port_info->phy_info[i].attached.handle) 3028 if (port_info->phy_info[i].attached.handle)
@@ -2163,248 +3047,721 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
2163 return error; 3047 return error;
2164} 3048}
2165 3049
2166static int 3050static void
2167mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle) 3051mptsas_expander_refresh(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
2168{ 3052{
2169 struct mptsas_portinfo *port_info, *p, *ex; 3053 struct mptsas_portinfo *parent;
2170 struct device *parent; 3054 struct device *parent_dev;
2171 struct sas_rphy *rphy; 3055 struct sas_rphy *rphy;
2172 int error = -ENOMEM, i, j; 3056 int i;
2173 3057 u64 sas_address; /* expander sas address */
2174 ex = kzalloc(sizeof(*port_info), GFP_KERNEL); 3058 u32 handle;
2175 if (!ex) 3059
2176 goto out; 3060 handle = port_info->phy_info[0].handle;
2177 3061 sas_address = port_info->phy_info[0].identify.sas_address;
2178 error = mptsas_sas_expander_pg0(ioc, ex,
2179 (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
2180 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), *handle);
2181 if (error)
2182 goto out_free_port_info;
2183
2184 *handle = ex->phy_info[0].handle;
2185
2186 mutex_lock(&ioc->sas_topology_mutex);
2187 port_info = mptsas_find_portinfo_by_handle(ioc, *handle);
2188 if (!port_info) {
2189 port_info = ex;
2190 list_add_tail(&port_info->list, &ioc->sas_topology);
2191 } else {
2192 for (i = 0; i < ex->num_phys; i++) {
2193 port_info->phy_info[i].handle =
2194 ex->phy_info[i].handle;
2195 port_info->phy_info[i].port_id =
2196 ex->phy_info[i].port_id;
2197 }
2198 kfree(ex->phy_info);
2199 kfree(ex);
2200 ex = NULL;
2201 }
2202 mutex_unlock(&ioc->sas_topology_mutex);
2203
2204 for (i = 0; i < port_info->num_phys; i++) { 3062 for (i = 0; i < port_info->num_phys; i++) {
2205 mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i], 3063 mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
2206 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM << 3064 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
2207 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + *handle); 3065 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + handle);
2208 3066
2209 if (port_info->phy_info[i].identify.handle) { 3067 mptsas_sas_device_pg0(ioc,
2210 mptsas_sas_device_pg0(ioc, 3068 &port_info->phy_info[i].identify,
2211 &port_info->phy_info[i].identify, 3069 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
2212 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 3070 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2213 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 3071 port_info->phy_info[i].identify.handle);
2214 port_info->phy_info[i].identify.handle); 3072 port_info->phy_info[i].identify.phy_id =
2215 port_info->phy_info[i].identify.phy_id = 3073 port_info->phy_info[i].phy_id;
2216 port_info->phy_info[i].phy_id;
2217 }
2218 3074
2219 if (port_info->phy_info[i].attached.handle) { 3075 if (port_info->phy_info[i].attached.handle) {
2220 mptsas_sas_device_pg0(ioc, 3076 mptsas_sas_device_pg0(ioc,
2221 &port_info->phy_info[i].attached, 3077 &port_info->phy_info[i].attached,
2222 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 3078 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
2223 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 3079 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
2224 port_info->phy_info[i].attached.handle); 3080 port_info->phy_info[i].attached.handle);
2225 port_info->phy_info[i].attached.phy_id = 3081 port_info->phy_info[i].attached.phy_id =
2226 port_info->phy_info[i].phy_id; 3082 port_info->phy_info[i].phy_id;
2227 } 3083 }
2228 } 3084 }
2229 3085
2230 parent = &ioc->sh->shost_gendev; 3086 mutex_lock(&ioc->sas_topology_mutex);
2231 for (i = 0; i < port_info->num_phys; i++) { 3087 parent = mptsas_find_portinfo_by_handle(ioc,
2232 mutex_lock(&ioc->sas_topology_mutex); 3088 port_info->phy_info[0].identify.handle_parent);
2233 list_for_each_entry(p, &ioc->sas_topology, list) { 3089 if (!parent) {
2234 for (j = 0; j < p->num_phys; j++) {
2235 if (port_info->phy_info[i].identify.handle !=
2236 p->phy_info[j].attached.handle)
2237 continue;
2238 rphy = mptsas_get_rphy(&p->phy_info[j]);
2239 parent = &rphy->dev;
2240 }
2241 }
2242 mutex_unlock(&ioc->sas_topology_mutex); 3090 mutex_unlock(&ioc->sas_topology_mutex);
3091 return;
2243 } 3092 }
3093 for (i = 0, parent_dev = NULL; i < parent->num_phys && !parent_dev;
3094 i++) {
3095 if (parent->phy_info[i].attached.sas_address == sas_address) {
3096 rphy = mptsas_get_rphy(&parent->phy_info[i]);
3097 parent_dev = &rphy->dev;
3098 }
3099 }
3100 mutex_unlock(&ioc->sas_topology_mutex);
2244 3101
2245 mptsas_setup_wide_ports(ioc, port_info); 3102 mptsas_setup_wide_ports(ioc, port_info);
2246
2247 for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++) 3103 for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
2248 mptsas_probe_one_phy(parent, &port_info->phy_info[i], 3104 mptsas_probe_one_phy(parent_dev, &port_info->phy_info[i],
2249 ioc->sas_index, 0); 3105 ioc->sas_index, 0);
3106}
2250 3107
2251 return 0; 3108static void
3109mptsas_expander_event_add(MPT_ADAPTER *ioc,
3110 MpiEventDataSasExpanderStatusChange_t *expander_data)
3111{
3112 struct mptsas_portinfo *port_info;
3113 int i;
3114 __le64 sas_address;
2252 3115
2253 out_free_port_info: 3116 port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
2254 if (ex) { 3117 if (!port_info)
2255 kfree(ex->phy_info); 3118 BUG();
2256 kfree(ex); 3119 port_info->num_phys = (expander_data->NumPhys) ?
3120 expander_data->NumPhys : 1;
3121 port_info->phy_info = kcalloc(port_info->num_phys,
3122 sizeof(struct mptsas_phyinfo), GFP_KERNEL);
3123 if (!port_info->phy_info)
3124 BUG();
3125 memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
3126 for (i = 0; i < port_info->num_phys; i++) {
3127 port_info->phy_info[i].portinfo = port_info;
3128 port_info->phy_info[i].handle =
3129 le16_to_cpu(expander_data->DevHandle);
3130 port_info->phy_info[i].identify.sas_address =
3131 le64_to_cpu(sas_address);
3132 port_info->phy_info[i].identify.handle_parent =
3133 le16_to_cpu(expander_data->ParentDevHandle);
3134 }
3135
3136 mutex_lock(&ioc->sas_topology_mutex);
3137 list_add_tail(&port_info->list, &ioc->sas_topology);
3138 mutex_unlock(&ioc->sas_topology_mutex);
3139
3140 printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
3141 "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
3142 (unsigned long long)sas_address);
3143
3144 mptsas_expander_refresh(ioc, port_info);
3145}
3146
3147/**
3148 * mptsas_delete_expander_siblings - remove siblings attached to expander
3149 * @ioc: Pointer to MPT_ADAPTER structure
3150 * @parent: the parent port_info object
3151 * @expander: the expander port_info object
3152 **/
3153static void
3154mptsas_delete_expander_siblings(MPT_ADAPTER *ioc, struct mptsas_portinfo
3155 *parent, struct mptsas_portinfo *expander)
3156{
3157 struct mptsas_phyinfo *phy_info;
3158 struct mptsas_portinfo *port_info;
3159 struct sas_rphy *rphy;
3160 int i;
3161
3162 phy_info = expander->phy_info;
3163 for (i = 0; i < expander->num_phys; i++, phy_info++) {
3164 rphy = mptsas_get_rphy(phy_info);
3165 if (!rphy)
3166 continue;
3167 if (rphy->identify.device_type == SAS_END_DEVICE)
3168 mptsas_del_end_device(ioc, phy_info);
3169 }
3170
3171 phy_info = expander->phy_info;
3172 for (i = 0; i < expander->num_phys; i++, phy_info++) {
3173 rphy = mptsas_get_rphy(phy_info);
3174 if (!rphy)
3175 continue;
3176 if (rphy->identify.device_type ==
3177 MPI_SAS_DEVICE_INFO_EDGE_EXPANDER ||
3178 rphy->identify.device_type ==
3179 MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER) {
3180 port_info = mptsas_find_portinfo_by_sas_address(ioc,
3181 rphy->identify.sas_address);
3182 if (!port_info)
3183 continue;
3184 if (port_info == parent) /* backlink rphy */
3185 continue;
3186 /*
 3187 Delete this expander even if its expander device page still
 3188 exists, because the parent expander has already been deleted
3189 */
3190 mptsas_expander_delete(ioc, port_info, 1);
3191 }
3192 }
3193}
3194
3195
3196/**
3197 * mptsas_expander_delete - remove this expander
3198 * @ioc: Pointer to MPT_ADAPTER structure
3199 * @port_info: expander port_info struct
3200 * @force: Flag to forcefully delete the expander
3201 *
3202 **/
3203
3204static void mptsas_expander_delete(MPT_ADAPTER *ioc,
3205 struct mptsas_portinfo *port_info, u8 force)
3206{
3207
3208 struct mptsas_portinfo *parent;
3209 int i;
3210 u64 expander_sas_address;
3211 struct mptsas_phyinfo *phy_info;
3212 struct mptsas_portinfo buffer;
3213 struct mptsas_portinfo_details *port_details;
3214 struct sas_port *port;
3215
3216 if (!port_info)
3217 return;
3218
3219 /* see if expander is still there before deleting */
3220 mptsas_sas_expander_pg0(ioc, &buffer,
3221 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
3222 MPI_SAS_EXPAND_PGAD_FORM_SHIFT),
3223 port_info->phy_info[0].identify.handle);
3224
3225 if (buffer.num_phys) {
3226 kfree(buffer.phy_info);
3227 if (!force)
3228 return;
3229 }
3230
3231
3232 /*
3233 * Obtain the port_info instance to the parent port
3234 */
3235 port_details = NULL;
3236 expander_sas_address =
3237 port_info->phy_info[0].identify.sas_address;
3238 parent = mptsas_find_portinfo_by_handle(ioc,
3239 port_info->phy_info[0].identify.handle_parent);
3240 mptsas_delete_expander_siblings(ioc, parent, port_info);
3241 if (!parent)
3242 goto out;
3243
3244 /*
3245 * Delete rphys in the parent that point
3246 * to this expander.
3247 */
3248 phy_info = parent->phy_info;
3249 port = NULL;
3250 for (i = 0; i < parent->num_phys; i++, phy_info++) {
3251 if (!phy_info->phy)
3252 continue;
3253 if (phy_info->attached.sas_address !=
3254 expander_sas_address)
3255 continue;
3256 if (!port) {
3257 port = mptsas_get_port(phy_info);
3258 port_details = phy_info->port_details;
3259 }
3260 dev_printk(KERN_DEBUG, &phy_info->phy->dev,
3261 MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n", ioc->name,
3262 phy_info->phy_id, phy_info->phy);
3263 sas_port_delete_phy(port, phy_info->phy);
3264 }
3265 if (port) {
3266 dev_printk(KERN_DEBUG, &port->dev,
3267 MYIOC_s_FMT "delete port %d, sas_addr (0x%llx)\n",
3268 ioc->name, port->port_identifier,
3269 (unsigned long long)expander_sas_address);
3270 sas_port_delete(port);
3271 mptsas_port_delete(ioc, port_details);
2257 } 3272 }
2258 out: 3273 out:
2259 return error; 3274
3275 printk(MYIOC_s_INFO_FMT "delete expander: num_phys %d, "
3276 "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
3277 (unsigned long long)expander_sas_address);
3278
3279 /*
3280 * free link
3281 */
3282 list_del(&port_info->list);
3283 kfree(port_info->phy_info);
3284 kfree(port_info);
2260} 3285}
2261 3286
2262/* 3287
2263 * mptsas_delete_expander_phys 3288/**
 3289 * mptsas_send_expander_event - handle expander status change events
 3290 * @fw_event: work queue payload containing the
 3291 * MpiEventDataSasExpanderStatusChange_t event data
2264 * 3292 *
2265 * 3293 *
2266 * This will traverse topology, and remove expanders 3294 * This function handles adding, removing, and refreshing
2267 * that are no longer present 3295 * device handles within the expander objects.
2268 */ 3296 */
2269static void 3297static void
2270mptsas_delete_expander_phys(MPT_ADAPTER *ioc) 3298mptsas_send_expander_event(struct fw_event_work *fw_event)
2271{ 3299{
2272 struct mptsas_portinfo buffer; 3300 MPT_ADAPTER *ioc;
2273 struct mptsas_portinfo *port_info, *n, *parent; 3301 MpiEventDataSasExpanderStatusChange_t *expander_data;
2274 struct mptsas_phyinfo *phy_info; 3302 struct mptsas_portinfo *port_info;
2275 struct sas_port * port; 3303 __le64 sas_address;
2276 int i; 3304 int i;
2277 u64 expander_sas_address;
2278 3305
3306 ioc = fw_event->ioc;
3307 expander_data = (MpiEventDataSasExpanderStatusChange_t *)
3308 fw_event->event_data;
3309 memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
3310 port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
3311
3312 if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED) {
3313 if (port_info) {
3314 for (i = 0; i < port_info->num_phys; i++) {
3315 port_info->phy_info[i].portinfo = port_info;
3316 port_info->phy_info[i].handle =
3317 le16_to_cpu(expander_data->DevHandle);
3318 port_info->phy_info[i].identify.sas_address =
3319 le64_to_cpu(sas_address);
3320 port_info->phy_info[i].identify.handle_parent =
3321 le16_to_cpu(expander_data->ParentDevHandle);
3322 }
3323 mptsas_expander_refresh(ioc, port_info);
3324 } else if (!port_info && expander_data->NumPhys)
3325 mptsas_expander_event_add(ioc, expander_data);
3326 } else if (expander_data->ReasonCode ==
3327 MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING)
3328 mptsas_expander_delete(ioc, port_info, 0);
3329
3330 mptsas_free_fw_event(ioc, fw_event);
3331}
3332
3333
3334/**
 3335 * mptsas_expander_add - discover and report the expander at @handle
 3336 * @ioc: Pointer to MPT_ADAPTER structure
 3337 * @handle: firmware device handle of the expander
3338 *
3339 */
3340struct mptsas_portinfo *
3341mptsas_expander_add(MPT_ADAPTER *ioc, u16 handle)
3342{
3343 struct mptsas_portinfo buffer, *port_info;
3344 int i;
3345
3346 if ((mptsas_sas_expander_pg0(ioc, &buffer,
3347 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
3348 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)))
3349 return NULL;
3350
3351 port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_ATOMIC);
3352 if (!port_info) {
3353 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
3354 "%s: exit at line=%d\n", ioc->name,
3355 __func__, __LINE__));
3356 return NULL;
3357 }
3358 port_info->num_phys = buffer.num_phys;
3359 port_info->phy_info = buffer.phy_info;
3360 for (i = 0; i < port_info->num_phys; i++)
3361 port_info->phy_info[i].portinfo = port_info;
2279 mutex_lock(&ioc->sas_topology_mutex); 3362 mutex_lock(&ioc->sas_topology_mutex);
2280 list_for_each_entry_safe(port_info, n, &ioc->sas_topology, list) { 3363 list_add_tail(&port_info->list, &ioc->sas_topology);
3364 mutex_unlock(&ioc->sas_topology_mutex);
3365 printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
3366 "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
3367 (unsigned long long)buffer.phy_info[0].identify.sas_address);
3368 mptsas_expander_refresh(ioc, port_info);
3369 return port_info;
3370}
2281 3371
2282 if (!(port_info->phy_info[0].identify.device_info & 3372static void
2283 MPI_SAS_DEVICE_INFO_SMP_TARGET)) 3373mptsas_send_link_status_event(struct fw_event_work *fw_event)
3374{
3375 MPT_ADAPTER *ioc;
3376 MpiEventDataSasPhyLinkStatus_t *link_data;
3377 struct mptsas_portinfo *port_info;
3378 struct mptsas_phyinfo *phy_info = NULL;
3379 __le64 sas_address;
3380 u8 phy_num;
3381 u8 link_rate;
3382
3383 ioc = fw_event->ioc;
3384 link_data = (MpiEventDataSasPhyLinkStatus_t *)fw_event->event_data;
3385
3386 memcpy(&sas_address, &link_data->SASAddress, sizeof(__le64));
3387 sas_address = le64_to_cpu(sas_address);
3388 link_rate = link_data->LinkRates >> 4;
3389 phy_num = link_data->PhyNum;
3390
3391 port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
3392 if (port_info) {
3393 phy_info = &port_info->phy_info[phy_num];
3394 if (phy_info)
3395 phy_info->negotiated_link_rate = link_rate;
3396 }
3397
3398 if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
3399 link_rate == MPI_SAS_IOUNIT0_RATE_3_0) {
3400
3401 if (!port_info) {
3402 if (ioc->old_sas_discovery_protocal) {
3403 port_info = mptsas_expander_add(ioc,
3404 le16_to_cpu(link_data->DevHandle));
3405 if (port_info)
3406 goto out;
3407 }
3408 goto out;
3409 }
3410
3411 if (port_info == ioc->hba_port_info)
3412 mptsas_probe_hba_phys(ioc);
3413 else
3414 mptsas_expander_refresh(ioc, port_info);
3415 } else if (phy_info && phy_info->phy) {
3416 if (link_rate == MPI_SAS_IOUNIT0_RATE_PHY_DISABLED)
3417 phy_info->phy->negotiated_linkrate =
3418 SAS_PHY_DISABLED;
3419 else if (link_rate ==
3420 MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION)
3421 phy_info->phy->negotiated_linkrate =
3422 SAS_LINK_RATE_FAILED;
3423 else
3424 phy_info->phy->negotiated_linkrate =
3425 SAS_LINK_RATE_UNKNOWN;
3426 }
3427 out:
3428 mptsas_free_fw_event(ioc, fw_event);
3429}
3430
3431static void
3432mptsas_not_responding_devices(MPT_ADAPTER *ioc)
3433{
3434 struct mptsas_portinfo buffer, *port_info;
3435 struct mptsas_device_info *sas_info;
3436 struct mptsas_devinfo sas_device;
3437 u32 handle;
3438 VirtTarget *vtarget = NULL;
3439 struct mptsas_phyinfo *phy_info;
3440 u8 found_expander;
3441 int retval, retry_count;
3442 unsigned long flags;
3443
3444 mpt_findImVolumes(ioc);
3445
3446 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
3447 if (ioc->ioc_reset_in_progress) {
3448 dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3449 "%s: exiting due to a parallel reset \n", ioc->name,
3450 __func__));
3451 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
3452 return;
3453 }
3454 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
3455
3456 /* devices, logical volumes */
3457 mutex_lock(&ioc->sas_device_info_mutex);
3458 redo_device_scan:
3459 list_for_each_entry(sas_info, &ioc->sas_device_info_list, list) {
3460 if (sas_info->is_cached)
2284 continue; 3461 continue;
3462 if (!sas_info->is_logical_volume) {
3463 sas_device.handle = 0;
3464 retry_count = 0;
3465retry_page:
3466 retval = mptsas_sas_device_pg0(ioc, &sas_device,
3467 (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID
3468 << MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
3469 (sas_info->fw.channel << 8) +
3470 sas_info->fw.id);
3471
3472 if (sas_device.handle)
3473 continue;
3474 if (retval == -EBUSY) {
3475 spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
3476 if (ioc->ioc_reset_in_progress) {
3477 dfailprintk(ioc,
3478 printk(MYIOC_s_DEBUG_FMT
3479 "%s: exiting due to reset\n",
3480 ioc->name, __func__));
3481 spin_unlock_irqrestore
3482 (&ioc->taskmgmt_lock, flags);
3483 mutex_unlock(&ioc->
3484 sas_device_info_mutex);
3485 return;
3486 }
3487 spin_unlock_irqrestore(&ioc->taskmgmt_lock,
3488 flags);
3489 }
2285 3490
2286 if (mptsas_sas_expander_pg0(ioc, &buffer, 3491 if (retval && (retval != -ENODEV)) {
2287 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE << 3492 if (retry_count < 10) {
2288 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), 3493 retry_count++;
2289 port_info->phy_info[0].handle)) { 3494 goto retry_page;
3495 } else {
3496 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
3497 "%s: Config page retry exceeded retry "
3498 "count deleting device 0x%llx\n",
3499 ioc->name, __func__,
3500 sas_info->sas_address));
3501 }
3502 }
2290 3503
2291 /* 3504 /* delete device */
2292 * Obtain the port_info instance to the parent port 3505 vtarget = mptsas_find_vtarget(ioc,
2293 */ 3506 sas_info->fw.channel, sas_info->fw.id);
2294 parent = mptsas_find_portinfo_by_handle(ioc,
2295 port_info->phy_info[0].identify.handle_parent);
2296 3507
2297 if (!parent) 3508 if (vtarget)
2298 goto next_port; 3509 vtarget->deleted = 1;
2299 3510
2300 expander_sas_address = 3511 phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
2301 port_info->phy_info[0].identify.sas_address; 3512 sas_info->sas_address);
2302 3513
2303 /* 3514 if (phy_info) {
2304 * Delete rphys in the parent that point 3515 mptsas_del_end_device(ioc, phy_info);
2305 * to this expander. The transport layer will 3516 goto redo_device_scan;
2306 * cleanup all the children.
2307 */
2308 phy_info = parent->phy_info;
2309 for (i = 0; i < parent->num_phys; i++, phy_info++) {
2310 port = mptsas_get_port(phy_info);
2311 if (!port)
2312 continue;
2313 if (phy_info->attached.sas_address !=
2314 expander_sas_address)
2315 continue;
2316 dsaswideprintk(ioc,
2317 dev_printk(KERN_DEBUG, &port->dev,
2318 MYIOC_s_FMT "delete port (%d)\n", ioc->name,
2319 port->port_identifier));
2320 sas_port_delete(port);
2321 mptsas_port_delete(ioc, phy_info->port_details);
2322 } 3517 }
2323 next_port: 3518 } else
3519 mptsas_volume_delete(ioc, sas_info->fw.id);
3520 }
3521 mutex_lock(&ioc->sas_device_info_mutex);
2324 3522
2325 phy_info = port_info->phy_info; 3523 /* expanders */
2326 for (i = 0; i < port_info->num_phys; i++, phy_info++) 3524 mutex_lock(&ioc->sas_topology_mutex);
2327 mptsas_port_delete(ioc, phy_info->port_details); 3525 redo_expander_scan:
3526 list_for_each_entry(port_info, &ioc->sas_topology, list) {
2328 3527
2329 list_del(&port_info->list); 3528 if (port_info->phy_info &&
2330 kfree(port_info->phy_info); 3529 (!(port_info->phy_info[0].identify.device_info &
2331 kfree(port_info); 3530 MPI_SAS_DEVICE_INFO_SMP_TARGET)))
3531 continue;
3532 found_expander = 0;
3533 handle = 0xFFFF;
3534 while (!mptsas_sas_expander_pg0(ioc, &buffer,
3535 (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
3536 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle) &&
3537 !found_expander) {
3538
3539 handle = buffer.phy_info[0].handle;
3540 if (buffer.phy_info[0].identify.sas_address ==
3541 port_info->phy_info[0].identify.sas_address) {
3542 found_expander = 1;
3543 }
3544 kfree(buffer.phy_info);
3545 }
3546
3547 if (!found_expander) {
3548 mptsas_expander_delete(ioc, port_info, 0);
3549 goto redo_expander_scan;
2332 } 3550 }
2333 /*
2334 * Free this memory allocated from inside
2335 * mptsas_sas_expander_pg0
2336 */
2337 kfree(buffer.phy_info);
2338 } 3551 }
 2339 mutex_unlock(&ioc->sas_topology_mutex); 3552 mutex_unlock(&ioc->sas_topology_mutex);
3553}
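The redo_device_scan/redo_expander_scan restarts above are deliberate: mptsas_del_end_device() and mptsas_expander_delete() can remove entries from the very lists being walked, and a single call may drop more than the current node, so even list_for_each_entry_safe() would not be safe. The generic shape of the idiom, with hypothetical helper names:

    restart:
    	list_for_each_entry(pos, head, list) {
    		if (needs_delete(pos)) {
    			delete_node(pos);	/* may free pos and others */
    			goto restart;		/* walk again from the head */
    		}
    	}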
3554
3555/**
 3556 * mptsas_probe_expanders - probe and report all expanders found by firmware
3557 * @ioc: Pointer to MPT_ADAPTER structure
3558 *
3559 **/
3560static void
3561mptsas_probe_expanders(MPT_ADAPTER *ioc)
3562{
3563 struct mptsas_portinfo buffer, *port_info;
3564 u32 handle;
3565 int i;
3566
3567 handle = 0xFFFF;
3568 while (!mptsas_sas_expander_pg0(ioc, &buffer,
3569 (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
3570 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)) {
3571
3572 handle = buffer.phy_info[0].handle;
3573 port_info = mptsas_find_portinfo_by_sas_address(ioc,
3574 buffer.phy_info[0].identify.sas_address);
3575
3576 if (port_info) {
3577 /* refreshing handles */
3578 for (i = 0; i < buffer.num_phys; i++) {
3579 port_info->phy_info[i].handle = handle;
3580 port_info->phy_info[i].identify.handle_parent =
3581 buffer.phy_info[0].identify.handle_parent;
3582 }
3583 mptsas_expander_refresh(ioc, port_info);
3584 kfree(buffer.phy_info);
3585 continue;
3586 }
3587
3588 port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
3589 if (!port_info) {
3590 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
3591 "%s: exit at line=%d\n", ioc->name,
3592 __func__, __LINE__));
3593 return;
3594 }
3595 port_info->num_phys = buffer.num_phys;
3596 port_info->phy_info = buffer.phy_info;
3597 for (i = 0; i < port_info->num_phys; i++)
3598 port_info->phy_info[i].portinfo = port_info;
3599 mutex_lock(&ioc->sas_topology_mutex);
3600 list_add_tail(&port_info->list, &ioc->sas_topology);
3601 mutex_unlock(&ioc->sas_topology_mutex);
3602 printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
3603 "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
3604 (unsigned long long)buffer.phy_info[0].identify.sas_address);
3605 mptsas_expander_refresh(ioc, port_info);
3606 }
2340} 3607}
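Both probe loops above use the firmware's handle-cursor idiom: start from the 0xFFFF sentinel, request GET_NEXT_HANDLE, and feed the handle returned in the page back into the next query. Condensed from the loop above:

    	handle = 0xFFFF;	/* sentinel: "before the first handle" */
    	while (!mptsas_sas_expander_pg0(ioc, &buffer,
    	    (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
    	     MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)) {
    		handle = buffer.phy_info[0].handle;	/* advance cursor */
    		/* ... process buffer, then free buffer.phy_info ... */
    	}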
2341 3608
2342/* 3609static void
2343 * Start of day discovery 3610mptsas_probe_devices(MPT_ADAPTER *ioc)
2344 */ 3611{
3612 u16 handle;
3613 struct mptsas_devinfo sas_device;
3614 struct mptsas_phyinfo *phy_info;
3615
3616 handle = 0xFFFF;
3617 while (!(mptsas_sas_device_pg0(ioc, &sas_device,
3618 MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
3619
3620 handle = sas_device.handle;
3621
3622 if ((sas_device.device_info &
3623 (MPI_SAS_DEVICE_INFO_SSP_TARGET |
3624 MPI_SAS_DEVICE_INFO_STP_TARGET |
3625 MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0)
3626 continue;
3627
3628 phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
3629 if (!phy_info)
3630 continue;
3631
3632 if (mptsas_get_rphy(phy_info))
3633 continue;
3634
3635 mptsas_add_end_device(ioc, phy_info);
3636 }
3637}
3638
3639/**
3640 * mptsas_scan_sas_topology -
3641 * @ioc: Pointer to MPT_ADAPTER structure
3642 * @sas_address:
3643 *
3644 **/
2345static void 3645static void
2346mptsas_scan_sas_topology(MPT_ADAPTER *ioc) 3646mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
2347{ 3647{
2348 u32 handle = 0xFFFF; 3648 struct scsi_device *sdev;
2349 int i; 3649 int i;
2350 3650
2351 mutex_lock(&ioc->sas_discovery_mutex);
2352 mptsas_probe_hba_phys(ioc); 3651 mptsas_probe_hba_phys(ioc);
2353 while (!mptsas_probe_expander_phys(ioc, &handle)) 3652 mptsas_probe_expanders(ioc);
2354 ; 3653 mptsas_probe_devices(ioc);
3654
2355 /* 3655 /*
2356 Reporting RAID volumes. 3656 Reporting RAID volumes.
2357 */ 3657 */
2358 if (!ioc->ir_firmware) 3658 if (!ioc->ir_firmware || !ioc->raid_data.pIocPg2 ||
2359 goto out; 3659 !ioc->raid_data.pIocPg2->NumActiveVolumes)
2360 if (!ioc->raid_data.pIocPg2) 3660 return;
2361 goto out;
2362 if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
2363 goto out;
2364 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { 3661 for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
3662 sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
3663 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
3664 if (sdev) {
3665 scsi_device_put(sdev);
3666 continue;
3667 }
3668 printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
3669 "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
3670 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID);
2365 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, 3671 scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
2366 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0); 3672 ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
2367 } 3673 }
2368 out:
2369 mutex_unlock(&ioc->sas_discovery_mutex);
2370} 3674}
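[Editor's note] Both new probe routines above walk firmware config pages with the same cursor idiom: seed the handle with 0xFFFF ("give me the first entry"), let each successful page read return the next valid handle, and stop when the read fails. A minimal userspace sketch of that idiom follows; fetch_next() is a hypothetical stand-in for mptsas_sas_device_pg0(), and the handle values are invented.

#include <stdio.h>

struct dev_page { unsigned short handle; };

/* toy firmware table; returns 0 on success, -1 when exhausted */
static int fetch_next(unsigned short prev, struct dev_page *out)
{
    static const unsigned short handles[] = { 0x0009, 0x000a, 0x000c };
    int i;

    for (i = 0; i < 3; i++)
        if (prev == 0xFFFF || handles[i] > prev) {
            out->handle = handles[i];
            return 0;
        }
    return -1;
}

int main(void)
{
    struct dev_page pg;
    unsigned short handle = 0xFFFF; /* "first entry" cursor value */

    while (!fetch_next(handle, &pg)) {
        handle = pg.handle; /* advance the cursor to what we just got */
        printf("discovered handle 0x%04x\n", handle);
    }
    return 0;
}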

-/*
- * Work queue thread to handle Runtime discovery
- * Mere purpose is the hot add/delete of expanders
- *(Mutex UNLOCKED)
- */
+
 static void
-__mptsas_discovery_work(MPT_ADAPTER *ioc)
+mptsas_handle_queue_full_event(struct fw_event_work *fw_event)
 {
-	u32 handle = 0xFFFF;
+	MPT_ADAPTER *ioc;
+	EventDataQueueFull_t *qfull_data;
+	struct mptsas_device_info *sas_info;
+	struct scsi_device *sdev;
+	int depth;
+	int id = -1;
+	int channel = -1;
+	int fw_id, fw_channel;
+	u16 current_depth;
+
+
+	ioc = fw_event->ioc;
+	qfull_data = (EventDataQueueFull_t *)fw_event->event_data;
+	fw_id = qfull_data->TargetID;
+	fw_channel = qfull_data->Bus;
+	current_depth = le16_to_cpu(qfull_data->CurrentDepth);
+
+	/* if hidden raid component, look for the volume id */
+	mutex_lock(&ioc->sas_device_info_mutex);
+	if (mptscsih_is_phys_disk(ioc, fw_channel, fw_id)) {
+		list_for_each_entry(sas_info, &ioc->sas_device_info_list,
+		    list) {
+			if (sas_info->is_cached ||
+			    sas_info->is_logical_volume)
+				continue;
+			if (sas_info->is_hidden_raid_component &&
+			    (sas_info->fw.channel == fw_channel &&
+			    sas_info->fw.id == fw_id)) {
+				id = sas_info->volume_id;
+				channel = MPTSAS_RAID_CHANNEL;
+				goto out;
+			}
+		}
+	} else {
+		list_for_each_entry(sas_info, &ioc->sas_device_info_list,
+		    list) {
+			if (sas_info->is_cached ||
+			    sas_info->is_hidden_raid_component ||
+			    sas_info->is_logical_volume)
+				continue;
+			if (sas_info->fw.channel == fw_channel &&
+			    sas_info->fw.id == fw_id) {
+				id = sas_info->os.id;
+				channel = sas_info->os.channel;
+				goto out;
+			}
+		}

-	ioc->sas_discovery_runtime=1;
-	mptsas_delete_expander_phys(ioc);
-	mptsas_probe_hba_phys(ioc);
-	while (!mptsas_probe_expander_phys(ioc, &handle))
-		;
-	ioc->sas_discovery_runtime=0;
-}
+	}

-/*
- * Work queue thread to handle Runtime discovery
- * Mere purpose is the hot add/delete of expanders
- *(Mutex LOCKED)
- */
-static void
-mptsas_discovery_work(struct work_struct *work)
-{
-	struct mptsas_discovery_event *ev =
-		container_of(work, struct mptsas_discovery_event, work);
-	MPT_ADAPTER *ioc = ev->ioc;
+ out:
+	mutex_unlock(&ioc->sas_device_info_mutex);
+
+	if (id != -1) {
+		shost_for_each_device(sdev, ioc->sh) {
+			if (sdev->id == id && sdev->channel == channel) {
+				if (current_depth > sdev->queue_depth) {
+					sdev_printk(KERN_INFO, sdev,
+					    "strange observation, the queue "
+					    "depth is (%d) meanwhile fw queue "
+					    "depth (%d)\n", sdev->queue_depth,
+					    current_depth);
+					continue;
+				}
+				depth = scsi_track_queue_full(sdev,
+				    current_depth - 1);
+				if (depth > 0)
+					sdev_printk(KERN_INFO, sdev,
+					    "Queue depth reduced to (%d)\n",
+					    depth);
+				else if (depth < 0)
+					sdev_printk(KERN_INFO, sdev,
+					    "Tagged Command Queueing is being "
+					    "disabled\n");
+				else if (depth == 0)
+					sdev_printk(KERN_INFO, sdev,
+					    "Queue depth not changed yet\n");
+			}
+		}
+	}

-	mutex_lock(&ioc->sas_discovery_mutex);
-	__mptsas_discovery_work(ioc);
-	mutex_unlock(&ioc->sas_discovery_mutex);
-	kfree(ev);
+	mptsas_free_fw_event(ioc, fw_event);
 }
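[Editor's note] The queue-full handler leans on the return convention of scsi_track_queue_full(): a positive value is the new, reduced depth, zero means "still sampling, no change yet", and a negative value means tagged queueing is being disabled. A toy stand-in with the same convention, purely illustrative and not the real midlayer logic:

#include <stdio.h>

/* hypothetical stand-in mirroring scsi_track_queue_full()'s returns */
static int track(int current_depth, int proposed)
{
    if (proposed >= current_depth)
        return 0;   /* nothing to change yet */
    if (proposed < 2)
        return -1;  /* give up on tagged queueing */
    return proposed; /* new, reduced queue depth */
}

int main(void)
{
    int depth = track(64, 32);

    if (depth > 0)
        printf("queue depth reduced to %d\n", depth);
    else if (depth < 0)
        printf("tagged command queueing disabled\n");
    else
        printf("queue depth not changed yet\n");
    return 0;
}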

+
 static struct mptsas_phyinfo *
 mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
 {
@@ -2429,69 +3786,80 @@ mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
 	return phy_info;
 }

+/**
+ * mptsas_find_phyinfo_by_phys_disk_num -
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @phys_disk_num:
+ * @channel:
+ * @id:
+ *
+ **/
 static struct mptsas_phyinfo *
-mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u8 channel, u8 id)
+mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num,
+	u8 channel, u8 id)
 {
-	struct mptsas_portinfo *port_info;
 	struct mptsas_phyinfo *phy_info = NULL;
+	struct mptsas_portinfo *port_info;
+	RaidPhysDiskPage1_t *phys_disk = NULL;
+	int num_paths;
+	u64 sas_address = 0;
 	int i;

-	mutex_lock(&ioc->sas_topology_mutex);
-	list_for_each_entry(port_info, &ioc->sas_topology, list) {
-		for (i = 0; i < port_info->num_phys; i++) {
-			if (!mptsas_is_end_device(
-				&port_info->phy_info[i].attached))
-				continue;
-			if (port_info->phy_info[i].attached.id != id)
-				continue;
-			if (port_info->phy_info[i].attached.channel != channel)
-				continue;
-			phy_info = &port_info->phy_info[i];
-			break;
+	phy_info = NULL;
+	if (!ioc->raid_data.pIocPg3)
+		return NULL;
+	/* dual port support */
+	num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);
+	if (!num_paths)
+		goto out;
+	phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
+	    (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
+	if (!phys_disk)
+		goto out;
+	mpt_raid_phys_disk_pg1(ioc, phys_disk_num, phys_disk);
+	for (i = 0; i < num_paths; i++) {
+		if ((phys_disk->Path[i].Flags & 1) != 0)
+			/* entry no longer valid */
+			continue;
+		if ((id == phys_disk->Path[i].PhysDiskID) &&
+		    (channel == phys_disk->Path[i].PhysDiskBus)) {
+			memcpy(&sas_address, &phys_disk->Path[i].WWID,
+			    sizeof(u64));
+			phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+			    sas_address);
+			goto out;
 		}
 	}
-	mutex_unlock(&ioc->sas_topology_mutex);
-	return phy_info;
-}

-static struct mptsas_phyinfo *
-mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
-{
-	struct mptsas_portinfo *port_info;
-	struct mptsas_phyinfo *phy_info = NULL;
-	int i;
+ out:
+	kfree(phys_disk);
+	if (phy_info)
+		return phy_info;

+	/*
+	 * Extra code to handle RAID0 case, where the sas_address is not updated
+	 * in phys_disk_page_1 when hotswapped
+	 */
 	mutex_lock(&ioc->sas_topology_mutex);
 	list_for_each_entry(port_info, &ioc->sas_topology, list) {
-		for (i = 0; i < port_info->num_phys; i++) {
+		for (i = 0; i < port_info->num_phys && !phy_info; i++) {
 			if (!mptsas_is_end_device(
 				&port_info->phy_info[i].attached))
 				continue;
 			if (port_info->phy_info[i].attached.phys_disk_num == ~0)
 				continue;
-			if (port_info->phy_info[i].attached.phys_disk_num != id)
-				continue;
-			if (port_info->phy_info[i].attached.channel != channel)
-				continue;
-			phy_info = &port_info->phy_info[i];
-			break;
+			if ((port_info->phy_info[i].attached.phys_disk_num ==
+			    phys_disk_num) &&
+			    (port_info->phy_info[i].attached.id == id) &&
+			    (port_info->phy_info[i].attached.channel ==
+			    channel))
+				phy_info = &port_info->phy_info[i];
 		}
 	}
 	mutex_unlock(&ioc->sas_topology_mutex);
 	return phy_info;
 }
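[Editor's note] The WWID copy above is worth a second look: the firmware page stores the world-wide ID as a raw 8-byte field, so the driver memcpy()s it into a u64 instead of dereferencing a possibly misaligned pointer. A small sketch of that pattern, with plain C stand-ins replacing the MPI types (the struct layout here is invented for illustration):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct path_entry {
    uint8_t flags;
    uint8_t wwid[8]; /* raw, possibly unaligned, 64-bit ID */
};

int main(void)
{
    struct path_entry path = {
        .flags = 0,
        .wwid = { 0x50, 0x00, 0xc5, 0x00, 0x12, 0x34, 0x56, 0x78 },
    };
    uint64_t sas_address = 0;

    /* safe for any alignment; the printed value is host-endian */
    memcpy(&sas_address, path.wwid, sizeof(uint64_t));
    printf("sas_address = 0x%016llx\n",
        (unsigned long long)sas_address);
    return 0;
}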

-/*
- * Work queue thread to clear the persitency table
- */
-static void
-mptsas_persist_clear_table(struct work_struct *work)
-{
-	MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task);
-
-	mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
-}
-
 static void
 mptsas_reprobe_lun(struct scsi_device *sdev, void *data)
 {
@@ -2517,7 +3885,8 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
 	pRaidVolumePage0_t		buffer = NULL;
 	RaidPhysDiskPage0_t		phys_disk;
 	int				i;
-	struct mptsas_hotplug_event	*ev;
+	struct mptsas_phyinfo	*phy_info;
+	struct mptsas_devinfo		sas_device;

 	memset(&cfg, 0 , sizeof(CONFIGPARMS));
 	memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
@@ -2557,20 +3926,16 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
 		    buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
 			continue;

-		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
-		if (!ev) {
-			printk(MYIOC_s_WARN_FMT "mptsas: lost hotplug event\n", ioc->name);
-			goto out;
-		}
+		if (mptsas_sas_device_pg0(ioc, &sas_device,
+		    (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+		     MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+		    (phys_disk.PhysDiskBus << 8) +
+		    phys_disk.PhysDiskID))
+			continue;

-		INIT_WORK(&ev->work, mptsas_hotplug_work);
-		ev->ioc = ioc;
-		ev->id = phys_disk.PhysDiskID;
-		ev->channel = phys_disk.PhysDiskBus;
-		ev->phys_disk_num_valid = 1;
-		ev->phys_disk_num = phys_disk.PhysDiskNum;
-		ev->event_type = MPTSAS_ADD_DEVICE;
-		schedule_work(&ev->work);
+		phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+		    sas_device.sas_address);
+		mptsas_add_end_device(ioc, phy_info);
 	}

  out:
@@ -2582,417 +3947,386 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
  * Work queue thread to handle SAS hotplug events
  */
 static void
-mptsas_hotplug_work(struct work_struct *work)
+mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
+    struct mptsas_hotplug_event *hot_plug_info)
 {
-	struct mptsas_hotplug_event *ev =
-		container_of(work, struct mptsas_hotplug_event, work);
-
-	MPT_ADAPTER *ioc = ev->ioc;
 	struct mptsas_phyinfo *phy_info;
-	struct sas_rphy *rphy;
-	struct sas_port *port;
-	struct scsi_device *sdev;
 	struct scsi_target * starget;
-	struct sas_identify identify;
-	char *ds = NULL;
 	struct mptsas_devinfo sas_device;
 	VirtTarget *vtarget;
-	VirtDevice *vdevice;
+	int i;

-	mutex_lock(&ioc->sas_discovery_mutex);
-	switch (ev->event_type) {
-	case MPTSAS_DEL_DEVICE:
+	switch (hot_plug_info->event_type) {

-		phy_info = NULL;
-		if (ev->phys_disk_num_valid) {
-			if (ev->hidden_raid_component){
-				if (mptsas_sas_device_pg0(ioc, &sas_device,
-				    (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
-				     MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
-				    (ev->channel << 8) + ev->id)) {
-					dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-					    "%s: exit at line=%d\n", ioc->name,
-					    __func__, __LINE__));
-					break;
-				}
-				phy_info = mptsas_find_phyinfo_by_sas_address(
-				    ioc, sas_device.sas_address);
-			}else
-				phy_info = mptsas_find_phyinfo_by_phys_disk_num(
-				    ioc, ev->channel, ev->phys_disk_num);
+	case MPTSAS_ADD_PHYSDISK:
+
+		if (!ioc->raid_data.pIocPg2)
+			break;
+
+		for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
+			if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID ==
+			    hot_plug_info->id) {
+				printk(MYIOC_s_WARN_FMT "firmware bug: unable "
+				    "to add hidden disk - target_id matchs "
+				    "volume_id\n", ioc->name);
+				mptsas_free_fw_event(ioc, fw_event);
+				return;
+			}
 		}
+		mpt_findImVolumes(ioc);

+	case MPTSAS_ADD_DEVICE:
+		memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
+		mptsas_sas_device_pg0(ioc, &sas_device,
+		    (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+		     MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+		    (hot_plug_info->channel << 8) +
+		    hot_plug_info->id);
+
+		if (!sas_device.handle)
+			return;
+
+		phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
 		if (!phy_info)
-			phy_info = mptsas_find_phyinfo_by_target(ioc,
-			    ev->channel, ev->id);
+			break;

-		/*
-		 * Sanity checks, for non-existing phys and remote rphys.
-		 */
-		if (!phy_info){
+		if (mptsas_get_rphy(phy_info))
+			break;
+
+		mptsas_add_end_device(ioc, phy_info);
+		break;
+
+	case MPTSAS_DEL_DEVICE:
+		phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+		    hot_plug_info->sas_address);
+		mptsas_del_end_device(ioc, phy_info);
+		break;
+
+	case MPTSAS_DEL_PHYSDISK:
+
+		mpt_findImVolumes(ioc);
+
+		phy_info = mptsas_find_phyinfo_by_phys_disk_num(
+		    ioc, hot_plug_info->phys_disk_num,
+		    hot_plug_info->channel,
+		    hot_plug_info->id);
+		mptsas_del_end_device(ioc, phy_info);
+		break;
+
+	case MPTSAS_ADD_PHYSDISK_REPROBE:
+
+		if (mptsas_sas_device_pg0(ioc, &sas_device,
+		    (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+		     MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+		    (hot_plug_info->channel << 8) + hot_plug_info->id)) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: exit at line=%d\n", ioc->name,
-			    __func__, __LINE__));
+			    "%s: fw_id=%d exit at line=%d\n", ioc->name,
+			    __func__, hot_plug_info->id, __LINE__));
 			break;
 		}
-		if (!phy_info->port_details) {
+
+		phy_info = mptsas_find_phyinfo_by_sas_address(
+		    ioc, sas_device.sas_address);
+
+		if (!phy_info) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: exit at line=%d\n", ioc->name,
-			    __func__, __LINE__));
+			    "%s: fw_id=%d exit at line=%d\n", ioc->name,
+			    __func__, hot_plug_info->id, __LINE__));
 			break;
 		}
-		rphy = mptsas_get_rphy(phy_info);
-		if (!rphy) {
+
+		starget = mptsas_get_starget(phy_info);
+		if (!starget) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: exit at line=%d\n", ioc->name,
-			    __func__, __LINE__));
+			    "%s: fw_id=%d exit at line=%d\n", ioc->name,
+			    __func__, hot_plug_info->id, __LINE__));
 			break;
 		}

-		port = mptsas_get_port(phy_info);
-		if (!port) {
+		vtarget = starget->hostdata;
+		if (!vtarget) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: exit at line=%d\n", ioc->name,
-			    __func__, __LINE__));
+			    "%s: fw_id=%d exit at line=%d\n", ioc->name,
+			    __func__, hot_plug_info->id, __LINE__));
 			break;
 		}

-		starget = mptsas_get_starget(phy_info);
-		if (starget) {
-			vtarget = starget->hostdata;
+		mpt_findImVolumes(ioc);

-			if (!vtarget) {
-				dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-				    "%s: exit at line=%d\n", ioc->name,
-				    __func__, __LINE__));
-				break;
-			}
+		starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Hidding: "
+		    "fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
+		    ioc->name, hot_plug_info->channel, hot_plug_info->id,
+		    hot_plug_info->phys_disk_num, (unsigned long long)
+		    sas_device.sas_address);

-			/*
-			 * Handling RAID components
-			 */
-			if (ev->phys_disk_num_valid &&
-			    ev->hidden_raid_component) {
-				printk(MYIOC_s_INFO_FMT
-				    "RAID Hidding: channel=%d, id=%d, "
-				    "physdsk %d \n", ioc->name, ev->channel,
-				    ev->id, ev->phys_disk_num);
-				vtarget->id = ev->phys_disk_num;
-				vtarget->tflags |=
-				    MPT_TARGET_FLAGS_RAID_COMPONENT;
-				mptsas_reprobe_target(starget, 1);
-				phy_info->attached.phys_disk_num =
-				    ev->phys_disk_num;
-				break;
-			}
-		}
-
-		if (phy_info->attached.device_info &
-		    MPI_SAS_DEVICE_INFO_SSP_TARGET)
-			ds = "ssp";
-		if (phy_info->attached.device_info &
-		    MPI_SAS_DEVICE_INFO_STP_TARGET)
-			ds = "stp";
-		if (phy_info->attached.device_info &
-		    MPI_SAS_DEVICE_INFO_SATA_DEVICE)
-			ds = "sata";
-
-		printk(MYIOC_s_INFO_FMT
-		       "removing %s device, channel %d, id %d, phy %d\n",
-		       ioc->name, ds, ev->channel, ev->id, phy_info->phy_id);
-		dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
-		    "delete port (%d)\n", ioc->name, port->port_identifier);
-		sas_port_delete(port);
-		mptsas_port_delete(ioc, phy_info->port_details);
+		vtarget->id = hot_plug_info->phys_disk_num;
+		vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
+		phy_info->attached.phys_disk_num = hot_plug_info->phys_disk_num;
+		mptsas_reprobe_target(starget, 1);
 		break;
-	case MPTSAS_ADD_DEVICE:

-		if (ev->phys_disk_num_valid)
-			mpt_findImVolumes(ioc);
+	case MPTSAS_DEL_PHYSDISK_REPROBE:

-		/*
-		 * Refresh sas device pg0 data
-		 */
 		if (mptsas_sas_device_pg0(ioc, &sas_device,
 		    (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
 		     MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
-		    (ev->channel << 8) + ev->id)) {
+		    (hot_plug_info->channel << 8) + hot_plug_info->id)) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: exit at line=%d\n", ioc->name,
-			    __func__, __LINE__));
+			    "%s: fw_id=%d exit at line=%d\n",
+			    ioc->name, __func__,
+			    hot_plug_info->id, __LINE__));
 			break;
 		}

-		__mptsas_discovery_work(ioc);
-
 		phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
 		    sas_device.sas_address);
-
-		if (!phy_info || !phy_info->port_details) {
+		if (!phy_info) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: exit at line=%d\n", ioc->name,
-			    __func__, __LINE__));
+			    "%s: fw_id=%d exit at line=%d\n", ioc->name,
+			    __func__, hot_plug_info->id, __LINE__));
 			break;
 		}

 		starget = mptsas_get_starget(phy_info);
-		if (starget && (!ev->hidden_raid_component)){
-
-			vtarget = starget->hostdata;
-
-			if (!vtarget) {
-				dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-				    "%s: exit at line=%d\n", ioc->name,
-				    __func__, __LINE__));
-				break;
-			}
-			/*
-			 * Handling RAID components
-			 */
-			if (vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
-				printk(MYIOC_s_INFO_FMT
-				    "RAID Exposing: channel=%d, id=%d, "
-				    "physdsk %d \n", ioc->name, ev->channel,
-				    ev->id, ev->phys_disk_num);
-				vtarget->tflags &=
-				    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
-				vtarget->id = ev->id;
-				mptsas_reprobe_target(starget, 0);
-				phy_info->attached.phys_disk_num = ~0;
-			}
+		if (!starget) {
+			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+			    "%s: fw_id=%d exit at line=%d\n", ioc->name,
+			    __func__, hot_plug_info->id, __LINE__));
 			break;
 		}

-		if (mptsas_get_rphy(phy_info)) {
+		vtarget = starget->hostdata;
+		if (!vtarget) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: exit at line=%d\n", ioc->name,
-			    __func__, __LINE__));
-			if (ev->channel) printk("%d\n", __LINE__);
+			    "%s: fw_id=%d exit at line=%d\n", ioc->name,
+			    __func__, hot_plug_info->id, __LINE__));
 			break;
 		}

-		port = mptsas_get_port(phy_info);
-		if (!port) {
+		if (!(vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: exit at line=%d\n", ioc->name,
-			    __func__, __LINE__));
+			    "%s: fw_id=%d exit at line=%d\n", ioc->name,
+			    __func__, hot_plug_info->id, __LINE__));
 			break;
 		}
-		memcpy(&phy_info->attached, &sas_device,
-		    sizeof(struct mptsas_devinfo));
-
-		if (phy_info->attached.device_info &
-		    MPI_SAS_DEVICE_INFO_SSP_TARGET)
-			ds = "ssp";
-		if (phy_info->attached.device_info &
-		    MPI_SAS_DEVICE_INFO_STP_TARGET)
-			ds = "stp";
-		if (phy_info->attached.device_info &
-		    MPI_SAS_DEVICE_INFO_SATA_DEVICE)
-			ds = "sata";
-
-		printk(MYIOC_s_INFO_FMT
-		       "attaching %s device, channel %d, id %d, phy %d\n",
-		       ioc->name, ds, ev->channel, ev->id, ev->phy_id);

-		mptsas_parse_device_info(&identify, &phy_info->attached);
-		rphy = sas_end_device_alloc(port);
-		if (!rphy) {
-			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: exit at line=%d\n", ioc->name,
-			    __func__, __LINE__));
-			break; /* non-fatal: an rphy can be added later */
-		}
+		mpt_findImVolumes(ioc);

-		rphy->identify = identify;
-		if (sas_rphy_add(rphy)) {
-			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: exit at line=%d\n", ioc->name,
-			    __func__, __LINE__));
-			sas_rphy_free(rphy);
-			break;
-		}
-		mptsas_set_rphy(ioc, phy_info, rphy);
+		starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Exposing:"
+		    " fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
+		    ioc->name, hot_plug_info->channel, hot_plug_info->id,
+		    hot_plug_info->phys_disk_num, (unsigned long long)
+		    sas_device.sas_address);
+
+		vtarget->tflags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT;
+		vtarget->id = hot_plug_info->id;
+		phy_info->attached.phys_disk_num = ~0;
+		mptsas_reprobe_target(starget, 0);
+		mptsas_add_device_component_by_fw(ioc,
+		    hot_plug_info->channel, hot_plug_info->id);
 		break;
+
 	case MPTSAS_ADD_RAID:
-		sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
-		    ev->id, 0);
-		if (sdev) {
-			scsi_device_put(sdev);
-			break;
-		}
-		printk(MYIOC_s_INFO_FMT
-		       "attaching raid volume, channel %d, id %d\n",
-		       ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
-		scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, ev->id, 0);
+
 		mpt_findImVolumes(ioc);
+		printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
+		    "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
+		    hot_plug_info->id);
+		scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
+		    hot_plug_info->id, 0);
 		break;
+
 	case MPTSAS_DEL_RAID:
-		sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
-		    ev->id, 0);
-		if (!sdev)
-			break;
-		printk(MYIOC_s_INFO_FMT
-		       "removing raid volume, channel %d, id %d\n",
-		       ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
-		vdevice = sdev->hostdata;
-		scsi_remove_device(sdev);
-		scsi_device_put(sdev);
+
 		mpt_findImVolumes(ioc);
+		printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
+		    "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
+		    hot_plug_info->id);
+		scsi_remove_device(hot_plug_info->sdev);
+		scsi_device_put(hot_plug_info->sdev);
 		break;
+
 	case MPTSAS_ADD_INACTIVE_VOLUME:
+
+		mpt_findImVolumes(ioc);
 		mptsas_adding_inactive_raid_components(ioc,
-		    ev->channel, ev->id);
+		    hot_plug_info->channel, hot_plug_info->id);
 		break;
-	case MPTSAS_IGNORE_EVENT:
+
 	default:
 		break;
 	}

-	mutex_unlock(&ioc->sas_discovery_mutex);
-	kfree(ev);
+	mptsas_free_fw_event(ioc, fw_event);
 }

 static void
-mptsas_send_sas_event(MPT_ADAPTER *ioc,
-		EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data)
+mptsas_send_sas_event(struct fw_event_work *fw_event)
 {
-	struct mptsas_hotplug_event *ev;
-	u32 device_info = le32_to_cpu(sas_event_data->DeviceInfo);
-	__le64 sas_address;
+	MPT_ADAPTER *ioc;
+	struct mptsas_hotplug_event hot_plug_info;
+	EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
+	u32 device_info;
+	u64 sas_address;
+
+	ioc = fw_event->ioc;
+	sas_event_data = (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)
+	    fw_event->event_data;
+	device_info = le32_to_cpu(sas_event_data->DeviceInfo);

 	if ((device_info &
 	     (MPI_SAS_DEVICE_INFO_SSP_TARGET |
 	      MPI_SAS_DEVICE_INFO_STP_TARGET |
-	      MPI_SAS_DEVICE_INFO_SATA_DEVICE )) == 0)
+	      MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0) {
+		mptsas_free_fw_event(ioc, fw_event);
+		return;
+	}
+
+	if (sas_event_data->ReasonCode ==
+	    MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED) {
+		mptbase_sas_persist_operation(ioc,
+		    MPI_SAS_OP_CLEAR_NOT_PRESENT);
+		mptsas_free_fw_event(ioc, fw_event);
 		return;
+	}

 	switch (sas_event_data->ReasonCode) {
 	case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
-
-		mptsas_target_reset_queue(ioc, sas_event_data);
-		break;
-
 	case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
-		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
-		if (!ev) {
-			printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name);
-			break;
-		}
-
-		INIT_WORK(&ev->work, mptsas_hotplug_work);
-		ev->ioc = ioc;
-		ev->handle = le16_to_cpu(sas_event_data->DevHandle);
-		ev->parent_handle =
-		    le16_to_cpu(sas_event_data->ParentDevHandle);
-		ev->channel = sas_event_data->Bus;
-		ev->id = sas_event_data->TargetID;
-		ev->phy_id = sas_event_data->PhyNum;
+		memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
+		hot_plug_info.handle = le16_to_cpu(sas_event_data->DevHandle);
+		hot_plug_info.channel = sas_event_data->Bus;
+		hot_plug_info.id = sas_event_data->TargetID;
+		hot_plug_info.phy_id = sas_event_data->PhyNum;
 		memcpy(&sas_address, &sas_event_data->SASAddress,
-		    sizeof(__le64));
-		ev->sas_address = le64_to_cpu(sas_address);
-		ev->device_info = device_info;
-
+		    sizeof(u64));
+		hot_plug_info.sas_address = le64_to_cpu(sas_address);
+		hot_plug_info.device_info = device_info;
 		if (sas_event_data->ReasonCode &
 		    MPI_EVENT_SAS_DEV_STAT_RC_ADDED)
-			ev->event_type = MPTSAS_ADD_DEVICE;
+			hot_plug_info.event_type = MPTSAS_ADD_DEVICE;
 		else
-			ev->event_type = MPTSAS_DEL_DEVICE;
-		schedule_work(&ev->work);
+			hot_plug_info.event_type = MPTSAS_DEL_DEVICE;
+		mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
 		break;
+
 	case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
-		/*
-		 * Persistent table is full.
-		 */
-		INIT_WORK(&ioc->sas_persist_task,
-		    mptsas_persist_clear_table);
-		schedule_work(&ioc->sas_persist_task);
+		mptbase_sas_persist_operation(ioc,
+		    MPI_SAS_OP_CLEAR_NOT_PRESENT);
+		mptsas_free_fw_event(ioc, fw_event);
 		break;
-	/*
-	 * TODO, handle other events
-	 */
+
 	case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
-	case MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
+		/* TODO */
 	case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
-	case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
-	case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
-	case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
-	case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
+		/* TODO */
 	default:
+		mptsas_free_fw_event(ioc, fw_event);
 		break;
 	}
 }

 static void
-mptsas_send_raid_event(MPT_ADAPTER *ioc,
-		EVENT_DATA_RAID *raid_event_data)
+mptsas_send_raid_event(struct fw_event_work *fw_event)
 {
-	struct mptsas_hotplug_event *ev;
-	int status = le32_to_cpu(raid_event_data->SettingsStatus);
-	int state = (status >> 8) & 0xff;
-
-	if (ioc->bus_type != SAS)
-		return;
-
-	ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
-	if (!ev) {
-		printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name);
-		return;
+	MPT_ADAPTER *ioc;
+	EVENT_DATA_RAID *raid_event_data;
+	struct mptsas_hotplug_event hot_plug_info;
+	int status;
+	int state;
+	struct scsi_device *sdev = NULL;
+	VirtDevice *vdevice = NULL;
+	RaidPhysDiskPage0_t phys_disk;
+
+	ioc = fw_event->ioc;
+	raid_event_data = (EVENT_DATA_RAID *)fw_event->event_data;
+	status = le32_to_cpu(raid_event_data->SettingsStatus);
+	state = (status >> 8) & 0xff;
+
+	memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
+	hot_plug_info.id = raid_event_data->VolumeID;
+	hot_plug_info.channel = raid_event_data->VolumeBus;
+	hot_plug_info.phys_disk_num = raid_event_data->PhysDiskNum;
+
+	if (raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_DELETED ||
+	    raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_CREATED ||
+	    raid_event_data->ReasonCode ==
+	    MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED) {
+		sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
+		    hot_plug_info.id, 0);
+		hot_plug_info.sdev = sdev;
+		if (sdev)
+			vdevice = sdev->hostdata;
 	}

-	INIT_WORK(&ev->work, mptsas_hotplug_work);
-	ev->ioc = ioc;
-	ev->id = raid_event_data->VolumeID;
-	ev->channel = raid_event_data->VolumeBus;
-	ev->event_type = MPTSAS_IGNORE_EVENT;
+	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
+	    "ReasonCode=%02x\n", ioc->name, __func__,
+	    raid_event_data->ReasonCode));

 	switch (raid_event_data->ReasonCode) {
 	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
-		ev->phys_disk_num_valid = 1;
-		ev->phys_disk_num = raid_event_data->PhysDiskNum;
-		ev->event_type = MPTSAS_ADD_DEVICE;
+		hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK_REPROBE;
 		break;
 	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
-		ev->phys_disk_num_valid = 1;
-		ev->phys_disk_num = raid_event_data->PhysDiskNum;
-		ev->hidden_raid_component = 1;
-		ev->event_type = MPTSAS_DEL_DEVICE;
+		hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK_REPROBE;
 		break;
 	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
 		switch (state) {
 		case MPI_PD_STATE_ONLINE:
 		case MPI_PD_STATE_NOT_COMPATIBLE:
-			ev->phys_disk_num_valid = 1;
-			ev->phys_disk_num = raid_event_data->PhysDiskNum;
-			ev->hidden_raid_component = 1;
-			ev->event_type = MPTSAS_ADD_DEVICE;
+			mpt_raid_phys_disk_pg0(ioc,
+			    raid_event_data->PhysDiskNum, &phys_disk);
+			hot_plug_info.id = phys_disk.PhysDiskID;
+			hot_plug_info.channel = phys_disk.PhysDiskBus;
+			hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
 			break;
+		case MPI_PD_STATE_FAILED:
 		case MPI_PD_STATE_MISSING:
 		case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST:
 		case MPI_PD_STATE_FAILED_AT_HOST_REQUEST:
 		case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON:
-			ev->phys_disk_num_valid = 1;
-			ev->phys_disk_num = raid_event_data->PhysDiskNum;
-			ev->event_type = MPTSAS_DEL_DEVICE;
+			hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
 			break;
 		default:
 			break;
 		}
 		break;
 	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
-		ev->event_type = MPTSAS_DEL_RAID;
+		if (!sdev)
+			break;
+		vdevice->vtarget->deleted = 1; /* block IO */
+		hot_plug_info.event_type = MPTSAS_DEL_RAID;
 		break;
 	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
-		ev->event_type = MPTSAS_ADD_RAID;
+		if (sdev) {
+			scsi_device_put(sdev);
+			break;
+		}
+		hot_plug_info.event_type = MPTSAS_ADD_RAID;
 		break;
 	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
+		if (!(status & MPI_RAIDVOL0_STATUS_FLAG_ENABLED)) {
+			if (!sdev)
+				break;
+			vdevice->vtarget->deleted = 1; /* block IO */
+			hot_plug_info.event_type = MPTSAS_DEL_RAID;
+			break;
+		}
 		switch (state) {
 		case MPI_RAIDVOL0_STATUS_STATE_FAILED:
 		case MPI_RAIDVOL0_STATUS_STATE_MISSING:
-			ev->event_type = MPTSAS_DEL_RAID;
+			if (!sdev)
+				break;
+			vdevice->vtarget->deleted = 1; /* block IO */
+			hot_plug_info.event_type = MPTSAS_DEL_RAID;
 			break;
 		case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
 		case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
-			ev->event_type = MPTSAS_ADD_RAID;
+			if (sdev) {
+				scsi_device_put(sdev);
+				break;
+			}
+			hot_plug_info.event_type = MPTSAS_ADD_RAID;
 			break;
 		default:
 			break;
@@ -3001,32 +4335,188 @@ mptsas_send_raid_event(MPT_ADAPTER *ioc,
 	default:
 		break;
 	}
-	schedule_work(&ev->work);
+
+	if (hot_plug_info.event_type != MPTSAS_IGNORE_EVENT)
+		mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
+	else
+		mptsas_free_fw_event(ioc, fw_event);
 }

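[Editor's note] The raid-event path above depends on a strict reference discipline: every successful scsi_device_lookup() takes a reference that must be dropped exactly once, either immediately (volume already present) or later by the DEL_RAID path after scsi_remove_device(). A toy userspace sketch of that discipline; the types and helpers are invented stand-ins, not the SCSI midlayer API:

#include <stdio.h>

struct toy_sdev { int refcount; };

static struct toy_sdev volume = { .refcount = 0 };

static struct toy_sdev *lookup(int present)
{
    if (!present)
        return NULL;
    volume.refcount++; /* reference handed to the caller */
    return &volume;
}

static void put(struct toy_sdev *sdev)
{
    sdev->refcount--; /* caller is done with the device */
}

int main(void)
{
    struct toy_sdev *sdev = lookup(1);

    if (sdev) {
        printf("volume exists, dropping reference\n");
        put(sdev);
    }
    printf("refcount back to %d\n", volume.refcount);
    return 0;
}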
-static void
-mptsas_send_discovery_event(MPT_ADAPTER *ioc,
-	EVENT_DATA_SAS_DISCOVERY *discovery_data)
+/**
+ * mptsas_issue_tm - send mptsas internal tm request
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @type: Task Management type
+ * @channel: channel number for task management
+ * @id: Logical Target ID for reset (if appropriate)
+ * @lun: Logical unit for reset (if appropriate)
+ * @task_context: Context for the task to be aborted
+ * @timeout: timeout for task management control
+ *
+ * return 0 on success and -1 on failure:
+ *
+ */
+static int
+mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
+	int task_context, ulong timeout, u8 *issue_reset)
 {
-	struct mptsas_discovery_event *ev;
+	MPT_FRAME_HDR	*mf;
+	SCSITaskMgmt_t	*pScsiTm;
+	int		 retval;
+	unsigned long	 timeleft;
+
+	*issue_reset = 0;
+	mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
+	if (mf == NULL) {
+		retval = -1; /* return failure */
+		dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "TaskMgmt request: no "
+		    "msg frames!!\n", ioc->name));
+		goto out;
+	}

-	/*
-	 * DiscoveryStatus
-	 *
-	 * This flag will be non-zero when firmware
-	 * kicks off discovery, and return to zero
-	 * once its completed.
-	 */
-	if (discovery_data->DiscoveryStatus)
-		return;
+	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request: mr = %p, "
+	    "task_type = 0x%02X,\n\t timeout = %ld, fw_channel = %d, "
+	    "fw_id = %d, lun = %lld,\n\t task_context = 0x%x\n", ioc->name, mf,
+	     type, timeout, channel, id, (unsigned long long)lun,
+	     task_context));
+
+	pScsiTm = (SCSITaskMgmt_t *) mf;
+	memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
+	pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
+	pScsiTm->TaskType = type;
+	pScsiTm->MsgFlags = 0;
+	pScsiTm->TargetID = id;
+	pScsiTm->Bus = channel;
+	pScsiTm->ChainOffset = 0;
+	pScsiTm->Reserved = 0;
+	pScsiTm->Reserved1 = 0;
+	pScsiTm->TaskMsgContext = task_context;
+	int_to_scsilun(lun, (struct scsi_lun *)pScsiTm->LUN);
+
+	INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+	CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
+	retval = 0;
+	mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
+
+	/* Now wait for the command to complete */
+	timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
+	    timeout*HZ);
+	if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+		retval = -1; /* return failure */
+		dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
+		    "TaskMgmt request: TIMED OUT!(mr=%p)\n", ioc->name, mf));
+		mpt_free_msg_frame(ioc, mf);
+		if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+			goto out;
+		*issue_reset = 1;
+		goto out;
+	}

-	ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
-	if (!ev)
+	if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+		retval = -1; /* return failure */
+		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+		    "TaskMgmt request: failed with no reply\n", ioc->name));
+		goto out;
+	}
+
+ out:
+	CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+	return retval;
+}
+
+/**
+ * mptsas_broadcast_primative_work - Handle broadcast primitives
+ * @work: work queue payload containing info describing the event
+ *
+ * this will be handled in workqueue context.
+ */
+static void
+mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
+{
+	MPT_ADAPTER *ioc = fw_event->ioc;
+	MPT_FRAME_HDR	*mf;
+	VirtDevice	*vdevice;
+	int			ii;
+	struct scsi_cmnd	*sc;
+	SCSITaskMgmtReply_t	*pScsiTmReply;
+	u8			issue_reset;
+	int			task_context;
+	u8			channel, id;
+	int			 lun;
+	u32			 termination_count;
+	u32			 query_count;
+
+	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+	    "%s - enter\n", ioc->name, __func__));
+
+	mutex_lock(&ioc->taskmgmt_cmds.mutex);
+	if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
+		mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+		mptsas_requeue_fw_event(ioc, fw_event, 1000);
 		return;
-	INIT_WORK(&ev->work, mptsas_discovery_work);
-	ev->ioc = ioc;
-	schedule_work(&ev->work);
-};
+	}
+
+	issue_reset = 0;
+	termination_count = 0;
+	query_count = 0;
+	mpt_findImVolumes(ioc);
+	pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
+
+	for (ii = 0; ii < ioc->req_depth; ii++) {
+		if (ioc->fw_events_off)
+			goto out;
+		sc = mptscsih_get_scsi_lookup(ioc, ii);
+		if (!sc)
+			continue;
+		mf = MPT_INDEX_2_MFPTR(ioc, ii);
+		if (!mf)
+			continue;
+		task_context = mf->u.frame.hwhdr.msgctxu.MsgContext;
+		vdevice = sc->device->hostdata;
+		if (!vdevice || !vdevice->vtarget)
+			continue;
+		if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
+			continue; /* skip hidden raid components */
+		if (vdevice->vtarget->raidVolume)
+			continue; /* skip hidden raid components */
+		channel = vdevice->vtarget->channel;
+		id = vdevice->vtarget->id;
+		lun = vdevice->lun;
+		if (mptsas_issue_tm(ioc, MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK,
+		    channel, id, (u64)lun, task_context, 30, &issue_reset))
+			goto out;
+		query_count++;
+		termination_count +=
+		    le32_to_cpu(pScsiTmReply->TerminationCount);
+		if ((pScsiTmReply->IOCStatus == MPI_IOCSTATUS_SUCCESS) &&
+		    (pScsiTmReply->ResponseCode ==
+		    MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
+		    pScsiTmReply->ResponseCode ==
+		    MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
+			continue;
+		if (mptsas_issue_tm(ioc,
+		    MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET,
+		    channel, id, (u64)lun, 0, 30, &issue_reset))
+			goto out;
+		termination_count +=
+		    le32_to_cpu(pScsiTmReply->TerminationCount);
+	}
+
+ out:
+	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+	    "%s - exit, query_count = %d termination_count = %d\n",
+	    ioc->name, __func__, query_count, termination_count));
+
+	ioc->broadcast_aen_busy = 0;
+	mpt_clear_taskmgmt_in_progress_flag(ioc);
+	mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+
+	if (issue_reset) {
+		printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
+		    ioc->name, __func__);
+		mpt_HardResetHandler(ioc, CAN_SLEEP);
+	}
+	mptsas_free_fw_event(ioc, fw_event);
+}

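[Editor's note] mptsas_issue_tm() above is a fire-and-wait handshake: post a high-priority frame, then block on a completion with a timeout, and on timeout free the frame and ask the caller to reset. A rough userspace analogy of the same shape, using POSIX semaphores in place of the kernel's wait_for_completion_timeout(); this is an illustrative sketch only, not driver code:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

static sem_t done;

/* stands in for the firmware completing the task-management request */
static void *firmware(void *arg)
{
    sem_post(&done); /* "reply received" */
    return NULL;
}

int main(void)
{
    pthread_t t;
    struct timespec ts;

    sem_init(&done, 0, 0);
    pthread_create(&t, NULL, firmware, NULL); /* post the request */

    clock_gettime(CLOCK_REALTIME, &ts);
    ts.tv_sec += 30; /* 30 second timeout, like the driver's 30*HZ */
    if (sem_timedwait(&done, &ts))
        printf("TaskMgmt request: TIMED OUT\n");
    else
        printf("TaskMgmt request completed\n");
    pthread_join(t, NULL);
    return 0;
}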
3031/* 4521/*
3032 * mptsas_send_ir2_event - handle exposing hidden disk when 4522 * mptsas_send_ir2_event - handle exposing hidden disk when
@@ -3037,76 +4527,159 @@ mptsas_send_discovery_event(MPT_ADAPTER *ioc,
  *
  */
 static void
-mptsas_send_ir2_event(MPT_ADAPTER *ioc, PTR_MPI_EVENT_DATA_IR2 ir2_data)
+mptsas_send_ir2_event(struct fw_event_work *fw_event)
 {
-	struct mptsas_hotplug_event *ev;
-
-	if (ir2_data->ReasonCode !=
-	    MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED)
-		return;
-
-	ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
-	if (!ev)
+	MPT_ADAPTER	*ioc;
+	struct mptsas_hotplug_event hot_plug_info;
+	MPI_EVENT_DATA_IR2	*ir2_data;
+	u8 reasonCode;
+	RaidPhysDiskPage0_t	phys_disk;
+
+	ioc = fw_event->ioc;
+	ir2_data = (MPI_EVENT_DATA_IR2 *)fw_event->event_data;
+	reasonCode = ir2_data->ReasonCode;
+
+	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
+	    "ReasonCode=%02x\n", ioc->name, __func__, reasonCode));
+
+	memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
+	hot_plug_info.id = ir2_data->TargetID;
+	hot_plug_info.channel = ir2_data->Bus;
+	switch (reasonCode) {
+	case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
+		hot_plug_info.event_type = MPTSAS_ADD_INACTIVE_VOLUME;
+		break;
+	case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
+		hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
+		hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
+		break;
+	case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
+		hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
+		mpt_raid_phys_disk_pg0(ioc,
+		    ir2_data->PhysDiskNum, &phys_disk);
+		hot_plug_info.id = phys_disk.PhysDiskID;
+		hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
+		break;
+	default:
+		mptsas_free_fw_event(ioc, fw_event);
 		return;
-
-	INIT_WORK(&ev->work, mptsas_hotplug_work);
-	ev->ioc = ioc;
-	ev->id = ir2_data->TargetID;
-	ev->channel = ir2_data->Bus;
-	ev->event_type = MPTSAS_ADD_INACTIVE_VOLUME;
-
-	schedule_work(&ev->work);
-};
+	}
+	mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
+}

 static int
 mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
 {
-	int rc=1;
-	u8 event = le32_to_cpu(reply->Event) & 0xFF;
+	u32 event = le32_to_cpu(reply->Event);
+	int sz, event_data_sz;
+	struct fw_event_work *fw_event;
+	unsigned long delay;

-	if (!ioc->sh)
-		goto out;
-
-	/*
-	 * sas_discovery_ignore_events
-	 *
-	 * This flag is to prevent anymore processing of
-	 * sas events once mptsas_remove function is called.
-	 */
-	if (ioc->sas_discovery_ignore_events) {
-		rc = mptscsih_event_process(ioc, reply);
-		goto out;
-	}
+	/* events turned off due to host reset or driver unloading */
+	if (ioc->fw_events_off)
+		return 0;

+	delay = msecs_to_jiffies(1);
 	switch (event) {
+	case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
+	{
+		EVENT_DATA_SAS_BROADCAST_PRIMITIVE *broadcast_event_data =
+		    (EVENT_DATA_SAS_BROADCAST_PRIMITIVE *)reply->Data;
+		if (broadcast_event_data->Primitive !=
+		    MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
+			return 0;
+		if (ioc->broadcast_aen_busy)
+			return 0;
+		ioc->broadcast_aen_busy = 1;
+		break;
+	}
 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
-		mptsas_send_sas_event(ioc,
-		    (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data);
+	{
+		EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data =
+		    (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data;
+
+		if (sas_event_data->ReasonCode ==
+		    MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING) {
+			mptsas_target_reset_queue(ioc, sas_event_data);
+			return 0;
+		}
 		break;
-	case MPI_EVENT_INTEGRATED_RAID:
-		mptsas_send_raid_event(ioc,
-		    (EVENT_DATA_RAID *)reply->Data);
+	}
+	case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
+	{
+		MpiEventDataSasExpanderStatusChange_t *expander_data =
+		    (MpiEventDataSasExpanderStatusChange_t *)reply->Data;
+
+		if (ioc->old_sas_discovery_protocal)
+			return 0;
+
+		if (expander_data->ReasonCode ==
+		    MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING &&
+		    ioc->device_missing_delay)
+			delay = HZ * ioc->device_missing_delay;
 		break;
+	}
+	case MPI_EVENT_SAS_DISCOVERY:
+	{
+		u32 discovery_status;
+		EventDataSasDiscovery_t *discovery_data =
+		    (EventDataSasDiscovery_t *)reply->Data;
+
+		discovery_status = le32_to_cpu(discovery_data->DiscoveryStatus);
+		ioc->sas_discovery_quiesce_io = discovery_status ? 1 : 0;
+		if (ioc->old_sas_discovery_protocal && !discovery_status)
+			mptsas_queue_rescan(ioc);
+		return 0;
+	}
+	case MPI_EVENT_INTEGRATED_RAID:
 	case MPI_EVENT_PERSISTENT_TABLE_FULL:
-		INIT_WORK(&ioc->sas_persist_task,
-		    mptsas_persist_clear_table);
-		schedule_work(&ioc->sas_persist_task);
-		break;
-	case MPI_EVENT_SAS_DISCOVERY:
-		mptsas_send_discovery_event(ioc,
-		    (EVENT_DATA_SAS_DISCOVERY *)reply->Data);
-		break;
 	case MPI_EVENT_IR2:
-		mptsas_send_ir2_event(ioc,
-		    (PTR_MPI_EVENT_DATA_IR2)reply->Data);
+	case MPI_EVENT_SAS_PHY_LINK_STATUS:
+	case MPI_EVENT_QUEUE_FULL:
 		break;
 	default:
-		rc = mptscsih_event_process(ioc, reply);
-		break;
+		return 0;
 	}
- out:

-	return rc;
+	event_data_sz = ((reply->MsgLength * 4) -
+	    offsetof(EventNotificationReply_t, Data));
+	sz = offsetof(struct fw_event_work, event_data) + event_data_sz;
+	fw_event = kzalloc(sz, GFP_ATOMIC);
+	if (!fw_event) {
+		printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", ioc->name,
+		    __func__, __LINE__);
+		return 0;
+	}
+	memcpy(fw_event->event_data, reply->Data, event_data_sz);
+	fw_event->event = event;
+	fw_event->ioc = ioc;
+	mptsas_add_fw_event(ioc, fw_event, delay);
+	return 0;
+}
+
+/* Delete a volume when no longer listed in ioc pg2
+ */
+static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id)
+{
+	struct scsi_device *sdev;
+	int i;
+
+	sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, id, 0);
+	if (!sdev)
+		return;
+	if (!ioc->raid_data.pIocPg2)
+		goto out;
+	if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
+		goto out;
+	for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++)
+		if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id)
+			goto release_sdev;
+ out:
+	printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
+	    "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL, id);
+	scsi_remove_device(sdev);
+ release_sdev:
+	scsi_device_put(sdev);
 }

 static int
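[Editor's note] The new mptsas_event_process() allocates each queued event with a trailing variable-length payload: the allocation size is offsetof(struct, trailing_array) plus the payload size, so one kzalloc() covers both the header and the copied event data. A self-contained userspace sketch of that allocation pattern; the struct here is a simplified stand-in for struct fw_event_work:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>

struct event_work {
    int    event;
    size_t data_sz;
    unsigned char event_data[1]; /* payload copied in-line */
};

static struct event_work *event_alloc(const void *data, size_t sz)
{
    struct event_work *w;

    /* header bytes up to the array, plus the payload itself */
    w = calloc(1, offsetof(struct event_work, event_data) + sz);
    if (!w)
        return NULL;
    memcpy(w->event_data, data, sz);
    w->data_sz = sz;
    return w;
}

int main(void)
{
    const char payload[] = "reply->Data bytes";
    struct event_work *w = event_alloc(payload, sizeof(payload));

    if (w) {
        printf("queued %zu payload bytes\n", w->data_sz);
        free(w);
    }
    return 0;
}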
@@ -3128,6 +4701,7 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		return r;

 	ioc = pci_get_drvdata(pdev);
+	mptsas_fw_event_off(ioc);
 	ioc->DoneCtx = mptsasDoneCtx;
 	ioc->TaskCtx = mptsasTaskCtx;
 	ioc->InternalCtx = mptsasInternalCtx;
@@ -3211,17 +4785,15 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	 * A slightly different algorithm is required for
 	 * 64bit SGEs.
 	 */
-	scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
-	if (sizeof(dma_addr_t) == sizeof(u64)) {
+	scale = ioc->req_sz/ioc->SGE_size;
+	if (ioc->sg_addr_size == sizeof(u64)) {
 		numSGE = (scale - 1) *
 		  (ioc->facts.MaxChainDepth-1) + scale +
-		  (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
-		  sizeof(u32));
+		  (ioc->req_sz - 60) / ioc->SGE_size;
 	} else {
 		numSGE = 1 + (scale - 1) *
 		  (ioc->facts.MaxChainDepth-1) + scale +
-		  (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
-		  sizeof(u32));
+		  (ioc->req_sz - 64) / ioc->SGE_size;
 	}

 	if (numSGE < sh->sg_tablesize) {
@@ -3251,9 +4823,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)

 	/* Clear the TM flags
 	 */
-	hd->tmPending = 0;
-	hd->tmState = TM_STATE_NONE;
-	hd->resetPending = 0;
 	hd->abortSCpnt = NULL;

 	/* Clear the pointer used to store
@@ -3273,10 +4842,11 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)

 	ioc->sas_data.ptClear = mpt_pt_clear;

-	init_waitqueue_head(&hd->scandv_waitq);
-	hd->scandv_wait_done = 0;
 	hd->last_queue_full = 0;
 	INIT_LIST_HEAD(&hd->target_reset_list);
+	INIT_LIST_HEAD(&ioc->sas_device_info_list);
+	mutex_init(&ioc->sas_device_info_mutex);
+
 	spin_unlock_irqrestore(&ioc->FreeQlock, flags);

 	if (ioc->sas_data.ptClear==1) {
@@ -3291,8 +4861,11 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto out_mptsas_probe;
 	}

+	/* older firmware doesn't support expander events */
+	if ((ioc->facts.HeaderVersion >> 8) < 0xE)
+		ioc->old_sas_discovery_protocal = 1;
 	mptsas_scan_sas_topology(ioc);
-
+	mptsas_fw_event_on(ioc);
 	return 0;

  out_mptsas_probe:
@@ -3301,12 +4874,25 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	return error;
 }

+void
+mptsas_shutdown(struct pci_dev *pdev)
+{
+	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+
+	mptsas_fw_event_off(ioc);
+	mptsas_cleanup_fw_event_q(ioc);
+}
+
 static void __devexit mptsas_remove(struct pci_dev *pdev)
 {
 	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
 	struct mptsas_portinfo *p, *n;
 	int i;

+	mptsas_shutdown(pdev);
+
+	mptsas_del_device_components(ioc);
+
 	ioc->sas_discovery_ignore_events = 1;
 	sas_remove_host(ioc->sh);

@@ -3315,11 +4901,12 @@ static void __devexit mptsas_remove(struct pci_dev *pdev)
 		list_del(&p->list);
 		for (i = 0 ; i < p->num_phys ; i++)
 			mptsas_port_delete(ioc, p->phy_info[i].port_details);
+
 		kfree(p->phy_info);
 		kfree(p);
 	}
 	mutex_unlock(&ioc->sas_topology_mutex);
-
+	ioc->hba_port_info = NULL;
 	mptscsih_remove(pdev);
 }

@@ -3344,7 +4931,7 @@ static struct pci_driver mptsas_driver = {
 	.id_table	= mptsas_pci_table,
 	.probe		= mptsas_probe,
 	.remove		= __devexit_p(mptsas_remove),
-	.shutdown	= mptscsih_shutdown,
+	.shutdown	= mptsas_shutdown,
 #ifdef CONFIG_PM
 	.suspend	= mptscsih_suspend,
 	.resume		= mptscsih_resume,
@@ -3364,10 +4951,12 @@ mptsas_init(void)
 		return -ENODEV;

 	mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER);
-	mptsasTaskCtx = mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER);
+	mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER);
 	mptsasInternalCtx =
 		mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER);
 	mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER);
+	mptsasDeviceResetCtx =
+		mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER);

 	mpt_event_register(mptsasDoneCtx, mptsas_event_process);
 	mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset);
@@ -3392,6 +4981,7 @@ mptsas_exit(void)
 	mpt_deregister(mptsasInternalCtx);
 	mpt_deregister(mptsasTaskCtx);
 	mpt_deregister(mptsasDoneCtx);
+	mpt_deregister(mptsasDeviceResetCtx);
 }
 
 module_init(mptsas_init);
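mptsas_init() now registers a fifth callback context, mptsasDeviceResetCtx, so replies to internally issued target resets route to mptsas_taskmgmt_complete() while SCSI-midlayer task management keeps using the generic mptscsih_taskmgmt_complete(). Conceptually, each mpt_register() call claims a slot in a dispatch table keyed by the context id carried in every reply frame; a generic sketch of that mechanism (not the fusion driver's exact code):

	typedef int (*example_reply_cb)(MPT_ADAPTER *ioc,
		MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);

	static example_reply_cb example_callbacks[32];

	/* Returns a context id the caller stamps into each request header;
	 * the ISR uses the id from the reply to pick the handler. */
	static int example_register(example_reply_cb cb)
	{
		int ctx;

		for (ctx = 1; ctx < 32; ctx++)	/* slot 0 reserved */
			if (!example_callbacks[ctx]) {
				example_callbacks[ctx] = cb;
				return ctx;
			}
		return -1;
	}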
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
index 2b544e0877e..953c2bfcf6a 100644
--- a/drivers/message/fusion/mptsas.h
+++ b/drivers/message/fusion/mptsas.h
@@ -53,6 +53,7 @@ struct mptsas_target_reset_event {
 	struct list_head	list;
 	EVENT_DATA_SAS_DEVICE_STATUS_CHANGE sas_event_data;
 	u8	target_reset_issued;
+	unsigned long	 time_count;
 };
 
 enum mptsas_hotplug_action {
@@ -60,12 +61,37 @@ enum mptsas_hotplug_action {
 	MPTSAS_DEL_DEVICE,
 	MPTSAS_ADD_RAID,
 	MPTSAS_DEL_RAID,
+	MPTSAS_ADD_PHYSDISK,
+	MPTSAS_ADD_PHYSDISK_REPROBE,
+	MPTSAS_DEL_PHYSDISK,
+	MPTSAS_DEL_PHYSDISK_REPROBE,
 	MPTSAS_ADD_INACTIVE_VOLUME,
 	MPTSAS_IGNORE_EVENT,
 };
 
+struct mptsas_mapping {
+	u8			id;
+	u8			channel;
+};
+
+struct mptsas_device_info {
+	struct list_head	list;
+	struct mptsas_mapping	os;	/* operating system mapping */
+	struct mptsas_mapping	fw;	/* firmware mapping */
+	u64			sas_address;
+	u32			device_info;	/* specific bits for devices */
+	u16			slot;		/* enclosure slot id */
+	u64			enclosure_logical_id;	/* enclosure address */
+	u8			is_logical_volume;	/* is this logical volume */
+	/* this belongs to volume */
+	u8			is_hidden_raid_component;
+	/* this valid when is_hidden_raid_component set */
+	u8			volume_id;
+	/* cached data for a removed device */
+	u8			is_cached;
+};
+
 struct mptsas_hotplug_event {
-	struct work_struct	work;
 	MPT_ADAPTER		*ioc;
 	enum mptsas_hotplug_action event_type;
 	u64			sas_address;
@@ -73,11 +99,18 @@ struct mptsas_hotplug_event {
 	u8			id;
 	u32			device_info;
 	u16			handle;
-	u16			parent_handle;
 	u8			phy_id;
-	u8			phys_disk_num_valid;	/* hrc (hidden raid component) */
 	u8			phys_disk_num;		/* hrc - unique index*/
-	u8			hidden_raid_component;	/* hrc - don't expose*/
+	struct scsi_device	*sdev;
+};
+
+struct fw_event_work {
+	struct list_head	list;
+	struct delayed_work	work;
+	MPT_ADAPTER		*ioc;
+	u32			event;
+	u8			retries;
+	u8			event_data[1];
 };
 
 struct mptsas_discovery_event {
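struct fw_event_work ends in a one-byte event_data[] array, the pre-C99 flexible-array idiom: each queued event is allocated large enough to carry a private copy of its MPI event payload alongside the delayed_work. A sketch of the allocation side, under the assumption of a handler named example_fw_event_work() (the helper itself is hypothetical):

	static void example_fw_event_work(struct work_struct *work);

	static struct fw_event_work *example_alloc_fw_event(MPT_ADAPTER *ioc,
		u32 event, void *data, int data_len)
	{
		struct fw_event_work *fw_event;

		/* sizeof(*fw_event) already counts one payload byte, hence
		 * the "- 1" before adding the real payload length. */
		fw_event = kzalloc(sizeof(*fw_event) - 1 + data_len, GFP_ATOMIC);
		if (!fw_event)
			return NULL;
		fw_event->ioc = ioc;
		fw_event->event = event;
		memcpy(fw_event->event_data, data, data_len);
		INIT_DELAYED_WORK(&fw_event->work, example_fw_event_work);
		return fw_event;
	}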
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index e62c6bc4ad3..8440f78f696 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -80,7 +80,7 @@ MODULE_VERSION(my_VERSION);
 /*
  *  Other private/forward protos...
  */
-static struct scsi_cmnd * mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
+struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
 static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i);
 static void	mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd);
 static int	SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *scmd);
@@ -92,18 +92,24 @@ static int mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
 			 SCSIIORequest_t *pReq, int req_idx);
 static void	mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx);
 static void	mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply);
-static int	mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd);
-static int	mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout );
 
-static int	mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout);
+int	mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id,
+		int lun, int ctx2abort, ulong timeout);
 
 int		mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
 int		mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
 
+void
+mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
+static int	mptscsih_get_completion_code(MPT_ADAPTER *ioc,
+		MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
 int	mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
 static int	mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
 static void	mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice);
 
+static int
+mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
+	SCSITaskMgmtReply_t *pScsiTmReply);
 void		mptscsih_remove(struct pci_dev *);
 void		mptscsih_shutdown(struct pci_dev *);
 #ifdef CONFIG_PM
@@ -113,69 +119,6 @@ int mptscsih_resume(struct pci_dev *pdev);
 
 #define SNS_LEN(scp)	SCSI_SENSE_BUFFERSIZE
 
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- *	mptscsih_add_sge - Place a simple SGE at address pAddr.
- *	@pAddr: virtual address for SGE
- *	@flagslength: SGE flags and data transfer length
- *	@dma_addr: Physical address
- *
- *	This routine places a MPT request frame back on the MPT adapter's
- *	FreeQ.
- */
-static inline void
-mptscsih_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr)
-{
-	if (sizeof(dma_addr_t) == sizeof(u64)) {
-		SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
-		u32 tmp = dma_addr & 0xFFFFFFFF;
-
-		pSge->FlagsLength = cpu_to_le32(flagslength);
-		pSge->Address.Low = cpu_to_le32(tmp);
-		tmp = (u32) ((u64)dma_addr >> 32);
-		pSge->Address.High = cpu_to_le32(tmp);
-
-	} else {
-		SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
-		pSge->FlagsLength = cpu_to_le32(flagslength);
-		pSge->Address = cpu_to_le32(dma_addr);
-	}
-} /* mptscsih_add_sge() */
-
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- *	mptscsih_add_chain - Place a chain SGE at address pAddr.
- *	@pAddr: virtual address for SGE
- *	@next: nextChainOffset value (u32's)
- *	@length: length of next SGL segment
- *	@dma_addr: Physical address
- *
- *	This routine places a MPT request frame back on the MPT adapter's
- *	FreeQ.
- */
-static inline void
-mptscsih_add_chain(char *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
-{
-	if (sizeof(dma_addr_t) == sizeof(u64)) {
-		SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
-		u32 tmp = dma_addr & 0xFFFFFFFF;
-
-		pChain->Length = cpu_to_le16(length);
-		pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size();
-
-		pChain->NextChainOffset = next;
-
-		pChain->Address.Low = cpu_to_le32(tmp);
-		tmp = (u32) ((u64)dma_addr >> 32);
-		pChain->Address.High = cpu_to_le32(tmp);
-	} else {
-		SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
-		pChain->Length = cpu_to_le16(length);
-		pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size();
-		pChain->NextChainOffset = next;
-		pChain->Address = cpu_to_le32(dma_addr);
-	}
-} /* mptscsih_add_chain() */
-
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
@@ -281,10 +224,10 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
 	 */
 
 nextSGEset:
-	numSgeSlots = ((frm_sz - sgeOffset) / (sizeof(u32) + sizeof(dma_addr_t)) );
+	numSgeSlots = ((frm_sz - sgeOffset) / ioc->SGE_size);
 	numSgeThisFrame = (sges_left < numSgeSlots) ? sges_left : numSgeSlots;
 
-	sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | MPT_SGE_FLAGS_ADDRESSING | sgdir;
+	sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | sgdir;
 
 	/* Get first (num - 1) SG elements
 	 * Skip any SG entries with a length of 0
@@ -293,17 +236,19 @@ nextSGEset:
 	for (ii=0; ii < (numSgeThisFrame-1); ii++) {
 		thisxfer = sg_dma_len(sg);
 		if (thisxfer == 0) {
-			sg = sg_next(sg); /* Get next SG element from the OS */
+			/* Get next SG element from the OS */
+			sg = sg_next(sg);
 			sg_done++;
 			continue;
 		}
 
 		v2 = sg_dma_address(sg);
-		mptscsih_add_sge(psge, sgflags | thisxfer, v2);
+		ioc->add_sge(psge, sgflags | thisxfer, v2);
 
-		sg = sg_next(sg);	/* Get next SG element from the OS */
-		psge += (sizeof(u32) + sizeof(dma_addr_t));
-		sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
+		/* Get next SG element from the OS */
+		sg = sg_next(sg);
+		psge += ioc->SGE_size;
+		sgeOffset += ioc->SGE_size;
 		sg_done++;
 	}
 
@@ -320,12 +265,8 @@ nextSGEset:
 		thisxfer = sg_dma_len(sg);
 
 		v2 = sg_dma_address(sg);
-		mptscsih_add_sge(psge, sgflags | thisxfer, v2);
-		/*
-		sg = sg_next(sg);
-		psge += (sizeof(u32) + sizeof(dma_addr_t));
-		*/
-		sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
+		ioc->add_sge(psge, sgflags | thisxfer, v2);
+		sgeOffset += ioc->SGE_size;
 		sg_done++;
 
 		if (chainSge) {
@@ -334,7 +275,8 @@ nextSGEset:
 			 * Update the chain element
 			 * Offset and Length fields.
 			 */
-			mptscsih_add_chain((char *)chainSge, 0, sgeOffset, ioc->ChainBufferDMA + chain_dma_off);
+			ioc->add_chain((char *)chainSge, 0, sgeOffset,
+				ioc->ChainBufferDMA + chain_dma_off);
 		} else {
 			/* The current buffer is the original MF
 			 * and there is no Chain buffer.
@@ -367,7 +309,7 @@ nextSGEset:
 	 * set properly).
 	 */
 	if (sg_done) {
-		u32 *ptmp = (u32 *) (psge - (sizeof(u32) + sizeof(dma_addr_t)));
+		u32 *ptmp = (u32 *) (psge - ioc->SGE_size);
 		sgflags = le32_to_cpu(*ptmp);
 		sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT;
 		*ptmp = cpu_to_le32(sgflags);
@@ -381,8 +323,9 @@ nextSGEset:
 			 * Old chain element is now complete.
 			 */
 			u8 nextChain = (u8) (sgeOffset >> 2);
-			sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
-			mptscsih_add_chain((char *)chainSge, nextChain, sgeOffset, ioc->ChainBufferDMA + chain_dma_off);
+			sgeOffset += ioc->SGE_size;
+			ioc->add_chain((char *)chainSge, nextChain, sgeOffset,
+				ioc->ChainBufferDMA + chain_dma_off);
 		} else {
 			/* The original MF buffer requires a chain buffer -
 			 *	set the offset.
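Throughout these hunks the compile-time (sizeof(u32) + sizeof(dma_addr_t)) arithmetic collapses into ioc->SGE_size, and the direct mptscsih_add_sge()/mptscsih_add_chain() calls become indirect calls through per-adapter function pointers. That moves the 32- vs 64-bit SGE format decision from build time to adapter attach time. A hedged sketch of the dispatch this relies on (function names here are illustrative; flag adjustments for 64-bit addressing are elided):

	static void example_add_sge_32(char *paddr, u32 flagslength,
		dma_addr_t dma_addr)
	{
		SGESimple32_t *psge = (SGESimple32_t *) paddr;

		psge->FlagsLength = cpu_to_le32(flagslength);
		psge->Address = cpu_to_le32(dma_addr);
	}

	static void example_add_sge_64(char *paddr, u32 flagslength,
		dma_addr_t dma_addr)
	{
		SGESimple64_t *psge = (SGESimple64_t *) paddr;

		psge->FlagsLength = cpu_to_le32(flagslength);
		psge->Address.Low = cpu_to_le32(lower_32_bits(dma_addr));
		psge->Address.High = cpu_to_le32(upper_32_bits(dma_addr));
	}

	/* Chosen once when the adapter attaches: */
	if (sizeof(dma_addr_t) == sizeof(u64)) {
		ioc->add_sge = example_add_sge_64;
		ioc->SGE_size = sizeof(SGESimple64_t);
	} else {
		ioc->add_sge = example_add_sge_32;
		ioc->SGE_size = sizeof(SGESimple32_t);
	}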
@@ -592,14 +535,15 @@ mptscsih_info_scsiio(MPT_ADAPTER *ioc, struct scsi_cmnd *sc, SCSIIOReply_t * pSc
 	}
 
 	scsi_print_command(sc);
-	printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d\n",
-	    ioc->name, pScsiReply->Bus, pScsiReply->TargetID);
+	printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d, lun = %d\n",
+	    ioc->name, pScsiReply->Bus, pScsiReply->TargetID, sc->device->lun);
 	printk(MYIOC_s_DEBUG_FMT "\trequest_len = %d, underflow = %d, "
 	    "resid = %d\n", ioc->name, scsi_bufflen(sc), sc->underflow,
 	    scsi_get_resid(sc));
 	printk(MYIOC_s_DEBUG_FMT "\ttag = %d, transfer_count = %d, "
 	    "sc->result = %08X\n", ioc->name, le16_to_cpu(pScsiReply->TaskTag),
 	    le32_to_cpu(pScsiReply->TransferCount), sc->result);
+
 	printk(MYIOC_s_DEBUG_FMT "\tiocstatus = %s (0x%04x), "
 	    "scsi_status = %s (0x%02x), scsi_state = (0x%02x)\n",
 	    ioc->name, desc, ioc_status, desc1, pScsiReply->SCSIStatus,
@@ -654,16 +598,14 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 	req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
 	req_idx_MR = (mr != NULL) ?
 	    le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx;
+
+	/* Special case, where already freed message frame is received from
+	 * Firmware. It happens with Resetting IOC.
+	 * Return immediately. Do not care
+	 */
 	if ((req_idx != req_idx_MR) ||
-	    (mf->u.frame.linkage.arg1 == 0xdeadbeaf)) {
-		printk(MYIOC_s_ERR_FMT "Received a mf that was already freed\n",
-		    ioc->name);
-		printk (MYIOC_s_ERR_FMT
-		    "req_idx=%x req_idx_MR=%x mf=%p mr=%p sc=%p\n",
-		    ioc->name, req_idx, req_idx_MR, mf, mr,
-		    mptscsih_get_scsi_lookup(ioc, req_idx_MR));
+	    (le32_to_cpu(mf->u.frame.linkage.arg1) == 0xdeadbeaf))
 		return 0;
-	}
 
 	sc = mptscsih_getclear_scsi_lookup(ioc, req_idx);
 	if (sc == NULL) {
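Note the rewritten check also wraps the 0xdeadbeaf sentinel in le32_to_cpu(), so the comparison now works on big-endian hosts as well. The sentinel is presumably stamped on the write side when a frame is returned to the free pool; a one-line sketch of that pairing (the tagging site is an assumption matching the check above):

	/* Sketch of the tagging side when a frame is freed: */
	mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf);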
@@ -810,12 +752,16 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 	 */
 
 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:	/* 0x0048 */
-	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:		/* 0x004C */
 		/* Linux handles an unsolicited DID_RESET better
 		 * than an unsolicited DID_ABORT.
 		 */
 		sc->result = DID_RESET << 16;
 
+	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:		/* 0x004C */
+		if (ioc->bus_type == FC)
+			sc->result = DID_ERROR << 16;
+		else
+			sc->result = DID_RESET << 16;
 		break;
 
 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:	/* 0x0049 */
@@ -992,9 +938,9 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
 		scsi_dma_unmap(sc);
 		sc->result = DID_RESET << 16;
 		sc->host_scribble = NULL;
-		sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT
-		    "completing cmds: fw_channel %d, fw_id %d, sc=%p,"
-		    " mf = %p, idx=%x\n", ioc->name, channel, id, sc, mf, ii);
+		dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT
+		    "completing cmds: fw_channel %d, fw_id %d, sc=%p, mf = %p, "
+		    "idx=%x\n", ioc->name, channel, id, sc, mf, ii));
 		sc->scsi_done(sc);
 	}
 }
@@ -1053,9 +999,11 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
 			scsi_dma_unmap(sc);
 			sc->host_scribble = NULL;
 			sc->result = DID_NO_CONNECT << 16;
-			sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT "completing cmds: fw_channel %d,"
-			   "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name, vdevice->vtarget->channel,
-			   vdevice->vtarget->id, sc, mf, ii);
+			dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device,
+			   MYIOC_s_FMT "completing cmds: fw_channel %d, "
+			   "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name,
+			   vdevice->vtarget->channel, vdevice->vtarget->id,
+			   sc, mf, ii));
 			sc->scsi_done(sc);
 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 		}
@@ -1346,7 +1294,6 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 	MPT_FRAME_HDR		*mf;
 	SCSIIORequest_t	*pScsiReq;
 	VirtDevice	*vdevice = SCpnt->device->hostdata;
-	int	 lun;
 	u32	 datalen;
 	u32	 scsictl;
 	u32	 scsidir;
@@ -1357,13 +1304,12 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 
 	hd = shost_priv(SCpnt->device->host);
 	ioc = hd->ioc;
-	lun = SCpnt->device->lun;
 	SCpnt->scsi_done = done;
 
 	dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n",
 		ioc->name, SCpnt, done));
 
-	if (hd->resetPending) {
+	if (ioc->taskmgmt_quiesce_io) {
 		dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "qcmd: SCpnt=%p timeout + 60HZ\n",
 			ioc->name, SCpnt));
 		return SCSI_MLQUEUE_HOST_BUSY;
@@ -1422,7 +1368,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 	pScsiReq->CDBLength = SCpnt->cmd_len;
 	pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
 	pScsiReq->Reserved = 0;
-	pScsiReq->MsgFlags = mpt_msg_flags();
+	pScsiReq->MsgFlags = mpt_msg_flags(ioc);
 	int_to_scsilun(SCpnt->device->lun, (struct scsi_lun *)pScsiReq->LUN);
 	pScsiReq->Control = cpu_to_le32(scsictl);
 
@@ -1448,7 +1394,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 	 */
 	if (datalen == 0) {
 		/* Add a NULL SGE */
-		mptscsih_add_sge((char *)&pScsiReq->SGL, MPT_SGE_FLAGS_SSIMPLE_READ | 0,
+		ioc->add_sge((char *)&pScsiReq->SGL,
+			MPT_SGE_FLAGS_SSIMPLE_READ | 0,
 			(dma_addr_t) -1);
 	} else {
 		/* Add a 32 or 64 bit SGE */
@@ -1528,8 +1475,8 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *	mptscsih_TMHandler - Generic handler for SCSI Task Management.
- *	@hd: Pointer to MPT SCSI HOST structure
+ *	mptscsih_IssueTaskMgmt - Generic send Task Management function.
+ *	@hd: Pointer to MPT_SCSI_HOST structure
 *	@type: Task Management type
 *	@channel: channel number for task management
 *	@id: Logical Target ID for reset (if appropriate)
@@ -1537,145 +1484,68 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
 *	@ctx2abort: Context for the task to be aborted (if appropriate)
 *	@timeout: timeout for task management control
 *
- *	Fall through to mpt_HardResetHandler if: not operational, too many
- *	failed TM requests or handshake failure.
+ *	Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
+ *	or a non-interrupt thread.  In the former, must not call schedule().
 *
- *	Remark: Currently invoked from a non-interrupt thread (_bh).
+ *	Not all fields are meaningfull for all task types.
 *
- *	Note: With old EH code, at most 1 SCSI TaskMgmt function per IOC
- *	will be active.
+ *	Returns 0 for SUCCESS, or FAILED.
 *
- *	Returns 0 for SUCCESS, or %FAILED.
 **/
 int
-mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout)
+mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
+	int ctx2abort, ulong timeout)
 {
-	MPT_ADAPTER	*ioc;
-	int		 rc = -1;
+	MPT_FRAME_HDR	*mf;
+	SCSITaskMgmt_t	*pScsiTm;
+	int		 ii;
+	int		 retval;
+	MPT_ADAPTER	*ioc = hd->ioc;
+	unsigned long	 timeleft;
+	u8		 issue_hard_reset;
 	u32		 ioc_raw_state;
-	unsigned long	 flags;
-
-	ioc = hd->ioc;
-	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler Entered!\n", ioc->name));
-
-	// SJR - CHECKME - Can we avoid this here?
-	// (mpt_HardResetHandler has this check...)
-	spin_lock_irqsave(&ioc->diagLock, flags);
-	if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)) {
-		spin_unlock_irqrestore(&ioc->diagLock, flags);
-		return FAILED;
-	}
-	spin_unlock_irqrestore(&ioc->diagLock, flags);
-
-	/* Wait a fixed amount of time for the TM pending flag to be cleared.
-	 * If we time out and not bus reset, then we return a FAILED status
-	 * to the caller.
-	 * The call to mptscsih_tm_pending_wait() will set the pending flag
-	 * if we are
-	 * successful. Otherwise, reload the FW.
-	 */
-	if (mptscsih_tm_pending_wait(hd) == FAILED) {
-		if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
-			dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler abort: "
-			   "Timed out waiting for last TM (%d) to complete! \n",
-			   ioc->name, hd->tmPending));
-			return FAILED;
-		} else if (type == MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
-			dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler target "
-			   "reset: Timed out waiting for last TM (%d) "
-			   "to complete! \n", ioc->name,
-			   hd->tmPending));
-			return FAILED;
-		} else if (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) {
-			dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler bus reset: "
-			   "Timed out waiting for last TM (%d) to complete! \n",
-			   ioc->name, hd->tmPending));
-			return FAILED;
-		}
-	} else {
-		spin_lock_irqsave(&ioc->FreeQlock, flags);
-		hd->tmPending |=  (1 << type);
-		spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-	}
+	unsigned long	 time_count;
 
+	issue_hard_reset = 0;
 	ioc_raw_state = mpt_GetIocState(ioc, 0);
 
 	if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) {
 		printk(MYIOC_s_WARN_FMT
-			"TM Handler for type=%x: IOC Not operational (0x%x)!\n",
+			"TaskMgmt type=%x: IOC Not operational (0x%x)!\n",
 			ioc->name, type, ioc_raw_state);
-		printk(MYIOC_s_WARN_FMT " Issuing HardReset!!\n", ioc->name);
+		printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
+		    ioc->name, __func__);
 		if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0)
-			printk(MYIOC_s_WARN_FMT "TMHandler: HardReset "
+			printk(MYIOC_s_WARN_FMT "TaskMgmt HardReset "
			    "FAILED!!\n", ioc->name);
-		return FAILED;
+		return 0;
 	}
 
 	if (ioc_raw_state & MPI_DOORBELL_ACTIVE) {
 		printk(MYIOC_s_WARN_FMT
-			"TM Handler for type=%x: ioc_state: "
+			"TaskMgmt type=%x: ioc_state: "
 			"DOORBELL_ACTIVE (0x%x)!\n",
 			ioc->name, type, ioc_raw_state);
 		return FAILED;
 	}
 
-	/* Isse the Task Mgmt request.
-	 */
-	if (hd->hard_resets < -1)
-		hd->hard_resets++;
-
-	rc = mptscsih_IssueTaskMgmt(hd, type, channel, id, lun,
-	    ctx2abort, timeout);
-	if (rc)
-		printk(MYIOC_s_INFO_FMT "Issue of TaskMgmt failed!\n",
-		    ioc->name);
-	else
-		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Issue of TaskMgmt Successful!\n",
-		    ioc->name));
-
-	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-	    "TMHandler rc = %d!\n", ioc->name, rc));
-
-	return rc;
-}
-
-
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- *	mptscsih_IssueTaskMgmt - Generic send Task Management function.
- *	@hd: Pointer to MPT_SCSI_HOST structure
- *	@type: Task Management type
- *	@channel: channel number for task management
- *	@id: Logical Target ID for reset (if appropriate)
- *	@lun: Logical Unit for reset (if appropriate)
- *	@ctx2abort: Context for the task to be aborted (if appropriate)
- *	@timeout: timeout for task management control
- *
- *	Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
- *	or a non-interrupt thread.  In the former, must not call schedule().
- *
- *	Not all fields are meaningfull for all task types.
- *
- *	Returns 0 for SUCCESS, or FAILED.
- *
- **/
-static int
-mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout)
-{
-	MPT_FRAME_HDR	*mf;
-	SCSITaskMgmt_t	*pScsiTm;
-	int		 ii;
-	int		 retval;
-	MPT_ADAPTER	*ioc = hd->ioc;
+	mutex_lock(&ioc->taskmgmt_cmds.mutex);
+	if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
+		mf = NULL;
+		retval = FAILED;
+		goto out;
+	}
 
 	/* Return Fail to calling function if no message frames available.
 	 */
 	if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
-		dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "IssueTaskMgmt, no msg frames!!\n",
-		    ioc->name));
-		return FAILED;
+		dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+			"TaskMgmt no msg frames!!\n", ioc->name));
+		retval = FAILED;
+		mpt_clear_taskmgmt_in_progress_flag(ioc);
+		goto out;
 	}
-	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n",
+	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
	    ioc->name, mf));
 
 	/* Format the Request
@@ -1699,11 +1569,14 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, i
 
 	pScsiTm->TaskMsgContext = ctx2abort;
 
-	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt: ctx2abort (0x%08x) "
-		"type=%d\n", ioc->name, ctx2abort, type));
+	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt: ctx2abort (0x%08x) "
+		"task_type = 0x%02X, timeout = %ld\n", ioc->name, ctx2abort,
+		type, timeout));
 
 	DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)pScsiTm);
 
+	INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+	time_count = jiffies;
 	if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
 	    (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
 		mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf);
@@ -1711,47 +1584,50 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, i
 		retval = mpt_send_handshake_request(ioc->TaskCtx, ioc,
 			sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP);
 		if (retval) {
-			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "send_handshake FAILED!"
-				" (hd %p, ioc %p, mf %p, rc=%d) \n", ioc->name, hd,
-				ioc, mf, retval));
-			goto fail_out;
+			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+				"TaskMgmt handshake FAILED!(mf=%p, rc=%d) \n",
+				ioc->name, mf, retval));
+			mpt_free_msg_frame(ioc, mf);
+			mpt_clear_taskmgmt_in_progress_flag(ioc);
+			goto out;
 		}
 	}
 
-	if(mptscsih_tm_wait_for_completion(hd, timeout) == FAILED) {
-		dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "task management request TIMED OUT!"
-			" (hd %p, ioc %p, mf %p) \n", ioc->name, hd,
-			ioc, mf));
-		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n",
-			ioc->name));
-		retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
-		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rc=%d \n",
-			ioc->name, retval));
-		goto fail_out;
+	timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
+		timeout*HZ);
+	if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+		retval = FAILED;
+		dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
+			"TaskMgmt TIMED OUT!(mf=%p)\n", ioc->name, mf));
+		mpt_clear_taskmgmt_in_progress_flag(ioc);
+		if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+			goto out;
+		issue_hard_reset = 1;
+		goto out;
 	}
 
-	/*
-	 *	Handle success case, see if theres a non-zero ioc_status.
-	 */
-	if (hd->tm_iocstatus == MPI_IOCSTATUS_SUCCESS ||
-	   hd->tm_iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
-	   hd->tm_iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED)
-		retval = 0;
-	else
-		retval = FAILED;
+	retval = mptscsih_taskmgmt_reply(ioc, type,
+	    (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply);
 
-	return retval;
+	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+	    "TaskMgmt completed (%d seconds)\n",
+	    ioc->name, jiffies_to_msecs(jiffies - time_count)/1000));
 
- fail_out:
+ out:
 
-	/*
-	 * Free task management mf, and corresponding tm flags
-	 */
-	mpt_free_msg_frame(ioc, mf);
-	hd->tmPending = 0;
-	hd->tmState = TM_STATE_NONE;
-	return FAILED;
+	CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+	if (issue_hard_reset) {
+		printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
+		    ioc->name, __func__);
+		retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
+		mpt_free_msg_frame(ioc, mf);
+	}
+
+	retval = (retval == 0) ? 0 : FAILED;
+	mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+	return retval;
 }
+EXPORT_SYMBOL(mptscsih_IssueTaskMgmt);
 
 static int
 mptscsih_get_tm_timeout(MPT_ADAPTER *ioc)
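The rewritten mptscsih_IssueTaskMgmt() serializes callers on taskmgmt_cmds.mutex, marks a TM in progress, fires the request, and then blocks on a kernel completion with a timeout instead of the old 250 ms polling loops. Stripped of the driver's escalation logic, the waiting pattern it open-codes looks roughly like this hedged fragment (not standalone-buildable; it reuses the macros visible in the hunk above):

	INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
	/* ...format and send the SCSITaskMgmt_t frame... */
	wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout * HZ);
	if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
		/* No reply arrived: either the IOC was reset underneath us
		 * (DID_IOCRESET), or the request is stuck and the caller
		 * must escalate to a hard reset. */
	}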
@@ -1838,13 +1714,8 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
 		goto out;
 	}
 
-	if (hd->resetPending) {
-		retval = FAILED;
-		goto out;
-	}
-
-	if (hd->timeouts < -1)
-		hd->timeouts++;
+	if (ioc->timeouts < -1)
+		ioc->timeouts++;
 
 	if (mpt_fwfault_debug)
 		mpt_halt_firmware(ioc);
@@ -1861,22 +1732,30 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
 
 	hd->abortSCpnt = SCpnt;
 
-	retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
-	    vdevice->vtarget->channel, vdevice->vtarget->id, vdevice->lun,
-	    ctx2abort, mptscsih_get_tm_timeout(ioc));
+	retval = mptscsih_IssueTaskMgmt(hd,
+	    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+	    vdevice->vtarget->channel,
+	    vdevice->vtarget->id, vdevice->lun,
+	    ctx2abort, mptscsih_get_tm_timeout(ioc));
 
 	if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx &&
-	    SCpnt->serial_number == sn)
+	    SCpnt->serial_number == sn) {
+		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+		    "task abort: command still in active list! (sc=%p)\n",
+		    ioc->name, SCpnt));
 		retval = FAILED;
+	} else {
+		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+		    "task abort: command cleared from active list! (sc=%p)\n",
+		    ioc->name, SCpnt));
+		retval = SUCCESS;
+	}
 
  out:
 	printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
-	    ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
+	    ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
 
-	if (retval == 0)
-		return SUCCESS;
-	else
-		return FAILED;
+	return retval;
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1909,14 +1788,9 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
 	    ioc->name, SCpnt);
 	scsi_print_command(SCpnt);
 
-	if (hd->resetPending) {
-		retval = FAILED;
-		goto out;
-	}
-
 	vdevice = SCpnt->device->hostdata;
 	if (!vdevice || !vdevice->vtarget) {
-		retval = 0;
+		retval = SUCCESS;
 		goto out;
 	}
 
@@ -1927,9 +1801,11 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
 		goto out;
 	}
 
-	retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
-	    vdevice->vtarget->channel, vdevice->vtarget->id, 0, 0,
-	    mptscsih_get_tm_timeout(ioc));
+	retval = mptscsih_IssueTaskMgmt(hd,
+	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+	    vdevice->vtarget->channel,
+	    vdevice->vtarget->id, 0, 0,
+	    mptscsih_get_tm_timeout(ioc));
 
  out:
 	printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n",
@@ -1972,12 +1848,16 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
 	    ioc->name, SCpnt);
 	scsi_print_command(SCpnt);
 
-	if (hd->timeouts < -1)
-		hd->timeouts++;
+	if (ioc->timeouts < -1)
+		ioc->timeouts++;
 
 	vdevice = SCpnt->device->hostdata;
-	retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
-	    vdevice->vtarget->channel, 0, 0, 0, mptscsih_get_tm_timeout(ioc));
+	if (!vdevice || !vdevice->vtarget)
+		return SUCCESS;
+	retval = mptscsih_IssueTaskMgmt(hd,
+	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+	    vdevice->vtarget->channel, 0, 0, 0,
+	    mptscsih_get_tm_timeout(ioc));
 
 	printk(MYIOC_s_INFO_FMT "bus reset: %s (sc=%p)\n",
 	    ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
@@ -2001,8 +1881,9 @@ int
 mptscsih_host_reset(struct scsi_cmnd *SCpnt)
 {
 	MPT_SCSI_HOST *  hd;
-	int              retval;
+	int              status = SUCCESS;
 	MPT_ADAPTER	*ioc;
+	int		retval;
 
 	/*  If we can't locate the host to reset, then we failed. */
 	if ((hd = shost_priv(SCpnt->device->host)) == NULL){
@@ -2021,86 +1902,71 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
 	/*  If our attempts to reset the host failed, then return a failed
 	 *  status.  The host will be taken off line by the SCSI mid-layer.
 	 */
-	if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0) {
-		retval = FAILED;
-	} else {
-		/*  Make sure TM pending is cleared and TM state is set to
-		 *  NONE.
-		 */
-		retval = 0;
-		hd->tmPending = 0;
-		hd->tmState = TM_STATE_NONE;
-	}
+	retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
+	if (retval < 0)
+		status = FAILED;
+	else
+		status = SUCCESS;
 
 	printk(MYIOC_s_INFO_FMT "host reset: %s (sc=%p)\n",
 	    ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
 
-	return retval;
+	return status;
 }
 
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- *	mptscsih_tm_pending_wait - wait for pending task management request to complete
- *	@hd: Pointer to MPT host structure.
- *
- *	Returns {SUCCESS,FAILED}.
- */
 static int
-mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd)
+mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
+	SCSITaskMgmtReply_t *pScsiTmReply)
 {
-	unsigned long flags;
-	int loop_count = 4 * 10;  /* Wait 10 seconds */
-	int status = FAILED;
-	MPT_ADAPTER *ioc = hd->ioc;
+	u16	 iocstatus;
+	u32	 termination_count;
+	int	 retval;
 
-	do {
-		spin_lock_irqsave(&ioc->FreeQlock, flags);
-		if (hd->tmState == TM_STATE_NONE) {
-			hd->tmState = TM_STATE_IN_PROGRESS;
-			hd->tmPending = 1;
-			spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-			status = SUCCESS;
-			break;
-		}
-		spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-		msleep(250);
-	} while (--loop_count);
+	if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+		retval = FAILED;
+		goto out;
+	}
 
-	return status;
-}
+	DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply);
 
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- *	mptscsih_tm_wait_for_completion - wait for completion of TM task
- *	@hd: Pointer to MPT host structure.
- *	@timeout: timeout value
- *
- *	Returns {SUCCESS,FAILED}.
- */
-static int
-mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout )
-{
-	unsigned long flags;
-	int loop_count = 4 * timeout;
-	int status = FAILED;
-	MPT_ADAPTER *ioc = hd->ioc;
+	iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+	termination_count = le32_to_cpu(pScsiTmReply->TerminationCount);
 
-	do {
-		spin_lock_irqsave(&ioc->FreeQlock, flags);
-		if(hd->tmPending == 0) {
-			status = SUCCESS;
-			spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-			break;
-		}
-		spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-		msleep(250);
-	} while (--loop_count);
+	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+	    "TaskMgmt fw_channel = %d, fw_id = %d, task_type = 0x%02X,\n"
+	    "\tiocstatus = 0x%04X, loginfo = 0x%08X, response_code = 0x%02X,\n"
+	    "\tterm_cmnds = %d\n", ioc->name, pScsiTmReply->Bus,
+	    pScsiTmReply->TargetID, type, le16_to_cpu(pScsiTmReply->IOCStatus),
+	    le32_to_cpu(pScsiTmReply->IOCLogInfo), pScsiTmReply->ResponseCode,
+	    termination_count));
 
-	return status;
+	if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 &&
+	    pScsiTmReply->ResponseCode)
+		mptscsih_taskmgmt_response_code(ioc,
+		    pScsiTmReply->ResponseCode);
+
+	if (iocstatus == MPI_IOCSTATUS_SUCCESS) {
+		retval = 0;
+		goto out;
+	}
+
+	retval = FAILED;
+	if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
+		if (termination_count == 1)
+			retval = 0;
+		goto out;
+	}
+
+	if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
+	   iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED)
+		retval = 0;
+
+ out:
+	return retval;
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-static void
+void
 mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
 {
 	char *desc;
@@ -2134,6 +2000,7 @@ mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
 	printk(MYIOC_s_INFO_FMT "Response Code(0x%08x): F/W: %s\n",
 		ioc->name, response_code, desc);
 }
+EXPORT_SYMBOL(mptscsih_taskmgmt_response_code);
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
@@ -2150,97 +2017,28 @@ mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
 *	Returns 1 indicating alloc'd request frame ptr should be freed.
 **/
 int
-mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
+mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
+	MPT_FRAME_HDR *mr)
 {
-	SCSITaskMgmtReply_t	*pScsiTmReply;
-	SCSITaskMgmt_t		*pScsiTmReq;
-	MPT_SCSI_HOST		*hd;
-	unsigned long		 flags;
-	u16			 iocstatus;
-	u8			 tmType;
-	u32			 termination_count;
-
-	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed (mf=%p,mr=%p)\n",
-	    ioc->name, mf, mr));
-	if (!ioc->sh) {
-		dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
-		    "TaskMgmt Complete: NULL Scsi Host Ptr\n", ioc->name));
-		return 1;
-	}
-
-	if (mr == NULL) {
-		dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
-		    "ERROR! TaskMgmt Reply: NULL Request %p\n", ioc->name, mf));
-		return 1;
-	}
-
-	hd = shost_priv(ioc->sh);
-	pScsiTmReply = (SCSITaskMgmtReply_t*)mr;
-	pScsiTmReq = (SCSITaskMgmt_t*)mf;
-	tmType = pScsiTmReq->TaskType;
-	iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
-	termination_count = le32_to_cpu(pScsiTmReply->TerminationCount);
+	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+	    "TaskMgmt completed (mf=%p, mr=%p)\n", ioc->name, mf, mr));
 
-	if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 &&
-	    pScsiTmReply->ResponseCode)
-		mptscsih_taskmgmt_response_code(ioc,
-		    pScsiTmReply->ResponseCode);
-	DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply);
+	ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
 
-#ifdef CONFIG_FUSION_LOGGING
-	if ((ioc->debug_level & MPT_DEBUG_REPLY) ||
-				(ioc->debug_level & MPT_DEBUG_TM ))
-		printk("%s: ha=%d [%d:%d:0] task_type=0x%02X "
-			"iocstatus=0x%04X\n\tloginfo=0x%08X response_code=0x%02X "
-			"term_cmnds=%d\n", __func__, ioc->id, pScsiTmReply->Bus,
-			pScsiTmReply->TargetID, pScsiTmReq->TaskType,
-			le16_to_cpu(pScsiTmReply->IOCStatus),
-			le32_to_cpu(pScsiTmReply->IOCLogInfo),pScsiTmReply->ResponseCode,
-			le32_to_cpu(pScsiTmReply->TerminationCount));
-#endif
-	if (!iocstatus) {
-		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT " TaskMgmt SUCCESS\n", ioc->name));
-		hd->abortSCpnt = NULL;
+	if (!mr)
 		goto out;
-	}
-
-	/* Error?  (anything non-zero?) */
-
-	/* clear flags and continue.
-	 */
-	switch (tmType) {
-
-	case MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
-		if (termination_count == 1)
-			iocstatus = MPI_IOCSTATUS_SCSI_TASK_TERMINATED;
-		hd->abortSCpnt = NULL;
-		break;
-
-	case MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS:
-
-		/* If an internal command is present
-		 * or the TM failed - reload the FW.
-		 * FC FW may respond FAILED to an ABORT
-		 */
-		if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED ||
-		    hd->cmdPtr)
-			if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0)
-				printk(MYIOC_s_WARN_FMT " Firmware Reload FAILED!!\n", ioc->name);
-		break;
-
-	case MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
-	default:
-		break;
-	}
 
+	ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+	memcpy(ioc->taskmgmt_cmds.reply, mr,
+	    min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
 out:
-	spin_lock_irqsave(&ioc->FreeQlock, flags);
-	hd->tmPending = 0;
-	hd->tmState = TM_STATE_NONE;
-	hd->tm_iocstatus = iocstatus;
-	spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-
-	return 1;
+	if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
+		mpt_clear_taskmgmt_in_progress_flag(ioc);
+		ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+		complete(&ioc->taskmgmt_cmds.done);
+		return 1;
+	}
+	return 0;
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
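mptscsih_taskmgmt_complete() is now the producer half of the completion introduced above: it caches the reply frame and signals the waiter only while MPT_MGMT_STATUS_PENDING is still set, which closes the window where a late reply races a waiter that already timed out. The pairing in miniature (illustrative types, not the driver's exact code; init_completion() on the done field is assumed to have run earlier):

	struct example_mgmt {
		struct completion done;
		u8 status;
	};

	/* interrupt path */
	static void example_reply(struct example_mgmt *cmd)
	{
		cmd->status |= MPT_MGMT_STATUS_COMMAND_GOOD;
		if (cmd->status & MPT_MGMT_STATUS_PENDING) {
			cmd->status &= ~MPT_MGMT_STATUS_PENDING;
			complete(&cmd->done);	/* wake the one waiter */
		}
		/* If PENDING was already clear, the waiter gave up; the
		 * stale reply is dropped rather than completing nobody. */
	}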
@@ -2290,8 +2088,10 @@ int
 mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id)
 {
 	struct inactive_raid_component_info *component_info;
-	int i;
+	int i, j;
+	RaidPhysDiskPage1_t *phys_disk;
 	int rc = 0;
+	int num_paths;
 
 	if (!ioc->raid_data.pIocPg3)
 		goto out;
@@ -2303,6 +2103,45 @@ mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id)
 		}
 	}
 
+	if (ioc->bus_type != SAS)
+		goto out;
+
+	/*
+	 * Check if dual path
+	 */
+	for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
+		num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
+		    ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
+		if (num_paths < 2)
+			continue;
+		phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
+		   (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
+		if (!phys_disk)
+			continue;
+		if ((mpt_raid_phys_disk_pg1(ioc,
+		    ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
+		    phys_disk))) {
+			kfree(phys_disk);
+			continue;
+		}
+		for (j = 0; j < num_paths; j++) {
+			if ((phys_disk->Path[j].Flags &
+			    MPI_RAID_PHYSDISK1_FLAG_INVALID))
+				continue;
+			if ((phys_disk->Path[j].Flags &
+			    MPI_RAID_PHYSDISK1_FLAG_BROKEN))
+				continue;
+			if ((id == phys_disk->Path[j].PhysDiskID) &&
+			    (channel == phys_disk->Path[j].PhysDiskBus)) {
+				rc = 1;
+				kfree(phys_disk);
+				goto out;
+			}
+		}
+		kfree(phys_disk);
+	}
+
+
 	/*
 	 * Check inactive list for matching phys disks
 	 */
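RaidPhysDiskPage1_t is a variable-length configuration page, with one RAID_PHYS_DISK1_PATH element per path, so the loop above sizes the buffer with offsetof() rather than sizeof(): sizeof() would already count the one Path[] element declared in the struct and over- or under-allocate depending on num_paths. The idiom in isolation:

	/* Fixed header plus exactly num_paths path entries: */
	phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
	    num_paths * sizeof(RAID_PHYS_DISK1_PATH), GFP_KERNEL);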
@@ -2327,8 +2166,10 @@ u8
 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
 {
 	struct inactive_raid_component_info *component_info;
-	int i;
+	int i, j;
+	RaidPhysDiskPage1_t *phys_disk;
 	int rc = -ENXIO;
+	int num_paths;
 
 	if (!ioc->raid_data.pIocPg3)
 		goto out;
@@ -2340,6 +2181,44 @@ mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
 		}
 	}
 
+	if (ioc->bus_type != SAS)
+		goto out;
+
+	/*
+	 * Check if dual path
+	 */
+	for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
+		num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
+		    ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
+		if (num_paths < 2)
+			continue;
+		phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
+		   (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
+		if (!phys_disk)
+			continue;
+		if ((mpt_raid_phys_disk_pg1(ioc,
+		    ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
+		    phys_disk))) {
+			kfree(phys_disk);
+			continue;
+		}
+		for (j = 0; j < num_paths; j++) {
+			if ((phys_disk->Path[j].Flags &
+			    MPI_RAID_PHYSDISK1_FLAG_INVALID))
+				continue;
+			if ((phys_disk->Path[j].Flags &
+			    MPI_RAID_PHYSDISK1_FLAG_BROKEN))
+				continue;
+			if ((id == phys_disk->Path[j].PhysDiskID) &&
+			    (channel == phys_disk->Path[j].PhysDiskBus)) {
+				rc = phys_disk->PhysDiskNum;
+				kfree(phys_disk);
+				goto out;
+			}
+		}
+		kfree(phys_disk);
+	}
+
 	/*
 	 * Check inactive list for matching phys disks
 	 */
@@ -2457,7 +2336,6 @@ mptscsih_slave_configure(struct scsi_device *sdev)
 		    sdev->ppr, sdev->inquiry_len));
 
 	vdevice->configured_lun = 1;
-	mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
 
 	dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 		"Queue depth=%d, tflags=%x\n",
@@ -2469,6 +2347,7 @@ mptscsih_slave_configure(struct scsi_device *sdev)
 		    ioc->name, vtarget->negoFlags, vtarget->maxOffset,
 		    vtarget->minSyncFactor));
 
+	mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
 	dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 		"tagged %d, simple %d, ordered %d\n",
 		ioc->name,sdev->tagged_supported, sdev->simple_tags,
@@ -2542,15 +2421,13 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
 }
 
 /**
- *	mptscsih_get_scsi_lookup
+ *	mptscsih_get_scsi_lookup - retrieves scmd entry
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@i: index into the array
 *
- *	retrieves scmd entry from ScsiLookup[] array list
- *
 *	Returns the scsi_cmd pointer
- **/
-static struct scsi_cmnd *
+ */
+struct scsi_cmnd *
 mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
 {
 	unsigned long	flags;
@@ -2562,15 +2439,15 @@ mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
 
 	return scmd;
 }
+EXPORT_SYMBOL(mptscsih_get_scsi_lookup);
 
 /**
- *	mptscsih_getclear_scsi_lookup
+ *	mptscsih_getclear_scsi_lookup - retrieves and clears scmd entry from ScsiLookup[] array list
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@i: index into the array
 *
- *	retrieves and clears scmd entry from ScsiLookup[] array list
- *
 *	Returns the scsi_cmd pointer
+ *
 **/
 static struct scsi_cmnd *
 mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i)
@@ -2635,94 +2512,33 @@ int
 mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 {
 	MPT_SCSI_HOST	*hd;
-	unsigned long	 flags;
 
-	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-	    ": IOC %s_reset routed to SCSI host driver!\n",
-	    ioc->name, reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
-	    reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
-
-	/* If a FW reload request arrives after base installed but
-	 * before all scsi hosts have been attached, then an alt_ioc
-	 * may have a NULL sh pointer.
-	 */
 	if (ioc->sh == NULL || shost_priv(ioc->sh) == NULL)
 		return 0;
-	else
-		hd = shost_priv(ioc->sh);
-
-	if (reset_phase == MPT_IOC_SETUP_RESET) {
-		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Setup-Diag Reset\n", ioc->name));
-
-		/* Clean Up:
-		 * 1. Set Hard Reset Pending Flag
-		 * All new commands go to doneQ
-		 */
-		hd->resetPending = 1;
-
-	} else if (reset_phase == MPT_IOC_PRE_RESET) {
-		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Pre-Diag Reset\n", ioc->name));
 
-		/* 2. Flush running commands
-		 *	Clean ScsiLookup (and associated memory)
-		 *	AND clean mytaskQ
-		 */
-
-		/* 2b. Reply to OS all known outstanding I/O commands.
-		 */
+	hd = shost_priv(ioc->sh);
+	switch (reset_phase) {
+	case MPT_IOC_SETUP_RESET:
+		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+		    "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
+		break;
+	case MPT_IOC_PRE_RESET:
+		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+		    "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
 		mptscsih_flush_running_cmds(hd);
-
-		/* 2c. If there was an internal command that
-		 * has not completed, configuration or io request,
-		 * free these resources.
-		 */
-		if (hd->cmdPtr) {
-			del_timer(&hd->timer);
-			mpt_free_msg_frame(ioc, hd->cmdPtr);
-		}
-
-		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Pre-Reset complete.\n", ioc->name));
-
-	} else {
-		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Post-Diag Reset\n", ioc->name));
-
-		/* Once a FW reload begins, all new OS commands are
-		 * redirected to the doneQ w/ a reset status.
-		 * Init all control structures.
-		 */
-
-		/* 2. Chain Buffer initialization
-		 */
-
-		/* 4. Renegotiate to all devices, if SPI
-		 */
-
-		/* 5. Enable new commands to be posted
-		 */
-		spin_lock_irqsave(&ioc->FreeQlock, flags);
-		hd->tmPending = 0;
-		spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-		hd->resetPending = 0;
-		hd->tmState = TM_STATE_NONE;
-
-		/* 6. If there was an internal command,
-		 * wake this process up.
-		 */
-		if (hd->cmdPtr) {
-			/*
-			 * Wake up the original calling thread
-			 */
-			hd->pLocal = &hd->localReply;
-			hd->pLocal->completion = MPT_SCANDV_DID_RESET;
-			hd->scandv_wait_done = 1;
-			wake_up(&hd->scandv_waitq);
-			hd->cmdPtr = NULL;
+		break;
+	case MPT_IOC_POST_RESET:
+		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+		    "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
+		if (ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING) {
+			ioc->internal_cmds.status |=
+			    MPT_MGMT_STATUS_DID_IOCRESET;
+			complete(&ioc->internal_cmds.done);
 		}
-
-		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Post-Reset complete.\n", ioc->name));
-
+		break;
+	default:
+		break;
 	}
-
 	return 1;		/* currently means nothing really */
 }
 
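The POST_RESET branch above exists so a thread blocked on internal_cmds.done does not sleep forever when the IOC is reset while its command is in flight: the reset path marks the status with MPT_MGMT_STATUS_DID_IOCRESET and completes the waiter. The waiter side then has to distinguish a real reply from a reset wake-up; a hedged fragment of that check (labels and return value are illustrative):

	/* After wait_for_completion_timeout() returns on the waiter side: */
	if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
		ret = -ECANCELED;	/* reset won the race; reply invalid */
		goto out;
	}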
@@ -2730,55 +2546,16 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 int
 mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
 {
-	MPT_SCSI_HOST *hd;
 	u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
 
-	devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
-	    ioc->name, event));
-
-	if (ioc->sh == NULL ||
-	    ((hd = shost_priv(ioc->sh)) == NULL))
-		return 1;
-
-	switch (event) {
-	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
-		/* FIXME! */
-		break;
-	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
-	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
-		if (hd && (ioc->bus_type == SPI) && (hd->soft_resets < -1))
-			hd->soft_resets++;
-		break;
-	case MPI_EVENT_LOGOUT:				/* 09 */
-		/* FIXME! */
-		break;
-
-	case MPI_EVENT_RESCAN:				/* 06 */
-		break;
-
-	/*
-	 *  CHECKME! Don't think we need to do
-	 *  anything for these, but...
-	 */
-	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
-	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
-	/*
-	 *	CHECKME!  Falling thru...
-	 */
-		break;
-
-	case MPI_EVENT_INTEGRATED_RAID:			/* 0B */
-		break;
+	devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+	    "MPT event (=%02Xh) routed to SCSI host driver!\n",
+	    ioc->name, event));
 
-	case MPI_EVENT_NONE:				/* 00 */
-	case MPI_EVENT_LOG_DATA:			/* 01 */
-	case MPI_EVENT_STATE_CHANGE:			/* 02 */
-	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
-	default:
-		dprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": Ignoring event (=%02Xh)\n",
-		    ioc->name, event));
-		break;
-	}
+	if ((event == MPI_EVENT_IOC_BUS_RESET ||
+	    event == MPI_EVENT_EXT_BUS_RESET) &&
+	    (ioc->bus_type == SPI) && (ioc->soft_resets < -1))
+		ioc->soft_resets++;
 
 	return 1;		/* currently means nothing really */
 }
@@ -2809,153 +2586,44 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
  * Used ONLY for DV and other internal commands.
  */
 int
-mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
+mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
+	MPT_FRAME_HDR *reply)
 {
-	MPT_SCSI_HOST	*hd;
 	SCSIIORequest_t *pReq;
-	int		 completionCode;
+	SCSIIOReply_t	*pReply;
+	u8		 cmd;
 	u16		 req_idx;
+	u8	*sense_data;
+	int	 sz;
 
-	hd = shost_priv(ioc->sh);
-
-	if ((mf == NULL) ||
-	    (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) {
-		printk(MYIOC_s_ERR_FMT
-			"ScanDvComplete, %s req frame ptr! (=%p)\n",
-				ioc->name, mf?"BAD":"NULL", (void *) mf);
-		goto wakeup;
-	}
-
-	del_timer(&hd->timer);
-	req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
-	mptscsih_set_scsi_lookup(ioc, req_idx, NULL);
-	pReq = (SCSIIORequest_t *) mf;
-
-	if (mf != hd->cmdPtr) {
-		printk(MYIOC_s_WARN_FMT "ScanDvComplete (mf=%p, cmdPtr=%p, idx=%d)\n",
-				ioc->name, (void *)mf, (void *) hd->cmdPtr, req_idx);
-	}
-	hd->cmdPtr = NULL;
-
-	ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScanDvComplete (mf=%p,mr=%p,idx=%d)\n",
-			ioc->name, mf, mr, req_idx));
-
-	hd->pLocal = &hd->localReply;
-	hd->pLocal->scsiStatus = 0;
-
-	/* If target struct exists, clear sense valid flag.
-	 */
-	if (mr == NULL) {
-		completionCode = MPT_SCANDV_GOOD;
-	} else {
-		SCSIIOReply_t	*pReply;
-		u16		 status;
-		u8		 scsi_status;
-
-		pReply = (SCSIIOReply_t *) mr;
-
-		status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
-		scsi_status = pReply->SCSIStatus;
-
-
-		switch(status) {
-
-		case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:	/* 0x0043 */
-			completionCode = MPT_SCANDV_SELECTION_TIMEOUT;
-			break;
-
-		case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:		/* 0x0046 */
-		case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:	/* 0x0048 */
-		case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:		/* 0x004B */
-		case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:		/* 0x004C */
-			completionCode = MPT_SCANDV_DID_RESET;
-			break;
-
-		case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:		/* 0x0045 */
-		case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:	/* 0x0040 */
-		case MPI_IOCSTATUS_SUCCESS:			/* 0x0000 */
-			if (pReply->Function == MPI_FUNCTION_CONFIG) {
-				ConfigReply_t *pr = (ConfigReply_t *)mr;
-				completionCode = MPT_SCANDV_GOOD;
-				hd->pLocal->header.PageVersion = pr->Header.PageVersion;
-				hd->pLocal->header.PageLength = pr->Header.PageLength;
-				hd->pLocal->header.PageNumber = pr->Header.PageNumber;
-				hd->pLocal->header.PageType = pr->Header.PageType;
-
-			} else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
-				/* If the RAID Volume request is successful,
-				 * return GOOD, else indicate that
-				 * some type of error occurred.
-				 */
-				MpiRaidActionReply_t	*pr = (MpiRaidActionReply_t *)mr;
-				if (le16_to_cpu(pr->ActionStatus) == MPI_RAID_ACTION_ASTATUS_SUCCESS)
-					completionCode = MPT_SCANDV_GOOD;
-				else
-					completionCode = MPT_SCANDV_SOME_ERROR;
-				memcpy(hd->pLocal->sense, pr, sizeof(hd->pLocal->sense));
-
-			} else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
-				u8		*sense_data;
-				int		 sz;
-
-				/* save sense data in global structure
-				 */
-				completionCode = MPT_SCANDV_SENSE;
-				hd->pLocal->scsiStatus = scsi_status;
-				sense_data = ((u8 *)ioc->sense_buf_pool +
-					(req_idx * MPT_SENSE_BUFFER_ALLOC));
-
-				sz = min_t(int, pReq->SenseBufferLength,
-							SCSI_STD_SENSE_BYTES);
-				memcpy(hd->pLocal->sense, sense_data, sz);
-
-				ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "  Check Condition, sense ptr %p\n",
-				    ioc->name, sense_data));
-			} else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
-				if (pReq->CDB[0] == INQUIRY)
-					completionCode = MPT_SCANDV_ISSUE_SENSE;
-				else
-					completionCode = MPT_SCANDV_DID_RESET;
-			}
-			else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
-				completionCode = MPT_SCANDV_DID_RESET;
-			else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
-				completionCode = MPT_SCANDV_DID_RESET;
-			else {
-				completionCode = MPT_SCANDV_GOOD;
-				hd->pLocal->scsiStatus = scsi_status;
-			}
-			break;
-
-		case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:	/* 0x0047 */
-			if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
-				completionCode = MPT_SCANDV_DID_RESET;
-			else
-				completionCode = MPT_SCANDV_SOME_ERROR;
-			break;
-
-		default:
-			completionCode = MPT_SCANDV_SOME_ERROR;
-			break;
-
-		}	/* switch(status) */
-
-	} /* end of address reply case */
-
-	hd->pLocal->completion = completionCode;
-
-	/* MF and RF are freed in mpt_interrupt
-	 */
-wakeup:
-	/* Free Chain buffers (will never chain) in scan or dv */
-	//mptscsih_freeChainBuffers(ioc, req_idx);
-
-	/*
-	 * Wake up the original calling thread
-	 */
-	hd->scandv_wait_done = 1;
-	wake_up(&hd->scandv_waitq);
+	ioc->internal_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
+	ioc->internal_cmds.completion_code = MPT_SCANDV_GOOD;
+	if (!reply)
+		goto out;
+
+	pReply = (SCSIIOReply_t *) reply;
+	pReq = (SCSIIORequest_t *) req;
+	ioc->internal_cmds.completion_code =
+	    mptscsih_get_completion_code(ioc, req, reply);
+	ioc->internal_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+	memcpy(ioc->internal_cmds.reply, reply,
+	    min(MPT_DEFAULT_FRAME_SIZE, 4 * reply->u.reply.MsgLength));
+	cmd = reply->u.hdr.Function;
+	if (((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) ||
+	    (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) &&
+	    (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)) {
+		req_idx = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
+		sense_data = ((u8 *)ioc->sense_buf_pool +
+		    (req_idx * MPT_SENSE_BUFFER_ALLOC));
+		sz = min_t(int, pReq->SenseBufferLength,
+		    MPT_SENSE_BUFFER_ALLOC);
+		memcpy(ioc->internal_cmds.sense, sense_data, sz);
+	}
+ out:
+	if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING))
+		return 0;
+	ioc->internal_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+	complete(&ioc->internal_cmds.done);
 
 	return 1;
 }
 
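
Note how the sense-data copy above clamps with min_t(): the SenseBufferLength reported in the request frame is never trusted as a copy size, so a malformed value cannot overrun the preallocated per-request sense slot. The same clamping idiom in isolation, with illustrative names and sizes (not the driver's actual constants):

	#include <linux/kernel.h>	/* min_t() */
	#include <linux/string.h>

	#define SENSE_SLOT_SIZE	96	/* illustrative buffer size */

	static void copy_sense_clamped(unsigned char *dst,
				       const unsigned char *src,
				       int reported_len)
	{
		/* Clamp the firmware-reported length to our buffer. */
		int sz = min_t(int, reported_len, SENSE_SLOT_SIZE);

		memcpy(dst, src, sz);
	}
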
@@ -3004,6 +2672,95 @@ mptscsih_timer_expired(unsigned long data)
 	return;
 }
 
+/**
+ *	mptscsih_get_completion_code -
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@req: Pointer to original MPT request frame
+ *	@reply: Pointer to MPT reply frame (NULL if TurboReply)
+ *
+ **/
+static int
+mptscsih_get_completion_code(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
+	MPT_FRAME_HDR *reply)
+{
+	SCSIIOReply_t	*pReply;
+	MpiRaidActionReply_t *pr;
+	u8		 scsi_status;
+	u16		 status;
+	int		 completion_code;
+
+	pReply = (SCSIIOReply_t *)reply;
+	status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+	scsi_status = pReply->SCSIStatus;
+
+	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+	    "IOCStatus=%04xh, SCSIState=%02xh, SCSIStatus=%02xh,"
+	    "IOCLogInfo=%08xh\n", ioc->name, status, pReply->SCSIState,
+	    scsi_status, le32_to_cpu(pReply->IOCLogInfo)));
+
+	switch (status) {
+
+	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:	/* 0x0043 */
+		completion_code = MPT_SCANDV_SELECTION_TIMEOUT;
+		break;
+
+	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:		/* 0x0046 */
+	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:	/* 0x0048 */
+	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:		/* 0x004B */
+	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:		/* 0x004C */
+		completion_code = MPT_SCANDV_DID_RESET;
+		break;
+
+	case MPI_IOCSTATUS_BUSY:
+	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
+		completion_code = MPT_SCANDV_BUSY;
+		break;
+
+	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:		/* 0x0045 */
+	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:	/* 0x0040 */
+	case MPI_IOCSTATUS_SUCCESS:			/* 0x0000 */
+		if (pReply->Function == MPI_FUNCTION_CONFIG) {
+			completion_code = MPT_SCANDV_GOOD;
+		} else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
+			pr = (MpiRaidActionReply_t *)reply;
+			if (le16_to_cpu(pr->ActionStatus) ==
+				MPI_RAID_ACTION_ASTATUS_SUCCESS)
+				completion_code = MPT_SCANDV_GOOD;
+			else
+				completion_code = MPT_SCANDV_SOME_ERROR;
+		} else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)
+			completion_code = MPT_SCANDV_SENSE;
+		else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
+			if (req->u.scsireq.CDB[0] == INQUIRY)
+				completion_code = MPT_SCANDV_ISSUE_SENSE;
+			else
+				completion_code = MPT_SCANDV_DID_RESET;
+		} else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
+			completion_code = MPT_SCANDV_DID_RESET;
+		else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
+			completion_code = MPT_SCANDV_DID_RESET;
+		else if (scsi_status == MPI_SCSI_STATUS_BUSY)
+			completion_code = MPT_SCANDV_BUSY;
+		else
+			completion_code = MPT_SCANDV_GOOD;
+		break;
+
+	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:		/* 0x0047 */
+		if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
+			completion_code = MPT_SCANDV_DID_RESET;
+		else
+			completion_code = MPT_SCANDV_SOME_ERROR;
+		break;
+	default:
+		completion_code = MPT_SCANDV_SOME_ERROR;
+		break;
+
+	}	/* switch(status) */
+
+	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+	    "  completionCode set to %08xh\n", ioc->name, completion_code));
+	return completion_code;
+}
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
@@ -3030,22 +2787,27 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 {
 	MPT_FRAME_HDR	*mf;
 	SCSIIORequest_t	*pScsiReq;
-	SCSIIORequest_t	 ReqCopy;
 	int		 my_idx, ii, dir;
-	int		 rc, cmdTimeout;
-	int		in_isr;
+	int		 timeout;
 	char		 cmdLen;
 	char		 CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
-	char		 cmd = io->cmd;
+	u8		 cmd = io->cmd;
 	MPT_ADAPTER *ioc = hd->ioc;
+	int		 ret = 0;
+	unsigned long	 timeleft;
+	unsigned long	 flags;
 
-	in_isr = in_interrupt();
-	if (in_isr) {
-		dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Internal SCSI IO request not allowed in ISR context!\n",
-			ioc->name));
-		return -EPERM;
+	/* don't send internal command during diag reset */
+	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+	if (ioc->ioc_reset_in_progress) {
+		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+		dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+		    "%s: busy with host reset\n", ioc->name, __func__));
+		return MPT_SCANDV_BUSY;
 	}
+	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 
+	mutex_lock(&ioc->internal_cmds.mutex);
 
 	/* Set command specific information
 	 */
@@ -3055,13 +2817,13 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 		dir = MPI_SCSIIO_CONTROL_READ;
 		CDB[0] = cmd;
 		CDB[4] = io->size;
-		cmdTimeout = 10;
+		timeout = 10;
 		break;
 
 	case TEST_UNIT_READY:
 		cmdLen = 6;
 		dir = MPI_SCSIIO_CONTROL_READ;
-		cmdTimeout = 10;
+		timeout = 10;
 		break;
 
 	case START_STOP:
@@ -3069,7 +2831,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 		dir = MPI_SCSIIO_CONTROL_READ;
 		CDB[0] = cmd;
 		CDB[4] = 1;	/*Spin up the disk */
-		cmdTimeout = 15;
+		timeout = 15;
 		break;
 
 	case REQUEST_SENSE:
@@ -3077,7 +2839,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 		CDB[0] = cmd;
 		CDB[4] = io->size;
 		dir = MPI_SCSIIO_CONTROL_READ;
-		cmdTimeout = 10;
+		timeout = 10;
 		break;
 
 	case READ_BUFFER:
@@ -3096,7 +2858,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 		CDB[6] = (io->size >> 16) & 0xFF;
 		CDB[7] = (io->size >>  8) & 0xFF;
 		CDB[8] = io->size & 0xFF;
-		cmdTimeout = 10;
+		timeout = 10;
 		break;
 
 	case WRITE_BUFFER:
@@ -3111,21 +2873,21 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 		CDB[6] = (io->size >> 16) & 0xFF;
 		CDB[7] = (io->size >>  8) & 0xFF;
 		CDB[8] = io->size & 0xFF;
-		cmdTimeout = 10;
+		timeout = 10;
 		break;
 
 	case RESERVE:
 		cmdLen = 6;
 		dir = MPI_SCSIIO_CONTROL_READ;
 		CDB[0] = cmd;
-		cmdTimeout = 10;
+		timeout = 10;
 		break;
 
 	case RELEASE:
 		cmdLen = 6;
 		dir = MPI_SCSIIO_CONTROL_READ;
 		CDB[0] = cmd;
-		cmdTimeout = 10;
+		timeout = 10;
 		break;
 
 	case SYNCHRONIZE_CACHE:
@@ -3133,20 +2895,23 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 		dir = MPI_SCSIIO_CONTROL_READ;
 		CDB[0] = cmd;
 //		CDB[1] = 0x02;	/* set immediate bit */
-		cmdTimeout = 10;
+		timeout = 10;
 		break;
 
 	default:
 		/* Error Case */
-		return -EFAULT;
+		ret = -EFAULT;
+		goto out;
 	}
 
 	/* Get and Populate a free Frame
+	 *  MsgContext set in mpt_get_msg_frame call
 	 */
 	if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
-		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "No msg frames!\n",
-		    ioc->name));
-		return -EBUSY;
+		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: No msg frames!\n",
+		    ioc->name, __func__));
+		ret = MPT_SCANDV_BUSY;
+		goto out;
 	}
 
 	pScsiReq = (SCSIIORequest_t *) mf;
@@ -3172,7 +2937,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 
 	pScsiReq->Reserved = 0;
 
-	pScsiReq->MsgFlags = mpt_msg_flags();
+	pScsiReq->MsgFlags = mpt_msg_flags(ioc);
 	/* MsgContext set in mpt_get_msg_fram call  */
 
 	int_to_scsilun(io->lun, (struct scsi_lun *)pScsiReq->LUN);
@@ -3184,74 +2949,58 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 
 	if (cmd == REQUEST_SENSE) {
 		pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED);
-		ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Untagged! 0x%2x\n",
-			ioc->name, cmd));
+		devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+		    "%s: Untagged! 0x%02x\n", ioc->name, __func__, cmd));
 	}
 
-	for (ii=0; ii < 16; ii++)
+	for (ii = 0; ii < 16; ii++)
 		pScsiReq->CDB[ii] = CDB[ii];
 
 	pScsiReq->DataLength = cpu_to_le32(io->size);
 	pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma
 					   + (my_idx * MPT_SENSE_BUFFER_ALLOC));
 
-	ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Command 0x%x for (%d:%d:%d)\n",
-			ioc->name, cmd, io->channel, io->id, io->lun));
+	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+	    "%s: Sending Command 0x%02x for fw_channel=%d fw_id=%d lun=%d\n",
+	    ioc->name, __func__, cmd, io->channel, io->id, io->lun));
 
-	if (dir == MPI_SCSIIO_CONTROL_READ) {
-		mpt_add_sge((char *) &pScsiReq->SGL,
-			MPT_SGE_FLAGS_SSIMPLE_READ | io->size,
-			io->data_dma);
-	} else {
-		mpt_add_sge((char *) &pScsiReq->SGL,
-			MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size,
-			io->data_dma);
-	}
-
-	/* The ISR will free the request frame, but we need
-	 * the information to initialize the target. Duplicate.
-	 */
-	memcpy(&ReqCopy, pScsiReq, sizeof(SCSIIORequest_t));
-
-	/* Issue this command after:
-	 *	finish init
-	 *	add timer
-	 * Wait until the reply has been received
-	 *  ScsiScanDvCtx callback function will
-	 *	set hd->pLocal;
-	 *	set scandv_wait_done and call wake_up
-	 */
-	hd->pLocal = NULL;
-	hd->timer.expires = jiffies + HZ*cmdTimeout;
-	hd->scandv_wait_done = 0;
-
-	/* Save cmd pointer, for resource free if timeout or
-	 * FW reload occurs
-	 */
-	hd->cmdPtr = mf;
-
-	add_timer(&hd->timer);
+	if (dir == MPI_SCSIIO_CONTROL_READ)
+		ioc->add_sge((char *) &pScsiReq->SGL,
+		    MPT_SGE_FLAGS_SSIMPLE_READ | io->size, io->data_dma);
+	else
+		ioc->add_sge((char *) &pScsiReq->SGL,
+		    MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size, io->data_dma);
+
+	INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
 	mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
-	wait_event(hd->scandv_waitq, hd->scandv_wait_done);
-
-	if (hd->pLocal) {
-		rc = hd->pLocal->completion;
-		hd->pLocal->skip = 0;
-
-		/* Always set fatal error codes in some cases.
-		 */
-		if (rc == MPT_SCANDV_SELECTION_TIMEOUT)
-			rc = -ENXIO;
-		else if (rc == MPT_SCANDV_SOME_ERROR)
-			rc = -rc;
-	} else {
-		rc = -EFAULT;
-		/* This should never happen. */
-		ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "_do_cmd: Null pLocal!!!\n",
-			ioc->name));
-	}
-
-	return rc;
+	timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done,
+	    timeout*HZ);
+	if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+		ret = MPT_SCANDV_DID_RESET;
+		dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+		    "%s: TIMED OUT for cmd=0x%02x\n", ioc->name, __func__,
+		    cmd));
+		if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
+			mpt_free_msg_frame(ioc, mf);
+			goto out;
+		}
+		if (!timeleft) {
+			printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
+			    ioc->name, __func__);
+			mpt_HardResetHandler(ioc, CAN_SLEEP);
+			mpt_free_msg_frame(ioc, mf);
+		}
+		goto out;
+	}
+
+	ret = ioc->internal_cmds.completion_code;
+	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: success, rc=0x%02x\n",
+	    ioc->name, __func__, ret));
+
+ out:
+	CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
+	mutex_unlock(&ioc->internal_cmds.mutex);
+	return ret;
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
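
The rewritten mptscsih_do_cmd() above is the waiter side of the new scheme: a mutex serializes internal commands, the request is posted, and wait_for_completion_timeout() replaces the old timer-plus-wait_event() pair. On wakeup, the status flags distinguish "reply arrived", "killed by IOC reset", and "genuine timeout", and only the last escalates to a hard reset. A stripped-down sketch of that shape, reusing the hypothetical internal_cmd_state from the earlier sketch; issue_hw_cmd() and hard_reset() are stand-ins, not real driver calls:

	/* Hypothetical hardware hooks, stubbed so the sketch is self-contained. */
	static void issue_hw_cmd(void) { }
	static void hard_reset(void) { }

	static int do_internal_cmd(struct internal_cmd_state *st, int timeout_secs)
	{
		unsigned long timeleft;
		int ret;

		mutex_lock(&st->lock);		/* serialize internal commands */
		st->status = CMD_PENDING;
		reinit_completion(&st->done);
		issue_hw_cmd();			/* post the request frame */

		timeleft = wait_for_completion_timeout(&st->done,
						       timeout_secs * HZ);
		if (st->status & CMD_DID_IOCRESET) {
			ret = -ENODEV;		/* a concurrent reset killed it */
		} else if (!timeleft) {
			hard_reset();		/* real timeout: recover the IOC */
			ret = -ETIME;
		} else {
			ret = 0;		/* reply arrived; results filled in */
		}

		st->status = 0;
		mutex_unlock(&st->lock);
		return ret;
	}
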
@@ -3491,6 +3240,7 @@ struct device_attribute *mptscsih_host_attrs[] = {
 	&dev_attr_debug_level,
 	NULL,
 };
+
 EXPORT_SYMBOL(mptscsih_host_attrs);
 
 EXPORT_SYMBOL(mptscsih_remove);
@@ -3516,6 +3266,5 @@ EXPORT_SYMBOL(mptscsih_event_process);
 EXPORT_SYMBOL(mptscsih_ioc_reset);
 EXPORT_SYMBOL(mptscsih_change_queue_depth);
 EXPORT_SYMBOL(mptscsih_timer_expired);
-EXPORT_SYMBOL(mptscsih_TMHandler);
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 319aa303337..eb3f677528a 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -60,6 +60,7 @@
 #define MPT_SCANDV_SELECTION_TIMEOUT	(0x00000008)
 #define MPT_SCANDV_ISSUE_SENSE		(0x00000010)
 #define MPT_SCANDV_FALLBACK		(0x00000020)
+#define MPT_SCANDV_BUSY			(0x00000040)
 
 #define MPT_SCANDV_MAX_RETRIES		(10)
 
@@ -89,6 +90,7 @@
 
 #endif
 
+
 typedef struct _internal_cmd {
 	char		*data;		/* data pointer */
 	dma_addr_t	data_dma;	/* data dma address */
@@ -112,6 +114,8 @@ extern int mptscsih_resume(struct pci_dev *pdev);
 extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func);
 extern const char * mptscsih_info(struct Scsi_Host *SChost);
 extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
+extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
+	u8 id, int lun, int ctx2abort, ulong timeout);
 extern void mptscsih_slave_destroy(struct scsi_device *device);
 extern int mptscsih_slave_configure(struct scsi_device *device);
 extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
@@ -126,7 +130,8 @@ extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pE
 extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
 extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
 extern void mptscsih_timer_expired(unsigned long data);
-extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout);
 extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
 extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
 extern struct device_attribute *mptscsih_host_attrs[];
+extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
+extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 61620144e49..c5b808fd55b 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -300,7 +300,7 @@ mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id)
 		flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE |
 			(IOCPage4Ptr->Header.PageLength + ii) * 4;
 
-		mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
+		ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
 
 		ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 			"writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n",
@@ -614,19 +614,24 @@ static void mptspi_read_parameters(struct scsi_target *starget)
 	spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0;
 }
 
-static int
+int
 mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
 {
+	MPT_ADAPTER	*ioc = hd->ioc;
 	MpiRaidActionRequest_t	*pReq;
 	MPT_FRAME_HDR		*mf;
-	MPT_ADAPTER *ioc = hd->ioc;
+	int			ret;
+	unsigned long		timeleft;
+
+	mutex_lock(&ioc->internal_cmds.mutex);
 
 	/* Get and Populate a free Frame
 	 */
 	if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
-		ddvprintk(ioc, printk(MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n",
-			ioc->name));
-		return -EAGAIN;
+		dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT
+			"%s: no msg frames!\n", ioc->name, __func__));
+		ret = -EAGAIN;
+		goto out;
 	}
 	pReq = (MpiRaidActionRequest_t *)mf;
 	if (quiesce)
@@ -643,29 +648,36 @@ mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
 	pReq->Reserved2 = 0;
 	pReq->ActionDataWord = 0; /* Reserved for this action */
 
-	mpt_add_sge((char *)&pReq->ActionDataSGE,
+	ioc->add_sge((char *)&pReq->ActionDataSGE,
 		MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1);
 
 	ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n",
 			ioc->name, pReq->Action, channel, id));
 
-	hd->pLocal = NULL;
-	hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */
-	hd->scandv_wait_done = 0;
-
-	/* Save cmd pointer, for resource free if timeout or
-	 * FW reload occurs
-	 */
-	hd->cmdPtr = mf;
-
-	add_timer(&hd->timer);
+	INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
 	mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
-	wait_event(hd->scandv_waitq, hd->scandv_wait_done);
+	timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done, 10*HZ);
+	if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+		ret = -ETIME;
+		dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: TIMED OUT!\n",
+		    ioc->name, __func__));
+		if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+			goto out;
+		if (!timeleft) {
+			printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
+			    ioc->name, __func__);
+			mpt_HardResetHandler(ioc, CAN_SLEEP);
+			mpt_free_msg_frame(ioc, mf);
+		}
+		goto out;
+	}
 
-	if ((hd->pLocal == NULL) || (hd->pLocal->completion != 0))
-		return -1;
+	ret = ioc->internal_cmds.completion_code;
 
-	return 0;
+ out:
+	CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
+	mutex_unlock(&ioc->internal_cmds.mutex);
+	return ret;
 }
 
 static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
@@ -1423,17 +1435,15 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	 *	A slightly different algorithm is required for
 	 *	64bit SGEs.
 	 */
-	scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
-	if (sizeof(dma_addr_t) == sizeof(u64)) {
+	scale = ioc->req_sz/ioc->SGE_size;
+	if (ioc->sg_addr_size == sizeof(u64)) {
 		numSGE = (scale - 1) *
 			(ioc->facts.MaxChainDepth-1) + scale +
-			(ioc->req_sz - 60) / (sizeof(dma_addr_t) +
-			sizeof(u32));
+			(ioc->req_sz - 60) / ioc->SGE_size;
 	} else {
 		numSGE = 1 + (scale - 1) *
 			(ioc->facts.MaxChainDepth-1) + scale +
-			(ioc->req_sz - 64) / (sizeof(dma_addr_t) +
-			sizeof(u32));
+			(ioc->req_sz - 64) / ioc->SGE_size;
 	}
 
 	if (numSGE < sh->sg_tablesize) {
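
For intuition about the arithmetic just above, take illustrative numbers (not from any real adapter): req_sz = 128 bytes and a 64-bit SGE_size of 12 give scale = 128/12 = 10 SGEs per chained frame, while the request frame itself, after 60 bytes of header, contributes (128 - 60)/12 = 5 more. With facts.MaxChainDepth = 2 the formula yields numSGE = (10 - 1) * (2 - 1) + 10 + 5 = 24, which is then used to cap the host's sg_tablesize.
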
@@ -1464,9 +1474,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	/* Clear the TM flags
 	 */
-	hd->tmPending = 0;
-	hd->tmState = TM_STATE_NONE;
-	hd->resetPending = 0;
 	hd->abortSCpnt = NULL;
 
 	/* Clear the pointer used to store
@@ -1493,8 +1500,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		mpt_saf_te));
 	ioc->spi_data.noQas = 0;
 
-	init_waitqueue_head(&hd->scandv_waitq);
-	hd->scandv_wait_done = 0;
 	hd->last_queue_full = 0;
 	hd->spi_pending = 0;
 
@@ -1514,7 +1519,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	 *  issue internal bus reset
 	 */
 	if (ioc->spi_data.bus_reset)
-		mptscsih_TMHandler(hd,
+		mptscsih_IssueTaskMgmt(hd,
 		    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
 		    0, 0, 0, 0, 5);
 
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index a443e136dc4..335d4c78a77 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -426,15 +426,9 @@ static void i2o_block_end_request(struct request *req, int error,
 	struct request_queue *q = req->q;
 	unsigned long flags;
 
-	if (blk_end_request(req, error, nr_bytes)) {
-		int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
-
-		if (blk_pc_request(req))
-			leftover = req->data_len;
-
+	if (blk_end_request(req, error, nr_bytes))
 		if (error)
-			blk_end_request(req, -EIO, leftover);
-	}
+			blk_end_request_all(req, -EIO);
 
 	spin_lock_irqsave(q->queue_lock, flags);
 
@@ -761,7 +755,7 @@ static int i2o_block_transfer(struct request *req)
 		break;
 
 	case CACHE_SMARTFETCH:
-		if (req->nr_sectors > 16)
+		if (blk_rq_sectors(req) > 16)
 			ctl_flags = 0x201F0008;
 		else
 			ctl_flags = 0x001F0000;
@@ -781,13 +775,13 @@ static int i2o_block_transfer(struct request *req)
 			ctl_flags = 0x001F0010;
 		break;
 	case CACHE_SMARTBACK:
-		if (req->nr_sectors > 16)
+		if (blk_rq_sectors(req) > 16)
 			ctl_flags = 0x001F0004;
 		else
 			ctl_flags = 0x001F0010;
 		break;
 	case CACHE_SMARTTHROUGH:
-		if (req->nr_sectors > 16)
+		if (blk_rq_sectors(req) > 16)
 			ctl_flags = 0x001F0004;
 		else
 			ctl_flags = 0x001F0010;
@@ -800,8 +794,9 @@ static int i2o_block_transfer(struct request *req)
 	if (c->adaptec) {
 		u8 cmd[10];
 		u32 scsi_flags;
-		u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT;
+		u16 hwsec;
 
+		hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
 		memset(cmd, 0, 10);
 
 		sgl_offset = SGL_OFFSET_12;
@@ -827,22 +822,22 @@ static int i2o_block_transfer(struct request *req)
 
 		*mptr++ = cpu_to_le32(scsi_flags);
 
-		*((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
-		*((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
+		*((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
+		*((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);
 
 		memcpy(mptr, cmd, 10);
 		mptr += 4;
-		*mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+		*mptr++ = cpu_to_le32(blk_rq_bytes(req));
 	} else
 #endif
 	{
 		msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
 		*mptr++ = cpu_to_le32(ctl_flags);
-		*mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+		*mptr++ = cpu_to_le32(blk_rq_bytes(req));
 		*mptr++ =
-			cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
+			cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
 		*mptr++ =
-			cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
+			cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
 	}
 
 	if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@@ -883,7 +878,7 @@ static void i2o_block_request_fn(struct request_queue *q)
 	struct request *req;
 
 	while (!blk_queue_plugged(q)) {
-		req = elv_next_request(q);
+		req = blk_peek_request(q);
 		if (!req)
 			break;
 
@@ -896,7 +891,7 @@ static void i2o_block_request_fn(struct request_queue *q)
 
 		if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
 			if (!i2o_block_transfer(req)) {
-				blkdev_dequeue_request(req);
+				blk_start_request(req);
 				continue;
 			} else
 				osm_info("transfer error\n");
@@ -922,8 +917,10 @@ static void i2o_block_request_fn(struct request_queue *q)
 				blk_stop_queue(q);
 				break;
 			}
-		} else
-			end_request(req, 0);
+		} else {
+			blk_start_request(req);
+			__blk_end_request_all(req, -EIO);
+		}
 	}
 };
 
@@ -1082,7 +1079,7 @@ static int i2o_block_probe(struct device *dev)
 	 */
 	if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
 	    !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
-		blk_queue_hardsect_size(queue, le32_to_cpu(blocksize));
+		blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));
 	} else
 		osm_warn("unable to get blocksize of %s\n", gd->disk_name);
 
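
These i2o_block hunks track the 2.6.31 block-layer cleanup: drivers stop reading request fields like req->sector and req->nr_sectors directly and use the blk_rq_pos()/blk_rq_sectors()/blk_rq_bytes() accessors, while the elv_next_request()/blkdev_dequeue_request() pair becomes blk_peek_request()/blk_start_request(). A skeleton of a request_fn under the new helpers, with hw_submit() as a hypothetical driver hook (this is the legacy single-queue API of that era, long since replaced by blk-mq):

	#include <linux/blkdev.h>

	/* Hypothetical: hand one request to the hardware; returns 0 on success. */
	static int hw_submit(struct request *req, sector_t pos, unsigned int len);

	static void example_request_fn(struct request_queue *q)
	{
		struct request *req;

		/* Peek without dequeueing; start only what we can service. */
		while ((req = blk_peek_request(q)) != NULL) {
			sector_t pos = blk_rq_pos(req);		/* 512-byte sectors */
			unsigned int len = blk_rq_bytes(req);	/* total payload    */

			blk_start_request(req);	/* dequeue: driver owns it now */
			if (hw_submit(req, pos, len))
				__blk_end_request_all(req, -EIO);
		}
	}
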
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 7793932a513..11a6248cc1c 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -443,7 +443,7 @@ static irqreturn_t pcf50633_irq(int irq, void *data)
 	dev_dbg(pcf->dev, "pcf50633_irq\n");
 
 	get_device(pcf->dev);
-	disable_irq(pcf->irq);
+	disable_irq_nosync(pcf->irq);
 	schedule_work(&pcf->irq_work);
 
 	return IRQ_HANDLED;
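
The one-liner above fixes a classic self-deadlock: disable_irq() waits for any handler running on that line to complete, and here it is called from the handler itself. disable_irq_nosync() only masks the line and returns, letting the handler defer the real work; the worker re-enables the IRQ when it finishes. The pattern in isolation, with a hypothetical driver structure:

	#include <linux/interrupt.h>
	#include <linux/workqueue.h>

	struct example_chip {			/* hypothetical driver state */
		struct work_struct irq_work;
	};

	static irqreturn_t example_irq(int irq, void *data)
	{
		struct example_chip *chip = data;

		/* Mask without waiting for handlers -- we ARE the handler. */
		disable_irq_nosync(irq);
		schedule_work(&chip->irq_work);	/* worker calls enable_irq() */
		return IRQ_HANDLED;
	}
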
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index e9f4323dd2c..875f7a87573 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -108,6 +108,10 @@ static int t7l66xb_mmc_disable(struct platform_device *mmc)
 
 /*--------------------------------------------------------------------------*/
 
+static const struct tmio_mmc_data t7166xb_mmc_data = {
+	.hclk = 24000000,
+};
+
 static const struct resource t7l66xb_mmc_resources[] = {
 	{
 		.start	= 0x800,
@@ -149,6 +153,7 @@ static struct mfd_cell t7l66xb_cells[] = {
 		.name = "tmio-mmc",
 		.enable = t7l66xb_mmc_enable,
 		.disable = t7l66xb_mmc_disable,
+		.driver_data = &t7166xb_mmc_data,
 		.num_resources = ARRAY_SIZE(t7l66xb_mmc_resources),
 		.resources = t7l66xb_mmc_resources,
 	},
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index 43222c12fec..c3993ac2054 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -75,6 +75,10 @@ static int tc6387xb_mmc_disable(struct platform_device *mmc)
 
 /*--------------------------------------------------------------------------*/
 
+const static struct tmio_mmc_data tc6387xb_mmc_data = {
+	.hclk = 24000000,
+};
+
 static struct resource tc6387xb_mmc_resources[] = {
 	{
 		.start = 0x800,
@@ -98,6 +102,7 @@ static struct mfd_cell tc6387xb_cells[] = {
 		.name = "tmio-mmc",
 		.enable = tc6387xb_mmc_enable,
 		.disable = tc6387xb_mmc_disable,
+		.driver_data = &tc6387xb_mmc_data,
 		.num_resources = ARRAY_SIZE(tc6387xb_mmc_resources),
 		.resources = tc6387xb_mmc_resources,
 	},
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 77a12fc8045..9d2abb5d6e2 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -136,6 +136,10 @@ static int tc6393xb_nand_enable(struct platform_device *nand)
 	return 0;
 }
 
+const static struct tmio_mmc_data tc6393xb_mmc_data = {
+	.hclk = 24000000,
+};
+
 static struct resource __devinitdata tc6393xb_nand_resources[] = {
 	{
 		.start	= 0x1000,
@@ -351,6 +355,7 @@ static struct mfd_cell __devinitdata tc6393xb_cells[] = {
 	},
 	[TC6393XB_CELL_MMC] = {
 		.name = "tmio-mmc",
+		.driver_data = &tc6393xb_mmc_data,
 		.num_resources = ARRAY_SIZE(tc6393xb_mmc_resources),
 		.resources = tc6393xb_mmc_resources,
 	},
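
Each tmio cell now carries a struct tmio_mmc_data pointer in .driver_data so the shared tmio-mmc driver can learn the cell's bus clock instead of hard-coding it. On kernels of this era the mfd core handed the originating cell to the subdevice through platform_data, so the consumer side looked roughly like the sketch below; this is a hedged illustration of the mechanism, not the exact tmio-mmc probe code:

	#include <linux/platform_device.h>
	#include <linux/mfd/core.h>
	#include <linux/mfd/tmio.h>

	static int example_mmc_probe(struct platform_device *pdev)
	{
		/* Assumption: mfd-core set platform_data to the mfd_cell. */
		const struct mfd_cell *cell = pdev->dev.platform_data;
		const struct tmio_mmc_data *pdata = cell->driver_data;

		dev_info(&pdev->dev, "tmio cell hclk = %lu\n",
			 (unsigned long)pdata->hclk);
		return 0;
	}
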
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index c2be3088e2e..fe24079387c 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -79,10 +79,6 @@ static int wm8350_phys_read(struct wm8350 *wm8350, u8 reg, int num_regs,
 		/* Cache is CPU endian */
 		dest[i - reg] = be16_to_cpu(dest[i - reg]);
 
-		/* Satisfy non-volatile bits from cache */
-		dest[i - reg] &= wm8350_reg_io_map[i].vol;
-		dest[i - reg] |= wm8350->reg_cache[i];
-
 		/* Mask out non-readable bits */
 		dest[i - reg] &= wm8350_reg_io_map[i].readable;
 	}
@@ -182,9 +178,6 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src)
 			(wm8350->reg_cache[i] & ~wm8350_reg_io_map[i].writable)
 			| src[i - reg];
 
-		/* Don't store volatile bits */
-		wm8350->reg_cache[i] &= ~wm8350_reg_io_map[i].vol;
-
 		src[i - reg] = cpu_to_be16(src[i - reg]);
 	}
 
@@ -1261,7 +1254,6 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int type, int mode)
 		    (i < WM8350_CLOCK_CONTROL_1 || i > WM8350_AIF_TEST)) {
 			value = be16_to_cpu(wm8350->reg_cache[i]);
 			value &= wm8350_reg_io_map[i].readable;
-			value &= ~wm8350_reg_io_map[i].vol;
 			wm8350->reg_cache[i] = value;
 		} else
 			wm8350->reg_cache[i] = reg_map[i];
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 6d1ac180f6e..68ab39d7cb3 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -235,5 +235,6 @@ config ISL29003
 
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
+source "drivers/misc/cb710/Kconfig"
 
 endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 7871f05dcb9..36f733cd60e 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -21,3 +21,4 @@ obj-$(CONFIG_HP_ILO) += hpilo.o
 obj-$(CONFIG_ISL29003)		+= isl29003.o
 obj-$(CONFIG_C2PORT)		+= c2port/
 obj-y				+= eeprom/
+obj-y				+= cb710/
diff --git a/drivers/misc/cb710/Kconfig b/drivers/misc/cb710/Kconfig
new file mode 100644
index 00000000000..22429b8b106
--- /dev/null
+++ b/drivers/misc/cb710/Kconfig
@@ -0,0 +1,25 @@
config CB710_CORE
	tristate "ENE CB710/720 Flash memory card reader support"
	depends on PCI
	help
	  This option enables support for PCI ENE CB710/720 Flash memory card
	  reader found in some laptops (ie. some versions of HP Compaq nx9500).

	  You will also have to select some flash card format drivers (MMC/SD,
	  MemoryStick).

	  This driver can also be built as a module. If so, the module
	  will be called cb710.

config CB710_DEBUG
	bool "Enable driver debugging"
	depends on CB710_CORE != n
	default n
	help
	  This is an option for use by developers; most people should
	  say N here.  This adds a lot of debugging output to dmesg.

config CB710_DEBUG_ASSUMPTIONS
	bool
	depends on CB710_CORE != n
	default y
diff --git a/drivers/misc/cb710/Makefile b/drivers/misc/cb710/Makefile
new file mode 100644
index 00000000000..7b80cbf1a60
--- /dev/null
+++ b/drivers/misc/cb710/Makefile
@@ -0,0 +1,8 @@
ifeq ($(CONFIG_CB710_DEBUG),y)
	EXTRA_CFLAGS		+= -DDEBUG
endif

obj-$(CONFIG_CB710_CORE)	+= cb710.o

cb710-y				:= core.o sgbuf2.o
cb710-$(CONFIG_CB710_DEBUG)	+= debug.o
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
new file mode 100644
index 00000000000..b14eab0f2ba
--- /dev/null
+++ b/drivers/misc/cb710/core.c
@@ -0,0 +1,357 @@
/*
 *	cb710/core.c
 *
 *	Copyright by Michał Mirosław, 2008-2009
 *
 *	This program is free software; you can redistribute it and/or modify
 *	it under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/cb710.h>

static DEFINE_IDA(cb710_ida);
static DEFINE_SPINLOCK(cb710_ida_lock);

void cb710_pci_update_config_reg(struct pci_dev *pdev,
	int reg, uint32_t mask, uint32_t xor)
{
	u32 rval;

	pci_read_config_dword(pdev, reg, &rval);
	rval = (rval & mask) ^ xor;
	pci_write_config_dword(pdev, reg, rval);
}
EXPORT_SYMBOL_GPL(cb710_pci_update_config_reg);

/* Some magic writes based on Windows driver init code */
static int __devinit cb710_pci_configure(struct pci_dev *pdev)
{
	unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
	struct pci_dev *pdev0 = pci_get_slot(pdev->bus, devfn);
	u32 val;

	cb710_pci_update_config_reg(pdev, 0x48,
		~0x000000FF, 0x0000003F);

	pci_read_config_dword(pdev, 0x48, &val);
	if (val & 0x80000000)
		return 0;

	if (!pdev0)
		return -ENODEV;

	if (pdev0->vendor == PCI_VENDOR_ID_ENE
	    && pdev0->device == PCI_DEVICE_ID_ENE_720) {
		cb710_pci_update_config_reg(pdev0, 0x8C,
			~0x00F00000, 0x00100000);
		cb710_pci_update_config_reg(pdev0, 0xB0,
			~0x08000000, 0x08000000);
	}

	cb710_pci_update_config_reg(pdev0, 0x8C,
		~0x00000F00, 0x00000200);
	cb710_pci_update_config_reg(pdev0, 0x90,
		~0x00060000, 0x00040000);

	pci_dev_put(pdev0);

	return 0;
}

static irqreturn_t cb710_irq_handler(int irq, void *data)
{
	struct cb710_chip *chip = data;
	struct cb710_slot *slot = &chip->slot[0];
	irqreturn_t handled = IRQ_NONE;
	unsigned nr;

	spin_lock(&chip->irq_lock); /* incl. smp_rmb() */

	for (nr = chip->slots; nr; ++slot, --nr) {
		cb710_irq_handler_t handler_func = slot->irq_handler;
		if (handler_func && handler_func(slot))
			handled = IRQ_HANDLED;
	}

	spin_unlock(&chip->irq_lock);

	return handled;
}

static void cb710_release_slot(struct device *dev)
{
#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
	struct cb710_slot *slot = cb710_pdev_to_slot(to_platform_device(dev));
	struct cb710_chip *chip = cb710_slot_to_chip(slot);

	/* slot struct can be freed now */
	atomic_dec(&chip->slot_refs_count);
#endif
}

static int __devinit cb710_register_slot(struct cb710_chip *chip,
	unsigned slot_mask, unsigned io_offset, const char *name)
{
	int nr = chip->slots;
	struct cb710_slot *slot = &chip->slot[nr];
	int err;

	dev_dbg(cb710_chip_dev(chip),
		"register: %s.%d; slot %d; mask %d; IO offset: 0x%02X\n",
		name, chip->platform_id, nr, slot_mask, io_offset);

	/* slot->irq_handler == NULL here; this needs to be
	 * seen before platform_device_register() */
	++chip->slots;
	smp_wmb();

	slot->iobase = chip->iobase + io_offset;
	slot->pdev.name = name;
	slot->pdev.id = chip->platform_id;
	slot->pdev.dev.parent = &chip->pdev->dev;
	slot->pdev.dev.release = cb710_release_slot;

	err = platform_device_register(&slot->pdev);

#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
	atomic_inc(&chip->slot_refs_count);
#endif

	if (err) {
		/* device_initialize() called from platform_device_register()
		 * wants this on error path */
		platform_device_put(&slot->pdev);

		/* slot->irq_handler == NULL here anyway, so no lock needed */
		--chip->slots;
		return err;
	}

	chip->slot_mask |= slot_mask;

	return 0;
}

static void cb710_unregister_slot(struct cb710_chip *chip,
	unsigned slot_mask)
{
	int nr = chip->slots - 1;

	if (!(chip->slot_mask & slot_mask))
		return;

	platform_device_unregister(&chip->slot[nr].pdev);

	/* complementary to spin_unlock() in cb710_set_irq_handler() */
	smp_rmb();
	BUG_ON(chip->slot[nr].irq_handler != NULL);

	/* slot->irq_handler == NULL here, so no lock needed */
	--chip->slots;
	chip->slot_mask &= ~slot_mask;
}

void cb710_set_irq_handler(struct cb710_slot *slot,
	cb710_irq_handler_t handler)
{
	struct cb710_chip *chip = cb710_slot_to_chip(slot);
	unsigned long flags;

	spin_lock_irqsave(&chip->irq_lock, flags);
	slot->irq_handler = handler;
	spin_unlock_irqrestore(&chip->irq_lock, flags);
}
EXPORT_SYMBOL_GPL(cb710_set_irq_handler);

#ifdef CONFIG_PM

static int cb710_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct cb710_chip *chip = pci_get_drvdata(pdev);

	free_irq(pdev->irq, chip);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	if (state.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3cold);
	return 0;
}

static int cb710_resume(struct pci_dev *pdev)
{
	struct cb710_chip *chip = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pcim_enable_device(pdev);
	if (err)
		return err;

	return devm_request_irq(&pdev->dev, pdev->irq,
		cb710_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip);
}

#endif /* CONFIG_PM */

static int __devinit cb710_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct cb710_chip *chip;
	unsigned long flags;
	u32 val;
	int err;
	int n = 0;

	err = cb710_pci_configure(pdev);
	if (err)
		return err;

	/* this is actually magic... */
	pci_read_config_dword(pdev, 0x48, &val);
	if (!(val & 0x80000000)) {
		pci_write_config_dword(pdev, 0x48, val|0x71000000);
		pci_read_config_dword(pdev, 0x48, &val);
	}

	dev_dbg(&pdev->dev, "PCI config[0x48] = 0x%08X\n", val);
	if (!(val & 0x70000000))
		return -ENODEV;
	val = (val >> 28) & 7;
	if (val & CB710_SLOT_MMC)
		++n;
	if (val & CB710_SLOT_MS)
		++n;
	if (val & CB710_SLOT_SM)
		++n;

	chip = devm_kzalloc(&pdev->dev,
		sizeof(*chip) + n * sizeof(*chip->slot), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, 0x0001, KBUILD_MODNAME);
	if (err)
		return err;

	chip->pdev = pdev;
	chip->iobase = pcim_iomap_table(pdev)[0];

	pci_set_drvdata(pdev, chip);

	err = devm_request_irq(&pdev->dev, pdev->irq,
		cb710_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip);
	if (err)
		return err;

	do {
		if (!ida_pre_get(&cb710_ida, GFP_KERNEL))
			return -ENOMEM;

		spin_lock_irqsave(&cb710_ida_lock, flags);
		err = ida_get_new(&cb710_ida, &chip->platform_id);
		spin_unlock_irqrestore(&cb710_ida_lock, flags);

		if (err && err != -EAGAIN)
			return err;
	} while (err);


	dev_info(&pdev->dev, "id %d, IO 0x%p, IRQ %d\n",
		chip->platform_id, chip->iobase, pdev->irq);

	if (val & CB710_SLOT_MMC) {	/* MMC/SD slot */
		err = cb710_register_slot(chip,
			CB710_SLOT_MMC, 0x00, "cb710-mmc");
		if (err)
			return err;
	}

	if (val & CB710_SLOT_MS) {	/* MemoryStick slot */
		err = cb710_register_slot(chip,
			CB710_SLOT_MS, 0x40, "cb710-ms");
		if (err)
			goto unreg_mmc;
	}

	if (val & CB710_SLOT_SM) {	/* SmartMedia slot */
		err = cb710_register_slot(chip,
			CB710_SLOT_SM, 0x60, "cb710-sm");
		if (err)
			goto unreg_ms;
	}

	return 0;
unreg_ms:
	cb710_unregister_slot(chip, CB710_SLOT_MS);
unreg_mmc:
	cb710_unregister_slot(chip, CB710_SLOT_MMC);

#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
	BUG_ON(atomic_read(&chip->slot_refs_count) != 0);
#endif
	return err;
}

static void __devexit cb710_remove_one(struct pci_dev *pdev)
{
	struct cb710_chip *chip = pci_get_drvdata(pdev);
	unsigned long flags;

	cb710_unregister_slot(chip, CB710_SLOT_SM);
	cb710_unregister_slot(chip, CB710_SLOT_MS);
	cb710_unregister_slot(chip, CB710_SLOT_MMC);
#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
	BUG_ON(atomic_read(&chip->slot_refs_count) != 0);
#endif

	spin_lock_irqsave(&cb710_ida_lock, flags);
	ida_remove(&cb710_ida, chip->platform_id);
	spin_unlock_irqrestore(&cb710_ida_lock, flags);
}

static const struct pci_device_id cb710_pci_tbl[] = {
	{ PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_CB710_FLASH,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, }
};

static struct pci_driver cb710_driver = {
	.name = KBUILD_MODNAME,
	.id_table = cb710_pci_tbl,
	.probe = cb710_probe,
	.remove = __devexit_p(cb710_remove_one),
#ifdef CONFIG_PM
	.suspend = cb710_suspend,
	.resume = cb710_resume,
#endif
};

static int __init cb710_init_module(void)
{
	return pci_register_driver(&cb710_driver);
}

static void __exit cb710_cleanup_module(void)
{
	pci_unregister_driver(&cb710_driver);
	ida_destroy(&cb710_ida);
}

module_init(cb710_init_module);
module_exit(cb710_cleanup_module);

MODULE_AUTHOR("Michał Mirosław <mirq-linux@rere.qmqm.pl>");
MODULE_DESCRIPTION("ENE CB710 memory card reader driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cb710_pci_tbl);
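
cb710_probe() above allocates its platform id with the two-step IDA idiom of this period: ida_pre_get() preloads memory outside the spinlock, ida_get_new() allocates under it, and -EAGAIN means the preload was consumed by a racing allocator, so the loop retries. The same loop in isolation (later kernels collapse all of this into a single ida_alloc() call):

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	static DEFINE_IDA(example_ida);
	static DEFINE_SPINLOCK(example_ida_lock);

	static int example_get_id(int *id)
	{
		int err;

		do {
			/* Preallocate outside the lock; may sleep. */
			if (!ida_pre_get(&example_ida, GFP_KERNEL))
				return -ENOMEM;

			spin_lock(&example_ida_lock);
			err = ida_get_new(&example_ida, id);
			spin_unlock(&example_ida_lock);
		} while (err == -EAGAIN);	/* raced: preload again, retry */

		return err;
	}
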
diff --git a/drivers/misc/cb710/debug.c b/drivers/misc/cb710/debug.c
new file mode 100644
index 00000000000..02358d086e0
--- /dev/null
+++ b/drivers/misc/cb710/debug.c
@@ -0,0 +1,119 @@
1/*
2 * cb710/debug.c
3 *
4 * Copyright by Michał Mirosław, 2008-2009
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/cb710.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/slab.h>
14
15#define CB710_REG_COUNT 0x80
16
17static const u16 allow[CB710_REG_COUNT/16] = {
18 0xFFF0, 0xFFFF, 0xFFFF, 0xFFFF,
19 0xFFF0, 0xFFFF, 0xFFFF, 0xFFFF,
20};
21static const char *const prefix[ARRAY_SIZE(allow)] = {
22 "MMC", "MMC", "MMC", "MMC",
23 "MS?", "MS?", "SM?", "SM?"
24};
25
26static inline int allow_reg_read(unsigned block, unsigned offset, unsigned bits)
27{
28 unsigned mask = (1 << bits/8) - 1;
29 offset *= bits/8;
30 return ((allow[block] >> offset) & mask) == mask;
31}
32
33#define CB710_READ_REGS_TEMPLATE(t) \
34static void cb710_read_regs_##t(void __iomem *iobase, \
35 u##t *reg, unsigned select) \
36{ \
37 unsigned i, j; \
38 \
39 for (i = 0; i < ARRAY_SIZE(allow); ++i, reg += 16/(t/8)) { \
40 if (!(select & (1 << i))) \
41 continue; \
42 \
43 for (j = 0; j < 0x10/(t/8); ++j) { \
44 if (!allow_reg_read(i, j, t)) \
45 continue; \
46 reg[j] = ioread##t(iobase \
47 + (i << 4) + (j * (t/8))); \
48 } \
49 } \
50}
51
52static const char cb710_regf_8[] = "%02X";
53static const char cb710_regf_16[] = "%04X";
54static const char cb710_regf_32[] = "%08X";
55static const char cb710_xes[] = "xxxxxxxx";
56
57#define CB710_DUMP_REGS_TEMPLATE(t) \
58static void cb710_dump_regs_##t(struct device *dev, \
59 const u##t *reg, unsigned select) \
60{ \
61 const char *const xp = &cb710_xes[8 - t/4]; \
62 const char *const format = cb710_regf_##t; \
63 \
64 char msg[100], *p; \
65 unsigned i, j; \
66 \
67 for (i = 0; i < ARRAY_SIZE(allow); ++i, reg += 16/(t/8)) { \
68 if (!(select & (1 << i))) \
69 continue; \
70 p = msg; \
71 for (j = 0; j < 0x10/(t/8); ++j) { \
72 *p++ = ' '; \
73 if (j == 8/(t/8)) \
74 *p++ = ' '; \
75 if (allow_reg_read(i, j, t)) \
76 p += sprintf(p, format, reg[j]); \
77 else \
78 p += sprintf(p, "%s", xp); \
79 } \
80 dev_dbg(dev, "%s 0x%02X %s\n", prefix[i], i << 4, msg); \
81 } \
82}
83
84#define CB710_READ_AND_DUMP_REGS_TEMPLATE(t) \
85static void cb710_read_and_dump_regs_##t(struct cb710_chip *chip, \
86 unsigned select) \
87{ \
88 u##t regs[CB710_REG_COUNT/sizeof(u##t)]; \
89 \
90 memset(&regs, 0, sizeof(regs)); \
91 cb710_read_regs_##t(chip->iobase, regs, select); \
92 cb710_dump_regs_##t(cb710_chip_dev(chip), regs, select); \
93}
94
95#define CB710_REG_ACCESS_TEMPLATES(t) \
96 CB710_READ_REGS_TEMPLATE(t) \
97 CB710_DUMP_REGS_TEMPLATE(t) \
98 CB710_READ_AND_DUMP_REGS_TEMPLATE(t)
99
100CB710_REG_ACCESS_TEMPLATES(8)
101CB710_REG_ACCESS_TEMPLATES(16)
102CB710_REG_ACCESS_TEMPLATES(32)
103
104void cb710_dump_regs(struct cb710_chip *chip, unsigned select)
105{
106 if (!(select & CB710_DUMP_REGS_MASK))
107 select = CB710_DUMP_REGS_ALL;
108 if (!(select & CB710_DUMP_ACCESS_MASK))
109 select |= CB710_DUMP_ACCESS_8;
110
111 if (select & CB710_DUMP_ACCESS_32)
112 cb710_read_and_dump_regs_32(chip, select);
113 if (select & CB710_DUMP_ACCESS_16)
114 cb710_read_and_dump_regs_16(chip, select);
115 if (select & CB710_DUMP_ACCESS_8)
116 cb710_read_and_dump_regs_8(chip, select);
117}
118EXPORT_SYMBOL_GPL(cb710_dump_regs);
119
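In debug.c, every register access is gated by allow_reg_read(), which treats each u16 in allow[] as a per-byte readability bitmap for one 16-byte register block: an access of width `bits` at word offset `offset` is permitted only when every byte it covers is marked readable. The same mask arithmetic, lifted out of the kernel as a standalone sketch:

#include <stdio.h>

static const unsigned short allow = 0xFFF0; /* bytes 0-3 blocked, 4-15 readable */

/* Mirrors allow_reg_read(): 'offset' indexes units of bits/8 bytes. */
static int allow_read(unsigned offset, unsigned bits)
{
	unsigned mask = (1u << bits / 8) - 1;	/* 1, 3 or 15 set bits */

	offset *= bits / 8;			/* convert to a byte offset */
	return ((allow >> offset) & mask) == mask;
}

int main(void)
{
	printf("8-bit read at byte 2:   %s\n", allow_read(2, 8) ? "ok" : "blocked");
	printf("32-bit read at dword 0: %s\n", allow_read(0, 32) ? "ok" : "blocked");
	printf("32-bit read at dword 1: %s\n", allow_read(1, 32) ? "ok" : "blocked");
	return 0;
}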
diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
new file mode 100644
index 00000000000..d38a7acdb6e
--- /dev/null
+++ b/drivers/misc/cb710/sgbuf2.c
@@ -0,0 +1,150 @@
1/*
2 * cb710/sgbuf2.c
3 *
4 * Copyright by Michał Mirosław, 2008-2009
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/cb710.h>
13
14static bool sg_dwiter_next(struct sg_mapping_iter *miter)
15{
16 if (sg_miter_next(miter)) {
17 miter->consumed = 0;
18 return true;
19 } else
20 return false;
21}
22
23static bool sg_dwiter_is_at_end(struct sg_mapping_iter *miter)
24{
25 return miter->length == miter->consumed && !sg_dwiter_next(miter);
26}
27
28static uint32_t sg_dwiter_read_buffer(struct sg_mapping_iter *miter)
29{
30 size_t len, left = 4;
31 uint32_t data;
32 void *addr = &data;
33
34 do {
35 len = min(miter->length - miter->consumed, left);
36 memcpy(addr, miter->addr + miter->consumed, len);
37 miter->consumed += len;
38 left -= len;
39 if (!left)
40 return data;
41 addr += len;
42 } while (sg_dwiter_next(miter));
43
44 memset(addr, 0, left);
45 return data;
46}
47
48static inline bool needs_unaligned_copy(const void *ptr)
49{
50#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
51 return false;
52#else
53 return ((ptr - NULL) & 3) != 0;
54#endif
55}
56
57static bool sg_dwiter_get_next_block(struct sg_mapping_iter *miter, uint32_t **ptr)
58{
59 size_t len;
60
61 if (sg_dwiter_is_at_end(miter))
62 return true;
63
64 len = miter->length - miter->consumed;
65
66 if (likely(len >= 4 && !needs_unaligned_copy(
67 miter->addr + miter->consumed))) {
68 *ptr = miter->addr + miter->consumed;
69 miter->consumed += 4;
70 return true;
71 }
72
73 return false;
74}
75
76/**
77 * cb710_sg_dwiter_read_next_block() - get next 32-bit word from sg buffer
78 * @miter: sg mapping iterator used for reading
79 *
80 * Description:
81 * Returns 32-bit word starting at byte pointed to by @miter@
82 * handling any alignment issues. Bytes past the buffer's end
83 * are not accessed (read) but are returned as zeroes. @miter@
84 * is advanced by 4 bytes or to the end of buffer whichever is
85 * closer.
86 *
87 * Context:
88 * Same requirements as in sg_miter_next().
89 *
90 * Returns:
91 * 32-bit word just read.
92 */
93uint32_t cb710_sg_dwiter_read_next_block(struct sg_mapping_iter *miter)
94{
95 uint32_t *ptr = NULL;
96
97 if (likely(sg_dwiter_get_next_block(miter, &ptr)))
98 return ptr ? *ptr : 0;
99
100 return sg_dwiter_read_buffer(miter);
101}
102EXPORT_SYMBOL_GPL(cb710_sg_dwiter_read_next_block);
103
104static void sg_dwiter_write_slow(struct sg_mapping_iter *miter, uint32_t data)
105{
106 size_t len, left = 4;
107 void *addr = &data;
108
109 do {
110 len = min(miter->length - miter->consumed, left);
111 memcpy(miter->addr, addr, len);
112 miter->consumed += len;
113 left -= len;
114 if (!left)
115 return;
116 addr += len;
117 flush_kernel_dcache_page(miter->page);
118 } while (sg_dwiter_next(miter));
119}
120
121/**
122 * cb710_sg_dwiter_write_next_block() - write next 32-bit word to sg buffer
123 * @miter: sg mapping iterator used for writing
124 *
125 * Description:
126 * Writes 32-bit word starting at byte pointed to by @miter@
127 * handling any alignment issues. Bytes which would be written
128 * past the buffer's end are silently discarded. @miter@ is
129 * advanced by 4 bytes or to the end of buffer whichever is closer.
130 *
131 * Context:
132 * Same requirements as in sg_miter_next().
133 */
134void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t data)
135{
136 uint32_t *ptr = NULL;
137
138 if (likely(sg_dwiter_get_next_block(miter, &ptr))) {
139 if (ptr)
140 *ptr = data;
141 else
142 return;
143 } else
144 sg_dwiter_write_slow(miter, data);
145
146 if (miter->length == miter->consumed)
147 flush_kernel_dcache_page(miter->page);
148}
149EXPORT_SYMBOL_GPL(cb710_sg_dwiter_write_next_block);
150
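cb710_sg_dwiter_read_next_block() above prefers a fast path (a direct, aligned 4-byte access within the current sg chunk) and drops to the byte-copying slow path only when the word is misaligned or straddles a chunk boundary, zero-padding past the end of the buffer. The split can be sketched over a flat buffer, reducing the iterator state to a simple cursor (an illustrative simplification, not the sg API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cursor {
	const uint8_t *buf;
	size_t len, pos;
};

/* Read the next 32-bit word; bytes past the end are returned as zeroes. */
static uint32_t read_next_u32(struct cursor *c)
{
	uint32_t v = 0;
	size_t left = c->len - c->pos;
	size_t n = left < 4 ? left : 4;

	if (n == 4 && ((uintptr_t)(c->buf + c->pos) & 3) == 0) {
		/* fast path: aligned and fully inside the buffer
		 * (the driver hands back a pointer into the page here) */
		memcpy(&v, c->buf + c->pos, 4);
	} else {
		/* slow path: partial/unaligned copy, remainder stays zero */
		memcpy(&v, c->buf + c->pos, n);
	}
	c->pos += n;
	return v;
}

int main(void)
{
	static const uint8_t data[6] = { 1, 2, 3, 4, 5, 6 };
	struct cursor c = { data, sizeof(data), 0 };

	printf("%08X\n", (unsigned)read_next_u32(&c)); /* 04030201 on little-endian */
	printf("%08X\n", (unsigned)read_next_u32(&c)); /* 00000605: tail zero-padded */
	return 0;
}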
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 3cf61ece71d..348443bdb23 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -119,7 +119,7 @@ enclosure_register(struct device *dev, const char *name, int components,
119 edev->edev.class = &enclosure_class; 119 edev->edev.class = &enclosure_class;
120 edev->edev.parent = get_device(dev); 120 edev->edev.parent = get_device(dev);
121 edev->cb = cb; 121 edev->cb = cb;
122 dev_set_name(&edev->edev, name); 122 dev_set_name(&edev->edev, "%s", name);
123 err = device_register(&edev->edev); 123 err = device_register(&edev->edev);
124 if (err) 124 if (err)
125 goto err; 125 goto err;
@@ -255,8 +255,8 @@ enclosure_component_register(struct enclosure_device *edev,
255 ecomp->number = number; 255 ecomp->number = number;
256 cdev = &ecomp->cdev; 256 cdev = &ecomp->cdev;
257 cdev->parent = get_device(&edev->edev); 257 cdev->parent = get_device(&edev->edev);
258 if (name) 258 if (name && name[0])
259 dev_set_name(cdev, name); 259 dev_set_name(cdev, "%s", name);
260 else 260 else
261 dev_set_name(cdev, "%u", number); 261 dev_set_name(cdev, "%u", number);
262 262
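The enclosure fix is the usual format-string hardening: dev_set_name() takes a printf-style format, so passing a caller-supplied name directly would let any '%' in it be interpreted as a conversion. Routing it through "%s" makes the name inert data. A user-space analogue with a hypothetical variadic set_name() helper:

#include <stdio.h>
#include <stdarg.h>

/* Stand-in for dev_set_name(): the first argument is a format string. */
static void set_name(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	putchar('\n');
}

int main(void)
{
	const char *name = "bay%d";	/* externally supplied, may contain '%' */

	/* set_name(name);     BAD: "%d" would consume a nonexistent argument */
	set_name("%s", name);	/* good: the name is plain data */
	return 0;
}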
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index b25e9b6516a..adc205c49fb 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -147,7 +147,8 @@ struct mmc_blk_request {
147static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) 147static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
148{ 148{
149 int err; 149 int err;
150 __be32 blocks; 150 u32 result;
151 __be32 *blocks;
151 152
152 struct mmc_request mrq; 153 struct mmc_request mrq;
153 struct mmc_command cmd; 154 struct mmc_command cmd;
@@ -199,14 +200,21 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
199 mrq.cmd = &cmd; 200 mrq.cmd = &cmd;
200 mrq.data = &data; 201 mrq.data = &data;
201 202
202 sg_init_one(&sg, &blocks, 4); 203 blocks = kmalloc(4, GFP_KERNEL);
204 if (!blocks)
205 return (u32)-1;
206
207 sg_init_one(&sg, blocks, 4);
203 208
204 mmc_wait_for_req(card->host, &mrq); 209 mmc_wait_for_req(card->host, &mrq);
205 210
211 result = ntohl(*blocks);
212 kfree(blocks);
213
206 if (cmd.error || data.error) 214 if (cmd.error || data.error)
207 return (u32)-1; 215 result = (u32)-1;
208 216
209 return ntohl(blocks); 217 return result;
210} 218}
211 219
212static u32 get_card_status(struct mmc_card *card, struct request *req) 220static u32 get_card_status(struct mmc_card *card, struct request *req)
@@ -243,7 +251,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
243 brq.mrq.cmd = &brq.cmd; 251 brq.mrq.cmd = &brq.cmd;
244 brq.mrq.data = &brq.data; 252 brq.mrq.data = &brq.data;
245 253
246 brq.cmd.arg = req->sector; 254 brq.cmd.arg = blk_rq_pos(req);
247 if (!mmc_card_blockaddr(card)) 255 if (!mmc_card_blockaddr(card))
248 brq.cmd.arg <<= 9; 256 brq.cmd.arg <<= 9;
249 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 257 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
@@ -251,7 +259,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
251 brq.stop.opcode = MMC_STOP_TRANSMISSION; 259 brq.stop.opcode = MMC_STOP_TRANSMISSION;
252 brq.stop.arg = 0; 260 brq.stop.arg = 0;
253 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 261 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
254 brq.data.blocks = req->nr_sectors; 262 brq.data.blocks = blk_rq_sectors(req);
255 263
256 /* 264 /*
257 * The block layer doesn't support all sector count 265 * The block layer doesn't support all sector count
@@ -301,7 +309,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
301 * Adjust the sg list so it is the same size as the 309 * Adjust the sg list so it is the same size as the
302 * request. 310 * request.
303 */ 311 */
304 if (brq.data.blocks != req->nr_sectors) { 312 if (brq.data.blocks != blk_rq_sectors(req)) {
305 int i, data_size = brq.data.blocks << 9; 313 int i, data_size = brq.data.blocks << 9;
306 struct scatterlist *sg; 314 struct scatterlist *sg;
307 315
@@ -352,8 +360,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
352 printk(KERN_ERR "%s: error %d transferring data," 360 printk(KERN_ERR "%s: error %d transferring data,"
353 " sector %u, nr %u, card status %#x\n", 361 " sector %u, nr %u, card status %#x\n",
354 req->rq_disk->disk_name, brq.data.error, 362 req->rq_disk->disk_name, brq.data.error,
355 (unsigned)req->sector, 363 (unsigned)blk_rq_pos(req),
356 (unsigned)req->nr_sectors, status); 364 (unsigned)blk_rq_sectors(req), status);
357 } 365 }
358 366
359 if (brq.stop.error) { 367 if (brq.stop.error) {
@@ -521,7 +529,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
521 529
522 sprintf(md->disk->disk_name, "mmcblk%d", devidx); 530 sprintf(md->disk->disk_name, "mmcblk%d", devidx);
523 531
524 blk_queue_hardsect_size(md->queue.queue, 512); 532 blk_queue_logical_block_size(md->queue.queue, 512);
525 533
526 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { 534 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
527 /* 535 /*
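The mmc_sd_num_wr_blocks() change replaces an on-stack __be32 with a kmalloc'ed buffer because those four bytes are a DMA target (handed to the controller through sg_init_one()); stack memory is not guaranteed to be DMA-safe, while kmalloc memory is. A reduced sketch of the before/after shape, with a hypothetical do_transfer() standing in for the mmc request machinery:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl()/htonl() */

/* Hypothetical stand-in for the controller DMA-ing 4 bytes into 'dst'. */
static int do_transfer(void *dst)
{
	uint32_t wire = htonl(42);	/* device writes big-endian data */

	memcpy(dst, &wire, 4);
	return 0;
}

static uint32_t num_wr_blocks(void)
{
	uint32_t result;
	uint32_t *blocks;

	/* heap, not stack: the buffer must be safe to hand to DMA */
	blocks = malloc(4);
	if (!blocks)
		return (uint32_t)-1;

	if (do_transfer(blocks))
		result = (uint32_t)-1;
	else
		result = ntohl(*blocks);

	free(blocks);	/* free before returning, as the patch does */
	return result;
}

int main(void)
{
	return num_wr_blocks() == 42 ? 0 : 1;
}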
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7a72e75d5c6..49e582356c6 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -55,7 +55,7 @@ static int mmc_queue_thread(void *d)
55 spin_lock_irq(q->queue_lock); 55 spin_lock_irq(q->queue_lock);
56 set_current_state(TASK_INTERRUPTIBLE); 56 set_current_state(TASK_INTERRUPTIBLE);
57 if (!blk_queue_plugged(q)) 57 if (!blk_queue_plugged(q))
58 req = elv_next_request(q); 58 req = blk_fetch_request(q);
59 mq->req = req; 59 mq->req = req;
60 spin_unlock_irq(q->queue_lock); 60 spin_unlock_irq(q->queue_lock);
61 61
@@ -88,16 +88,11 @@ static void mmc_request(struct request_queue *q)
88{ 88{
89 struct mmc_queue *mq = q->queuedata; 89 struct mmc_queue *mq = q->queuedata;
90 struct request *req; 90 struct request *req;
91 int ret;
92 91
93 if (!mq) { 92 if (!mq) {
94 printk(KERN_ERR "MMC: killing requests for dead queue\n"); 93 printk(KERN_ERR "MMC: killing requests for dead queue\n");
95 while ((req = elv_next_request(q)) != NULL) { 94 while ((req = blk_fetch_request(q)) != NULL)
96 do { 95 __blk_end_request_all(req, -EIO);
97 ret = __blk_end_request(req, -EIO,
98 blk_rq_cur_bytes(req));
99 } while (ret);
100 }
101 return; 96 return;
102 } 97 }
103 98
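The queue.c hunk tracks the block-layer API change: blk_fetch_request() dequeues the next request in one call, and __blk_end_request_all() completes it wholesale, so the old loop that ended the request chunk by chunk disappears. The resulting drain-everything-with-an-error pattern in miniature, over a toy linked-list queue (not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct request {
	int id;
	struct request *next;
};

static struct request *queue_head;

/* Analogue of blk_fetch_request(): dequeue and return the next request. */
static struct request *fetch_request(void)
{
	struct request *rq = queue_head;

	if (rq)
		queue_head = rq->next;
	return rq;
}

/* Analogue of __blk_end_request_all(): finish a request in one call. */
static void end_request_all(struct request *rq, int error)
{
	printf("request %d completed with %d\n", rq->id, error);
	free(rq);
}

int main(void)
{
	for (int i = 3; i >= 1; --i) {	/* build a small queue: 1, 2, 3 */
		struct request *rq = malloc(sizeof(*rq));

		rq->id = i;
		rq->next = queue_head;
		queue_head = rq;
	}

	/* the dead-queue path: kill every pending request with -EIO (-5) */
	struct request *rq;

	while ((rq = fetch_request()) != NULL)
		end_request_all(rq, -5);
	return 0;
}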
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 26491173275..d84c880fac8 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -708,7 +708,13 @@ static void mmc_power_up(struct mmc_host *host)
708 */ 708 */
709 mmc_delay(10); 709 mmc_delay(10);
710 710
711 host->ios.clock = host->f_min; 711 if (host->f_min > 400000) {
712 pr_warning("%s: Minimum clock frequency too high for "
713 "identification mode\n", mmc_hostname(host));
714 host->ios.clock = host->f_min;
715 } else
716 host->ios.clock = 400000;
717
712 host->ios.power_mode = MMC_POWER_ON; 718 host->ios.power_mode = MMC_POWER_ON;
713 mmc_set_ios(host); 719 mmc_set_ios(host);
714 720
@@ -855,61 +861,72 @@ void mmc_rescan(struct work_struct *work)
855 861
856 mmc_bus_get(host); 862 mmc_bus_get(host);
857 863
858 if (host->bus_ops == NULL) { 864 /* if there is a card registered, check whether it is still present */
859 /* 865 if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
860 * Only we can add a new handler, so it's safe to 866 host->bus_ops->detect(host);
861 * release the lock here. 867
862 */ 868 mmc_bus_put(host);
869
870
871 mmc_bus_get(host);
872
873 /* if there still is a card present, stop here */
874 if (host->bus_ops != NULL) {
863 mmc_bus_put(host); 875 mmc_bus_put(host);
876 goto out;
877 }
864 878
865 if (host->ops->get_cd && host->ops->get_cd(host) == 0) 879 /* detect a newly inserted card */
866 goto out;
867 880
868 mmc_claim_host(host); 881 /*
882 * Only we can add a new handler, so it's safe to
883 * release the lock here.
884 */
885 mmc_bus_put(host);
869 886
870 mmc_power_up(host); 887 if (host->ops->get_cd && host->ops->get_cd(host) == 0)
871 mmc_go_idle(host); 888 goto out;
872 889
873 mmc_send_if_cond(host, host->ocr_avail); 890 mmc_claim_host(host);
874 891
875 /* 892 mmc_power_up(host);
876 * First we search for SDIO... 893 mmc_go_idle(host);
877 */
878 err = mmc_send_io_op_cond(host, 0, &ocr);
879 if (!err) {
880 if (mmc_attach_sdio(host, ocr))
881 mmc_power_off(host);
882 goto out;
883 }
884 894
885 /* 895 mmc_send_if_cond(host, host->ocr_avail);
886 * ...then normal SD...
887 */
888 err = mmc_send_app_op_cond(host, 0, &ocr);
889 if (!err) {
890 if (mmc_attach_sd(host, ocr))
891 mmc_power_off(host);
892 goto out;
893 }
894 896
895 /* 897 /*
896 * ...and finally MMC. 898 * First we search for SDIO...
897 */ 899 */
898 err = mmc_send_op_cond(host, 0, &ocr); 900 err = mmc_send_io_op_cond(host, 0, &ocr);
899 if (!err) { 901 if (!err) {
900 if (mmc_attach_mmc(host, ocr)) 902 if (mmc_attach_sdio(host, ocr))
901 mmc_power_off(host); 903 mmc_power_off(host);
902 goto out; 904 goto out;
903 } 905 }
904 906
905 mmc_release_host(host); 907 /*
906 mmc_power_off(host); 908 * ...then normal SD...
907 } else { 909 */
908 if (host->bus_ops->detect && !host->bus_dead) 910 err = mmc_send_app_op_cond(host, 0, &ocr);
909 host->bus_ops->detect(host); 911 if (!err) {
912 if (mmc_attach_sd(host, ocr))
913 mmc_power_off(host);
914 goto out;
915 }
910 916
911 mmc_bus_put(host); 917 /*
918 * ...and finally MMC.
919 */
920 err = mmc_send_op_cond(host, 0, &ocr);
921 if (!err) {
922 if (mmc_attach_mmc(host, ocr))
923 mmc_power_off(host);
924 goto out;
912 } 925 }
926
927 mmc_release_host(host);
928 mmc_power_off(host);
929
913out: 930out:
914 if (host->caps & MMC_CAP_NEEDS_POLL) 931 if (host->caps & MMC_CAP_NEEDS_POLL)
915 mmc_schedule_delayed_work(&host->detect, HZ); 932 mmc_schedule_delayed_work(&host->detect, HZ);
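The mmc_power_up() hunk enforces the card-identification clock ceiling: enumeration must run at 400 kHz or below, so the host's f_min is used only when it is (wrongly) above that, with a warning. The clamp reduced to a standalone helper (pick_id_clock() is illustrative, not a kernel function):

#include <stdio.h>

#define ID_CLOCK_HZ 400000	/* maximum clock during card identification */

/* Pick the initial clock the way mmc_power_up() now does. */
static unsigned pick_id_clock(unsigned f_min)
{
	if (f_min > ID_CLOCK_HZ) {
		fprintf(stderr,
			"minimum clock %u Hz too high for identification mode\n",
			f_min);
		return f_min;	/* best we can do; cards may not enumerate */
	}
	return ID_CLOCK_HZ;	/* always identify at 400 kHz, not f_min */
}

int main(void)
{
	printf("%u\n", pick_id_clock(100000));	/* -> 400000 */
	printf("%u\n", pick_id_clock(520000));	/* -> 520000, with a warning */
	return 0;
}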
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index b4cf691f3f6..40111a6d8d5 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -83,6 +83,17 @@ config MMC_SDHCI_OF
83 83
84 If unsure, say N. 84 If unsure, say N.
85 85
86config MMC_SDHCI_PLTFM
87 tristate "SDHCI support on the platform specific bus"
88 depends on MMC_SDHCI
89 help
90	  This selects platform-specific bus support for the Secure Digital
91	  Host Controller Interface.
92
93 If you have a controller with this interface, say Y or M here.
94
95 If unsure, say N.
96
86config MMC_OMAP 97config MMC_OMAP
87 tristate "TI OMAP Multimedia Card Interface support" 98 tristate "TI OMAP Multimedia Card Interface support"
88 depends on ARCH_OMAP 99 depends on ARCH_OMAP
@@ -155,7 +166,7 @@ config MMC_ATMELMCI_DMA
155 166
156config MMC_IMX 167config MMC_IMX
157 tristate "Motorola i.MX Multimedia Card Interface support" 168 tristate "Motorola i.MX Multimedia Card Interface support"
158 depends on ARCH_IMX 169 depends on ARCH_MX1
159 help 170 help
160 This selects the Motorola i.MX Multimedia card Interface. 171 This selects the Motorola i.MX Multimedia card Interface.
161 If you have a i.MX platform with a Multimedia Card slot, 172 If you have a i.MX platform with a Multimedia Card slot,
@@ -237,7 +248,20 @@ config MMC_SDRICOH_CS
237 248
238config MMC_TMIO 249config MMC_TMIO
239 tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support" 250 tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support"
240 depends on MFD_TMIO 251 depends on MFD_TMIO || MFD_ASIC3
241 help 252 help
242 This provides support for the SD/MMC cell found in TC6393XB, 253 This provides support for the SD/MMC cell found in TC6393XB,
243 T7L66XB and also ipaq ASIC3 254 T7L66XB and also HTC ASIC3
255
256config MMC_CB710
257 tristate "ENE CB710 MMC/SD Interface support"
258 depends on PCI
259 select CB710_CORE
260 help
261	  This option enables support for the MMC/SD part of the ENE
262	  CB710/720 Flash memory card reader found in some laptops
263	  (e.g. some versions of the HP Compaq nx9500).
264
265 This driver can also be built as a module. If so, the module
266 will be called cb710-mmc.
267
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 970a997620e..79da397c5fe 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_MMC_SDHCI) += sdhci.o
14obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o 14obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
15obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o 15obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
16obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o 16obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
17obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
17obj-$(CONFIG_MMC_WBSD) += wbsd.o 18obj-$(CONFIG_MMC_WBSD) += wbsd.o
18obj-$(CONFIG_MMC_AU1X) += au1xmmc.o 19obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
19obj-$(CONFIG_MMC_OMAP) += omap.o 20obj-$(CONFIG_MMC_OMAP) += omap.o
@@ -29,4 +30,8 @@ endif
29obj-$(CONFIG_MMC_S3C) += s3cmci.o 30obj-$(CONFIG_MMC_S3C) += s3cmci.o
30obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o 31obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
31obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o 32obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
33obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
32 34
35ifeq ($(CONFIG_CB710_DEBUG),y)
36	CFLAGS_cb710-mmc.o += -DDEBUG
37endif
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
index b58364ed6bb..fc8a0fe7c5c 100644
--- a/drivers/mmc/host/atmel-mci-regs.h
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -7,6 +7,12 @@
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10
11/*
12 * Superset of MCI IP registers integrated in Atmel AVR32 and AT91 Processors
13 * Registers and bitfields marked with [2] are only available in MCI2
14 */
15
10#ifndef __DRIVERS_MMC_ATMEL_MCI_H__ 16#ifndef __DRIVERS_MMC_ATMEL_MCI_H__
11#define __DRIVERS_MMC_ATMEL_MCI_H__ 17#define __DRIVERS_MMC_ATMEL_MCI_H__
12 18
@@ -14,11 +20,17 @@
14#define MCI_CR 0x0000 /* Control */ 20#define MCI_CR 0x0000 /* Control */
15# define MCI_CR_MCIEN ( 1 << 0) /* MCI Enable */ 21# define MCI_CR_MCIEN ( 1 << 0) /* MCI Enable */
16# define MCI_CR_MCIDIS ( 1 << 1) /* MCI Disable */ 22# define MCI_CR_MCIDIS ( 1 << 1) /* MCI Disable */
23# define MCI_CR_PWSEN ( 1 << 2) /* Power Save Enable */
24# define MCI_CR_PWSDIS ( 1 << 3) /* Power Save Disable */
17# define MCI_CR_SWRST ( 1 << 7) /* Software Reset */ 25# define MCI_CR_SWRST ( 1 << 7) /* Software Reset */
18#define MCI_MR 0x0004 /* Mode */ 26#define MCI_MR 0x0004 /* Mode */
19# define MCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */ 27# define MCI_MR_CLKDIV(x) ((x) << 0) /* Clock Divider */
28# define MCI_MR_PWSDIV(x) ((x) << 8) /* Power Saving Divider */
20# define MCI_MR_RDPROOF ( 1 << 11) /* Read Proof */ 29# define MCI_MR_RDPROOF ( 1 << 11) /* Read Proof */
21# define MCI_MR_WRPROOF ( 1 << 12) /* Write Proof */ 30# define MCI_MR_WRPROOF ( 1 << 12) /* Write Proof */
31# define MCI_MR_PDCFBYTE ( 1 << 13) /* Force Byte Transfer */
32# define MCI_MR_PDCPADV ( 1 << 14) /* Padding Value */
33# define MCI_MR_PDCMODE ( 1 << 15) /* PDC-oriented Mode */
22#define MCI_DTOR 0x0008 /* Data Timeout */ 34#define MCI_DTOR 0x0008 /* Data Timeout */
23# define MCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */ 35# define MCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */
24# define MCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */ 36# define MCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */
@@ -28,6 +40,7 @@
28# define MCI_SDCSEL_MASK ( 3 << 0) 40# define MCI_SDCSEL_MASK ( 3 << 0)
29# define MCI_SDCBUS_1BIT ( 0 << 6) /* 1-bit data bus */ 41# define MCI_SDCBUS_1BIT ( 0 << 6) /* 1-bit data bus */
30# define MCI_SDCBUS_4BIT ( 2 << 6) /* 4-bit data bus */ 42# define MCI_SDCBUS_4BIT ( 2 << 6) /* 4-bit data bus */
43# define MCI_SDCBUS_8BIT ( 3 << 6) /* 8-bit data bus[2] */
31# define MCI_SDCBUS_MASK ( 3 << 6) 44# define MCI_SDCBUS_MASK ( 3 << 6)
32#define MCI_ARGR 0x0010 /* Command Argument */ 45#define MCI_ARGR 0x0010 /* Command Argument */
33#define MCI_CMDR 0x0014 /* Command */ 46#define MCI_CMDR 0x0014 /* Command */
@@ -56,6 +69,9 @@
56#define MCI_BLKR 0x0018 /* Block */ 69#define MCI_BLKR 0x0018 /* Block */
57# define MCI_BCNT(x) ((x) << 0) /* Data Block Count */ 70# define MCI_BCNT(x) ((x) << 0) /* Data Block Count */
58# define MCI_BLKLEN(x) ((x) << 16) /* Data Block Length */ 71# define MCI_BLKLEN(x) ((x) << 16) /* Data Block Length */
72#define MCI_CSTOR 0x001c /* Completion Signal Timeout[2] */
73# define MCI_CSTOCYC(x) ((x) << 0) /* CST cycles */
74# define MCI_CSTOMUL(x) ((x) << 4) /* CST multiplier */
59#define MCI_RSPR 0x0020 /* Response 0 */ 75#define MCI_RSPR 0x0020 /* Response 0 */
60#define MCI_RSPR1 0x0024 /* Response 1 */ 76#define MCI_RSPR1 0x0024 /* Response 1 */
61#define MCI_RSPR2 0x0028 /* Response 2 */ 77#define MCI_RSPR2 0x0028 /* Response 2 */
@@ -83,7 +99,24 @@
83# define MCI_DTOE ( 1 << 22) /* Data Time-Out Error */ 99# define MCI_DTOE ( 1 << 22) /* Data Time-Out Error */
84# define MCI_OVRE ( 1 << 30) /* RX Overrun Error */ 100# define MCI_OVRE ( 1 << 30) /* RX Overrun Error */
85# define MCI_UNRE ( 1 << 31) /* TX Underrun Error */ 101# define MCI_UNRE ( 1 << 31) /* TX Underrun Error */
102#define MCI_DMA 0x0050 /* DMA Configuration[2] */
103# define MCI_DMA_OFFSET(x) ((x) << 0) /* DMA Write Buffer Offset */
104# define MCI_DMA_CHKSIZE(x) ((x) << 4) /* DMA Channel Read and Write Chunk Size */
105# define MCI_DMAEN ( 1 << 8) /* DMA Hardware Handshaking Enable */
106#define MCI_CFG 0x0054 /* Configuration[2] */
107# define MCI_CFG_FIFOMODE_1DATA ( 1 << 0) /* MCI Internal FIFO control mode */
108# define MCI_CFG_FERRCTRL_COR ( 1 << 4) /* Flow Error flag reset control mode */
109# define MCI_CFG_HSMODE ( 1 << 8) /* High Speed Mode */
110# define MCI_CFG_LSYNC ( 1 << 12) /* Synchronize on the last block */
111#define MCI_WPMR 0x00e4 /* Write Protection Mode[2] */
112# define MCI_WP_EN ( 1 << 0) /* WP Enable */
113# define MCI_WP_KEY (0x4d4349 << 8) /* WP Key */
114#define MCI_WPSR 0x00e8 /* Write Protection Status[2] */
115# define MCI_GET_WP_VS(x) ((x) & 0x0f)
116# define MCI_GET_WP_VSRC(x) (((x) >> 8) & 0xffff)
117#define MCI_FIFO_APERTURE 0x0200 /* FIFO Aperture[2] */
86 118
119/* This is not including the FIFO Aperture on MCI2 */
87#define MCI_REGS_SIZE 0x100 120#define MCI_REGS_SIZE 0x100
88 121
89/* Register access macros */ 122/* Register access macros */
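For the MCI_MR_CLKDIV(x) field above, Atmel's MCI datasheets give f_mci = f_mck / (2 * (CLKDIV + 1)), so choosing the fastest clock that does not exceed a request is a ceiling division. A sketch under that assumption (mci_clkdiv() is a made-up helper name):

#include <stdio.h>

/* f_mci = f_mck / (2 * (clkdiv + 1)), clkdiv in 0..255 */
static unsigned mci_clkdiv(unsigned long mck_hz, unsigned long want_hz)
{
	unsigned long div;

	/* round up so the resulting clock never exceeds the request */
	div = (mck_hz + 2 * want_hz - 1) / (2 * want_hz);
	if (div < 1)
		div = 1;
	if (div > 256)
		div = 256;
	return (unsigned)(div - 1);	/* value for MCI_MR_CLKDIV() */
}

int main(void)
{
	unsigned long mck = 60000000;	/* e.g. a 60 MHz master clock */
	unsigned clkdiv = mci_clkdiv(mck, 400000);	/* identification clock */

	printf("CLKDIV=%u -> %lu Hz\n", clkdiv, mck / (2 * (clkdiv + 1ul)));
	return 0;
}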
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index cf6a100bb38..7b603e4b41d 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -177,6 +177,7 @@ struct atmel_mci {
177 * available. 177 * available.
178 * @wp_pin: GPIO pin used for card write protect sending, or negative 178 * @wp_pin: GPIO pin used for card write protect sending, or negative
179 * if not available. 179 * if not available.
180 * @detect_is_active_high: The state of the detect pin when it is active.
180 * @detect_timer: Timer used for debouncing @detect_pin interrupts. 181 * @detect_timer: Timer used for debouncing @detect_pin interrupts.
181 */ 182 */
182struct atmel_mci_slot { 183struct atmel_mci_slot {
@@ -196,6 +197,7 @@ struct atmel_mci_slot {
196 197
197 int detect_pin; 198 int detect_pin;
198 int wp_pin; 199 int wp_pin;
200 bool detect_is_active_high;
199 201
200 struct timer_list detect_timer; 202 struct timer_list detect_timer;
201}; 203};
@@ -924,7 +926,8 @@ static int atmci_get_cd(struct mmc_host *mmc)
924 struct atmel_mci_slot *slot = mmc_priv(mmc); 926 struct atmel_mci_slot *slot = mmc_priv(mmc);
925 927
926 if (gpio_is_valid(slot->detect_pin)) { 928 if (gpio_is_valid(slot->detect_pin)) {
927 present = !gpio_get_value(slot->detect_pin); 929 present = !(gpio_get_value(slot->detect_pin) ^
930 slot->detect_is_active_high);
928 dev_dbg(&mmc->class_dev, "card is %spresent\n", 931 dev_dbg(&mmc->class_dev, "card is %spresent\n",
929 present ? "" : "not "); 932 present ? "" : "not ");
930 } 933 }
@@ -1028,7 +1031,8 @@ static void atmci_detect_change(unsigned long data)
1028 return; 1031 return;
1029 1032
1030 enable_irq(gpio_to_irq(slot->detect_pin)); 1033 enable_irq(gpio_to_irq(slot->detect_pin));
1031 present = !gpio_get_value(slot->detect_pin); 1034 present = !(gpio_get_value(slot->detect_pin) ^
1035 slot->detect_is_active_high);
1032 present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags); 1036 present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
1033 1037
1034 dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n", 1038 dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
@@ -1456,6 +1460,7 @@ static int __init atmci_init_slot(struct atmel_mci *host,
1456 slot->host = host; 1460 slot->host = host;
1457 slot->detect_pin = slot_data->detect_pin; 1461 slot->detect_pin = slot_data->detect_pin;
1458 slot->wp_pin = slot_data->wp_pin; 1462 slot->wp_pin = slot_data->wp_pin;
1463 slot->detect_is_active_high = slot_data->detect_is_active_high;
1459 slot->sdc_reg = sdc_reg; 1464 slot->sdc_reg = sdc_reg;
1460 1465
1461 mmc->ops = &atmci_ops; 1466 mmc->ops = &atmci_ops;
@@ -1477,7 +1482,8 @@ static int __init atmci_init_slot(struct atmel_mci *host,
1477 if (gpio_request(slot->detect_pin, "mmc_detect")) { 1482 if (gpio_request(slot->detect_pin, "mmc_detect")) {
1478 dev_dbg(&mmc->class_dev, "no detect pin available\n"); 1483 dev_dbg(&mmc->class_dev, "no detect pin available\n");
1479 slot->detect_pin = -EBUSY; 1484 slot->detect_pin = -EBUSY;
1480 } else if (gpio_get_value(slot->detect_pin)) { 1485 } else if (gpio_get_value(slot->detect_pin) ^
1486 slot->detect_is_active_high) {
1481 clear_bit(ATMCI_CARD_PRESENT, &slot->flags); 1487 clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1482 } 1488 }
1483 } 1489 }
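The detect_is_active_high conversion works because XOR with 0 is the identity: present = !(value ^ active_high) degenerates to the old !value for active-low pins and inverts cleanly for active-high ones. The full truth table, as a standalone check:

#include <stdbool.h>
#include <stdio.h>

/* present = !(gpio_value ^ active_high), as in atmci_get_cd() */
static bool card_present(int gpio_value, bool active_high)
{
	return !(gpio_value ^ active_high);
}

int main(void)
{
	for (int ah = 0; ah <= 1; ++ah)
		for (int v = 0; v <= 1; ++v)
			printf("active_high=%d pin=%d -> %spresent\n",
			       ah, v, card_present(v, ah) ? "" : "not ");
	return 0;
}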
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
new file mode 100644
index 00000000000..11efefb1af5
--- /dev/null
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -0,0 +1,804 @@
1/*
2 * cb710/mmc.c
3 *
4 * Copyright by Michał Mirosław, 2008-2009
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/pci.h>
14#include <linux/delay.h>
15#include "cb710-mmc.h"
16
17static const u8 cb710_clock_divider_log2[8] = {
18/* 1, 2, 4, 8, 16, 32, 128, 512 */
19 0, 1, 2, 3, 4, 5, 7, 9
20};
21#define CB710_MAX_DIVIDER_IDX \
22 (ARRAY_SIZE(cb710_clock_divider_log2) - 1)
23
24static const u8 cb710_src_freq_mhz[16] = {
25 33, 10, 20, 25, 30, 35, 40, 45,
26 50, 55, 60, 65, 70, 75, 80, 85
27};
28
29static void cb710_mmc_set_clock(struct mmc_host *mmc, int hz)
30{
31 struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
32 struct pci_dev *pdev = cb710_slot_to_chip(slot)->pdev;
33 u32 src_freq_idx;
34 u32 divider_idx;
35 int src_hz;
36
37	/* this is magic, unverifiable for me, unless I get an
38	 * MMC card with cables connected to the bus signals */
39 pci_read_config_dword(pdev, 0x48, &src_freq_idx);
40 src_freq_idx = (src_freq_idx >> 16) & 0xF;
41 src_hz = cb710_src_freq_mhz[src_freq_idx] * 1000000;
42
43 for (divider_idx = 0; divider_idx < CB710_MAX_DIVIDER_IDX; ++divider_idx) {
44 if (hz >= src_hz >> cb710_clock_divider_log2[divider_idx])
45 break;
46 }
47
48 if (src_freq_idx)
49 divider_idx |= 0x8;
50
51 cb710_pci_update_config_reg(pdev, 0x40, ~0xF0000000, divider_idx << 28);
52
53 dev_dbg(cb710_slot_dev(slot),
54 "clock set to %d Hz, wanted %d Hz; flag = %d\n",
55 src_hz >> cb710_clock_divider_log2[divider_idx & 7],
56 hz, (divider_idx & 8) != 0);
57}
58
59static void __cb710_mmc_enable_irq(struct cb710_slot *slot,
60 unsigned short enable, unsigned short mask)
61{
62 /* clear global IE
63 * - it gets set later if any interrupt sources are enabled */
64 mask |= CB710_MMC_IE_IRQ_ENABLE;
65
66	/* it looks like the interrupt is fired whenever
67	 * WORD[0x0C] & WORD[0x10] != 0;
68	 * -> bit 15 of port 0x0C seems to be the global interrupt enable
69 */
70
71 enable = (cb710_read_port_16(slot, CB710_MMC_IRQ_ENABLE_PORT)
72 & ~mask) | enable;
73
74 if (enable)
75 enable |= CB710_MMC_IE_IRQ_ENABLE;
76
77 cb710_write_port_16(slot, CB710_MMC_IRQ_ENABLE_PORT, enable);
78}
79
80static void cb710_mmc_enable_irq(struct cb710_slot *slot,
81 unsigned short enable, unsigned short mask)
82{
83 struct cb710_mmc_reader *reader = mmc_priv(cb710_slot_to_mmc(slot));
84 unsigned long flags;
85
86 spin_lock_irqsave(&reader->irq_lock, flags);
87 /* this is the only thing irq_lock protects */
88 __cb710_mmc_enable_irq(slot, enable, mask);
89 spin_unlock_irqrestore(&reader->irq_lock, flags);
90}
91
92static void cb710_mmc_reset_events(struct cb710_slot *slot)
93{
94 cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT, 0xFF);
95 cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT, 0xFF);
96 cb710_write_port_8(slot, CB710_MMC_STATUS2_PORT, 0xFF);
97}
98
99static int cb710_mmc_is_card_inserted(struct cb710_slot *slot)
100{
101 return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT)
102 & CB710_MMC_S3_CARD_DETECTED;
103}
104
105static void cb710_mmc_enable_4bit_data(struct cb710_slot *slot, int enable)
106{
107 dev_dbg(cb710_slot_dev(slot), "configuring %d-data-line%s mode\n",
108 enable ? 4 : 1, enable ? "s" : "");
109 if (enable)
110 cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT,
111 CB710_MMC_C1_4BIT_DATA_BUS, 0);
112 else
113 cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT,
114 0, CB710_MMC_C1_4BIT_DATA_BUS);
115}
116
117static int cb710_check_event(struct cb710_slot *slot, u8 what)
118{
119 u16 status;
120
121 status = cb710_read_port_16(slot, CB710_MMC_STATUS_PORT);
122
123 if (status & CB710_MMC_S0_FIFO_UNDERFLOW) {
124 /* it is just a guess, so log it */
125 dev_dbg(cb710_slot_dev(slot),
126 "CHECK : ignoring bit 6 in status %04X\n", status);
127 cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT,
128 CB710_MMC_S0_FIFO_UNDERFLOW);
129 status &= ~CB710_MMC_S0_FIFO_UNDERFLOW;
130 }
131
132 if (status & CB710_MMC_STATUS_ERROR_EVENTS) {
133 dev_dbg(cb710_slot_dev(slot),
134 "CHECK : returning EIO on status %04X\n", status);
135 cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT, status & 0xFF);
136 cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT,
137 CB710_MMC_S1_RESET);
138 return -EIO;
139 }
140
141 /* 'what' is a bit in MMC_STATUS1 */
142 if ((status >> 8) & what) {
143 cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT, what);
144 return 1;
145 }
146
147 return 0;
148}
149
150static int cb710_wait_for_event(struct cb710_slot *slot, u8 what)
151{
152 int err = 0;
153 unsigned limit = 2000000; /* FIXME: real timeout */
154
155#ifdef CONFIG_CB710_DEBUG
156 u32 e, x;
157 e = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
158#endif
159
160 while (!(err = cb710_check_event(slot, what))) {
161 if (!--limit) {
162 cb710_dump_regs(cb710_slot_to_chip(slot),
163 CB710_DUMP_REGS_MMC);
164 err = -ETIMEDOUT;
165 break;
166 }
167 udelay(1);
168 }
169
170#ifdef CONFIG_CB710_DEBUG
171 x = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
172
173 limit = 2000000 - limit;
174 if (limit > 100)
175 dev_dbg(cb710_slot_dev(slot),
176 "WAIT10: waited %d loops, what %d, entry val %08X, exit val %08X\n",
177 limit, what, e, x);
178#endif
179 return err < 0 ? err : 0;
180}
181
182
183static int cb710_wait_while_busy(struct cb710_slot *slot, uint8_t mask)
184{
185 unsigned limit = 500000; /* FIXME: real timeout */
186 int err = 0;
187
188#ifdef CONFIG_CB710_DEBUG
189 u32 e, x;
190 e = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
191#endif
192
193 while (cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT) & mask) {
194 if (!--limit) {
195 cb710_dump_regs(cb710_slot_to_chip(slot),
196 CB710_DUMP_REGS_MMC);
197 err = -ETIMEDOUT;
198 break;
199 }
200 udelay(1);
201 }
202
203#ifdef CONFIG_CB710_DEBUG
204 x = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
205
206 limit = 500000 - limit;
207 if (limit > 100)
208 dev_dbg(cb710_slot_dev(slot),
209 "WAIT12: waited %d loops, mask %02X, entry val %08X, exit val %08X\n",
210 limit, mask, e, x);
211#endif
212	return err;
213}
214
215static void cb710_mmc_set_transfer_size(struct cb710_slot *slot,
216 size_t count, size_t blocksize)
217{
218 cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
219 cb710_write_port_32(slot, CB710_MMC_TRANSFER_SIZE_PORT,
220 ((count - 1) << 16)|(blocksize - 1));
221
222 dev_vdbg(cb710_slot_dev(slot), "set up for %zu block%s of %zu bytes\n",
223 count, count == 1 ? "" : "s", blocksize);
224}
225
226static void cb710_mmc_fifo_hack(struct cb710_slot *slot)
227{
228	/* without this, received data is prepended with 8 bytes of zeroes */
229 u32 r1, r2;
230 int ok = 0;
231
232 r1 = cb710_read_port_32(slot, CB710_MMC_DATA_PORT);
233 r2 = cb710_read_port_32(slot, CB710_MMC_DATA_PORT);
234 if (cb710_read_port_8(slot, CB710_MMC_STATUS0_PORT)
235 & CB710_MMC_S0_FIFO_UNDERFLOW) {
236 cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT,
237 CB710_MMC_S0_FIFO_UNDERFLOW);
238 ok = 1;
239 }
240
241 dev_dbg(cb710_slot_dev(slot),
242 "FIFO-read-hack: expected STATUS0 bit was %s\n",
243 ok ? "set." : "NOT SET!");
244 dev_dbg(cb710_slot_dev(slot),
245 "FIFO-read-hack: dwords ignored: %08X %08X - %s\n",
246 r1, r2, (r1|r2) ? "BAD (NOT ZERO)!" : "ok");
247}
248
249static int cb710_mmc_receive_pio(struct cb710_slot *slot,
250 struct sg_mapping_iter *miter, size_t dw_count)
251{
252 if (!(cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT) & CB710_MMC_S2_FIFO_READY)) {
253 int err = cb710_wait_for_event(slot,
254 CB710_MMC_S1_PIO_TRANSFER_DONE);
255 if (err)
256 return err;
257 }
258
259 cb710_sg_dwiter_write_from_io(miter,
260 slot->iobase + CB710_MMC_DATA_PORT, dw_count);
261
262 return 0;
263}
264
265static bool cb710_is_transfer_size_supported(struct mmc_data *data)
266{
267 return !(data->blksz & 15 && (data->blocks != 1 || data->blksz != 8));
268}
269
270static int cb710_mmc_receive(struct cb710_slot *slot, struct mmc_data *data)
271{
272 struct sg_mapping_iter miter;
273 size_t len, blocks = data->blocks;
274 int err = 0;
275
276	/* TODO: I don't know how/if the hardware handles non-16B-boundary blocks,
277	 * other than a single 8B block */
278 if (unlikely(data->blksz & 15 && (data->blocks != 1 || data->blksz != 8)))
279 return -EINVAL;
280
281 sg_miter_start(&miter, data->sg, data->sg_len, 0);
282
283 cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT,
284 15, CB710_MMC_C2_READ_PIO_SIZE_MASK);
285
286 cb710_mmc_fifo_hack(slot);
287
288 while (blocks-- > 0) {
289 len = data->blksz;
290
291 while (len >= 16) {
292 err = cb710_mmc_receive_pio(slot, &miter, 4);
293 if (err)
294 goto out;
295 len -= 16;
296 }
297
298 if (!len)
299 continue;
300
301 cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT,
302 len - 1, CB710_MMC_C2_READ_PIO_SIZE_MASK);
303
304 len = (len >= 8) ? 4 : 2;
305 err = cb710_mmc_receive_pio(slot, &miter, len);
306 if (err)
307 goto out;
308 }
309out:
310 cb710_sg_miter_stop_writing(&miter);
311 return err;
312}
313
314static int cb710_mmc_send(struct cb710_slot *slot, struct mmc_data *data)
315{
316 struct sg_mapping_iter miter;
317 size_t len, blocks = data->blocks;
318 int err = 0;
319
320 /* TODO: I don't know how/if the hardware handles multiple
321 * non-16B-boundary blocks */
322 if (unlikely(data->blocks > 1 && data->blksz & 15))
323 return -EINVAL;
324
325 sg_miter_start(&miter, data->sg, data->sg_len, 0);
326
327 cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT,
328 0, CB710_MMC_C2_READ_PIO_SIZE_MASK);
329
330 while (blocks-- > 0) {
331 len = (data->blksz + 15) >> 4;
332 do {
333 if (!(cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT)
334 & CB710_MMC_S2_FIFO_EMPTY)) {
335 err = cb710_wait_for_event(slot,
336 CB710_MMC_S1_PIO_TRANSFER_DONE);
337 if (err)
338 goto out;
339 }
340 cb710_sg_dwiter_read_to_io(&miter,
341 slot->iobase + CB710_MMC_DATA_PORT, 4);
342 } while (--len);
343 }
344out:
345 sg_miter_stop(&miter);
346 return err;
347}
348
349static u16 cb710_encode_cmd_flags(struct cb710_mmc_reader *reader,
350 struct mmc_command *cmd)
351{
352 unsigned int flags = cmd->flags;
353 u16 cb_flags = 0;
354
355	/* The Windows driver returned 0 for commands for which no response
356	 * is expected. As it happens, only two such commands were ever
357	 * used: MMC_GO_IDLE_STATE and MMC_GO_INACTIVE_STATE, so it might
358	 * as well be a bug in that driver.
359	 *
360	 * The original driver set bit 14 for MMC/SD application
361	 * commands. There's no difference 'on the wire' and
362	 * it apparently works without it anyway.
363 */
364
365 switch (flags & MMC_CMD_MASK) {
366 case MMC_CMD_AC: cb_flags = CB710_MMC_CMD_AC; break;
367 case MMC_CMD_ADTC: cb_flags = CB710_MMC_CMD_ADTC; break;
368 case MMC_CMD_BC: cb_flags = CB710_MMC_CMD_BC; break;
369 case MMC_CMD_BCR: cb_flags = CB710_MMC_CMD_BCR; break;
370 }
371
372 if (flags & MMC_RSP_BUSY)
373 cb_flags |= CB710_MMC_RSP_BUSY;
374
375 cb_flags |= cmd->opcode << CB710_MMC_CMD_CODE_SHIFT;
376
377 if (cmd->data && (cmd->data->flags & MMC_DATA_READ))
378 cb_flags |= CB710_MMC_DATA_READ;
379
380 if (flags & MMC_RSP_PRESENT) {
381 /* Windows driver set 01 at bits 4,3 except for
382 * MMC_SET_BLOCKLEN where it set 10. Maybe the
383 * hardware can do something special about this
384 * command? The original driver looks buggy/incomplete
385 * anyway so we ignore this for now.
386 *
387 * I assume that 00 here means no response is expected.
388 */
389 cb_flags |= CB710_MMC_RSP_PRESENT;
390
391 if (flags & MMC_RSP_136)
392 cb_flags |= CB710_MMC_RSP_136;
393 if (!(flags & MMC_RSP_CRC))
394 cb_flags |= CB710_MMC_RSP_NO_CRC;
395 }
396
397 return cb_flags;
398}
399
400static void cb710_receive_response(struct cb710_slot *slot,
401 struct mmc_command *cmd)
402{
403 unsigned rsp_opcode, wanted_opcode;
404
405 /* Looks like final byte with CRC is always stripped (same as SDHCI) */
406 if (cmd->flags & MMC_RSP_136) {
407 u32 resp[4];
408
409 resp[0] = cb710_read_port_32(slot, CB710_MMC_RESPONSE3_PORT);
410 resp[1] = cb710_read_port_32(slot, CB710_MMC_RESPONSE2_PORT);
411 resp[2] = cb710_read_port_32(slot, CB710_MMC_RESPONSE1_PORT);
412 resp[3] = cb710_read_port_32(slot, CB710_MMC_RESPONSE0_PORT);
413 rsp_opcode = resp[0] >> 24;
414
415 cmd->resp[0] = (resp[0] << 8)|(resp[1] >> 24);
416 cmd->resp[1] = (resp[1] << 8)|(resp[2] >> 24);
417 cmd->resp[2] = (resp[2] << 8)|(resp[3] >> 24);
418 cmd->resp[3] = (resp[3] << 8);
419 } else {
420 rsp_opcode = cb710_read_port_32(slot, CB710_MMC_RESPONSE1_PORT) & 0x3F;
421 cmd->resp[0] = cb710_read_port_32(slot, CB710_MMC_RESPONSE0_PORT);
422 }
423
424 wanted_opcode = (cmd->flags & MMC_RSP_OPCODE) ? cmd->opcode : 0x3F;
425 if (rsp_opcode != wanted_opcode)
426 cmd->error = -EILSEQ;
427}
428
429static int cb710_mmc_transfer_data(struct cb710_slot *slot,
430 struct mmc_data *data)
431{
432 int error, to;
433
434 if (data->flags & MMC_DATA_READ)
435 error = cb710_mmc_receive(slot, data);
436 else
437 error = cb710_mmc_send(slot, data);
438
439 to = cb710_wait_for_event(slot, CB710_MMC_S1_DATA_TRANSFER_DONE);
440 if (!error)
441 error = to;
442
443 if (!error)
444 data->bytes_xfered = data->blksz * data->blocks;
445 return error;
446}
447
448static int cb710_mmc_command(struct mmc_host *mmc, struct mmc_command *cmd)
449{
450 struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
451 struct cb710_mmc_reader *reader = mmc_priv(mmc);
452 struct mmc_data *data = cmd->data;
453
454 u16 cb_cmd = cb710_encode_cmd_flags(reader, cmd);
455 dev_dbg(cb710_slot_dev(slot), "cmd request: 0x%04X\n", cb_cmd);
456
457 if (data) {
458 if (!cb710_is_transfer_size_supported(data)) {
459 data->error = -EINVAL;
460 return -1;
461 }
462 cb710_mmc_set_transfer_size(slot, data->blocks, data->blksz);
463 }
464
465 cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20|CB710_MMC_S2_BUSY_10);
466 cb710_write_port_16(slot, CB710_MMC_CMD_TYPE_PORT, cb_cmd);
467 cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
468 cb710_write_port_32(slot, CB710_MMC_CMD_PARAM_PORT, cmd->arg);
469 cb710_mmc_reset_events(slot);
470 cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
471 cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x01, 0);
472
473 cmd->error = cb710_wait_for_event(slot, CB710_MMC_S1_COMMAND_SENT);
474 if (cmd->error)
475 return -1;
476
477 if (cmd->flags & MMC_RSP_PRESENT) {
478 cb710_receive_response(slot, cmd);
479 if (cmd->error)
480 return -1;
481 }
482
483 if (data)
484 data->error = cb710_mmc_transfer_data(slot, data);
485 return 0;
486}
487
488static void cb710_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
489{
490 struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
491 struct cb710_mmc_reader *reader = mmc_priv(mmc);
492
493 WARN_ON(reader->mrq != NULL);
494
495 reader->mrq = mrq;
496 cb710_mmc_enable_irq(slot, CB710_MMC_IE_TEST_MASK, 0);
497
498 if (cb710_mmc_is_card_inserted(slot)) {
499 if (!cb710_mmc_command(mmc, mrq->cmd) && mrq->stop)
500 cb710_mmc_command(mmc, mrq->stop);
501 mdelay(1);
502 } else {
503 mrq->cmd->error = -ENOMEDIUM;
504 }
505
506 tasklet_schedule(&reader->finish_req_tasklet);
507}
508
509static int cb710_mmc_powerup(struct cb710_slot *slot)
510{
511#ifdef CONFIG_CB710_DEBUG
512 struct cb710_chip *chip = cb710_slot_to_chip(slot);
513#endif
514 int err;
515
516 /* a lot of magic; see comment in cb710_mmc_set_clock() */
517 dev_dbg(cb710_slot_dev(slot), "bus powerup\n");
518 cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
519 err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
520 if (unlikely(err))
521 return err;
522 cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0x80, 0);
523 cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0x80, 0);
524 cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
525 mdelay(1);
526 dev_dbg(cb710_slot_dev(slot), "after delay 1\n");
527 cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
528 err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
529 if (unlikely(err))
530 return err;
531 cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0x09, 0);
532 cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
533 mdelay(1);
534 dev_dbg(cb710_slot_dev(slot), "after delay 2\n");
535 cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
536 err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
537 if (unlikely(err))
538 return err;
539 cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0, 0x08);
540 cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
541 mdelay(2);
542 dev_dbg(cb710_slot_dev(slot), "after delay 3\n");
543 cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
544 cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x06, 0);
545 cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0x70, 0);
546 cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT, 0x80, 0);
547 cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0x03, 0);
548 cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
549 err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20);
550 if (unlikely(err))
551 return err;
552	/* This port behaves weirdly: quick byte reads of 0x08,0x09 return
553 * 0xFF,0x00 after writing 0xFFFF to 0x08; it works correctly when
554 * read/written from userspace... What am I missing here?
555 * (it doesn't depend on write-to-read delay) */
556 cb710_write_port_16(slot, CB710_MMC_CONFIGB_PORT, 0xFFFF);
557 cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x06, 0);
558 cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
559 dev_dbg(cb710_slot_dev(slot), "bus powerup finished\n");
560
561 return cb710_check_event(slot, 0);
562}
563
564static void cb710_mmc_powerdown(struct cb710_slot *slot)
565{
566 cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0, 0x81);
567 cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0, 0x80);
568}
569
570static void cb710_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
571{
572 struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
573 struct cb710_mmc_reader *reader = mmc_priv(mmc);
574 int err;
575
576 cb710_mmc_set_clock(mmc, ios->clock);
577
578 if (!cb710_mmc_is_card_inserted(slot)) {
579 dev_dbg(cb710_slot_dev(slot),
580 "no card inserted - ignoring bus powerup request\n");
581 ios->power_mode = MMC_POWER_OFF;
582 }
583
584 if (ios->power_mode != reader->last_power_mode)
585 switch (ios->power_mode) {
586 case MMC_POWER_ON:
587 err = cb710_mmc_powerup(slot);
588 if (err) {
589 dev_warn(cb710_slot_dev(slot),
590					"powerup failed (%d) - retrying\n", err);
591 cb710_mmc_powerdown(slot);
592 udelay(1);
593 err = cb710_mmc_powerup(slot);
594 if (err)
595 dev_warn(cb710_slot_dev(slot),
596 "powerup retry failed (%d) - expect errors\n",
597 err);
598 }
599 reader->last_power_mode = MMC_POWER_ON;
600 break;
601 case MMC_POWER_OFF:
602 cb710_mmc_powerdown(slot);
603 reader->last_power_mode = MMC_POWER_OFF;
604 break;
605 case MMC_POWER_UP:
606 default:
607 /* ignore */;
608 }
609
610 cb710_mmc_enable_4bit_data(slot, ios->bus_width != MMC_BUS_WIDTH_1);
611
612 cb710_mmc_enable_irq(slot, CB710_MMC_IE_TEST_MASK, 0);
613}
614
615static int cb710_mmc_get_ro(struct mmc_host *mmc)
616{
617 struct cb710_slot *slot = cb710_mmc_to_slot(mmc);
618
619 return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT)
620 & CB710_MMC_S3_WRITE_PROTECTED;
621}
622
623static int cb710_mmc_irq_handler(struct cb710_slot *slot)
624{
625 struct mmc_host *mmc = cb710_slot_to_mmc(slot);
626 struct cb710_mmc_reader *reader = mmc_priv(mmc);
627 u32 status, config1, config2, irqen;
628
629 status = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT);
630 irqen = cb710_read_port_32(slot, CB710_MMC_IRQ_ENABLE_PORT);
631 config2 = cb710_read_port_32(slot, CB710_MMC_CONFIGB_PORT);
632 config1 = cb710_read_port_32(slot, CB710_MMC_CONFIG_PORT);
633
634 dev_dbg(cb710_slot_dev(slot), "interrupt; status: %08X, "
635 "ie: %08X, c2: %08X, c1: %08X\n",
636 status, irqen, config2, config1);
637
638 if (status & (CB710_MMC_S1_CARD_CHANGED << 8)) {
639 /* ack the event */
640 cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT,
641 CB710_MMC_S1_CARD_CHANGED);
642 if ((irqen & CB710_MMC_IE_CISTATUS_MASK)
643 == CB710_MMC_IE_CISTATUS_MASK)
644 mmc_detect_change(mmc, HZ/5);
645 } else {
646 dev_dbg(cb710_slot_dev(slot), "unknown interrupt (test)\n");
647 spin_lock(&reader->irq_lock);
648 __cb710_mmc_enable_irq(slot, 0, CB710_MMC_IE_TEST_MASK);
649 spin_unlock(&reader->irq_lock);
650 }
651
652 return 1;
653}
654
655static void cb710_mmc_finish_request_tasklet(unsigned long data)
656{
657 struct mmc_host *mmc = (void *)data;
658 struct cb710_mmc_reader *reader = mmc_priv(mmc);
659 struct mmc_request *mrq = reader->mrq;
660
661 reader->mrq = NULL;
662 mmc_request_done(mmc, mrq);
663}
664
665static const struct mmc_host_ops cb710_mmc_host = {
666 .request = cb710_mmc_request,
667 .set_ios = cb710_mmc_set_ios,
668 .get_ro = cb710_mmc_get_ro
669};
670
671#ifdef CONFIG_PM
672
673static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)
674{
675 struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
676 struct mmc_host *mmc = cb710_slot_to_mmc(slot);
677 int err;
678
679 err = mmc_suspend_host(mmc, state);
680 if (err)
681 return err;
682
683 cb710_mmc_enable_irq(slot, 0, ~0);
684 return 0;
685}
686
687static int cb710_mmc_resume(struct platform_device *pdev)
688{
689 struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
690 struct mmc_host *mmc = cb710_slot_to_mmc(slot);
691
692 cb710_mmc_enable_irq(slot, 0, ~0);
693
694 return mmc_resume_host(mmc);
695}
696
697#endif /* CONFIG_PM */
698
699static int __devinit cb710_mmc_init(struct platform_device *pdev)
700{
701 struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
702 struct cb710_chip *chip = cb710_slot_to_chip(slot);
703 struct mmc_host *mmc;
704 struct cb710_mmc_reader *reader;
705 int err;
706 u32 val;
707
708 mmc = mmc_alloc_host(sizeof(*reader), cb710_slot_dev(slot));
709 if (!mmc)
710 return -ENOMEM;
711
712 dev_set_drvdata(&pdev->dev, mmc);
713
714 /* harmless (maybe) magic */
715 pci_read_config_dword(chip->pdev, 0x48, &val);
716 val = cb710_src_freq_mhz[(val >> 16) & 0xF];
717 dev_dbg(cb710_slot_dev(slot), "source frequency: %dMHz\n", val);
718 val *= 1000000;
719
720 mmc->ops = &cb710_mmc_host;
721 mmc->f_max = val;
722 mmc->f_min = val >> cb710_clock_divider_log2[CB710_MAX_DIVIDER_IDX];
723 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
724 mmc->caps = MMC_CAP_4_BIT_DATA;
725
726 reader = mmc_priv(mmc);
727
728 tasklet_init(&reader->finish_req_tasklet,
729 cb710_mmc_finish_request_tasklet, (unsigned long)mmc);
730 spin_lock_init(&reader->irq_lock);
731 cb710_dump_regs(chip, CB710_DUMP_REGS_MMC);
732
733 cb710_mmc_enable_irq(slot, 0, ~0);
734 cb710_set_irq_handler(slot, cb710_mmc_irq_handler);
735
736 err = mmc_add_host(mmc);
737 if (unlikely(err))
738 goto err_free_mmc;
739
740 dev_dbg(cb710_slot_dev(slot), "mmc_hostname is %s\n",
741 mmc_hostname(mmc));
742
743 cb710_mmc_enable_irq(slot, CB710_MMC_IE_CARD_INSERTION_STATUS, 0);
744
745 return 0;
746
747err_free_mmc:
748 dev_dbg(cb710_slot_dev(slot), "mmc_add_host() failed: %d\n", err);
749
750 mmc_free_host(mmc);
751 return err;
752}
753
754static int __devexit cb710_mmc_exit(struct platform_device *pdev)
755{
756 struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
757 struct mmc_host *mmc = cb710_slot_to_mmc(slot);
758 struct cb710_mmc_reader *reader = mmc_priv(mmc);
759
760 cb710_mmc_enable_irq(slot, 0, CB710_MMC_IE_CARD_INSERTION_STATUS);
761
762 mmc_remove_host(mmc);
763
764 /* IRQs should be disabled now, but let's stay on the safe side */
765 cb710_mmc_enable_irq(slot, 0, ~0);
766 cb710_set_irq_handler(slot, NULL);
767
768 /* clear config ports - just in case */
769 cb710_write_port_32(slot, CB710_MMC_CONFIG_PORT, 0);
770 cb710_write_port_16(slot, CB710_MMC_CONFIGB_PORT, 0);
771
772 tasklet_kill(&reader->finish_req_tasklet);
773
774 mmc_free_host(mmc);
775 return 0;
776}
777
778static struct platform_driver cb710_mmc_driver = {
779 .driver.name = "cb710-mmc",
780 .probe = cb710_mmc_init,
781 .remove = __devexit_p(cb710_mmc_exit),
782#ifdef CONFIG_PM
783 .suspend = cb710_mmc_suspend,
784 .resume = cb710_mmc_resume,
785#endif
786};
787
788static int __init cb710_mmc_init_module(void)
789{
790 return platform_driver_register(&cb710_mmc_driver);
791}
792
793static void __exit cb710_mmc_cleanup_module(void)
794{
795 platform_driver_unregister(&cb710_mmc_driver);
796}
797
798module_init(cb710_mmc_init_module);
799module_exit(cb710_mmc_cleanup_module);
800
801MODULE_AUTHOR("Michał Mirosław <mirq-linux@rere.qmqm.pl>");
802MODULE_DESCRIPTION("ENE CB710 memory card reader driver - MMC/SD part");
803MODULE_LICENSE("GPL");
804MODULE_ALIAS("platform:cb710-mmc");
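cb710_mmc_set_clock() above scans the divider table from the smallest divider (fastest clock) downward and stops at the first entry whose divided source clock does not exceed the request, settling for the largest divider otherwise. The same search, standalone (tables copied from the driver):

#include <stdio.h>

static const unsigned char divider_log2[8] = { 0, 1, 2, 3, 4, 5, 7, 9 };

/* Return the table index chosen for a requested clock, given src_hz. */
static unsigned pick_divider(unsigned src_hz, unsigned want_hz)
{
	unsigned i;

	for (i = 0; i < 7; ++i)	/* 7 == last index; fall through to it */
		if (want_hz >= src_hz >> divider_log2[i])
			break;
	return i;
}

int main(void)
{
	unsigned src = 33000000;	/* 33 MHz source, first table entry */
	unsigned idx = pick_divider(src, 400000);	/* SD identification */

	printf("divider 2^%u -> %u Hz\n",
	       divider_log2[idx], src >> divider_log2[idx]);
	return 0;
}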
diff --git a/drivers/mmc/host/cb710-mmc.h b/drivers/mmc/host/cb710-mmc.h
new file mode 100644
index 00000000000..e845c776bdd
--- /dev/null
+++ b/drivers/mmc/host/cb710-mmc.h
@@ -0,0 +1,104 @@
1/*
2 * cb710/cb710-mmc.h
3 *
4 * Copyright by Michał Mirosław, 2008-2009
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef LINUX_CB710_MMC_H
11#define LINUX_CB710_MMC_H
12
13#include <linux/cb710.h>
14
15/* per-MMC-reader structure */
16struct cb710_mmc_reader {
17 struct tasklet_struct finish_req_tasklet;
18 struct mmc_request *mrq;
19 spinlock_t irq_lock;
20 unsigned char last_power_mode;
21};
22
23/* some device struct walking */
24
25static inline struct mmc_host *cb710_slot_to_mmc(struct cb710_slot *slot)
26{
27 return dev_get_drvdata(&slot->pdev.dev);
28}
29
30static inline struct cb710_slot *cb710_mmc_to_slot(struct mmc_host *mmc)
31{
32 struct platform_device *pdev = container_of(mmc_dev(mmc),
33 struct platform_device, dev);
34 return cb710_pdev_to_slot(pdev);
35}
36
37/* registers (this might be all wrong ;) */
38
39#define CB710_MMC_DATA_PORT 0x00
40
41#define CB710_MMC_CONFIG_PORT 0x04
42#define CB710_MMC_CONFIG0_PORT 0x04
43#define CB710_MMC_CONFIG1_PORT 0x05
44#define CB710_MMC_C1_4BIT_DATA_BUS 0x40
45#define CB710_MMC_CONFIG2_PORT 0x06
46#define CB710_MMC_C2_READ_PIO_SIZE_MASK 0x0F /* N-1 */
47#define CB710_MMC_CONFIG3_PORT 0x07
48
49#define CB710_MMC_CONFIGB_PORT 0x08
50
51#define CB710_MMC_IRQ_ENABLE_PORT 0x0C
52#define CB710_MMC_IE_TEST_MASK 0x00BF
53#define CB710_MMC_IE_CARD_INSERTION_STATUS 0x1000
54#define CB710_MMC_IE_IRQ_ENABLE 0x8000
55#define CB710_MMC_IE_CISTATUS_MASK \
56 (CB710_MMC_IE_CARD_INSERTION_STATUS|CB710_MMC_IE_IRQ_ENABLE)
57
58#define CB710_MMC_STATUS_PORT 0x10
59#define CB710_MMC_STATUS_ERROR_EVENTS 0x60FF
60#define CB710_MMC_STATUS0_PORT 0x10
61#define CB710_MMC_S0_FIFO_UNDERFLOW 0x40
62#define CB710_MMC_STATUS1_PORT 0x11
63#define CB710_MMC_S1_COMMAND_SENT 0x01
64#define CB710_MMC_S1_DATA_TRANSFER_DONE 0x02
65#define CB710_MMC_S1_PIO_TRANSFER_DONE 0x04
66#define CB710_MMC_S1_CARD_CHANGED 0x10
67#define CB710_MMC_S1_RESET 0x20
68#define CB710_MMC_STATUS2_PORT 0x12
69#define CB710_MMC_S2_FIFO_READY 0x01
70#define CB710_MMC_S2_FIFO_EMPTY 0x02
71#define CB710_MMC_S2_BUSY_10 0x10
72#define CB710_MMC_S2_BUSY_20 0x20
73#define CB710_MMC_STATUS3_PORT 0x13
74#define CB710_MMC_S3_CARD_DETECTED 0x02
75#define CB710_MMC_S3_WRITE_PROTECTED 0x04
76
77#define CB710_MMC_CMD_TYPE_PORT 0x14
78#define CB710_MMC_RSP_TYPE_MASK 0x0007
79#define CB710_MMC_RSP_R1 (0)
80#define CB710_MMC_RSP_136 (5)
81#define CB710_MMC_RSP_NO_CRC (2)
82#define CB710_MMC_RSP_PRESENT_MASK 0x0018
83#define CB710_MMC_RSP_NONE (0 << 3)
84#define CB710_MMC_RSP_PRESENT (1 << 3)
85#define CB710_MMC_RSP_PRESENT_X (2 << 3)
86#define CB710_MMC_CMD_TYPE_MASK 0x0060
87#define CB710_MMC_CMD_BC (0 << 5)
88#define CB710_MMC_CMD_BCR (1 << 5)
89#define CB710_MMC_CMD_AC (2 << 5)
90#define CB710_MMC_CMD_ADTC (3 << 5)
91#define CB710_MMC_DATA_READ 0x0080
92#define CB710_MMC_CMD_CODE_MASK 0x3F00
93#define CB710_MMC_CMD_CODE_SHIFT 8
94#define CB710_MMC_IS_APP_CMD 0x4000
95#define CB710_MMC_RSP_BUSY 0x8000
96
97#define CB710_MMC_CMD_PARAM_PORT 0x18
98#define CB710_MMC_TRANSFER_SIZE_PORT 0x1C
99#define CB710_MMC_RESPONSE0_PORT 0x20
100#define CB710_MMC_RESPONSE1_PORT 0x24
101#define CB710_MMC_RESPONSE2_PORT 0x28
102#define CB710_MMC_RESPONSE3_PORT 0x2C
103
104#endif /* LINUX_CB710_MMC_H */
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index f48349d18c9..240608cc7ae 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -97,6 +97,14 @@
97 */ 97 */
98#define r1b_timeout (HZ * 3) 98#define r1b_timeout (HZ * 3)
99 99
100/* One of the critical speed parameters is the amount of data which may
101 * be transferred in one command. If this value is too low, the SD card
102 * controller has to do multiple partial block writes (argggh!). With
103 * today's (2008) SD cards there is little speed gain if we transfer more
104 * than 64 KBytes at a time. So use this value until there is some
105 * indication that we should do more here.
106 */
107#define MMC_SPI_BLOCKSATONCE 128
100 108
101/****************************************************************************/ 109/****************************************************************************/
102 110
@@ -327,15 +335,16 @@ checkstatus:
327 335
328 /* Status byte: the entire seven-bit R1 response. */ 336 /* Status byte: the entire seven-bit R1 response. */
329 if (cmd->resp[0] != 0) { 337 if (cmd->resp[0] != 0) {
330 if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS 338 if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS)
331 | R1_SPI_ILLEGAL_COMMAND)
332 & cmd->resp[0]) 339 & cmd->resp[0])
333 value = -EINVAL; 340 value = -EFAULT; /* Bad address */
341 else if (R1_SPI_ILLEGAL_COMMAND & cmd->resp[0])
342 value = -ENOSYS; /* Function not implemented */
334 else if (R1_SPI_COM_CRC & cmd->resp[0]) 343 else if (R1_SPI_COM_CRC & cmd->resp[0])
335 value = -EILSEQ; 344 value = -EILSEQ; /* Illegal byte sequence */
336 else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET) 345 else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET)
337 & cmd->resp[0]) 346 & cmd->resp[0])
338 value = -EIO; 347 value = -EIO; /* I/O error */
339 /* else R1_SPI_IDLE, "it's resetting" */ 348 /* else R1_SPI_IDLE, "it's resetting" */
340 } 349 }
341 350
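The rewritten ladder above distinguishes three failure classes that the old code partly folded into -EINVAL. The same mapping pulled out into a standalone sketch (the helper name is hypothetical; the R1_SPI_* masks are the ones used in the hunk):

	/* Hypothetical helper mirroring the mapping above: translate an
	 * SPI-mode R1 status byte into an errno. */
	static int mmc_spi_r1_to_errno(u8 r1)
	{
		if (r1 & (R1_SPI_PARAMETER | R1_SPI_ADDRESS))
			return -EFAULT;	/* bad address */
		if (r1 & R1_SPI_ILLEGAL_COMMAND)
			return -ENOSYS;	/* function not implemented */
		if (r1 & R1_SPI_COM_CRC)
			return -EILSEQ;	/* illegal byte sequence */
		if (r1 & (R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET))
			return -EIO;	/* generic I/O error */
		return 0;		/* R1_SPI_IDLE: the card is resetting */
	}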
@@ -1366,6 +1375,10 @@ static int mmc_spi_probe(struct spi_device *spi)
1366 1375
1367 mmc->ops = &mmc_spi_ops; 1376 mmc->ops = &mmc_spi_ops;
1368 mmc->max_blk_size = MMC_SPI_BLOCKSIZE; 1377 mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
1378 mmc->max_hw_segs = MMC_SPI_BLOCKSATONCE;
1379 mmc->max_phys_segs = MMC_SPI_BLOCKSATONCE;
1380 mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
1381 mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;
1369 1382
1370 mmc->caps = MMC_CAP_SPI; 1383 mmc->caps = MMC_CAP_SPI;
1371 1384
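Assuming MMC_SPI_BLOCKSIZE keeps its usual value of 512 bytes (it is not changed by this patch), the new limits work out to 128 blocks x 512 bytes = 65536 bytes = 64 KiB per request, exactly the threshold the comment at the top of the patch identifies as the point of diminishing returns.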
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 36875dcfa49..e1aa8471ab1 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -490,7 +490,7 @@ static void mmci_check_status(unsigned long data)
490 mod_timer(&host->timer, jiffies + HZ); 490 mod_timer(&host->timer, jiffies + HZ);
491} 491}
492 492
493static int __devinit mmci_probe(struct amba_device *dev, void *id) 493static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
494{ 494{
495 struct mmc_platform_data *plat = dev->dev.platform_data; 495 struct mmc_platform_data *plat = dev->dev.platform_data;
496 struct mmci_host *host; 496 struct mmci_host *host;
@@ -546,7 +546,7 @@ static int __devinit mmci_probe(struct amba_device *dev, void *id)
546 host->mclk = clk_get_rate(host->clk); 546 host->mclk = clk_get_rate(host->clk);
547 DBG(host, "eventual mclk rate: %u Hz\n", host->mclk); 547 DBG(host, "eventual mclk rate: %u Hz\n", host->mclk);
548 } 548 }
549 host->base = ioremap(dev->res.start, SZ_4K); 549 host->base = ioremap(dev->res.start, resource_size(&dev->res));
550 if (!host->base) { 550 if (!host->base) {
551 ret = -ENOMEM; 551 ret = -ENOMEM;
552 goto clk_disable; 552 goto clk_disable;
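resource_size() yields the inclusive length of a resource, so the remap now covers exactly the region the AMBA device declares instead of a hard-coded 4 KiB. For reference, a paraphrase of the kernel helper (the name here is made up to avoid clashing with the real one):

	/* Paraphrase of the kernel's resource_size(): resource ranges are
	 * inclusive of both endpoints, hence the +1. */
	static inline resource_size_t my_resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}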
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index c643d0fe118..b56d72ff06e 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -64,6 +64,31 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
64 unsigned int tmout; 64 unsigned int tmout;
65 int tmout_index; 65 int tmout_index;
66 66
67 /*
68 * Hardware weirdness. The FIFO_EMPTY bit of the HW_STATE
69 * register is sometimes not set for a while when some
70 * "unusual" data block sizes are used (such as with the SWITCH
71 * command), even though the XFER_DONE interrupt was raised. If
72 * another data transfer starts before this bit is set again
73 * (which eventually happens by itself), the new transfer simply
74 * fails with a timeout.
75 */
76 if (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) {
77 unsigned long t = jiffies + HZ;
78 unsigned int hw_state, count = 0;
79 do {
80 if (time_after(jiffies, t)) {
81 dev_warn(host->dev, "FIFO_EMPTY bit missing\n");
82 break;
83 }
84 hw_state = mvsd_read(MVSD_HW_STATE);
85 count++;
86 } while (!(hw_state & (1 << 13)));
87 dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit "
88 "(hw=0x%04x, count=%d, jiffies=%ld)\n",
89 hw_state, count, jiffies - (t - HZ));
90 }
91
67 /* If timeout=0 then maximum timeout index is used. */ 92 /* If timeout=0 then maximum timeout index is used. */
68 tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk); 93 tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk);
69 tmout += data->timeout_clks; 94 tmout += data->timeout_clks;
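The bounded wait added above leans on time_after(), which compares jiffies values with wraparound-safe signed arithmetic, so the one-second deadline holds even across a jiffies overflow. The idiom in isolation (the recovery action is illustrative):

	/* Generic bounded-poll sketch of the pattern used above. */
	unsigned long deadline = jiffies + HZ;	/* roughly one second */
	while (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) {
		if (time_after(jiffies, deadline)) {
			/* give up; the caller decides how to recover */
			break;
		}
		cpu_relax();
	}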
@@ -620,9 +645,18 @@ static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
620 if (ios->bus_width == MMC_BUS_WIDTH_4) 645 if (ios->bus_width == MMC_BUS_WIDTH_4)
621 ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS; 646 ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS;
622 647
648 /*
649 * The HI_SPEED_EN bit is causing trouble with many (but not all)
650 * high speed SD, SDHC and SDIO cards. Not enabling that bit
651 * makes all cards work. So let's just ignore that bit for now
652 * and revisit this issue if problems caused by not enabling
653 * this bit are ever reported.
654 */
655#if 0
623 if (ios->timing == MMC_TIMING_MMC_HS || 656 if (ios->timing == MMC_TIMING_MMC_HS ||
624 ios->timing == MMC_TIMING_SD_HS) 657 ios->timing == MMC_TIMING_SD_HS)
625 ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN; 658 ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN;
659#endif
626 660
627 host->ctrl = ctrl_reg; 661 host->ctrl = ctrl_reg;
628 mvsd_write(MVSD_HOST_CTRL, ctrl_reg); 662 mvsd_write(MVSD_HOST_CTRL, ctrl_reg);
@@ -882,3 +916,4 @@ module_param(nodma, int, 0);
882MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre"); 916MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre");
883MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver"); 917MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver");
884MODULE_LICENSE("GPL"); 918MODULE_LICENSE("GPL");
919MODULE_ALIAS("platform:mvsdio");
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index b4a615c55f2..bc14bb1b057 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -140,6 +140,8 @@ struct mxcmci_host {
140 struct work_struct datawork; 140 struct work_struct datawork;
141}; 141};
142 142
143static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
144
143static inline int mxcmci_use_dma(struct mxcmci_host *host) 145static inline int mxcmci_use_dma(struct mxcmci_host *host)
144{ 146{
145 return host->do_dma; 147 return host->do_dma;
@@ -160,7 +162,7 @@ static void mxcmci_softreset(struct mxcmci_host *host)
160 writew(0xff, host->base + MMC_REG_RES_TO); 162 writew(0xff, host->base + MMC_REG_RES_TO);
161} 163}
162 164
163static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) 165static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
164{ 166{
165 unsigned int nob = data->blocks; 167 unsigned int nob = data->blocks;
166 unsigned int blksz = data->blksz; 168 unsigned int blksz = data->blksz;
@@ -168,6 +170,7 @@ static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
168#ifdef HAS_DMA 170#ifdef HAS_DMA
169 struct scatterlist *sg; 171 struct scatterlist *sg;
170 int i; 172 int i;
173 int ret;
171#endif 174#endif
172 if (data->flags & MMC_DATA_STREAM) 175 if (data->flags & MMC_DATA_STREAM)
173 nob = 0xffff; 176 nob = 0xffff;
@@ -183,7 +186,7 @@ static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
183 for_each_sg(data->sg, sg, data->sg_len, i) { 186 for_each_sg(data->sg, sg, data->sg_len, i) {
184 if (sg->offset & 3 || sg->length & 3) { 187 if (sg->offset & 3 || sg->length & 3) {
185 host->do_dma = 0; 188 host->do_dma = 0;
186 return; 189 return 0;
187 } 190 }
188 } 191 }
189 192
@@ -192,23 +195,30 @@ static void mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
192 host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, 195 host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
193 data->sg_len, host->dma_dir); 196 data->sg_len, host->dma_dir);
194 197
195 imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, datasize, 198 ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
196 host->res->start + MMC_REG_BUFFER_ACCESS, 199 datasize,
197 DMA_MODE_READ); 200 host->res->start + MMC_REG_BUFFER_ACCESS,
201 DMA_MODE_READ);
198 } else { 202 } else {
199 host->dma_dir = DMA_TO_DEVICE; 203 host->dma_dir = DMA_TO_DEVICE;
200 host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, 204 host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
201 data->sg_len, host->dma_dir); 205 data->sg_len, host->dma_dir);
202 206
203 imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, datasize, 207 ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents,
204 host->res->start + MMC_REG_BUFFER_ACCESS, 208 datasize,
205 DMA_MODE_WRITE); 209 host->res->start + MMC_REG_BUFFER_ACCESS,
210 DMA_MODE_WRITE);
206 } 211 }
207 212
213 if (ret) {
214 dev_err(mmc_dev(host->mmc), "failed to setup DMA : %d\n", ret);
215 return ret;
216 }
208 wmb(); 217 wmb();
209 218
210 imx_dma_enable(host->dma); 219 imx_dma_enable(host->dma);
211#endif /* HAS_DMA */ 220#endif /* HAS_DMA */
221 return 0;
212} 222}
213 223
214static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, 224static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
@@ -345,8 +355,11 @@ static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
345 stat = readl(host->base + MMC_REG_STATUS); 355 stat = readl(host->base + MMC_REG_STATUS);
346 if (stat & STATUS_ERR_MASK) 356 if (stat & STATUS_ERR_MASK)
347 return stat; 357 return stat;
348 if (time_after(jiffies, timeout)) 358 if (time_after(jiffies, timeout)) {
359 mxcmci_softreset(host);
360 mxcmci_set_clk_rate(host, host->clock);
349 return STATUS_TIME_OUT_READ; 361 return STATUS_TIME_OUT_READ;
362 }
350 if (stat & mask) 363 if (stat & mask)
351 return 0; 364 return 0;
352 cpu_relax(); 365 cpu_relax();
@@ -531,6 +544,7 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
531{ 544{
532 struct mxcmci_host *host = mmc_priv(mmc); 545 struct mxcmci_host *host = mmc_priv(mmc);
533 unsigned int cmdat = host->cmdat; 546 unsigned int cmdat = host->cmdat;
547 int error;
534 548
535 WARN_ON(host->req != NULL); 549 WARN_ON(host->req != NULL);
536 550
@@ -540,7 +554,12 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
540 host->do_dma = 1; 554 host->do_dma = 1;
541#endif 555#endif
542 if (req->data) { 556 if (req->data) {
543 mxcmci_setup_data(host, req->data); 557 error = mxcmci_setup_data(host, req->data);
558 if (error) {
559 req->cmd->error = error;
560 goto out;
561 }
562
544 563
545 cmdat |= CMD_DAT_CONT_DATA_ENABLE; 564 cmdat |= CMD_DAT_CONT_DATA_ENABLE;
546 565
@@ -548,7 +567,9 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
548 cmdat |= CMD_DAT_CONT_WRITE; 567 cmdat |= CMD_DAT_CONT_WRITE;
549 } 568 }
550 569
551 if (mxcmci_start_cmd(host, req->cmd, cmdat)) 570 error = mxcmci_start_cmd(host, req->cmd, cmdat);
571out:
572 if (error)
552 mxcmci_finish_request(host, req); 573 mxcmci_finish_request(host, req);
553} 574}
554 575
@@ -724,7 +745,7 @@ static int mxcmci_probe(struct platform_device *pdev)
724 goto out_clk_put; 745 goto out_clk_put;
725 } 746 }
726 747
727 mmc->f_min = clk_get_rate(host->clk) >> 7; 748 mmc->f_min = clk_get_rate(host->clk) >> 16;
728 mmc->f_max = clk_get_rate(host->clk) >> 1; 749 mmc->f_max = clk_get_rate(host->clk) >> 1;
729 750
730 /* recommended in data sheet */ 751 /* recommended in data sheet */
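Widening the shift from 7 to 16 advertises the controller's real minimum output frequency, presumably reflecting the full width of its clock divider chain. As a rough worked example with an assumed 66 MHz base clock: 66 MHz >> 7 is about 516 kHz, while 66 MHz >> 16 is about 1 kHz, so the MMC stack can now clock cards far below the old floor during initialization.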
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index bfa25c01c87..e7a331de573 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -822,7 +822,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
822 del_timer(&host->cmd_abort_timer); 822 del_timer(&host->cmd_abort_timer);
823 host->abort = 1; 823 host->abort = 1;
824 OMAP_MMC_WRITE(host, IE, 0); 824 OMAP_MMC_WRITE(host, IE, 0);
825 disable_irq(host->irq); 825 disable_irq_nosync(host->irq);
826 schedule_work(&host->cmd_abort_work); 826 schedule_work(&host->cmd_abort_work);
827 return IRQ_HANDLED; 827 return IRQ_HANDLED;
828 } 828 }
@@ -1593,7 +1593,6 @@ static int mmc_omap_resume(struct platform_device *pdev)
1593#endif 1593#endif
1594 1594
1595static struct platform_driver mmc_omap_driver = { 1595static struct platform_driver mmc_omap_driver = {
1596 .probe = mmc_omap_probe,
1597 .remove = mmc_omap_remove, 1596 .remove = mmc_omap_remove,
1598 .suspend = mmc_omap_suspend, 1597 .suspend = mmc_omap_suspend,
1599 .resume = mmc_omap_resume, 1598 .resume = mmc_omap_resume,
@@ -1605,7 +1604,7 @@ static struct platform_driver mmc_omap_driver = {
1605 1604
1606static int __init mmc_omap_init(void) 1605static int __init mmc_omap_init(void)
1607{ 1606{
1608 return platform_driver_register(&mmc_omap_driver); 1607 return platform_driver_probe(&mmc_omap_driver, mmc_omap_probe);
1609} 1608}
1610 1609
1611static void __exit mmc_omap_exit(void) 1610static void __exit mmc_omap_exit(void)
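Dropping .probe from the platform_driver and passing it to platform_driver_probe() is the usual pattern for hardware that cannot hotplug: the probe routine runs once at registration and may therefore live in discardable __init memory. A minimal sketch of the pattern, with made-up names:

	/* Sketch only: platform_driver_probe() takes the probe function as
	 * an argument instead of storing it in the driver struct, so it
	 * can be __init and freed after boot. Re-binding a device after
	 * boot is no longer possible with this pattern. */
	static int __init foo_probe(struct platform_device *pdev)
	{
		return 0;	/* device setup would go here */
	}

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
		},
	};

	static int __init foo_init(void)
	{
		return platform_driver_probe(&foo_driver, foo_probe);
	}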
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index e62a22a7f00..1cf9cfb3b64 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -680,7 +680,7 @@ static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
680 host->dma_ch = -1; 680 host->dma_ch = -1;
681 /* 681 /*
682 * DMA Callback: run in interrupt context. 682 * DMA Callback: run in interrupt context.
683 * mutex_unlock will through a kernel warning if used. 683 * mutex_unlock will throw a kernel warning if used.
684 */ 684 */
685 up(&host->sem); 685 up(&host->sem);
686} 686}
@@ -1073,7 +1073,6 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
1073 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1073 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1074 mmc->max_seg_size = mmc->max_req_size; 1074 mmc->max_seg_size = mmc->max_req_size;
1075 1075
1076 mmc->ocr_avail = mmc_slot(host).ocr_mask;
1077 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED; 1076 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
1078 1077
1079 if (pdata->slots[host->slot_id].wires >= 8) 1078 if (pdata->slots[host->slot_id].wires >= 8)
@@ -1110,13 +1109,14 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
1110 goto err_irq; 1109 goto err_irq;
1111 } 1110 }
1112 1111
1112 /* initialize power supplies, gpios, etc */
1113 if (pdata->init != NULL) { 1113 if (pdata->init != NULL) {
1114 if (pdata->init(&pdev->dev) != 0) { 1114 if (pdata->init(&pdev->dev) != 0) {
1115 dev_dbg(mmc_dev(host->mmc), 1115 dev_dbg(mmc_dev(host->mmc), "late init error\n");
1116 "Unable to configure MMC IRQs\n");
1117 goto err_irq_cd_init; 1116 goto err_irq_cd_init;
1118 } 1117 }
1119 } 1118 }
1119 mmc->ocr_avail = mmc_slot(host).ocr_mask;
1120 1120
1121 /* Request IRQ for card detect */ 1121 /* Request IRQ for card detect */
1122 if ((mmc_slot(host).card_detect_irq)) { 1122 if ((mmc_slot(host).card_detect_irq)) {
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 430095725f9..d7d7109ef47 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -27,6 +27,7 @@
27#include <linux/err.h> 27#include <linux/err.h>
28#include <linux/mmc/host.h> 28#include <linux/mmc/host.h>
29#include <linux/io.h> 29#include <linux/io.h>
30#include <linux/regulator/consumer.h>
30 31
31#include <asm/sizes.h> 32#include <asm/sizes.h>
32 33
@@ -67,8 +68,42 @@ struct pxamci_host {
67 unsigned int dma_dir; 68 unsigned int dma_dir;
68 unsigned int dma_drcmrrx; 69 unsigned int dma_drcmrrx;
69 unsigned int dma_drcmrtx; 70 unsigned int dma_drcmrtx;
71
72 struct regulator *vcc;
70}; 73};
71 74
75static inline void pxamci_init_ocr(struct pxamci_host *host)
76{
77#ifdef CONFIG_REGULATOR
78 host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
79
80 if (IS_ERR(host->vcc))
81 host->vcc = NULL;
82 else {
83 host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
84 if (host->pdata && host->pdata->ocr_mask)
85 dev_warn(mmc_dev(host->mmc),
86 "ocr_mask/setpower will not be used\n");
87 }
88#endif
89 if (host->vcc == NULL) {
90 /* fall-back to platform data */
91 host->mmc->ocr_avail = host->pdata ?
92 host->pdata->ocr_mask :
93 MMC_VDD_32_33 | MMC_VDD_33_34;
94 }
95}
96
97static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd)
98{
99#ifdef CONFIG_REGULATOR
100 if (host->vcc)
101 mmc_regulator_set_ocr(host->vcc, vdd);
102#endif
103 if (!host->vcc && host->pdata && host->pdata->setpower)
104 host->pdata->setpower(mmc_dev(host->mmc), vdd);
105}
106
72static void pxamci_stop_clock(struct pxamci_host *host) 107static void pxamci_stop_clock(struct pxamci_host *host)
73{ 108{
74 if (readl(host->base + MMC_STAT) & STAT_CLK_EN) { 109 if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
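Note the precedence pxamci_init_ocr() establishes: a usable "vmmc" regulator wins outright, and any ocr_mask/setpower supplied through platform data is then deliberately ignored (with a warning), so a board should provide one mechanism or the other, not both. Only when no regulator is found does the driver fall back to platform data, and finally to a fixed 3.2-3.4 V window.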
@@ -438,8 +473,7 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
438 if (host->power_mode != ios->power_mode) { 473 if (host->power_mode != ios->power_mode) {
439 host->power_mode = ios->power_mode; 474 host->power_mode = ios->power_mode;
440 475
441 if (host->pdata && host->pdata->setpower) 476 pxamci_set_power(host, ios->vdd);
442 host->pdata->setpower(mmc_dev(mmc), ios->vdd);
443 477
444 if (ios->power_mode == MMC_POWER_ON) 478 if (ios->power_mode == MMC_POWER_ON)
445 host->cmdat |= CMDAT_INIT; 479 host->cmdat |= CMDAT_INIT;
@@ -562,9 +596,8 @@ static int pxamci_probe(struct platform_device *pdev)
562 mmc->f_max = (cpu_is_pxa300() || cpu_is_pxa310()) ? 26000000 596 mmc->f_max = (cpu_is_pxa300() || cpu_is_pxa310()) ? 26000000
563 : host->clkrate; 597 : host->clkrate;
564 598
565 mmc->ocr_avail = host->pdata ? 599 pxamci_init_ocr(host);
566 host->pdata->ocr_mask : 600
567 MMC_VDD_32_33|MMC_VDD_33_34;
568 mmc->caps = 0; 601 mmc->caps = 0;
569 host->cmdat = 0; 602 host->cmdat = 0;
570 if (!cpu_is_pxa25x()) { 603 if (!cpu_is_pxa25x()) {
@@ -661,6 +694,9 @@ static int pxamci_remove(struct platform_device *pdev)
661 if (mmc) { 694 if (mmc) {
662 struct pxamci_host *host = mmc_priv(mmc); 695 struct pxamci_host *host = mmc_priv(mmc);
663 696
697 if (host->vcc)
698 regulator_put(host->vcc);
699
664 if (host->pdata && host->pdata->exit) 700 if (host->pdata && host->pdata->exit)
665 host->pdata->exit(&pdev->dev, mmc); 701 host->pdata->exit(&pdev->dev, mmc);
666 702
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2db166b7096..4eb4f37544a 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -17,6 +17,7 @@
17#include <linux/mmc/host.h> 17#include <linux/mmc/host.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/cpufreq.h> 19#include <linux/cpufreq.h>
20#include <linux/gpio.h>
20#include <linux/irq.h> 21#include <linux/irq.h>
21#include <linux/io.h> 22#include <linux/io.h>
22 23
@@ -789,7 +790,7 @@ static void s3cmci_dma_setup(struct s3cmci_host *host,
789 790
790 last_source = source; 791 last_source = source;
791 792
792 s3c2410_dma_devconfig(host->dma, source, 3, 793 s3c2410_dma_devconfig(host->dma, source,
793 host->mem->start + host->sdidata); 794 host->mem->start + host->sdidata);
794 795
795 if (!setup_ok) { 796 if (!setup_ok) {
@@ -1121,7 +1122,7 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1121 case MMC_POWER_OFF: 1122 case MMC_POWER_OFF:
1122 default: 1123 default:
1123 s3c2410_gpio_setpin(S3C2410_GPE5, 0); 1124 s3c2410_gpio_setpin(S3C2410_GPE5, 0);
1124 s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_OUTP); 1125 s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPIO_OUTPUT);
1125 1126
1126 if (host->is2440) 1127 if (host->is2440)
1127 mci_con |= S3C2440_SDICON_SDRESET; 1128 mci_con |= S3C2440_SDICON_SDRESET;
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of.c
index 3ff4ac3abe8..128c614d11a 100644
--- a/drivers/mmc/host/sdhci-of.c
+++ b/drivers/mmc/host/sdhci-of.c
@@ -55,7 +55,13 @@ static u32 esdhc_readl(struct sdhci_host *host, int reg)
55 55
56static u16 esdhc_readw(struct sdhci_host *host, int reg) 56static u16 esdhc_readw(struct sdhci_host *host, int reg)
57{ 57{
58 return in_be16(host->ioaddr + (reg ^ 0x2)); 58 u16 ret;
59
60 if (unlikely(reg == SDHCI_HOST_VERSION))
61 ret = in_be16(host->ioaddr + reg);
62 else
63 ret = in_be16(host->ioaddr + (reg ^ 0x2));
64 return ret;
59} 65}
60 66
61static u8 esdhc_readb(struct sdhci_host *host, int reg) 67static u8 esdhc_readb(struct sdhci_host *host, int reg)
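The reg ^ 0x2 swizzle compensates for the eSDHC's big-endian register file: within each big-endian 32-bit word, the halfword that SDHCI expects at little-endian offset reg actually sits in the other half, so paired offsets trade places. SDHCI_HOST_VERSION is exempted, presumably because the controller already presents that register at its SDHCI-specified offset. A comment-form illustration (offsets chosen as examples):

	/* 16-bit access on a big-endian 32-bit register file:
	 *   requested offset 0x2C -> actual offset 0x2E
	 *   requested offset 0x2E -> actual offset 0x2C
	 * i.e. the two halfwords of each 4-byte word swap places. */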
@@ -277,6 +283,7 @@ static int __devexit sdhci_of_remove(struct of_device *ofdev)
277static const struct of_device_id sdhci_of_match[] = { 283static const struct of_device_id sdhci_of_match[] = {
278 { .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, }, 284 { .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, },
279 { .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, }, 285 { .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, },
286 { .compatible = "fsl,esdhc", .data = &sdhci_esdhc, },
280 { .compatible = "generic-sdhci", }, 287 { .compatible = "generic-sdhci", },
281 {}, 288 {},
282}; 289};
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
new file mode 100644
index 00000000000..297f40ae6ad
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -0,0 +1,168 @@
1/*
2 * sdhci-pltfm.c Support for SDHCI platform devices
3 * Copyright (c) 2009 Intel Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/* Supports:
20 * SDHCI platform devices
21 *
22 * Inspired by sdhci-pci.c, by Pierre Ossman
23 */
24
25#include <linux/delay.h>
26#include <linux/highmem.h>
27#include <linux/platform_device.h>
28
29#include <linux/mmc/host.h>
30
31#include <linux/io.h>
32
33#include "sdhci.h"
34
35/*****************************************************************************\
36 * *
37 * SDHCI core callbacks *
38 * *
39\*****************************************************************************/
40
41static struct sdhci_ops sdhci_pltfm_ops = {
42};
43
44/*****************************************************************************\
45 * *
46 * Device probing/removal *
47 * *
48\*****************************************************************************/
49
50static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
51{
52 struct sdhci_host *host;
53 struct resource *iomem;
54 int ret;
55
56 BUG_ON(pdev == NULL);
57
58 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
59 if (!iomem) {
60 ret = -ENOMEM;
61 goto err;
62 }
63
64 if (resource_size(iomem) != 0x100)
65 dev_err(&pdev->dev, "Invalid iomem size. You may "
66 "experience problems.\n");
67
68 if (pdev->dev.parent)
69 host = sdhci_alloc_host(pdev->dev.parent, 0);
70 else
71 host = sdhci_alloc_host(&pdev->dev, 0);
72
73 if (IS_ERR(host)) {
74 ret = PTR_ERR(host);
75 goto err;
76 }
77
78 host->hw_name = "platform";
79 host->ops = &sdhci_pltfm_ops;
80 host->irq = platform_get_irq(pdev, 0);
81
82 if (!request_mem_region(iomem->start, resource_size(iomem),
83 mmc_hostname(host->mmc))) {
84 dev_err(&pdev->dev, "cannot request region\n");
85 ret = -EBUSY;
86 goto err_request;
87 }
88
89 host->ioaddr = ioremap(iomem->start, resource_size(iomem));
90 if (!host->ioaddr) {
91 dev_err(&pdev->dev, "failed to remap registers\n");
92 ret = -ENOMEM;
93 goto err_remap;
94 }
95
96 ret = sdhci_add_host(host);
97 if (ret)
98 goto err_add_host;
99
100 platform_set_drvdata(pdev, host);
101
102 return 0;
103
104err_add_host:
105 iounmap(host->ioaddr);
106err_remap:
107 release_mem_region(iomem->start, resource_size(iomem));
108err_request:
109 sdhci_free_host(host);
110err:
111 printk(KERN_ERR "Probing of sdhci-pltfm failed: %d\n", ret);
112 return ret;
113}
114
115static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
116{
117 struct sdhci_host *host = platform_get_drvdata(pdev);
118 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
119 int dead;
120 u32 scratch;
121
122 dead = 0;
123 scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
124 if (scratch == (u32)-1)
125 dead = 1;
126
127 sdhci_remove_host(host, dead);
128 iounmap(host->ioaddr);
129 release_mem_region(iomem->start, resource_size(iomem));
130 sdhci_free_host(host);
131 platform_set_drvdata(pdev, NULL);
132
133 return 0;
134}
135
136static struct platform_driver sdhci_pltfm_driver = {
137 .driver = {
138 .name = "sdhci",
139 .owner = THIS_MODULE,
140 },
141 .probe = sdhci_pltfm_probe,
142 .remove = __devexit_p(sdhci_pltfm_remove),
143};
144
145/*****************************************************************************\
146 * *
147 * Driver init/exit *
148 * *
149\*****************************************************************************/
150
151static int __init sdhci_drv_init(void)
152{
153 return platform_driver_register(&sdhci_pltfm_driver);
154}
155
156static void __exit sdhci_drv_exit(void)
157{
158 platform_driver_unregister(&sdhci_pltfm_driver);
159}
160
161module_init(sdhci_drv_init);
162module_exit(sdhci_drv_exit);
163
164MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
165MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
166MODULE_LICENSE("GPL v2");
167MODULE_ALIAS("platform:sdhci");
168
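Since sdhci-pltfm binds purely by name, a board file would instantiate it by registering a platform device called "sdhci" with one 0x100-byte memory resource (the size probe() checks for) and an interrupt. A hedged sketch, with placeholder address and IRQ:

	/* Hypothetical board-file registration for the driver above.
	 * The base address and IRQ number are placeholders, not values
	 * for any real board. */
	static struct resource board_sdhci_resources[] = {
		{
			.start	= 0x10010000,			/* placeholder */
			.end	= 0x10010000 + 0x100 - 1,	/* 0x100 bytes */
			.flags	= IORESOURCE_MEM,
		},
		{
			.start	= 42,				/* placeholder */
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device board_sdhci_device = {
		.name		= "sdhci",	/* matches sdhci_pltfm_driver */
		.id		= -1,
		.resource	= board_sdhci_resources,
		.num_resources	= ARRAY_SIZE(board_sdhci_resources),
	};

Registering this with platform_device_register(&board_sdhci_device) at board init would then cause the driver's probe() to run against it.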
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9234be2226e..35789c6edc1 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -78,6 +78,11 @@ static void sdhci_dumpregs(struct sdhci_host *host)
78 sdhci_readl(host, SDHCI_CAPABILITIES), 78 sdhci_readl(host, SDHCI_CAPABILITIES),
79 sdhci_readl(host, SDHCI_MAX_CURRENT)); 79 sdhci_readl(host, SDHCI_MAX_CURRENT));
80 80
81 if (host->flags & SDHCI_USE_ADMA)
82 printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
83 readl(host->ioaddr + SDHCI_ADMA_ERROR),
84 readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
85
81 printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n"); 86 printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
82} 87}
83 88
@@ -1005,12 +1010,34 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
1005{ 1010{
1006 u8 pwr; 1011 u8 pwr;
1007 1012
1008 if (host->power == power) 1013 if (power == (unsigned short)-1)
1014 pwr = 0;
1015 else {
1016 switch (1 << power) {
1017 case MMC_VDD_165_195:
1018 pwr = SDHCI_POWER_180;
1019 break;
1020 case MMC_VDD_29_30:
1021 case MMC_VDD_30_31:
1022 pwr = SDHCI_POWER_300;
1023 break;
1024 case MMC_VDD_32_33:
1025 case MMC_VDD_33_34:
1026 pwr = SDHCI_POWER_330;
1027 break;
1028 default:
1029 BUG();
1030 }
1031 }
1032
1033 if (host->pwr == pwr)
1009 return; 1034 return;
1010 1035
1011 if (power == (unsigned short)-1) { 1036 host->pwr = pwr;
1037
1038 if (pwr == 0) {
1012 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1039 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1013 goto out; 1040 return;
1014 } 1041 }
1015 1042
1016 /* 1043 /*
@@ -1020,35 +1047,16 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
1020 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 1047 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1021 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1048 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1022 1049
1023 pwr = SDHCI_POWER_ON;
1024
1025 switch (1 << power) {
1026 case MMC_VDD_165_195:
1027 pwr |= SDHCI_POWER_180;
1028 break;
1029 case MMC_VDD_29_30:
1030 case MMC_VDD_30_31:
1031 pwr |= SDHCI_POWER_300;
1032 break;
1033 case MMC_VDD_32_33:
1034 case MMC_VDD_33_34:
1035 pwr |= SDHCI_POWER_330;
1036 break;
1037 default:
1038 BUG();
1039 }
1040
1041 /* 1050 /*
1042 * At least the Marvell CaFe chip gets confused if we set the voltage 1051 * At least the Marvell CaFe chip gets confused if we set the voltage
1043 * and set turn on power at the same time, so set the voltage first. 1052 * and set turn on power at the same time, so set the voltage first.
1044 */ 1053 */
1045 if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)) 1054 if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER))
1046 sdhci_writeb(host, pwr & ~SDHCI_POWER_ON, SDHCI_POWER_CONTROL); 1055 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1047 1056
1048 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 1057 pwr |= SDHCI_POWER_ON;
1049 1058
1050out: 1059 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1051 host->power = power;
1052} 1060}
1053 1061
1054/*****************************************************************************\ 1062/*****************************************************************************\
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 65c6f996bbd..2de08349c3c 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -255,7 +255,7 @@ struct sdhci_host {
255 unsigned int timeout_clk; /* Timeout freq (KHz) */ 255 unsigned int timeout_clk; /* Timeout freq (KHz) */
256 256
257 unsigned int clock; /* Current clock (MHz) */ 257 unsigned int clock; /* Current clock (MHz) */
258 unsigned short power; /* Current voltage */ 258 u8 pwr; /* Current voltage */
259 259
260 struct mmc_request *mrq; /* Current request */ 260 struct mmc_request *mrq; /* Current request */
261 struct mmc_command *cmd; /* Current command */ 261 struct mmc_command *cmd; /* Current command */
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 63fbd5b7d31..91991b460c4 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -10,7 +10,7 @@
10 * 10 *
11 * Driver for the MMC / SD / SDIO cell found in: 11 * Driver for the MMC / SD / SDIO cell found in:
12 * 12 *
13 * TC6393XB TC6391XB TC6387XB T7L66XB 13 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
14 * 14 *
15 * This driver draws mainly on scattered spec sheets, Reverse engineering 15 * This driver draws mainly on scattered spec sheets, Reverse engineering
16 * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit 16 * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
@@ -35,69 +35,47 @@
35 35
36#include "tmio_mmc.h" 36#include "tmio_mmc.h"
37 37
38/*
39 * Fixme - documentation conflicts on what the clock values are for the
40 * various dividers.
41 * One document I have says that it's a divisor of a 24MHz clock, another 33.
42 * This probably depends on HCLK for a given platform, so we may need to
43 * require HCLK be passed to us from the MFD core.
44 *
45 */
46
47static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) 38static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
48{ 39{
49 void __iomem *cnf = host->cnf;
50 void __iomem *ctl = host->ctl;
51 u32 clk = 0, clock; 40 u32 clk = 0, clock;
52 41
53 if (new_clock) { 42 if (new_clock) {
54 for (clock = 46875, clk = 0x100; new_clock >= (clock<<1); ) { 43 for (clock = host->mmc->f_min, clk = 0x80000080;
44 new_clock >= (clock<<1); clk >>= 1)
55 clock <<= 1; 45 clock <<= 1;
56 clk >>= 1;
57 }
58 if (clk & 0x1)
59 clk = 0x20000;
60
61 clk >>= 2;
62 tmio_iowrite8((clk & 0x8000) ? 0 : 1, cnf + CNF_SD_CLK_MODE);
63 clk |= 0x100; 46 clk |= 0x100;
64 } 47 }
65 48
66 tmio_iowrite16(clk, ctl + CTL_SD_CARD_CLK_CTL); 49 sd_config_write8(host, CNF_SD_CLK_MODE, clk >> 22);
50 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
67} 51}
68 52
69static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) 53static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
70{ 54{
71 void __iomem *ctl = host->ctl; 55 sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
72
73 tmio_iowrite16(0x0000, ctl + CTL_CLK_AND_WAIT_CTL);
74 msleep(10); 56 msleep(10);
75 tmio_iowrite16(tmio_ioread16(ctl + CTL_SD_CARD_CLK_CTL) & ~0x0100, 57 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
76 ctl + CTL_SD_CARD_CLK_CTL); 58 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
77 msleep(10); 59 msleep(10);
78} 60}
79 61
80static void tmio_mmc_clk_start(struct tmio_mmc_host *host) 62static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
81{ 63{
82 void __iomem *ctl = host->ctl; 64 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
83 65 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
84 tmio_iowrite16(tmio_ioread16(ctl + CTL_SD_CARD_CLK_CTL) | 0x0100,
85 ctl + CTL_SD_CARD_CLK_CTL);
86 msleep(10); 66 msleep(10);
87 tmio_iowrite16(0x0100, ctl + CTL_CLK_AND_WAIT_CTL); 67 sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
88 msleep(10); 68 msleep(10);
89} 69}
90 70
91static void reset(struct tmio_mmc_host *host) 71static void reset(struct tmio_mmc_host *host)
92{ 72{
93 void __iomem *ctl = host->ctl;
94
95 /* FIXME - should we set stop clock reg here */ 73 /* FIXME - should we set stop clock reg here */
96 tmio_iowrite16(0x0000, ctl + CTL_RESET_SD); 74 sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
97 tmio_iowrite16(0x0000, ctl + CTL_RESET_SDIO); 75 sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
98 msleep(10); 76 msleep(10);
99 tmio_iowrite16(0x0001, ctl + CTL_RESET_SD); 77 sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
100 tmio_iowrite16(0x0001, ctl + CTL_RESET_SDIO); 78 sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
101 msleep(10); 79 msleep(10);
102} 80}
103 81
@@ -129,13 +107,12 @@ tmio_mmc_finish_request(struct tmio_mmc_host *host)
129static int 107static int
130tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) 108tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
131{ 109{
132 void __iomem *ctl = host->ctl;
133 struct mmc_data *data = host->data; 110 struct mmc_data *data = host->data;
134 int c = cmd->opcode; 111 int c = cmd->opcode;
135 112
136 /* Command 12 is handled by hardware */ 113 /* Command 12 is handled by hardware */
137 if (cmd->opcode == 12 && !cmd->arg) { 114 if (cmd->opcode == 12 && !cmd->arg) {
138 tmio_iowrite16(0x001, ctl + CTL_STOP_INTERNAL_ACTION); 115 sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
139 return 0; 116 return 0;
140 } 117 }
141 118
@@ -160,18 +137,18 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
160 if (data) { 137 if (data) {
161 c |= DATA_PRESENT; 138 c |= DATA_PRESENT;
162 if (data->blocks > 1) { 139 if (data->blocks > 1) {
163 tmio_iowrite16(0x100, ctl + CTL_STOP_INTERNAL_ACTION); 140 sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
164 c |= TRANSFER_MULTI; 141 c |= TRANSFER_MULTI;
165 } 142 }
166 if (data->flags & MMC_DATA_READ) 143 if (data->flags & MMC_DATA_READ)
167 c |= TRANSFER_READ; 144 c |= TRANSFER_READ;
168 } 145 }
169 146
170 enable_mmc_irqs(ctl, TMIO_MASK_CMD); 147 enable_mmc_irqs(host, TMIO_MASK_CMD);
171 148
172 /* Fire off the command */ 149 /* Fire off the command */
173 tmio_iowrite32(cmd->arg, ctl + CTL_ARG_REG); 150 sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
174 tmio_iowrite16(c, ctl + CTL_SD_CMD); 151 sd_ctrl_write16(host, CTL_SD_CMD, c);
175 152
176 return 0; 153 return 0;
177} 154}
@@ -183,7 +160,6 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
183 */ 160 */
184static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host) 161static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
185{ 162{
186 void __iomem *ctl = host->ctl;
187 struct mmc_data *data = host->data; 163 struct mmc_data *data = host->data;
188 unsigned short *buf; 164 unsigned short *buf;
189 unsigned int count; 165 unsigned int count;
@@ -206,9 +182,9 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
206 182
207 /* Transfer the data */ 183 /* Transfer the data */
208 if (data->flags & MMC_DATA_READ) 184 if (data->flags & MMC_DATA_READ)
209 tmio_ioread16_rep(ctl + CTL_SD_DATA_PORT, buf, count >> 1); 185 sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
210 else 186 else
211 tmio_iowrite16_rep(ctl + CTL_SD_DATA_PORT, buf, count >> 1); 187 sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
212 188
213 host->sg_off += count; 189 host->sg_off += count;
214 190
@@ -222,7 +198,6 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
222 198
223static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host) 199static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
224{ 200{
225 void __iomem *ctl = host->ctl;
226 struct mmc_data *data = host->data; 201 struct mmc_data *data = host->data;
227 struct mmc_command *stop; 202 struct mmc_command *stop;
228 203
@@ -251,13 +226,13 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
251 */ 226 */
252 227
253 if (data->flags & MMC_DATA_READ) 228 if (data->flags & MMC_DATA_READ)
254 disable_mmc_irqs(ctl, TMIO_MASK_READOP); 229 disable_mmc_irqs(host, TMIO_MASK_READOP);
255 else 230 else
256 disable_mmc_irqs(ctl, TMIO_MASK_WRITEOP); 231 disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
257 232
258 if (stop) { 233 if (stop) {
259 if (stop->opcode == 12 && !stop->arg) 234 if (stop->opcode == 12 && !stop->arg)
260 tmio_iowrite16(0x000, ctl + CTL_STOP_INTERNAL_ACTION); 235 sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
261 else 236 else
262 BUG(); 237 BUG();
263 } 238 }
@@ -268,9 +243,8 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
268static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, 243static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
269 unsigned int stat) 244 unsigned int stat)
270{ 245{
271 void __iomem *ctl = host->ctl, *addr;
272 struct mmc_command *cmd = host->cmd; 246 struct mmc_command *cmd = host->cmd;
273 int i; 247 int i, addr;
274 248
275 if (!host->cmd) { 249 if (!host->cmd) {
276 pr_debug("Spurious CMD irq\n"); 250 pr_debug("Spurious CMD irq\n");
@@ -284,8 +258,8 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
284 * modify the order of the response for short response command types. 258 * modify the order of the response for short response command types.
285 */ 259 */
286 260
287 for (i = 3, addr = ctl + CTL_RESPONSE ; i >= 0 ; i--, addr += 4) 261 for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
288 cmd->resp[i] = tmio_ioread32(addr); 262 cmd->resp[i] = sd_ctrl_read32(host, addr);
289 263
290 if (cmd->flags & MMC_RSP_136) { 264 if (cmd->flags & MMC_RSP_136) {
291 cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24); 265 cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
@@ -307,9 +281,9 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
307 */ 281 */
308 if (host->data && !cmd->error) { 282 if (host->data && !cmd->error) {
309 if (host->data->flags & MMC_DATA_READ) 283 if (host->data->flags & MMC_DATA_READ)
310 enable_mmc_irqs(ctl, TMIO_MASK_READOP); 284 enable_mmc_irqs(host, TMIO_MASK_READOP);
311 else 285 else
312 enable_mmc_irqs(ctl, TMIO_MASK_WRITEOP); 286 enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
313 } else { 287 } else {
314 tmio_mmc_finish_request(host); 288 tmio_mmc_finish_request(host);
315 } 289 }
@@ -321,20 +295,19 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
321static irqreturn_t tmio_mmc_irq(int irq, void *devid) 295static irqreturn_t tmio_mmc_irq(int irq, void *devid)
322{ 296{
323 struct tmio_mmc_host *host = devid; 297 struct tmio_mmc_host *host = devid;
324 void __iomem *ctl = host->ctl;
325 unsigned int ireg, irq_mask, status; 298 unsigned int ireg, irq_mask, status;
326 299
327 pr_debug("MMC IRQ begin\n"); 300 pr_debug("MMC IRQ begin\n");
328 301
329 status = tmio_ioread32(ctl + CTL_STATUS); 302 status = sd_ctrl_read32(host, CTL_STATUS);
330 irq_mask = tmio_ioread32(ctl + CTL_IRQ_MASK); 303 irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
331 ireg = status & TMIO_MASK_IRQ & ~irq_mask; 304 ireg = status & TMIO_MASK_IRQ & ~irq_mask;
332 305
333 pr_debug_status(status); 306 pr_debug_status(status);
334 pr_debug_status(ireg); 307 pr_debug_status(ireg);
335 308
336 if (!ireg) { 309 if (!ireg) {
337 disable_mmc_irqs(ctl, status & ~irq_mask); 310 disable_mmc_irqs(host, status & ~irq_mask);
338 311
339 pr_debug("tmio_mmc: Spurious irq, disabling! " 312 pr_debug("tmio_mmc: Spurious irq, disabling! "
340 "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); 313 "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
@@ -346,7 +319,7 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
346 while (ireg) { 319 while (ireg) {
347 /* Card insert / remove attempts */ 320 /* Card insert / remove attempts */
348 if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { 321 if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
349 ack_mmc_irqs(ctl, TMIO_STAT_CARD_INSERT | 322 ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
350 TMIO_STAT_CARD_REMOVE); 323 TMIO_STAT_CARD_REMOVE);
351 mmc_detect_change(host->mmc, 0); 324 mmc_detect_change(host->mmc, 0);
352 } 325 }
@@ -358,25 +331,25 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
358 331
359 /* Command completion */ 332 /* Command completion */
360 if (ireg & TMIO_MASK_CMD) { 333 if (ireg & TMIO_MASK_CMD) {
361 ack_mmc_irqs(ctl, TMIO_MASK_CMD); 334 ack_mmc_irqs(host, TMIO_MASK_CMD);
362 tmio_mmc_cmd_irq(host, status); 335 tmio_mmc_cmd_irq(host, status);
363 } 336 }
364 337
365 /* Data transfer */ 338 /* Data transfer */
366 if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { 339 if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
367 ack_mmc_irqs(ctl, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); 340 ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
368 tmio_mmc_pio_irq(host); 341 tmio_mmc_pio_irq(host);
369 } 342 }
370 343
371 /* Data transfer completion */ 344 /* Data transfer completion */
372 if (ireg & TMIO_STAT_DATAEND) { 345 if (ireg & TMIO_STAT_DATAEND) {
373 ack_mmc_irqs(ctl, TMIO_STAT_DATAEND); 346 ack_mmc_irqs(host, TMIO_STAT_DATAEND);
374 tmio_mmc_data_irq(host); 347 tmio_mmc_data_irq(host);
375 } 348 }
376 349
377 /* Check status - keep going until we've handled it all */ 350 /* Check status - keep going until we've handled it all */
378 status = tmio_ioread32(ctl + CTL_STATUS); 351 status = sd_ctrl_read32(host, CTL_STATUS);
379 irq_mask = tmio_ioread32(ctl + CTL_IRQ_MASK); 352 irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
380 ireg = status & TMIO_MASK_IRQ & ~irq_mask; 353 ireg = status & TMIO_MASK_IRQ & ~irq_mask;
381 354
382 pr_debug("Status at end of loop: %08x\n", status); 355 pr_debug("Status at end of loop: %08x\n", status);
@@ -391,8 +364,6 @@ out:
391static int tmio_mmc_start_data(struct tmio_mmc_host *host, 364static int tmio_mmc_start_data(struct tmio_mmc_host *host,
392 struct mmc_data *data) 365 struct mmc_data *data)
393{ 366{
394 void __iomem *ctl = host->ctl;
395
396 pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n", 367 pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
397 data->blksz, data->blocks); 368 data->blksz, data->blocks);
398 369
@@ -407,8 +378,8 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
407 host->data = data; 378 host->data = data;
408 379
409 /* Set transfer length / blocksize */ 380 /* Set transfer length / blocksize */
410 tmio_iowrite16(data->blksz, ctl + CTL_SD_XFER_LEN); 381 sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
411 tmio_iowrite16(data->blocks, ctl + CTL_XFER_BLK_COUNT); 382 sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
412 383
413 return 0; 384 return 0;
414} 385}
@@ -449,8 +420,6 @@ fail:
449static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 420static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
450{ 421{
451 struct tmio_mmc_host *host = mmc_priv(mmc); 422 struct tmio_mmc_host *host = mmc_priv(mmc);
452 void __iomem *cnf = host->cnf;
453 void __iomem *ctl = host->ctl;
454 423
455 if (ios->clock) 424 if (ios->clock)
456 tmio_mmc_set_clock(host, ios->clock); 425 tmio_mmc_set_clock(host, ios->clock);
@@ -458,12 +427,12 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
458 /* Power sequence - OFF -> ON -> UP */ 427 /* Power sequence - OFF -> ON -> UP */
459 switch (ios->power_mode) { 428 switch (ios->power_mode) {
460 case MMC_POWER_OFF: /* power down SD bus */ 429 case MMC_POWER_OFF: /* power down SD bus */
461 tmio_iowrite8(0x00, cnf + CNF_PWR_CTL_2); 430 sd_config_write8(host, CNF_PWR_CTL_2, 0x00);
462 tmio_mmc_clk_stop(host); 431 tmio_mmc_clk_stop(host);
463 break; 432 break;
464 case MMC_POWER_ON: /* power up SD bus */ 433 case MMC_POWER_ON: /* power up SD bus */
465 434
466 tmio_iowrite8(0x02, cnf + CNF_PWR_CTL_2); 435 sd_config_write8(host, CNF_PWR_CTL_2, 0x02);
467 break; 436 break;
468 case MMC_POWER_UP: /* start bus clock */ 437 case MMC_POWER_UP: /* start bus clock */
469 tmio_mmc_clk_start(host); 438 tmio_mmc_clk_start(host);
@@ -472,10 +441,10 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
472 441
473 switch (ios->bus_width) { 442 switch (ios->bus_width) {
474 case MMC_BUS_WIDTH_1: 443 case MMC_BUS_WIDTH_1:
475 tmio_iowrite16(0x80e0, ctl + CTL_SD_MEM_CARD_OPT); 444 sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
476 break; 445 break;
477 case MMC_BUS_WIDTH_4: 446 case MMC_BUS_WIDTH_4:
478 tmio_iowrite16(0x00e0, ctl + CTL_SD_MEM_CARD_OPT); 447 sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
479 break; 448 break;
480 } 449 }
481 450
@@ -486,9 +455,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
486static int tmio_mmc_get_ro(struct mmc_host *mmc) 455static int tmio_mmc_get_ro(struct mmc_host *mmc)
487{ 456{
488 struct tmio_mmc_host *host = mmc_priv(mmc); 457 struct tmio_mmc_host *host = mmc_priv(mmc);
489 void __iomem *ctl = host->ctl;
490 458
491 return (tmio_ioread16(ctl + CTL_STATUS) & TMIO_STAT_WRPROTECT) ? 0 : 1; 459 return (sd_ctrl_read16(host, CTL_STATUS) & TMIO_STAT_WRPROTECT) ? 0 : 1;
492} 460}
493 461
494static struct mmc_host_ops tmio_mmc_ops = { 462static struct mmc_host_ops tmio_mmc_ops = {
@@ -518,13 +486,8 @@ static int tmio_mmc_resume(struct platform_device *dev)
518 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 486 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
519 struct mmc_host *mmc = platform_get_drvdata(dev); 487 struct mmc_host *mmc = platform_get_drvdata(dev);
520 struct tmio_mmc_host *host = mmc_priv(mmc); 488 struct tmio_mmc_host *host = mmc_priv(mmc);
521 void __iomem *cnf = host->cnf;
522 int ret = 0; 489 int ret = 0;
523 490
524 /* Enable the MMC/SD Control registers */
525 tmio_iowrite16(SDCREN, cnf + CNF_CMD);
526 tmio_iowrite32(dev->resource[0].start & 0xfffe, cnf + CNF_CTL_BASE);
527
528 /* Tell the MFD core we are ready to be enabled */ 491 /* Tell the MFD core we are ready to be enabled */
529 if (cell->enable) { 492 if (cell->enable) {
530 ret = cell->enable(dev); 493 ret = cell->enable(dev);
@@ -532,6 +495,11 @@ static int tmio_mmc_resume(struct platform_device *dev)
532 goto out; 495 goto out;
533 } 496 }
534 497
498 /* Enable the MMC/SD Control registers */
499 sd_config_write16(host, CNF_CMD, SDCREN);
500 sd_config_write32(host, CNF_CTL_BASE,
501 (dev->resource[0].start >> host->bus_shift) & 0xfffe);
502
535 mmc_resume_host(mmc); 503 mmc_resume_host(mmc);
536 504
537out: 505out:
@@ -545,20 +513,25 @@ out:
545static int __devinit tmio_mmc_probe(struct platform_device *dev) 513static int __devinit tmio_mmc_probe(struct platform_device *dev)
546{ 514{
547 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 515 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
516 struct tmio_mmc_data *pdata;
548 struct resource *res_ctl, *res_cnf; 517 struct resource *res_ctl, *res_cnf;
549 struct tmio_mmc_host *host; 518 struct tmio_mmc_host *host;
550 struct mmc_host *mmc; 519 struct mmc_host *mmc;
551 int ret = -ENOMEM; 520 int ret = -EINVAL;
552 521
553 if (dev->num_resources != 3) 522 if (dev->num_resources != 3)
554 goto out; 523 goto out;
555 524
556 res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0); 525 res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
557 res_cnf = platform_get_resource(dev, IORESOURCE_MEM, 1); 526 res_cnf = platform_get_resource(dev, IORESOURCE_MEM, 1);
558 if (!res_ctl || !res_cnf) { 527 if (!res_ctl || !res_cnf)
559 ret = -EINVAL;
560 goto out; 528 goto out;
561 } 529
530 pdata = cell->driver_data;
531 if (!pdata || !pdata->hclk)
532 goto out;
533
534 ret = -ENOMEM;
562 535
563 mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev); 536 mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev);
564 if (!mmc) 537 if (!mmc)
@@ -568,6 +541,9 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
568 host->mmc = mmc; 541 host->mmc = mmc;
569 platform_set_drvdata(dev, mmc); 542 platform_set_drvdata(dev, mmc);
570 543
544 /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
545 host->bus_shift = resource_size(res_ctl) >> 10;
546
571 host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); 547 host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
572 if (!host->ctl) 548 if (!host->ctl)
573 goto host_free; 549 goto host_free;
@@ -578,15 +554,10 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
578 554
579 mmc->ops = &tmio_mmc_ops; 555 mmc->ops = &tmio_mmc_ops;
580 mmc->caps = MMC_CAP_4_BIT_DATA; 556 mmc->caps = MMC_CAP_4_BIT_DATA;
581 mmc->f_min = 46875; /* 24000000 / 512 */ 557 mmc->f_max = pdata->hclk;
582 mmc->f_max = 24000000; 558 mmc->f_min = mmc->f_max / 512;
583 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 559 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
584 560
585 /* Enable the MMC/SD Control registers */
586 tmio_iowrite16(SDCREN, host->cnf + CNF_CMD);
587 tmio_iowrite32(dev->resource[0].start & 0xfffe,
588 host->cnf + CNF_CTL_BASE);
589
590 /* Tell the MFD core we are ready to be enabled */ 561 /* Tell the MFD core we are ready to be enabled */
591 if (cell->enable) { 562 if (cell->enable) {
592 ret = cell->enable(dev); 563 ret = cell->enable(dev);
@@ -594,14 +565,19 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
594 goto unmap_cnf; 565 goto unmap_cnf;
595 } 566 }
596 567
568 /* Enable the MMC/SD Control registers */
569 sd_config_write16(host, CNF_CMD, SDCREN);
570 sd_config_write32(host, CNF_CTL_BASE,
571 (dev->resource[0].start >> host->bus_shift) & 0xfffe);
572
597 /* Disable SD power during suspend */ 573 /* Disable SD power during suspend */
598 tmio_iowrite8(0x01, host->cnf + CNF_PWR_CTL_3); 574 sd_config_write8(host, CNF_PWR_CTL_3, 0x01);
599 575
600 /* The below is required but why? FIXME */ 576 /* The below is required but why? FIXME */
601 tmio_iowrite8(0x1f, host->cnf + CNF_STOP_CLK_CTL); 577 sd_config_write8(host, CNF_STOP_CLK_CTL, 0x1f);
602 578
603 /* Power down SD bus*/ 579 /* Power down SD bus*/
604 tmio_iowrite8(0x0, host->cnf + CNF_PWR_CTL_2); 580 sd_config_write8(host, CNF_PWR_CTL_2, 0x00);
605 581
606 tmio_mmc_clk_stop(host); 582 tmio_mmc_clk_stop(host);
607 reset(host); 583 reset(host);
@@ -612,22 +588,20 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
612 else 588 else
613 goto unmap_cnf; 589 goto unmap_cnf;
614 590
615 disable_mmc_irqs(host->ctl, TMIO_MASK_ALL); 591 disable_mmc_irqs(host, TMIO_MASK_ALL);
616 592
617 ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED, "tmio-mmc", 593 ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
618 host); 594 IRQF_TRIGGER_FALLING, "tmio-mmc", host);
619 if (ret) 595 if (ret)
620 goto unmap_cnf; 596 goto unmap_cnf;
621 597
622 set_irq_type(host->irq, IRQ_TYPE_EDGE_FALLING);
623
624 mmc_add_host(mmc); 598 mmc_add_host(mmc);
625 599
626 printk(KERN_INFO "%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), 600 printk(KERN_INFO "%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
627 (unsigned long)host->ctl, host->irq); 601 (unsigned long)host->ctl, host->irq);
628 602
629 /* Unmask the IRQs we want to know about */ 603 /* Unmask the IRQs we want to know about */
630 enable_mmc_irqs(host->ctl, TMIO_MASK_IRQ); 604 enable_mmc_irqs(host, TMIO_MASK_IRQ);
631 605
632 return 0; 606 return 0;
633 607
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 9c831ab2ece..9fa99859497 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -83,34 +83,36 @@
83 TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT) 83 TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
84#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) 84#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
85 85
86#define enable_mmc_irqs(ctl, i) \ 86
87#define enable_mmc_irqs(host, i) \
87 do { \ 88 do { \
88 u32 mask;\ 89 u32 mask;\
89 mask = tmio_ioread32((ctl) + CTL_IRQ_MASK); \ 90 mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
90 mask &= ~((i) & TMIO_MASK_IRQ); \ 91 mask &= ~((i) & TMIO_MASK_IRQ); \
91 tmio_iowrite32(mask, (ctl) + CTL_IRQ_MASK); \ 92 sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
92 } while (0) 93 } while (0)
93 94
94#define disable_mmc_irqs(ctl, i) \ 95#define disable_mmc_irqs(host, i) \
95 do { \ 96 do { \
96 u32 mask;\ 97 u32 mask;\
97 mask = tmio_ioread32((ctl) + CTL_IRQ_MASK); \ 98 mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
98 mask |= ((i) & TMIO_MASK_IRQ); \ 99 mask |= ((i) & TMIO_MASK_IRQ); \
99 tmio_iowrite32(mask, (ctl) + CTL_IRQ_MASK); \ 100 sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
100 } while (0) 101 } while (0)
101 102
102#define ack_mmc_irqs(ctl, i) \ 103#define ack_mmc_irqs(host, i) \
103 do { \ 104 do { \
104 u32 mask;\ 105 u32 mask;\
105 mask = tmio_ioread32((ctl) + CTL_STATUS); \ 106 mask = sd_ctrl_read32((host), CTL_STATUS); \
106 mask &= ~((i) & TMIO_MASK_IRQ); \ 107 mask &= ~((i) & TMIO_MASK_IRQ); \
107 tmio_iowrite32(mask, (ctl) + CTL_STATUS); \ 108 sd_ctrl_write32((host), CTL_STATUS, mask); \
108 } while (0) 109 } while (0)
109 110
110 111
111struct tmio_mmc_host { 112struct tmio_mmc_host {
112 void __iomem *cnf; 113 void __iomem *cnf;
113 void __iomem *ctl; 114 void __iomem *ctl;
115 unsigned long bus_shift;
114 struct mmc_command *cmd; 116 struct mmc_command *cmd;
115 struct mmc_request *mrq; 117 struct mmc_request *mrq;
116 struct mmc_data *data; 118 struct mmc_data *data;
@@ -123,6 +125,63 @@ struct tmio_mmc_host {
123 unsigned int sg_off; 125 unsigned int sg_off;
124}; 126};
125 127
128#include <linux/io.h>
129
130static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
131{
132 return readw(host->ctl + (addr << host->bus_shift));
133}
134
135static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
136 u16 *buf, int count)
137{
138 readsw(host->ctl + (addr << host->bus_shift), buf, count);
139}
140
141static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
142{
143 return readw(host->ctl + (addr << host->bus_shift)) |
144 readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
145}
146
147static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
148 u16 val)
149{
150 writew(val, host->ctl + (addr << host->bus_shift));
151}
152
153static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
154 u16 *buf, int count)
155{
156 writesw(host->ctl + (addr << host->bus_shift), buf, count);
157}
158
159static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr,
160 u32 val)
161{
162 writew(val, host->ctl + (addr << host->bus_shift));
163 writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
164}
165
166static inline void sd_config_write8(struct tmio_mmc_host *host, int addr,
167 u8 val)
168{
169 writeb(val, host->cnf + (addr << host->bus_shift));
170}
171
172static inline void sd_config_write16(struct tmio_mmc_host *host, int addr,
173 u16 val)
174{
175 writew(val, host->cnf + (addr << host->bus_shift));
176}
177
178static inline void sd_config_write32(struct tmio_mmc_host *host, int addr,
179 u32 val)
180{
181 writew(val, host->cnf + (addr << host->bus_shift));
182 writew(val >> 16, host->cnf + ((addr + 2) << host->bus_shift));
183}
184
126#include <linux/scatterlist.h> 185#include <linux/scatterlist.h>
127#include <linux/blkdev.h> 186#include <linux/blkdev.h>
128 187
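All of these accessors scale the nominal register offset by bus_shift, which probe() now derives from the size of the control resource (0x200 gives shift 0, 0x400 gives shift 1), and 32-bit registers are handled as two 16-bit halves at addr and addr + 2, each shifted independently. An offset illustration (the offsets are examples, not named registers):

	/* bus_shift == 0: nominal offset 0x04 -> host->ctl + 0x04
	 * bus_shift == 1: nominal offset 0x04 -> host->ctl + 0x08
	 * (a 32-bit register at nominal 0x04 with bus_shift == 1 becomes
	 *  16-bit accesses at host->ctl + 0x08 and host->ctl + 0x0c). */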
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 7d04fb9ddca..b8e35a0b4d7 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -154,7 +154,8 @@ config MTD_AFS_PARTS
154 154
155 You will still need the parsing functions to be called by the driver 155 You will still need the parsing functions to be called by the driver
156 for your particular device. It won't happen automatically. The 156 for your particular device. It won't happen automatically. The
157 'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example. 157 'armflash' map driver (CONFIG_MTD_ARM_INTEGRATOR) does this, for
158 example.
158 159
159config MTD_OF_PARTS 160config MTD_OF_PARTS
160 tristate "Flash partition map based on OF description" 161 tristate "Flash partition map based on OF description"
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 6fde0a2e356..325fab92a62 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -49,7 +49,7 @@ config MTD_MS02NV
49 If you want to compile this driver as a module ( = code which can be 49 If you want to compile this driver as a module ( = code which can be
50 inserted in and removed from the running kernel whenever you want), 50 inserted in and removed from the running kernel whenever you want),
51 say M here and read <file:Documentation/kbuild/modules.txt>. 51 say M here and read <file:Documentation/kbuild/modules.txt>.
52 The module will be called ms02-nv.ko. 52 The module will be called ms02-nv.
53 53
54config MTD_DATAFLASH 54config MTD_DATAFLASH
55 tristate "Support for AT45xxx DataFlash" 55 tristate "Support for AT45xxx DataFlash"
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 62dee54af0a..43976aa4dbb 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -178,7 +178,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
178 /* Calculate flash page address; use block erase (for speed) if 178 /* Calculate flash page address; use block erase (for speed) if
179 * we're at a block boundary and need to erase the whole block. 179 * we're at a block boundary and need to erase the whole block.
180 */ 180 */
181 pageaddr = div_u64(instr->len, priv->page_size); 181 pageaddr = div_u64(instr->addr, priv->page_size);
182 do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize; 182 do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize;
183 pageaddr = pageaddr << priv->page_offset; 183 pageaddr = pageaddr << priv->page_offset;
184 184
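
The dataflash_erase change above is a real bug fix, not a cleanup: the page number must be derived from the erase offset, not the erase length. With the 528-byte page size these parts use, a request of instr->addr = 0x21000 and instr->len = 0x1000 made the old code compute pageaddr = div_u64(0x1000, 528) = 7, erasing near the start of the device no matter what addr said; the corrected code computes pageaddr = div_u64(0x21000, 528) = 256, the page the offset actually lands in. (The request values here are illustrative; the arithmetic is the quoted code's.)
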
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index a49a9c8f2cb..aaac3b6800b 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -47,40 +47,41 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
47 unsigned long block, nsect; 47 unsigned long block, nsect;
48 char *buf; 48 char *buf;
49 49
50 block = req->sector << 9 >> tr->blkshift; 50 block = blk_rq_pos(req) << 9 >> tr->blkshift;
51 nsect = req->current_nr_sectors << 9 >> tr->blkshift; 51 nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
52 52
53 buf = req->buffer; 53 buf = req->buffer;
54 54
55 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && 55 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
56 req->cmd[0] == REQ_LB_OP_DISCARD) 56 req->cmd[0] == REQ_LB_OP_DISCARD)
57 return !tr->discard(dev, block, nsect); 57 return tr->discard(dev, block, nsect);
58 58
59 if (!blk_fs_request(req)) 59 if (!blk_fs_request(req))
60 return 0; 60 return -EIO;
61 61
62 if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk)) 62 if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
63 return 0; 63 get_capacity(req->rq_disk))
64 return -EIO;
64 65
65 switch(rq_data_dir(req)) { 66 switch(rq_data_dir(req)) {
66 case READ: 67 case READ:
67 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 68 for (; nsect > 0; nsect--, block++, buf += tr->blksize)
68 if (tr->readsect(dev, block, buf)) 69 if (tr->readsect(dev, block, buf))
69 return 0; 70 return -EIO;
70 return 1; 71 return 0;
71 72
72 case WRITE: 73 case WRITE:
73 if (!tr->writesect) 74 if (!tr->writesect)
74 return 0; 75 return -EIO;
75 76
76 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 77 for (; nsect > 0; nsect--, block++, buf += tr->blksize)
77 if (tr->writesect(dev, block, buf)) 78 if (tr->writesect(dev, block, buf))
78 return 0; 79 return -EIO;
79 return 1; 80 return 0;
80 81
81 default: 82 default:
82 printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req)); 83 printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
83 return 0; 84 return -EIO;
84 } 85 }
85} 86}
86 87
@@ -88,19 +89,18 @@ static int mtd_blktrans_thread(void *arg)
88{ 89{
89 struct mtd_blktrans_ops *tr = arg; 90 struct mtd_blktrans_ops *tr = arg;
90 struct request_queue *rq = tr->blkcore_priv->rq; 91 struct request_queue *rq = tr->blkcore_priv->rq;
92 struct request *req = NULL;
91 93
92 /* we might get involved when memory gets low, so use PF_MEMALLOC */ 94 /* we might get involved when memory gets low, so use PF_MEMALLOC */
93 current->flags |= PF_MEMALLOC; 95 current->flags |= PF_MEMALLOC;
94 96
95 spin_lock_irq(rq->queue_lock); 97 spin_lock_irq(rq->queue_lock);
98
96 while (!kthread_should_stop()) { 99 while (!kthread_should_stop()) {
97 struct request *req;
98 struct mtd_blktrans_dev *dev; 100 struct mtd_blktrans_dev *dev;
99 int res = 0; 101 int res;
100
101 req = elv_next_request(rq);
102 102
103 if (!req) { 103 if (!req && !(req = blk_fetch_request(rq))) {
104 set_current_state(TASK_INTERRUPTIBLE); 104 set_current_state(TASK_INTERRUPTIBLE);
105 spin_unlock_irq(rq->queue_lock); 105 spin_unlock_irq(rq->queue_lock);
106 schedule(); 106 schedule();
@@ -119,8 +119,13 @@ static int mtd_blktrans_thread(void *arg)
119 119
120 spin_lock_irq(rq->queue_lock); 120 spin_lock_irq(rq->queue_lock);
121 121
122 end_request(req, res); 122 if (!__blk_end_request_cur(req, res))
123 req = NULL;
123 } 124 }
125
126 if (req)
127 __blk_end_request_all(req, -EIO);
128
124 spin_unlock_irq(rq->queue_lock); 129 spin_unlock_irq(rq->queue_lock);
125 130
126 return 0; 131 return 0;
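
The thread-loop hunk above is the standard shape for moving off elv_next_request(): fetch a request once with blk_fetch_request(), keep it across partial completions for as long as __blk_end_request_cur() returns true, and only drop it when the whole request has been ended. Condensed, with the locking and sleep path elided and do_one_chunk() as a stand-in for do_blktrans_request():

	struct request *req = NULL;

	while (!kthread_should_stop()) {
		int res;

		if (!req && !(req = blk_fetch_request(rq)))
			continue;		/* queue empty: sleep, retry */

		res = do_one_chunk(req);	/* 0 or -EIO, as above */

		if (!__blk_end_request_cur(req, res))
			req = NULL;		/* request fully completed */
	}
	if (req)				/* stopped mid-request */
		__blk_end_request_all(req, -EIO);
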
@@ -373,7 +378,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
373 } 378 }
374 379
375 tr->blkcore_priv->rq->queuedata = tr; 380 tr->blkcore_priv->rq->queuedata = tr;
376 blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize); 381 blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
377 if (tr->discard) 382 if (tr->discard)
378 blk_queue_set_discard(tr->blkcore_priv->rq, 383 blk_queue_set_discard(tr->blkcore_priv->rq,
379 blktrans_discard_request); 384 blktrans_discard_request);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 890936d0275..f3276897859 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -260,7 +260,7 @@ config MTD_NAND_BASLER_EXCITE
260 help 260 help
261 This enables the driver for the NAND flash device found on the 261 This enables the driver for the NAND flash device found on the
262 Basler eXcite Smart Camera. If built as a module, the driver 262 Basler eXcite Smart Camera. If built as a module, the driver
263 will be named "excite_nandflash.ko". 263 will be named excite_nandflash.
264 264
265config MTD_NAND_CAFE 265config MTD_NAND_CAFE
266 tristate "NAND support for OLPC CAFÉ chip" 266 tristate "NAND support for OLPC CAFÉ chip"
@@ -282,7 +282,7 @@ config MTD_NAND_CS553X
282 controller is enabled for NAND, and currently requires that 282 controller is enabled for NAND, and currently requires that
283 the controller be in MMIO mode. 283 the controller be in MMIO mode.
284 284
285 If you say "m", the module will be called "cs553x_nand.ko". 285 If you say "m", the module will be called cs553x_nand.
286 286
287config MTD_NAND_ATMEL 287config MTD_NAND_ATMEL
288 tristate "Support for NAND Flash / SmartMedia on AT91 and AVR32" 288 tristate "Support for NAND Flash / SmartMedia on AT91 and AVR32"
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 0119220de7d..02700f769b8 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -407,16 +407,17 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
407 } 407 }
408 info->chip.ecc.mode = ecc_mode; 408 info->chip.ecc.mode = ecc_mode;
409 409
410 info->clk = clk_get(&pdev->dev, "AEMIFCLK"); 410 info->clk = clk_get(&pdev->dev, "aemif");
411 if (IS_ERR(info->clk)) { 411 if (IS_ERR(info->clk)) {
412 ret = PTR_ERR(info->clk); 412 ret = PTR_ERR(info->clk);
413 dev_dbg(&pdev->dev, "unable to get AEMIFCLK, err %d\n", ret); 413 dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
414 goto err_clk; 414 goto err_clk;
415 } 415 }
416 416
417 ret = clk_enable(info->clk); 417 ret = clk_enable(info->clk);
418 if (ret < 0) { 418 if (ret < 0) {
419 dev_dbg(&pdev->dev, "unable to enable AEMIFCLK, err %d\n", ret); 419 dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
420 ret);
420 goto err_clk_enable; 421 goto err_clk_enable;
421 } 422 }
422 423
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index f3548d04801..40c26080ecd 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -831,6 +831,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
831 break; 831 break;
832 832
833 case NAND_CMD_READID: 833 case NAND_CMD_READID:
834 host->col_addr = 0;
834 send_read_id(host); 835 send_read_id(host);
835 break; 836 break;
836 837
@@ -867,6 +868,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
867 mtd->priv = this; 868 mtd->priv = this;
868 mtd->owner = THIS_MODULE; 869 mtd->owner = THIS_MODULE;
869 mtd->dev.parent = &pdev->dev; 870 mtd->dev.parent = &pdev->dev;
871 mtd->name = "mxc_nand";
870 872
871 /* 50 us command delay time */ 873 /* 50 us command delay time */
872 this->chip_delay = 5; 874 this->chip_delay = 5;
@@ -882,8 +884,10 @@ static int __init mxcnd_probe(struct platform_device *pdev)
882 this->verify_buf = mxc_nand_verify_buf; 884 this->verify_buf = mxc_nand_verify_buf;
883 885
884 host->clk = clk_get(&pdev->dev, "nfc"); 886 host->clk = clk_get(&pdev->dev, "nfc");
885 if (IS_ERR(host->clk)) 887 if (IS_ERR(host->clk)) {
888 err = PTR_ERR(host->clk);
886 goto eclk; 889 goto eclk;
890 }
887 891
888 clk_enable(host->clk); 892 clk_enable(host->clk);
889 host->clk_act = 1; 893 host->clk_act = 1;
@@ -896,7 +900,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
896 900
897 host->regs = ioremap(res->start, res->end - res->start + 1); 901 host->regs = ioremap(res->start, res->end - res->start + 1);
898 if (!host->regs) { 902 if (!host->regs) {
899 err = -EIO; 903 err = -ENOMEM;
900 goto eres; 904 goto eres;
901 } 905 }
902 906
@@ -1011,30 +1015,35 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
1011#ifdef CONFIG_PM 1015#ifdef CONFIG_PM
1012static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state) 1016static int mxcnd_suspend(struct platform_device *pdev, pm_message_t state)
1013{ 1017{
1014 struct mtd_info *info = platform_get_drvdata(pdev); 1018 struct mtd_info *mtd = platform_get_drvdata(pdev);
1019 struct nand_chip *nand_chip = mtd->priv;
1020 struct mxc_nand_host *host = nand_chip->priv;
1015 int ret = 0; 1021 int ret = 0;
1016 1022
1017 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n"); 1023 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n");
1018 if (info) 1024 if (mtd) {
1019 ret = info->suspend(info); 1025 ret = mtd->suspend(mtd);
1020 1026 /* Disable the NFC clock */
1021 /* Disable the NFC clock */ 1027 clk_disable(host->clk);
1022 clk_disable(nfc_clk); /* FIXME */ 1028 }
1023 1029
1024 return ret; 1030 return ret;
1025} 1031}
1026 1032
1027static int mxcnd_resume(struct platform_device *pdev) 1033static int mxcnd_resume(struct platform_device *pdev)
1028{ 1034{
1029 struct mtd_info *info = platform_get_drvdata(pdev); 1035 struct mtd_info *mtd = platform_get_drvdata(pdev);
1036 struct nand_chip *nand_chip = mtd->priv;
1037 struct mxc_nand_host *host = nand_chip->priv;
1030 int ret = 0; 1038 int ret = 0;
1031 1039
1032 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n"); 1040 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n");
1033 /* Enable the NFC clock */
1034 clk_enable(nfc_clk); /* FIXME */
1035 1041
1036 if (info) 1042 if (mtd) {
1037 info->resume(info); 1043 /* Enable the NFC clock */
1044 clk_enable(host->clk);
1045 mtd->resume(mtd);
1046 }
1038 1047
1039 return ret; 1048 return ret;
1040} 1049}
@@ -1055,13 +1064,7 @@ static struct platform_driver mxcnd_driver = {
1055 1064
1056static int __init mxc_nd_init(void) 1065static int __init mxc_nd_init(void)
1057{ 1066{
1058 /* Register the device driver structure. */ 1067 return platform_driver_probe(&mxcnd_driver, mxcnd_probe);
1059 pr_info("MXC MTD nand Driver\n");
1060 if (platform_driver_probe(&mxcnd_driver, mxcnd_probe) != 0) {
1061 printk(KERN_ERR "Driver register failed for mxcnd_driver\n");
1062 return -ENODEV;
1063 }
1064 return 0;
1065} 1068}
1066 1069
1067static void __exit mxc_nd_cleanup(void) 1070static void __exit mxc_nd_cleanup(void)
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index f2e9de1414d..6391e3dc800 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -39,7 +39,6 @@
39#include <mach/gpmc.h> 39#include <mach/gpmc.h>
40#include <mach/onenand.h> 40#include <mach/onenand.h>
41#include <mach/gpio.h> 41#include <mach/gpio.h>
42#include <mach/pm.h>
43 42
44#include <mach/dma.h> 43#include <mach/dma.h>
45 44
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3f739cfd92f..01f282cd098 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1886,7 +1886,7 @@ config FEC_MPC52xx
1886 ---help--- 1886 ---help---
1887 This option enables support for the MPC5200's on-chip 1887 This option enables support for the MPC5200's on-chip
1888 Fast Ethernet Controller 1888 Fast Ethernet Controller
1889 If compiled as module, it will be called 'fec_mpc52xx.ko'. 1889 If compiled as module, it will be called fec_mpc52xx.
1890 1890
1891config FEC_MPC52xx_MDIO 1891config FEC_MPC52xx_MDIO
1892 bool "MPC52xx FEC MDIO bus driver" 1892 bool "MPC52xx FEC MDIO bus driver"
@@ -1898,7 +1898,7 @@ config FEC_MPC52xx_MDIO
1898 (Motorola? industry standard). 1898 (Motorola? industry standard).
1899 If your board uses an external PHY connected to FEC, enable this. 1899 If your board uses an external PHY connected to FEC, enable this.
1900 If not sure, enable. 1900 If not sure, enable.
1901 If compiled as module, it will be called 'fec_mpc52xx_phy.ko'. 1901 If compiled as module, it will be called fec_mpc52xx_phy.
1902 1902
1903config NE_H8300 1903config NE_H8300
1904 tristate "NE2000 compatible support for H8/300" 1904 tristate "NE2000 compatible support for H8/300"
@@ -2270,6 +2270,17 @@ config BNX2
2270 To compile this driver as a module, choose M here: the module 2270 To compile this driver as a module, choose M here: the module
2271 will be called bnx2. This is recommended. 2271 will be called bnx2. This is recommended.
2272 2272
2273config CNIC
2274 tristate "Broadcom CNIC support"
2275 depends on BNX2
2276 depends on UIO
2277 help
2278 This driver supports offload features of Broadcom NetXtremeII
2279 gigabit Ethernet cards.
2280
2281 To compile this driver as a module, choose M here: the module
2282 will be called cnic. This is recommended.
2283
2273config SPIDER_NET 2284config SPIDER_NET
2274 tristate "Spider Gigabit Ethernet driver" 2285 tristate "Spider Gigabit Ethernet driver"
2275 depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB) 2286 depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1c378dd5933..d366fb2b40e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o
75obj-$(CONFIG_FEALNX) += fealnx.o 75obj-$(CONFIG_FEALNX) += fealnx.o
76obj-$(CONFIG_TIGON3) += tg3.o 76obj-$(CONFIG_TIGON3) += tg3.o
77obj-$(CONFIG_BNX2) += bnx2.o 77obj-$(CONFIG_BNX2) += bnx2.o
78obj-$(CONFIG_CNIC) += cnic.o
78obj-$(CONFIG_BNX2X) += bnx2x.o 79obj-$(CONFIG_BNX2X) += bnx2x.o
79bnx2x-objs := bnx2x_main.o bnx2x_link.o 80bnx2x-objs := bnx2x_main.o bnx2x_link.o
80spidernet-y += spider_net.o spider_net_ethtool.o 81spidernet-y += spider_net.o spider_net_ethtool.o
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 78cc7146913..b642647170b 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1220,7 +1220,7 @@ static int __init ltpc_setup(char *str)
1220 if (ints[0] > 2) { 1220 if (ints[0] > 2) {
1221 dma = ints[3]; 1221 dma = ints[3];
1222 } 1222 }
1223 /* ignore any other paramters */ 1223 /* ignore any other parameters */
1224 } 1224 }
1225 return 1; 1225 return 1;
1226} 1226}
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 1fcf8388b1c..6f42ad72891 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -456,7 +456,8 @@ static inline void queue_put_desc(unsigned int queue, u32 phys,
456 debug_desc(phys, desc); 456 debug_desc(phys, desc);
457 BUG_ON(phys & 0x1F); 457 BUG_ON(phys & 0x1F);
458 qmgr_put_entry(queue, phys); 458 qmgr_put_entry(queue, phys);
459 BUG_ON(qmgr_stat_overflow(queue)); 459 /* Don't check for queue overflow here, we've allocated sufficient
460 length and queues >= 32 don't support this check anyway. */
460} 461}
461 462
462 463
@@ -512,8 +513,8 @@ static int eth_poll(struct napi_struct *napi, int budget)
512#endif 513#endif
513 napi_complete(napi); 514 napi_complete(napi);
514 qmgr_enable_irq(rxq); 515 qmgr_enable_irq(rxq);
515 if (!qmgr_stat_empty(rxq) && 516 if (!qmgr_stat_below_low_watermark(rxq) &&
516 napi_reschedule(napi)) { 517 napi_reschedule(napi)) { /* not empty again */
517#if DEBUG_RX 518#if DEBUG_RX
518 printk(KERN_DEBUG "%s: eth_poll" 519 printk(KERN_DEBUG "%s: eth_poll"
519 " napi_reschedule successed\n", 520 " napi_reschedule successed\n",
@@ -630,9 +631,9 @@ static void eth_txdone_irq(void *unused)
630 port->tx_buff_tab[n_desc] = NULL; 631 port->tx_buff_tab[n_desc] = NULL;
631 } 632 }
632 633
633 start = qmgr_stat_empty(port->plat->txreadyq); 634 start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
634 queue_put_desc(port->plat->txreadyq, phys, desc); 635 queue_put_desc(port->plat->txreadyq, phys, desc);
635 if (start) { 636 if (start) { /* TX-ready queue was empty */
636#if DEBUG_TX 637#if DEBUG_TX
637 printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n", 638 printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
638 port->netdev->name); 639 port->netdev->name);
@@ -708,13 +709,14 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
708 queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc); 709 queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
709 dev->trans_start = jiffies; 710 dev->trans_start = jiffies;
710 711
711 if (qmgr_stat_empty(txreadyq)) { 712 if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
712#if DEBUG_TX 713#if DEBUG_TX
713 printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name); 714 printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
714#endif 715#endif
715 netif_stop_queue(dev); 716 netif_stop_queue(dev);
716 /* we could miss TX ready interrupt */ 717 /* we could miss TX ready interrupt */
717 if (!qmgr_stat_empty(txreadyq)) { 718 /* really empty in fact */
719 if (!qmgr_stat_below_low_watermark(txreadyq)) {
718#if DEBUG_TX 720#if DEBUG_TX
719 printk(KERN_DEBUG "%s: eth_xmit ready again\n", 721 printk(KERN_DEBUG "%s: eth_xmit ready again\n",
720 dev->name); 722 dev->name);
@@ -814,29 +816,29 @@ static int request_queues(struct port *port)
814 int err; 816 int err;
815 817
816 err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0, 818 err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
817 "%s:RX-free", port->netdev->name); 819 "%s:RX-free", port->netdev->name);
818 if (err) 820 if (err)
819 return err; 821 return err;
820 822
821 err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0, 823 err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
822 "%s:RX", port->netdev->name); 824 "%s:RX", port->netdev->name);
823 if (err) 825 if (err)
824 goto rel_rxfree; 826 goto rel_rxfree;
825 827
826 err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0, 828 err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
827 "%s:TX", port->netdev->name); 829 "%s:TX", port->netdev->name);
828 if (err) 830 if (err)
829 goto rel_rx; 831 goto rel_rx;
830 832
831 err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0, 833 err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
832 "%s:TX-ready", port->netdev->name); 834 "%s:TX-ready", port->netdev->name);
833 if (err) 835 if (err)
834 goto rel_tx; 836 goto rel_tx;
835 837
836 /* TX-done queue handles skbs sent out by the NPEs */ 838 /* TX-done queue handles skbs sent out by the NPEs */
837 if (!ports_open) { 839 if (!ports_open) {
838 err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0, 840 err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
839 "%s:TX-done", DRV_NAME); 841 "%s:TX-done", DRV_NAME);
840 if (err) 842 if (err)
841 goto rel_txready; 843 goto rel_txready;
842 } 844 }
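
The eth_xmit() hunk above preserves a classic lock-free pattern while swapping in the new predicate: stop the queue when TX-ready entries run low, then test the condition once more, because a TX-done interrupt may have refilled the queue between the first test and netif_stop_queue(), and its wakeup would otherwise be lost. In outline (the wake call is a stand-in for however the driver restarts the queue):

	if (qmgr_stat_below_low_watermark(txreadyq)) {
		netif_stop_queue(dev);
		/* re-check: TX-done may have run since the test above */
		if (!qmgr_stat_below_low_watermark(txreadyq))
			netif_wake_queue(dev);	/* recover the lost wakeup */
	}
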
diff --git a/drivers/net/b44.h b/drivers/net/b44.h
index 0443f6801f6..e1905a49279 100644
--- a/drivers/net/b44.h
+++ b/drivers/net/b44.h
@@ -97,7 +97,7 @@
97#define B44_DMARX_STAT 0x021CUL /* DMA RX Current Active Desc. + Status */ 97#define B44_DMARX_STAT 0x021CUL /* DMA RX Current Active Desc. + Status */
98#define DMARX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */ 98#define DMARX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */
99#define DMARX_STAT_SMASK 0x0000f000 /* State Mask */ 99#define DMARX_STAT_SMASK 0x0000f000 /* State Mask */
100#define DMARX_STAT_SDISABLED 0x00000000 /* State Disbaled */ 100#define DMARX_STAT_SDISABLED 0x00000000 /* State Disabled */
101#define DMARX_STAT_SACTIVE 0x00001000 /* State Active */ 101#define DMARX_STAT_SACTIVE 0x00001000 /* State Active */
102#define DMARX_STAT_SIDLE 0x00002000 /* State Idle Wait */ 102#define DMARX_STAT_SIDLE 0x00002000 /* State Idle Wait */
103#define DMARX_STAT_SSTOPPED 0x00003000 /* State Stopped */ 103#define DMARX_STAT_SSTOPPED 0x00003000 /* State Stopped */
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index f99e17e0a31..7e3738112c4 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -50,6 +50,10 @@
50#include <linux/log2.h> 50#include <linux/log2.h>
51#include <linux/list.h> 51#include <linux/list.h>
52 52
53#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54#define BCM_CNIC 1
55#include "cnic_if.h"
56#endif
53#include "bnx2.h" 57#include "bnx2.h"
54#include "bnx2_fw.h" 58#include "bnx2_fw.h"
55 59
@@ -316,6 +320,158 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
316 spin_unlock_bh(&bp->indirect_lock); 320 spin_unlock_bh(&bp->indirect_lock);
317} 321}
318 322
323#ifdef BCM_CNIC
324static int
325bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
326{
327 struct bnx2 *bp = netdev_priv(dev);
328 struct drv_ctl_io *io = &info->data.io;
329
330 switch (info->cmd) {
331 case DRV_CTL_IO_WR_CMD:
332 bnx2_reg_wr_ind(bp, io->offset, io->data);
333 break;
334 case DRV_CTL_IO_RD_CMD:
335 io->data = bnx2_reg_rd_ind(bp, io->offset);
336 break;
337 case DRV_CTL_CTX_WR_CMD:
338 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
339 break;
340 default:
341 return -EINVAL;
342 }
343 return 0;
344}
345
346static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
347{
348 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
349 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
350 int sb_id;
351
352 if (bp->flags & BNX2_FLAG_USING_MSIX) {
353 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
354 bnapi->cnic_present = 0;
355 sb_id = bp->irq_nvecs;
356 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
357 } else {
358 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
359 bnapi->cnic_tag = bnapi->last_status_idx;
360 bnapi->cnic_present = 1;
361 sb_id = 0;
362 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
363 }
364
365 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
366 cp->irq_arr[0].status_blk = (void *)
367 ((unsigned long) bnapi->status_blk.msi +
368 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
369 cp->irq_arr[0].status_blk_num = sb_id;
370 cp->num_irq = 1;
371}
372
373static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
374 void *data)
375{
376 struct bnx2 *bp = netdev_priv(dev);
377 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
378
379 if (ops == NULL)
380 return -EINVAL;
381
382 if (cp->drv_state & CNIC_DRV_STATE_REGD)
383 return -EBUSY;
384
385 bp->cnic_data = data;
386 rcu_assign_pointer(bp->cnic_ops, ops);
387
388 cp->num_irq = 0;
389 cp->drv_state = CNIC_DRV_STATE_REGD;
390
391 bnx2_setup_cnic_irq_info(bp);
392
393 return 0;
394}
395
396static int bnx2_unregister_cnic(struct net_device *dev)
397{
398 struct bnx2 *bp = netdev_priv(dev);
399 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
400 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
401
402 cp->drv_state = 0;
403 bnapi->cnic_present = 0;
404 rcu_assign_pointer(bp->cnic_ops, NULL);
405 synchronize_rcu();
406 return 0;
407}
408
409struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
410{
411 struct bnx2 *bp = netdev_priv(dev);
412 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
413
414 cp->drv_owner = THIS_MODULE;
415 cp->chip_id = bp->chip_id;
416 cp->pdev = bp->pdev;
417 cp->io_base = bp->regview;
418 cp->drv_ctl = bnx2_drv_ctl;
419 cp->drv_register_cnic = bnx2_register_cnic;
420 cp->drv_unregister_cnic = bnx2_unregister_cnic;
421
422 return cp;
423}
424EXPORT_SYMBOL(bnx2_cnic_probe);
425
426static void
427bnx2_cnic_stop(struct bnx2 *bp)
428{
429 struct cnic_ops *c_ops;
430 struct cnic_ctl_info info;
431
432 rcu_read_lock();
433 c_ops = rcu_dereference(bp->cnic_ops);
434 if (c_ops) {
435 info.cmd = CNIC_CTL_STOP_CMD;
436 c_ops->cnic_ctl(bp->cnic_data, &info);
437 }
438 rcu_read_unlock();
439}
440
441static void
442bnx2_cnic_start(struct bnx2 *bp)
443{
444 struct cnic_ops *c_ops;
445 struct cnic_ctl_info info;
446
447 rcu_read_lock();
448 c_ops = rcu_dereference(bp->cnic_ops);
449 if (c_ops) {
450 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
451 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
452
453 bnapi->cnic_tag = bnapi->last_status_idx;
454 }
455 info.cmd = CNIC_CTL_START_CMD;
456 c_ops->cnic_ctl(bp->cnic_data, &info);
457 }
458 rcu_read_unlock();
459}
460
461#else
462
463static void
464bnx2_cnic_stop(struct bnx2 *bp)
465{
466}
467
468static void
469bnx2_cnic_start(struct bnx2 *bp)
470{
471}
472
473#endif
474
319static int 475static int
320bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val) 476bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
321{ 477{
@@ -489,6 +645,7 @@ bnx2_napi_enable(struct bnx2 *bp)
489static void 645static void
490bnx2_netif_stop(struct bnx2 *bp) 646bnx2_netif_stop(struct bnx2 *bp)
491{ 647{
648 bnx2_cnic_stop(bp);
492 bnx2_disable_int_sync(bp); 649 bnx2_disable_int_sync(bp);
493 if (netif_running(bp->dev)) { 650 if (netif_running(bp->dev)) {
494 bnx2_napi_disable(bp); 651 bnx2_napi_disable(bp);
@@ -505,6 +662,7 @@ bnx2_netif_start(struct bnx2 *bp)
505 netif_tx_wake_all_queues(bp->dev); 662 netif_tx_wake_all_queues(bp->dev);
506 bnx2_napi_enable(bp); 663 bnx2_napi_enable(bp);
507 bnx2_enable_int(bp); 664 bnx2_enable_int(bp);
665 bnx2_cnic_start(bp);
508 } 666 }
509 } 667 }
510} 668}
@@ -3165,6 +3323,11 @@ bnx2_has_work(struct bnx2_napi *bnapi)
3165 if (bnx2_has_fast_work(bnapi)) 3323 if (bnx2_has_fast_work(bnapi))
3166 return 1; 3324 return 1;
3167 3325
3326#ifdef BCM_CNIC
3327 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3328 return 1;
3329#endif
3330
3168 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) != 3331 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3169 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS)) 3332 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3170 return 1; 3333 return 1;
@@ -3194,6 +3357,23 @@ bnx2_chk_missed_msi(struct bnx2 *bp)
3194 bp->idle_chk_status_idx = bnapi->last_status_idx; 3357 bp->idle_chk_status_idx = bnapi->last_status_idx;
3195} 3358}
3196 3359
3360#ifdef BCM_CNIC
3361static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3362{
3363 struct cnic_ops *c_ops;
3364
3365 if (!bnapi->cnic_present)
3366 return;
3367
3368 rcu_read_lock();
3369 c_ops = rcu_dereference(bp->cnic_ops);
3370 if (c_ops)
3371 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3372 bnapi->status_blk.msi);
3373 rcu_read_unlock();
3374}
3375#endif
3376
3197static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi) 3377static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3198{ 3378{
3199 struct status_block *sblk = bnapi->status_blk.msi; 3379 struct status_block *sblk = bnapi->status_blk.msi;
@@ -3268,6 +3448,10 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
3268 3448
3269 work_done = bnx2_poll_work(bp, bnapi, work_done, budget); 3449 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3270 3450
3451#ifdef BCM_CNIC
3452 bnx2_poll_cnic(bp, bnapi);
3453#endif
3454
3271 /* bnapi->last_status_idx is used below to tell the hw how 3455 /* bnapi->last_status_idx is used below to tell the hw how
3272 * much work has been processed, so we must read it before 3456 * much work has been processed, so we must read it before
3273 * checking for more work. 3457 * checking for more work.
@@ -4631,8 +4815,11 @@ bnx2_init_chip(struct bnx2 *bp)
4631 val = REG_RD(bp, BNX2_MQ_CONFIG); 4815 val = REG_RD(bp, BNX2_MQ_CONFIG);
4632 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; 4816 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4633 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; 4817 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4634 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1) 4818 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4635 val |= BNX2_MQ_CONFIG_HALT_DIS; 4819 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4820 if (CHIP_REV(bp) == CHIP_REV_Ax)
4821 val |= BNX2_MQ_CONFIG_HALT_DIS;
4822 }
4636 4823
4637 REG_WR(bp, BNX2_MQ_CONFIG, val); 4824 REG_WR(bp, BNX2_MQ_CONFIG, val);
4638 4825
@@ -7471,7 +7658,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7471 INIT_WORK(&bp->reset_task, bnx2_reset_task); 7658 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7472 7659
7473 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); 7660 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7474 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS); 7661 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7475 dev->mem_end = dev->mem_start + mem_len; 7662 dev->mem_end = dev->mem_start + mem_len;
7476 dev->irq = pdev->irq; 7663 dev->irq = pdev->irq;
7477 7664
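
bnx2_cnic_probe() is the hand-off point for the new cnic driver: cnic locates the bnx2 netdev, asks it for the struct cnic_eth_dev filled in above, and registers its ops through the callbacks it finds there, after which bnx2_poll() feeds events to cnic_handler via bnx2_poll_cnic(). A sketch of the consumer side (the real cnic.c also takes module and device references and checks drv_state before registering):

	struct cnic_eth_dev *ethdev;

	ethdev = bnx2_cnic_probe(netdev);	/* exported above */
	if (!ethdev)
		return NULL;

	/* bnx2 stores the ops with rcu_assign_pointer() and will
	 * invoke cnic_handler from its NAPI poll loop from now on
	 */
	ethdev->drv_register_cnic(netdev, &cnic_bnx2_ops, dev);
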
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 026ed1c8469..f1edfaa9e56 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -361,6 +361,9 @@ struct l2_fhdr {
361#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28) 361#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28)
362 362
363#define BNX2_L2CTX_HOST_BDIDX 0x00000004 363#define BNX2_L2CTX_HOST_BDIDX 0x00000004
364#define BNX2_L2CTX_STATUSB_NUM_SHIFT 16
365#define BNX2_L2CTX_STATUSB_NUM(sb_id) \
366 (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0)
364#define BNX2_L2CTX_HOST_BSEQ 0x00000008 367#define BNX2_L2CTX_HOST_BSEQ 0x00000008
365#define BNX2_L2CTX_NX_BSEQ 0x0000000c 368#define BNX2_L2CTX_NX_BSEQ 0x0000000c
366#define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010 369#define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010
@@ -5900,6 +5903,7 @@ struct l2_fhdr {
5900#define BNX2_RXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) 5903#define BNX2_RXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
5901 5904
5902#define BNX2_RXP_SCRATCH 0x000e0000 5905#define BNX2_RXP_SCRATCH 0x000e0000
5906#define BNX2_RXP_SCRATCH_RXP_FLOOD 0x000e0024
5903#define BNX2_RXP_SCRATCH_RSS_TBL_SZ 0x000e0038 5907#define BNX2_RXP_SCRATCH_RSS_TBL_SZ 0x000e0038
5904#define BNX2_RXP_SCRATCH_RSS_TBL 0x000e003c 5908#define BNX2_RXP_SCRATCH_RSS_TBL 0x000e003c
5905#define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES 128 5909#define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES 128
@@ -6680,6 +6684,11 @@ struct bnx2_napi {
6680 u32 last_status_idx; 6684 u32 last_status_idx;
6681 u32 int_num; 6685 u32 int_num;
6682 6686
6687#ifdef BCM_CNIC
6688 u32 cnic_tag;
6689 int cnic_present;
6690#endif
6691
6683 struct bnx2_rx_ring_info rx_ring; 6692 struct bnx2_rx_ring_info rx_ring;
6684 struct bnx2_tx_ring_info tx_ring; 6693 struct bnx2_tx_ring_info tx_ring;
6685}; 6694};
@@ -6729,6 +6738,11 @@ struct bnx2 {
6729 int tx_ring_size; 6738 int tx_ring_size;
6730 u32 tx_wake_thresh; 6739 u32 tx_wake_thresh;
6731 6740
6741#ifdef BCM_CNIC
6742 struct cnic_ops *cnic_ops;
6743 void *cnic_data;
6744#endif
6745
6732 /* End of fields used in the performance code paths. */ 6746 /* End of fields used in the performance code paths. */
6733 6747
6734 unsigned int current_interval; 6748 unsigned int current_interval;
@@ -6887,6 +6901,10 @@ struct bnx2 {
6887 6901
6888 u32 idle_chk_status_idx; 6902 u32 idle_chk_status_idx;
6889 6903
6904#ifdef BCM_CNIC
6905 struct cnic_eth_dev cnic_eth_dev;
6906#endif
6907
6890 const struct firmware *mips_firmware; 6908 const struct firmware *mips_firmware;
6891 const struct firmware *rv2p_firmware; 6909 const struct firmware *rv2p_firmware;
6892}; 6910};
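
One detail of the bnx2.h hunk worth unpacking: BNX2_L2CTX_STATUSB_NUM() encodes which status block an L2 context reports to, offsetting non-zero ids by 7 before shifting them into bits 16 and up. So sb_id = 0 selects the default status block (field value 0), while sb_id = 1 produces (1 + 7) << 16 = 0x00080000. (The sb_id values here are illustrative; the macro itself is quoted above, and bnx2_setup_cnic_irq_info() picks the id from bp->irq_nvecs when MSIX is in use.)
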
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
new file mode 100644
index 00000000000..44f77eb1180
--- /dev/null
+++ b/drivers/net/cnic.c
@@ -0,0 +1,2717 @@
1/* cnic.c: Broadcom CNIC core network driver.
2 *
3 * Copyright (c) 2006-2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
10 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
11 */
12
13#include <linux/module.h>
14
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/list.h>
18#include <linux/slab.h>
19#include <linux/pci.h>
20#include <linux/init.h>
21#include <linux/netdevice.h>
22#include <linux/uio_driver.h>
23#include <linux/in.h>
24#include <linux/dma-mapping.h>
25#include <linux/delay.h>
26#include <linux/ethtool.h>
27#include <linux/if_vlan.h>
28#include <linux/module.h>
29
30#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
31#define BCM_VLAN 1
32#endif
33#include <net/ip.h>
34#include <net/tcp.h>
35#include <net/route.h>
36#include <net/ipv6.h>
37#include <net/ip6_route.h>
38#include <scsi/iscsi_if.h>
39
40#include "cnic_if.h"
41#include "bnx2.h"
42#include "cnic.h"
43#include "cnic_defs.h"
44
45#define DRV_MODULE_NAME "cnic"
46#define PFX DRV_MODULE_NAME ": "
47
48static char version[] __devinitdata =
49 "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
50
51MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
52 "Chen (zongxi@broadcom.com");
53MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
54MODULE_LICENSE("GPL");
55MODULE_VERSION(CNIC_MODULE_VERSION);
56
57static LIST_HEAD(cnic_dev_list);
58static DEFINE_RWLOCK(cnic_dev_lock);
59static DEFINE_MUTEX(cnic_lock);
60
61static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
62
63static int cnic_service_bnx2(void *, void *);
64static int cnic_ctl(void *, struct cnic_ctl_info *);
65
66static struct cnic_ops cnic_bnx2_ops = {
67 .cnic_owner = THIS_MODULE,
68 .cnic_handler = cnic_service_bnx2,
69 .cnic_ctl = cnic_ctl,
70};
71
72static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *);
73static void cnic_init_bnx2_tx_ring(struct cnic_dev *);
74static void cnic_init_bnx2_rx_ring(struct cnic_dev *);
75static int cnic_cm_set_pg(struct cnic_sock *);
76
77static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
78{
79 struct cnic_dev *dev = uinfo->priv;
80 struct cnic_local *cp = dev->cnic_priv;
81
82 if (!capable(CAP_NET_ADMIN))
83 return -EPERM;
84
85 if (cp->uio_dev != -1)
86 return -EBUSY;
87
88 cp->uio_dev = iminor(inode);
89
90 cnic_shutdown_bnx2_rx_ring(dev);
91
92 cnic_init_bnx2_tx_ring(dev);
93 cnic_init_bnx2_rx_ring(dev);
94
95 return 0;
96}
97
98static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
99{
100 struct cnic_dev *dev = uinfo->priv;
101 struct cnic_local *cp = dev->cnic_priv;
102
103 cp->uio_dev = -1;
104 return 0;
105}
106
107static inline void cnic_hold(struct cnic_dev *dev)
108{
109 atomic_inc(&dev->ref_count);
110}
111
112static inline void cnic_put(struct cnic_dev *dev)
113{
114 atomic_dec(&dev->ref_count);
115}
116
117static inline void csk_hold(struct cnic_sock *csk)
118{
119 atomic_inc(&csk->ref_count);
120}
121
122static inline void csk_put(struct cnic_sock *csk)
123{
124 atomic_dec(&csk->ref_count);
125}
126
127static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
128{
129 struct cnic_dev *cdev;
130
131 read_lock(&cnic_dev_lock);
132 list_for_each_entry(cdev, &cnic_dev_list, list) {
133 if (netdev == cdev->netdev) {
134 cnic_hold(cdev);
135 read_unlock(&cnic_dev_lock);
136 return cdev;
137 }
138 }
139 read_unlock(&cnic_dev_lock);
140 return NULL;
141}
142
143static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
144{
145 struct cnic_local *cp = dev->cnic_priv;
146 struct cnic_eth_dev *ethdev = cp->ethdev;
147 struct drv_ctl_info info;
148 struct drv_ctl_io *io = &info.data.io;
149
150 info.cmd = DRV_CTL_CTX_WR_CMD;
151 io->cid_addr = cid_addr;
152 io->offset = off;
153 io->data = val;
154 ethdev->drv_ctl(dev->netdev, &info);
155}
156
157static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
158{
159 struct cnic_local *cp = dev->cnic_priv;
160 struct cnic_eth_dev *ethdev = cp->ethdev;
161 struct drv_ctl_info info;
162 struct drv_ctl_io *io = &info.data.io;
163
164 info.cmd = DRV_CTL_IO_WR_CMD;
165 io->offset = off;
166 io->data = val;
167 ethdev->drv_ctl(dev->netdev, &info);
168}
169
170static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
171{
172 struct cnic_local *cp = dev->cnic_priv;
173 struct cnic_eth_dev *ethdev = cp->ethdev;
174 struct drv_ctl_info info;
175 struct drv_ctl_io *io = &info.data.io;
176
177 info.cmd = DRV_CTL_IO_RD_CMD;
178 io->offset = off;
179 ethdev->drv_ctl(dev->netdev, &info);
180 return io->data;
181}
182
183static int cnic_in_use(struct cnic_sock *csk)
184{
185 return test_bit(SK_F_INUSE, &csk->flags);
186}
187
188static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
189{
190 struct cnic_local *cp = dev->cnic_priv;
191 struct cnic_eth_dev *ethdev = cp->ethdev;
192 struct drv_ctl_info info;
193
194 info.cmd = DRV_CTL_COMPLETION_CMD;
195 info.data.comp.comp_count = count;
196 ethdev->drv_ctl(dev->netdev, &info);
197}
198
199static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
200 struct cnic_sock *csk)
201{
202 struct iscsi_path path_req;
203 char *buf = NULL;
204 u16 len = 0;
205 u32 msg_type = ISCSI_KEVENT_IF_DOWN;
206 struct cnic_ulp_ops *ulp_ops;
207
208 if (cp->uio_dev == -1)
209 return -ENODEV;
210
211 if (csk) {
212 len = sizeof(path_req);
213 buf = (char *) &path_req;
214 memset(&path_req, 0, len);
215
216 msg_type = ISCSI_KEVENT_PATH_REQ;
217 path_req.handle = (u64) csk->l5_cid;
218 if (test_bit(SK_F_IPV6, &csk->flags)) {
219 memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
220 sizeof(struct in6_addr));
221 path_req.ip_addr_len = 16;
222 } else {
223 memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
224 sizeof(struct in_addr));
225 path_req.ip_addr_len = 4;
226 }
227 path_req.vlan_id = csk->vlan_id;
228 path_req.pmtu = csk->mtu;
229 }
230
231 rcu_read_lock();
232 ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
233 if (ulp_ops)
234 ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
235 rcu_read_unlock();
236 return 0;
237}
238
239static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
240 char *buf, u16 len)
241{
242 int rc = -EINVAL;
243
244 switch (msg_type) {
245 case ISCSI_UEVENT_PATH_UPDATE: {
246 struct cnic_local *cp;
247 u32 l5_cid;
248 struct cnic_sock *csk;
249 struct iscsi_path *path_resp;
250
251 if (len < sizeof(*path_resp))
252 break;
253
254 path_resp = (struct iscsi_path *) buf;
255 cp = dev->cnic_priv;
256 l5_cid = (u32) path_resp->handle;
257 if (l5_cid >= MAX_CM_SK_TBL_SZ)
258 break;
259
260 csk = &cp->csk_tbl[l5_cid];
261 csk_hold(csk);
262 if (cnic_in_use(csk)) {
263 memcpy(csk->ha, path_resp->mac_addr, 6);
264 if (test_bit(SK_F_IPV6, &csk->flags))
265 memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
266 sizeof(struct in6_addr));
267 else
268 memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
269 sizeof(struct in_addr));
270 if (is_valid_ether_addr(csk->ha))
271 cnic_cm_set_pg(csk);
272 }
273 csk_put(csk);
274 rc = 0;
275 }
276 }
277
278 return rc;
279}
280
281static int cnic_offld_prep(struct cnic_sock *csk)
282{
283 if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
284 return 0;
285
286 if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
287 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
288 return 0;
289 }
290
291 return 1;
292}
293
294static int cnic_close_prep(struct cnic_sock *csk)
295{
296 clear_bit(SK_F_CONNECT_START, &csk->flags);
297 smp_mb__after_clear_bit();
298
299 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
300 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
301 msleep(1);
302
303 return 1;
304 }
305 return 0;
306}
307
308static int cnic_abort_prep(struct cnic_sock *csk)
309{
310 clear_bit(SK_F_CONNECT_START, &csk->flags);
311 smp_mb__after_clear_bit();
312
313 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
314 msleep(1);
315
316 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
317 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
318 return 1;
319 }
320
321 return 0;
322}
323
324int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
325{
326 struct cnic_dev *dev;
327
328 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
329 printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
330 ulp_type);
331 return -EINVAL;
332 }
333 mutex_lock(&cnic_lock);
334 if (cnic_ulp_tbl[ulp_type]) {
335 printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
336 "been registered\n", ulp_type);
337 mutex_unlock(&cnic_lock);
338 return -EBUSY;
339 }
340
341 read_lock(&cnic_dev_lock);
342 list_for_each_entry(dev, &cnic_dev_list, list) {
343 struct cnic_local *cp = dev->cnic_priv;
344
345 clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
346 }
347 read_unlock(&cnic_dev_lock);
348
349 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
350 mutex_unlock(&cnic_lock);
351
352 /* Prevent race conditions with netdev_event */
353 rtnl_lock();
354 read_lock(&cnic_dev_lock);
355 list_for_each_entry(dev, &cnic_dev_list, list) {
356 struct cnic_local *cp = dev->cnic_priv;
357
358 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
359 ulp_ops->cnic_init(dev);
360 }
361 read_unlock(&cnic_dev_lock);
362 rtnl_unlock();
363
364 return 0;
365}
366
367int cnic_unregister_driver(int ulp_type)
368{
369 struct cnic_dev *dev;
370
371 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
372 printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
373 ulp_type);
374 return -EINVAL;
375 }
376 mutex_lock(&cnic_lock);
377 if (!cnic_ulp_tbl[ulp_type]) {
378 printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
379 "been registered\n", ulp_type);
380 goto out_unlock;
381 }
382 read_lock(&cnic_dev_lock);
383 list_for_each_entry(dev, &cnic_dev_list, list) {
384 struct cnic_local *cp = dev->cnic_priv;
385
386 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
387 printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
388 "still has devices registered\n", ulp_type);
389 read_unlock(&cnic_dev_lock);
390 goto out_unlock;
391 }
392 }
393 read_unlock(&cnic_dev_lock);
394
395 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
396
397 mutex_unlock(&cnic_lock);
398 synchronize_rcu();
399 return 0;
400
401out_unlock:
402 mutex_unlock(&cnic_lock);
403 return -EINVAL;
404}
405
406static int cnic_start_hw(struct cnic_dev *);
407static void cnic_stop_hw(struct cnic_dev *);
408
409static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
410 void *ulp_ctx)
411{
412 struct cnic_local *cp = dev->cnic_priv;
413 struct cnic_ulp_ops *ulp_ops;
414
415 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
416 printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
417 ulp_type);
418 return -EINVAL;
419 }
420 mutex_lock(&cnic_lock);
421 if (cnic_ulp_tbl[ulp_type] == NULL) {
422 printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
423 "has not been registered\n", ulp_type);
424 mutex_unlock(&cnic_lock);
425 return -EAGAIN;
426 }
427 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
428 printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
429 "been registered to this device\n", ulp_type);
430 mutex_unlock(&cnic_lock);
431 return -EBUSY;
432 }
433
434 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
435 cp->ulp_handle[ulp_type] = ulp_ctx;
436 ulp_ops = cnic_ulp_tbl[ulp_type];
437 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
438 cnic_hold(dev);
439
440 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
441 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
442 ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
443
444 mutex_unlock(&cnic_lock);
445
446 return 0;
447
448}
449EXPORT_SYMBOL(cnic_register_driver);
450
451static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
452{
453 struct cnic_local *cp = dev->cnic_priv;
454
455 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
456 printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
457 ulp_type);
458 return -EINVAL;
459 }
460 mutex_lock(&cnic_lock);
461 if (rcu_dereference(cp->ulp_ops[ulp_type])) {
462 rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
463 cnic_put(dev);
464 } else {
465 printk(KERN_ERR PFX "cnic_unregister_device: device not "
466 "registered to this ulp type %d\n", ulp_type);
467 mutex_unlock(&cnic_lock);
468 return -EINVAL;
469 }
470 mutex_unlock(&cnic_lock);
471
472 synchronize_rcu();
473
474 return 0;
475}
476EXPORT_SYMBOL(cnic_unregister_driver);
477
478static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
479{
480 id_tbl->start = start_id;
481 id_tbl->max = size;
482 id_tbl->next = 0;
483 spin_lock_init(&id_tbl->lock);
484 id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
485 if (!id_tbl->table)
486 return -ENOMEM;
487
488 return 0;
489}
490
491static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
492{
493 kfree(id_tbl->table);
494 id_tbl->table = NULL;
495}
496
497static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
498{
499 int ret = -1;
500
501 id -= id_tbl->start;
502 if (id >= id_tbl->max)
503 return ret;
504
505 spin_lock(&id_tbl->lock);
506 if (!test_bit(id, id_tbl->table)) {
507 set_bit(id, id_tbl->table);
508 ret = 0;
509 }
510 spin_unlock(&id_tbl->lock);
511 return ret;
512}
513
514/* Returns -1 if not successful */
515static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
516{
517 u32 id;
518
519 spin_lock(&id_tbl->lock);
520 id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
521 if (id >= id_tbl->max) {
522 id = -1;
523 if (id_tbl->next != 0) {
524 id = find_first_zero_bit(id_tbl->table, id_tbl->next);
525 if (id >= id_tbl->next)
526 id = -1;
527 }
528 }
529
530 if (id < id_tbl->max) {
531 set_bit(id, id_tbl->table);
532 id_tbl->next = (id + 1) & (id_tbl->max - 1);
533 id += id_tbl->start;
534 }
535
536 spin_unlock(&id_tbl->lock);
537
538 return id;
539}
540
541static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
542{
543 if (id == -1)
544 return;
545
546 id -= id_tbl->start;
547 if (id >= id_tbl->max)
548 return;
549
550 clear_bit(id, id_tbl->table);
551}
552
553static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
554{
555 int i;
556
557 if (!dma->pg_arr)
558 return;
559
560 for (i = 0; i < dma->num_pages; i++) {
561 if (dma->pg_arr[i]) {
562 pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE,
563 dma->pg_arr[i], dma->pg_map_arr[i]);
564 dma->pg_arr[i] = NULL;
565 }
566 }
567 if (dma->pgtbl) {
568 pci_free_consistent(dev->pcidev, dma->pgtbl_size,
569 dma->pgtbl, dma->pgtbl_map);
570 dma->pgtbl = NULL;
571 }
572 kfree(dma->pg_arr);
573 dma->pg_arr = NULL;
574 dma->num_pages = 0;
575}
576
577static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
578{
579 int i;
580 u32 *page_table = dma->pgtbl;
581
582 for (i = 0; i < dma->num_pages; i++) {
583 /* Each entry needs to be in big endian format. */
584 *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
585 page_table++;
586 *page_table = (u32) dma->pg_map_arr[i];
587 page_table++;
588 }
589}
590
591static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
592 int pages, int use_pg_tbl)
593{
594 int i, size;
595 struct cnic_local *cp = dev->cnic_priv;
596
597 size = pages * (sizeof(void *) + sizeof(dma_addr_t));
598 dma->pg_arr = kzalloc(size, GFP_ATOMIC);
599 if (dma->pg_arr == NULL)
600 return -ENOMEM;
601
602 dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
603 dma->num_pages = pages;
604
605 for (i = 0; i < pages; i++) {
606 dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev,
607 BCM_PAGE_SIZE,
608 &dma->pg_map_arr[i]);
609 if (dma->pg_arr[i] == NULL)
610 goto error;
611 }
612 if (!use_pg_tbl)
613 return 0;
614
615 dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
616 ~(BCM_PAGE_SIZE - 1);
617 dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size,
618 &dma->pgtbl_map);
619 if (dma->pgtbl == NULL)
620 goto error;
621
622 cp->setup_pgtbl(dev, dma);
623
624 return 0;
625
626error:
627 cnic_free_dma(dev, dma);
628 return -ENOMEM;
629}
630
631static void cnic_free_resc(struct cnic_dev *dev)
632{
633 struct cnic_local *cp = dev->cnic_priv;
634 int i = 0;
635
636 if (cp->cnic_uinfo) {
637 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
638 while (cp->uio_dev != -1 && i < 15) {
639 msleep(100);
640 i++;
641 }
642 uio_unregister_device(cp->cnic_uinfo);
643 kfree(cp->cnic_uinfo);
644 cp->cnic_uinfo = NULL;
645 }
646
647 if (cp->l2_buf) {
648 pci_free_consistent(dev->pcidev, cp->l2_buf_size,
649 cp->l2_buf, cp->l2_buf_map);
650 cp->l2_buf = NULL;
651 }
652
653 if (cp->l2_ring) {
654 pci_free_consistent(dev->pcidev, cp->l2_ring_size,
655 cp->l2_ring, cp->l2_ring_map);
656 cp->l2_ring = NULL;
657 }
658
659 for (i = 0; i < cp->ctx_blks; i++) {
660 if (cp->ctx_arr[i].ctx) {
661 pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
662 cp->ctx_arr[i].ctx,
663 cp->ctx_arr[i].mapping);
664 cp->ctx_arr[i].ctx = NULL;
665 }
666 }
667 kfree(cp->ctx_arr);
668 cp->ctx_arr = NULL;
669 cp->ctx_blks = 0;
670
671 cnic_free_dma(dev, &cp->gbl_buf_info);
672 cnic_free_dma(dev, &cp->conn_buf_info);
673 cnic_free_dma(dev, &cp->kwq_info);
674 cnic_free_dma(dev, &cp->kcq_info);
675 kfree(cp->iscsi_tbl);
676 cp->iscsi_tbl = NULL;
677 kfree(cp->ctx_tbl);
678 cp->ctx_tbl = NULL;
679
680 cnic_free_id_tbl(&cp->cid_tbl);
681}
682
683static int cnic_alloc_context(struct cnic_dev *dev)
684{
685 struct cnic_local *cp = dev->cnic_priv;
686
687 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
688 int i, k, arr_size;
689
690 cp->ctx_blk_size = BCM_PAGE_SIZE;
691 cp->cids_per_blk = BCM_PAGE_SIZE / 128;
692 arr_size = BNX2_MAX_CID / cp->cids_per_blk *
693 sizeof(struct cnic_ctx);
694 cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
695 if (cp->ctx_arr == NULL)
696 return -ENOMEM;
697
698 k = 0;
699 for (i = 0; i < 2; i++) {
700 u32 j, reg, off, lo, hi;
701
702 if (i == 0)
703 off = BNX2_PG_CTX_MAP;
704 else
705 off = BNX2_ISCSI_CTX_MAP;
706
707 reg = cnic_reg_rd_ind(dev, off);
708 lo = reg >> 16;
709 hi = reg & 0xffff;
710 for (j = lo; j < hi; j += cp->cids_per_blk, k++)
711 cp->ctx_arr[k].cid = j;
712 }
713
714 cp->ctx_blks = k;
715 if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
716 cp->ctx_blks = 0;
717 return -ENOMEM;
718 }
719
720 for (i = 0; i < cp->ctx_blks; i++) {
721 cp->ctx_arr[i].ctx =
722 pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE,
723 &cp->ctx_arr[i].mapping);
724 if (cp->ctx_arr[i].ctx == NULL)
725 return -ENOMEM;
726 }
727 }
728 return 0;
729}
730
731static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
732{
733 struct cnic_local *cp = dev->cnic_priv;
734 struct uio_info *uinfo;
735 int ret;
736
737 ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
738 if (ret)
739 goto error;
740 cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
741
742 ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
743 if (ret)
744 goto error;
745 cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
746
747 ret = cnic_alloc_context(dev);
748 if (ret)
749 goto error;
750
751 cp->l2_ring_size = 2 * BCM_PAGE_SIZE;
752 cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size,
753 &cp->l2_ring_map);
754 if (!cp->l2_ring)
755 goto error;
756
757 cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
758 cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
759 cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size,
760 &cp->l2_buf_map);
761 if (!cp->l2_buf)
762 goto error;
763
764 uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
765 if (!uinfo)
766 goto error;
767
768 uinfo->mem[0].addr = dev->netdev->base_addr;
769 uinfo->mem[0].internal_addr = dev->regview;
770 uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
771 uinfo->mem[0].memtype = UIO_MEM_PHYS;
772
773 uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
774 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
775 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
776 else
777 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
778 uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
779
780 uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
781 uinfo->mem[2].size = cp->l2_ring_size;
782 uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
783
784 uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
785 uinfo->mem[3].size = cp->l2_buf_size;
786 uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
787
788 uinfo->name = "bnx2_cnic";
789 uinfo->version = CNIC_MODULE_VERSION;
790 uinfo->irq = UIO_IRQ_CUSTOM;
791
792 uinfo->open = cnic_uio_open;
793 uinfo->release = cnic_uio_close;
794
795 uinfo->priv = dev;
796
797 ret = uio_register_device(&dev->pcidev->dev, uinfo);
798 if (ret) {
799 kfree(uinfo);
800 goto error;
801 }
802
803 cp->cnic_uinfo = uinfo;
804
805 return 0;
806
807error:
808 cnic_free_resc(dev);
809 return ret;
810}
811
812static inline u32 cnic_kwq_avail(struct cnic_local *cp)
813{
814 return cp->max_kwq_idx -
815 ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
816}
817
818static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
819 u32 num_wqes)
820{
821 struct cnic_local *cp = dev->cnic_priv;
822 struct kwqe *prod_qe;
823 u16 prod, sw_prod, i;
824
825 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
826 return -EAGAIN; /* bnx2 is down */
827
828 spin_lock_bh(&cp->cnic_ulp_lock);
829 if (num_wqes > cnic_kwq_avail(cp) &&
830 !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
831 spin_unlock_bh(&cp->cnic_ulp_lock);
832 return -EAGAIN;
833 }
834
835 cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;
836
837 prod = cp->kwq_prod_idx;
838 sw_prod = prod & MAX_KWQ_IDX;
839 for (i = 0; i < num_wqes; i++) {
840 prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
841 memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
842 prod++;
843 sw_prod = prod & MAX_KWQ_IDX;
844 }
845 cp->kwq_prod_idx = prod;
846
847 CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
848
849 spin_unlock_bh(&cp->cnic_ulp_lock);
850 return 0;
851}
852
853static void service_kcqes(struct cnic_dev *dev, int num_cqes)
854{
855 struct cnic_local *cp = dev->cnic_priv;
856 int i, j;
857
858 i = 0;
859 j = 1;
860 while (num_cqes) {
861 struct cnic_ulp_ops *ulp_ops;
862 int ulp_type;
863 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
864 u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
865
866 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
867 cnic_kwq_completion(dev, 1);
868
869 while (j < num_cqes) {
870 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
871
872 if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
873 break;
874
875 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
876 cnic_kwq_completion(dev, 1);
877 j++;
878 }
879
880 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
881 ulp_type = CNIC_ULP_RDMA;
882 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
883 ulp_type = CNIC_ULP_ISCSI;
884 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
885 ulp_type = CNIC_ULP_L4;
886 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
887 goto end;
888 else {
889 printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n",
890 dev->netdev->name, kcqe_op_flag);
891 goto end;
892 }
893
894 rcu_read_lock();
895 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
896 if (likely(ulp_ops)) {
897 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
898 cp->completed_kcq + i, j);
899 }
900 rcu_read_unlock();
901end:
902 num_cqes -= j;
903 i += j;
904 j = 1;
905 }
906 return;
907}
908
909static u16 cnic_bnx2_next_idx(u16 idx)
910{
911 return idx + 1;
912}
913
914static u16 cnic_bnx2_hw_idx(u16 idx)
915{
916 return idx;
917}
918
919static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
920{
921 struct cnic_local *cp = dev->cnic_priv;
922 u16 i, ri, last;
923 struct kcqe *kcqe;
924 int kcqe_cnt = 0, last_cnt = 0;
925
926 i = ri = last = *sw_prod;
927 ri &= MAX_KCQ_IDX;
928
929 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
930 kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
931 cp->completed_kcq[kcqe_cnt++] = kcqe;
932 i = cp->next_idx(i);
933 ri = i & MAX_KCQ_IDX;
934 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
935 last_cnt = kcqe_cnt;
936 last = i;
937 }
938 }
939
940 *sw_prod = last;
941 return last_cnt;
942}
943
944static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
945{
946 u16 rx_cons = *cp->rx_cons_ptr;
947 u16 tx_cons = *cp->tx_cons_ptr;
948
949 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
950 cp->tx_cons = tx_cons;
951 cp->rx_cons = rx_cons;
952 uio_event_notify(cp->cnic_uinfo);
953 }
954}
955
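/* Non-MSI-X service path, invoked by the bnx2 driver with the shared
 * status block.  Drain the KCQ until the software producer catches up
 * with the hardware producer, re-reading the status block after each
 * pass in case more completions arrived meanwhile. */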
956static int cnic_service_bnx2(void *data, void *status_blk)
957{
958 struct cnic_dev *dev = data;
959 struct status_block *sblk = status_blk;
960 struct cnic_local *cp = dev->cnic_priv;
961 u32 status_idx = sblk->status_idx;
962 u16 hw_prod, sw_prod;
963 int kcqe_cnt;
964
965 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
966 return status_idx;
967
968 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
969
970 hw_prod = sblk->status_completion_producer_index;
971 sw_prod = cp->kcq_prod_idx;
972 while (sw_prod != hw_prod) {
973 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
974 if (kcqe_cnt == 0)
975 goto done;
976
977 service_kcqes(dev, kcqe_cnt);
978
979 /* Tell compiler that status_blk fields can change. */
980 barrier();
981 if (status_idx != sblk->status_idx) {
982 status_idx = sblk->status_idx;
983 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
984 hw_prod = sblk->status_completion_producer_index;
985 } else
986 break;
987 }
988
989done:
990 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
991
992 cp->kcq_prod_idx = sw_prod;
993
994 cnic_chk_bnx2_pkt_rings(cp);
995 return status_idx;
996}
997
998static void cnic_service_bnx2_msix(unsigned long data)
999{
1000 struct cnic_dev *dev = (struct cnic_dev *) data;
1001 struct cnic_local *cp = dev->cnic_priv;
1002 struct status_block_msix *status_blk = cp->bnx2_status_blk;
1003 u32 status_idx = status_blk->status_idx;
1004 u16 hw_prod, sw_prod;
1005 int kcqe_cnt;
1006
1007 cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
1008
1009 hw_prod = status_blk->status_completion_producer_index;
1010 sw_prod = cp->kcq_prod_idx;
1011 while (sw_prod != hw_prod) {
1012 kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
1013 if (kcqe_cnt == 0)
1014 goto done;
1015
1016 service_kcqes(dev, kcqe_cnt);
1017
1018 /* Tell compiler that status_blk fields can change. */
1019 barrier();
1020 if (status_idx != status_blk->status_idx) {
1021 status_idx = status_blk->status_idx;
1022 cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
1023 hw_prod = status_blk->status_completion_producer_index;
1024 } else
1025 break;
1026 }
1027
1028done:
1029 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
1030 cp->kcq_prod_idx = sw_prod;
1031
1032 cnic_chk_bnx2_pkt_rings(cp);
1033
1034 cp->last_status_idx = status_idx;
1035 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
1036 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
1037}
1038
1039static irqreturn_t cnic_irq(int irq, void *dev_instance)
1040{
1041 struct cnic_dev *dev = dev_instance;
1042 struct cnic_local *cp = dev->cnic_priv;
1043 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
1044
1045 if (cp->ack_int)
1046 cp->ack_int(dev);
1047
1048 prefetch(cp->status_blk);
1049 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
1050
1051 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
1052 tasklet_schedule(&cp->cnic_irq_task);
1053
1054 return IRQ_HANDLED;
1055}
1056
1057static void cnic_ulp_stop(struct cnic_dev *dev)
1058{
1059 struct cnic_local *cp = dev->cnic_priv;
1060 int if_type;
1061
1062 rcu_read_lock();
1063 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
1064 struct cnic_ulp_ops *ulp_ops;
1065
1066 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
1067 if (!ulp_ops)
1068 continue;
1069
1070 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
1071 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
1072 }
1073 rcu_read_unlock();
1074}
1075
1076static void cnic_ulp_start(struct cnic_dev *dev)
1077{
1078 struct cnic_local *cp = dev->cnic_priv;
1079 int if_type;
1080
1081 rcu_read_lock();
1082 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
1083 struct cnic_ulp_ops *ulp_ops;
1084
1085 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
1086 if (!ulp_ops || !ulp_ops->cnic_start)
1087 continue;
1088
1089 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
1090 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
1091 }
1092 rcu_read_unlock();
1093}
1094
1095static int cnic_ctl(void *data, struct cnic_ctl_info *info)
1096{
1097 struct cnic_dev *dev = data;
1098
1099 switch (info->cmd) {
1100 case CNIC_CTL_STOP_CMD:
1101 cnic_hold(dev);
1102 mutex_lock(&cnic_lock);
1103
1104 cnic_ulp_stop(dev);
1105 cnic_stop_hw(dev);
1106
1107 mutex_unlock(&cnic_lock);
1108 cnic_put(dev);
1109 break;
1110 case CNIC_CTL_START_CMD:
1111 cnic_hold(dev);
1112 mutex_lock(&cnic_lock);
1113
1114 if (!cnic_start_hw(dev))
1115 cnic_ulp_start(dev);
1116
1117 mutex_unlock(&cnic_lock);
1118 cnic_put(dev);
1119 break;
1120 default:
1121 return -EINVAL;
1122 }
1123 return 0;
1124}
1125
1126static void cnic_ulp_init(struct cnic_dev *dev)
1127{
1128 int i;
1129 struct cnic_local *cp = dev->cnic_priv;
1130
1131 rcu_read_lock();
1132 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
1133 struct cnic_ulp_ops *ulp_ops;
1134
1135 ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
1136 if (!ulp_ops || !ulp_ops->cnic_init)
1137 continue;
1138
1139 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
1140 ulp_ops->cnic_init(dev);
1141
1142 }
1143 rcu_read_unlock();
1144}
1145
1146static void cnic_ulp_exit(struct cnic_dev *dev)
1147{
1148 int i;
1149 struct cnic_local *cp = dev->cnic_priv;
1150
1151 rcu_read_lock();
1152 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
1153 struct cnic_ulp_ops *ulp_ops;
1154
1155 ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
1156 if (!ulp_ops || !ulp_ops->cnic_exit)
1157 continue;
1158
1159 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
1160 ulp_ops->cnic_exit(dev);
1161
1162 }
1163 rcu_read_unlock();
1164}
1165
1166static int cnic_cm_offload_pg(struct cnic_sock *csk)
1167{
1168 struct cnic_dev *dev = csk->dev;
1169 struct l4_kwq_offload_pg *l4kwqe;
1170 struct kwqe *wqes[1];
1171
1172 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
1173 memset(l4kwqe, 0, sizeof(*l4kwqe));
1174 wqes[0] = (struct kwqe *) l4kwqe;
1175
1176 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
1177 l4kwqe->flags =
1178 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
1179 l4kwqe->l2hdr_nbytes = ETH_HLEN;
1180
1181 l4kwqe->da0 = csk->ha[0];
1182 l4kwqe->da1 = csk->ha[1];
1183 l4kwqe->da2 = csk->ha[2];
1184 l4kwqe->da3 = csk->ha[3];
1185 l4kwqe->da4 = csk->ha[4];
1186 l4kwqe->da5 = csk->ha[5];
1187
1188 l4kwqe->sa0 = dev->mac_addr[0];
1189 l4kwqe->sa1 = dev->mac_addr[1];
1190 l4kwqe->sa2 = dev->mac_addr[2];
1191 l4kwqe->sa3 = dev->mac_addr[3];
1192 l4kwqe->sa4 = dev->mac_addr[4];
1193 l4kwqe->sa5 = dev->mac_addr[5];
1194
1195 l4kwqe->etype = ETH_P_IP;
1196 l4kwqe->ipid_count = DEF_IPID_COUNT;
1197 l4kwqe->host_opaque = csk->l5_cid;
1198
1199 if (csk->vlan_id) {
1200 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
1201 l4kwqe->vlan_tag = csk->vlan_id;
1202 l4kwqe->l2hdr_nbytes += 4;
1203 }
1204
1205 return dev->submit_kwqes(dev, wqes, 1);
1206}
1207
1208static int cnic_cm_update_pg(struct cnic_sock *csk)
1209{
1210 struct cnic_dev *dev = csk->dev;
1211 struct l4_kwq_update_pg *l4kwqe;
1212 struct kwqe *wqes[1];
1213
1214 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
1215 memset(l4kwqe, 0, sizeof(*l4kwqe));
1216 wqes[0] = (struct kwqe *) l4kwqe;
1217
1218 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
1219 l4kwqe->flags =
1220 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
1221 l4kwqe->pg_cid = csk->pg_cid;
1222
1223 l4kwqe->da0 = csk->ha[0];
1224 l4kwqe->da1 = csk->ha[1];
1225 l4kwqe->da2 = csk->ha[2];
1226 l4kwqe->da3 = csk->ha[3];
1227 l4kwqe->da4 = csk->ha[4];
1228 l4kwqe->da5 = csk->ha[5];
1229
1230 l4kwqe->pg_host_opaque = csk->l5_cid;
1231 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
1232
1233 return dev->submit_kwqes(dev, wqes, 1);
1234}
1235
1236static int cnic_cm_upload_pg(struct cnic_sock *csk)
1237{
1238 struct cnic_dev *dev = csk->dev;
1239 struct l4_kwq_upload *l4kwqe;
1240 struct kwqe *wqes[1];
1241
1242 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
1243 memset(l4kwqe, 0, sizeof(*l4kwqe));
1244 wqes[0] = (struct kwqe *) l4kwqe;
1245
1246 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
1247 l4kwqe->flags =
1248 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
1249 l4kwqe->cid = csk->pg_cid;
1250
1251 return dev->submit_kwqes(dev, wqes, 1);
1252}
1253
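/* Build the TCP connect request.  IPv4 needs two KWQEs (connect_req1
 * and connect_req3); IPv6 inserts connect_req2 to carry the upper 96
 * bits of the addresses, for three KWQEs total. */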
1254static int cnic_cm_conn_req(struct cnic_sock *csk)
1255{
1256 struct cnic_dev *dev = csk->dev;
1257 struct l4_kwq_connect_req1 *l4kwqe1;
1258 struct l4_kwq_connect_req2 *l4kwqe2;
1259 struct l4_kwq_connect_req3 *l4kwqe3;
1260 struct kwqe *wqes[3];
1261 u8 tcp_flags = 0;
1262 int num_wqes = 2;
1263
1264 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
1265 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
1266 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
1267 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
1268 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
1269 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
1270
1271 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
1272 l4kwqe3->flags =
1273 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
1274 l4kwqe3->ka_timeout = csk->ka_timeout;
1275 l4kwqe3->ka_interval = csk->ka_interval;
1276 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
1277 l4kwqe3->tos = csk->tos;
1278 l4kwqe3->ttl = csk->ttl;
1279 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
1280 l4kwqe3->pmtu = csk->mtu;
1281 l4kwqe3->rcv_buf = csk->rcv_buf;
1282 l4kwqe3->snd_buf = csk->snd_buf;
1283 l4kwqe3->seed = csk->seed;
1284
1285 wqes[0] = (struct kwqe *) l4kwqe1;
1286 if (test_bit(SK_F_IPV6, &csk->flags)) {
1287 wqes[1] = (struct kwqe *) l4kwqe2;
1288 wqes[2] = (struct kwqe *) l4kwqe3;
1289 num_wqes = 3;
1290
1291 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
1292 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
1293 l4kwqe2->flags =
1294 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
1295 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
1296 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
1297 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
1298 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
1299 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
1300 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
1301 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
1302 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
1303 sizeof(struct tcphdr);
1304 } else {
1305 wqes[1] = (struct kwqe *) l4kwqe3;
1306 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
1307 sizeof(struct tcphdr);
1308 }
1309
1310 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
1311 l4kwqe1->flags =
1312 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
1313 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
1314 l4kwqe1->cid = csk->cid;
1315 l4kwqe1->pg_cid = csk->pg_cid;
1316 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
1317 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
1318 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
1319 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
1320 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
1321 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
1322 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
1323 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
1324 if (csk->tcp_flags & SK_TCP_NAGLE)
1325 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
1326 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
1327 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
1328 if (csk->tcp_flags & SK_TCP_SACK)
1329 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
1330 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
1331 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
1332
1333 l4kwqe1->tcp_flags = tcp_flags;
1334
1335 return dev->submit_kwqes(dev, wqes, num_wqes);
1336}
1337
1338static int cnic_cm_close_req(struct cnic_sock *csk)
1339{
1340 struct cnic_dev *dev = csk->dev;
1341 struct l4_kwq_close_req *l4kwqe;
1342 struct kwqe *wqes[1];
1343
1344 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
1345 memset(l4kwqe, 0, sizeof(*l4kwqe));
1346 wqes[0] = (struct kwqe *) l4kwqe;
1347
1348 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
1349 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
1350 l4kwqe->cid = csk->cid;
1351
1352 return dev->submit_kwqes(dev, wqes, 1);
1353}
1354
1355static int cnic_cm_abort_req(struct cnic_sock *csk)
1356{
1357 struct cnic_dev *dev = csk->dev;
1358 struct l4_kwq_reset_req *l4kwqe;
1359 struct kwqe *wqes[1];
1360
1361 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
1362 memset(l4kwqe, 0, sizeof(*l4kwqe));
1363 wqes[0] = (struct kwqe *) l4kwqe;
1364
1365 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
1366 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
1367 l4kwqe->cid = csk->cid;
1368
1369 return dev->submit_kwqes(dev, wqes, 1);
1370}
1371
1372static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
1373 u32 l5_cid, struct cnic_sock **csk, void *context)
1374{
1375 struct cnic_local *cp = dev->cnic_priv;
1376 struct cnic_sock *csk1;
1377
1378 if (l5_cid >= MAX_CM_SK_TBL_SZ)
1379 return -EINVAL;
1380
1381 csk1 = &cp->csk_tbl[l5_cid];
1382 if (atomic_read(&csk1->ref_count))
1383 return -EAGAIN;
1384
1385 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
1386 return -EBUSY;
1387
1388 csk1->dev = dev;
1389 csk1->cid = cid;
1390 csk1->l5_cid = l5_cid;
1391 csk1->ulp_type = ulp_type;
1392 csk1->context = context;
1393
1394 csk1->ka_timeout = DEF_KA_TIMEOUT;
1395 csk1->ka_interval = DEF_KA_INTERVAL;
1396 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
1397 csk1->tos = DEF_TOS;
1398 csk1->ttl = DEF_TTL;
1399 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
1400 csk1->rcv_buf = DEF_RCV_BUF;
1401 csk1->snd_buf = DEF_SND_BUF;
1402 csk1->seed = DEF_SEED;
1403
1404 *csk = csk1;
1405 return 0;
1406}
1407
1408static void cnic_cm_cleanup(struct cnic_sock *csk)
1409{
1410 if (csk->src_port) {
1411 struct cnic_dev *dev = csk->dev;
1412 struct cnic_local *cp = dev->cnic_priv;
1413
1414 cnic_free_id(&cp->csk_port_tbl, csk->src_port);
1415 csk->src_port = 0;
1416 }
1417}
1418
1419static void cnic_close_conn(struct cnic_sock *csk)
1420{
1421 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
1422 cnic_cm_upload_pg(csk);
1423 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
1424 }
1425 cnic_cm_cleanup(csk);
1426}
1427
1428static int cnic_cm_destroy(struct cnic_sock *csk)
1429{
1430 if (!cnic_in_use(csk))
1431 return -EINVAL;
1432
1433 csk_hold(csk);
1434 clear_bit(SK_F_INUSE, &csk->flags);
1435 smp_mb__after_clear_bit();
1436 while (atomic_read(&csk->ref_count) != 1)
1437 msleep(1);
1438 cnic_cm_cleanup(csk);
1439
1440 csk->flags = 0;
1441 csk_put(csk);
1442 return 0;
1443}
1444
1445static inline u16 cnic_get_vlan(struct net_device *dev,
1446 struct net_device **vlan_dev)
1447{
1448 if (dev->priv_flags & IFF_802_1Q_VLAN) {
1449 *vlan_dev = vlan_dev_real_dev(dev);
1450 return vlan_dev_vlan_id(dev);
1451 }
1452 *vlan_dev = dev;
1453 return 0;
1454}
1455
1456static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
1457 struct dst_entry **dst)
1458{
1459#if defined(CONFIG_INET)
1460 struct flowi fl;
1461 int err;
1462 struct rtable *rt;
1463
1464 memset(&fl, 0, sizeof(fl));
1465 fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
1466
1467 err = ip_route_output_key(&init_net, &rt, &fl);
1468 if (!err)
1469 *dst = &rt->u.dst;
1470 return err;
1471#else
1472 return -ENETUNREACH;
1473#endif
1474}
1475
1476static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
1477 struct dst_entry **dst)
1478{
1479#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
1480 struct flowi fl;
1481
1482 memset(&fl, 0, sizeof(fl));
1483 ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
1484 if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
1485 fl.oif = dst_addr->sin6_scope_id;
1486
1487 *dst = ip6_route_output(&init_net, NULL, &fl);
1488 if (*dst)
1489 return 0;
1490#endif
1491
1492 return -ENETUNREACH;
1493}
1494
1495static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
1496 int ulp_type)
1497{
1498 struct cnic_dev *dev = NULL;
1499 struct dst_entry *dst;
1500 struct net_device *netdev = NULL;
1501 int err = -ENETUNREACH;
1502
1503 if (dst_addr->sin_family == AF_INET)
1504 err = cnic_get_v4_route(dst_addr, &dst);
1505 else if (dst_addr->sin_family == AF_INET6) {
1506 struct sockaddr_in6 *dst_addr6 =
1507 (struct sockaddr_in6 *) dst_addr;
1508
1509 err = cnic_get_v6_route(dst_addr6, &dst);
1510 } else
1511 return NULL;
1512
1513 if (err)
1514 return NULL;
1515
1516 if (!dst->dev)
1517 goto done;
1518
1519 cnic_get_vlan(dst->dev, &netdev);
1520
1521 dev = cnic_from_netdev(netdev);
1522
1523done:
1524 dst_release(dst);
1525 if (dev)
1526 cnic_put(dev);
1527 return dev;
1528}
1529
1530static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
1531{
1532 struct cnic_dev *dev = csk->dev;
1533 struct cnic_local *cp = dev->cnic_priv;
1534
1535 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
1536}
1537
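/* Resolve the route for a connection and reserve the local port: the
 * caller's port is used if it lies in [CNIC_LOCAL_PORT_MIN,
 * CNIC_LOCAL_PORT_MAX) and is free, otherwise a fresh id is taken
 * from csk_port_tbl. */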
1538static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
1539{
1540 struct cnic_dev *dev = csk->dev;
1541 struct cnic_local *cp = dev->cnic_priv;
1542 int is_v6, err, rc = -ENETUNREACH;
1543 struct dst_entry *dst;
1544 struct net_device *realdev;
1545 u32 local_port;
1546
1547 if (saddr->local.v6.sin6_family == AF_INET6 &&
1548 saddr->remote.v6.sin6_family == AF_INET6)
1549 is_v6 = 1;
1550 else if (saddr->local.v4.sin_family == AF_INET &&
1551 saddr->remote.v4.sin_family == AF_INET)
1552 is_v6 = 0;
1553 else
1554 return -EINVAL;
1555
1556 clear_bit(SK_F_IPV6, &csk->flags);
1557
1558 if (is_v6) {
1559#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
1560 set_bit(SK_F_IPV6, &csk->flags);
1561 err = cnic_get_v6_route(&saddr->remote.v6, &dst);
1562 if (err)
1563 return err;
1564
1565 if (!dst || dst->error || !dst->dev)
1566 goto err_out;
1567
1568 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
1569 sizeof(struct in6_addr));
1570 csk->dst_port = saddr->remote.v6.sin6_port;
1571 local_port = saddr->local.v6.sin6_port;
1572#else
1573 return rc;
1574#endif
1575
1576 } else {
1577 err = cnic_get_v4_route(&saddr->remote.v4, &dst);
1578 if (err)
1579 return err;
1580
1581 if (!dst || dst->error || !dst->dev)
1582 goto err_out;
1583
1584 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
1585 csk->dst_port = saddr->remote.v4.sin_port;
1586 local_port = saddr->local.v4.sin_port;
1587 }
1588
1589 csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
1590 if (realdev != dev->netdev)
1591 goto err_out;
1592
1593 if (local_port >= CNIC_LOCAL_PORT_MIN &&
1594 local_port < CNIC_LOCAL_PORT_MAX) {
1595 if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
1596 local_port = 0;
1597 } else
1598 local_port = 0;
1599
1600 if (!local_port) {
1601 local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
1602 if (local_port == -1) {
1603 rc = -ENOMEM;
1604 goto err_out;
1605 }
1606 }
1607 csk->src_port = local_port;
1608
1609 csk->mtu = dst_mtu(dst);
1610 rc = 0;
1611
1612err_out:
1613 dst_release(dst);
1614 return rc;
1615}
1616
1617static void cnic_init_csk_state(struct cnic_sock *csk)
1618{
1619 csk->state = 0;
1620 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
1621 clear_bit(SK_F_CLOSING, &csk->flags);
1622}
1623
1624static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
1625{
1626 int err = 0;
1627
1628 if (!cnic_in_use(csk))
1629 return -EINVAL;
1630
1631 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
1632 return -EINVAL;
1633
1634 cnic_init_csk_state(csk);
1635
1636 err = cnic_get_route(csk, saddr);
1637 if (err)
1638 goto err_out;
1639
1640 err = cnic_resolve_addr(csk, saddr);
1641 if (!err)
1642 return 0;
1643
1644err_out:
1645 clear_bit(SK_F_CONNECT_START, &csk->flags);
1646 return err;
1647}
1648
1649static int cnic_cm_abort(struct cnic_sock *csk)
1650{
1651 struct cnic_local *cp = csk->dev->cnic_priv;
1652 u32 opcode;
1653
1654 if (!cnic_in_use(csk))
1655 return -EINVAL;
1656
1657 if (cnic_abort_prep(csk))
1658 return cnic_cm_abort_req(csk);
1659
1660 /* Getting here means that we haven't started connect, or
1661 * connect was not successful.
1662 */
1663
1664 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
1665 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
1666 opcode = csk->state;
1667 else
1668 opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
1669 cp->close_conn(csk, opcode);
1670
1671 return 0;
1672}
1673
1674static int cnic_cm_close(struct cnic_sock *csk)
1675{
1676 if (!cnic_in_use(csk))
1677 return -EINVAL;
1678
1679 if (cnic_close_prep(csk)) {
1680 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
1681 return cnic_cm_close_req(csk);
1682 }
1683 return 0;
1684}
1685
1686static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
1687 u8 opcode)
1688{
1689 struct cnic_ulp_ops *ulp_ops;
1690 int ulp_type = csk->ulp_type;
1691
1692 rcu_read_lock();
1693 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1694 if (ulp_ops) {
1695 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
1696 ulp_ops->cm_connect_complete(csk);
1697 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
1698 ulp_ops->cm_close_complete(csk);
1699 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
1700 ulp_ops->cm_remote_abort(csk);
1701 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
1702 ulp_ops->cm_abort_complete(csk);
1703 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
1704 ulp_ops->cm_remote_close(csk);
1705 }
1706 rcu_read_unlock();
1707}
1708
1709static int cnic_cm_set_pg(struct cnic_sock *csk)
1710{
1711 if (cnic_offld_prep(csk)) {
1712 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
1713 cnic_cm_update_pg(csk);
1714 else
1715 cnic_cm_offload_pg(csk);
1716 }
1717 return 0;
1718}
1719
1720static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
1721{
1722 struct cnic_local *cp = dev->cnic_priv;
1723 u32 l5_cid = kcqe->pg_host_opaque;
1724 u8 opcode = kcqe->op_code;
1725 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
1726
1727 csk_hold(csk);
1728 if (!cnic_in_use(csk))
1729 goto done;
1730
1731 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
1732 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
1733 goto done;
1734 }
1735 csk->pg_cid = kcqe->pg_cid;
1736 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
1737 cnic_cm_conn_req(csk);
1738
1739done:
1740 csk_put(csk);
1741}
1742
1743static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
1744{
1745 struct cnic_local *cp = dev->cnic_priv;
1746 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
1747 u8 opcode = l4kcqe->op_code;
1748 u32 l5_cid;
1749 struct cnic_sock *csk;
1750
1751 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
1752 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
1753 cnic_cm_process_offld_pg(dev, l4kcqe);
1754 return;
1755 }
1756
1757 l5_cid = l4kcqe->conn_id;
1758 if (opcode & 0x80)
1759 l5_cid = l4kcqe->cid;
1760 if (l5_cid >= MAX_CM_SK_TBL_SZ)
1761 return;
1762
1763 csk = &cp->csk_tbl[l5_cid];
1764 csk_hold(csk);
1765
1766 if (!cnic_in_use(csk)) {
1767 csk_put(csk);
1768 return;
1769 }
1770
1771 switch (opcode) {
1772 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
1773 if (l4kcqe->status == 0)
1774 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
1775
1776 smp_mb__before_clear_bit();
1777 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
1778 cnic_cm_upcall(cp, csk, opcode);
1779 break;
1780
1781 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
1782 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
1783 csk->state = opcode;
1784 /* fall through */
1785 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
1786 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
1787 cp->close_conn(csk, opcode);
1788 break;
1789
1790 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
1791 cnic_cm_upcall(cp, csk, opcode);
1792 break;
1793 }
1794 csk_put(csk);
1795}
1796
1797static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
1798{
1799 struct cnic_dev *dev = data;
1800 int i;
1801
1802 for (i = 0; i < num; i++)
1803 cnic_cm_process_kcqe(dev, kcqe[i]);
1804}
1805
1806static struct cnic_ulp_ops cm_ulp_ops = {
1807 .indicate_kcqes = cnic_cm_indicate_kcqe,
1808};
1809
1810static void cnic_cm_free_mem(struct cnic_dev *dev)
1811{
1812 struct cnic_local *cp = dev->cnic_priv;
1813
1814 kfree(cp->csk_tbl);
1815 cp->csk_tbl = NULL;
1816 cnic_free_id_tbl(&cp->csk_port_tbl);
1817}
1818
1819static int cnic_cm_alloc_mem(struct cnic_dev *dev)
1820{
1821 struct cnic_local *cp = dev->cnic_priv;
1822
1823 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
1824 GFP_KERNEL);
1825 if (!cp->csk_tbl)
1826 return -ENOMEM;
1827
1828 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
1829 CNIC_LOCAL_PORT_MIN)) {
1830 cnic_cm_free_mem(dev);
1831 return -ENOMEM;
1832 }
1833 return 0;
1834}
1835
1836static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
1837{
1838 if ((opcode == csk->state) ||
1839 (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
1840 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
1841 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
1842 return 1;
1843 }
1844 return 0;
1845}
1846
1847static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
1848{
1849 struct cnic_dev *dev = csk->dev;
1850 struct cnic_local *cp = dev->cnic_priv;
1851
1852 clear_bit(SK_F_CONNECT_START, &csk->flags);
1853 if (cnic_ready_to_close(csk, opcode)) {
1854 cnic_close_conn(csk);
1855 cnic_cm_upcall(cp, csk, opcode);
1856 }
1857}
1858
1859static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
1860{
1861}
1862
1863static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
1864{
1865 u32 seed;
1866
1867 get_random_bytes(&seed, 4);
1868 cnic_ctx_wr(dev, 45, 0, seed);
1869 return 0;
1870}
1871
1872static int cnic_cm_open(struct cnic_dev *dev)
1873{
1874 struct cnic_local *cp = dev->cnic_priv;
1875 int err;
1876
1877 err = cnic_cm_alloc_mem(dev);
1878 if (err)
1879 return err;
1880
1881 err = cp->start_cm(dev);
1882
1883 if (err)
1884 goto err_out;
1885
1886 dev->cm_create = cnic_cm_create;
1887 dev->cm_destroy = cnic_cm_destroy;
1888 dev->cm_connect = cnic_cm_connect;
1889 dev->cm_abort = cnic_cm_abort;
1890 dev->cm_close = cnic_cm_close;
1891 dev->cm_select_dev = cnic_cm_select_dev;
1892
1893 cp->ulp_handle[CNIC_ULP_L4] = dev;
1894 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
1895 return 0;
1896
1897err_out:
1898 cnic_cm_free_mem(dev);
1899 return err;
1900}
1901
1902static int cnic_cm_shutdown(struct cnic_dev *dev)
1903{
1904 struct cnic_local *cp = dev->cnic_priv;
1905 int i;
1906
1907 cp->stop_cm(dev);
1908
1909 if (!cp->csk_tbl)
1910 return 0;
1911
1912 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
1913 struct cnic_sock *csk = &cp->csk_tbl[i];
1914
1915 clear_bit(SK_F_INUSE, &csk->flags);
1916 cnic_cm_cleanup(csk);
1917 }
1918 cnic_cm_free_mem(dev);
1919
1920 return 0;
1921}
1922
1923static void cnic_init_context(struct cnic_dev *dev, u32 cid)
1924{
1925 struct cnic_local *cp = dev->cnic_priv;
1926 u32 cid_addr;
1927 int i;
1928
1929 if (CHIP_NUM(cp) == CHIP_NUM_5709)
1930 return;
1931
1932 cid_addr = GET_CID_ADDR(cid);
1933
1934 for (i = 0; i < CTX_SIZE; i += 4)
1935 cnic_ctx_wr(dev, cid_addr, i, 0);
1936}
1937
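/* Program (or invalidate) the 5709 host page table: write each
 * context block's DMA address, then poll up to 10 times for the
 * WRITE_REQ bit to clear before moving on to the next block. */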
1938static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
1939{
1940 struct cnic_local *cp = dev->cnic_priv;
1941 int ret = 0, i;
1942 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
1943
1944 if (CHIP_NUM(cp) != CHIP_NUM_5709)
1945 return 0;
1946
1947 for (i = 0; i < cp->ctx_blks; i++) {
1948 int j;
1949 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
1950 u32 val;
1951
1952 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
1953
1954 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1955 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
1956 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1957 (u64) cp->ctx_arr[i].mapping >> 32);
1958 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
1959 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1960 for (j = 0; j < 10; j++) {
1961
1962 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1963 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1964 break;
1965 udelay(5);
1966 }
1967 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1968 ret = -EBUSY;
1969 break;
1970 }
1971 }
1972 return ret;
1973}
1974
1975static void cnic_free_irq(struct cnic_dev *dev)
1976{
1977 struct cnic_local *cp = dev->cnic_priv;
1978 struct cnic_eth_dev *ethdev = cp->ethdev;
1979
1980 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
1981 cp->disable_int_sync(dev);
1982 tasklet_disable(&cp->cnic_irq_task);
1983 free_irq(ethdev->irq_arr[0].vector, dev);
1984 }
1985}
1986
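/* MSI-X setup: put the status block in one-shot mode, program the
 * coalescing thresholds, hook the vector, then force coalescing until
 * the completion producer index reads back as zero. */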
1987static int cnic_init_bnx2_irq(struct cnic_dev *dev)
1988{
1989 struct cnic_local *cp = dev->cnic_priv;
1990 struct cnic_eth_dev *ethdev = cp->ethdev;
1991
1992 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
1993 int err, i = 0;
1994 int sblk_num = cp->status_blk_num;
1995 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
1996 BNX2_HC_SB_CONFIG_1;
1997
1998 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
1999
2000 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
2001 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
2002 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
2003
2004 cp->bnx2_status_blk = cp->status_blk;
2005 cp->last_status_idx = cp->bnx2_status_blk->status_idx;
2006 tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix,
2007 (unsigned long) dev);
2008 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
2009 "cnic", dev);
2010 if (err) {
2011 tasklet_disable(&cp->cnic_irq_task);
2012 return err;
2013 }
2014 while (cp->bnx2_status_blk->status_completion_producer_index &&
2015 i < 10) {
2016 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
2017 1 << (11 + sblk_num));
2018 udelay(10);
2019 i++;
2020 barrier();
2021 }
2022 if (cp->bnx2_status_blk->status_completion_producer_index) {
2023 cnic_free_irq(dev);
2024 goto failed;
2025 }
2026
2027 } else {
2028 struct status_block *sblk = cp->status_blk;
2029 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
2030 int i = 0;
2031
2032 while (sblk->status_completion_producer_index && i < 10) {
2033 CNIC_WR(dev, BNX2_HC_COMMAND,
2034 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2035 udelay(10);
2036 i++;
2037 barrier();
2038 }
2039 if (sblk->status_completion_producer_index)
2040 goto failed;
2041
2042 }
2043 return 0;
2044
2045failed:
2046	printk(KERN_ERR PFX "%s: KCQ index not resetting to 0.\n",
2047 dev->netdev->name);
2048 return -EBUSY;
2049}
2050
2051static void cnic_enable_bnx2_int(struct cnic_dev *dev)
2052{
2053 struct cnic_local *cp = dev->cnic_priv;
2054 struct cnic_eth_dev *ethdev = cp->ethdev;
2055
2056 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2057 return;
2058
2059 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2060 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
2061}
2062
2063static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
2064{
2065 struct cnic_local *cp = dev->cnic_priv;
2066 struct cnic_eth_dev *ethdev = cp->ethdev;
2067
2068 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2069 return;
2070
2071 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2072 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2073 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
2074 synchronize_irq(ethdev->irq_arr[0].vector);
2075}
2076
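/* Set up the L2 TX ring context.  Every BD points at the single DMA
 * buffer, and the slot past the last BD is written with the ring's
 * own base address so the page chains back onto itself. */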
2077static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
2078{
2079 struct cnic_local *cp = dev->cnic_priv;
2080 struct cnic_eth_dev *ethdev = cp->ethdev;
2081 u32 cid_addr, tx_cid, sb_id;
2082 u32 val, offset0, offset1, offset2, offset3;
2083 int i;
2084 struct tx_bd *txbd;
2085 dma_addr_t buf_map;
2086 struct status_block *s_blk = cp->status_blk;
2087
2088 sb_id = cp->status_blk_num;
2089 tx_cid = 20;
2090 cnic_init_context(dev, tx_cid);
2091 cnic_init_context(dev, tx_cid + 1);
2092 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
2093 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
2094 struct status_block_msix *sblk = cp->status_blk;
2095
2096 tx_cid = TX_TSS_CID + sb_id - 1;
2097 cnic_init_context(dev, tx_cid);
2098 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
2099 (TX_TSS_CID << 7));
2100 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
2101 }
2102 cp->tx_cons = *cp->tx_cons_ptr;
2103
2104 cid_addr = GET_CID_ADDR(tx_cid);
2105 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
2106 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
2107
2108 for (i = 0; i < PHY_CTX_SIZE; i += 4)
2109 cnic_ctx_wr(dev, cid_addr2, i, 0);
2110
2111 offset0 = BNX2_L2CTX_TYPE_XI;
2112 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
2113 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
2114 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
2115 } else {
2116 offset0 = BNX2_L2CTX_TYPE;
2117 offset1 = BNX2_L2CTX_CMD_TYPE;
2118 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
2119 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
2120 }
2121 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
2122 cnic_ctx_wr(dev, cid_addr, offset0, val);
2123
2124 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
2125 cnic_ctx_wr(dev, cid_addr, offset1, val);
2126
2127 txbd = (struct tx_bd *) cp->l2_ring;
2128
2129 buf_map = cp->l2_buf_map;
2130 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
2131 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
2132 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
2133 }
2134 val = (u64) cp->l2_ring_map >> 32;
2135 cnic_ctx_wr(dev, cid_addr, offset2, val);
2136 txbd->tx_bd_haddr_hi = val;
2137
2138 val = (u64) cp->l2_ring_map & 0xffffffff;
2139 cnic_ctx_wr(dev, cid_addr, offset3, val);
2140 txbd->tx_bd_haddr_lo = val;
2141}
2142
2143static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
2144{
2145 struct cnic_local *cp = dev->cnic_priv;
2146 struct cnic_eth_dev *ethdev = cp->ethdev;
2147 u32 cid_addr, sb_id, val, coal_reg, coal_val;
2148 int i;
2149 struct rx_bd *rxbd;
2150 struct status_block *s_blk = cp->status_blk;
2151
2152 sb_id = cp->status_blk_num;
2153 cnic_init_context(dev, 2);
2154 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
2155 coal_reg = BNX2_HC_COMMAND;
2156 coal_val = CNIC_RD(dev, coal_reg);
2157 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
2158 struct status_block_msix *sblk = cp->status_blk;
2159
2160 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
2161 coal_reg = BNX2_HC_COALESCE_NOW;
2162 coal_val = 1 << (11 + sb_id);
2163 }
2164 i = 0;
2165	while (*cp->rx_cons_ptr == 0 && i < 10) {
2166 CNIC_WR(dev, coal_reg, coal_val);
2167 udelay(10);
2168 i++;
2169 barrier();
2170 }
2171 cp->rx_cons = *cp->rx_cons_ptr;
2172
2173 cid_addr = GET_CID_ADDR(2);
2174 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
2175 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
2176 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
2177
2178 if (sb_id == 0)
2179 val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT;
2180 else
2181 val = BNX2_L2CTX_STATUSB_NUM(sb_id);
2182 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
2183
2184 rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
2185 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
2186 dma_addr_t buf_map;
2187 int n = (i % cp->l2_rx_ring_size) + 1;
2188
2189 buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
2190 rxbd->rx_bd_len = cp->l2_single_buf_size;
2191 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
2192 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
2193 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
2194 }
2195 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
2196 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
2197 rxbd->rx_bd_haddr_hi = val;
2198
2199 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
2200 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
2201 rxbd->rx_bd_haddr_lo = val;
2202
2203 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
2204 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
2205}
2206
2207static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
2208{
2209 struct kwqe *wqes[1], l2kwqe;
2210
2211 memset(&l2kwqe, 0, sizeof(l2kwqe));
2212 wqes[0] = &l2kwqe;
2213 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
2214 (L2_KWQE_OPCODE_VALUE_FLUSH <<
2215 KWQE_OPCODE_SHIFT) | 2;
2216 dev->submit_kwqes(dev, wqes, 1);
2217}
2218
2219static void cnic_set_bnx2_mac(struct cnic_dev *dev)
2220{
2221 struct cnic_local *cp = dev->cnic_priv;
2222 u32 val;
2223
2224 val = cp->func << 2;
2225
2226 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
2227
2228 val = cnic_reg_rd_ind(dev, cp->shmem_base +
2229 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
2230 dev->mac_addr[0] = (u8) (val >> 8);
2231 dev->mac_addr[1] = (u8) val;
2232
2233 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
2234
2235 val = cnic_reg_rd_ind(dev, cp->shmem_base +
2236 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
2237 dev->mac_addr[2] = (u8) (val >> 24);
2238 dev->mac_addr[3] = (u8) (val >> 16);
2239 dev->mac_addr[4] = (u8) (val >> 8);
2240 dev->mac_addr[5] = (u8) val;
2241
2242 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
2243
2244 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
2245 if (CHIP_NUM(cp) != CHIP_NUM_5709)
2246 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
2247
2248 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
2249 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
2250 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
2251}
2252
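/* Bring up the bnx2 kernel queues: set the MQ page size, build the
 * KWQ and KCQ contexts (type, depth, page-table address), unmask the
 * command-scheduler notifications, and finally kick the CP and COM
 * processors via their scratchpad doorbells. */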
2253static int cnic_start_bnx2_hw(struct cnic_dev *dev)
2254{
2255 struct cnic_local *cp = dev->cnic_priv;
2256 struct cnic_eth_dev *ethdev = cp->ethdev;
2257 struct status_block *sblk = cp->status_blk;
2258 u32 val;
2259 int err;
2260
2261 cnic_set_bnx2_mac(dev);
2262
2263 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
2264 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
2265 if (BCM_PAGE_BITS > 12)
2266 val |= (12 - 8) << 4;
2267 else
2268 val |= (BCM_PAGE_BITS - 8) << 4;
2269
2270 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
2271
2272 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
2273 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
2274 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
2275
2276 err = cnic_setup_5709_context(dev, 1);
2277 if (err)
2278 return err;
2279
2280 cnic_init_context(dev, KWQ_CID);
2281 cnic_init_context(dev, KCQ_CID);
2282
2283 cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
2284 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
2285
2286 cp->max_kwq_idx = MAX_KWQ_IDX;
2287 cp->kwq_prod_idx = 0;
2288 cp->kwq_con_idx = 0;
2289 cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;
2290
2291 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
2292 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
2293 else
2294 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
2295
2296 /* Initialize the kernel work queue context. */
2297 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
2298 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
2299 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);
2300
2301 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
2302 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
2303
2304 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
2305 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
2306
2307 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
2308 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
2309
2310 val = (u32) cp->kwq_info.pgtbl_map;
2311 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
2312
2313 cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
2314 cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
2315
2316 cp->kcq_prod_idx = 0;
2317
2318 /* Initialize the kernel complete queue context. */
2319 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
2320 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
2321 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);
2322
2323 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
2324 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
2325
2326 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
2327 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
2328
2329 val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
2330 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
2331
2332 val = (u32) cp->kcq_info.pgtbl_map;
2333 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
2334
2335 cp->int_num = 0;
2336 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
2337 u32 sb_id = cp->status_blk_num;
2338 u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);
2339
2340 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
2341 cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
2342 cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
2343 }
2344
2345	/* Enable Command Scheduler notification when we write to the
2346 * host producer index of the kernel contexts. */
2347 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
2348
2349 /* Enable Command Scheduler notification when we write to either
2350 * the Send Queue or Receive Queue producer indexes of the kernel
2351 * bypass contexts. */
2352 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
2353 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
2354
2355	/* Notify COM when the driver posts an application buffer. */
2356 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
2357
2358	/* Set the CP and COM doorbells.  These two processors poll the
2359	 * doorbell for a non-zero value before running.  This must be done
2360 * after setting up the kernel queue contexts. */
2361 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
2362 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
2363
2364 cnic_init_bnx2_tx_ring(dev);
2365 cnic_init_bnx2_rx_ring(dev);
2366
2367 err = cnic_init_bnx2_irq(dev);
2368 if (err) {
2369		printk(KERN_ERR PFX "%s: cnic_init_bnx2_irq failed\n",
2370 dev->netdev->name);
2371 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
2372 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
2373 return err;
2374 }
2375
2376 return 0;
2377}
2378
2379static int cnic_start_hw(struct cnic_dev *dev)
2380{
2381 struct cnic_local *cp = dev->cnic_priv;
2382 struct cnic_eth_dev *ethdev = cp->ethdev;
2383 int err;
2384
2385 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
2386 return -EALREADY;
2387
2388 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
2389 if (err) {
2390 printk(KERN_ERR PFX "%s: register_cnic failed\n",
2391 dev->netdev->name);
2392 goto err2;
2393 }
2394
2395 dev->regview = ethdev->io_base;
2396 cp->chip_id = ethdev->chip_id;
2397 pci_dev_get(dev->pcidev);
2398 cp->func = PCI_FUNC(dev->pcidev->devfn);
2399 cp->status_blk = ethdev->irq_arr[0].status_blk;
2400 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
2401
2402 err = cp->alloc_resc(dev);
2403 if (err) {
2404 printk(KERN_ERR PFX "%s: allocate resource failure\n",
2405 dev->netdev->name);
2406 goto err1;
2407 }
2408
2409 err = cp->start_hw(dev);
2410 if (err)
2411 goto err1;
2412
2413 err = cnic_cm_open(dev);
2414 if (err)
2415 goto err1;
2416
2417 set_bit(CNIC_F_CNIC_UP, &dev->flags);
2418
2419 cp->enable_int(dev);
2420
2421 return 0;
2422
2423err1:
2424 ethdev->drv_unregister_cnic(dev->netdev);
2425 cp->free_resc(dev);
2426 pci_dev_put(dev->pcidev);
2427err2:
2428 return err;
2429}
2430
2431static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
2432{
2433 struct cnic_local *cp = dev->cnic_priv;
2434 struct cnic_eth_dev *ethdev = cp->ethdev;
2435
2436 cnic_disable_bnx2_int_sync(dev);
2437
2438 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
2439 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
2440
2441 cnic_init_context(dev, KWQ_CID);
2442 cnic_init_context(dev, KCQ_CID);
2443
2444 cnic_setup_5709_context(dev, 0);
2445 cnic_free_irq(dev);
2446
2447 ethdev->drv_unregister_cnic(dev->netdev);
2448
2449 cnic_free_resc(dev);
2450}
2451
2452static void cnic_stop_hw(struct cnic_dev *dev)
2453{
2454 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
2455 struct cnic_local *cp = dev->cnic_priv;
2456
2457 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
2458 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
2459 synchronize_rcu();
2460 cnic_cm_shutdown(dev);
2461 cp->stop_hw(dev);
2462 pci_dev_put(dev->pcidev);
2463 }
2464}
2465
2466static void cnic_free_dev(struct cnic_dev *dev)
2467{
2468 int i = 0;
2469
2470 while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
2471 msleep(100);
2472 i++;
2473 }
2474 if (atomic_read(&dev->ref_count) != 0)
2475 printk(KERN_ERR PFX "%s: Failed waiting for ref count to go"
2476 " to zero.\n", dev->netdev->name);
2477
2478 printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name);
2479 dev_put(dev->netdev);
2480 kfree(dev);
2481}
2482
2483static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
2484 struct pci_dev *pdev)
2485{
2486 struct cnic_dev *cdev;
2487 struct cnic_local *cp;
2488 int alloc_size;
2489
2490 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
2491
2492	cdev = kzalloc(alloc_size, GFP_KERNEL);
2493 if (cdev == NULL) {
2494 printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
2495 dev->name);
2496 return NULL;
2497 }
2498
2499 cdev->netdev = dev;
2500 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
2501 cdev->register_device = cnic_register_device;
2502 cdev->unregister_device = cnic_unregister_device;
2503 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
2504
2505 cp = cdev->cnic_priv;
2506 cp->dev = cdev;
2507 cp->uio_dev = -1;
2508 cp->l2_single_buf_size = 0x400;
2509 cp->l2_rx_ring_size = 3;
2510
2511 spin_lock_init(&cp->cnic_ulp_lock);
2512
2513 printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name);
2514
2515 return cdev;
2516}
2517
2518static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
2519{
2520 struct pci_dev *pdev;
2521 struct cnic_dev *cdev;
2522 struct cnic_local *cp;
2523 struct cnic_eth_dev *ethdev = NULL;
2524 struct cnic_eth_dev *(*probe)(void *) = NULL;
2525
2526 probe = __symbol_get("bnx2_cnic_probe");
2527 if (probe) {
2528 ethdev = (*probe)(dev);
2529 symbol_put_addr(probe);
2530 }
2531 if (!ethdev)
2532 return NULL;
2533
2534 pdev = ethdev->pdev;
2535 if (!pdev)
2536 return NULL;
2537
2538 dev_hold(dev);
2539 pci_dev_get(pdev);
2540 if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
2541 pdev->device == PCI_DEVICE_ID_NX2_5709S) {
2542 u8 rev;
2543
2544 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
2545 if (rev < 0x10) {
2546 pci_dev_put(pdev);
2547 goto cnic_err;
2548 }
2549 }
2550 pci_dev_put(pdev);
2551
2552 cdev = cnic_alloc_dev(dev, pdev);
2553 if (cdev == NULL)
2554 goto cnic_err;
2555
2556 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
2557 cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
2558
2559 cp = cdev->cnic_priv;
2560 cp->ethdev = ethdev;
2561 cdev->pcidev = pdev;
2562
2563 cp->cnic_ops = &cnic_bnx2_ops;
2564 cp->start_hw = cnic_start_bnx2_hw;
2565 cp->stop_hw = cnic_stop_bnx2_hw;
2566 cp->setup_pgtbl = cnic_setup_page_tbl;
2567 cp->alloc_resc = cnic_alloc_bnx2_resc;
2568 cp->free_resc = cnic_free_resc;
2569 cp->start_cm = cnic_cm_init_bnx2_hw;
2570 cp->stop_cm = cnic_cm_stop_bnx2_hw;
2571 cp->enable_int = cnic_enable_bnx2_int;
2572 cp->disable_int_sync = cnic_disable_bnx2_int_sync;
2573 cp->close_conn = cnic_close_bnx2_conn;
2574 cp->next_idx = cnic_bnx2_next_idx;
2575 cp->hw_idx = cnic_bnx2_hw_idx;
2576 return cdev;
2577
2578cnic_err:
2579 dev_put(dev);
2580 return NULL;
2581}
2582
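/* A netdev is ours if ethtool reports the "bnx2" driver; if so,
 * create a cnic_dev for it and add it to the global device list. */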
2583static struct cnic_dev *is_cnic_dev(struct net_device *dev)
2584{
2585 struct ethtool_drvinfo drvinfo;
2586 struct cnic_dev *cdev = NULL;
2587
2588 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
2589 memset(&drvinfo, 0, sizeof(drvinfo));
2590 dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
2591
2592 if (!strcmp(drvinfo.driver, "bnx2"))
2593 cdev = init_bnx2_cnic(dev);
2594 if (cdev) {
2595 write_lock(&cnic_dev_lock);
2596 list_add(&cdev->list, &cnic_dev_list);
2597 write_unlock(&cnic_dev_lock);
2598 }
2599 }
2600 return cdev;
2601}
2602
2603/* netdev event handler: starts or stops the hardware and forwards
2604 * netdev events to the registered ULPs as devices register, come up,
2605 * go down, or unregister. */
2606static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
2607 void *ptr)
2608{
2609 struct net_device *netdev = ptr;
2610 struct cnic_dev *dev;
2611 int if_type;
2612 int new_dev = 0;
2613
2614 dev = cnic_from_netdev(netdev);
2615
2616 if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
2617 /* Check for the hot-plug device */
2618 dev = is_cnic_dev(netdev);
2619 if (dev) {
2620 new_dev = 1;
2621 cnic_hold(dev);
2622 }
2623 }
2624 if (dev) {
2625 struct cnic_local *cp = dev->cnic_priv;
2626
2627 if (new_dev)
2628 cnic_ulp_init(dev);
2629 else if (event == NETDEV_UNREGISTER)
2630 cnic_ulp_exit(dev);
2631 else if (event == NETDEV_UP) {
2632 mutex_lock(&cnic_lock);
2633 if (!cnic_start_hw(dev))
2634 cnic_ulp_start(dev);
2635 mutex_unlock(&cnic_lock);
2636 }
2637
2638 rcu_read_lock();
2639 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
2640 struct cnic_ulp_ops *ulp_ops;
2641 void *ctx;
2642
2643 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
2644 if (!ulp_ops || !ulp_ops->indicate_netevent)
2645 continue;
2646
2647 ctx = cp->ulp_handle[if_type];
2648
2649 ulp_ops->indicate_netevent(ctx, event);
2650 }
2651 rcu_read_unlock();
2652
2653 if (event == NETDEV_GOING_DOWN) {
2654 mutex_lock(&cnic_lock);
2655 cnic_ulp_stop(dev);
2656 cnic_stop_hw(dev);
2657 mutex_unlock(&cnic_lock);
2658 } else if (event == NETDEV_UNREGISTER) {
2659 write_lock(&cnic_dev_lock);
2660 list_del_init(&dev->list);
2661 write_unlock(&cnic_dev_lock);
2662
2663 cnic_put(dev);
2664 cnic_free_dev(dev);
2665 goto done;
2666 }
2667 cnic_put(dev);
2668 }
2669done:
2670 return NOTIFY_DONE;
2671}
2672
2673static struct notifier_block cnic_netdev_notifier = {
2674 .notifier_call = cnic_netdev_event
2675};
2676
2677static void cnic_release(void)
2678{
2679 struct cnic_dev *dev;
2680
2681 while (!list_empty(&cnic_dev_list)) {
2682 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
2683 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
2684 cnic_ulp_stop(dev);
2685 cnic_stop_hw(dev);
2686 }
2687
2688 cnic_ulp_exit(dev);
2689 list_del_init(&dev->list);
2690 cnic_free_dev(dev);
2691 }
2692}
2693
2694static int __init cnic_init(void)
2695{
2696 int rc = 0;
2697
2698 printk(KERN_INFO "%s", version);
2699
2700 rc = register_netdevice_notifier(&cnic_netdev_notifier);
2701 if (rc) {
2702 cnic_release();
2703 return rc;
2704 }
2705
2706 return 0;
2707}
2708
2709static void __exit cnic_exit(void)
2710{
2711 unregister_netdevice_notifier(&cnic_netdev_notifier);
2712 cnic_release();
2713 return;
2714}
2715
2716module_init(cnic_init);
2717module_exit(cnic_exit);
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
new file mode 100644
index 00000000000..5192d4a9df5
--- /dev/null
+++ b/drivers/net/cnic.h
@@ -0,0 +1,299 @@
1/* cnic.h: Broadcom CNIC core network driver.
2 *
3 * Copyright (c) 2006-2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 */
10
11
12#ifndef CNIC_H
13#define CNIC_H
14
15#define KWQ_PAGE_CNT 4
16#define KCQ_PAGE_CNT 16
17
18#define KWQ_CID 24
19#define KCQ_CID 25
20
21/*
22 * krnlq_context definition
23 */
24#define L5_KRNLQ_FLAGS 0x00000000
25#define L5_KRNLQ_SIZE 0x00000000
26#define L5_KRNLQ_TYPE 0x00000000
27#define KRNLQ_FLAGS_PG_SZ (0xf<<0)
28#define KRNLQ_FLAGS_PG_SZ_256 (0<<0)
29#define KRNLQ_FLAGS_PG_SZ_512 (1<<0)
30#define KRNLQ_FLAGS_PG_SZ_1K (2<<0)
31#define KRNLQ_FLAGS_PG_SZ_2K (3<<0)
32#define KRNLQ_FLAGS_PG_SZ_4K (4<<0)
33#define KRNLQ_FLAGS_PG_SZ_8K (5<<0)
34#define KRNLQ_FLAGS_PG_SZ_16K (6<<0)
35#define KRNLQ_FLAGS_PG_SZ_32K (7<<0)
36#define KRNLQ_FLAGS_PG_SZ_64K (8<<0)
37#define KRNLQ_FLAGS_PG_SZ_128K (9<<0)
38#define KRNLQ_FLAGS_PG_SZ_256K (10<<0)
39#define KRNLQ_FLAGS_PG_SZ_512K (11<<0)
40#define KRNLQ_FLAGS_PG_SZ_1M (12<<0)
41#define KRNLQ_FLAGS_PG_SZ_2M (13<<0)
42#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15)
43#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
44#define KRNLQ_TYPE_TYPE (0xf<<28)
45#define KRNLQ_TYPE_TYPE_EMPTY (0<<28)
46#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28)
47
48#define L5_KRNLQ_HOST_QIDX 0x00000004
49#define L5_KRNLQ_HOST_FW_QIDX 0x00000008
50#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c
51#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c
52#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010
53#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014
54#define L5_KRNLQ_PGTBL_PGIDX 0x00000018
55#define L5_KRNLQ_NX_PG_QIDX 0x00000018
56#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c
57#define L5_KRNLQ_QIDX_INCR 0x0000001c
58#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020
59#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024
60
61#define BNX2_PG_CTX_MAP 0x1a0034
62#define BNX2_ISCSI_CTX_MAP 0x1a0074
63
64struct cnic_redirect_entry {
65 struct dst_entry *old_dst;
66 struct dst_entry *new_dst;
67};
68
69#define MAX_COMPLETED_KCQE 64
70
71#define MAX_CNIC_L5_CONTEXT 256
72
73#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT
74
75#define MAX_ISCSI_TBL_SZ 256
76
77#define CNIC_LOCAL_PORT_MIN 60000
78#define CNIC_LOCAL_PORT_MAX 61000
79#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
80
81#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
82#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
83#define MAX_KWQE_CNT (KWQE_CNT - 1)
84#define MAX_KCQE_CNT (KCQE_CNT - 1)
85
86#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1)
87#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1)
88
89#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
90#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
91
92#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
93#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
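/* Each KWQE/KCQE is 32 bytes, so one page holds BCM_PAGE_SIZE / 32
 * entries: PG() extracts the page number from a ring index and IDX()
 * the offset within that page. */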
94
95#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \
96 (MAX_KCQE_CNT - 1)) ? \
97 (x) + 2 : (x) + 1
98
99#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
100#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
101#define BNX2X_KWQ_DATA(cp, x) \
102 &(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
103
104#define DEF_IPID_COUNT 0xc001
105
106#define DEF_KA_TIMEOUT 10000
107#define DEF_KA_INTERVAL 300000
108#define DEF_KA_MAX_PROBE_COUNT 3
109#define DEF_TOS 0
110#define DEF_TTL 0xfe
111#define DEF_SND_SEQ_SCALE 0
112#define DEF_RCV_BUF 0xffff
113#define DEF_SND_BUF 0xffff
114#define DEF_SEED 0
115#define DEF_MAX_RT_TIME 500
116#define DEF_MAX_DA_COUNT 2
117#define DEF_SWS_TIMER 1000
118#define DEF_MAX_CWND 0xffff
119
120struct cnic_ctx {
121 u32 cid;
122 void *ctx;
123 dma_addr_t mapping;
124};
125
126#define BNX2_MAX_CID 0x2000
127
128struct cnic_dma {
129 int num_pages;
130 void **pg_arr;
131 dma_addr_t *pg_map_arr;
132 int pgtbl_size;
133 u32 *pgtbl;
134 dma_addr_t pgtbl_map;
135};
136
137struct cnic_id_tbl {
138 spinlock_t lock;
139 u32 start;
140 u32 max;
141 u32 next;
142 unsigned long *table;
143};
144
145#define CNIC_KWQ16_DATA_SIZE 128
146
147struct kwqe_16_data {
148 u8 data[CNIC_KWQ16_DATA_SIZE];
149};
150
151struct cnic_iscsi {
152 struct cnic_dma task_array_info;
153 struct cnic_dma r2tq_info;
154 struct cnic_dma hq_info;
155};
156
157struct cnic_context {
158 u32 cid;
159 struct kwqe_16_data *kwqe_data;
160 dma_addr_t kwqe_data_mapping;
161 wait_queue_head_t waitq;
162 int wait_cond;
163 unsigned long timestamp;
164 u32 ctx_flags;
165#define CTX_FL_OFFLD_START 0x00000001
166 u8 ulp_proto_id;
167 union {
168 struct cnic_iscsi *iscsi;
169 } proto;
170};
171
172struct cnic_local {
173
174 spinlock_t cnic_ulp_lock;
175 void *ulp_handle[MAX_CNIC_ULP_TYPE];
176 unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
177#define ULP_F_INIT 0
178#define ULP_F_START 1
179 struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
180
181	/* protected by cnic_ulp_lock */
182 u32 cnic_local_flags;
183#define CNIC_LCL_FL_KWQ_INIT 0x00000001
184
185 struct cnic_dev *dev;
186
187 struct cnic_eth_dev *ethdev;
188
189 void *l2_ring;
190 dma_addr_t l2_ring_map;
191 int l2_ring_size;
192 int l2_rx_ring_size;
193
194 void *l2_buf;
195 dma_addr_t l2_buf_map;
196 int l2_buf_size;
197 int l2_single_buf_size;
198
199 u16 *rx_cons_ptr;
200 u16 *tx_cons_ptr;
201 u16 rx_cons;
202 u16 tx_cons;
203
204 u32 kwq_cid_addr;
205 u32 kcq_cid_addr;
206
207 struct cnic_dma kwq_info;
208 struct kwqe **kwq;
209
210 struct cnic_dma kwq_16_data_info;
211
212 u16 max_kwq_idx;
213
214 u16 kwq_prod_idx;
215 u32 kwq_io_addr;
216
217 u16 *kwq_con_idx_ptr;
218 u16 kwq_con_idx;
219
220 struct cnic_dma kcq_info;
221 struct kcqe **kcq;
222
223 u16 kcq_prod_idx;
224 u32 kcq_io_addr;
225
226 void *status_blk;
227 struct status_block_msix *bnx2_status_blk;
228 struct host_status_block *bnx2x_status_blk;
229
230 u32 status_blk_num;
231 u32 int_num;
232 u32 last_status_idx;
233 struct tasklet_struct cnic_irq_task;
234
235 struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
236
237 struct cnic_sock *csk_tbl;
238 struct cnic_id_tbl csk_port_tbl;
239
240 struct cnic_dma conn_buf_info;
241 struct cnic_dma gbl_buf_info;
242
243 struct cnic_iscsi *iscsi_tbl;
244 struct cnic_context *ctx_tbl;
245 struct cnic_id_tbl cid_tbl;
246 int max_iscsi_conn;
247 atomic_t iscsi_conn;
248
249 /* per connection parameters */
250 int num_iscsi_tasks;
251 int num_ccells;
252 int task_array_size;
253 int r2tq_size;
254 int hq_size;
255 int num_cqs;
256
257 struct cnic_ctx *ctx_arr;
258 int ctx_blks;
259 int ctx_blk_size;
260 int cids_per_blk;
261
262 u32 chip_id;
263 int func;
264 u32 shmem_base;
265
266 u32 uio_dev;
267 struct uio_info *cnic_uinfo;
268
269 struct cnic_ops *cnic_ops;
270 int (*start_hw)(struct cnic_dev *);
271 void (*stop_hw)(struct cnic_dev *);
272 void (*setup_pgtbl)(struct cnic_dev *,
273 struct cnic_dma *);
274 int (*alloc_resc)(struct cnic_dev *);
275 void (*free_resc)(struct cnic_dev *);
276 int (*start_cm)(struct cnic_dev *);
277 void (*stop_cm)(struct cnic_dev *);
278 void (*enable_int)(struct cnic_dev *);
279 void (*disable_int_sync)(struct cnic_dev *);
280 void (*ack_int)(struct cnic_dev *);
281 void (*close_conn)(struct cnic_sock *, u32 opcode);
282 u16 (*next_idx)(u16);
283 u16 (*hw_idx)(u16);
284};
285
286struct bnx2x_bd_chain_next {
287 u32 addr_lo;
288 u32 addr_hi;
289 u8 reserved[8];
290};
291
292#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN)
293#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT)
294
295#define CDU_REGION_NUMBER_XCM_AG 2
296#define CDU_REGION_NUMBER_UCM_AG 4
297
298#endif
299
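The KWQ/KCQ macros at the top of cnic.h split a ring index into a page number and an in-page entry (the "- 5" shift reflects 32-byte queue elements), and BNX2X_NEXT_KCQE advances the index while skipping the last slot of each page, which is reserved for chaining to the next page. A minimal user-space sketch of that arithmetic, assuming 4 KiB pages; PAGE_BITS and the derived constants are illustrative stand-ins, not the driver's own values:

    #include <stdio.h>

    #define PAGE_BITS    12                        /* assumed 4 KiB pages */
    #define KCQE_CNT     (1 << (PAGE_BITS - 5))    /* 32-byte entries: 128 per page */
    #define MAX_KCQE_CNT (KCQE_CNT - 1)            /* in-page index mask */

    #define KCQ_PG(x)    (((x) & ~MAX_KCQE_CNT) >> (PAGE_BITS - 5))
    #define KCQ_IDX(x)   ((x) & MAX_KCQE_CNT)
    /* the last usable slot of each page chains to the next page, so skip it */
    #define NEXT_KCQE(x) ((((x) & (MAX_KCQE_CNT - 1)) == (MAX_KCQE_CNT - 1)) ? \
                          (x) + 2 : (x) + 1)

    int main(void)
    {
            unsigned int idx = 125;
            int i;

            for (i = 0; i < 4; i++, idx = NEXT_KCQE(idx))
                    printf("index %3u -> page %u, entry %u\n",
                           idx, KCQ_PG(idx), KCQ_IDX(idx));
            return 0;   /* prints 125, 126, then 128: entry 127 is skipped */
    }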
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
new file mode 100644
index 00000000000..cee80f69445
--- /dev/null
+++ b/drivers/net/cnic_defs.h
@@ -0,0 +1,580 @@
1
2/* cnic_defs.h: Broadcom CNIC core network driver.
3 *
4 * Copyright (c) 2006-2009 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 *
10 */
11
12#ifndef CNIC_DEFS_H
13#define CNIC_DEFS_H
14
15/* KWQ (kernel work queue) request op codes */
16#define L2_KWQE_OPCODE_VALUE_FLUSH (4)
17
18#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50)
19#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51)
20#define L4_KWQE_OPCODE_VALUE_CONNECT3 (52)
21#define L4_KWQE_OPCODE_VALUE_RESET (53)
22#define L4_KWQE_OPCODE_VALUE_CLOSE (54)
23#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET (60)
24#define L4_KWQE_OPCODE_VALUE_INIT_ULP (61)
25
26#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG (1)
27#define L4_KWQE_OPCODE_VALUE_UPDATE_PG (9)
28#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG (14)
29
30#define L5CM_RAMROD_CMD_ID_BASE (0x80)
31#define L5CM_RAMROD_CMD_ID_TCP_CONNECT (L5CM_RAMROD_CMD_ID_BASE + 3)
32#define L5CM_RAMROD_CMD_ID_CLOSE (L5CM_RAMROD_CMD_ID_BASE + 12)
33#define L5CM_RAMROD_CMD_ID_ABORT (L5CM_RAMROD_CMD_ID_BASE + 13)
34#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14)
35#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15)
36
37/* KCQ (kernel completion queue) response op codes */
38#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53)
39#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54)
40#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE (55)
41#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE (56)
42#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED (57)
43#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED (58)
44#define L4_KCQE_OPCODE_VALUE_INIT_ULP (61)
45
46#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG (1)
47#define L4_KCQE_OPCODE_VALUE_UPDATE_PG (9)
48#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14)
49
50/* KCQ (kernel completion queue) completion status */
51#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
52#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
53
54#define L4_LAYER_CODE (4)
55#define L2_LAYER_CODE (2)
56
57/*
58 * L4 KCQ CQE
59 */
60struct l4_kcq {
61 u32 cid;
62 u32 pg_cid;
63 u32 conn_id;
64 u32 pg_host_opaque;
65#if defined(__BIG_ENDIAN)
66 u16 status;
67 u16 reserved1;
68#elif defined(__LITTLE_ENDIAN)
69 u16 reserved1;
70 u16 status;
71#endif
72 u32 reserved2[2];
73#if defined(__BIG_ENDIAN)
74 u8 flags;
75#define L4_KCQ_RESERVED3 (0x7<<0)
76#define L4_KCQ_RESERVED3_SHIFT 0
77#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
78#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
79#define L4_KCQ_LAYER_CODE (0x7<<4)
80#define L4_KCQ_LAYER_CODE_SHIFT 4
81#define L4_KCQ_RESERVED4 (0x1<<7)
82#define L4_KCQ_RESERVED4_SHIFT 7
83 u8 op_code;
84 u16 qe_self_seq;
85#elif defined(__LITTLE_ENDIAN)
86 u16 qe_self_seq;
87 u8 op_code;
88 u8 flags;
89#define L4_KCQ_RESERVED3 (0x7<<0)
90#define L4_KCQ_RESERVED3_SHIFT 0
91#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
92#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
93#define L4_KCQ_LAYER_CODE (0x7<<4)
94#define L4_KCQ_LAYER_CODE_SHIFT 4
95#define L4_KCQ_RESERVED4 (0x1<<7)
96#define L4_KCQ_RESERVED4_SHIFT 7
97#endif
98};
99
100
101/*
102 * L4 KCQ CQE PG upload
103 */
104struct l4_kcq_upload_pg {
105 u32 pg_cid;
106#if defined(__BIG_ENDIAN)
107 u16 pg_status;
108 u16 pg_ipid_count;
109#elif defined(__LITTLE_ENDIAN)
110 u16 pg_ipid_count;
111 u16 pg_status;
112#endif
113 u32 reserved1[5];
114#if defined(__BIG_ENDIAN)
115 u8 flags;
116#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
117#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
118#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
119#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
120#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
121#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
122 u8 op_code;
123 u16 qe_self_seq;
124#elif defined(__LITTLE_ENDIAN)
125 u16 qe_self_seq;
126 u8 op_code;
127 u8 flags;
128#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
129#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
130#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
131#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
132#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
133#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
134#endif
135};
136
137
138/*
139 * a KWQE request to gracefully close the connection
140 */
141struct l4_kwq_close_req {
142#if defined(__BIG_ENDIAN)
143 u8 flags;
144#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
145#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
146#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
147#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
148#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
149#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
150 u8 op_code;
151 u16 reserved0;
152#elif defined(__LITTLE_ENDIAN)
153 u16 reserved0;
154 u8 op_code;
155 u8 flags;
156#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
157#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
158#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
159#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
160#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
161#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
162#endif
163 u32 cid;
164 u32 reserved2[6];
165};
166
167
168/*
169 * The first request to be passed in order to establish a connection in option2
170 */
171struct l4_kwq_connect_req1 {
172#if defined(__BIG_ENDIAN)
173 u8 flags;
174#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
175#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
176#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
177#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
178#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
179#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
180 u8 op_code;
181 u8 reserved0;
182 u8 conn_flags;
183#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
184#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
185#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
186#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
187#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
188#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
189#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
190#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
191#elif defined(__LITTLE_ENDIAN)
192 u8 conn_flags;
193#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
194#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
195#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
196#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
197#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
198#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
199#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
200#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
201 u8 reserved0;
202 u8 op_code;
203 u8 flags;
204#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
205#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
206#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
207#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
208#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
209#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
210#endif
211 u32 cid;
212 u32 pg_cid;
213 u32 src_ip;
214 u32 dst_ip;
215#if defined(__BIG_ENDIAN)
216 u16 dst_port;
217 u16 src_port;
218#elif defined(__LITTLE_ENDIAN)
219 u16 src_port;
220 u16 dst_port;
221#endif
222#if defined(__BIG_ENDIAN)
223 u8 rsrv1[3];
224 u8 tcp_flags;
225#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
226#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
227#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
228#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
229#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
230#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
231#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
232#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
233#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
234#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
235#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
236#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
237#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
238#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
239#elif defined(__LITTLE_ENDIAN)
240 u8 tcp_flags;
241#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
242#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
243#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
244#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
245#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
246#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
247#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
248#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
249#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
250#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
251#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
252#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
253#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
254#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
255 u8 rsrv1[3];
256#endif
257 u32 rsrv2;
258};
259
260
261/*
262 * The second (optional) request to be passed in order to establish
263 * a connection in option2 - for IPv6 only
264 */
265struct l4_kwq_connect_req2 {
266#if defined(__BIG_ENDIAN)
267 u8 flags;
268#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
269#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
270#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
271#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
272#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
273#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
274 u8 op_code;
275 u8 reserved0;
276 u8 rsrv;
277#elif defined(__LITTLE_ENDIAN)
278 u8 rsrv;
279 u8 reserved0;
280 u8 op_code;
281 u8 flags;
282#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
283#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
284#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
285#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
286#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
287#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
288#endif
289 u32 reserved2;
290 u32 src_ip_v6_2;
291 u32 src_ip_v6_3;
292 u32 src_ip_v6_4;
293 u32 dst_ip_v6_2;
294 u32 dst_ip_v6_3;
295 u32 dst_ip_v6_4;
296};
297
298
299/*
300 * The third (and last) request to be passed in order to establish
301 * a connection in option2
302 */
303struct l4_kwq_connect_req3 {
304#if defined(__BIG_ENDIAN)
305 u8 flags;
306#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
307#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
308#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
309#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
310#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
311#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
312 u8 op_code;
313 u16 reserved0;
314#elif defined(__LITTLE_ENDIAN)
315 u16 reserved0;
316 u8 op_code;
317 u8 flags;
318#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
319#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
320#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
321#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
322#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
323#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
324#endif
325 u32 ka_timeout;
326	u32 ka_interval;
327#if defined(__BIG_ENDIAN)
328 u8 snd_seq_scale;
329 u8 ttl;
330 u8 tos;
331 u8 ka_max_probe_count;
332#elif defined(__LITTLE_ENDIAN)
333 u8 ka_max_probe_count;
334 u8 tos;
335 u8 ttl;
336 u8 snd_seq_scale;
337#endif
338#if defined(__BIG_ENDIAN)
339 u16 pmtu;
340 u16 mss;
341#elif defined(__LITTLE_ENDIAN)
342 u16 mss;
343 u16 pmtu;
344#endif
345 u32 rcv_buf;
346 u32 snd_buf;
347 u32 seed;
348};
349
350
351/*
352 * a KWQE request to offload a PG connection
353 */
354struct l4_kwq_offload_pg {
355#if defined(__BIG_ENDIAN)
356 u8 flags;
357#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
358#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
359#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
360#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
361#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
362#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
363 u8 op_code;
364 u16 reserved0;
365#elif defined(__LITTLE_ENDIAN)
366 u16 reserved0;
367 u8 op_code;
368 u8 flags;
369#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
370#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
371#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
372#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
373#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
374#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
375#endif
376#if defined(__BIG_ENDIAN)
377 u8 l2hdr_nbytes;
378 u8 pg_flags;
379#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
380#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
381#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
382#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
383#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
384#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
385 u8 da0;
386 u8 da1;
387#elif defined(__LITTLE_ENDIAN)
388 u8 da1;
389 u8 da0;
390 u8 pg_flags;
391#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
392#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
393#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
394#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
395#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
396#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
397 u8 l2hdr_nbytes;
398#endif
399#if defined(__BIG_ENDIAN)
400 u8 da2;
401 u8 da3;
402 u8 da4;
403 u8 da5;
404#elif defined(__LITTLE_ENDIAN)
405 u8 da5;
406 u8 da4;
407 u8 da3;
408 u8 da2;
409#endif
410#if defined(__BIG_ENDIAN)
411 u8 sa0;
412 u8 sa1;
413 u8 sa2;
414 u8 sa3;
415#elif defined(__LITTLE_ENDIAN)
416 u8 sa3;
417 u8 sa2;
418 u8 sa1;
419 u8 sa0;
420#endif
421#if defined(__BIG_ENDIAN)
422 u8 sa4;
423 u8 sa5;
424 u16 etype;
425#elif defined(__LITTLE_ENDIAN)
426 u16 etype;
427 u8 sa5;
428 u8 sa4;
429#endif
430#if defined(__BIG_ENDIAN)
431 u16 vlan_tag;
432 u16 ipid_start;
433#elif defined(__LITTLE_ENDIAN)
434 u16 ipid_start;
435 u16 vlan_tag;
436#endif
437#if defined(__BIG_ENDIAN)
438 u16 ipid_count;
439 u16 reserved3;
440#elif defined(__LITTLE_ENDIAN)
441 u16 reserved3;
442 u16 ipid_count;
443#endif
444 u32 host_opaque;
445};
446
447
448/*
449 * a KWQE request to abortively close (reset) the connection
450 */
451struct l4_kwq_reset_req {
452#if defined(__BIG_ENDIAN)
453 u8 flags;
454#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
455#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
456#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
457#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
458#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
459#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
460 u8 op_code;
461 u16 reserved0;
462#elif defined(__LITTLE_ENDIAN)
463 u16 reserved0;
464 u8 op_code;
465 u8 flags;
466#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
467#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
468#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
469#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
470#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
471#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
472#endif
473 u32 cid;
474 u32 reserved2[6];
475};
476
477
478/*
479 * a KWQE request to update a PG connection
480 */
481struct l4_kwq_update_pg {
482#if defined(__BIG_ENDIAN)
483 u8 flags;
484#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
485#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
486#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
487#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
488#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
489#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
490 u8 opcode;
491 u16 oper16;
492#elif defined(__LITTLE_ENDIAN)
493 u16 oper16;
494 u8 opcode;
495 u8 flags;
496#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
497#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
498#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
499#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
500#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
501#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
502#endif
503 u32 pg_cid;
504 u32 pg_host_opaque;
505#if defined(__BIG_ENDIAN)
506 u8 pg_valids;
507#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
508#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
509#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
510#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
511#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
512#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
513 u8 pg_unused_a;
514 u16 pg_ipid_count;
515#elif defined(__LITTLE_ENDIAN)
516 u16 pg_ipid_count;
517 u8 pg_unused_a;
518 u8 pg_valids;
519#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
520#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
521#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
522#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
523#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
524#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
525#endif
526#if defined(__BIG_ENDIAN)
527 u16 reserverd3;
528 u8 da0;
529 u8 da1;
530#elif defined(__LITTLE_ENDIAN)
531 u8 da1;
532 u8 da0;
533 u16 reserverd3;
534#endif
535#if defined(__BIG_ENDIAN)
536 u8 da2;
537 u8 da3;
538 u8 da4;
539 u8 da5;
540#elif defined(__LITTLE_ENDIAN)
541 u8 da5;
542 u8 da4;
543 u8 da3;
544 u8 da2;
545#endif
546 u32 reserved4;
547 u32 reserved5;
548};
549
550
551/*
552 * a KWQE request to upload a PG or L4 context
553 */
554struct l4_kwq_upload {
555#if defined(__BIG_ENDIAN)
556 u8 flags;
557#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
558#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
559#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
560#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
561#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
562#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
563 u8 opcode;
564 u16 oper16;
565#elif defined(__LITTLE_ENDIAN)
566 u16 oper16;
567 u8 opcode;
568 u8 flags;
569#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
570#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
571#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
572#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
573#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
574#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
575#endif
576 u32 cid;
577 u32 reserved2[6];
578};
579
580#endif /* CNIC_DEFS_H */
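Every flags byte in the structures above packs several subfields behind a mask macro plus a matching _SHIFT constant, with the whole layout mirrored for each byte order. A small user-space sketch of the read/modify idiom those pairs support, using the 3-bit layer-code field as the example; the macro values are copied from the definitions above, while the helper is hypothetical:

    #include <stdio.h>

    #define LAYER_CODE        (0x7<<4)  /* 3-bit field occupying bits 4-6 */
    #define LAYER_CODE_SHIFT  4
    #define L4_LAYER_CODE     (4)

    static unsigned char set_layer(unsigned char flags, unsigned int code)
    {
            flags &= ~LAYER_CODE;       /* clear the old field value */
            flags |= (code << LAYER_CODE_SHIFT) & LAYER_CODE;
            return flags;
    }

    int main(void)
    {
            unsigned char flags = set_layer(0, L4_LAYER_CODE);

            printf("flags=0x%02x layer=%u\n", flags,
                   (flags & LAYER_CODE) >> LAYER_CODE_SHIFT);
            return 0;                   /* flags=0x40 layer=4 */
    }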
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
new file mode 100644
index 00000000000..06380963a34
--- /dev/null
+++ b/drivers/net/cnic_if.h
@@ -0,0 +1,299 @@
1/* cnic_if.h: Broadcom CNIC core network driver.
2 *
3 * Copyright (c) 2006 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 */
10
11
12#ifndef CNIC_IF_H
13#define CNIC_IF_H
14
15#define CNIC_MODULE_VERSION "2.0.0"
16#define CNIC_MODULE_RELDATE "May 21, 2009"
17
18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1
20#define CNIC_ULP_L4 2
21#define MAX_CNIC_ULP_TYPE_EXT 2
22#define MAX_CNIC_ULP_TYPE 3
23
24struct kwqe {
25 u32 kwqe_op_flag;
26
27#define KWQE_OPCODE_MASK 0x00ff0000
28#define KWQE_OPCODE_SHIFT 16
29#define KWQE_FLAGS_LAYER_SHIFT 28
30#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
31
32 u32 kwqe_info0;
33 u32 kwqe_info1;
34 u32 kwqe_info2;
35 u32 kwqe_info3;
36 u32 kwqe_info4;
37 u32 kwqe_info5;
38 u32 kwqe_info6;
39};
40
41struct kwqe_16 {
42 u32 kwqe_info0;
43 u32 kwqe_info1;
44 u32 kwqe_info2;
45 u32 kwqe_info3;
46};
47
48struct kcqe {
49 u32 kcqe_info0;
50 u32 kcqe_info1;
51 u32 kcqe_info2;
52 u32 kcqe_info3;
53 u32 kcqe_info4;
54 u32 kcqe_info5;
55 u32 kcqe_info6;
56 u32 kcqe_op_flag;
57 #define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */
58 #define KCQE_FLAGS_LAYER_MASK (0x7<<28)
59 #define KCQE_FLAGS_LAYER_MASK_MISC (0<<28)
60 #define KCQE_FLAGS_LAYER_MASK_L2 (2<<28)
61 #define KCQE_FLAGS_LAYER_MASK_L3 (3<<28)
62 #define KCQE_FLAGS_LAYER_MASK_L4 (4<<28)
63 #define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
64 #define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
65 #define KCQE_FLAGS_NEXT (1<<31)
66 #define KCQE_FLAGS_OPCODE_MASK (0xff<<16)
67 #define KCQE_FLAGS_OPCODE_SHIFT (16)
68 #define KCQE_OPCODE(op) \
69 (((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
70};
71
72#define MAX_CNIC_CTL_DATA 64
73#define MAX_DRV_CTL_DATA 64
74
75#define CNIC_CTL_STOP_CMD 1
76#define CNIC_CTL_START_CMD 2
77#define CNIC_CTL_COMPLETION_CMD 3
78
79#define DRV_CTL_IO_WR_CMD 0x101
80#define DRV_CTL_IO_RD_CMD 0x102
81#define DRV_CTL_CTX_WR_CMD 0x103
82#define DRV_CTL_CTXTBL_WR_CMD 0x104
83#define DRV_CTL_COMPLETION_CMD 0x105
84
85struct cnic_ctl_completion {
86 u32 cid;
87};
88
89struct drv_ctl_completion {
90 u32 comp_count;
91};
92
93struct cnic_ctl_info {
94 int cmd;
95 union {
96 struct cnic_ctl_completion comp;
97 char bytes[MAX_CNIC_CTL_DATA];
98 } data;
99};
100
101struct drv_ctl_io {
102 u32 cid_addr;
103 u32 offset;
104 u32 data;
105 dma_addr_t dma_addr;
106};
107
108struct drv_ctl_info {
109 int cmd;
110 union {
111 struct drv_ctl_completion comp;
112 struct drv_ctl_io io;
113 char bytes[MAX_DRV_CTL_DATA];
114 } data;
115};
116
117struct cnic_ops {
118 struct module *cnic_owner;
119 /* Calls to these functions are protected by RCU. When
120 * unregistering, we wait for any calls to complete before
121 * continuing.
122 */
123 int (*cnic_handler)(void *, void *);
124 int (*cnic_ctl)(void *, struct cnic_ctl_info *);
125};
126
127#define MAX_CNIC_VEC 8
128
129struct cnic_irq {
130 unsigned int vector;
131 void *status_blk;
132 u32 status_blk_num;
133 u32 irq_flags;
134#define CNIC_IRQ_FL_MSIX 0x00000001
135};
136
137struct cnic_eth_dev {
138 struct module *drv_owner;
139 u32 drv_state;
140#define CNIC_DRV_STATE_REGD 0x00000001
141#define CNIC_DRV_STATE_USING_MSIX 0x00000002
142 u32 chip_id;
143 u32 max_kwqe_pending;
144 struct pci_dev *pdev;
145 void __iomem *io_base;
146
147 u32 ctx_tbl_offset;
148 u32 ctx_tbl_len;
149 int ctx_blk_size;
150 u32 starting_cid;
151 u32 max_iscsi_conn;
152 u32 max_fcoe_conn;
153 u32 max_rdma_conn;
154 u32 reserved0[2];
155
156 int num_irq;
157 struct cnic_irq irq_arr[MAX_CNIC_VEC];
158 int (*drv_register_cnic)(struct net_device *,
159 struct cnic_ops *, void *);
160 int (*drv_unregister_cnic)(struct net_device *);
161 int (*drv_submit_kwqes_32)(struct net_device *,
162 struct kwqe *[], u32);
163 int (*drv_submit_kwqes_16)(struct net_device *,
164 struct kwqe_16 *[], u32);
165 int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
166 unsigned long reserved1[2];
167};
168
169struct cnic_sockaddr {
170 union {
171 struct sockaddr_in v4;
172 struct sockaddr_in6 v6;
173 } local;
174 union {
175 struct sockaddr_in v4;
176 struct sockaddr_in6 v6;
177 } remote;
178};
179
180struct cnic_sock {
181 struct cnic_dev *dev;
182 void *context;
183 u32 src_ip[4];
184 u32 dst_ip[4];
185 u16 src_port;
186 u16 dst_port;
187 u16 vlan_id;
188 unsigned char old_ha[6];
189 unsigned char ha[6];
190 u32 mtu;
191 u32 cid;
192 u32 l5_cid;
193 u32 pg_cid;
194 int ulp_type;
195
196 u32 ka_timeout;
197 u32 ka_interval;
198 u8 ka_max_probe_count;
199 u8 tos;
200 u8 ttl;
201 u8 snd_seq_scale;
202 u32 rcv_buf;
203 u32 snd_buf;
204 u32 seed;
205
206 unsigned long tcp_flags;
207#define SK_TCP_NO_DELAY_ACK 0x1
208#define SK_TCP_KEEP_ALIVE 0x2
209#define SK_TCP_NAGLE 0x4
210#define SK_TCP_TIMESTAMP 0x8
211#define SK_TCP_SACK 0x10
212#define SK_TCP_SEG_SCALING 0x20
213 unsigned long flags;
214#define SK_F_INUSE 0
215#define SK_F_OFFLD_COMPLETE 1
216#define SK_F_OFFLD_SCHED 2
217#define SK_F_PG_OFFLD_COMPLETE 3
218#define SK_F_CONNECT_START 4
219#define SK_F_IPV6 5
220#define SK_F_CLOSING 7
221
222 atomic_t ref_count;
223 u32 state;
224 struct kwqe kwqe1;
225 struct kwqe kwqe2;
226 struct kwqe kwqe3;
227};
228
229struct cnic_dev {
230 struct net_device *netdev;
231 struct pci_dev *pcidev;
232 void __iomem *regview;
233 struct list_head list;
234
235 int (*register_device)(struct cnic_dev *dev, int ulp_type,
236 void *ulp_ctx);
237 int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
238 int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
239 u32 num_wqes);
240 int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
241 u32 num_wqes);
242
243 int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
244 void *);
245 int (*cm_destroy)(struct cnic_sock *);
246 int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
247 int (*cm_abort)(struct cnic_sock *);
248 int (*cm_close)(struct cnic_sock *);
249 struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
250 int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
251 char *data, u16 data_size);
252 unsigned long flags;
253#define CNIC_F_CNIC_UP 1
254#define CNIC_F_BNX2_CLASS 3
255#define CNIC_F_BNX2X_CLASS 4
256 atomic_t ref_count;
257 u8 mac_addr[6];
258
259 int max_iscsi_conn;
260 int max_fcoe_conn;
261 int max_rdma_conn;
262
263 void *cnic_priv;
264};
265
266#define CNIC_WR(dev, off, val) writel(val, dev->regview + off)
267#define CNIC_WR16(dev, off, val) writew(val, dev->regview + off)
268#define CNIC_WR8(dev, off, val) writeb(val, dev->regview + off)
269#define CNIC_RD(dev, off) readl(dev->regview + off)
270#define CNIC_RD16(dev, off) readw(dev->regview + off)
271
272struct cnic_ulp_ops {
273 /* Calls to these functions are protected by RCU. When
274 * unregistering, we wait for any calls to complete before
275 * continuing.
276 */
277
278 void (*cnic_init)(struct cnic_dev *dev);
279 void (*cnic_exit)(struct cnic_dev *dev);
280 void (*cnic_start)(void *ulp_ctx);
281 void (*cnic_stop)(void *ulp_ctx);
282 void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
283 u32 num_cqes);
284 void (*indicate_netevent)(void *ulp_ctx, unsigned long event);
285 void (*cm_connect_complete)(struct cnic_sock *);
286 void (*cm_close_complete)(struct cnic_sock *);
287 void (*cm_abort_complete)(struct cnic_sock *);
288 void (*cm_remote_close)(struct cnic_sock *);
289 void (*cm_remote_abort)(struct cnic_sock *);
290 void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
291 char *data, u16 data_size);
292 struct module *owner;
293};
294
295extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
296
297extern int cnic_unregister_driver(int ulp_type);
298
299#endif
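cnic_register_driver() is the hook for upper-layer protocol (ULP) drivers; the cnic_ulp_ops callbacks run under RCU protection, and unregistering waits for in-flight calls to finish, as the comment above notes. A sketch of the hookup against these interfaces; the my_ulp_* names are hypothetical, not an in-tree ULP, and error handling is trimmed:

    static void my_ulp_init(struct cnic_dev *dev)
    {
            /* called for each cnic device known at registration time */
    }

    static void my_ulp_exit(struct cnic_dev *dev)
    {
            /* per-device teardown */
    }

    static struct cnic_ulp_ops my_ulp_ops = {
            .cnic_init = my_ulp_init,
            .cnic_exit = my_ulp_exit,
            .owner     = THIS_MODULE,
    };

    static int __init my_ulp_load(void)
    {
            return cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
    }

    static void __exit my_ulp_unload(void)
    {
            /* waits for RCU-protected callback invocations to complete */
            cnic_unregister_driver(CNIC_ULP_ISCSI);
    }

    module_init(my_ulp_load);
    module_exit(my_ulp_unload);
    MODULE_LICENSE("GPL");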
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index e52a2018e91..f7929e89eb0 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2921,7 +2921,7 @@ static int e100_resume(struct pci_dev *pdev)
2921 /* ack any pending wake events, disable PME */ 2921 /* ack any pending wake events, disable PME */
2922 pci_enable_wake(pdev, 0, 0); 2922 pci_enable_wake(pdev, 0, 0);
2923 2923
2924 /* disbale reverse auto-negotiation */ 2924 /* disable reverse auto-negotiation */
2925 if (nic->phy == phy_82552_v) { 2925 if (nic->phy == phy_82552_v) {
2926 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, 2926 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
2927 E100_82552_SMARTSPEED); 2927 E100_82552_SMARTSPEED);
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index d6e491bc58c..981936c1fb4 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -62,7 +62,7 @@ struct e1000_info;
62 e_printk(KERN_NOTICE, adapter, format, ## arg) 62 e_printk(KERN_NOTICE, adapter, format, ## arg)
63 63
64 64
65/* Interrupt modes, as used by the IntMode paramter */ 65/* Interrupt modes, as used by the IntMode parameter */
66#define E1000E_INT_MODE_LEGACY 0 66#define E1000E_INT_MODE_LEGACY 0
67#define E1000E_INT_MODE_MSI 1 67#define E1000E_INT_MODE_MSI 1
68#define E1000E_INT_MODE_MSIX 2 68#define E1000E_INT_MODE_MSIX 2
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 16a41389575..78952f8324e 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -268,7 +268,7 @@ struct ehea_qp_init_attr {
268}; 268};
269 269
270/* 270/*
271 * Event Queue attributes, passed as paramter 271 * Event Queue attributes, passed as parameter
272 */ 272 */
273struct ehea_eq_attr { 273struct ehea_eq_attr {
274 u32 type; 274 u32 type;
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index 2ad6cd75653..8e9b67ebbf8 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -45,7 +45,7 @@ struct igbvf_adapter;
45/* Interrupt defines */ 45/* Interrupt defines */
46#define IGBVF_START_ITR 648 /* ~6000 ints/sec */ 46#define IGBVF_START_ITR 648 /* ~6000 ints/sec */
47 47
48/* Interrupt modes, as used by the IntMode paramter */ 48/* Interrupt modes, as used by the IntMode parameter */
49#define IGBVF_INT_MODE_LEGACY 0 49#define IGBVF_INT_MODE_LEGACY 0
50#define IGBVF_INT_MODE_MSI 1 50#define IGBVF_INT_MODE_MSI 1
51#define IGBVF_INT_MODE_MSIX 2 51#define IGBVF_INT_MODE_MSIX 2
diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h
index dd9318f1949..dfc2541bb55 100644
--- a/drivers/net/ipg.h
+++ b/drivers/net/ipg.h
@@ -514,7 +514,7 @@ enum ipg_regs {
514#define IPG_DMALIST_ALIGN_PAD 0x07 514#define IPG_DMALIST_ALIGN_PAD 0x07
515#define IPG_MULTICAST_HASHTABLE_SIZE 0x40 515#define IPG_MULTICAST_HASHTABLE_SIZE 0x40
516 516
517/* Number of miliseconds to wait after issuing a software reset. 517/* Number of milliseconds to wait after issuing a software reset.
518 * 0x05 <= IPG_AC_RESETWAIT to account for proper 10Mbps operation. 518 * 0x05 <= IPG_AC_RESETWAIT to account for proper 10Mbps operation.
519 */ 519 */
520#define IPG_AC_RESETWAIT 0x05 520#define IPG_AC_RESETWAIT 0x05
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 0a7e78ade63..e02bafdd368 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -367,7 +367,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
367 int i; 367 int i;
368 368
369 /* If we haven't received a specific coalescing setting 369 /* If we haven't received a specific coalescing setting
370 * (module param), we set the moderation paramters as follows: 370 * (module param), we set the moderation parameters as follows:
371 * - moder_cnt is set to the number of mtu sized packets to 371 * - moder_cnt is set to the number of mtu sized packets to
372 * satisfy our coalescing target. 372 * satisfy our coalescing target.
373 * - moder_time is set to a fixed value. 373 * - moder_time is set to a fixed value.
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index dee188761a3..b9ceddde46c 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -497,8 +497,10 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
497 if (eq_table->have_irq) 497 if (eq_table->have_irq)
498 free_irq(dev->pdev->irq, dev); 498 free_irq(dev->pdev->irq, dev);
499 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 499 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
500 if (eq_table->eq[i].have_irq) 500 if (eq_table->eq[i].have_irq) {
501 free_irq(eq_table->eq[i].irq, eq_table->eq + i); 501 free_irq(eq_table->eq[i].irq, eq_table->eq + i);
502 eq_table->eq[i].have_irq = 0;
503 }
502 504
503 kfree(eq_table->irq_names); 505 kfree(eq_table->irq_names);
504} 506}
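The mlx4_free_irqs() hunk above is a small but instructive fix: clearing have_irq right after free_irq() makes the teardown path idempotent, so a second pass (for example when falling back from MSI-X to legacy interrupts after a failed start) cannot free the same vector twice. Reduced to the bare pattern, with names taken from the hunk:

    if (eq->have_irq) {
            free_irq(eq->irq, eq);
            eq->have_irq = 0;       /* safe to run this path again */
    }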
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 30bea968969..018348c0119 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -100,6 +100,10 @@ module_param_named(use_prio, use_prio, bool, 0444);
100MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " 100MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
101 "(0/1, default 0)"); 101 "(0/1, default 0)");
102 102
103static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
104module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
105MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
106
103int mlx4_check_port_params(struct mlx4_dev *dev, 107int mlx4_check_port_params(struct mlx4_dev *dev,
104 enum mlx4_port_type *port_type) 108 enum mlx4_port_type *port_type)
105{ 109{
@@ -203,12 +207,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
203 dev->caps.max_cqes = dev_cap->max_cq_sz - 1; 207 dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
204 dev->caps.reserved_cqs = dev_cap->reserved_cqs; 208 dev->caps.reserved_cqs = dev_cap->reserved_cqs;
205 dev->caps.reserved_eqs = dev_cap->reserved_eqs; 209 dev->caps.reserved_eqs = dev_cap->reserved_eqs;
210 dev->caps.mtts_per_seg = 1 << log_mtts_per_seg;
206 dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts, 211 dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
207 MLX4_MTT_ENTRY_PER_SEG); 212 dev->caps.mtts_per_seg);
208 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 213 dev->caps.reserved_mrws = dev_cap->reserved_mrws;
209 dev->caps.reserved_uars = dev_cap->reserved_uars; 214 dev->caps.reserved_uars = dev_cap->reserved_uars;
210 dev->caps.reserved_pds = dev_cap->reserved_pds; 215 dev->caps.reserved_pds = dev_cap->reserved_pds;
211 dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz; 216 dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
212 dev->caps.max_msg_sz = dev_cap->max_msg_sz; 217 dev->caps.max_msg_sz = dev_cap->max_msg_sz;
213 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 218 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
214 dev->caps.flags = dev_cap->flags; 219 dev->caps.flags = dev_cap->flags;
@@ -1304,6 +1309,11 @@ static int __init mlx4_verify_params(void)
1304 return -1; 1309 return -1;
1305 } 1310 }
1306 1311
1312 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
1313 printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
1314 return -1;
1315 }
1316
1307 return 0; 1317 return 0;
1308} 1318}
1309 1319
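The log_mtts_per_seg changes above follow the standard module-parameter recipe: declare the variable with a default, expose it read-only through module_param()/MODULE_PARM_DESC(), and range-check it once at init before the value is used. A stand-alone sketch of that recipe, using demo names rather than mlx4's:

    #include <linux/module.h>
    #include <linux/moduleparam.h>
    #include <linux/kernel.h>

    static int log_entries = 3;             /* default: 8 entries per segment */
    module_param(log_entries, int, 0444);   /* readable in sysfs, not writable */
    MODULE_PARM_DESC(log_entries, "Log2 number of entries per segment (1-5)");

    static int __init demo_init(void)
    {
            if (log_entries < 1 || log_entries > 5) {
                    printk(KERN_WARNING "demo: bad log_entries: %d\n",
                           log_entries);
                    return -EINVAL;
            }
            printk(KERN_INFO "demo: %d entries per segment\n", 1 << log_entries);
            return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");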
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 0a467785f06..5887e4764d2 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -209,7 +209,7 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
209 } else 209 } else
210 mtt->page_shift = page_shift; 210 mtt->page_shift = page_shift;
211 211
212 for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1) 212 for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
213 ++mtt->order; 213 ++mtt->order;
214 214
215 mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order); 215 mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
@@ -350,7 +350,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
350 mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | 350 mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
351 MLX4_MPT_PD_FLAG_RAE); 351 MLX4_MPT_PD_FLAG_RAE);
352 mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) * 352 mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) *
353 MLX4_MTT_ENTRY_PER_SEG); 353 dev->caps.mtts_per_seg);
354 } else { 354 } else {
355 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); 355 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
356 } 356 }
@@ -391,7 +391,7 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
391 (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64))) 391 (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
392 return -EINVAL; 392 return -EINVAL;
393 393
394 if (start_index & (MLX4_MTT_ENTRY_PER_SEG - 1)) 394 if (start_index & (dev->caps.mtts_per_seg - 1))
395 return -EINVAL; 395 return -EINVAL;
396 396
397 mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg + 397 mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index cebdf3243ca..bd22df95adf 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -98,7 +98,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
98 profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz; 98 profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
99 profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz; 99 profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
100 profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz; 100 profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
101 profile[MLX4_RES_MTT].size = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz; 101 profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
102 profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE; 102 profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
103 103
104 profile[MLX4_RES_QP].num = request->num_qp; 104 profile[MLX4_RES_QP].num = request->num_qp;
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 8754e44cada..3bd0b5933d5 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -3242,8 +3242,8 @@ struct niu {
3242 struct niu_parent *parent; 3242 struct niu_parent *parent;
3243 3243
3244 u32 flags; 3244 u32 flags;
3245#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removebale PHY detected*/ 3245#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removable PHY detected */
3246#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removebale PHY */ 3246#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removable PHY */
3247#define NIU_FLAGS_VPD_VALID 0x00800000 /* VPD has valid version */ 3247#define NIU_FLAGS_VPD_VALID 0x00800000 /* VPD has valid version */
3248#define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */ 3248#define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */
3249#define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */ 3249#define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index b9a5f59d6c9..90d1f76c0e8 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -3224,7 +3224,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
3224 3224
3225 if (value & RST_FO_FR) { 3225 if (value & RST_FO_FR) {
3226 QPRINTK(qdev, IFDOWN, ERR, 3226 QPRINTK(qdev, IFDOWN, ERR,
3227 "ETIMEOUT!!! errored out of resetting the chip!\n"); 3227 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3228 status = -ETIMEDOUT; 3228 status = -ETIMEDOUT;
3229 } 3229 }
3230 3230
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index a67c14a7bef..71afbf8b9c5 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -141,7 +141,7 @@ end:
141/* We are being asked by firmware to accept 141/* We are being asked by firmware to accept
142 * a change to the port. This is only 142 * a change to the port. This is only
143 * a change to max frame sizes (Tx/Rx), pause 143 * a change to max frame sizes (Tx/Rx), pause
144 * paramters, or loopback mode. We wake up a worker 144 * parameters, or loopback mode. We wake up a worker
145 * to handle processing this since a mailbox command 145 * to handle processing this since a mailbox command
146 * will need to be sent to ACK the request. 146 * will need to be sent to ACK the request.
147 */ 147 */
@@ -371,7 +371,7 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
371 /* We are being asked by firmware to accept 371 /* We are being asked by firmware to accept
372 * a change to the port. This is only 372 * a change to the port. This is only
373 * a change to max frame sizes (Tx/Rx), pause 373 * a change to max frame sizes (Tx/Rx), pause
374 * paramters, or loopback mode. 374 * parameters, or loopback mode.
375 */ 375 */
376 case AEN_IDC_REQ: 376 case AEN_IDC_REQ:
377 status = ql_idc_req_aen(qdev); 377 status = ql_idc_req_aen(qdev);
@@ -380,7 +380,7 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
380 /* Process an inbound IDC event. 380 /* Process an inbound IDC event.
381 * This will happen when we're trying to 381 * This will happen when we're trying to
382 * change tx/rx max frame size, change pause 382 * change tx/rx max frame size, change pause
383 * paramters or loopback mode. 383 * parameters or loopback mode.
384 */ 384 */
385 case AEN_IDC_CMPLT: 385 case AEN_IDC_CMPLT:
386 case AEN_IDC_EXT: 386 case AEN_IDC_EXT:
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 007c881896d..35196faa084 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -66,7 +66,6 @@ static const int multicast_filter_limit = 32;
66#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ 66#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
67#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ 67#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
68#define EarlyTxThld 0x3F /* 0x3F means NO early transmit */ 68#define EarlyTxThld 0x3F /* 0x3F means NO early transmit */
69#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... */
70#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */ 69#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
71#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ 70#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
72 71
@@ -2366,10 +2365,10 @@ static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
2366 return cmd; 2365 return cmd;
2367} 2366}
2368 2367
2369static void rtl_set_rx_max_size(void __iomem *ioaddr) 2368static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
2370{ 2369{
2371 /* Low hurts. Let's disable the filtering. */ 2370 /* Low hurts. Let's disable the filtering. */
2372 RTL_W16(RxMaxSize, 16383); 2371 RTL_W16(RxMaxSize, rx_buf_sz);
2373} 2372}
2374 2373
2375static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) 2374static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
@@ -2416,7 +2415,7 @@ static void rtl_hw_start_8169(struct net_device *dev)
2416 2415
2417 RTL_W8(EarlyTxThres, EarlyTxThld); 2416 RTL_W8(EarlyTxThres, EarlyTxThld);
2418 2417
2419 rtl_set_rx_max_size(ioaddr); 2418 rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
2420 2419
2421 if ((tp->mac_version == RTL_GIGA_MAC_VER_01) || 2420 if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
2422 (tp->mac_version == RTL_GIGA_MAC_VER_02) || 2421 (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
@@ -2677,7 +2676,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
2677 2676
2678 RTL_W8(EarlyTxThres, EarlyTxThld); 2677 RTL_W8(EarlyTxThres, EarlyTxThld);
2679 2678
2680 rtl_set_rx_max_size(ioaddr); 2679 rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
2681 2680
2682 tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1; 2681 tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
2683 2682
@@ -2855,7 +2854,7 @@ static void rtl_hw_start_8101(struct net_device *dev)
2855 2854
2856 RTL_W8(EarlyTxThres, EarlyTxThld); 2855 RTL_W8(EarlyTxThres, EarlyTxThld);
2857 2856
2858 rtl_set_rx_max_size(ioaddr); 2857 rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz);
2859 2858
2860 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; 2859 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
2861 2860
diff --git a/drivers/net/skfp/h/smt.h b/drivers/net/skfp/h/smt.h
index 1ff589988d1..2976757a36f 100644
--- a/drivers/net/skfp/h/smt.h
+++ b/drivers/net/skfp/h/smt.h
@@ -413,7 +413,7 @@ struct smt_p_reason {
413#define SMT_RDF_SUCCESS 0x00000003 /* success (PMF) */ 413#define SMT_RDF_SUCCESS 0x00000003 /* success (PMF) */
414#define SMT_RDF_BADSET 0x00000004 /* bad set count (PMF) */ 414#define SMT_RDF_BADSET 0x00000004 /* bad set count (PMF) */
415#define SMT_RDF_ILLEGAL 0x00000005 /* read only (PMF) */ 415#define SMT_RDF_ILLEGAL 0x00000005 /* read only (PMF) */
416#define SMT_RDF_NOPARAM 0x6 /* paramter not supported (PMF) */ 416#define SMT_RDF_NOPARAM 0x6 /* parameter not supported (PMF) */
417#define SMT_RDF_RANGE 0x8 /* out of range */ 417#define SMT_RDF_RANGE 0x8 /* out of range */
418#define SMT_RDF_AUTHOR 0x9 /* not authorized */ 418#define SMT_RDF_AUTHOR 0x9 /* not authorized */
419#define SMT_RDF_LENGTH 0x0a /* length error */ 419#define SMT_RDF_LENGTH 0x0a /* length error */
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 329f890e290..f1f773b17fe 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -45,7 +45,8 @@
45 defined(CONFIG_MACH_ZYLONITE) ||\ 45 defined(CONFIG_MACH_ZYLONITE) ||\
46 defined(CONFIG_MACH_LITTLETON) ||\ 46 defined(CONFIG_MACH_LITTLETON) ||\
47 defined(CONFIG_MACH_ZYLONITE2) ||\ 47 defined(CONFIG_MACH_ZYLONITE2) ||\
48 defined(CONFIG_ARCH_VIPER) 48 defined(CONFIG_ARCH_VIPER) ||\
49 defined(CONFIG_MACH_STARGATE2)
49 50
50#include <asm/mach-types.h> 51#include <asm/mach-types.h>
51 52
@@ -73,7 +74,7 @@
73/* We actually can't write halfwords properly if not word aligned */ 74/* We actually can't write halfwords properly if not word aligned */
74static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) 75static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
75{ 76{
76 if (machine_is_mainstone() && reg & 2) { 77 if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) {
77 unsigned int v = val << 16; 78 unsigned int v = val << 16;
78 v |= readl(ioaddr + (reg & ~2)) & 0xffff; 79 v |= readl(ioaddr + (reg & ~2)) & 0xffff;
79 writel(v, ioaddr + (reg & ~2)); 80 writel(v, ioaddr + (reg & ~2));
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 13dbc59bfe4..b40b6de2d08 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -79,7 +79,7 @@ MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
79MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ; 79MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ;
80MODULE_FIRMWARE(FW_NAME); 80MODULE_FIRMWARE(FW_NAME);
81 81
82/* Module paramters */ 82/* Module parameters */
83 83
84/* Ring Speed 0,4,16 84/* Ring Speed 0,4,16
85 * 0 = Autosense 85 * 0 = Autosense
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index b358bbbce33..b3715efdce5 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -169,7 +169,7 @@ static char *open_min_error[] = {
169 "Monitor Contention failer for RPL", "FDX Protocol Error" 169 "Monitor Contention failer for RPL", "FDX Protocol Error"
170}; 170};
171 171
172/* Module paramters */ 172/* Module parameters */
173 173
174/* Ring Speed 0,4,16 174/* Ring Speed 0,4,16
175 * 0 = Autosense 175 * 0 = Autosense
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index c36974925c1..451b54136ed 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -132,7 +132,7 @@ static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost",
132 "Reserved", "Reserved", "No Monitor Detected for RPL", 132 "Reserved", "Reserved", "No Monitor Detected for RPL",
133 "Monitor Contention failer for RPL", "FDX Protocol Error"}; 133 "Monitor Contention failer for RPL", "FDX Protocol Error"};
134 134
135/* Module paramters */ 135/* Module parameters */
136 136
137MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ; 137MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
138MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ; 138MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index 6fcb500257b..61fe80dda3e 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * Limitation: 8 * Limitation:
9 * Can only get/set settings of the first queue. 9 * Can only get/set settings of the first queue.
10 * Need to re-open the interface manually after changing some paramters. 10 * Need to re-open the interface manually after changing some parameters.
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify it 12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the 13 * under the terms of the GNU General Public License as published by the
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index c94de624314..22c0585a031 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -429,7 +429,7 @@ static void rx_complete (struct urb *urb)
429 429
430 /* stalls need manual reset. this is rare ... except that 430 /* stalls need manual reset. this is rare ... except that
431 * when going through USB 2.0 TTs, unplug appears this way. 431 * when going through USB 2.0 TTs, unplug appears this way.
432 * we avoid the highspeed version of the ETIMEOUT/EILSEQ 432 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
433 * storm, recovering as needed. 433 * storm, recovering as needed.
434 */ 434 */
435 case -EPIPE: 435 case -EPIPE:
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f0bb1a4c832..52198f6797a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -843,6 +843,10 @@ static int virtnet_probe(struct virtio_device *vdev)
843 int err; 843 int err;
844 struct net_device *dev; 844 struct net_device *dev;
845 struct virtnet_info *vi; 845 struct virtnet_info *vi;
846 struct virtqueue *vqs[3];
847 vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
848 const char *names[] = { "input", "output", "control" };
849 int nvqs;
846 850
847 /* Allocate ourselves a network device with room for our info */ 851 /* Allocate ourselves a network device with room for our info */
848 dev = alloc_etherdev(sizeof(struct virtnet_info)); 852 dev = alloc_etherdev(sizeof(struct virtnet_info));
@@ -903,25 +907,19 @@ static int virtnet_probe(struct virtio_device *vdev)
903 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) 907 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
904 vi->mergeable_rx_bufs = true; 908 vi->mergeable_rx_bufs = true;
905 909
906 /* We expect two virtqueues, receive then send. */ 910 /* We expect two virtqueues, receive then send,
907 vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); 911 * and optionally control. */
908 if (IS_ERR(vi->rvq)) { 912 nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
909 err = PTR_ERR(vi->rvq); 913
914 err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
915 if (err)
910 goto free; 916 goto free;
911 }
912 917
913 vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done); 918 vi->rvq = vqs[0];
914 if (IS_ERR(vi->svq)) { 919 vi->svq = vqs[1];
915 err = PTR_ERR(vi->svq);
916 goto free_recv;
917 }
918 920
919 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { 921 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
920 vi->cvq = vdev->config->find_vq(vdev, 2, NULL); 922 vi->cvq = vqs[2];
921 if (IS_ERR(vi->cvq)) {
922 err = PTR_ERR(vi->svq);
923 goto free_send;
924 }
925 923
926 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) 924 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
927 dev->features |= NETIF_F_HW_VLAN_FILTER; 925 dev->features |= NETIF_F_HW_VLAN_FILTER;
@@ -939,7 +937,7 @@ static int virtnet_probe(struct virtio_device *vdev)
939 err = register_netdev(dev); 937 err = register_netdev(dev);
940 if (err) { 938 if (err) {
941 pr_debug("virtio_net: registering device failed\n"); 939 pr_debug("virtio_net: registering device failed\n");
942 goto free_ctrl; 940 goto free_vqs;
943 } 941 }
944 942
945 /* Last of all, set up some receive buffers. */ 943 /* Last of all, set up some receive buffers. */
@@ -960,13 +958,8 @@ static int virtnet_probe(struct virtio_device *vdev)
960 958
961unregister: 959unregister:
962 unregister_netdev(dev); 960 unregister_netdev(dev);
963free_ctrl: 961free_vqs:
964 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) 962 vdev->config->del_vqs(vdev);
965 vdev->config->del_vq(vi->cvq);
966free_send:
967 vdev->config->del_vq(vi->svq);
968free_recv:
969 vdev->config->del_vq(vi->rvq);
970free: 963free:
971 free_netdev(dev); 964 free_netdev(dev);
972 return err; 965 return err;
@@ -992,12 +985,10 @@ static void virtnet_remove(struct virtio_device *vdev)
992 985
993 BUG_ON(vi->num != 0); 986 BUG_ON(vi->num != 0);
994 987
995 vdev->config->del_vq(vi->svq);
996 vdev->config->del_vq(vi->rvq);
997 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
998 vdev->config->del_vq(vi->cvq);
999 unregister_netdev(vi->dev); 988 unregister_netdev(vi->dev);
1000 989
990 vdev->config->del_vqs(vi->vdev);
991
1001 while (vi->pages) 992 while (vi->pages)
1002 __free_pages(get_a_page(vi, GFP_KERNEL), 0); 993 __free_pages(get_a_page(vi, GFP_KERNEL), 0);
1003 994
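The virtio_net conversion above shows the new batch discovery API: one find_vqs() call takes parallel arrays of callbacks and names and returns every queue, and one del_vqs() call releases them all, replacing the per-queue find_vq()/del_vq() pairs and their cascade of error labels. The shape of the idiom, where recv_done, xmit_done and has_ctrl_vq stand in for the driver's own handlers and feature test:

    struct virtqueue *vqs[3];
    vq_callback_t *callbacks[] = { recv_done, xmit_done, NULL };
    const char *names[] = { "input", "output", "control" };
    int nvqs = has_ctrl_vq ? 3 : 2;     /* control queue is optional */
    int err;

    err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
    if (err)
            goto free;
    /* ... use vqs[0] (receive), vqs[1] (send), vqs[2] (control) ... */

    /* teardown: one call frees every queue found above */
    vdev->config->del_vqs(vdev);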
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 08b1a284b69..bb719b6114c 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -579,7 +579,8 @@ static inline void queue_put_desc(unsigned int queue, u32 phys,
579 debug_desc(phys, desc); 579 debug_desc(phys, desc);
580 BUG_ON(phys & 0x1F); 580 BUG_ON(phys & 0x1F);
581 qmgr_put_entry(queue, phys); 581 qmgr_put_entry(queue, phys);
582 BUG_ON(qmgr_stat_overflow(queue)); 582 /* Don't check for queue overflow here, we've allocated sufficient
583 length and queues >= 32 don't support this check anyway. */
583} 584}
584 585
585 586
@@ -789,10 +790,10 @@ static void hss_hdlc_txdone_irq(void *pdev)
789 free_buffer_irq(port->tx_buff_tab[n_desc]); 790 free_buffer_irq(port->tx_buff_tab[n_desc]);
790 port->tx_buff_tab[n_desc] = NULL; 791 port->tx_buff_tab[n_desc] = NULL;
791 792
792 start = qmgr_stat_empty(port->plat->txreadyq); 793 start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
793 queue_put_desc(port->plat->txreadyq, 794 queue_put_desc(port->plat->txreadyq,
794 tx_desc_phys(port, n_desc), desc); 795 tx_desc_phys(port, n_desc), desc);
795 if (start) { 796 if (start) { /* TX-ready queue was empty */
796#if DEBUG_TX 797#if DEBUG_TX
797 printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit" 798 printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit"
798 " ready\n", dev->name); 799 " ready\n", dev->name);
@@ -867,13 +868,13 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
867 queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc); 868 queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
868 dev->trans_start = jiffies; 869 dev->trans_start = jiffies;
869 870
870 if (qmgr_stat_empty(txreadyq)) { 871 if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
871#if DEBUG_TX 872#if DEBUG_TX
872 printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name); 873 printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name);
873#endif 874#endif
874 netif_stop_queue(dev); 875 netif_stop_queue(dev);
875 /* we could miss TX ready interrupt */ 876 /* we could miss TX ready interrupt */
876 if (!qmgr_stat_empty(txreadyq)) { 877 if (!qmgr_stat_below_low_watermark(txreadyq)) {
877#if DEBUG_TX 878#if DEBUG_TX
878 printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n", 879 printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n",
879 dev->name); 880 dev->name);
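The hss_hdlc_xmit() hunk above also illustrates the canonical way to park a transmit queue without losing a wakeup: stop the queue first, then re-test the ready condition, because the TX-done interrupt can refill txreadyq between the test and the stop. The bare pattern, with tx_ready() as a hypothetical helper for the watermark check:

    if (!tx_ready(port)) {
            netif_stop_queue(dev);
            /* the IRQ may have made the queue ready again in between */
            if (tx_ready(port))
                    netif_wake_queue(dev);
    }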
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index fb7541c28e5..5bc00db21b2 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -310,7 +310,7 @@ config PRISM54
310 If you want to compile the driver as a module ( = code which can be 310 If you want to compile the driver as a module ( = code which can be
311 inserted in and removed from the running kernel whenever you want), 311 inserted in and removed from the running kernel whenever you want),
312 say M here and read <file:Documentation/kbuild/modules.txt>. 312 say M here and read <file:Documentation/kbuild/modules.txt>.
313 The module will be called prism54.ko. 313 The module will be called prism54.
314 314
315config USB_ZD1201 315config USB_ZD1201
316 tristate "USB ZD1201 based Wireless device support" 316 tristate "USB ZD1201 based Wireless device support"
diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig
index 932d207bce2..c15db229351 100644
--- a/drivers/net/wireless/hostap/Kconfig
+++ b/drivers/net/wireless/hostap/Kconfig
@@ -29,7 +29,7 @@ config HOSTAP
29 PLX/PCI/CS version of the driver to actually use the driver. 29 PLX/PCI/CS version of the driver to actually use the driver.
30 30
31 The driver can be compiled as a module and it will be called 31 The driver can be compiled as a module and it will be called
32 "hostap.ko". 32 hostap.
33 33
34config HOSTAP_FIRMWARE 34config HOSTAP_FIRMWARE
35 bool "Support downloading firmware images with Host AP driver" 35 bool "Support downloading firmware images with Host AP driver"
@@ -68,7 +68,7 @@ config HOSTAP_PLX
68 driver. 68 driver.
69 69
70 The driver can be compiled as a module and will be named 70 The driver can be compiled as a module and will be named
71 "hostap_plx.ko". 71 hostap_plx.
72 72
73config HOSTAP_PCI 73config HOSTAP_PCI
74 tristate "Host AP driver for Prism2.5 PCI adaptors" 74 tristate "Host AP driver for Prism2.5 PCI adaptors"
@@ -81,7 +81,7 @@ config HOSTAP_PCI
81 driver. 81 driver.
82 82
83 The driver can be compiled as a module and will be named 83 The driver can be compiled as a module and will be named
84 "hostap_pci.ko". 84 hostap_pci.
85 85
86config HOSTAP_CS 86config HOSTAP_CS
87 tristate "Host AP driver for Prism2/2.5/3 PC Cards" 87 tristate "Host AP driver for Prism2/2.5/3 PC Cards"
@@ -94,4 +94,4 @@ config HOSTAP_CS
94 driver. 94 driver.
95 95
96 The driver can be compiled as a module and will be named 96 The driver can be compiled as a module and will be named
97 "hostap_cs.ko". 97 hostap_cs.
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 029ccb6bdba..e092af09d6b 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -70,7 +70,7 @@ config IWLAGN
70 If you want to compile the driver as a module ( = code which can be 70 If you want to compile the driver as a module ( = code which can be
71 inserted in and removed from the running kernel whenever you want), 71 inserted in and removed from the running kernel whenever you want),
72 say M here and read <file:Documentation/kbuild/modules.txt>. The 72 say M here and read <file:Documentation/kbuild/modules.txt>. The
73 module will be called iwlagn.ko. 73 module will be called iwlagn.
74 74
75 75
76config IWL4965 76config IWL4965
@@ -108,7 +108,7 @@ config IWL3945
108 If you want to compile the driver as a module ( = code which can be 108 If you want to compile the driver as a module ( = code which can be
109 inserted in and removed from the running kernel whenever you want), 109 inserted in and removed from the running kernel whenever you want),
110 say M here and read <file:Documentation/kbuild/modules.txt>. The 110 say M here and read <file:Documentation/kbuild/modules.txt>. The
111 module will be called iwl3945.ko. 111 module will be called iwl3945.
112 112
113config IWL3945_SPECTRUM_MEASUREMENT 113config IWL3945_SPECTRUM_MEASUREMENT
114 bool "Enable Spectrum Measurement in iwl3945 driver" 114 bool "Enable Spectrum Measurement in iwl3945 driver"
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 7441d558511..3bec3dbd345 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -647,7 +647,7 @@ static int rndis_set_config_parameter(struct usbnet *dev, char *param,
647 ret = rndis_set_oid(dev, OID_GEN_RNDIS_CONFIG_PARAMETER, 647 ret = rndis_set_oid(dev, OID_GEN_RNDIS_CONFIG_PARAMETER,
648 infobuf, info_len); 648 infobuf, info_len);
649 if (ret != 0) 649 if (ret != 0)
650 devdbg(dev, "setting rndis config paramater failed, %d.", ret); 650 devdbg(dev, "setting rndis config parameter failed, %d.", ret);
651 651
652 kfree(infobuf); 652 kfree(infobuf);
653 return ret; 653 return ret;
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 18ee7d6c402..8aab3e6754b 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -9,11 +9,11 @@ menuconfig RT2X00
9 9
10 When building one of the individual drivers, the rt2x00 library 10 When building one of the individual drivers, the rt2x00 library
11 will also be created. That library (when the driver is built as 11 will also be created. That library (when the driver is built as
12 a module) will be called "rt2x00lib.ko". 12 a module) will be called rt2x00lib.
13 13
14 Additionally PCI and USB libraries will also be build depending 14 Additionally PCI and USB libraries will also be build depending
15 on the types of drivers being selected, these libraries will be 15 on the types of drivers being selected, these libraries will be
16 called "rt2x00pci.ko" and "rt2x00usb.ko". 16 called rt2x00pci and rt2x00usb.
17 17
18if RT2X00 18if RT2X00
19 19
@@ -26,7 +26,7 @@ config RT2400PCI
26 This adds support for rt2400 wireless chipset family. 26 This adds support for rt2400 wireless chipset family.
27 Supported chips: RT2460. 27 Supported chips: RT2460.
28 28
29 When compiled as a module, this driver will be called "rt2400pci.ko". 29 When compiled as a module, this driver will be called rt2400pci.
30 30
31config RT2500PCI 31config RT2500PCI
32 tristate "Ralink rt2500 (PCI/PCMCIA) support" 32 tristate "Ralink rt2500 (PCI/PCMCIA) support"
@@ -37,7 +37,7 @@ config RT2500PCI
37 This adds support for rt2500 wireless chipset family. 37 This adds support for rt2500 wireless chipset family.
38 Supported chips: RT2560. 38 Supported chips: RT2560.
39 39
40 When compiled as a module, this driver will be called "rt2500pci.ko". 40 When compiled as a module, this driver will be called rt2500pci.
41 41
42config RT61PCI 42config RT61PCI
43 tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support" 43 tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support"
@@ -51,7 +51,7 @@ config RT61PCI
51 This adds support for rt2501 wireless chipset family. 51 This adds support for rt2501 wireless chipset family.
52 Supported chips: RT2561, RT2561S & RT2661. 52 Supported chips: RT2561, RT2561S & RT2661.
53 53
54 When compiled as a module, this driver will be called "rt61pci.ko". 54 When compiled as a module, this driver will be called rt61pci.
55 55
56config RT2500USB 56config RT2500USB
57 tristate "Ralink rt2500 (USB) support" 57 tristate "Ralink rt2500 (USB) support"
@@ -62,7 +62,7 @@ config RT2500USB
62 This adds support for rt2500 wireless chipset family. 62 This adds support for rt2500 wireless chipset family.
63 Supported chips: RT2571 & RT2572. 63 Supported chips: RT2571 & RT2572.
64 64
65 When compiled as a module, this driver will be called "rt2500usb.ko". 65 When compiled as a module, this driver will be called rt2500usb.
66 66
67config RT73USB 67config RT73USB
68 tristate "Ralink rt2501/rt73 (USB) support" 68 tristate "Ralink rt2501/rt73 (USB) support"
@@ -75,7 +75,7 @@ config RT73USB
75 This adds support for rt2501 wireless chipset family. 75 This adds support for rt2501 wireless chipset family.
76 Supported chips: RT2571W, RT2573 & RT2671. 76 Supported chips: RT2571W, RT2573 & RT2671.
77 77
78 When compiled as a module, this driver will be called "rt73usb.ko". 78 When compiled as a module, this driver will be called rt73usb.
79 79
80config RT2800USB 80config RT2800USB
81 tristate "Ralink rt2800 (USB) support" 81 tristate "Ralink rt2800 (USB) support"
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 39e00b3d781..0bf2715fa93 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -261,7 +261,7 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna);
261 * @rt2x00dev: Pointer to &struct rt2x00_dev. 261 * @rt2x00dev: Pointer to &struct rt2x00_dev.
262 * 262 *
263 * Initialize work structure and all link tuning related 263 * Initialize work structure and all link tuning related
264 * paramters. This will not start the link tuning process itself. 264 * parameters. This will not start the link tuning process itself.
265 */ 265 */
266void rt2x00link_register(struct rt2x00_dev *rt2x00dev); 266void rt2x00link_register(struct rt2x00_dev *rt2x00dev);
267 267
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 1a90d69f18a..6af706408ac 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -138,7 +138,7 @@ psa_read(struct net_device * dev,
138 138
139/*------------------------------------------------------------------*/ 139/*------------------------------------------------------------------*/
140/* 140/*
141 * Write the Paramter Storage Area to the WaveLAN card's memory 141 * Write the Parameter Storage Area to the WaveLAN card's memory
142 */ 142 */
143static void 143static void
144psa_write(struct net_device * dev, 144psa_write(struct net_device * dev,
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 6fe043bd377..d2fa27c5c1b 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -1,22 +1,22 @@
1config OF_DEVICE 1config OF_DEVICE
2 def_bool y 2 def_bool y
3 depends on OF && (SPARC || PPC_OF) 3 depends on OF && (SPARC || PPC_OF || MICROBLAZE)
4 4
5config OF_GPIO 5config OF_GPIO
6 def_bool y 6 def_bool y
7 depends on OF && PPC_OF && GPIOLIB 7 depends on OF && (PPC_OF || MICROBLAZE) && GPIOLIB
8 help 8 help
9 OpenFirmware GPIO accessors 9 OpenFirmware GPIO accessors
10 10
11config OF_I2C 11config OF_I2C
12 def_tristate I2C 12 def_tristate I2C
13 depends on PPC_OF && I2C 13 depends on (PPC_OF || MICROBLAZE) && I2C
14 help 14 help
15 OpenFirmware I2C accessors 15 OpenFirmware I2C accessors
16 16
17config OF_SPI 17config OF_SPI
18 def_tristate SPI 18 def_tristate SPI
19 depends on OF && PPC_OF && SPI 19 depends on OF && (PPC_OF || MICROBLAZE) && SPI
20 help 20 help
21 OpenFirmware SPI accessors 21 OpenFirmware SPI accessors
22 22
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index f0e99d4c066..242257b1944 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -78,16 +78,20 @@ void free_cpu_buffers(void)
78 op_ring_buffer_write = NULL; 78 op_ring_buffer_write = NULL;
79} 79}
80 80
81#define RB_EVENT_HDR_SIZE 4
82
81int alloc_cpu_buffers(void) 83int alloc_cpu_buffers(void)
82{ 84{
83 int i; 85 int i;
84 86
85 unsigned long buffer_size = oprofile_cpu_buffer_size; 87 unsigned long buffer_size = oprofile_cpu_buffer_size;
88 unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
89 RB_EVENT_HDR_SIZE);
86 90
87 op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS); 91 op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
88 if (!op_ring_buffer_read) 92 if (!op_ring_buffer_read)
89 goto fail; 93 goto fail;
90 op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS); 94 op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
91 if (!op_ring_buffer_write) 95 if (!op_ring_buffer_write)
92 goto fail; 96 goto fail;
93 97
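
[Annotation] The oprofile fix above accounts for ring_buffer_alloc() taking a size in bytes, not entries: each op_sample costs its own size plus the ring-buffer event header. A back-of-the-envelope sketch of that computation; RB_EVENT_HDR_SIZE matches the patch, while SAMPLE_SIZE is a made-up stand-in for sizeof(struct op_sample):

/* Byte-size computation introduced by the patch, as standalone C. */
#include <stdio.h>

#define RB_EVENT_HDR_SIZE 4
#define SAMPLE_SIZE 32          /* assumption for illustration only */

int main(void)
{
    unsigned long buffer_size = 65536;  /* entries requested */
    unsigned long byte_size =
        buffer_size * (SAMPLE_SIZE + RB_EVENT_HDR_SIZE);

    printf("%lu entries -> %lu bytes per ring buffer\n",
           buffer_size, byte_size);
    return 0;
}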
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 73348c4047e..4a9cc92d4d1 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -702,7 +702,7 @@ static unsigned int iosapic_startup_irq(unsigned int irq)
702} 702}
703 703
704#ifdef CONFIG_SMP 704#ifdef CONFIG_SMP
705static void iosapic_set_affinity_irq(unsigned int irq, 705static int iosapic_set_affinity_irq(unsigned int irq,
706 const struct cpumask *dest) 706 const struct cpumask *dest)
707{ 707{
708 struct vector_info *vi = iosapic_get_vector(irq); 708 struct vector_info *vi = iosapic_get_vector(irq);
@@ -712,7 +712,7 @@ static void iosapic_set_affinity_irq(unsigned int irq,
712 712
713 dest_cpu = cpu_check_affinity(irq, dest); 713 dest_cpu = cpu_check_affinity(irq, dest);
714 if (dest_cpu < 0) 714 if (dest_cpu < 0)
715 return; 715 return -1;
716 716
717 cpumask_copy(irq_desc[irq].affinity, cpumask_of(dest_cpu)); 717 cpumask_copy(irq_desc[irq].affinity, cpumask_of(dest_cpu));
718 vi->txn_addr = txn_affinity_addr(irq, dest_cpu); 718 vi->txn_addr = txn_affinity_addr(irq, dest_cpu);
@@ -724,6 +724,8 @@ static void iosapic_set_affinity_irq(unsigned int irq,
724 iosapic_set_irt_data(vi, &dummy_d0, &d1); 724 iosapic_set_irt_data(vi, &dummy_d0, &d1);
725 iosapic_wr_irt_entry(vi, d0, d1); 725 iosapic_wr_irt_entry(vi, d0, d1);
726 spin_unlock_irqrestore(&iosapic_lock, flags); 726 spin_unlock_irqrestore(&iosapic_lock, flags);
727
728 return 0;
727} 729}
728#endif 730#endif
729 731
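
[Annotation] The iosapic hunks track an irq_chip API change: ->set_affinity callbacks now return int (0 on success, negative on failure) instead of void, so the early bail-out becomes "return -1" and the success path gains "return 0". A hedged sketch of the resulting control flow; check_affinity() and program_hw() are hypothetical stand-ins for cpu_check_affinity() and the IRT register update:

#include <stdio.h>

static int check_affinity(unsigned int irq) { (void)irq; return 2; }
static void program_hw(unsigned int irq, int cpu)
{
    printf("irq %u routed to cpu %d\n", irq, cpu);
}

static int set_affinity_irq(unsigned int irq)
{
    int cpu = check_affinity(irq);

    if (cpu < 0)
        return -1;      /* report failure instead of silently returning */

    program_hw(irq, cpu);
    return 0;           /* success */
}

int main(void) { return set_affinity_irq(17) ? 1 : 0; }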
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index e6a7e847ee8..ea31a452b15 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -352,8 +352,8 @@ static int __devinit parport_init_chip(struct parisc_device *dev)
352 unsigned long port; 352 unsigned long port;
353 353
354 if (!dev->irq) { 354 if (!dev->irq) {
355 printk(KERN_WARNING "IRQ not found for parallel device at 0x%lx\n", 355 printk(KERN_WARNING "IRQ not found for parallel device at 0x%llx\n",
356 dev->hpa.start); 356 (unsigned long long)dev->hpa.start);
357 return -ENODEV; 357 return -ENODEV;
358 } 358 }
359 359
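
[Annotation] dev->hpa.start is a resource_size_t, which can be 32 or 64 bits wide depending on configuration; the portable printk idiom used above is %llx with an explicit cast to unsigned long long. A self-contained illustration (the typedef here is only so the example stands alone; in the kernel it comes from <linux/types.h>):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t resource_size_t;   /* assumption: 64-bit resources */

int main(void)
{
    resource_size_t start = 0xfffffffff0000000ULL;

    /* Cast so the format matches whether resource_size_t is
     * 32 or 64 bits wide. */
    printf("device at 0x%llx\n", (unsigned long long)start);
    return 0;
}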
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 4e63cc9e277..151bf5bc8af 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1,5 +1,5 @@
1/* Low-level parallel-port routines for 8255-based PC-style hardware. 1/* Low-level parallel-port routines for 8255-based PC-style hardware.
2 * 2 *
3 * Authors: Phil Blundell <philb@gnu.org> 3 * Authors: Phil Blundell <philb@gnu.org>
4 * Tim Waugh <tim@cyberelk.demon.co.uk> 4 * Tim Waugh <tim@cyberelk.demon.co.uk>
5 * Jose Renau <renau@acm.org> 5 * Jose Renau <renau@acm.org>
@@ -11,7 +11,7 @@
11 * Cleaned up include files - Russell King <linux@arm.uk.linux.org> 11 * Cleaned up include files - Russell King <linux@arm.uk.linux.org>
12 * DMA support - Bert De Jonghe <bert@sophis.be> 12 * DMA support - Bert De Jonghe <bert@sophis.be>
13 * Many ECP bugs fixed. Fred Barnes & Jamie Lokier, 1999 13 * Many ECP bugs fixed. Fred Barnes & Jamie Lokier, 1999
14 * More PCI support now conditional on CONFIG_PCI, 03/2001, Paul G. 14 * More PCI support now conditional on CONFIG_PCI, 03/2001, Paul G.
15 * Various hacks, Fred Barnes, 04/2001 15 * Various hacks, Fred Barnes, 04/2001
16 * Updated probing logic - Adam Belay <ambx1@neo.rr.com> 16 * Updated probing logic - Adam Belay <ambx1@neo.rr.com>
17 */ 17 */
@@ -56,10 +56,10 @@
56#include <linux/pnp.h> 56#include <linux/pnp.h>
57#include <linux/platform_device.h> 57#include <linux/platform_device.h>
58#include <linux/sysctl.h> 58#include <linux/sysctl.h>
59#include <linux/io.h>
60#include <linux/uaccess.h>
59 61
60#include <asm/io.h>
61#include <asm/dma.h> 62#include <asm/dma.h>
62#include <asm/uaccess.h>
63 63
64#include <linux/parport.h> 64#include <linux/parport.h>
65#include <linux/parport_pc.h> 65#include <linux/parport_pc.h>
@@ -82,7 +82,7 @@
82#define ECR_TST 06 82#define ECR_TST 06
83#define ECR_CNF 07 83#define ECR_CNF 07
84#define ECR_MODE_MASK 0xe0 84#define ECR_MODE_MASK 0xe0
85#define ECR_WRITE(p,v) frob_econtrol((p),0xff,(v)) 85#define ECR_WRITE(p, v) frob_econtrol((p), 0xff, (v))
86 86
87#undef DEBUG 87#undef DEBUG
88 88
@@ -109,27 +109,27 @@ static int pci_registered_parport;
109static int pnp_registered_parport; 109static int pnp_registered_parport;
110 110
111/* frob_control, but for ECR */ 111/* frob_control, but for ECR */
112static void frob_econtrol (struct parport *pb, unsigned char m, 112static void frob_econtrol(struct parport *pb, unsigned char m,
113 unsigned char v) 113 unsigned char v)
114{ 114{
115 unsigned char ectr = 0; 115 unsigned char ectr = 0;
116 116
117 if (m != 0xff) 117 if (m != 0xff)
118 ectr = inb (ECONTROL (pb)); 118 ectr = inb(ECONTROL(pb));
119 119
120 DPRINTK (KERN_DEBUG "frob_econtrol(%02x,%02x): %02x -> %02x\n", 120 DPRINTK(KERN_DEBUG "frob_econtrol(%02x,%02x): %02x -> %02x\n",
121 m, v, ectr, (ectr & ~m) ^ v); 121 m, v, ectr, (ectr & ~m) ^ v);
122 122
123 outb ((ectr & ~m) ^ v, ECONTROL (pb)); 123 outb((ectr & ~m) ^ v, ECONTROL(pb));
124} 124}
125 125
126static __inline__ void frob_set_mode (struct parport *p, int mode) 126static inline void frob_set_mode(struct parport *p, int mode)
127{ 127{
128 frob_econtrol (p, ECR_MODE_MASK, mode << 5); 128 frob_econtrol(p, ECR_MODE_MASK, mode << 5);
129} 129}
130 130
131#ifdef CONFIG_PARPORT_PC_FIFO 131#ifdef CONFIG_PARPORT_PC_FIFO
132/* Safely change the mode bits in the ECR 132/* Safely change the mode bits in the ECR
133 Returns: 133 Returns:
134 0 : Success 134 0 : Success
135 -EBUSY: Could not drain FIFO in some finite amount of time, 135 -EBUSY: Could not drain FIFO in some finite amount of time,
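
[Annotation] frob_econtrol() above is a masked read-modify-write on the ECR: bits selected by the mask m are forced to the value v, everything else is preserved, and the inb() is skipped when the mask covers the whole register. The same arithmetic on a plain byte instead of an I/O port:

/* The (old & ~mask) ^ val update used by frob_econtrol(), minus the
 * inb()/outb() on ECONTROL(pb). With v confined to the mask, the
 * XOR behaves like an OR. */
#include <stdio.h>

static unsigned char frob(unsigned char reg, unsigned char m,
                          unsigned char v)
{
    /* keep bits outside m, force bits inside m to v */
    return (unsigned char)((reg & ~m) ^ v);
}

int main(void)
{
    unsigned char ecr = 0xf5;

    /* set the mode field <7:5> to PS2 (001) -> 0xf5 becomes 0x35 */
    ecr = frob(ecr, 0xe0, 1 << 5);
    printf("ecr = 0x%02x\n", ecr);
    return 0;
}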
@@ -141,17 +141,18 @@ static int change_mode(struct parport *p, int m)
141 unsigned char oecr; 141 unsigned char oecr;
142 int mode; 142 int mode;
143 143
144 DPRINTK(KERN_INFO "parport change_mode ECP-ISA to mode 0x%02x\n",m); 144 DPRINTK(KERN_INFO "parport change_mode ECP-ISA to mode 0x%02x\n", m);
145 145
146 if (!priv->ecr) { 146 if (!priv->ecr) {
147 printk (KERN_DEBUG "change_mode: but there's no ECR!\n"); 147 printk(KERN_DEBUG "change_mode: but there's no ECR!\n");
148 return 0; 148 return 0;
149 } 149 }
150 150
151 /* Bits <7:5> contain the mode. */ 151 /* Bits <7:5> contain the mode. */
152 oecr = inb (ECONTROL (p)); 152 oecr = inb(ECONTROL(p));
153 mode = (oecr >> 5) & 0x7; 153 mode = (oecr >> 5) & 0x7;
154 if (mode == m) return 0; 154 if (mode == m)
155 return 0;
155 156
156 if (mode >= 2 && !(priv->ctr & 0x20)) { 157 if (mode >= 2 && !(priv->ctr & 0x20)) {
157 /* This mode resets the FIFO, so we may 158 /* This mode resets the FIFO, so we may
@@ -163,19 +164,21 @@ static int change_mode(struct parport *p, int m)
163 case ECR_ECP: /* ECP Parallel Port mode */ 164 case ECR_ECP: /* ECP Parallel Port mode */
164 /* Busy wait for 200us */ 165 /* Busy wait for 200us */
165 for (counter = 0; counter < 40; counter++) { 166 for (counter = 0; counter < 40; counter++) {
166 if (inb (ECONTROL (p)) & 0x01) 167 if (inb(ECONTROL(p)) & 0x01)
168 break;
169 if (signal_pending(current))
167 break; 170 break;
168 if (signal_pending (current)) break; 171 udelay(5);
169 udelay (5);
170 } 172 }
171 173
172 /* Poll slowly. */ 174 /* Poll slowly. */
173 while (!(inb (ECONTROL (p)) & 0x01)) { 175 while (!(inb(ECONTROL(p)) & 0x01)) {
174 if (time_after_eq (jiffies, expire)) 176 if (time_after_eq(jiffies, expire))
175 /* The FIFO is stuck. */ 177 /* The FIFO is stuck. */
176 return -EBUSY; 178 return -EBUSY;
177 schedule_timeout_interruptible(msecs_to_jiffies(10)); 179 schedule_timeout_interruptible(
178 if (signal_pending (current)) 180 msecs_to_jiffies(10));
181 if (signal_pending(current))
179 break; 182 break;
180 } 183 }
181 } 184 }
@@ -185,20 +188,20 @@ static int change_mode(struct parport *p, int m)
185 /* We have to go through mode 001 */ 188 /* We have to go through mode 001 */
186 oecr &= ~(7 << 5); 189 oecr &= ~(7 << 5);
187 oecr |= ECR_PS2 << 5; 190 oecr |= ECR_PS2 << 5;
188 ECR_WRITE (p, oecr); 191 ECR_WRITE(p, oecr);
189 } 192 }
190 193
191 /* Set the mode. */ 194 /* Set the mode. */
192 oecr &= ~(7 << 5); 195 oecr &= ~(7 << 5);
193 oecr |= m << 5; 196 oecr |= m << 5;
194 ECR_WRITE (p, oecr); 197 ECR_WRITE(p, oecr);
195 return 0; 198 return 0;
196} 199}
197 200
198#ifdef CONFIG_PARPORT_1284 201#ifdef CONFIG_PARPORT_1284
199/* Find FIFO lossage; FIFO is reset */ 202/* Find FIFO lossage; FIFO is reset */
200#if 0 203#if 0
201static int get_fifo_residue (struct parport *p) 204static int get_fifo_residue(struct parport *p)
202{ 205{
203 int residue; 206 int residue;
204 int cnfga; 207 int cnfga;
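
[Annotation] change_mode() above extracts the current mode from ECR bits <7:5> and routes transitions through PS2 (mode 001) before writing the target mode. A hedged sketch of just the bit manipulation; the FIFO draining, signal handling, and the priv->ctr check from the real function are deliberately omitted:

#include <stdio.h>

#define ECR_PS2 1

static unsigned char set_mode(unsigned char oecr, int m)
{
    if (((oecr >> 5) & 0x7) >= 2) {
        /* we have to go through mode 001 first */
        oecr &= ~(7 << 5);
        oecr |= ECR_PS2 << 5;
        /* the real driver does an ECR_WRITE() here */
    }
    /* set the target mode */
    oecr &= ~(7 << 5);
    oecr |= m << 5;
    return oecr;
}

int main(void)
{
    /* mode field 3 -> mode field 4; 0x65 becomes 0x85 */
    printf("0x%02x\n", set_mode(0x65, 4));
    return 0;
}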
@@ -206,26 +209,26 @@ static int get_fifo_residue (struct parport *p)
206 209
207 /* Adjust for the contents of the FIFO. */ 210 /* Adjust for the contents of the FIFO. */
208 for (residue = priv->fifo_depth; ; residue--) { 211 for (residue = priv->fifo_depth; ; residue--) {
209 if (inb (ECONTROL (p)) & 0x2) 212 if (inb(ECONTROL(p)) & 0x2)
210 /* Full up. */ 213 /* Full up. */
211 break; 214 break;
212 215
213 outb (0, FIFO (p)); 216 outb(0, FIFO(p));
214 } 217 }
215 218
216 printk (KERN_DEBUG "%s: %d PWords were left in FIFO\n", p->name, 219 printk(KERN_DEBUG "%s: %d PWords were left in FIFO\n", p->name,
217 residue); 220 residue);
218 221
219 /* Reset the FIFO. */ 222 /* Reset the FIFO. */
220 frob_set_mode (p, ECR_PS2); 223 frob_set_mode(p, ECR_PS2);
221 224
222 /* Now change to config mode and clean up. FIXME */ 225 /* Now change to config mode and clean up. FIXME */
223 frob_set_mode (p, ECR_CNF); 226 frob_set_mode(p, ECR_CNF);
224 cnfga = inb (CONFIGA (p)); 227 cnfga = inb(CONFIGA(p));
225 printk (KERN_DEBUG "%s: cnfgA contains 0x%02x\n", p->name, cnfga); 228 printk(KERN_DEBUG "%s: cnfgA contains 0x%02x\n", p->name, cnfga);
226 229
227 if (!(cnfga & (1<<2))) { 230 if (!(cnfga & (1<<2))) {
228 printk (KERN_DEBUG "%s: Accounting for extra byte\n", p->name); 231 printk(KERN_DEBUG "%s: Accounting for extra byte\n", p->name);
229 residue++; 232 residue++;
230 } 233 }
231 234
@@ -233,9 +236,11 @@ static int get_fifo_residue (struct parport *p)
233 * PWord != 1 byte. */ 236 * PWord != 1 byte. */
234 237
235 /* Back to PS2 mode. */ 238 /* Back to PS2 mode. */
236 frob_set_mode (p, ECR_PS2); 239 frob_set_mode(p, ECR_PS2);
237 240
238 DPRINTK (KERN_DEBUG "*** get_fifo_residue: done residue collecting (ecr = 0x%2.2x)\n", inb (ECONTROL (p))); 241 DPRINTK(KERN_DEBUG
242 "*** get_fifo_residue: done residue collecting (ecr = 0x%2.2x)\n",
243 inb(ECONTROL(p)));
239 return residue; 244 return residue;
240} 245}
241#endif /* 0 */ 246#endif /* 0 */
@@ -257,8 +262,8 @@ static int clear_epp_timeout(struct parport *pb)
257 /* To clear timeout some chips require double read */ 262 /* To clear timeout some chips require double read */
258 parport_pc_read_status(pb); 263 parport_pc_read_status(pb);
259 r = parport_pc_read_status(pb); 264 r = parport_pc_read_status(pb);
260 outb (r | 0x01, STATUS (pb)); /* Some reset by writing 1 */ 265 outb(r | 0x01, STATUS(pb)); /* Some reset by writing 1 */
261 outb (r & 0xfe, STATUS (pb)); /* Others by writing 0 */ 266 outb(r & 0xfe, STATUS(pb)); /* Others by writing 0 */
262 r = parport_pc_read_status(pb); 267 r = parport_pc_read_status(pb);
263 268
264 return !(r & 0x01); 269 return !(r & 0x01);
@@ -272,7 +277,8 @@ static int clear_epp_timeout(struct parport *pb)
272 * of these are in parport_pc.h. 277 * of these are in parport_pc.h.
273 */ 278 */
274 279
275static void parport_pc_init_state(struct pardevice *dev, struct parport_state *s) 280static void parport_pc_init_state(struct pardevice *dev,
281 struct parport_state *s)
276{ 282{
277 s->u.pc.ctr = 0xc; 283 s->u.pc.ctr = 0xc;
278 if (dev->irq_func && 284 if (dev->irq_func &&
@@ -289,22 +295,23 @@ static void parport_pc_save_state(struct parport *p, struct parport_state *s)
289 const struct parport_pc_private *priv = p->physport->private_data; 295 const struct parport_pc_private *priv = p->physport->private_data;
290 s->u.pc.ctr = priv->ctr; 296 s->u.pc.ctr = priv->ctr;
291 if (priv->ecr) 297 if (priv->ecr)
292 s->u.pc.ecr = inb (ECONTROL (p)); 298 s->u.pc.ecr = inb(ECONTROL(p));
293} 299}
294 300
295static void parport_pc_restore_state(struct parport *p, struct parport_state *s) 301static void parport_pc_restore_state(struct parport *p,
302 struct parport_state *s)
296{ 303{
297 struct parport_pc_private *priv = p->physport->private_data; 304 struct parport_pc_private *priv = p->physport->private_data;
298 register unsigned char c = s->u.pc.ctr & priv->ctr_writable; 305 register unsigned char c = s->u.pc.ctr & priv->ctr_writable;
299 outb (c, CONTROL (p)); 306 outb(c, CONTROL(p));
300 priv->ctr = c; 307 priv->ctr = c;
301 if (priv->ecr) 308 if (priv->ecr)
302 ECR_WRITE (p, s->u.pc.ecr); 309 ECR_WRITE(p, s->u.pc.ecr);
303} 310}
304 311
305#ifdef CONFIG_PARPORT_1284 312#ifdef CONFIG_PARPORT_1284
306static size_t parport_pc_epp_read_data (struct parport *port, void *buf, 313static size_t parport_pc_epp_read_data(struct parport *port, void *buf,
307 size_t length, int flags) 314 size_t length, int flags)
308{ 315{
309 size_t got = 0; 316 size_t got = 0;
310 317
@@ -316,54 +323,52 @@ static size_t parport_pc_epp_read_data (struct parport *port, void *buf,
316 * nFault is 0 if there is at least 1 byte in the Warp's FIFO 323 * nFault is 0 if there is at least 1 byte in the Warp's FIFO
317 * pError is 1 if there are 16 bytes in the Warp's FIFO 324 * pError is 1 if there are 16 bytes in the Warp's FIFO
318 */ 325 */
319 status = inb (STATUS (port)); 326 status = inb(STATUS(port));
320 327
321 while (!(status & 0x08) && (got < length)) { 328 while (!(status & 0x08) && got < length) {
322 if ((left >= 16) && (status & 0x20) && !(status & 0x08)) { 329 if (left >= 16 && (status & 0x20) && !(status & 0x08)) {
323 /* can grab 16 bytes from warp fifo */ 330 /* can grab 16 bytes from warp fifo */
324 if (!((long)buf & 0x03)) { 331 if (!((long)buf & 0x03))
325 insl (EPPDATA (port), buf, 4); 332 insl(EPPDATA(port), buf, 4);
326 } else { 333 else
327 insb (EPPDATA (port), buf, 16); 334 insb(EPPDATA(port), buf, 16);
328 }
329 buf += 16; 335 buf += 16;
330 got += 16; 336 got += 16;
331 left -= 16; 337 left -= 16;
332 } else { 338 } else {
333 /* grab single byte from the warp fifo */ 339 /* grab single byte from the warp fifo */
334 *((char *)buf) = inb (EPPDATA (port)); 340 *((char *)buf) = inb(EPPDATA(port));
335 buf++; 341 buf++;
336 got++; 342 got++;
337 left--; 343 left--;
338 } 344 }
339 status = inb (STATUS (port)); 345 status = inb(STATUS(port));
340 if (status & 0x01) { 346 if (status & 0x01) {
341 /* EPP timeout should never occur... */ 347 /* EPP timeout should never occur... */
342 printk (KERN_DEBUG "%s: EPP timeout occurred while talking to " 348 printk(KERN_DEBUG
343 "w91284pic (should not have done)\n", port->name); 349"%s: EPP timeout occurred while talking to w91284pic (should not have done)\n", port->name);
344 clear_epp_timeout (port); 350 clear_epp_timeout(port);
345 } 351 }
346 } 352 }
347 return got; 353 return got;
348 } 354 }
349 if ((flags & PARPORT_EPP_FAST) && (length > 1)) { 355 if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
350 if (!(((long)buf | length) & 0x03)) { 356 if (!(((long)buf | length) & 0x03))
351 insl (EPPDATA (port), buf, (length >> 2)); 357 insl(EPPDATA(port), buf, (length >> 2));
352 } else { 358 else
353 insb (EPPDATA (port), buf, length); 359 insb(EPPDATA(port), buf, length);
354 } 360 if (inb(STATUS(port)) & 0x01) {
355 if (inb (STATUS (port)) & 0x01) { 361 clear_epp_timeout(port);
356 clear_epp_timeout (port);
357 return -EIO; 362 return -EIO;
358 } 363 }
359 return length; 364 return length;
360 } 365 }
361 for (; got < length; got++) { 366 for (; got < length; got++) {
362 *((char*)buf) = inb (EPPDATA(port)); 367 *((char *)buf) = inb(EPPDATA(port));
363 buf++; 368 buf++;
364 if (inb (STATUS (port)) & 0x01) { 369 if (inb(STATUS(port)) & 0x01) {
365 /* EPP timeout */ 370 /* EPP timeout */
366 clear_epp_timeout (port); 371 clear_epp_timeout(port);
367 break; 372 break;
368 } 373 }
369 } 374 }
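
[Annotation] The PARPORT_EPP_FAST path above uses 32-bit string I/O (insl) only when both the buffer pointer and the length are 4-byte aligned, falling back to byte-wide insb otherwise; OR-ing the pointer bits with the length lets one AND test catch misalignment in either. A standalone demonstration of that combined test:

#include <stdio.h>
#include <stddef.h>

static int can_use_32bit_io(const void *buf, size_t length)
{
    return !(((unsigned long)buf | length) & 0x03);
}

int main(void)
{
    long pool[8];               /* at least 4-byte aligned storage */
    char *p = (char *)pool;

    printf("%d\n", can_use_32bit_io(p, 16));     /* 1: both aligned */
    printf("%d\n", can_use_32bit_io(p + 1, 16)); /* 0: odd pointer */
    printf("%d\n", can_use_32bit_io(p, 15));     /* 0: odd length */
    return 0;
}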
@@ -371,28 +376,27 @@ static size_t parport_pc_epp_read_data (struct parport *port, void *buf,
371 return got; 376 return got;
372} 377}
373 378
374static size_t parport_pc_epp_write_data (struct parport *port, const void *buf, 379static size_t parport_pc_epp_write_data(struct parport *port, const void *buf,
375 size_t length, int flags) 380 size_t length, int flags)
376{ 381{
377 size_t written = 0; 382 size_t written = 0;
378 383
379 if ((flags & PARPORT_EPP_FAST) && (length > 1)) { 384 if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
380 if (!(((long)buf | length) & 0x03)) { 385 if (!(((long)buf | length) & 0x03))
381 outsl (EPPDATA (port), buf, (length >> 2)); 386 outsl(EPPDATA(port), buf, (length >> 2));
382 } else { 387 else
383 outsb (EPPDATA (port), buf, length); 388 outsb(EPPDATA(port), buf, length);
384 } 389 if (inb(STATUS(port)) & 0x01) {
385 if (inb (STATUS (port)) & 0x01) { 390 clear_epp_timeout(port);
386 clear_epp_timeout (port);
387 return -EIO; 391 return -EIO;
388 } 392 }
389 return length; 393 return length;
390 } 394 }
391 for (; written < length; written++) { 395 for (; written < length; written++) {
392 outb (*((char*)buf), EPPDATA(port)); 396 outb(*((char *)buf), EPPDATA(port));
393 buf++; 397 buf++;
394 if (inb (STATUS(port)) & 0x01) { 398 if (inb(STATUS(port)) & 0x01) {
395 clear_epp_timeout (port); 399 clear_epp_timeout(port);
396 break; 400 break;
397 } 401 }
398 } 402 }
@@ -400,24 +404,24 @@ static size_t parport_pc_epp_write_data (struct parport *port, const void *buf,
400 return written; 404 return written;
401} 405}
402 406
403static size_t parport_pc_epp_read_addr (struct parport *port, void *buf, 407static size_t parport_pc_epp_read_addr(struct parport *port, void *buf,
404 size_t length, int flags) 408 size_t length, int flags)
405{ 409{
406 size_t got = 0; 410 size_t got = 0;
407 411
408 if ((flags & PARPORT_EPP_FAST) && (length > 1)) { 412 if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
409 insb (EPPADDR (port), buf, length); 413 insb(EPPADDR(port), buf, length);
410 if (inb (STATUS (port)) & 0x01) { 414 if (inb(STATUS(port)) & 0x01) {
411 clear_epp_timeout (port); 415 clear_epp_timeout(port);
412 return -EIO; 416 return -EIO;
413 } 417 }
414 return length; 418 return length;
415 } 419 }
416 for (; got < length; got++) { 420 for (; got < length; got++) {
417 *((char*)buf) = inb (EPPADDR (port)); 421 *((char *)buf) = inb(EPPADDR(port));
418 buf++; 422 buf++;
419 if (inb (STATUS (port)) & 0x01) { 423 if (inb(STATUS(port)) & 0x01) {
420 clear_epp_timeout (port); 424 clear_epp_timeout(port);
421 break; 425 break;
422 } 426 }
423 } 427 }
@@ -425,25 +429,25 @@ static size_t parport_pc_epp_read_addr (struct parport *port, void *buf,
425 return got; 429 return got;
426} 430}
427 431
428static size_t parport_pc_epp_write_addr (struct parport *port, 432static size_t parport_pc_epp_write_addr(struct parport *port,
429 const void *buf, size_t length, 433 const void *buf, size_t length,
430 int flags) 434 int flags)
431{ 435{
432 size_t written = 0; 436 size_t written = 0;
433 437
434 if ((flags & PARPORT_EPP_FAST) && (length > 1)) { 438 if ((flags & PARPORT_EPP_FAST) && (length > 1)) {
435 outsb (EPPADDR (port), buf, length); 439 outsb(EPPADDR(port), buf, length);
436 if (inb (STATUS (port)) & 0x01) { 440 if (inb(STATUS(port)) & 0x01) {
437 clear_epp_timeout (port); 441 clear_epp_timeout(port);
438 return -EIO; 442 return -EIO;
439 } 443 }
440 return length; 444 return length;
441 } 445 }
442 for (; written < length; written++) { 446 for (; written < length; written++) {
443 outb (*((char*)buf), EPPADDR (port)); 447 outb(*((char *)buf), EPPADDR(port));
444 buf++; 448 buf++;
445 if (inb (STATUS (port)) & 0x01) { 449 if (inb(STATUS(port)) & 0x01) {
446 clear_epp_timeout (port); 450 clear_epp_timeout(port);
447 break; 451 break;
448 } 452 }
449 } 453 }
@@ -451,74 +455,74 @@ static size_t parport_pc_epp_write_addr (struct parport *port,
451 return written; 455 return written;
452} 456}
453 457
454static size_t parport_pc_ecpepp_read_data (struct parport *port, void *buf, 458static size_t parport_pc_ecpepp_read_data(struct parport *port, void *buf,
455 size_t length, int flags) 459 size_t length, int flags)
456{ 460{
457 size_t got; 461 size_t got;
458 462
459 frob_set_mode (port, ECR_EPP); 463 frob_set_mode(port, ECR_EPP);
460 parport_pc_data_reverse (port); 464 parport_pc_data_reverse(port);
461 parport_pc_write_control (port, 0x4); 465 parport_pc_write_control(port, 0x4);
462 got = parport_pc_epp_read_data (port, buf, length, flags); 466 got = parport_pc_epp_read_data(port, buf, length, flags);
463 frob_set_mode (port, ECR_PS2); 467 frob_set_mode(port, ECR_PS2);
464 468
465 return got; 469 return got;
466} 470}
467 471
468static size_t parport_pc_ecpepp_write_data (struct parport *port, 472static size_t parport_pc_ecpepp_write_data(struct parport *port,
469 const void *buf, size_t length, 473 const void *buf, size_t length,
470 int flags) 474 int flags)
471{ 475{
472 size_t written; 476 size_t written;
473 477
474 frob_set_mode (port, ECR_EPP); 478 frob_set_mode(port, ECR_EPP);
475 parport_pc_write_control (port, 0x4); 479 parport_pc_write_control(port, 0x4);
476 parport_pc_data_forward (port); 480 parport_pc_data_forward(port);
477 written = parport_pc_epp_write_data (port, buf, length, flags); 481 written = parport_pc_epp_write_data(port, buf, length, flags);
478 frob_set_mode (port, ECR_PS2); 482 frob_set_mode(port, ECR_PS2);
479 483
480 return written; 484 return written;
481} 485}
482 486
483static size_t parport_pc_ecpepp_read_addr (struct parport *port, void *buf, 487static size_t parport_pc_ecpepp_read_addr(struct parport *port, void *buf,
484 size_t length, int flags) 488 size_t length, int flags)
485{ 489{
486 size_t got; 490 size_t got;
487 491
488 frob_set_mode (port, ECR_EPP); 492 frob_set_mode(port, ECR_EPP);
489 parport_pc_data_reverse (port); 493 parport_pc_data_reverse(port);
490 parport_pc_write_control (port, 0x4); 494 parport_pc_write_control(port, 0x4);
491 got = parport_pc_epp_read_addr (port, buf, length, flags); 495 got = parport_pc_epp_read_addr(port, buf, length, flags);
492 frob_set_mode (port, ECR_PS2); 496 frob_set_mode(port, ECR_PS2);
493 497
494 return got; 498 return got;
495} 499}
496 500
497static size_t parport_pc_ecpepp_write_addr (struct parport *port, 501static size_t parport_pc_ecpepp_write_addr(struct parport *port,
498 const void *buf, size_t length, 502 const void *buf, size_t length,
499 int flags) 503 int flags)
500{ 504{
501 size_t written; 505 size_t written;
502 506
503 frob_set_mode (port, ECR_EPP); 507 frob_set_mode(port, ECR_EPP);
504 parport_pc_write_control (port, 0x4); 508 parport_pc_write_control(port, 0x4);
505 parport_pc_data_forward (port); 509 parport_pc_data_forward(port);
506 written = parport_pc_epp_write_addr (port, buf, length, flags); 510 written = parport_pc_epp_write_addr(port, buf, length, flags);
507 frob_set_mode (port, ECR_PS2); 511 frob_set_mode(port, ECR_PS2);
508 512
509 return written; 513 return written;
510} 514}
511#endif /* IEEE 1284 support */ 515#endif /* IEEE 1284 support */
512 516
513#ifdef CONFIG_PARPORT_PC_FIFO 517#ifdef CONFIG_PARPORT_PC_FIFO
514static size_t parport_pc_fifo_write_block_pio (struct parport *port, 518static size_t parport_pc_fifo_write_block_pio(struct parport *port,
515 const void *buf, size_t length) 519 const void *buf, size_t length)
516{ 520{
517 int ret = 0; 521 int ret = 0;
518 const unsigned char *bufp = buf; 522 const unsigned char *bufp = buf;
519 size_t left = length; 523 size_t left = length;
520 unsigned long expire = jiffies + port->physport->cad->timeout; 524 unsigned long expire = jiffies + port->physport->cad->timeout;
521 const int fifo = FIFO (port); 525 const int fifo = FIFO(port);
522 int poll_for = 8; /* 80 usecs */ 526 int poll_for = 8; /* 80 usecs */
523 const struct parport_pc_private *priv = port->physport->private_data; 527 const struct parport_pc_private *priv = port->physport->private_data;
524 const int fifo_depth = priv->fifo_depth; 528 const int fifo_depth = priv->fifo_depth;
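
[Annotation] Each ecpepp wrapper above brackets the raw EPP transfer with a mode switch: frob into EPP, set the data direction, run the transfer, then always drop back to PS2. A skeleton of that bracket; enter_epp(), leave_ps2() and do_epp_read() are hypothetical stubs for frob_set_mode(port, ECR_EPP/ECR_PS2) and the raw EPP helper:

#include <stdio.h>

static void enter_epp(void) { puts("mode: EPP"); }
static void leave_ps2(void) { puts("mode: PS2"); }
static long do_epp_read(void *buf, long len) { (void)buf; return len; }

static long ecpepp_read(void *buf, long len)
{
    long got;

    enter_epp();                 /* switch the ECR mode field */
    got = do_epp_read(buf, len); /* direction change + transfer */
    leave_ps2();                 /* restore PS2 no matter what */
    return got;
}

int main(void)
{
    char buf[8];
    printf("got %ld bytes\n", ecpepp_read(buf, (long)sizeof buf));
    return 0;
}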
@@ -526,25 +530,25 @@ static size_t parport_pc_fifo_write_block_pio (struct parport *port,
526 port = port->physport; 530 port = port->physport;
527 531
528 /* We don't want to be interrupted every character. */ 532 /* We don't want to be interrupted every character. */
529 parport_pc_disable_irq (port); 533 parport_pc_disable_irq(port);
530 /* set nErrIntrEn and serviceIntr */ 534 /* set nErrIntrEn and serviceIntr */
531 frob_econtrol (port, (1<<4) | (1<<2), (1<<4) | (1<<2)); 535 frob_econtrol(port, (1<<4) | (1<<2), (1<<4) | (1<<2));
532 536
533 /* Forward mode. */ 537 /* Forward mode. */
534 parport_pc_data_forward (port); /* Must be in PS2 mode */ 538 parport_pc_data_forward(port); /* Must be in PS2 mode */
535 539
536 while (left) { 540 while (left) {
537 unsigned char byte; 541 unsigned char byte;
538 unsigned char ecrval = inb (ECONTROL (port)); 542 unsigned char ecrval = inb(ECONTROL(port));
539 int i = 0; 543 int i = 0;
540 544
541 if (need_resched() && time_before (jiffies, expire)) 545 if (need_resched() && time_before(jiffies, expire))
542 /* Can't yield the port. */ 546 /* Can't yield the port. */
543 schedule (); 547 schedule();
544 548
545 /* Anyone else waiting for the port? */ 549 /* Anyone else waiting for the port? */
546 if (port->waithead) { 550 if (port->waithead) {
547 printk (KERN_DEBUG "Somebody wants the port\n"); 551 printk(KERN_DEBUG "Somebody wants the port\n");
548 break; 552 break;
549 } 553 }
550 554
@@ -552,21 +556,22 @@ static size_t parport_pc_fifo_write_block_pio (struct parport *port,
552 /* FIFO is full. Wait for interrupt. */ 556 /* FIFO is full. Wait for interrupt. */
553 557
554 /* Clear serviceIntr */ 558 /* Clear serviceIntr */
555 ECR_WRITE (port, ecrval & ~(1<<2)); 559 ECR_WRITE(port, ecrval & ~(1<<2));
556 false_alarm: 560false_alarm:
557 ret = parport_wait_event (port, HZ); 561 ret = parport_wait_event(port, HZ);
558 if (ret < 0) break; 562 if (ret < 0)
563 break;
559 ret = 0; 564 ret = 0;
560 if (!time_before (jiffies, expire)) { 565 if (!time_before(jiffies, expire)) {
561 /* Timed out. */ 566 /* Timed out. */
562 printk (KERN_DEBUG "FIFO write timed out\n"); 567 printk(KERN_DEBUG "FIFO write timed out\n");
563 break; 568 break;
564 } 569 }
565 ecrval = inb (ECONTROL (port)); 570 ecrval = inb(ECONTROL(port));
566 if (!(ecrval & (1<<2))) { 571 if (!(ecrval & (1<<2))) {
567 if (need_resched() && 572 if (need_resched() &&
568 time_before (jiffies, expire)) 573 time_before(jiffies, expire))
569 schedule (); 574 schedule();
570 575
571 goto false_alarm; 576 goto false_alarm;
572 } 577 }
@@ -577,38 +582,38 @@ static size_t parport_pc_fifo_write_block_pio (struct parport *port,
577 /* Can't fail now. */ 582 /* Can't fail now. */
578 expire = jiffies + port->cad->timeout; 583 expire = jiffies + port->cad->timeout;
579 584
580 poll: 585poll:
581 if (signal_pending (current)) 586 if (signal_pending(current))
582 break; 587 break;
583 588
584 if (ecrval & 0x01) { 589 if (ecrval & 0x01) {
585 /* FIFO is empty. Blast it full. */ 590 /* FIFO is empty. Blast it full. */
586 const int n = left < fifo_depth ? left : fifo_depth; 591 const int n = left < fifo_depth ? left : fifo_depth;
587 outsb (fifo, bufp, n); 592 outsb(fifo, bufp, n);
588 bufp += n; 593 bufp += n;
589 left -= n; 594 left -= n;
590 595
591 /* Adjust the poll time. */ 596 /* Adjust the poll time. */
592 if (i < (poll_for - 2)) poll_for--; 597 if (i < (poll_for - 2))
598 poll_for--;
593 continue; 599 continue;
594 } else if (i++ < poll_for) { 600 } else if (i++ < poll_for) {
595 udelay (10); 601 udelay(10);
596 ecrval = inb (ECONTROL (port)); 602 ecrval = inb(ECONTROL(port));
597 goto poll; 603 goto poll;
598 } 604 }
599 605
600 /* Half-full (call me an optimist) */ 606 /* Half-full(call me an optimist) */
601 byte = *bufp++; 607 byte = *bufp++;
602 outb (byte, fifo); 608 outb(byte, fifo);
603 left--; 609 left--;
604 } 610 }
605 611 dump_parport_state("leave fifo_write_block_pio", port);
606dump_parport_state ("leave fifo_write_block_pio", port);
607 return length - left; 612 return length - left;
608} 613}
609 614
610#ifdef HAS_DMA 615#ifdef HAS_DMA
611static size_t parport_pc_fifo_write_block_dma (struct parport *port, 616static size_t parport_pc_fifo_write_block_dma(struct parport *port,
612 const void *buf, size_t length) 617 const void *buf, size_t length)
613{ 618{
614 int ret = 0; 619 int ret = 0;
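
[Annotation] The PIO fast path above polls the ECR empty bit up to poll_for times, 10 usecs apart, before falling back to a single-byte write; when the FIFO drains quickly the poll budget is trimmed so the loop adapts to the device's rate. A standalone model of that adaptive accounting, with a fake FIFO that empties after a fixed number of polls:

#include <stdio.h>

static int polls_until_empty = 3;           /* fake hardware */
static int fifo_empty(void) { return --polls_until_empty <= 0; }

int main(void)
{
    int poll_for = 8;   /* 80 usecs at 10 usecs per poll */
    int i = 0;

    while (!fifo_empty() && i++ < poll_for)
        ;               /* real code: udelay(10); re-read ECR */

    if (i < poll_for - 2)
        poll_for--;     /* FIFO drained fast: poll less next time */

    printf("polled %d times, next budget %d\n", i, poll_for);
    return 0;
}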
@@ -621,7 +626,7 @@ static size_t parport_pc_fifo_write_block_dma (struct parport *port,
621 unsigned long start = (unsigned long) buf; 626 unsigned long start = (unsigned long) buf;
622 unsigned long end = (unsigned long) buf + length - 1; 627 unsigned long end = (unsigned long) buf + length - 1;
623 628
624dump_parport_state ("enter fifo_write_block_dma", port); 629 dump_parport_state("enter fifo_write_block_dma", port);
625 if (end < MAX_DMA_ADDRESS) { 630 if (end < MAX_DMA_ADDRESS) {
626 /* If it would cross a 64k boundary, cap it at the end. */ 631 /* If it would cross a 64k boundary, cap it at the end. */
627 if ((start ^ end) & ~0xffffUL) 632 if ((start ^ end) & ~0xffffUL)
@@ -629,8 +634,9 @@ dump_parport_state ("enter fifo_write_block_dma", port);
629 634
630 dma_addr = dma_handle = dma_map_single(dev, (void *)buf, length, 635 dma_addr = dma_handle = dma_map_single(dev, (void *)buf, length,
631 DMA_TO_DEVICE); 636 DMA_TO_DEVICE);
632 } else { 637 } else {
633 /* above 16 MB we use a bounce buffer as ISA-DMA is not possible */ 638 /* above 16 MB we use a bounce buffer as ISA-DMA
639 is not possible */
634 maxlen = PAGE_SIZE; /* sizeof(priv->dma_buf) */ 640 maxlen = PAGE_SIZE; /* sizeof(priv->dma_buf) */
635 dma_addr = priv->dma_handle; 641 dma_addr = priv->dma_handle;
636 dma_handle = 0; 642 dma_handle = 0;
@@ -639,12 +645,12 @@ dump_parport_state ("enter fifo_write_block_dma", port);
639 port = port->physport; 645 port = port->physport;
640 646
641 /* We don't want to be interrupted every character. */ 647 /* We don't want to be interrupted every character. */
642 parport_pc_disable_irq (port); 648 parport_pc_disable_irq(port);
643 /* set nErrIntrEn and serviceIntr */ 649 /* set nErrIntrEn and serviceIntr */
644 frob_econtrol (port, (1<<4) | (1<<2), (1<<4) | (1<<2)); 650 frob_econtrol(port, (1<<4) | (1<<2), (1<<4) | (1<<2));
645 651
646 /* Forward mode. */ 652 /* Forward mode. */
647 parport_pc_data_forward (port); /* Must be in PS2 mode */ 653 parport_pc_data_forward(port); /* Must be in PS2 mode */
648 654
649 while (left) { 655 while (left) {
650 unsigned long expire = jiffies + port->physport->cad->timeout; 656 unsigned long expire = jiffies + port->physport->cad->timeout;
@@ -665,10 +671,10 @@ dump_parport_state ("enter fifo_write_block_dma", port);
665 set_dma_count(port->dma, count); 671 set_dma_count(port->dma, count);
666 672
667 /* Set DMA mode */ 673 /* Set DMA mode */
668 frob_econtrol (port, 1<<3, 1<<3); 674 frob_econtrol(port, 1<<3, 1<<3);
669 675
670 /* Clear serviceIntr */ 676 /* Clear serviceIntr */
671 frob_econtrol (port, 1<<2, 0); 677 frob_econtrol(port, 1<<2, 0);
672 678
673 enable_dma(port->dma); 679 enable_dma(port->dma);
674 release_dma_lock(dmaflag); 680 release_dma_lock(dmaflag);
@@ -676,20 +682,22 @@ dump_parport_state ("enter fifo_write_block_dma", port);
676 /* assume DMA will be successful */ 682 /* assume DMA will be successful */
677 left -= count; 683 left -= count;
678 buf += count; 684 buf += count;
679 if (dma_handle) dma_addr += count; 685 if (dma_handle)
686 dma_addr += count;
680 687
681 /* Wait for interrupt. */ 688 /* Wait for interrupt. */
682 false_alarm: 689false_alarm:
683 ret = parport_wait_event (port, HZ); 690 ret = parport_wait_event(port, HZ);
684 if (ret < 0) break; 691 if (ret < 0)
692 break;
685 ret = 0; 693 ret = 0;
686 if (!time_before (jiffies, expire)) { 694 if (!time_before(jiffies, expire)) {
687 /* Timed out. */ 695 /* Timed out. */
688 printk (KERN_DEBUG "DMA write timed out\n"); 696 printk(KERN_DEBUG "DMA write timed out\n");
689 break; 697 break;
690 } 698 }
691 /* Is serviceIntr set? */ 699 /* Is serviceIntr set? */
692 if (!(inb (ECONTROL (port)) & (1<<2))) { 700 if (!(inb(ECONTROL(port)) & (1<<2))) {
693 cond_resched(); 701 cond_resched();
694 702
695 goto false_alarm; 703 goto false_alarm;
@@ -705,14 +713,15 @@ dump_parport_state ("enter fifo_write_block_dma", port);
705 713
706 /* Anyone else waiting for the port? */ 714 /* Anyone else waiting for the port? */
707 if (port->waithead) { 715 if (port->waithead) {
708 printk (KERN_DEBUG "Somebody wants the port\n"); 716 printk(KERN_DEBUG "Somebody wants the port\n");
709 break; 717 break;
710 } 718 }
711 719
712 /* update for possible DMA residue ! */ 720 /* update for possible DMA residue ! */
713 buf -= count; 721 buf -= count;
714 left += count; 722 left += count;
715 if (dma_handle) dma_addr -= count; 723 if (dma_handle)
724 dma_addr -= count;
716 } 725 }
717 726
718 /* Maybe got here through break, so adjust for DMA residue! */ 727 /* Maybe got here through break, so adjust for DMA residue! */
@@ -723,12 +732,12 @@ dump_parport_state ("enter fifo_write_block_dma", port);
723 release_dma_lock(dmaflag); 732 release_dma_lock(dmaflag);
724 733
725 /* Turn off DMA mode */ 734 /* Turn off DMA mode */
726 frob_econtrol (port, 1<<3, 0); 735 frob_econtrol(port, 1<<3, 0);
727 736
728 if (dma_handle) 737 if (dma_handle)
729 dma_unmap_single(dev, dma_handle, length, DMA_TO_DEVICE); 738 dma_unmap_single(dev, dma_handle, length, DMA_TO_DEVICE);
730 739
731dump_parport_state ("leave fifo_write_block_dma", port); 740 dump_parport_state("leave fifo_write_block_dma", port);
732 return length - left; 741 return length - left;
733} 742}
734#endif 743#endif
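
[Annotation] The DMA path above programs each chunk, optimistically assumes it will succeed ("assume DMA will be successful"), and rolls the advance back if the wait times out or another user wants the port. A sketch of that advance/rollback accounting; start_dma() and wait_done() are stubs for the real set_dma_*()/parport_wait_event() sequence:

#include <stdio.h>
#include <stddef.h>

static void start_dma(size_t count) { (void)count; }
static int wait_done(void) { return -1; /* pretend we timed out */ }

int main(void)
{
    size_t left = 4096, off = 0, count;

    while (left) {
        count = left < 1024 ? left : 1024;
        start_dma(count);

        /* assume DMA will be successful */
        left -= count;
        off  += count;

        if (wait_done() < 0) {
            /* timed out: undo the optimistic advance (DMA residue) */
            off  -= count;
            left += count;
            break;
        }
    }
    printf("transferred %zu, %zu left\n", off, left);
    return 0;
}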
@@ -738,13 +747,13 @@ static inline size_t parport_pc_fifo_write_block(struct parport *port,
738{ 747{
739#ifdef HAS_DMA 748#ifdef HAS_DMA
740 if (port->dma != PARPORT_DMA_NONE) 749 if (port->dma != PARPORT_DMA_NONE)
741 return parport_pc_fifo_write_block_dma (port, buf, length); 750 return parport_pc_fifo_write_block_dma(port, buf, length);
742#endif 751#endif
743 return parport_pc_fifo_write_block_pio (port, buf, length); 752 return parport_pc_fifo_write_block_pio(port, buf, length);
744} 753}
745 754
746/* Parallel Port FIFO mode (ECP chipsets) */ 755/* Parallel Port FIFO mode (ECP chipsets) */
747static size_t parport_pc_compat_write_block_pio (struct parport *port, 756static size_t parport_pc_compat_write_block_pio(struct parport *port,
748 const void *buf, size_t length, 757 const void *buf, size_t length,
749 int flags) 758 int flags)
750{ 759{
@@ -756,14 +765,16 @@ static size_t parport_pc_compat_write_block_pio (struct parport *port,
756 /* Special case: a timeout of zero means we cannot call schedule(). 765 /* Special case: a timeout of zero means we cannot call schedule().
757 * Also if O_NONBLOCK is set then use the default implementation. */ 766 * Also if O_NONBLOCK is set then use the default implementation. */
758 if (port->physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK) 767 if (port->physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
759 return parport_ieee1284_write_compat (port, buf, 768 return parport_ieee1284_write_compat(port, buf,
760 length, flags); 769 length, flags);
761 770
762 /* Set up parallel port FIFO mode.*/ 771 /* Set up parallel port FIFO mode.*/
763 parport_pc_data_forward (port); /* Must be in PS2 mode */ 772 parport_pc_data_forward(port); /* Must be in PS2 mode */
764 parport_pc_frob_control (port, PARPORT_CONTROL_STROBE, 0); 773 parport_pc_frob_control(port, PARPORT_CONTROL_STROBE, 0);
765 r = change_mode (port, ECR_PPF); /* Parallel port FIFO */ 774 r = change_mode(port, ECR_PPF); /* Parallel port FIFO */
766 if (r) printk (KERN_DEBUG "%s: Warning change_mode ECR_PPF failed\n", port->name); 775 if (r)
776 printk(KERN_DEBUG "%s: Warning change_mode ECR_PPF failed\n",
777 port->name);
767 778
768 port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA; 779 port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
769 780
@@ -775,40 +786,39 @@ static size_t parport_pc_compat_write_block_pio (struct parport *port,
775 * the FIFO is empty, so allow 4 seconds for each position 786 * the FIFO is empty, so allow 4 seconds for each position
776 * in the fifo. 787 * in the fifo.
777 */ 788 */
778 expire = jiffies + (priv->fifo_depth * HZ * 4); 789 expire = jiffies + (priv->fifo_depth * HZ * 4);
779 do { 790 do {
780 /* Wait for the FIFO to empty */ 791 /* Wait for the FIFO to empty */
781 r = change_mode (port, ECR_PS2); 792 r = change_mode(port, ECR_PS2);
782 if (r != -EBUSY) { 793 if (r != -EBUSY)
783 break; 794 break;
784 } 795 } while (time_before(jiffies, expire));
785 } while (time_before (jiffies, expire));
786 if (r == -EBUSY) { 796 if (r == -EBUSY) {
787 797
788 printk (KERN_DEBUG "%s: FIFO is stuck\n", port->name); 798 printk(KERN_DEBUG "%s: FIFO is stuck\n", port->name);
789 799
790 /* Prevent further data transfer. */ 800 /* Prevent further data transfer. */
791 frob_set_mode (port, ECR_TST); 801 frob_set_mode(port, ECR_TST);
792 802
793 /* Adjust for the contents of the FIFO. */ 803 /* Adjust for the contents of the FIFO. */
794 for (written -= priv->fifo_depth; ; written++) { 804 for (written -= priv->fifo_depth; ; written++) {
795 if (inb (ECONTROL (port)) & 0x2) { 805 if (inb(ECONTROL(port)) & 0x2) {
796 /* Full up. */ 806 /* Full up. */
797 break; 807 break;
798 } 808 }
799 outb (0, FIFO (port)); 809 outb(0, FIFO(port));
800 } 810 }
801 811
802 /* Reset the FIFO and return to PS2 mode. */ 812 /* Reset the FIFO and return to PS2 mode. */
803 frob_set_mode (port, ECR_PS2); 813 frob_set_mode(port, ECR_PS2);
804 } 814 }
805 815
806 r = parport_wait_peripheral (port, 816 r = parport_wait_peripheral(port,
807 PARPORT_STATUS_BUSY, 817 PARPORT_STATUS_BUSY,
808 PARPORT_STATUS_BUSY); 818 PARPORT_STATUS_BUSY);
809 if (r) 819 if (r)
810 printk (KERN_DEBUG 820 printk(KERN_DEBUG
811 "%s: BUSY timeout (%d) in compat_write_block_pio\n", 821 "%s: BUSY timeout (%d) in compat_write_block_pio\n",
812 port->name, r); 822 port->name, r);
813 823
814 port->physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE; 824 port->physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
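
[Annotation] When the FIFO refuses to drain, the code above freezes it in TST mode and measures how much was still queued by stuffing zero bytes until the full bit sets; each stuffed byte is one free slot, so everything unaccounted for is residue to subtract from written. A standalone model of that residue count (fifo_full() and FIFO_DEPTH stand in for the ECR full bit and priv->fifo_depth):

#include <stdio.h>

#define FIFO_DEPTH 16
static int occupied = 5;                 /* bytes stuck in the FIFO */
static int fifo_full(void) { return occupied >= FIFO_DEPTH; }
static void push_zero(void) { occupied++; }

int main(void)
{
    int written = 100;

    /* Adjust for the contents of the FIFO. */
    for (written -= FIFO_DEPTH; ; written++) {
        if (fifo_full())
            break;      /* full up */
        push_zero();
    }
    printf("bytes actually written: %d\n", written);  /* 95 */
    return 0;
}

With 5 bytes stuck, 11 zeros fit before the full bit sets, so written lands at 100 - 16 + 11 = 95: exactly the 5 undelivered bytes are deducted.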
@@ -818,7 +828,7 @@ static size_t parport_pc_compat_write_block_pio (struct parport *port,
818 828
819/* ECP */ 829/* ECP */
820#ifdef CONFIG_PARPORT_1284 830#ifdef CONFIG_PARPORT_1284
821static size_t parport_pc_ecp_write_block_pio (struct parport *port, 831static size_t parport_pc_ecp_write_block_pio(struct parport *port,
822 const void *buf, size_t length, 832 const void *buf, size_t length,
823 int flags) 833 int flags)
824{ 834{
@@ -830,36 +840,38 @@ static size_t parport_pc_ecp_write_block_pio (struct parport *port,
830 /* Special case: a timeout of zero means we cannot call schedule(). 840 /* Special case: a timeout of zero means we cannot call schedule().
831 * Also if O_NONBLOCK is set then use the default implementation. */ 841 * Also if O_NONBLOCK is set then use the default implementation. */
832 if (port->physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK) 842 if (port->physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
833 return parport_ieee1284_ecp_write_data (port, buf, 843 return parport_ieee1284_ecp_write_data(port, buf,
834 length, flags); 844 length, flags);
835 845
836 /* Switch to forward mode if necessary. */ 846 /* Switch to forward mode if necessary. */
837 if (port->physport->ieee1284.phase != IEEE1284_PH_FWD_IDLE) { 847 if (port->physport->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
838 /* Event 47: Set nInit high. */ 848 /* Event 47: Set nInit high. */
839 parport_frob_control (port, 849 parport_frob_control(port,
840 PARPORT_CONTROL_INIT 850 PARPORT_CONTROL_INIT
841 | PARPORT_CONTROL_AUTOFD, 851 | PARPORT_CONTROL_AUTOFD,
842 PARPORT_CONTROL_INIT 852 PARPORT_CONTROL_INIT
843 | PARPORT_CONTROL_AUTOFD); 853 | PARPORT_CONTROL_AUTOFD);
844 854
845 /* Event 49: PError goes high. */ 855 /* Event 49: PError goes high. */
846 r = parport_wait_peripheral (port, 856 r = parport_wait_peripheral(port,
847 PARPORT_STATUS_PAPEROUT, 857 PARPORT_STATUS_PAPEROUT,
848 PARPORT_STATUS_PAPEROUT); 858 PARPORT_STATUS_PAPEROUT);
849 if (r) { 859 if (r) {
850 printk (KERN_DEBUG "%s: PError timeout (%d) " 860 printk(KERN_DEBUG "%s: PError timeout (%d) "
851 "in ecp_write_block_pio\n", port->name, r); 861 "in ecp_write_block_pio\n", port->name, r);
852 } 862 }
853 } 863 }
854 864
855 /* Set up ECP parallel port mode.*/ 865 /* Set up ECP parallel port mode.*/
856 parport_pc_data_forward (port); /* Must be in PS2 mode */ 866 parport_pc_data_forward(port); /* Must be in PS2 mode */
857 parport_pc_frob_control (port, 867 parport_pc_frob_control(port,
858 PARPORT_CONTROL_STROBE | 868 PARPORT_CONTROL_STROBE |
859 PARPORT_CONTROL_AUTOFD, 869 PARPORT_CONTROL_AUTOFD,
860 0); 870 0);
861 r = change_mode (port, ECR_ECP); /* ECP FIFO */ 871 r = change_mode(port, ECR_ECP); /* ECP FIFO */
862 if (r) printk (KERN_DEBUG "%s: Warning change_mode ECR_ECP failed\n", port->name); 872 if (r)
873 printk(KERN_DEBUG "%s: Warning change_mode ECR_ECP failed\n",
874 port->name);
863 port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA; 875 port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
864 876
865 /* Write the data to the FIFO. */ 877 /* Write the data to the FIFO. */
@@ -873,55 +885,54 @@ static size_t parport_pc_ecp_write_block_pio (struct parport *port,
873 expire = jiffies + (priv->fifo_depth * (HZ * 4)); 885 expire = jiffies + (priv->fifo_depth * (HZ * 4));
874 do { 886 do {
875 /* Wait for the FIFO to empty */ 887 /* Wait for the FIFO to empty */
876 r = change_mode (port, ECR_PS2); 888 r = change_mode(port, ECR_PS2);
877 if (r != -EBUSY) { 889 if (r != -EBUSY)
878 break; 890 break;
879 } 891 } while (time_before(jiffies, expire));
880 } while (time_before (jiffies, expire));
881 if (r == -EBUSY) { 892 if (r == -EBUSY) {
882 893
883 printk (KERN_DEBUG "%s: FIFO is stuck\n", port->name); 894 printk(KERN_DEBUG "%s: FIFO is stuck\n", port->name);
884 895
885 /* Prevent further data transfer. */ 896 /* Prevent further data transfer. */
886 frob_set_mode (port, ECR_TST); 897 frob_set_mode(port, ECR_TST);
887 898
888 /* Adjust for the contents of the FIFO. */ 899 /* Adjust for the contents of the FIFO. */
889 for (written -= priv->fifo_depth; ; written++) { 900 for (written -= priv->fifo_depth; ; written++) {
890 if (inb (ECONTROL (port)) & 0x2) { 901 if (inb(ECONTROL(port)) & 0x2) {
891 /* Full up. */ 902 /* Full up. */
892 break; 903 break;
893 } 904 }
894 outb (0, FIFO (port)); 905 outb(0, FIFO(port));
895 } 906 }
896 907
897 /* Reset the FIFO and return to PS2 mode. */ 908 /* Reset the FIFO and return to PS2 mode. */
898 frob_set_mode (port, ECR_PS2); 909 frob_set_mode(port, ECR_PS2);
899 910
900 /* Host transfer recovery. */ 911 /* Host transfer recovery. */
901 parport_pc_data_reverse (port); /* Must be in PS2 mode */ 912 parport_pc_data_reverse(port); /* Must be in PS2 mode */
902 udelay (5); 913 udelay(5);
903 parport_frob_control (port, PARPORT_CONTROL_INIT, 0); 914 parport_frob_control(port, PARPORT_CONTROL_INIT, 0);
904 r = parport_wait_peripheral (port, PARPORT_STATUS_PAPEROUT, 0); 915 r = parport_wait_peripheral(port, PARPORT_STATUS_PAPEROUT, 0);
905 if (r) 916 if (r)
906 printk (KERN_DEBUG "%s: PE,1 timeout (%d) " 917 printk(KERN_DEBUG "%s: PE,1 timeout (%d) "
907 "in ecp_write_block_pio\n", port->name, r); 918 "in ecp_write_block_pio\n", port->name, r);
908 919
909 parport_frob_control (port, 920 parport_frob_control(port,
910 PARPORT_CONTROL_INIT, 921 PARPORT_CONTROL_INIT,
911 PARPORT_CONTROL_INIT); 922 PARPORT_CONTROL_INIT);
912 r = parport_wait_peripheral (port, 923 r = parport_wait_peripheral(port,
913 PARPORT_STATUS_PAPEROUT, 924 PARPORT_STATUS_PAPEROUT,
914 PARPORT_STATUS_PAPEROUT); 925 PARPORT_STATUS_PAPEROUT);
915 if (r) 926 if (r)
916 printk (KERN_DEBUG "%s: PE,2 timeout (%d) " 927 printk(KERN_DEBUG "%s: PE,2 timeout (%d) "
917 "in ecp_write_block_pio\n", port->name, r); 928 "in ecp_write_block_pio\n", port->name, r);
918 } 929 }
919 930
920 r = parport_wait_peripheral (port, 931 r = parport_wait_peripheral(port,
921 PARPORT_STATUS_BUSY, 932 PARPORT_STATUS_BUSY,
922 PARPORT_STATUS_BUSY); 933 PARPORT_STATUS_BUSY);
923 if(r) 934 if (r)
924 printk (KERN_DEBUG 935 printk(KERN_DEBUG
925 "%s: BUSY timeout (%d) in ecp_write_block_pio\n", 936 "%s: BUSY timeout (%d) in ecp_write_block_pio\n",
926 port->name, r); 937 port->name, r);
927 938
@@ -931,7 +942,7 @@ static size_t parport_pc_ecp_write_block_pio (struct parport *port,
931} 942}
932 943
933#if 0 944#if 0
934static size_t parport_pc_ecp_read_block_pio (struct parport *port, 945static size_t parport_pc_ecp_read_block_pio(struct parport *port,
935 void *buf, size_t length, 946 void *buf, size_t length,
936 int flags) 947 int flags)
937{ 948{
@@ -944,13 +955,13 @@ static size_t parport_pc_ecp_read_block_pio (struct parport *port,
944 char *bufp = buf; 955 char *bufp = buf;
945 956
946 port = port->physport; 957 port = port->physport;
947DPRINTK (KERN_DEBUG "parport_pc: parport_pc_ecp_read_block_pio\n"); 958 DPRINTK(KERN_DEBUG "parport_pc: parport_pc_ecp_read_block_pio\n");
948dump_parport_state ("enter fcn", port); 959 dump_parport_state("enter fcn", port);
949 960
950 /* Special case: a timeout of zero means we cannot call schedule(). 961 /* Special case: a timeout of zero means we cannot call schedule().
951 * Also if O_NONBLOCK is set then use the default implementation. */ 962 * Also if O_NONBLOCK is set then use the default implementation. */
952 if (port->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK) 963 if (port->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
953 return parport_ieee1284_ecp_read_data (port, buf, 964 return parport_ieee1284_ecp_read_data(port, buf,
954 length, flags); 965 length, flags);
955 966
956 if (port->ieee1284.mode == IEEE1284_MODE_ECPRLE) { 967 if (port->ieee1284.mode == IEEE1284_MODE_ECPRLE) {
@@ -966,173 +977,178 @@ dump_parport_state ("enter fcn", port);
966 * go through software emulation. Otherwise we may have to throw 977 * go through software emulation. Otherwise we may have to throw
967 * away data. */ 978 * away data. */
968 if (length < fifofull) 979 if (length < fifofull)
969 return parport_ieee1284_ecp_read_data (port, buf, 980 return parport_ieee1284_ecp_read_data(port, buf,
970 length, flags); 981 length, flags);
971 982
972 if (port->ieee1284.phase != IEEE1284_PH_REV_IDLE) { 983 if (port->ieee1284.phase != IEEE1284_PH_REV_IDLE) {
973 /* change to reverse-idle phase (must be in forward-idle) */ 984 /* change to reverse-idle phase (must be in forward-idle) */
974 985
975 /* Event 38: Set nAutoFd low (also make sure nStrobe is high) */ 986 /* Event 38: Set nAutoFd low (also make sure nStrobe is high) */
976 parport_frob_control (port, 987 parport_frob_control(port,
977 PARPORT_CONTROL_AUTOFD 988 PARPORT_CONTROL_AUTOFD
978 | PARPORT_CONTROL_STROBE, 989 | PARPORT_CONTROL_STROBE,
979 PARPORT_CONTROL_AUTOFD); 990 PARPORT_CONTROL_AUTOFD);
980 parport_pc_data_reverse (port); /* Must be in PS2 mode */ 991 parport_pc_data_reverse(port); /* Must be in PS2 mode */
981 udelay (5); 992 udelay(5);
982 /* Event 39: Set nInit low to initiate bus reversal */ 993 /* Event 39: Set nInit low to initiate bus reversal */
983 parport_frob_control (port, 994 parport_frob_control(port,
984 PARPORT_CONTROL_INIT, 995 PARPORT_CONTROL_INIT,
985 0); 996 0);
986 /* Event 40: Wait for nAckReverse (PError) to go low */ 997 /* Event 40: Wait for nAckReverse (PError) to go low */
987 r = parport_wait_peripheral (port, PARPORT_STATUS_PAPEROUT, 0); 998 r = parport_wait_peripheral(port, PARPORT_STATUS_PAPEROUT, 0);
988 if (r) { 999 if (r) {
989 printk (KERN_DEBUG "%s: PE timeout Event 40 (%d) " 1000 printk(KERN_DEBUG "%s: PE timeout Event 40 (%d) "
990 "in ecp_read_block_pio\n", port->name, r); 1001 "in ecp_read_block_pio\n", port->name, r);
991 return 0; 1002 return 0;
992 } 1003 }
993 } 1004 }
994 1005
995 /* Set up ECP FIFO mode.*/ 1006 /* Set up ECP FIFO mode.*/
996/* parport_pc_frob_control (port, 1007/* parport_pc_frob_control(port,
997 PARPORT_CONTROL_STROBE | 1008 PARPORT_CONTROL_STROBE |
998 PARPORT_CONTROL_AUTOFD, 1009 PARPORT_CONTROL_AUTOFD,
999 PARPORT_CONTROL_AUTOFD); */ 1010 PARPORT_CONTROL_AUTOFD); */
1000 r = change_mode (port, ECR_ECP); /* ECP FIFO */ 1011 r = change_mode(port, ECR_ECP); /* ECP FIFO */
1001 if (r) printk (KERN_DEBUG "%s: Warning change_mode ECR_ECP failed\n", port->name); 1012 if (r)
1013 printk(KERN_DEBUG "%s: Warning change_mode ECR_ECP failed\n",
1014 port->name);
1002 1015
1003 port->ieee1284.phase = IEEE1284_PH_REV_DATA; 1016 port->ieee1284.phase = IEEE1284_PH_REV_DATA;
1004 1017
1005 /* the first byte must be collected manually */ 1018 /* the first byte must be collected manually */
1006dump_parport_state ("pre 43", port); 1019 dump_parport_state("pre 43", port);
1007 /* Event 43: Wait for nAck to go low */ 1020 /* Event 43: Wait for nAck to go low */
1008 r = parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0); 1021 r = parport_wait_peripheral(port, PARPORT_STATUS_ACK, 0);
1009 if (r) { 1022 if (r) {
1010 /* timed out while reading -- no data */ 1023 /* timed out while reading -- no data */
1011 printk (KERN_DEBUG "PIO read timed out (initial byte)\n"); 1024 printk(KERN_DEBUG "PIO read timed out (initial byte)\n");
1012 goto out_no_data; 1025 goto out_no_data;
1013 } 1026 }
1014 /* read byte */ 1027 /* read byte */
1015 *bufp++ = inb (DATA (port)); 1028 *bufp++ = inb(DATA(port));
1016 left--; 1029 left--;
1017dump_parport_state ("43-44", port); 1030 dump_parport_state("43-44", port);
1018 /* Event 44: nAutoFd (HostAck) goes high to acknowledge */ 1031 /* Event 44: nAutoFd (HostAck) goes high to acknowledge */
1019 parport_pc_frob_control (port, 1032 parport_pc_frob_control(port,
1020 PARPORT_CONTROL_AUTOFD, 1033 PARPORT_CONTROL_AUTOFD,
1021 0); 1034 0);
1022dump_parport_state ("pre 45", port); 1035 dump_parport_state("pre 45", port);
1023 /* Event 45: Wait for nAck to go high */ 1036 /* Event 45: Wait for nAck to go high */
1024/* r = parport_wait_peripheral (port, PARPORT_STATUS_ACK, PARPORT_STATUS_ACK); */ 1037 /* r = parport_wait_peripheral(port, PARPORT_STATUS_ACK,
1025dump_parport_state ("post 45", port); 1038 PARPORT_STATUS_ACK); */
1026r = 0; 1039 dump_parport_state("post 45", port);
1040 r = 0;
1027 if (r) { 1041 if (r) {
1028 /* timed out while waiting for peripheral to respond to ack */ 1042 /* timed out while waiting for peripheral to respond to ack */
1029 printk (KERN_DEBUG "ECP PIO read timed out (waiting for nAck)\n"); 1043 printk(KERN_DEBUG "ECP PIO read timed out (waiting for nAck)\n");
1030 1044
1031 /* keep hold of the byte we've got already */ 1045 /* keep hold of the byte we've got already */
1032 goto out_no_data; 1046 goto out_no_data;
1033 } 1047 }
1034 /* Event 46: nAutoFd (HostAck) goes low to accept more data */ 1048 /* Event 46: nAutoFd (HostAck) goes low to accept more data */
1035 parport_pc_frob_control (port, 1049 parport_pc_frob_control(port,
1036 PARPORT_CONTROL_AUTOFD, 1050 PARPORT_CONTROL_AUTOFD,
1037 PARPORT_CONTROL_AUTOFD); 1051 PARPORT_CONTROL_AUTOFD);
1038 1052
1039 1053
1040dump_parport_state ("rev idle", port); 1054 dump_parport_state("rev idle", port);
1041 /* Do the transfer. */ 1055 /* Do the transfer. */
1042 while (left > fifofull) { 1056 while (left > fifofull) {
1043 int ret; 1057 int ret;
1044 unsigned long expire = jiffies + port->cad->timeout; 1058 unsigned long expire = jiffies + port->cad->timeout;
1045 unsigned char ecrval = inb (ECONTROL (port)); 1059 unsigned char ecrval = inb(ECONTROL(port));
1046 1060
1047 if (need_resched() && time_before (jiffies, expire)) 1061 if (need_resched() && time_before(jiffies, expire))
1048 /* Can't yield the port. */ 1062 /* Can't yield the port. */
1049 schedule (); 1063 schedule();
1050 1064
1051 /* At this point, the FIFO may already be full. In 1065 /* At this point, the FIFO may already be full. In
1052 * that case ECP is already holding back the 1066 * that case ECP is already holding back the
1053 * peripheral (assuming proper design) with a delayed 1067 * peripheral (assuming proper design) with a delayed
1054 * handshake. Work fast to avoid a peripheral 1068 * handshake. Work fast to avoid a peripheral
1055 * timeout. */ 1069 * timeout. */
1056 1070
1057 if (ecrval & 0x01) { 1071 if (ecrval & 0x01) {
1058 /* FIFO is empty. Wait for interrupt. */ 1072 /* FIFO is empty. Wait for interrupt. */
1059dump_parport_state ("FIFO empty", port); 1073 dump_parport_state("FIFO empty", port);
1060 1074
1061 /* Anyone else waiting for the port? */ 1075 /* Anyone else waiting for the port? */
1062 if (port->waithead) { 1076 if (port->waithead) {
1063 printk (KERN_DEBUG "Somebody wants the port\n"); 1077 printk(KERN_DEBUG "Somebody wants the port\n");
1064 break; 1078 break;
1065 } 1079 }
1066 1080
1067 /* Clear serviceIntr */ 1081 /* Clear serviceIntr */
1068 ECR_WRITE (port, ecrval & ~(1<<2)); 1082 ECR_WRITE(port, ecrval & ~(1<<2));
1069 false_alarm: 1083false_alarm:
1070dump_parport_state ("waiting", port); 1084 dump_parport_state("waiting", port);
1071 ret = parport_wait_event (port, HZ); 1085 ret = parport_wait_event(port, HZ);
1072DPRINTK (KERN_DEBUG "parport_wait_event returned %d\n", ret); 1086 DPRINTK(KERN_DEBUG "parport_wait_event returned %d\n",
1087 ret);
1073 if (ret < 0) 1088 if (ret < 0)
1074 break; 1089 break;
1075 ret = 0; 1090 ret = 0;
1076 if (!time_before (jiffies, expire)) { 1091 if (!time_before(jiffies, expire)) {
1077 /* Timed out. */ 1092 /* Timed out. */
1078dump_parport_state ("timeout", port); 1093 dump_parport_state("timeout", port);
1079 printk (KERN_DEBUG "PIO read timed out\n"); 1094 printk(KERN_DEBUG "PIO read timed out\n");
1080 break; 1095 break;
1081 } 1096 }
1082 ecrval = inb (ECONTROL (port)); 1097 ecrval = inb(ECONTROL(port));
1083 if (!(ecrval & (1<<2))) { 1098 if (!(ecrval & (1<<2))) {
1084 if (need_resched() && 1099 if (need_resched() &&
1085 time_before (jiffies, expire)) { 1100 time_before(jiffies, expire)) {
1086 schedule (); 1101 schedule();
1087 } 1102 }
1088 goto false_alarm; 1103 goto false_alarm;
1089 } 1104 }
1090 1105
1091 /* Depending on how the FIFO threshold was 1106 /* Depending on how the FIFO threshold was
1092 * set, how long interrupt service took, and 1107 * set, how long interrupt service took, and
1093 * how fast the peripheral is, we might be 1108 * how fast the peripheral is, we might be
1094 * lucky and have a just filled FIFO. */ 1109 * lucky and have a just filled FIFO. */
1095 continue; 1110 continue;
1096 } 1111 }
1097 1112
1098 if (ecrval & 0x02) { 1113 if (ecrval & 0x02) {
1099 /* FIFO is full. */ 1114 /* FIFO is full. */
1100dump_parport_state ("FIFO full", port); 1115 dump_parport_state("FIFO full", port);
1101 insb (fifo, bufp, fifo_depth); 1116 insb(fifo, bufp, fifo_depth);
1102 bufp += fifo_depth; 1117 bufp += fifo_depth;
1103 left -= fifo_depth; 1118 left -= fifo_depth;
1104 continue; 1119 continue;
1105 } 1120 }
1106 1121
1107DPRINTK (KERN_DEBUG "*** ecp_read_block_pio: reading one byte from the FIFO\n"); 1122 DPRINTK(KERN_DEBUG
1123 "*** ecp_read_block_pio: reading one byte from the FIFO\n");
1108 1124
1109 /* FIFO not filled. We will cycle this loop for a while 1125 /* FIFO not filled. We will cycle this loop for a while
1110 * and either the peripheral will fill it faster, 1126 * and either the peripheral will fill it faster,
1111 * tripping a fast empty with insb, or we empty it. */ 1127 * tripping a fast empty with insb, or we empty it. */
1112 *bufp++ = inb (fifo); 1128 *bufp++ = inb(fifo);
1113 left--; 1129 left--;
1114 } 1130 }
1115 1131
1116 /* scoop up anything left in the FIFO */ 1132 /* scoop up anything left in the FIFO */
1117	while (left && !(inb (ECONTROL (port) & 0x01))) {	1133	while (left && !(inb(ECONTROL(port)) & 0x01)) {
1118 *bufp++ = inb (fifo); 1134 *bufp++ = inb(fifo);
1119 left--; 1135 left--;
1120 } 1136 }
1121 1137
1122 port->ieee1284.phase = IEEE1284_PH_REV_IDLE; 1138 port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
1123dump_parport_state ("rev idle2", port); 1139 dump_parport_state("rev idle2", port);
1124 1140
1125out_no_data: 1141out_no_data:
1126 1142
1127 /* Go to forward idle mode to shut the peripheral up (event 47). */ 1143 /* Go to forward idle mode to shut the peripheral up (event 47). */
1128 parport_frob_control (port, PARPORT_CONTROL_INIT, PARPORT_CONTROL_INIT); 1144 parport_frob_control(port, PARPORT_CONTROL_INIT, PARPORT_CONTROL_INIT);
1129 1145
1130 /* event 49: PError goes high */ 1146 /* event 49: PError goes high */
1131 r = parport_wait_peripheral (port, 1147 r = parport_wait_peripheral(port,
1132 PARPORT_STATUS_PAPEROUT, 1148 PARPORT_STATUS_PAPEROUT,
1133 PARPORT_STATUS_PAPEROUT); 1149 PARPORT_STATUS_PAPEROUT);
1134 if (r) { 1150 if (r) {
1135 printk (KERN_DEBUG 1151 printk(KERN_DEBUG
1136 "%s: PE timeout FWDIDLE (%d) in ecp_read_block_pio\n", 1152 "%s: PE timeout FWDIDLE (%d) in ecp_read_block_pio\n",
1137 port->name, r); 1153 port->name, r);
1138 } 1154 }
@@ -1141,14 +1157,14 @@ out_no_data:
1141 1157
1142 /* Finish up. */ 1158 /* Finish up. */
1143 { 1159 {
1144 int lost = get_fifo_residue (port); 1160 int lost = get_fifo_residue(port);
1145 if (lost) 1161 if (lost)
1146 /* Shouldn't happen with compliant peripherals. */ 1162 /* Shouldn't happen with compliant peripherals. */
1147 printk (KERN_DEBUG "%s: DATA LOSS (%d bytes)!\n", 1163 printk(KERN_DEBUG "%s: DATA LOSS (%d bytes)!\n",
1148 port->name, lost); 1164 port->name, lost);
1149 } 1165 }
1150 1166
1151dump_parport_state ("fwd idle", port); 1167 dump_parport_state("fwd idle", port);
1152 return length - left; 1168 return length - left;
1153} 1169}
1154#endif /* 0 */ 1170#endif /* 0 */
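The disabled routine above steps through the IEEE 1284 ECP reverse-transfer handshake; the condensed event flow, assembled purely from its own comments, is summarized by this standalone printout (documentation sketch, not kernel code):

#include <stdio.h>

int main(void)
{
	static const char *const events[] = {
		"38: host sets nAutoFd low (nStrobe high), data lines reversed",
		"39: host sets nInit low to request bus reversal",
		"40: peripheral sets PError (nAckReverse) low to grant it",
		"43: peripheral sets nAck low -- a data byte is valid",
		"44: host raises nAutoFd (HostAck) to acknowledge",
		"45: peripheral raises nAck",
		"46: host sets nAutoFd low again to accept more data",
		"47: host raises nInit (forward idle); 49: PError goes high",
	};
	unsigned int i;

	for (i = 0; i < sizeof(events) / sizeof(events[0]); i++)
		printf("Event %s\n", events[i]);
	return 0;
}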
@@ -1164,8 +1180,7 @@ dump_parport_state ("fwd idle", port);
1164 1180
1165/* GCC does not inline an extern inline function that is later overwritten 1181/* GCC does not inline an extern inline function that is later overwritten
1166   as non-inline, so we use the outlined_ variants here. */ 1182   as non-inline, so we use the outlined_ variants here. */
1167static const struct parport_operations parport_pc_ops = 1183static const struct parport_operations parport_pc_ops = {
1168{
1169 .write_data = parport_pc_write_data, 1184 .write_data = parport_pc_write_data,
1170 .read_data = parport_pc_read_data, 1185 .read_data = parport_pc_read_data,
1171 1186
@@ -1202,88 +1217,107 @@ static const struct parport_operations parport_pc_ops =
1202}; 1217};
1203 1218
1204#ifdef CONFIG_PARPORT_PC_SUPERIO 1219#ifdef CONFIG_PARPORT_PC_SUPERIO
1220
1221static struct superio_struct *find_free_superio(void)
1222{
1223 int i;
1224 for (i = 0; i < NR_SUPERIOS; i++)
1225 if (superios[i].io == 0)
1226 return &superios[i];
1227 return NULL;
1228}
1229
1230
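The new find_free_superio() helper centralizes the "scan for the first unused slot" loop that the probe paths below previously open-coded. A minimal standalone sketch of the pattern, with the struct layout and table size assumed from this diff:

#include <stdio.h>
#include <stddef.h>

#define NR_SUPERIOS 3			/* table size, assumed */

struct superio_struct {			/* fields as used in this diff */
	int io, irq, dma;
};

static struct superio_struct superios[NR_SUPERIOS];

static struct superio_struct *find_free_superio(void)
{
	int i;
	for (i = 0; i < NR_SUPERIOS; i++)
		if (superios[i].io == 0)	/* io == 0 marks a free slot */
			return &superios[i];
	return NULL;
}

int main(void)
{
	struct superio_struct *s = find_free_superio();
	if (s) {			/* claim the slot */
		s->io = 0x378;
		s->irq = 7;
	}
	printf("free slot found: %s\n", s ? "yes" : "no");
	return 0;
}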
1205/* Super-IO chipset detection, Winbond, SMSC */ 1231/* Super-IO chipset detection, Winbond, SMSC */
1206static void __devinit show_parconfig_smsc37c669(int io, int key) 1232static void __devinit show_parconfig_smsc37c669(int io, int key)
1207{ 1233{
1208 int cr1,cr4,cra,cr23,cr26,cr27,i=0; 1234 int cr1, cr4, cra, cr23, cr26, cr27;
1209 static const char *const modes[]={ 1235 struct superio_struct *s;
1236
1237 static const char *const modes[] = {
1210 "SPP and Bidirectional (PS/2)", 1238 "SPP and Bidirectional (PS/2)",
1211 "EPP and SPP", 1239 "EPP and SPP",
1212 "ECP", 1240 "ECP",
1213 "ECP and EPP" }; 1241 "ECP and EPP" };
1214 1242
1215 outb(key,io); 1243 outb(key, io);
1216 outb(key,io); 1244 outb(key, io);
1217 outb(1,io); 1245 outb(1, io);
1218 cr1=inb(io+1); 1246 cr1 = inb(io + 1);
1219 outb(4,io); 1247 outb(4, io);
1220 cr4=inb(io+1); 1248 cr4 = inb(io + 1);
1221 outb(0x0a,io); 1249 outb(0x0a, io);
1222 cra=inb(io+1); 1250 cra = inb(io + 1);
1223 outb(0x23,io); 1251 outb(0x23, io);
1224 cr23=inb(io+1); 1252 cr23 = inb(io + 1);
1225 outb(0x26,io); 1253 outb(0x26, io);
1226 cr26=inb(io+1); 1254 cr26 = inb(io + 1);
1227 outb(0x27,io); 1255 outb(0x27, io);
1228 cr27=inb(io+1); 1256 cr27 = inb(io + 1);
1229 outb(0xaa,io); 1257 outb(0xaa, io);
1230 1258
1231 if (verbose_probing) { 1259 if (verbose_probing) {
1232 printk (KERN_INFO "SMSC 37c669 LPT Config: cr_1=0x%02x, 4=0x%02x, " 1260 printk(KERN_INFO
1261 "SMSC 37c669 LPT Config: cr_1=0x%02x, 4=0x%02x, "
1233 "A=0x%2x, 23=0x%02x, 26=0x%02x, 27=0x%02x\n", 1262 "A=0x%2x, 23=0x%02x, 26=0x%02x, 27=0x%02x\n",
1234 cr1,cr4,cra,cr23,cr26,cr27); 1263 cr1, cr4, cra, cr23, cr26, cr27);
1235 1264
1236 /* The documentation calls DMA and IRQ-Lines by letters, so 1265 /* The documentation calls DMA and IRQ-Lines by letters, so
1237 the board maker can/will wire them 1266 the board maker can/will wire them
1238 appropriately/randomly... G=reserved H=IDE-irq, */ 1267 appropriately/randomly... G=reserved H=IDE-irq, */
1239 printk (KERN_INFO "SMSC LPT Config: io=0x%04x, irq=%c, dma=%c, " 1268 printk(KERN_INFO
1240 "fifo threshold=%d\n", cr23*4, 1269 "SMSC LPT Config: io=0x%04x, irq=%c, dma=%c, fifo threshold=%d\n",
1241 (cr27 &0x0f) ? 'A'-1+(cr27 &0x0f): '-', 1270 cr23 * 4,
1242 (cr26 &0x0f) ? 'A'-1+(cr26 &0x0f): '-', cra & 0x0f); 1271 (cr27 & 0x0f) ? 'A' - 1 + (cr27 & 0x0f) : '-',
1272 (cr26 & 0x0f) ? 'A' - 1 + (cr26 & 0x0f) : '-',
1273 cra & 0x0f);
1243 printk(KERN_INFO "SMSC LPT Config: enabled=%s power=%s\n", 1274 printk(KERN_INFO "SMSC LPT Config: enabled=%s power=%s\n",
1244 (cr23*4 >=0x100) ?"yes":"no", (cr1 & 4) ? "yes" : "no"); 1275 (cr23 * 4 >= 0x100) ? "yes" : "no",
1245 printk(KERN_INFO "SMSC LPT Config: Port mode=%s, EPP version =%s\n", 1276 (cr1 & 4) ? "yes" : "no");
1246 (cr1 & 0x08 ) ? "Standard mode only (SPP)" : modes[cr4 & 0x03], 1277 printk(KERN_INFO
1247 (cr4 & 0x40) ? "1.7" : "1.9"); 1278 "SMSC LPT Config: Port mode=%s, EPP version =%s\n",
1279 (cr1 & 0x08) ? "Standard mode only (SPP)"
1280 : modes[cr4 & 0x03],
1281 (cr4 & 0x40) ? "1.7" : "1.9");
1248 } 1282 }
1249 1283
1250	/* Heuristics! BIOS setup for this mainboard device limits 1284	/* Heuristics! BIOS setup for this mainboard device limits
1251 the choices to standard settings, i.e. io-address and IRQ 1285 the choices to standard settings, i.e. io-address and IRQ
1252 are related, however DMA can be 1 or 3, assume DMA_A=DMA1, 1286 are related, however DMA can be 1 or 3, assume DMA_A=DMA1,
1253 DMA_C=DMA3 (this is true e.g. for TYAN 1564D Tomcat IV) */ 1287 DMA_C=DMA3 (this is true e.g. for TYAN 1564D Tomcat IV) */
1254 if(cr23*4 >=0x100) { /* if active */ 1288 if (cr23 * 4 >= 0x100) { /* if active */
1255 while((superios[i].io!= 0) && (i<NR_SUPERIOS)) 1289 s = find_free_superio();
1256 i++; 1290 if (s == NULL)
1257 if(i==NR_SUPERIOS)
1258 printk(KERN_INFO "Super-IO: too many chips!\n"); 1291 printk(KERN_INFO "Super-IO: too many chips!\n");
1259 else { 1292 else {
1260 int d; 1293 int d;
1261 switch (cr23*4) { 1294 switch (cr23 * 4) {
1262 case 0x3bc: 1295 case 0x3bc:
1263 superios[i].io = 0x3bc; 1296 s->io = 0x3bc;
1264 superios[i].irq = 7; 1297 s->irq = 7;
1265 break; 1298 break;
1266 case 0x378: 1299 case 0x378:
1267 superios[i].io = 0x378; 1300 s->io = 0x378;
1268 superios[i].irq = 7; 1301 s->irq = 7;
1269 break; 1302 break;
1270 case 0x278: 1303 case 0x278:
1271 superios[i].io = 0x278; 1304 s->io = 0x278;
1272 superios[i].irq = 5; 1305 s->irq = 5;
1273 } 1306 }
1274 d=(cr26 &0x0f); 1307 d = (cr26 & 0x0f);
1275 if((d==1) || (d==3)) 1308 if (d == 1 || d == 3)
1276 superios[i].dma= d; 1309 s->dma = d;
1277 else 1310 else
1278 superios[i].dma= PARPORT_DMA_NONE; 1311 s->dma = PARPORT_DMA_NONE;
1279 } 1312 }
1280 } 1313 }
1281} 1314}
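All of these Super-IO probe routines share one access pattern: write a magic key to the EFER port (often twice) to unlock config space, write a register index, read the value from the next port, then write 0xaa to re-lock. A runnable sketch of that pattern; inb()/outb() here are local fakes so it compiles anywhere, not the x86 port-I/O intrinsics:

#include <stdio.h>

static unsigned char fake_regs[256];	/* pretend config registers */
static unsigned char index_reg;

static void outb(unsigned char v, unsigned int port)
{
	(void)port;			/* single fake device */
	index_reg = v;			/* last write selects the index */
}

static unsigned char inb(unsigned int port)
{
	(void)port;
	return fake_regs[index_reg];	/* data port returns reg value */
}

static unsigned char superio_read(unsigned int io, unsigned char key,
				  unsigned char reg)
{
	outb(key, io);			/* magic sequence unlocks... */
	outb(key, io);
	outb(reg, io);			/* ...select the register... */
	return inb(io + 1);		/* ...and read it back */
}

int main(void)
{
	unsigned char cr23;

	fake_regs[0x23] = 0xde;		/* pretend cr23 holds 0xde */
	cr23 = superio_read(0x3f0, 0x55, 0x23);
	printf("cr23=0x%02x -> LPT base 0x%03x\n", cr23, cr23 * 4);
	outb(0xaa, 0x3f0);		/* magic seal re-locks */
	return 0;
}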
1282 1315
1283 1316
1284static void __devinit show_parconfig_winbond(int io, int key) 1317static void __devinit show_parconfig_winbond(int io, int key)
1285{ 1318{
1286 int cr30,cr60,cr61,cr70,cr74,crf0,i=0; 1319 int cr30, cr60, cr61, cr70, cr74, crf0;
1320 struct superio_struct *s;
1287 static const char *const modes[] = { 1321 static const char *const modes[] = {
1288 "Standard (SPP) and Bidirectional(PS/2)", /* 0 */ 1322 "Standard (SPP) and Bidirectional(PS/2)", /* 0 */
1289 "EPP-1.9 and SPP", 1323 "EPP-1.9 and SPP",
@@ -1296,110 +1330,134 @@ static void __devinit show_parconfig_winbond(int io, int key)
1296 static char *const irqtypes[] = { 1330 static char *const irqtypes[] = {
1297 "pulsed low, high-Z", 1331 "pulsed low, high-Z",
1298 "follows nACK" }; 1332 "follows nACK" };
1299 1333
1300 /* The registers are called compatible-PnP because the 1334 /* The registers are called compatible-PnP because the
1301 register layout is modelled after ISA-PnP, the access 1335 register layout is modelled after ISA-PnP, the access
1302 method is just another ... */ 1336 method is just another ... */
1303 outb(key,io); 1337 outb(key, io);
1304 outb(key,io); 1338 outb(key, io);
1305 outb(0x07,io); /* Register 7: Select Logical Device */ 1339 outb(0x07, io); /* Register 7: Select Logical Device */
1306 outb(0x01,io+1); /* LD1 is Parallel Port */ 1340 outb(0x01, io + 1); /* LD1 is Parallel Port */
1307 outb(0x30,io); 1341 outb(0x30, io);
1308 cr30=inb(io+1); 1342 cr30 = inb(io + 1);
1309 outb(0x60,io); 1343 outb(0x60, io);
1310 cr60=inb(io+1); 1344 cr60 = inb(io + 1);
1311 outb(0x61,io); 1345 outb(0x61, io);
1312 cr61=inb(io+1); 1346 cr61 = inb(io + 1);
1313 outb(0x70,io); 1347 outb(0x70, io);
1314 cr70=inb(io+1); 1348 cr70 = inb(io + 1);
1315 outb(0x74,io); 1349 outb(0x74, io);
1316 cr74=inb(io+1); 1350 cr74 = inb(io + 1);
1317 outb(0xf0,io); 1351 outb(0xf0, io);
1318 crf0=inb(io+1); 1352 crf0 = inb(io + 1);
1319 outb(0xaa,io); 1353 outb(0xaa, io);
1320 1354
1321 if (verbose_probing) { 1355 if (verbose_probing) {
1322 printk(KERN_INFO "Winbond LPT Config: cr_30=%02x 60,61=%02x%02x " 1356 printk(KERN_INFO
1323 "70=%02x 74=%02x, f0=%02x\n", cr30,cr60,cr61,cr70,cr74,crf0); 1357 "Winbond LPT Config: cr_30=%02x 60,61=%02x%02x 70=%02x 74=%02x, f0=%02x\n",
1324 printk(KERN_INFO "Winbond LPT Config: active=%s, io=0x%02x%02x irq=%d, ", 1358 cr30, cr60, cr61, cr70, cr74, crf0);
1325 (cr30 & 0x01) ? "yes":"no", cr60,cr61,cr70&0x0f ); 1359 printk(KERN_INFO "Winbond LPT Config: active=%s, io=0x%02x%02x irq=%d, ",
1360 (cr30 & 0x01) ? "yes" : "no", cr60, cr61, cr70 & 0x0f);
1326 if ((cr74 & 0x07) > 3) 1361 if ((cr74 & 0x07) > 3)
1327 printk("dma=none\n"); 1362 printk("dma=none\n");
1328 else 1363 else
1329 printk("dma=%d\n",cr74 & 0x07); 1364 printk("dma=%d\n", cr74 & 0x07);
1330 printk(KERN_INFO "Winbond LPT Config: irqtype=%s, ECP fifo threshold=%d\n", 1365 printk(KERN_INFO
1331 irqtypes[crf0>>7], (crf0>>3)&0x0f); 1366 "Winbond LPT Config: irqtype=%s, ECP fifo threshold=%d\n",
1332 printk(KERN_INFO "Winbond LPT Config: Port mode=%s\n", modes[crf0 & 0x07]); 1367 irqtypes[crf0>>7], (crf0>>3)&0x0f);
1368 printk(KERN_INFO "Winbond LPT Config: Port mode=%s\n",
1369 modes[crf0 & 0x07]);
1333 } 1370 }
1334 1371
1335 if(cr30 & 0x01) { /* the settings can be interrogated later ... */ 1372 if (cr30 & 0x01) { /* the settings can be interrogated later ... */
1336 while((superios[i].io!= 0) && (i<NR_SUPERIOS)) 1373 s = find_free_superio();
1337 i++; 1374 if (s == NULL)
1338 if(i==NR_SUPERIOS)
1339 printk(KERN_INFO "Super-IO: too many chips!\n"); 1375 printk(KERN_INFO "Super-IO: too many chips!\n");
1340 else { 1376 else {
1341 superios[i].io = (cr60<<8)|cr61; 1377 s->io = (cr60 << 8) | cr61;
1342 superios[i].irq = cr70&0x0f; 1378 s->irq = cr70 & 0x0f;
1343 superios[i].dma = (((cr74 & 0x07) > 3) ? 1379 s->dma = (((cr74 & 0x07) > 3) ?
1344 PARPORT_DMA_NONE : (cr74 & 0x07)); 1380 PARPORT_DMA_NONE : (cr74 & 0x07));
1345 } 1381 }
1346 } 1382 }
1347} 1383}
1348 1384
1349static void __devinit decode_winbond(int efer, int key, int devid, int devrev, int oldid) 1385static void __devinit decode_winbond(int efer, int key, int devid,
1386 int devrev, int oldid)
1350{ 1387{
1351 const char *type = "unknown"; 1388 const char *type = "unknown";
1352 int id,progif=2; 1389 int id, progif = 2;
1353 1390
1354 if (devid == devrev) 1391 if (devid == devrev)
1355 /* simple heuristics, we happened to read some 1392 /* simple heuristics, we happened to read some
1356 non-winbond register */ 1393 non-winbond register */
1357 return; 1394 return;
1358 1395
1359 id=(devid<<8) | devrev; 1396 id = (devid << 8) | devrev;
1360 1397
1361 /* Values are from public data sheets pdf files, I can just 1398 /* Values are from public data sheets pdf files, I can just
1362 confirm 83977TF is correct :-) */ 1399 confirm 83977TF is correct :-) */
1363 if (id == 0x9771) type="83977F/AF"; 1400 if (id == 0x9771)
1364 else if (id == 0x9773) type="83977TF / SMSC 97w33x/97w34x"; 1401 type = "83977F/AF";
1365 else if (id == 0x9774) type="83977ATF"; 1402 else if (id == 0x9773)
1366 else if ((id & ~0x0f) == 0x5270) type="83977CTF / SMSC 97w36x"; 1403 type = "83977TF / SMSC 97w33x/97w34x";
1367 else if ((id & ~0x0f) == 0x52f0) type="83977EF / SMSC 97w35x"; 1404 else if (id == 0x9774)
1368 else if ((id & ~0x0f) == 0x5210) type="83627"; 1405 type = "83977ATF";
1369 else if ((id & ~0x0f) == 0x6010) type="83697HF"; 1406 else if ((id & ~0x0f) == 0x5270)
1370 else if ((oldid &0x0f ) == 0x0a) { type="83877F"; progif=1;} 1407 type = "83977CTF / SMSC 97w36x";
1371 else if ((oldid &0x0f ) == 0x0b) { type="83877AF"; progif=1;} 1408 else if ((id & ~0x0f) == 0x52f0)
1372 else if ((oldid &0x0f ) == 0x0c) { type="83877TF"; progif=1;} 1409 type = "83977EF / SMSC 97w35x";
1373 else if ((oldid &0x0f ) == 0x0d) { type="83877ATF"; progif=1;} 1410 else if ((id & ~0x0f) == 0x5210)
1374 else progif=0; 1411 type = "83627";
1412 else if ((id & ~0x0f) == 0x6010)
1413 type = "83697HF";
1414 else if ((oldid & 0x0f) == 0x0a) {
1415 type = "83877F";
1416 progif = 1;
1417 } else if ((oldid & 0x0f) == 0x0b) {
1418 type = "83877AF";
1419 progif = 1;
1420 } else if ((oldid & 0x0f) == 0x0c) {
1421 type = "83877TF";
1422 progif = 1;
1423 } else if ((oldid & 0x0f) == 0x0d) {
1424 type = "83877ATF";
1425 progif = 1;
1426 } else
1427 progif = 0;
1375 1428
1376 if (verbose_probing) 1429 if (verbose_probing)
1377 printk(KERN_INFO "Winbond chip at EFER=0x%x key=0x%02x " 1430 printk(KERN_INFO "Winbond chip at EFER=0x%x key=0x%02x "
1378 "devid=%02x devrev=%02x oldid=%02x type=%s\n", 1431 "devid=%02x devrev=%02x oldid=%02x type=%s\n",
1379 efer, key, devid, devrev, oldid, type); 1432 efer, key, devid, devrev, oldid, type);
1380 1433
1381 if (progif == 2) 1434 if (progif == 2)
1382 show_parconfig_winbond(efer,key); 1435 show_parconfig_winbond(efer, key);
1383} 1436}
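The reworked if/else chain in decode_winbond() could equally be table-driven; a hypothetical sketch (ids, masks, and names copied from the diff, the table layout itself is only an illustration, and the oldid-based 83877x cases are omitted for brevity):

#include <stdio.h>

struct winbond_id {
	int id, mask;
	const char *name;
};

static const struct winbond_id winbond_ids[] = {
	{ 0x9771, 0xffff, "83977F/AF" },
	{ 0x9773, 0xffff, "83977TF / SMSC 97w33x/97w34x" },
	{ 0x9774, 0xffff, "83977ATF" },
	{ 0x5270, 0xfff0, "83977CTF / SMSC 97w36x" },
	{ 0x52f0, 0xfff0, "83977EF / SMSC 97w35x" },
	{ 0x5210, 0xfff0, "83627" },
	{ 0x6010, 0xfff0, "83697HF" },
};

static const char *winbond_name(int devid, int devrev)
{
	int id = (devid << 8) | devrev;	/* same id composition as the diff */
	size_t i;

	for (i = 0; i < sizeof(winbond_ids) / sizeof(winbond_ids[0]); i++)
		if ((id & winbond_ids[i].mask) == winbond_ids[i].id)
			return winbond_ids[i].name;
	return "unknown";
}

int main(void)
{
	printf("%s\n", winbond_name(0x97, 0x73));	/* -> 83977TF ... */
	return 0;
}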
1384 1437
1385static void __devinit decode_smsc(int efer, int key, int devid, int devrev) 1438static void __devinit decode_smsc(int efer, int key, int devid, int devrev)
1386{ 1439{
1387 const char *type = "unknown"; 1440 const char *type = "unknown";
1388 void (*func)(int io, int key); 1441 void (*func)(int io, int key);
1389 int id; 1442 int id;
1390 1443
1391 if (devid == devrev) 1444 if (devid == devrev)
1392 /* simple heuristics, we happened to read some 1445 /* simple heuristics, we happened to read some
1393 non-smsc register */ 1446 non-smsc register */
1394 return; 1447 return;
1395 1448
1396 func=NULL; 1449 func = NULL;
1397 id=(devid<<8) | devrev; 1450 id = (devid << 8) | devrev;
1398 1451
1399 if (id==0x0302) {type="37c669"; func=show_parconfig_smsc37c669;} 1452 if (id == 0x0302) {
1400 else if (id==0x6582) type="37c665IR"; 1453 type = "37c669";
1401 else if (devid==0x65) type="37c665GT"; 1454 func = show_parconfig_smsc37c669;
1402 else if (devid==0x66) type="37c666GT"; 1455 } else if (id == 0x6582)
1456 type = "37c665IR";
1457 else if (devid == 0x65)
1458 type = "37c665GT";
1459 else if (devid == 0x66)
1460 type = "37c666GT";
1403 1461
1404 if (verbose_probing) 1462 if (verbose_probing)
1405 printk(KERN_INFO "SMSC chip at EFER=0x%x " 1463 printk(KERN_INFO "SMSC chip at EFER=0x%x "
@@ -1407,138 +1465,138 @@ static void __devinit decode_smsc(int efer, int key, int devid, int devrev)
1407 efer, key, devid, devrev, type); 1465 efer, key, devid, devrev, type);
1408 1466
1409 if (func) 1467 if (func)
1410 func(efer,key); 1468 func(efer, key);
1411} 1469}
1412 1470
1413 1471
1414static void __devinit winbond_check(int io, int key) 1472static void __devinit winbond_check(int io, int key)
1415{ 1473{
1416 int devid,devrev,oldid,x_devid,x_devrev,x_oldid; 1474 int devid, devrev, oldid, x_devid, x_devrev, x_oldid;
1417 1475
1418 if (!request_region(io, 3, __func__)) 1476 if (!request_region(io, 3, __func__))
1419 return; 1477 return;
1420 1478
1421 /* First probe without key */ 1479 /* First probe without key */
1422 outb(0x20,io); 1480 outb(0x20, io);
1423 x_devid=inb(io+1); 1481 x_devid = inb(io + 1);
1424 outb(0x21,io); 1482 outb(0x21, io);
1425 x_devrev=inb(io+1); 1483 x_devrev = inb(io + 1);
1426 outb(0x09,io); 1484 outb(0x09, io);
1427 x_oldid=inb(io+1); 1485 x_oldid = inb(io + 1);
1428 1486
1429 outb(key,io); 1487 outb(key, io);
1430 outb(key,io); /* Write Magic Sequence to EFER, extended 1488 outb(key, io); /* Write Magic Sequence to EFER, extended
1431				   function enable register */	1489				   function enable register */
1432 outb(0x20,io); /* Write EFIR, extended function index register */ 1490 outb(0x20, io); /* Write EFIR, extended function index register */
1433 devid=inb(io+1); /* Read EFDR, extended function data register */ 1491 devid = inb(io + 1); /* Read EFDR, extended function data register */
1434 outb(0x21,io); 1492 outb(0x21, io);
1435 devrev=inb(io+1); 1493 devrev = inb(io + 1);
1436 outb(0x09,io); 1494 outb(0x09, io);
1437 oldid=inb(io+1); 1495 oldid = inb(io + 1);
1438 outb(0xaa,io); /* Magic Seal */ 1496 outb(0xaa, io); /* Magic Seal */
1439 1497
1440 if ((x_devid == devid) && (x_devrev == devrev) && (x_oldid == oldid)) 1498 if ((x_devid == devid) && (x_devrev == devrev) && (x_oldid == oldid))
1441 goto out; /* protection against false positives */ 1499 goto out; /* protection against false positives */
1442 1500
1443 decode_winbond(io,key,devid,devrev,oldid); 1501 decode_winbond(io, key, devid, devrev, oldid);
1444out: 1502out:
1445 release_region(io, 3); 1503 release_region(io, 3);
1446} 1504}
1447 1505
1448static void __devinit winbond_check2(int io,int key) 1506static void __devinit winbond_check2(int io, int key)
1449{ 1507{
1450 int devid,devrev,oldid,x_devid,x_devrev,x_oldid; 1508 int devid, devrev, oldid, x_devid, x_devrev, x_oldid;
1451 1509
1452 if (!request_region(io, 3, __func__)) 1510 if (!request_region(io, 3, __func__))
1453 return; 1511 return;
1454 1512
1455 /* First probe without the key */ 1513 /* First probe without the key */
1456 outb(0x20,io+2); 1514 outb(0x20, io + 2);
1457 x_devid=inb(io+2); 1515 x_devid = inb(io + 2);
1458 outb(0x21,io+1); 1516 outb(0x21, io + 1);
1459 x_devrev=inb(io+2); 1517 x_devrev = inb(io + 2);
1460 outb(0x09,io+1); 1518 outb(0x09, io + 1);
1461 x_oldid=inb(io+2); 1519 x_oldid = inb(io + 2);
1462 1520
1463 outb(key,io); /* Write Magic Byte to EFER, extended 1521 outb(key, io); /* Write Magic Byte to EFER, extended
1464				   function enable register */	1522				   function enable register */
1465 outb(0x20,io+2); /* Write EFIR, extended function index register */ 1523 outb(0x20, io + 2); /* Write EFIR, extended function index register */
1466 devid=inb(io+2); /* Read EFDR, extended function data register */ 1524 devid = inb(io + 2); /* Read EFDR, extended function data register */
1467 outb(0x21,io+1); 1525 outb(0x21, io + 1);
1468 devrev=inb(io+2); 1526 devrev = inb(io + 2);
1469 outb(0x09,io+1); 1527 outb(0x09, io + 1);
1470 oldid=inb(io+2); 1528 oldid = inb(io + 2);
1471 outb(0xaa,io); /* Magic Seal */ 1529 outb(0xaa, io); /* Magic Seal */
1472 1530
1473 if ((x_devid == devid) && (x_devrev == devrev) && (x_oldid == oldid)) 1531 if (x_devid == devid && x_devrev == devrev && x_oldid == oldid)
1474 goto out; /* protection against false positives */ 1532 goto out; /* protection against false positives */
1475 1533
1476 decode_winbond(io,key,devid,devrev,oldid); 1534 decode_winbond(io, key, devid, devrev, oldid);
1477out: 1535out:
1478 release_region(io, 3); 1536 release_region(io, 3);
1479} 1537}
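Both winbond_check variants (and smsc_check below) guard against false positives the same way: read the ID registers once without the key and once after writing it, and discard the result if nothing changed. A standalone sketch of that guard, with register access stubbed out for illustration:

#include <stdio.h>

/* Fake ID read: returns bus junk while locked, a real ID once unlocked. */
static int read_id(int io, int unlocked)
{
	(void)io;
	return unlocked ? 0x9773 : 0xff;
}

static void probe(int io, int key)
{
	int x_id, id;

	x_id = read_id(io, 0);		/* probe without the key */
	/* ... write the magic key to io here ... */
	id = read_id(io, 1);		/* probe again, now unlocked */
	/* ... write 0xaa (magic seal) to io here ... */

	if (x_id == id) {		/* nothing reacted to the key */
		printf("0x%x: no chip (possible false positive)\n", io);
		return;
	}
	printf("0x%x: chip id 0x%x (key 0x%02x worked)\n", io, id, key);
}

int main(void)
{
	probe(0x3f0, 0x87);
	return 0;
}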
1480 1538
1481static void __devinit smsc_check(int io, int key) 1539static void __devinit smsc_check(int io, int key)
1482{ 1540{
1483 int id,rev,oldid,oldrev,x_id,x_rev,x_oldid,x_oldrev; 1541 int id, rev, oldid, oldrev, x_id, x_rev, x_oldid, x_oldrev;
1484 1542
1485 if (!request_region(io, 3, __func__)) 1543 if (!request_region(io, 3, __func__))
1486 return; 1544 return;
1487 1545
1488 /* First probe without the key */ 1546 /* First probe without the key */
1489 outb(0x0d,io); 1547 outb(0x0d, io);
1490 x_oldid=inb(io+1); 1548 x_oldid = inb(io + 1);
1491 outb(0x0e,io); 1549 outb(0x0e, io);
1492 x_oldrev=inb(io+1); 1550 x_oldrev = inb(io + 1);
1493 outb(0x20,io); 1551 outb(0x20, io);
1494 x_id=inb(io+1); 1552 x_id = inb(io + 1);
1495 outb(0x21,io); 1553 outb(0x21, io);
1496 x_rev=inb(io+1); 1554 x_rev = inb(io + 1);
1497 1555
1498 outb(key,io); 1556 outb(key, io);
1499 outb(key,io); /* Write Magic Sequence to EFER, extended 1557 outb(key, io); /* Write Magic Sequence to EFER, extended
1500				   function enable register */	1558				   function enable register */
1501 outb(0x0d,io); /* Write EFIR, extended function index register */ 1559 outb(0x0d, io); /* Write EFIR, extended function index register */
1502 oldid=inb(io+1); /* Read EFDR, extended function data register */ 1560 oldid = inb(io + 1); /* Read EFDR, extended function data register */
1503 outb(0x0e,io); 1561 outb(0x0e, io);
1504 oldrev=inb(io+1); 1562 oldrev = inb(io + 1);
1505 outb(0x20,io); 1563 outb(0x20, io);
1506 id=inb(io+1); 1564 id = inb(io + 1);
1507 outb(0x21,io); 1565 outb(0x21, io);
1508 rev=inb(io+1); 1566 rev = inb(io + 1);
1509 outb(0xaa,io); /* Magic Seal */ 1567 outb(0xaa, io); /* Magic Seal */
1510 1568
1511 if ((x_id == id) && (x_oldrev == oldrev) && 1569 if (x_id == id && x_oldrev == oldrev &&
1512 (x_oldid == oldid) && (x_rev == rev)) 1570 x_oldid == oldid && x_rev == rev)
1513 goto out; /* protection against false positives */ 1571 goto out; /* protection against false positives */
1514 1572
1515 decode_smsc(io,key,oldid,oldrev); 1573 decode_smsc(io, key, oldid, oldrev);
1516out: 1574out:
1517 release_region(io, 3); 1575 release_region(io, 3);
1518} 1576}
1519 1577
1520 1578
1521static void __devinit detect_and_report_winbond (void) 1579static void __devinit detect_and_report_winbond(void)
1522{ 1580{
1523 if (verbose_probing) 1581 if (verbose_probing)
1524 printk(KERN_DEBUG "Winbond Super-IO detection, now testing ports 3F0,370,250,4E,2E ...\n"); 1582 printk(KERN_DEBUG "Winbond Super-IO detection, now testing ports 3F0,370,250,4E,2E ...\n");
1525 winbond_check(0x3f0,0x87); 1583 winbond_check(0x3f0, 0x87);
1526 winbond_check(0x370,0x87); 1584 winbond_check(0x370, 0x87);
1527	winbond_check(0x2e ,0x87);	1585	winbond_check(0x2e, 0x87);
1528	winbond_check(0x4e ,0x87);	1586	winbond_check(0x4e, 0x87);
1529 winbond_check(0x3f0,0x86); 1587 winbond_check(0x3f0, 0x86);
1530 winbond_check2(0x250,0x88); 1588 winbond_check2(0x250, 0x88);
1531 winbond_check2(0x250,0x89); 1589 winbond_check2(0x250, 0x89);
1532} 1590}
1533 1591
1534static void __devinit detect_and_report_smsc (void) 1592static void __devinit detect_and_report_smsc(void)
1535{ 1593{
1536 if (verbose_probing) 1594 if (verbose_probing)
1537		printk(KERN_DEBUG "SMSC Super-IO detection, now testing Ports 2F0, 370 ...\n");	1595		printk(KERN_DEBUG "SMSC Super-IO detection, now testing ports 3F0, 370 ...\n");
1538 smsc_check(0x3f0,0x55); 1596 smsc_check(0x3f0, 0x55);
1539 smsc_check(0x370,0x55); 1597 smsc_check(0x370, 0x55);
1540 smsc_check(0x3f0,0x44); 1598 smsc_check(0x3f0, 0x44);
1541 smsc_check(0x370,0x44); 1599 smsc_check(0x370, 0x44);
1542} 1600}
1543 1601
1544static void __devinit detect_and_report_it87(void) 1602static void __devinit detect_and_report_it87(void)
@@ -1573,34 +1631,39 @@ static void __devinit detect_and_report_it87(void)
1573} 1631}
1574#endif /* CONFIG_PARPORT_PC_SUPERIO */ 1632#endif /* CONFIG_PARPORT_PC_SUPERIO */
1575 1633
1576static int get_superio_dma (struct parport *p) 1634static struct superio_struct *find_superio(struct parport *p)
1577{ 1635{
1578 int i=0; 1636 int i;
1579 while( (superios[i].io != p->base) && (i<NR_SUPERIOS)) 1637 for (i = 0; i < NR_SUPERIOS; i++)
1580		i++;	1638		if (superios[i].io == p->base)
1581 if (i!=NR_SUPERIOS) 1639 return &superios[i];
1582 return superios[i].dma; 1640 return NULL;
1641}
1642
1643static int get_superio_dma(struct parport *p)
1644{
1645 struct superio_struct *s = find_superio(p);
1646 if (s)
1647 return s->dma;
1583 return PARPORT_DMA_NONE; 1648 return PARPORT_DMA_NONE;
1584} 1649}
1585 1650
1586static int get_superio_irq (struct parport *p) 1651static int get_superio_irq(struct parport *p)
1587{ 1652{
1588 int i=0; 1653 struct superio_struct *s = find_superio(p);
1589 while( (superios[i].io != p->base) && (i<NR_SUPERIOS)) 1654 if (s)
1590 i++; 1655 return s->irq;
1591 if (i!=NR_SUPERIOS) 1656 return PARPORT_IRQ_NONE;
1592 return superios[i].irq;
1593 return PARPORT_IRQ_NONE;
1594} 1657}
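With the match condition written as io == p->base, the lookup helper and its two getters reduce to this standalone sketch (types, table size, and the "none" constants assumed from the diff):

#include <stdio.h>
#include <stddef.h>

#define NR_SUPERIOS 3
#define PARPORT_DMA_NONE (-1)

struct superio_struct { int io, irq, dma; };

static struct superio_struct superios[NR_SUPERIOS] = {
	{ 0x378, 7, 3 },
};

static struct superio_struct *find_superio(int base)
{
	int i;
	for (i = 0; i < NR_SUPERIOS; i++)
		if (superios[i].io == base)	/* match, not mismatch */
			return &superios[i];
	return NULL;
}

static int get_superio_dma(int base)
{
	struct superio_struct *s = find_superio(base);
	return s ? s->dma : PARPORT_DMA_NONE;
}

int main(void)
{
	printf("dma for 0x378: %d\n", get_superio_dma(0x378));
	printf("dma for 0x278: %d\n", get_superio_dma(0x278));
	return 0;
}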
1595 1658
1596 1659
1597/* --- Mode detection ------------------------------------- */ 1660/* --- Mode detection ------------------------------------- */
1598 1661
1599/* 1662/*
1600 * Checks for port existence, all ports support SPP MODE 1663 * Checks for port existence, all ports support SPP MODE
1601 * Returns: 1664 * Returns:
1602 * 0 : No parallel port at this address 1665 * 0 : No parallel port at this address
1603 * PARPORT_MODE_PCSPP : SPP port detected 1666 * PARPORT_MODE_PCSPP : SPP port detected
1604 * (if the user specified an ioport himself, 1667 * (if the user specified an ioport himself,
1605 * this shall always be the case!) 1668 * this shall always be the case!)
1606 * 1669 *
@@ -1610,7 +1673,7 @@ static int parport_SPP_supported(struct parport *pb)
1610 unsigned char r, w; 1673 unsigned char r, w;
1611 1674
1612 /* 1675 /*
1613	 * first clear any pending EPP timeout 1676	 * first clear any pending EPP timeout
1614 * I (sailer@ife.ee.ethz.ch) have an SMSC chipset 1677 * I (sailer@ife.ee.ethz.ch) have an SMSC chipset
1615 * that does not even respond to SPP cycles if an EPP 1678 * that does not even respond to SPP cycles if an EPP
1616 * timeout is pending 1679 * timeout is pending
@@ -1619,19 +1682,19 @@ static int parport_SPP_supported(struct parport *pb)
1619 1682
1620 /* Do a simple read-write test to make sure the port exists. */ 1683 /* Do a simple read-write test to make sure the port exists. */
1621 w = 0xc; 1684 w = 0xc;
1622 outb (w, CONTROL (pb)); 1685 outb(w, CONTROL(pb));
1623 1686
1624 /* Is there a control register that we can read from? Some 1687 /* Is there a control register that we can read from? Some
1625 * ports don't allow reads, so read_control just returns a 1688 * ports don't allow reads, so read_control just returns a
1626 * software copy. Some ports _do_ allow reads, so bypass the 1689 * software copy. Some ports _do_ allow reads, so bypass the
1627 * software copy here. In addition, some bits aren't 1690 * software copy here. In addition, some bits aren't
1628 * writable. */ 1691 * writable. */
1629 r = inb (CONTROL (pb)); 1692 r = inb(CONTROL(pb));
1630 if ((r & 0xf) == w) { 1693 if ((r & 0xf) == w) {
1631 w = 0xe; 1694 w = 0xe;
1632 outb (w, CONTROL (pb)); 1695 outb(w, CONTROL(pb));
1633 r = inb (CONTROL (pb)); 1696 r = inb(CONTROL(pb));
1634 outb (0xc, CONTROL (pb)); 1697 outb(0xc, CONTROL(pb));
1635 if ((r & 0xf) == w) 1698 if ((r & 0xf) == w)
1636 return PARPORT_MODE_PCSPP; 1699 return PARPORT_MODE_PCSPP;
1637 } 1700 }
@@ -1639,18 +1702,18 @@ static int parport_SPP_supported(struct parport *pb)
1639 if (user_specified) 1702 if (user_specified)
1640 /* That didn't work, but the user thinks there's a 1703 /* That didn't work, but the user thinks there's a
1641 * port here. */ 1704 * port here. */
1642 printk (KERN_INFO "parport 0x%lx (WARNING): CTR: " 1705 printk(KERN_INFO "parport 0x%lx (WARNING): CTR: "
1643 "wrote 0x%02x, read 0x%02x\n", pb->base, w, r); 1706 "wrote 0x%02x, read 0x%02x\n", pb->base, w, r);
1644 1707
1645 /* Try the data register. The data lines aren't tri-stated at 1708 /* Try the data register. The data lines aren't tri-stated at
1646 * this stage, so we expect back what we wrote. */ 1709 * this stage, so we expect back what we wrote. */
1647 w = 0xaa; 1710 w = 0xaa;
1648 parport_pc_write_data (pb, w); 1711 parport_pc_write_data(pb, w);
1649 r = parport_pc_read_data (pb); 1712 r = parport_pc_read_data(pb);
1650 if (r == w) { 1713 if (r == w) {
1651 w = 0x55; 1714 w = 0x55;
1652 parport_pc_write_data (pb, w); 1715 parport_pc_write_data(pb, w);
1653 r = parport_pc_read_data (pb); 1716 r = parport_pc_read_data(pb);
1654 if (r == w) 1717 if (r == w)
1655 return PARPORT_MODE_PCSPP; 1718 return PARPORT_MODE_PCSPP;
1656 } 1719 }
@@ -1658,9 +1721,9 @@ static int parport_SPP_supported(struct parport *pb)
1658 if (user_specified) { 1721 if (user_specified) {
1659 /* Didn't work, but the user is convinced this is the 1722 /* Didn't work, but the user is convinced this is the
1660 * place. */ 1723 * place. */
1661 printk (KERN_INFO "parport 0x%lx (WARNING): DATA: " 1724 printk(KERN_INFO "parport 0x%lx (WARNING): DATA: "
1662 "wrote 0x%02x, read 0x%02x\n", pb->base, w, r); 1725 "wrote 0x%02x, read 0x%02x\n", pb->base, w, r);
1663 printk (KERN_INFO "parport 0x%lx: You gave this address, " 1726 printk(KERN_INFO "parport 0x%lx: You gave this address, "
1664 "but there is probably no parallel port there!\n", 1727 "but there is probably no parallel port there!\n",
1665 pb->base); 1728 pb->base);
1666 } 1729 }
@@ -1691,33 +1754,33 @@ static int parport_ECR_present(struct parport *pb)
1691 struct parport_pc_private *priv = pb->private_data; 1754 struct parport_pc_private *priv = pb->private_data;
1692 unsigned char r = 0xc; 1755 unsigned char r = 0xc;
1693 1756
1694 outb (r, CONTROL (pb)); 1757 outb(r, CONTROL(pb));
1695 if ((inb (ECONTROL (pb)) & 0x3) == (r & 0x3)) { 1758 if ((inb(ECONTROL(pb)) & 0x3) == (r & 0x3)) {
1696 outb (r ^ 0x2, CONTROL (pb)); /* Toggle bit 1 */ 1759 outb(r ^ 0x2, CONTROL(pb)); /* Toggle bit 1 */
1697 1760
1698 r = inb (CONTROL (pb)); 1761 r = inb(CONTROL(pb));
1699 if ((inb (ECONTROL (pb)) & 0x2) == (r & 0x2)) 1762 if ((inb(ECONTROL(pb)) & 0x2) == (r & 0x2))
1700 goto no_reg; /* Sure that no ECR register exists */ 1763 goto no_reg; /* Sure that no ECR register exists */
1701 } 1764 }
1702 1765
1703 if ((inb (ECONTROL (pb)) & 0x3 ) != 0x1) 1766 if ((inb(ECONTROL(pb)) & 0x3) != 0x1)
1704 goto no_reg; 1767 goto no_reg;
1705 1768
1706 ECR_WRITE (pb, 0x34); 1769 ECR_WRITE(pb, 0x34);
1707 if (inb (ECONTROL (pb)) != 0x35) 1770 if (inb(ECONTROL(pb)) != 0x35)
1708 goto no_reg; 1771 goto no_reg;
1709 1772
1710 priv->ecr = 1; 1773 priv->ecr = 1;
1711 outb (0xc, CONTROL (pb)); 1774 outb(0xc, CONTROL(pb));
1712 1775
1713 /* Go to mode 000 */ 1776 /* Go to mode 000 */
1714 frob_set_mode (pb, ECR_SPP); 1777 frob_set_mode(pb, ECR_SPP);
1715 1778
1716 return 1; 1779 return 1;
1717 1780
1718 no_reg: 1781 no_reg:
1719 outb (0xc, CONTROL (pb)); 1782 outb(0xc, CONTROL(pb));
1720 return 0; 1783 return 0;
1721} 1784}
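The second half of the test above relies on two documented ECR properties: whatever the mode bits hold, bits 0..1 read back as 01, and after writing 0x34 the register reads 0x35 because bit 0 is read-only 1. A standalone sketch of that check against a faked register pair (has_ecr flips between the two outcomes):

#include <stdio.h>

static unsigned char fake_ctrl;		/* software CONTROL copy */
static unsigned char fake_ecr;		/* backing store for the ECR */
static int has_ecr = 1;			/* set to 0 to see the miss */

static unsigned char read_ecr(void)
{
	if (!has_ecr)
		return fake_ctrl;	/* address just aliases CONTROL */
	return (fake_ecr & ~0x03) | 0x01;	/* bits 0..1 read as 01 */
}

int main(void)
{
	fake_ctrl = 0xc;
	if ((read_ecr() & 0x03) != 0x01) {
		printf("no ECR\n");
		return 0;
	}
	fake_ecr = 0x34;		/* ECR_WRITE(pb, 0x34) */
	printf("%s\n", read_ecr() == 0x35 ? "ECR present" : "no ECR");
	return 0;
}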
1722 1785
1723#ifdef CONFIG_PARPORT_1284 1786#ifdef CONFIG_PARPORT_1284
@@ -1727,7 +1790,7 @@ static int parport_ECR_present(struct parport *pb)
1727 * allows us to read data from the data lines. In theory we would get back 1790 * allows us to read data from the data lines. In theory we would get back
1728 * 0xff but any peripheral attached to the port may drag some or all of the 1791 * 0xff but any peripheral attached to the port may drag some or all of the
1729 * lines down to zero. So if we get back anything that isn't the contents 1792 * lines down to zero. So if we get back anything that isn't the contents
1730 * of the data register we deem PS/2 support to be present. 1793 * of the data register we deem PS/2 support to be present.
1731 * 1794 *
1732 * Some SPP ports have "half PS/2" ability - you can't turn off the line 1795 * Some SPP ports have "half PS/2" ability - you can't turn off the line
1733 * drivers, but an external peripheral with sufficiently beefy drivers of 1796 * drivers, but an external peripheral with sufficiently beefy drivers of
@@ -1735,26 +1798,28 @@ static int parport_ECR_present(struct parport *pb)
1735 * where they can then be read back as normal. Ports with this property 1798 * where they can then be read back as normal. Ports with this property
1736 * and the right type of device attached are likely to fail the SPP test, 1799 * and the right type of device attached are likely to fail the SPP test,
1737 * (as they will appear to have stuck bits) and so the fact that they might 1800 * (as they will appear to have stuck bits) and so the fact that they might
1738 * be misdetected here is rather academic. 1801 * be misdetected here is rather academic.
1739 */ 1802 */
1740 1803
1741static int parport_PS2_supported(struct parport *pb) 1804static int parport_PS2_supported(struct parport *pb)
1742{ 1805{
1743 int ok = 0; 1806 int ok = 0;
1744 1807
1745 clear_epp_timeout(pb); 1808 clear_epp_timeout(pb);
1746 1809
1747 /* try to tri-state the buffer */ 1810 /* try to tri-state the buffer */
1748 parport_pc_data_reverse (pb); 1811 parport_pc_data_reverse(pb);
1749 1812
1750 parport_pc_write_data(pb, 0x55); 1813 parport_pc_write_data(pb, 0x55);
1751 if (parport_pc_read_data(pb) != 0x55) ok++; 1814 if (parport_pc_read_data(pb) != 0x55)
1815 ok++;
1752 1816
1753 parport_pc_write_data(pb, 0xaa); 1817 parport_pc_write_data(pb, 0xaa);
1754 if (parport_pc_read_data(pb) != 0xaa) ok++; 1818 if (parport_pc_read_data(pb) != 0xaa)
1819 ok++;
1755 1820
1756 /* cancel input mode */ 1821 /* cancel input mode */
1757 parport_pc_data_forward (pb); 1822 parport_pc_data_forward(pb);
1758 1823
1759 if (ok) { 1824 if (ok) {
1760 pb->modes |= PARPORT_MODE_TRISTATE; 1825 pb->modes |= PARPORT_MODE_TRISTATE;
@@ -1773,68 +1838,68 @@ static int parport_ECP_supported(struct parport *pb)
1773 int config, configb; 1838 int config, configb;
1774 int pword; 1839 int pword;
1775 struct parport_pc_private *priv = pb->private_data; 1840 struct parport_pc_private *priv = pb->private_data;
1776 /* Translate ECP intrLine to ISA irq value */ 1841 /* Translate ECP intrLine to ISA irq value */
1777 static const int intrline[]= { 0, 7, 9, 10, 11, 14, 15, 5 }; 1842 static const int intrline[] = { 0, 7, 9, 10, 11, 14, 15, 5 };
1778 1843
1779 /* If there is no ECR, we have no hope of supporting ECP. */ 1844 /* If there is no ECR, we have no hope of supporting ECP. */
1780 if (!priv->ecr) 1845 if (!priv->ecr)
1781 return 0; 1846 return 0;
1782 1847
1783 /* Find out FIFO depth */ 1848 /* Find out FIFO depth */
1784 ECR_WRITE (pb, ECR_SPP << 5); /* Reset FIFO */ 1849 ECR_WRITE(pb, ECR_SPP << 5); /* Reset FIFO */
1785 ECR_WRITE (pb, ECR_TST << 5); /* TEST FIFO */ 1850 ECR_WRITE(pb, ECR_TST << 5); /* TEST FIFO */
1786 for (i=0; i < 1024 && !(inb (ECONTROL (pb)) & 0x02); i++) 1851 for (i = 0; i < 1024 && !(inb(ECONTROL(pb)) & 0x02); i++)
1787 outb (0xaa, FIFO (pb)); 1852 outb(0xaa, FIFO(pb));
1788 1853
1789 /* 1854 /*
1790	 * The LGS chipset has an ECR register, but 1855	 * The LGS chipset has an ECR register, but
1791	 * it does not support ECP or FIFO mode 1856	 * it does not support ECP or FIFO mode
1792 */ 1857 */
1793 if (i == 1024) { 1858 if (i == 1024) {
1794 ECR_WRITE (pb, ECR_SPP << 5); 1859 ECR_WRITE(pb, ECR_SPP << 5);
1795 return 0; 1860 return 0;
1796 } 1861 }
1797 1862
1798 priv->fifo_depth = i; 1863 priv->fifo_depth = i;
1799 if (verbose_probing) 1864 if (verbose_probing)
1800 printk (KERN_DEBUG "0x%lx: FIFO is %d bytes\n", pb->base, i); 1865 printk(KERN_DEBUG "0x%lx: FIFO is %d bytes\n", pb->base, i);
1801 1866
1802 /* Find out writeIntrThreshold */ 1867 /* Find out writeIntrThreshold */
1803 frob_econtrol (pb, 1<<2, 1<<2); 1868 frob_econtrol(pb, 1<<2, 1<<2);
1804 frob_econtrol (pb, 1<<2, 0); 1869 frob_econtrol(pb, 1<<2, 0);
1805 for (i = 1; i <= priv->fifo_depth; i++) { 1870 for (i = 1; i <= priv->fifo_depth; i++) {
1806 inb (FIFO (pb)); 1871 inb(FIFO(pb));
1807 udelay (50); 1872 udelay(50);
1808 if (inb (ECONTROL (pb)) & (1<<2)) 1873 if (inb(ECONTROL(pb)) & (1<<2))
1809 break; 1874 break;
1810 } 1875 }
1811 1876
1812 if (i <= priv->fifo_depth) { 1877 if (i <= priv->fifo_depth) {
1813 if (verbose_probing) 1878 if (verbose_probing)
1814 printk (KERN_DEBUG "0x%lx: writeIntrThreshold is %d\n", 1879 printk(KERN_DEBUG "0x%lx: writeIntrThreshold is %d\n",
1815 pb->base, i); 1880 pb->base, i);
1816 } else 1881 } else
1817 /* Number of bytes we know we can write if we get an 1882 /* Number of bytes we know we can write if we get an
1818 interrupt. */ 1883 interrupt. */
1819 i = 0; 1884 i = 0;
1820 1885
1821 priv->writeIntrThreshold = i; 1886 priv->writeIntrThreshold = i;
1822 1887
1823 /* Find out readIntrThreshold */ 1888 /* Find out readIntrThreshold */
1824 frob_set_mode (pb, ECR_PS2); /* Reset FIFO and enable PS2 */ 1889 frob_set_mode(pb, ECR_PS2); /* Reset FIFO and enable PS2 */
1825 parport_pc_data_reverse (pb); /* Must be in PS2 mode */ 1890 parport_pc_data_reverse(pb); /* Must be in PS2 mode */
1826 frob_set_mode (pb, ECR_TST); /* Test FIFO */ 1891 frob_set_mode(pb, ECR_TST); /* Test FIFO */
1827 frob_econtrol (pb, 1<<2, 1<<2); 1892 frob_econtrol(pb, 1<<2, 1<<2);
1828 frob_econtrol (pb, 1<<2, 0); 1893 frob_econtrol(pb, 1<<2, 0);
1829 for (i = 1; i <= priv->fifo_depth; i++) { 1894 for (i = 1; i <= priv->fifo_depth; i++) {
1830 outb (0xaa, FIFO (pb)); 1895 outb(0xaa, FIFO(pb));
1831 if (inb (ECONTROL (pb)) & (1<<2)) 1896 if (inb(ECONTROL(pb)) & (1<<2))
1832 break; 1897 break;
1833 } 1898 }
1834 1899
1835 if (i <= priv->fifo_depth) { 1900 if (i <= priv->fifo_depth) {
1836 if (verbose_probing) 1901 if (verbose_probing)
1837 printk (KERN_INFO "0x%lx: readIntrThreshold is %d\n", 1902 printk(KERN_INFO "0x%lx: readIntrThreshold is %d\n",
1838 pb->base, i); 1903 pb->base, i);
1839 } else 1904 } else
1840 /* Number of bytes we can read if we get an interrupt. */ 1905 /* Number of bytes we can read if we get an interrupt. */
@@ -1842,23 +1907,23 @@ static int parport_ECP_supported(struct parport *pb)
1842 1907
1843 priv->readIntrThreshold = i; 1908 priv->readIntrThreshold = i;
1844 1909
1845 ECR_WRITE (pb, ECR_SPP << 5); /* Reset FIFO */ 1910 ECR_WRITE(pb, ECR_SPP << 5); /* Reset FIFO */
1846 ECR_WRITE (pb, 0xf4); /* Configuration mode */ 1911 ECR_WRITE(pb, 0xf4); /* Configuration mode */
1847 config = inb (CONFIGA (pb)); 1912 config = inb(CONFIGA(pb));
1848 pword = (config >> 4) & 0x7; 1913 pword = (config >> 4) & 0x7;
1849 switch (pword) { 1914 switch (pword) {
1850 case 0: 1915 case 0:
1851 pword = 2; 1916 pword = 2;
1852 printk (KERN_WARNING "0x%lx: Unsupported pword size!\n", 1917 printk(KERN_WARNING "0x%lx: Unsupported pword size!\n",
1853 pb->base); 1918 pb->base);
1854 break; 1919 break;
1855 case 2: 1920 case 2:
1856 pword = 4; 1921 pword = 4;
1857 printk (KERN_WARNING "0x%lx: Unsupported pword size!\n", 1922 printk(KERN_WARNING "0x%lx: Unsupported pword size!\n",
1858 pb->base); 1923 pb->base);
1859 break; 1924 break;
1860 default: 1925 default:
1861 printk (KERN_WARNING "0x%lx: Unknown implementation ID\n", 1926 printk(KERN_WARNING "0x%lx: Unknown implementation ID\n",
1862 pb->base); 1927 pb->base);
1863 /* Assume 1 */ 1928 /* Assume 1 */
1864 case 1: 1929 case 1:
@@ -1867,28 +1932,29 @@ static int parport_ECP_supported(struct parport *pb)
1867 priv->pword = pword; 1932 priv->pword = pword;
1868 1933
1869 if (verbose_probing) { 1934 if (verbose_probing) {
1870 printk (KERN_DEBUG "0x%lx: PWord is %d bits\n", pb->base, 8 * pword); 1935 printk(KERN_DEBUG "0x%lx: PWord is %d bits\n",
1871 1936 pb->base, 8 * pword);
1872 printk (KERN_DEBUG "0x%lx: Interrupts are ISA-%s\n", pb->base, 1937
1938 printk(KERN_DEBUG "0x%lx: Interrupts are ISA-%s\n", pb->base,
1873 config & 0x80 ? "Level" : "Pulses"); 1939 config & 0x80 ? "Level" : "Pulses");
1874 1940
1875 configb = inb (CONFIGB (pb)); 1941 configb = inb(CONFIGB(pb));
1876 printk (KERN_DEBUG "0x%lx: ECP port cfgA=0x%02x cfgB=0x%02x\n", 1942 printk(KERN_DEBUG "0x%lx: ECP port cfgA=0x%02x cfgB=0x%02x\n",
1877 pb->base, config, configb); 1943 pb->base, config, configb);
1878 printk (KERN_DEBUG "0x%lx: ECP settings irq=", pb->base); 1944 printk(KERN_DEBUG "0x%lx: ECP settings irq=", pb->base);
1879 if ((configb >>3) & 0x07) 1945 if ((configb >> 3) & 0x07)
1880 printk("%d",intrline[(configb >>3) & 0x07]); 1946 printk("%d", intrline[(configb >> 3) & 0x07]);
1881 else 1947 else
1882 printk("<none or set by other means>"); 1948 printk("<none or set by other means>");
1883 printk (" dma="); 1949 printk(" dma=");
1884 if( (configb & 0x03 ) == 0x00) 1950 if ((configb & 0x03) == 0x00)
1885 printk("<none or set by other means>\n"); 1951 printk("<none or set by other means>\n");
1886 else 1952 else
1887 printk("%d\n",configb & 0x07); 1953 printk("%d\n", configb & 0x07);
1888 } 1954 }
1889 1955
1890 /* Go back to mode 000 */ 1956 /* Go back to mode 000 */
1891 frob_set_mode (pb, ECR_SPP); 1957 frob_set_mode(pb, ECR_SPP);
1892 1958
1893 return 1; 1959 return 1;
1894} 1960}
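The FIFO-depth probe at the top of parport_ECP_supported() works because TEST mode never drains the FIFO: writing until the full bit sets counts the depth, and hitting the 1024-iteration cap flags a chipset (e.g. LGS) whose ECR exists but whose FIFO does not. A runnable model with a fake 16-byte FIFO standing in for the hardware:

#include <stdio.h>

#define FAKE_DEPTH 16			/* pretend hardware depth */
static int fill;

static int fifo_full(void)
{
	return fill >= FAKE_DEPTH;	/* models ECONTROL bit 1 */
}

static void fifo_write(unsigned char b)
{
	(void)b;
	fill++;				/* TEST mode: nothing drains */
}

int main(void)
{
	int i;

	for (i = 0; i < 1024 && !fifo_full(); i++)
		fifo_write(0xaa);
	if (i == 1024)
		printf("ECR present but no usable FIFO (no ECP)\n");
	else
		printf("FIFO is %d bytes\n", i);
	return 0;
}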
@@ -1903,10 +1969,10 @@ static int parport_ECPPS2_supported(struct parport *pb)
1903 if (!priv->ecr) 1969 if (!priv->ecr)
1904 return 0; 1970 return 0;
1905 1971
1906 oecr = inb (ECONTROL (pb)); 1972 oecr = inb(ECONTROL(pb));
1907 ECR_WRITE (pb, ECR_PS2 << 5); 1973 ECR_WRITE(pb, ECR_PS2 << 5);
1908 result = parport_PS2_supported(pb); 1974 result = parport_PS2_supported(pb);
1909 ECR_WRITE (pb, oecr); 1975 ECR_WRITE(pb, oecr);
1910 return result; 1976 return result;
1911} 1977}
1912 1978
@@ -1930,16 +1996,15 @@ static int parport_EPP_supported(struct parport *pb)
1930 */ 1996 */
1931 1997
1932 /* If EPP timeout bit clear then EPP available */ 1998 /* If EPP timeout bit clear then EPP available */
1933 if (!clear_epp_timeout(pb)) { 1999 if (!clear_epp_timeout(pb))
1934 return 0; /* No way to clear timeout */ 2000 return 0; /* No way to clear timeout */
1935 }
1936 2001
1937 /* Check for Intel bug. */ 2002 /* Check for Intel bug. */
1938 if (priv->ecr) { 2003 if (priv->ecr) {
1939 unsigned char i; 2004 unsigned char i;
1940 for (i = 0x00; i < 0x80; i += 0x20) { 2005 for (i = 0x00; i < 0x80; i += 0x20) {
1941 ECR_WRITE (pb, i); 2006 ECR_WRITE(pb, i);
1942 if (clear_epp_timeout (pb)) { 2007 if (clear_epp_timeout(pb)) {
1943 /* Phony EPP in ECP. */ 2008 /* Phony EPP in ECP. */
1944 return 0; 2009 return 0;
1945 } 2010 }
@@ -1963,17 +2028,16 @@ static int parport_ECPEPP_supported(struct parport *pb)
1963 int result; 2028 int result;
1964 unsigned char oecr; 2029 unsigned char oecr;
1965 2030
1966 if (!priv->ecr) { 2031 if (!priv->ecr)
1967 return 0; 2032 return 0;
1968 }
1969 2033
1970 oecr = inb (ECONTROL (pb)); 2034 oecr = inb(ECONTROL(pb));
1971 /* Search for SMC style EPP+ECP mode */ 2035 /* Search for SMC style EPP+ECP mode */
1972 ECR_WRITE (pb, 0x80); 2036 ECR_WRITE(pb, 0x80);
1973 outb (0x04, CONTROL (pb)); 2037 outb(0x04, CONTROL(pb));
1974 result = parport_EPP_supported(pb); 2038 result = parport_EPP_supported(pb);
1975 2039
1976 ECR_WRITE (pb, oecr); 2040 ECR_WRITE(pb, oecr);
1977 2041
1978 if (result) { 2042 if (result) {
1979 /* Set up access functions to use ECP+EPP hardware. */ 2043 /* Set up access functions to use ECP+EPP hardware. */
@@ -1991,11 +2055,25 @@ static int parport_ECPEPP_supported(struct parport *pb)
1991/* Don't bother probing for modes we know we won't use. */ 2055/* Don't bother probing for modes we know we won't use. */
1992static int __devinit parport_PS2_supported(struct parport *pb) { return 0; } 2056static int __devinit parport_PS2_supported(struct parport *pb) { return 0; }
1993#ifdef CONFIG_PARPORT_PC_FIFO 2057#ifdef CONFIG_PARPORT_PC_FIFO
1994static int parport_ECP_supported(struct parport *pb) { return 0; } 2058static int parport_ECP_supported(struct parport *pb)
2059{
2060 return 0;
2061}
1995#endif 2062#endif
1996static int __devinit parport_EPP_supported(struct parport *pb) { return 0; } 2063static int __devinit parport_EPP_supported(struct parport *pb)
1997static int __devinit parport_ECPEPP_supported(struct parport *pb){return 0;} 2064{
1998static int __devinit parport_ECPPS2_supported(struct parport *pb){return 0;} 2065 return 0;
2066}
2067
2068static int __devinit parport_ECPEPP_supported(struct parport *pb)
2069{
2070 return 0;
2071}
2072
2073static int __devinit parport_ECPPS2_supported(struct parport *pb)
2074{
2075 return 0;
2076}
1999 2077
2000#endif /* No IEEE 1284 support */ 2078#endif /* No IEEE 1284 support */
2001 2079
@@ -2005,17 +2083,17 @@ static int __devinit parport_ECPPS2_supported(struct parport *pb){return 0;}
2005static int programmable_irq_support(struct parport *pb) 2083static int programmable_irq_support(struct parport *pb)
2006{ 2084{
2007 int irq, intrLine; 2085 int irq, intrLine;
2008 unsigned char oecr = inb (ECONTROL (pb)); 2086 unsigned char oecr = inb(ECONTROL(pb));
2009 static const int lookup[8] = { 2087 static const int lookup[8] = {
2010 PARPORT_IRQ_NONE, 7, 9, 10, 11, 14, 15, 5 2088 PARPORT_IRQ_NONE, 7, 9, 10, 11, 14, 15, 5
2011 }; 2089 };
2012 2090
2013 ECR_WRITE (pb, ECR_CNF << 5); /* Configuration MODE */ 2091 ECR_WRITE(pb, ECR_CNF << 5); /* Configuration MODE */
2014 2092
2015 intrLine = (inb (CONFIGB (pb)) >> 3) & 0x07; 2093 intrLine = (inb(CONFIGB(pb)) >> 3) & 0x07;
2016 irq = lookup[intrLine]; 2094 irq = lookup[intrLine];
2017 2095
2018 ECR_WRITE (pb, oecr); 2096 ECR_WRITE(pb, oecr);
2019 return irq; 2097 return irq;
2020} 2098}
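The IRQ decode above is just a three-bit field lookup in configB; a standalone sketch (lookup table copied from the diff, with -1 standing in for PARPORT_IRQ_NONE):

#include <stdio.h>

static const int lookup[8] = { -1 /* none */, 7, 9, 10, 11, 14, 15, 5 };

static int irq_from_configb(unsigned char configb)
{
	return lookup[(configb >> 3) & 0x07];	/* bits 3..5 */
}

int main(void)
{
	printf("configb=0x28 -> irq %d\n", irq_from_configb(0x28));
	printf("configb=0x00 -> irq %d\n", irq_from_configb(0x00));
	return 0;
}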
2021 2099
@@ -2025,17 +2103,17 @@ static int irq_probe_ECP(struct parport *pb)
2025 unsigned long irqs; 2103 unsigned long irqs;
2026 2104
2027 irqs = probe_irq_on(); 2105 irqs = probe_irq_on();
2028 2106
2029 ECR_WRITE (pb, ECR_SPP << 5); /* Reset FIFO */ 2107 ECR_WRITE(pb, ECR_SPP << 5); /* Reset FIFO */
2030 ECR_WRITE (pb, (ECR_TST << 5) | 0x04); 2108 ECR_WRITE(pb, (ECR_TST << 5) | 0x04);
2031 ECR_WRITE (pb, ECR_TST << 5); 2109 ECR_WRITE(pb, ECR_TST << 5);
2032 2110
2033	/* A full FIFO guarantees that a writeIntrThreshold interrupt is generated */ 2111	/* A full FIFO guarantees that a writeIntrThreshold interrupt is generated */
2034 for (i=0; i < 1024 && !(inb (ECONTROL (pb)) & 0x02) ; i++) 2112 for (i = 0; i < 1024 && !(inb(ECONTROL(pb)) & 0x02) ; i++)
2035 outb (0xaa, FIFO (pb)); 2113 outb(0xaa, FIFO(pb));
2036 2114
2037 pb->irq = probe_irq_off(irqs); 2115 pb->irq = probe_irq_off(irqs);
2038 ECR_WRITE (pb, ECR_SPP << 5); 2116 ECR_WRITE(pb, ECR_SPP << 5);
2039 2117
2040 if (pb->irq <= 0) 2118 if (pb->irq <= 0)
2041 pb->irq = PARPORT_IRQ_NONE; 2119 pb->irq = PARPORT_IRQ_NONE;
@@ -2045,7 +2123,7 @@ static int irq_probe_ECP(struct parport *pb)
2045 2123
2046/* 2124/*
2047 * This detection seems to work only on National Semiconductor chips; 2125 * This detection seems to work only on National Semiconductor chips;
2048 * it does not work on SMC, LGS, or Winbond. 2126 * it does not work on SMC, LGS, or Winbond.
2049 */ 2127 */
2050static int irq_probe_EPP(struct parport *pb) 2128static int irq_probe_EPP(struct parport *pb)
2051{ 2129{
@@ -2056,16 +2134,16 @@ static int irq_probe_EPP(struct parport *pb)
2056 unsigned char oecr; 2134 unsigned char oecr;
2057 2135
2058 if (pb->modes & PARPORT_MODE_PCECR) 2136 if (pb->modes & PARPORT_MODE_PCECR)
2059 oecr = inb (ECONTROL (pb)); 2137 oecr = inb(ECONTROL(pb));
2060 2138
2061 irqs = probe_irq_on(); 2139 irqs = probe_irq_on();
2062 2140
2063 if (pb->modes & PARPORT_MODE_PCECR) 2141 if (pb->modes & PARPORT_MODE_PCECR)
2064 frob_econtrol (pb, 0x10, 0x10); 2142 frob_econtrol(pb, 0x10, 0x10);
2065 2143
2066 clear_epp_timeout(pb); 2144 clear_epp_timeout(pb);
2067 parport_pc_frob_control (pb, 0x20, 0x20); 2145 parport_pc_frob_control(pb, 0x20, 0x20);
2068 parport_pc_frob_control (pb, 0x10, 0x10); 2146 parport_pc_frob_control(pb, 0x10, 0x10);
2069 clear_epp_timeout(pb); 2147 clear_epp_timeout(pb);
2070 2148
2071 /* Device isn't expecting an EPP read 2149 /* Device isn't expecting an EPP read
@@ -2074,9 +2152,9 @@ static int irq_probe_EPP(struct parport *pb)
2074 parport_pc_read_epp(pb); 2152 parport_pc_read_epp(pb);
2075 udelay(20); 2153 udelay(20);
2076 2154
2077 pb->irq = probe_irq_off (irqs); 2155 pb->irq = probe_irq_off(irqs);
2078 if (pb->modes & PARPORT_MODE_PCECR) 2156 if (pb->modes & PARPORT_MODE_PCECR)
2079 ECR_WRITE (pb, oecr); 2157 ECR_WRITE(pb, oecr);
2080 parport_pc_write_control(pb, 0xc); 2158 parport_pc_write_control(pb, 0xc);
2081 2159
2082 if (pb->irq <= 0) 2160 if (pb->irq <= 0)
@@ -2133,28 +2211,28 @@ static int parport_irq_probe(struct parport *pb)
2133/* --- DMA detection -------------------------------------- */ 2211/* --- DMA detection -------------------------------------- */
2134 2212
2135/* Only if chipset conforms to ECP ISA Interface Standard */ 2213/* Only if chipset conforms to ECP ISA Interface Standard */
2136static int programmable_dma_support (struct parport *p) 2214static int programmable_dma_support(struct parport *p)
2137{ 2215{
2138 unsigned char oecr = inb (ECONTROL (p)); 2216 unsigned char oecr = inb(ECONTROL(p));
2139 int dma; 2217 int dma;
2140 2218
2141 frob_set_mode (p, ECR_CNF); 2219 frob_set_mode(p, ECR_CNF);
2142 2220
2143 dma = inb (CONFIGB(p)) & 0x07; 2221 dma = inb(CONFIGB(p)) & 0x07;
2144 /* 000: Indicates jumpered 8-bit DMA if read-only. 2222 /* 000: Indicates jumpered 8-bit DMA if read-only.
2145 100: Indicates jumpered 16-bit DMA if read-only. */ 2223 100: Indicates jumpered 16-bit DMA if read-only. */
2146 if ((dma & 0x03) == 0) 2224 if ((dma & 0x03) == 0)
2147 dma = PARPORT_DMA_NONE; 2225 dma = PARPORT_DMA_NONE;
2148 2226
2149 ECR_WRITE (p, oecr); 2227 ECR_WRITE(p, oecr);
2150 return dma; 2228 return dma;
2151} 2229}
2152 2230
2153static int parport_dma_probe (struct parport *p) 2231static int parport_dma_probe(struct parport *p)
2154{ 2232{
2155 const struct parport_pc_private *priv = p->private_data; 2233 const struct parport_pc_private *priv = p->private_data;
2156 if (priv->ecr) 2234 if (priv->ecr) /* ask ECP chipset first */
2157 p->dma = programmable_dma_support(p); /* ask ECP chipset first */ 2235 p->dma = programmable_dma_support(p);
2158 if (p->dma == PARPORT_DMA_NONE) { 2236 if (p->dma == PARPORT_DMA_NONE) {
2159 /* ask known Super-IO chips proper, although these 2237 /* ask known Super-IO chips proper, although these
2160 claim ECP compatible, some don't report their DMA 2238 claim ECP compatible, some don't report their DMA
@@ -2212,7 +2290,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
2212 if (!base_res) 2290 if (!base_res)
2213 goto out4; 2291 goto out4;
2214 2292
2215 memcpy(ops, &parport_pc_ops, sizeof (struct parport_operations)); 2293 memcpy(ops, &parport_pc_ops, sizeof(struct parport_operations));
2216 priv->ctr = 0xc; 2294 priv->ctr = 0xc;
2217 priv->ctr_writable = ~0x10; 2295 priv->ctr_writable = ~0x10;
2218 priv->ecr = 0; 2296 priv->ecr = 0;
@@ -2239,7 +2317,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
2239 if (!parport_EPP_supported(p)) 2317 if (!parport_EPP_supported(p))
2240 parport_ECPEPP_supported(p); 2318 parport_ECPEPP_supported(p);
2241 } 2319 }
2242 if (!parport_SPP_supported (p)) 2320 if (!parport_SPP_supported(p))
2243 /* No port. */ 2321 /* No port. */
2244 goto out5; 2322 goto out5;
2245 if (priv->ecr) 2323 if (priv->ecr)
@@ -2247,7 +2325,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
2247 else 2325 else
2248 parport_PS2_supported(p); 2326 parport_PS2_supported(p);
2249 2327
2250 p->size = (p->modes & PARPORT_MODE_EPP)?8:3; 2328 p->size = (p->modes & PARPORT_MODE_EPP) ? 8 : 3;
2251 2329
2252 printk(KERN_INFO "%s: PC-style at 0x%lx", p->name, p->base); 2330 printk(KERN_INFO "%s: PC-style at 0x%lx", p->name, p->base);
2253 if (p->base_hi && priv->ecr) 2331 if (p->base_hi && priv->ecr)
@@ -2271,7 +2349,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
2271 } 2349 }
2272 } 2350 }
2273 if (p->dma == PARPORT_DMA_AUTO) /* To use DMA, giving the irq 2351 if (p->dma == PARPORT_DMA_AUTO) /* To use DMA, giving the irq
2274 is mandatory (see above) */ 2352 is mandatory (see above) */
2275 p->dma = PARPORT_DMA_NONE; 2353 p->dma = PARPORT_DMA_NONE;
2276 2354
2277#ifdef CONFIG_PARPORT_PC_FIFO 2355#ifdef CONFIG_PARPORT_PC_FIFO
@@ -2288,16 +2366,23 @@ struct parport *parport_pc_probe_port(unsigned long int base,
2288 if (p->dma != PARPORT_DMA_NONE) { 2366 if (p->dma != PARPORT_DMA_NONE) {
2289 printk(", dma %d", p->dma); 2367 printk(", dma %d", p->dma);
2290 p->modes |= PARPORT_MODE_DMA; 2368 p->modes |= PARPORT_MODE_DMA;
2291 		}
2292 		else printk(", using FIFO");
2293 	}
2294 	else
 2369 		} else
 2370 			printk(", using FIFO");
 2371 	} else
2295 /* We can't use the DMA channel after all. */ 2372 /* We can't use the DMA channel after all. */
2296 p->dma = PARPORT_DMA_NONE; 2373 p->dma = PARPORT_DMA_NONE;
2297#endif /* Allowed to use FIFO/DMA */ 2374#endif /* Allowed to use FIFO/DMA */
2298 2375
2299 printk(" ["); 2376 printk(" [");
2300#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}} 2377
2378#define printmode(x) \
2379 {\
2380 if (p->modes & PARPORT_MODE_##x) {\
2381 printk("%s%s", f ? "," : "", #x);\
2382 f++;\
2383 } \
2384 }
2385
2301 { 2386 {
2302 int f = 0; 2387 int f = 0;
2303 printmode(PCSPP); 2388 printmode(PCSPP);
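The reflowed printmode() macro above is still a bare brace block, which can misparse after an unbraced if/else; kernel style normally wraps statement-like macros in do { ... } while (0) so they behave as a single statement. A sketch of that variant, same body as the diff:

	#define printmode(x)						\
		do {							\
			if (p->modes & PARPORT_MODE_##x) {		\
				printk("%s%s", f ? "," : "", #x);	\
				f++;					\
			}						\
		} while (0)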
@@ -2309,10 +2394,10 @@ struct parport *parport_pc_probe_port(unsigned long int base,
2309 } 2394 }
2310#undef printmode 2395#undef printmode
2311#ifndef CONFIG_PARPORT_1284 2396#ifndef CONFIG_PARPORT_1284
2312 printk ("(,...)"); 2397 printk("(,...)");
2313#endif /* CONFIG_PARPORT_1284 */ 2398#endif /* CONFIG_PARPORT_1284 */
2314 printk("]\n"); 2399 printk("]\n");
2315 if (probedirq != PARPORT_IRQ_NONE) 2400 if (probedirq != PARPORT_IRQ_NONE)
2316 printk(KERN_INFO "%s: irq %d detected\n", p->name, probedirq); 2401 printk(KERN_INFO "%s: irq %d detected\n", p->name, probedirq);
2317 2402
2318 /* If No ECP release the ports grabbed above. */ 2403 /* If No ECP release the ports grabbed above. */
@@ -2328,7 +2413,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
2328 if (p->irq != PARPORT_IRQ_NONE) { 2413 if (p->irq != PARPORT_IRQ_NONE) {
2329 if (request_irq(p->irq, parport_irq_handler, 2414 if (request_irq(p->irq, parport_irq_handler,
2330 irqflags, p->name, p)) { 2415 irqflags, p->name, p)) {
2331 printk (KERN_WARNING "%s: irq %d in use, " 2416 printk(KERN_WARNING "%s: irq %d in use, "
2332 "resorting to polled operation\n", 2417 "resorting to polled operation\n",
2333 p->name, p->irq); 2418 p->name, p->irq);
2334 p->irq = PARPORT_IRQ_NONE; 2419 p->irq = PARPORT_IRQ_NONE;
@@ -2338,8 +2423,8 @@ struct parport *parport_pc_probe_port(unsigned long int base,
2338#ifdef CONFIG_PARPORT_PC_FIFO 2423#ifdef CONFIG_PARPORT_PC_FIFO
2339#ifdef HAS_DMA 2424#ifdef HAS_DMA
2340 if (p->dma != PARPORT_DMA_NONE) { 2425 if (p->dma != PARPORT_DMA_NONE) {
2341 if (request_dma (p->dma, p->name)) { 2426 if (request_dma(p->dma, p->name)) {
2342 printk (KERN_WARNING "%s: dma %d in use, " 2427 printk(KERN_WARNING "%s: dma %d in use, "
2343 "resorting to PIO operation\n", 2428 "resorting to PIO operation\n",
2344 p->name, p->dma); 2429 p->name, p->dma);
2345 p->dma = PARPORT_DMA_NONE; 2430 p->dma = PARPORT_DMA_NONE;
@@ -2349,8 +2434,8 @@ struct parport *parport_pc_probe_port(unsigned long int base,
2349 PAGE_SIZE, 2434 PAGE_SIZE,
2350 &priv->dma_handle, 2435 &priv->dma_handle,
2351 GFP_KERNEL); 2436 GFP_KERNEL);
2352 if (! priv->dma_buf) { 2437 if (!priv->dma_buf) {
2353 printk (KERN_WARNING "%s: " 2438 printk(KERN_WARNING "%s: "
2354 "cannot get buffer for DMA, " 2439 "cannot get buffer for DMA, "
2355 "resorting to PIO operation\n", 2440 "resorting to PIO operation\n",
2356 p->name); 2441 p->name);
@@ -2369,10 +2454,10 @@ struct parport *parport_pc_probe_port(unsigned long int base,
2369 * Put the ECP detected port in PS2 mode. 2454 * Put the ECP detected port in PS2 mode.
2370 * Do this also for ports that have ECR but don't do ECP. 2455 * Do this also for ports that have ECR but don't do ECP.
2371 */ 2456 */
2372 ECR_WRITE (p, 0x34); 2457 ECR_WRITE(p, 0x34);
2373 2458
2374 parport_pc_write_data(p, 0); 2459 parport_pc_write_data(p, 0);
2375 parport_pc_data_forward (p); 2460 parport_pc_data_forward(p);
2376 2461
2377 /* Now that we've told the sharing engine about the port, and 2462 /* Now that we've told the sharing engine about the port, and
2378 found out its characteristics, let the high-level drivers 2463 found out its characteristics, let the high-level drivers
@@ -2380,7 +2465,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
2380 spin_lock(&ports_lock); 2465 spin_lock(&ports_lock);
2381 list_add(&priv->list, &ports_list); 2466 list_add(&priv->list, &ports_list);
2382 spin_unlock(&ports_lock); 2467 spin_unlock(&ports_lock);
2383 parport_announce_port (p); 2468 parport_announce_port(p);
2384 2469
2385 return p; 2470 return p;
2386 2471
@@ -2393,18 +2478,17 @@ out5:
2393out4: 2478out4:
2394 parport_put_port(p); 2479 parport_put_port(p);
2395out3: 2480out3:
2396 kfree (priv); 2481 kfree(priv);
2397out2: 2482out2:
2398 kfree (ops); 2483 kfree(ops);
2399out1: 2484out1:
2400 if (pdev) 2485 if (pdev)
2401 platform_device_unregister(pdev); 2486 platform_device_unregister(pdev);
2402 return NULL; 2487 return NULL;
2403} 2488}
2489EXPORT_SYMBOL(parport_pc_probe_port);
2404 2490
2405EXPORT_SYMBOL (parport_pc_probe_port); 2491void parport_pc_unregister_port(struct parport *p)
2406
2407void parport_pc_unregister_port (struct parport *p)
2408{ 2492{
2409 struct parport_pc_private *priv = p->private_data; 2493 struct parport_pc_private *priv = p->private_data;
2410 struct parport_operations *ops = p->ops; 2494 struct parport_operations *ops = p->ops;
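The out1..out5 labels above are the kernel's goto-based unwind idiom: every failure jumps to the label that releases exactly what had been acquired so far, in reverse order, so there is a single teardown sequence instead of duplicated cleanup at each exit. A compressed sketch of the shape, with hypothetical allocations and a hypothetical final step:

	#include <linux/slab.h>

	extern int final_registration_step(void *ops, void *priv);	/* hypothetical */

	static void *probe_sketch(void)
	{
		void *ops, *priv;

		ops = kmalloc(64, GFP_KERNEL);
		if (!ops)
			goto out1;
		priv = kmalloc(64, GFP_KERNEL);
		if (!priv)
			goto out2;
		if (final_registration_step(ops, priv))
			goto out3;
		return priv;	/* success: everything stays allocated */

	out3:
		kfree(priv);
	out2:
		kfree(ops);
	out1:
		return NULL;
	}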
@@ -2430,17 +2514,16 @@ void parport_pc_unregister_port (struct parport *p)
2430 priv->dma_buf, 2514 priv->dma_buf,
2431 priv->dma_handle); 2515 priv->dma_handle);
2432#endif 2516#endif
2433 kfree (p->private_data); 2517 kfree(p->private_data);
2434 parport_put_port(p); 2518 parport_put_port(p);
2435 kfree (ops); /* hope no-one cached it */ 2519 kfree(ops); /* hope no-one cached it */
2436} 2520}
2437 2521EXPORT_SYMBOL(parport_pc_unregister_port);
2438EXPORT_SYMBOL (parport_pc_unregister_port);
2439 2522
2440#ifdef CONFIG_PCI 2523#ifdef CONFIG_PCI
2441 2524
2442/* ITE support maintained by Rich Liu <richliu@poorman.org> */ 2525/* ITE support maintained by Rich Liu <richliu@poorman.org> */
2443static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq, 2526static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
2444 int autodma, 2527 int autodma,
2445 const struct parport_pc_via_data *via) 2528 const struct parport_pc_via_data *via)
2446{ 2529{
@@ -2452,73 +2535,74 @@ static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq,
2452 int irq; 2535 int irq;
2453 int i; 2536 int i;
2454 2537
2455 DPRINTK (KERN_DEBUG "sio_ite_8872_probe()\n"); 2538 DPRINTK(KERN_DEBUG "sio_ite_8872_probe()\n");
2456 2539
2457 // make sure which one chip 2540 /* make sure which one chip */
2458 for(i = 0; i < 5; i++) { 2541 for (i = 0; i < 5; i++) {
2459 base_res = request_region(inta_addr[i], 32, "it887x"); 2542 base_res = request_region(inta_addr[i], 32, "it887x");
2460 if (base_res) { 2543 if (base_res) {
2461 int test; 2544 int test;
2462 pci_write_config_dword (pdev, 0x60, 2545 pci_write_config_dword(pdev, 0x60,
2463 0xe5000000 | inta_addr[i]); 2546 0xe5000000 | inta_addr[i]);
2464 pci_write_config_dword (pdev, 0x78, 2547 pci_write_config_dword(pdev, 0x78,
2465 0x00000000 | inta_addr[i]); 2548 0x00000000 | inta_addr[i]);
2466 test = inb (inta_addr[i]); 2549 test = inb(inta_addr[i]);
2467 if (test != 0xff) break; 2550 if (test != 0xff)
2551 break;
2468 release_region(inta_addr[i], 0x8); 2552 release_region(inta_addr[i], 0x8);
2469 } 2553 }
2470 } 2554 }
2471 if(i >= 5) { 2555 if (i >= 5) {
2472 printk (KERN_INFO "parport_pc: cannot find ITE8872 INTA\n"); 2556 printk(KERN_INFO "parport_pc: cannot find ITE8872 INTA\n");
2473 return 0; 2557 return 0;
2474 } 2558 }
2475 2559
2476 type = inb (inta_addr[i] + 0x18); 2560 type = inb(inta_addr[i] + 0x18);
2477 type &= 0x0f; 2561 type &= 0x0f;
2478 2562
2479 switch (type) { 2563 switch (type) {
2480 case 0x2: 2564 case 0x2:
2481 printk (KERN_INFO "parport_pc: ITE8871 found (1P)\n"); 2565 printk(KERN_INFO "parport_pc: ITE8871 found (1P)\n");
2482 ite8872set = 0x64200000; 2566 ite8872set = 0x64200000;
2483 break; 2567 break;
2484 case 0xa: 2568 case 0xa:
2485 printk (KERN_INFO "parport_pc: ITE8875 found (1P)\n"); 2569 printk(KERN_INFO "parport_pc: ITE8875 found (1P)\n");
2486 ite8872set = 0x64200000; 2570 ite8872set = 0x64200000;
2487 break; 2571 break;
2488 case 0xe: 2572 case 0xe:
2489 printk (KERN_INFO "parport_pc: ITE8872 found (2S1P)\n"); 2573 printk(KERN_INFO "parport_pc: ITE8872 found (2S1P)\n");
2490 ite8872set = 0x64e00000; 2574 ite8872set = 0x64e00000;
2491 break; 2575 break;
2492 case 0x6: 2576 case 0x6:
2493 printk (KERN_INFO "parport_pc: ITE8873 found (1S)\n"); 2577 printk(KERN_INFO "parport_pc: ITE8873 found (1S)\n");
2494 return 0; 2578 return 0;
2495 case 0x8: 2579 case 0x8:
2496 DPRINTK (KERN_DEBUG "parport_pc: ITE8874 found (2S)\n"); 2580 DPRINTK(KERN_DEBUG "parport_pc: ITE8874 found (2S)\n");
2497 return 0; 2581 return 0;
2498 default: 2582 default:
2499 printk (KERN_INFO "parport_pc: unknown ITE887x\n"); 2583 printk(KERN_INFO "parport_pc: unknown ITE887x\n");
2500 printk (KERN_INFO "parport_pc: please mail 'lspci -nvv' " 2584 printk(KERN_INFO "parport_pc: please mail 'lspci -nvv' "
2501 "output to Rich.Liu@ite.com.tw\n"); 2585 "output to Rich.Liu@ite.com.tw\n");
2502 return 0; 2586 return 0;
2503 } 2587 }
2504 2588
2505 pci_read_config_byte (pdev, 0x3c, &ite8872_irq); 2589 pci_read_config_byte(pdev, 0x3c, &ite8872_irq);
2506 pci_read_config_dword (pdev, 0x1c, &ite8872_lpt); 2590 pci_read_config_dword(pdev, 0x1c, &ite8872_lpt);
2507 ite8872_lpt &= 0x0000ff00; 2591 ite8872_lpt &= 0x0000ff00;
2508 pci_read_config_dword (pdev, 0x20, &ite8872_lpthi); 2592 pci_read_config_dword(pdev, 0x20, &ite8872_lpthi);
2509 ite8872_lpthi &= 0x0000ff00; 2593 ite8872_lpthi &= 0x0000ff00;
2510 pci_write_config_dword (pdev, 0x6c, 0xe3000000 | ite8872_lpt); 2594 pci_write_config_dword(pdev, 0x6c, 0xe3000000 | ite8872_lpt);
2511 pci_write_config_dword (pdev, 0x70, 0xe3000000 | ite8872_lpthi); 2595 pci_write_config_dword(pdev, 0x70, 0xe3000000 | ite8872_lpthi);
2512 pci_write_config_dword (pdev, 0x80, (ite8872_lpthi<<16) | ite8872_lpt); 2596 pci_write_config_dword(pdev, 0x80, (ite8872_lpthi<<16) | ite8872_lpt);
2513 // SET SPP&EPP , Parallel Port NO DMA , Enable All Function 2597 /* SET SPP&EPP , Parallel Port NO DMA , Enable All Function */
2514 // SET Parallel IRQ 2598 /* SET Parallel IRQ */
2515 pci_write_config_dword (pdev, 0x9c, 2599 pci_write_config_dword(pdev, 0x9c,
2516 ite8872set | (ite8872_irq * 0x11111)); 2600 ite8872set | (ite8872_irq * 0x11111));
2517 2601
2518 DPRINTK (KERN_DEBUG "ITE887x: The IRQ is %d.\n", ite8872_irq); 2602 DPRINTK(KERN_DEBUG "ITE887x: The IRQ is %d.\n", ite8872_irq);
2519 DPRINTK (KERN_DEBUG "ITE887x: The PARALLEL I/O port is 0x%x.\n", 2603 DPRINTK(KERN_DEBUG "ITE887x: The PARALLEL I/O port is 0x%x.\n",
2520 ite8872_lpt); 2604 ite8872_lpt);
2521 DPRINTK (KERN_DEBUG "ITE887x: The PARALLEL I/O porthi is 0x%x.\n", 2605 DPRINTK(KERN_DEBUG "ITE887x: The PARALLEL I/O porthi is 0x%x.\n",
2522 ite8872_lpthi); 2606 ite8872_lpthi);
2523 2607
2524 /* Let the user (or defaults) steer us away from interrupts */ 2608 /* Let the user (or defaults) steer us away from interrupts */
@@ -2530,14 +2614,14 @@ static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq,
2530 * Release the resource so that parport_pc_probe_port can get it. 2614 * Release the resource so that parport_pc_probe_port can get it.
2531 */ 2615 */
2532 release_resource(base_res); 2616 release_resource(base_res);
2533 if (parport_pc_probe_port (ite8872_lpt, ite8872_lpthi, 2617 if (parport_pc_probe_port(ite8872_lpt, ite8872_lpthi,
2534 irq, PARPORT_DMA_NONE, &pdev->dev, 0)) { 2618 irq, PARPORT_DMA_NONE, &pdev->dev, 0)) {
2535 printk (KERN_INFO 2619 printk(KERN_INFO
2536 "parport_pc: ITE 8872 parallel port: io=0x%X", 2620 "parport_pc: ITE 8872 parallel port: io=0x%X",
2537 ite8872_lpt); 2621 ite8872_lpt);
2538 if (irq != PARPORT_IRQ_NONE) 2622 if (irq != PARPORT_IRQ_NONE)
2539 printk (", irq=%d", irq); 2623 printk(", irq=%d", irq);
2540 printk ("\n"); 2624 printk("\n");
2541 return 1; 2625 return 1;
2542 } 2626 }
2543 2627
@@ -2546,7 +2630,7 @@ static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq,
2546 2630
2547/* VIA 8231 support by Pavel Fedin <sonic_amiga@rambler.ru> 2631/* VIA 8231 support by Pavel Fedin <sonic_amiga@rambler.ru>
2548 based on VIA 686a support code by Jeff Garzik <jgarzik@pobox.com> */ 2632 based on VIA 686a support code by Jeff Garzik <jgarzik@pobox.com> */
2549static int __devinitdata parport_init_mode = 0; 2633static int __devinitdata parport_init_mode;
2550 2634
2551/* Data for two known VIA chips */ 2635/* Data for two known VIA chips */
2552static struct parport_pc_via_data via_686a_data __devinitdata = { 2636static struct parport_pc_via_data via_686a_data __devinitdata = {
@@ -2568,7 +2652,7 @@ static struct parport_pc_via_data via_8231_data __devinitdata = {
2568 0xF6 2652 0xF6
2569}; 2653};
2570 2654
2571static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq, 2655static int __devinit sio_via_probe(struct pci_dev *pdev, int autoirq,
2572 int autodma, 2656 int autodma,
2573 const struct parport_pc_via_data *via) 2657 const struct parport_pc_via_data *via)
2574{ 2658{
@@ -2580,38 +2664,38 @@ static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
2580 2664
2581 printk(KERN_DEBUG "parport_pc: VIA 686A/8231 detected\n"); 2665 printk(KERN_DEBUG "parport_pc: VIA 686A/8231 detected\n");
2582 2666
2583 switch(parport_init_mode) 2667 switch (parport_init_mode) {
2584 {
2585 case 1: 2668 case 1:
2586 printk(KERN_DEBUG "parport_pc: setting SPP mode\n"); 2669 printk(KERN_DEBUG "parport_pc: setting SPP mode\n");
2587 siofunc = VIA_FUNCTION_PARPORT_SPP; 2670 siofunc = VIA_FUNCTION_PARPORT_SPP;
2588 break; 2671 break;
2589 case 2: 2672 case 2:
2590 printk(KERN_DEBUG "parport_pc: setting PS/2 mode\n"); 2673 printk(KERN_DEBUG "parport_pc: setting PS/2 mode\n");
2591 siofunc = VIA_FUNCTION_PARPORT_SPP; 2674 siofunc = VIA_FUNCTION_PARPORT_SPP;
2592 ppcontrol = VIA_PARPORT_BIDIR; 2675 ppcontrol = VIA_PARPORT_BIDIR;
2593 break; 2676 break;
2594 case 3: 2677 case 3:
2595 printk(KERN_DEBUG "parport_pc: setting EPP mode\n"); 2678 printk(KERN_DEBUG "parport_pc: setting EPP mode\n");
2596 siofunc = VIA_FUNCTION_PARPORT_EPP; 2679 siofunc = VIA_FUNCTION_PARPORT_EPP;
2597 ppcontrol = VIA_PARPORT_BIDIR; 2680 ppcontrol = VIA_PARPORT_BIDIR;
2598 have_epp = 1; 2681 have_epp = 1;
2599 break; 2682 break;
2600 case 4: 2683 case 4:
2601 printk(KERN_DEBUG "parport_pc: setting ECP mode\n"); 2684 printk(KERN_DEBUG "parport_pc: setting ECP mode\n");
2602 siofunc = VIA_FUNCTION_PARPORT_ECP; 2685 siofunc = VIA_FUNCTION_PARPORT_ECP;
2603 ppcontrol = VIA_PARPORT_BIDIR; 2686 ppcontrol = VIA_PARPORT_BIDIR;
2604 break; 2687 break;
2605 case 5: 2688 case 5:
2606 printk(KERN_DEBUG "parport_pc: setting EPP+ECP mode\n"); 2689 printk(KERN_DEBUG "parport_pc: setting EPP+ECP mode\n");
2607 siofunc = VIA_FUNCTION_PARPORT_ECP; 2690 siofunc = VIA_FUNCTION_PARPORT_ECP;
2608 ppcontrol = VIA_PARPORT_BIDIR|VIA_PARPORT_ECPEPP; 2691 ppcontrol = VIA_PARPORT_BIDIR|VIA_PARPORT_ECPEPP;
2609 have_epp = 1; 2692 have_epp = 1;
2610 break; 2693 break;
2611 default: 2694 default:
2612 		printk(KERN_DEBUG "parport_pc: probing current configuration\n");
2613 		siofunc = VIA_FUNCTION_PROBE;
2614 		break;
 2695 		printk(KERN_DEBUG
 2696 		       "parport_pc: probing current configuration\n");
 2697 		siofunc = VIA_FUNCTION_PROBE;
 2698 		break;
2615 } 2699 }
2616 /* 2700 /*
2617 * unlock super i/o configuration 2701 * unlock super i/o configuration
@@ -2622,38 +2706,36 @@ static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
2622 2706
2623 /* Bits 1-0: Parallel Port Mode / Enable */ 2707 /* Bits 1-0: Parallel Port Mode / Enable */
2624 outb(via->viacfg_function, VIA_CONFIG_INDEX); 2708 outb(via->viacfg_function, VIA_CONFIG_INDEX);
2625 tmp = inb (VIA_CONFIG_DATA); 2709 tmp = inb(VIA_CONFIG_DATA);
2626 /* Bit 5: EPP+ECP enable; bit 7: PS/2 bidirectional port enable */ 2710 /* Bit 5: EPP+ECP enable; bit 7: PS/2 bidirectional port enable */
2627 outb(via->viacfg_parport_control, VIA_CONFIG_INDEX); 2711 outb(via->viacfg_parport_control, VIA_CONFIG_INDEX);
2628 tmp2 = inb (VIA_CONFIG_DATA); 2712 tmp2 = inb(VIA_CONFIG_DATA);
2629 	if (siofunc == VIA_FUNCTION_PROBE)
2630 	{
2631 		siofunc = tmp & VIA_FUNCTION_PARPORT_DISABLE;
2632 		ppcontrol = tmp2;
2633 	}
2634 	else
2635 	{
2636 		tmp &= ~VIA_FUNCTION_PARPORT_DISABLE;
2637 		tmp |= siofunc;
2638 		outb(via->viacfg_function, VIA_CONFIG_INDEX);
2639 		outb(tmp, VIA_CONFIG_DATA);
2640 		tmp2 &= ~(VIA_PARPORT_BIDIR|VIA_PARPORT_ECPEPP);
2641 		tmp2 |= ppcontrol;
2642 		outb(via->viacfg_parport_control, VIA_CONFIG_INDEX);
2643 		outb(tmp2, VIA_CONFIG_DATA);
2644 	}
2645
 2713 	if (siofunc == VIA_FUNCTION_PROBE) {
 2714 		siofunc = tmp & VIA_FUNCTION_PARPORT_DISABLE;
 2715 		ppcontrol = tmp2;
 2716 	} else {
 2717 		tmp &= ~VIA_FUNCTION_PARPORT_DISABLE;
 2718 		tmp |= siofunc;
 2719 		outb(via->viacfg_function, VIA_CONFIG_INDEX);
 2720 		outb(tmp, VIA_CONFIG_DATA);
 2721 		tmp2 &= ~(VIA_PARPORT_BIDIR|VIA_PARPORT_ECPEPP);
 2722 		tmp2 |= ppcontrol;
 2723 		outb(via->viacfg_parport_control, VIA_CONFIG_INDEX);
 2724 		outb(tmp2, VIA_CONFIG_DATA);
 2725 	}
 2726
2646 /* Parallel Port I/O Base Address, bits 9-2 */ 2727 /* Parallel Port I/O Base Address, bits 9-2 */
2647 outb(via->viacfg_parport_base, VIA_CONFIG_INDEX); 2728 outb(via->viacfg_parport_base, VIA_CONFIG_INDEX);
2648 port1 = inb(VIA_CONFIG_DATA) << 2; 2729 port1 = inb(VIA_CONFIG_DATA) << 2;
2649 2730
2650 	printk (KERN_DEBUG "parport_pc: Current parallel port base: 0x%X\n",port1);
2651 	if ((port1 == 0x3BC) && have_epp)
2652 	{
2653 		outb(via->viacfg_parport_base, VIA_CONFIG_INDEX);
2654 		outb((0x378 >> 2), VIA_CONFIG_DATA);
2655 		printk(KERN_DEBUG "parport_pc: Parallel port base changed to 0x378\n");
2656 		port1 = 0x378;
2657 	}
 2731 	printk(KERN_DEBUG "parport_pc: Current parallel port base: 0x%X\n",
 2732 	       port1);
 2733 	if (port1 == 0x3BC && have_epp) {
 2734 		outb(via->viacfg_parport_base, VIA_CONFIG_INDEX);
 2735 		outb((0x378 >> 2), VIA_CONFIG_DATA);
 2736 		printk(KERN_DEBUG
 2737 		       "parport_pc: Parallel port base changed to 0x378\n");
 2738 		port1 = 0x378;
 2739 	}
2658 2740
2659 /* 2741 /*
@@ -2667,36 +2749,39 @@ static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
2667 printk(KERN_INFO "parport_pc: VIA parallel port disabled in BIOS\n"); 2749 printk(KERN_INFO "parport_pc: VIA parallel port disabled in BIOS\n");
2668 return 0; 2750 return 0;
2669 } 2751 }
2670 2752
2671 /* Bits 7-4: PnP Routing for Parallel Port IRQ */ 2753 /* Bits 7-4: PnP Routing for Parallel Port IRQ */
2672 pci_read_config_byte(pdev, via->via_pci_parport_irq_reg, &tmp); 2754 pci_read_config_byte(pdev, via->via_pci_parport_irq_reg, &tmp);
2673 irq = ((tmp & VIA_IRQCONTROL_PARALLEL) >> 4); 2755 irq = ((tmp & VIA_IRQCONTROL_PARALLEL) >> 4);
2674 2756
2675 	if (siofunc == VIA_FUNCTION_PARPORT_ECP)
2676 	{
2677 		/* Bits 3-2: PnP Routing for Parallel Port DMA */
2678 		pci_read_config_byte(pdev, via->via_pci_parport_dma_reg, &tmp);
2679 		dma = ((tmp & VIA_DMACONTROL_PARALLEL) >> 2);
2680 	}
2681 	else
2682 		/* if ECP not enabled, DMA is not enabled, assumed bogus 'dma' value */
2683 		dma = PARPORT_DMA_NONE;
 2757 	if (siofunc == VIA_FUNCTION_PARPORT_ECP) {
 2758 		/* Bits 3-2: PnP Routing for Parallel Port DMA */
 2759 		pci_read_config_byte(pdev, via->via_pci_parport_dma_reg, &tmp);
 2760 		dma = ((tmp & VIA_DMACONTROL_PARALLEL) >> 2);
 2761 	} else
 2762 		/* if ECP not enabled, DMA is not enabled, assumed
 2763 		   bogus 'dma' value */
 2764 		dma = PARPORT_DMA_NONE;
2684 2765
2685 /* Let the user (or defaults) steer us away from interrupts and DMA */ 2766 /* Let the user (or defaults) steer us away from interrupts and DMA */
2686 if (autoirq == PARPORT_IRQ_NONE) { 2767 if (autoirq == PARPORT_IRQ_NONE) {
2687 irq = PARPORT_IRQ_NONE; 2768 irq = PARPORT_IRQ_NONE;
2688 dma = PARPORT_DMA_NONE; 2769 dma = PARPORT_DMA_NONE;
2689 } 2770 }
2690 if (autodma == PARPORT_DMA_NONE) 2771 if (autodma == PARPORT_DMA_NONE)
2691 dma = PARPORT_DMA_NONE; 2772 dma = PARPORT_DMA_NONE;
2692 2773
2693 switch (port1) { 2774 switch (port1) {
2694 	case 0x3bc: port2 = 0x7bc; break;
2695 	case 0x378: port2 = 0x778; break;
2696 	case 0x278: port2 = 0x678; break;
2697 	default:
2698 		printk(KERN_INFO "parport_pc: Weird VIA parport base 0x%X, ignoring\n",
2699 			port1);
 2775 	case 0x3bc:
 2776 		port2 = 0x7bc; break;
 2777 	case 0x378:
 2778 		port2 = 0x778; break;
 2779 	case 0x278:
 2780 		port2 = 0x678; break;
 2781 	default:
 2782 		printk(KERN_INFO
 2783 		       "parport_pc: Weird VIA parport base 0x%X, ignoring\n",
 2784 		       port1);
2700 return 0; 2785 return 0;
2701 } 2786 }
2702 2787
@@ -2714,17 +2799,17 @@ static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
2714 } 2799 }
2715 2800
2716 /* finally, do the probe with values obtained */ 2801 /* finally, do the probe with values obtained */
2717 if (parport_pc_probe_port (port1, port2, irq, dma, &pdev->dev, 0)) { 2802 if (parport_pc_probe_port(port1, port2, irq, dma, &pdev->dev, 0)) {
2718 printk (KERN_INFO 2803 printk(KERN_INFO
2719 "parport_pc: VIA parallel port: io=0x%X", port1); 2804 "parport_pc: VIA parallel port: io=0x%X", port1);
2720 if (irq != PARPORT_IRQ_NONE) 2805 if (irq != PARPORT_IRQ_NONE)
2721 printk (", irq=%d", irq); 2806 printk(", irq=%d", irq);
2722 if (dma != PARPORT_DMA_NONE) 2807 if (dma != PARPORT_DMA_NONE)
2723 printk (", dma=%d", dma); 2808 printk(", dma=%d", dma);
2724 printk ("\n"); 2809 printk("\n");
2725 return 1; 2810 return 1;
2726 } 2811 }
2727 2812
2728 printk(KERN_WARNING "parport_pc: Strange, can't probe VIA parallel port: io=0x%X, irq=%d, dma=%d\n", 2813 printk(KERN_WARNING "parport_pc: Strange, can't probe VIA parallel port: io=0x%X, irq=%d, dma=%d\n",
2729 port1, irq, dma); 2814 port1, irq, dma);
2730 return 0; 2815 return 0;
@@ -2732,8 +2817,8 @@ static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
2732 2817
2733 2818
2734enum parport_pc_sio_types { 2819enum parport_pc_sio_types {
2735 sio_via_686a = 0, /* Via VT82C686A motherboard Super I/O */ 2820 sio_via_686a = 0, /* Via VT82C686A motherboard Super I/O */
2736 sio_via_8231, /* Via VT8231 south bridge integrated Super IO */ 2821 sio_via_8231, /* Via VT8231 south bridge integrated Super IO */
2737 sio_ite_8872, 2822 sio_ite_8872,
2738 last_sio 2823 last_sio
2739}; 2824};
@@ -2804,15 +2889,15 @@ enum parport_pc_pci_cards {
2804}; 2889};
2805 2890
2806 2891
2807/* each element directly indexed from enum list, above 2892/* each element directly indexed from enum list, above
2808 * (but offset by last_sio) */ 2893 * (but offset by last_sio) */
2809static struct parport_pc_pci { 2894static struct parport_pc_pci {
2810 int numports; 2895 int numports;
2811 struct { /* BAR (base address registers) numbers in the config 2896 struct { /* BAR (base address registers) numbers in the config
2812 space header */ 2897 space header */
2813 int lo; 2898 int lo;
2814 int hi; /* -1 if not there, >6 for offset-method (max 2899 int hi;
2815 BAR is 6) */ 2900 /* -1 if not there, >6 for offset-method (max BAR is 6) */
2816 } addr[4]; 2901 } addr[4];
2817 2902
2818 /* If set, this is called immediately after pci_enable_device. 2903 /* If set, this is called immediately after pci_enable_device.
@@ -2857,7 +2942,7 @@ static struct parport_pc_pci {
2857 /* timedia_4018 */ { 2, { { 0, 1 }, { 2, 3 }, } }, 2942 /* timedia_4018 */ { 2, { { 0, 1 }, { 2, 3 }, } },
2858 /* timedia_9018a */ { 2, { { 0, 1 }, { 2, 3 }, } }, 2943 /* timedia_9018a */ { 2, { { 0, 1 }, { 2, 3 }, } },
2859 /* SYBA uses fixed offsets in 2944 /* SYBA uses fixed offsets in
2860 a 1K io window */ 2945 a 1K io window */
2861 /* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } }, 2946 /* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } },
2862 /* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } }, 2947 /* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } },
2863 /* titan_010l */ { 1, { { 3, -1 }, } }, 2948 /* titan_010l */ { 1, { { 3, -1 }, } },
@@ -2873,11 +2958,14 @@ static struct parport_pc_pci {
2873 /* oxsemi_pcie_pport */ { 1, { { 0, 1 }, } }, 2958 /* oxsemi_pcie_pport */ { 1, { { 0, 1 }, } },
2874 /* aks_0100 */ { 1, { { 0, -1 }, } }, 2959 /* aks_0100 */ { 1, { { 0, -1 }, } },
2875 /* mobility_pp */ { 1, { { 0, 1 }, } }, 2960 /* mobility_pp */ { 1, { { 0, 1 }, } },
2876 	/* netmos_9705 */		{ 1, { { 0, -1 }, } }, /* untested */
2877 	/* netmos_9715 */		{ 2, { { 0, 1 }, { 2, 3 },} }, /* untested */
2878 	/* netmos_9755 */		{ 2, { { 0, 1 }, { 2, 3 },} }, /* untested */
2879 	/* netmos_9805 */		{ 1, { { 0, -1 }, } }, /* untested */
2880 	/* netmos_9815 */		{ 2, { { 0, -1 }, { 2, -1 }, } }, /* untested */
 2961
 2962 	/* The netmos entries below are untested */
 2963 	/* netmos_9705 */		{ 1, { { 0, -1 }, } },
 2964 	/* netmos_9715 */		{ 2, { { 0, 1 }, { 2, 3 },} },
 2965 	/* netmos_9755 */		{ 2, { { 0, 1 }, { 2, 3 },} },
 2966 	/* netmos_9805 */		{ 1, { { 0, -1 }, } },
 2967 	/* netmos_9815 */		{ 2, { { 0, -1 }, { 2, -1 }, } },
 2968
2881 /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, 2969 /* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
2882}; 2970};
2883 2971
@@ -2906,7 +2994,7 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
2906 { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_BOCA_IOPPAR, 2994 { PCI_VENDOR_ID_LAVA, PCI_DEVICE_ID_LAVA_BOCA_IOPPAR,
2907 PCI_ANY_ID, PCI_ANY_ID, 0, 0, boca_ioppar }, 2995 PCI_ANY_ID, PCI_ANY_ID, 0, 0, boca_ioppar },
2908 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, 2996 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
2909 PCI_SUBVENDOR_ID_EXSYS, PCI_SUBDEVICE_ID_EXSYS_4014, 0,0, plx_9050 }, 2997 PCI_SUBVENDOR_ID_EXSYS, PCI_SUBDEVICE_ID_EXSYS_4014, 0, 0, plx_9050 },
2910 /* PCI_VENDOR_ID_TIMEDIA/SUNIX has many differing cards ...*/ 2998 /* PCI_VENDOR_ID_TIMEDIA/SUNIX has many differing cards ...*/
2911 { 0x1409, 0x7168, 0x1409, 0x4078, 0, 0, timedia_4078a }, 2999 { 0x1409, 0x7168, 0x1409, 0x4078, 0, 0, timedia_4078a },
2912 { 0x1409, 0x7168, 0x1409, 0x4079, 0, 0, timedia_4079h }, 3000 { 0x1409, 0x7168, 0x1409, 0x4079, 0, 0, timedia_4079h },
@@ -2940,7 +3028,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
2940 { 0x9710, 0x9805, 0x1000, 0x0010, 0, 0, titan_1284p1 }, 3028 { 0x9710, 0x9805, 0x1000, 0x0010, 0, 0, titan_1284p1 },
2941 { 0x9710, 0x9815, 0x1000, 0x0020, 0, 0, titan_1284p2 }, 3029 { 0x9710, 0x9815, 0x1000, 0x0020, 0, 0, titan_1284p2 },
2942 /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/ 3030 /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
2943 { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p}, /* AFAVLAB_TK9902 */ 3031 /* AFAVLAB_TK9902 */
3032 { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p},
2944 { 0x14db, 0x2121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_2p}, 3033 { 0x14db, 0x2121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_2p},
2945 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI952PP, 3034 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI952PP,
2946 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_952 }, 3035 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_952 },
@@ -2983,14 +3072,14 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
2983 PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 }, 3072 PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
2984 { 0, } /* terminate list */ 3073 { 0, } /* terminate list */
2985}; 3074};
2986MODULE_DEVICE_TABLE(pci,parport_pc_pci_tbl); 3075MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
2987 3076
2988struct pci_parport_data { 3077struct pci_parport_data {
2989 int num; 3078 int num;
2990 struct parport *ports[2]; 3079 struct parport *ports[2];
2991}; 3080};
2992 3081
2993static int parport_pc_pci_probe (struct pci_dev *dev, 3082static int parport_pc_pci_probe(struct pci_dev *dev,
2994 const struct pci_device_id *id) 3083 const struct pci_device_id *id)
2995{ 3084{
2996 int err, count, n, i = id->driver_data; 3085 int err, count, n, i = id->driver_data;
@@ -3003,7 +3092,8 @@ static int parport_pc_pci_probe (struct pci_dev *dev,
3003 /* This is a PCI card */ 3092 /* This is a PCI card */
3004 i -= last_sio; 3093 i -= last_sio;
3005 count = 0; 3094 count = 0;
3006 if ((err = pci_enable_device (dev)) != 0) 3095 err = pci_enable_device(dev);
3096 if (err)
3007 return err; 3097 return err;
3008 3098
3009 data = kmalloc(sizeof(struct pci_parport_data), GFP_KERNEL); 3099 data = kmalloc(sizeof(struct pci_parport_data), GFP_KERNEL);
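The split above is a standard checkpatch fix: assignments inside an if condition are discouraged, so the call and the test become two statements. The resulting shape is the usual kernel error pattern:

	/* before: if ((err = pci_enable_device(dev)) != 0) return err; */
	err = pci_enable_device(dev);	/* returns 0 or a negative errno */
	if (err)
		return err;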
@@ -3011,7 +3101,7 @@ static int parport_pc_pci_probe (struct pci_dev *dev,
3011 return -ENOMEM; 3101 return -ENOMEM;
3012 3102
3013 if (cards[i].preinit_hook && 3103 if (cards[i].preinit_hook &&
3014 cards[i].preinit_hook (dev, PARPORT_IRQ_NONE, PARPORT_DMA_NONE)) { 3104 cards[i].preinit_hook(dev, PARPORT_IRQ_NONE, PARPORT_DMA_NONE)) {
3015 kfree(data); 3105 kfree(data);
3016 return -ENODEV; 3106 return -ENODEV;
3017 } 3107 }
@@ -3021,25 +3111,25 @@ static int parport_pc_pci_probe (struct pci_dev *dev,
3021 int hi = cards[i].addr[n].hi; 3111 int hi = cards[i].addr[n].hi;
3022 int irq; 3112 int irq;
3023 unsigned long io_lo, io_hi; 3113 unsigned long io_lo, io_hi;
3024 io_lo = pci_resource_start (dev, lo); 3114 io_lo = pci_resource_start(dev, lo);
3025 io_hi = 0; 3115 io_hi = 0;
3026 if ((hi >= 0) && (hi <= 6)) 3116 if ((hi >= 0) && (hi <= 6))
3027 io_hi = pci_resource_start (dev, hi); 3117 io_hi = pci_resource_start(dev, hi);
3028 else if (hi > 6) 3118 else if (hi > 6)
3029 io_lo += hi; /* Reinterpret the meaning of 3119 io_lo += hi; /* Reinterpret the meaning of
3030 "hi" as an offset (see SYBA 3120 "hi" as an offset (see SYBA
3031 def.) */ 3121 def.) */
3032 /* TODO: test if sharing interrupts works */ 3122 /* TODO: test if sharing interrupts works */
3033 irq = dev->irq; 3123 irq = dev->irq;
3034 if (irq == IRQ_NONE) { 3124 if (irq == IRQ_NONE) {
3035 printk (KERN_DEBUG 3125 printk(KERN_DEBUG
3036 "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx)\n", 3126 "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx)\n",
3037 parport_pc_pci_tbl[i + last_sio].vendor, 3127 parport_pc_pci_tbl[i + last_sio].vendor,
3038 parport_pc_pci_tbl[i + last_sio].device, 3128 parport_pc_pci_tbl[i + last_sio].device,
3039 io_lo, io_hi); 3129 io_lo, io_hi);
3040 irq = PARPORT_IRQ_NONE; 3130 irq = PARPORT_IRQ_NONE;
3041 } else { 3131 } else {
3042 printk (KERN_DEBUG 3132 printk(KERN_DEBUG
3043 "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx), IRQ %d\n", 3133 "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx), IRQ %d\n",
3044 parport_pc_pci_tbl[i + last_sio].vendor, 3134 parport_pc_pci_tbl[i + last_sio].vendor,
3045 parport_pc_pci_tbl[i + last_sio].device, 3135 parport_pc_pci_tbl[i + last_sio].device,
@@ -3056,7 +3146,7 @@ static int parport_pc_pci_probe (struct pci_dev *dev,
3056 data->num = count; 3146 data->num = count;
3057 3147
3058 if (cards[i].postinit_hook) 3148 if (cards[i].postinit_hook)
3059 cards[i].postinit_hook (dev, count == 0); 3149 cards[i].postinit_hook(dev, count == 0);
3060 3150
3061 if (count) { 3151 if (count) {
3062 pci_set_drvdata(dev, data); 3152 pci_set_drvdata(dev, data);
@@ -3090,7 +3180,7 @@ static struct pci_driver parport_pc_pci_driver = {
3090 .remove = __devexit_p(parport_pc_pci_remove), 3180 .remove = __devexit_p(parport_pc_pci_remove),
3091}; 3181};
3092 3182
3093static int __init parport_pc_init_superio (int autoirq, int autodma) 3183static int __init parport_pc_init_superio(int autoirq, int autodma)
3094{ 3184{
3095 const struct pci_device_id *id; 3185 const struct pci_device_id *id;
3096 struct pci_dev *pdev = NULL; 3186 struct pci_dev *pdev = NULL;
@@ -3101,8 +3191,9 @@ static int __init parport_pc_init_superio (int autoirq, int autodma)
3101 if (id == NULL || id->driver_data >= last_sio) 3191 if (id == NULL || id->driver_data >= last_sio)
3102 continue; 3192 continue;
3103 3193
3104 if (parport_pc_superio_info[id->driver_data].probe 3194 if (parport_pc_superio_info[id->driver_data].probe(
3105 (pdev, autoirq, autodma,parport_pc_superio_info[id->driver_data].via)) { 3195 pdev, autoirq, autodma,
3196 parport_pc_superio_info[id->driver_data].via)) {
3106 ret++; 3197 ret++;
3107 } 3198 }
3108 } 3199 }
@@ -3111,7 +3202,10 @@ static int __init parport_pc_init_superio (int autoirq, int autodma)
3111} 3202}
3112#else 3203#else
3113static struct pci_driver parport_pc_pci_driver; 3204static struct pci_driver parport_pc_pci_driver;
3114static int __init parport_pc_init_superio(int autoirq, int autodma) {return 0;} 3205static int __init parport_pc_init_superio(int autoirq, int autodma)
3206{
3207 return 0;
3208}
3115#endif /* CONFIG_PCI */ 3209#endif /* CONFIG_PCI */
3116 3210
3117#ifdef CONFIG_PNP 3211#ifdef CONFIG_PNP
@@ -3124,44 +3218,45 @@ static const struct pnp_device_id parport_pc_pnp_tbl[] = {
3124 { } 3218 { }
3125}; 3219};
3126 3220
3127MODULE_DEVICE_TABLE(pnp,parport_pc_pnp_tbl); 3221MODULE_DEVICE_TABLE(pnp, parport_pc_pnp_tbl);
3128 3222
3129static int parport_pc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id) 3223static int parport_pc_pnp_probe(struct pnp_dev *dev,
3224 const struct pnp_device_id *id)
3130{ 3225{
3131 struct parport *pdata; 3226 struct parport *pdata;
3132 unsigned long io_lo, io_hi; 3227 unsigned long io_lo, io_hi;
3133 int dma, irq; 3228 int dma, irq;
3134 3229
3135 if (pnp_port_valid(dev,0) && 3230 if (pnp_port_valid(dev, 0) &&
3136 !(pnp_port_flags(dev,0) & IORESOURCE_DISABLED)) { 3231 !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
3137 io_lo = pnp_port_start(dev,0); 3232 io_lo = pnp_port_start(dev, 0);
3138 } else 3233 } else
3139 return -EINVAL; 3234 return -EINVAL;
3140 3235
3141 if (pnp_port_valid(dev,1) && 3236 if (pnp_port_valid(dev, 1) &&
3142 !(pnp_port_flags(dev,1) & IORESOURCE_DISABLED)) { 3237 !(pnp_port_flags(dev, 1) & IORESOURCE_DISABLED)) {
3143 io_hi = pnp_port_start(dev,1); 3238 io_hi = pnp_port_start(dev, 1);
3144 } else 3239 } else
3145 io_hi = 0; 3240 io_hi = 0;
3146 3241
3147 if (pnp_irq_valid(dev,0) && 3242 if (pnp_irq_valid(dev, 0) &&
3148 !(pnp_irq_flags(dev,0) & IORESOURCE_DISABLED)) { 3243 !(pnp_irq_flags(dev, 0) & IORESOURCE_DISABLED)) {
3149 irq = pnp_irq(dev,0); 3244 irq = pnp_irq(dev, 0);
3150 } else 3245 } else
3151 irq = PARPORT_IRQ_NONE; 3246 irq = PARPORT_IRQ_NONE;
3152 3247
3153 if (pnp_dma_valid(dev,0) && 3248 if (pnp_dma_valid(dev, 0) &&
3154 !(pnp_dma_flags(dev,0) & IORESOURCE_DISABLED)) { 3249 !(pnp_dma_flags(dev, 0) & IORESOURCE_DISABLED)) {
3155 dma = pnp_dma(dev,0); 3250 dma = pnp_dma(dev, 0);
3156 } else 3251 } else
3157 dma = PARPORT_DMA_NONE; 3252 dma = PARPORT_DMA_NONE;
3158 3253
3159 dev_info(&dev->dev, "reported by %s\n", dev->protocol->name); 3254 dev_info(&dev->dev, "reported by %s\n", dev->protocol->name);
3160 if (!(pdata = parport_pc_probe_port(io_lo, io_hi, 3255 pdata = parport_pc_probe_port(io_lo, io_hi, irq, dma, &dev->dev, 0);
3161 irq, dma, &dev->dev, 0))) 3256 if (pdata == NULL)
3162 return -ENODEV; 3257 return -ENODEV;
3163 3258
3164 pnp_set_drvdata(dev,pdata); 3259 pnp_set_drvdata(dev, pdata);
3165 return 0; 3260 return 0;
3166} 3261}
3167 3262
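The PnP probe above queries each resource the same way: use it only if the firmware marked it valid and not disabled, otherwise fall back to a "none" sentinel so the port still works in polled, non-DMA mode. One resource shown in isolation, with the names from the diff:

	int irq;

	if (pnp_irq_valid(dev, 0) &&
	    !(pnp_irq_flags(dev, 0) & IORESOURCE_DISABLED))
		irq = pnp_irq(dev, 0);
	else
		irq = PARPORT_IRQ_NONE;	/* fall back to polled operation */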
@@ -3203,7 +3298,7 @@ static struct platform_driver parport_pc_platform_driver = {
3203 3298
3204/* This is called by parport_pc_find_nonpci_ports (in asm/parport.h) */ 3299/* This is called by parport_pc_find_nonpci_ports (in asm/parport.h) */
3205static int __devinit __attribute__((unused)) 3300static int __devinit __attribute__((unused))
3206parport_pc_find_isa_ports (int autoirq, int autodma) 3301parport_pc_find_isa_ports(int autoirq, int autodma)
3207{ 3302{
3208 int count = 0; 3303 int count = 0;
3209 3304
@@ -3227,7 +3322,7 @@ parport_pc_find_isa_ports (int autoirq, int autodma)
3227 * autoirq is PARPORT_IRQ_NONE, PARPORT_IRQ_AUTO, or PARPORT_IRQ_PROBEONLY 3322 * autoirq is PARPORT_IRQ_NONE, PARPORT_IRQ_AUTO, or PARPORT_IRQ_PROBEONLY
3228 * autodma is PARPORT_DMA_NONE or PARPORT_DMA_AUTO 3323 * autodma is PARPORT_DMA_NONE or PARPORT_DMA_AUTO
3229 */ 3324 */
3230static void __init parport_pc_find_ports (int autoirq, int autodma) 3325static void __init parport_pc_find_ports(int autoirq, int autodma)
3231{ 3326{
3232 int count = 0, err; 3327 int count = 0, err;
3233 3328
@@ -3261,11 +3356,18 @@ static void __init parport_pc_find_ports (int autoirq, int autodma)
3261 * syntax and keep in mind that code below is a cleaned up version. 3356 * syntax and keep in mind that code below is a cleaned up version.
3262 */ 3357 */
3263 3358
3264 static int __initdata io[PARPORT_PC_MAX_PORTS+1] = { [0 ... PARPORT_PC_MAX_PORTS] = 0 };
3265 static int __initdata io_hi[PARPORT_PC_MAX_PORTS+1] =
3266 	{ [0 ... PARPORT_PC_MAX_PORTS] = PARPORT_IOHI_AUTO };
3267 static int __initdata dmaval[PARPORT_PC_MAX_PORTS] = { [0 ... PARPORT_PC_MAX_PORTS-1] = PARPORT_DMA_NONE };
3268 static int __initdata irqval[PARPORT_PC_MAX_PORTS] = { [0 ... PARPORT_PC_MAX_PORTS-1] = PARPORT_IRQ_PROBEONLY };
 3359 static int __initdata io[PARPORT_PC_MAX_PORTS+1] = {
 3360 	[0 ... PARPORT_PC_MAX_PORTS] = 0
 3361 };
 3362 static int __initdata io_hi[PARPORT_PC_MAX_PORTS+1] = {
 3363 	[0 ... PARPORT_PC_MAX_PORTS] = PARPORT_IOHI_AUTO
 3364 };
 3365 static int __initdata dmaval[PARPORT_PC_MAX_PORTS] = {
 3366 	[0 ... PARPORT_PC_MAX_PORTS-1] = PARPORT_DMA_NONE
 3367 };
 3368 static int __initdata irqval[PARPORT_PC_MAX_PORTS] = {
 3369 	[0 ... PARPORT_PC_MAX_PORTS-1] = PARPORT_IRQ_PROBEONLY
 3370 };
3269 3371
3270static int __init parport_parse_param(const char *s, int *val, 3372static int __init parport_parse_param(const char *s, int *val,
3271 int automatic, int none, int nofifo) 3373 int automatic, int none, int nofifo)
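The four tables above use GCC's designated range initializers: [first ... last] = value fills the whole span at compile time, which is how each per-port parameter array gets a non-zero default (PARPORT_IOHI_AUTO, PARPORT_DMA_NONE, PARPORT_IRQ_PROBEONLY) without any runtime loop. A standalone illustration, with a hypothetical size and value:

	#define MAX_PORTS 8

	/* GNU C extension: every element is -1 at compile time */
	static int dma_default[MAX_PORTS] = { [0 ... MAX_PORTS - 1] = -1 };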
@@ -3306,18 +3408,19 @@ static int __init parport_parse_dma(const char *dmastr, int *val)
3306#ifdef CONFIG_PCI 3408#ifdef CONFIG_PCI
3307static int __init parport_init_mode_setup(char *str) 3409static int __init parport_init_mode_setup(char *str)
3308{ 3410{
3309 	printk(KERN_DEBUG "parport_pc.c: Specified parameter parport_init_mode=%s\n", str);
3310
3311 	if (!strcmp (str, "spp"))
3312 		parport_init_mode=1;
3313 	if (!strcmp (str, "ps2"))
3314 		parport_init_mode=2;
3315 	if (!strcmp (str, "epp"))
3316 		parport_init_mode=3;
3317 	if (!strcmp (str, "ecp"))
3318 		parport_init_mode=4;
3319 	if (!strcmp (str, "ecpepp"))
3320 		parport_init_mode=5;
 3411 	printk(KERN_DEBUG
 3412 	       "parport_pc.c: Specified parameter parport_init_mode=%s\n", str);
 3413
 3414 	if (!strcmp(str, "spp"))
 3415 		parport_init_mode = 1;
 3416 	if (!strcmp(str, "ps2"))
 3417 		parport_init_mode = 2;
 3418 	if (!strcmp(str, "epp"))
 3419 		parport_init_mode = 3;
 3420 	if (!strcmp(str, "ecp"))
 3421 		parport_init_mode = 4;
 3422 	if (!strcmp(str, "ecpepp"))
 3423 		parport_init_mode = 5;
3321 return 1; 3424 return 1;
3322} 3425}
3323#endif 3426#endif
@@ -3341,7 +3444,8 @@ module_param(verbose_probing, int, 0644);
3341#endif 3444#endif
3342#ifdef CONFIG_PCI 3445#ifdef CONFIG_PCI
3343static char *init_mode; 3446static char *init_mode;
3344MODULE_PARM_DESC(init_mode, "Initialise mode for VIA VT8231 port (spp, ps2, epp, ecp or ecpepp)"); 3447MODULE_PARM_DESC(init_mode,
3448 "Initialise mode for VIA VT8231 port (spp, ps2, epp, ecp or ecpepp)");
3345module_param(init_mode, charp, 0); 3449module_param(init_mode, charp, 0);
3346#endif 3450#endif
3347 3451
@@ -3372,7 +3476,7 @@ static int __init parse_parport_params(void)
3372 irqval[0] = val; 3476 irqval[0] = val;
3373 break; 3477 break;
3374 default: 3478 default:
3375 printk (KERN_WARNING 3479 printk(KERN_WARNING
3376 "parport_pc: irq specified " 3480 "parport_pc: irq specified "
3377 "without base address. Use 'io=' " 3481 "without base address. Use 'io=' "
3378 "to specify one\n"); 3482 "to specify one\n");
@@ -3385,7 +3489,7 @@ static int __init parse_parport_params(void)
3385 dmaval[0] = val; 3489 dmaval[0] = val;
3386 break; 3490 break;
3387 default: 3491 default:
3388 printk (KERN_WARNING 3492 printk(KERN_WARNING
3389 "parport_pc: dma specified " 3493 "parport_pc: dma specified "
3390 "without base address. Use 'io=' " 3494 "without base address. Use 'io=' "
3391 "to specify one\n"); 3495 "to specify one\n");
@@ -3396,7 +3500,7 @@ static int __init parse_parport_params(void)
3396 3500
3397#else 3501#else
3398 3502
3399static int parport_setup_ptr __initdata = 0; 3503static int parport_setup_ptr __initdata;
3400 3504
3401/* 3505/*
3402 * Acceptable parameters: 3506 * Acceptable parameters:
@@ -3407,7 +3511,7 @@ static int parport_setup_ptr __initdata = 0;
3407 * 3511 *
3408 * IRQ/DMA may be numeric or 'auto' or 'none' 3512 * IRQ/DMA may be numeric or 'auto' or 'none'
3409 */ 3513 */
3410static int __init parport_setup (char *str) 3514static int __init parport_setup(char *str)
3411{ 3515{
3412 char *endptr; 3516 char *endptr;
3413 char *sep; 3517 char *sep;
@@ -3419,15 +3523,15 @@ static int __init parport_setup (char *str)
3419 return 1; 3523 return 1;
3420 } 3524 }
3421 3525
3422 if (!strncmp (str, "auto", 4)) { 3526 if (!strncmp(str, "auto", 4)) {
3423 irqval[0] = PARPORT_IRQ_AUTO; 3527 irqval[0] = PARPORT_IRQ_AUTO;
3424 dmaval[0] = PARPORT_DMA_AUTO; 3528 dmaval[0] = PARPORT_DMA_AUTO;
3425 return 1; 3529 return 1;
3426 } 3530 }
3427 3531
3428 val = simple_strtoul (str, &endptr, 0); 3532 val = simple_strtoul(str, &endptr, 0);
3429 if (endptr == str) { 3533 if (endptr == str) {
3430 printk (KERN_WARNING "parport=%s not understood\n", str); 3534 printk(KERN_WARNING "parport=%s not understood\n", str);
3431 return 1; 3535 return 1;
3432 } 3536 }
3433 3537
@@ -3461,7 +3565,7 @@ static int __init parse_parport_params(void)
3461 return io[0] == PARPORT_DISABLE; 3565 return io[0] == PARPORT_DISABLE;
3462} 3566}
3463 3567
3464__setup ("parport=", parport_setup); 3568__setup("parport=", parport_setup);
3465 3569
3466/* 3570/*
3467 * Acceptable parameters: 3571 * Acceptable parameters:
@@ -3469,7 +3573,7 @@ __setup ("parport=", parport_setup);
3469 * parport_init_mode=[spp|ps2|epp|ecp|ecpepp] 3573 * parport_init_mode=[spp|ps2|epp|ecp|ecpepp]
3470 */ 3574 */
3471#ifdef CONFIG_PCI 3575#ifdef CONFIG_PCI
3472__setup("parport_init_mode=",parport_init_mode_setup); 3576__setup("parport_init_mode=", parport_init_mode_setup);
3473#endif 3577#endif
3474#endif 3578#endif
3475 3579
@@ -3493,13 +3597,13 @@ static int __init parport_pc_init(void)
3493 for (i = 0; i < PARPORT_PC_MAX_PORTS; i++) { 3597 for (i = 0; i < PARPORT_PC_MAX_PORTS; i++) {
3494 if (!io[i]) 3598 if (!io[i])
3495 break; 3599 break;
3496 if ((io_hi[i]) == PARPORT_IOHI_AUTO) 3600 if (io_hi[i] == PARPORT_IOHI_AUTO)
3497 io_hi[i] = 0x400 + io[i]; 3601 io_hi[i] = 0x400 + io[i];
3498 parport_pc_probe_port(io[i], io_hi[i], 3602 parport_pc_probe_port(io[i], io_hi[i],
3499 irqval[i], dmaval[i], NULL, 0); 3603 irqval[i], dmaval[i], NULL, 0);
3500 } 3604 }
3501 } else 3605 } else
3502 parport_pc_find_ports (irqval[0], dmaval[0]); 3606 parport_pc_find_ports(irqval[0], dmaval[0]);
3503 3607
3504 return 0; 3608 return 0;
3505} 3609}
@@ -3507,9 +3611,9 @@ static int __init parport_pc_init(void)
3507static void __exit parport_pc_exit(void) 3611static void __exit parport_pc_exit(void)
3508{ 3612{
3509 if (pci_registered_parport) 3613 if (pci_registered_parport)
3510 pci_unregister_driver (&parport_pc_pci_driver); 3614 pci_unregister_driver(&parport_pc_pci_driver);
3511 if (pnp_registered_parport) 3615 if (pnp_registered_parport)
3512 pnp_unregister_driver (&parport_pc_pnp_driver); 3616 pnp_unregister_driver(&parport_pc_pnp_driver);
3513 platform_driver_unregister(&parport_pc_platform_driver); 3617 platform_driver_unregister(&parport_pc_platform_driver);
3514 3618
3515 while (!list_empty(&ports_list)) { 3619 while (!list_empty(&ports_list)) {
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 0ebca450ed2..dffa5d4fb29 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -614,7 +614,10 @@ parport_register_device(struct parport *port, const char *name,
614 * pardevice fields. -arca 614 * pardevice fields. -arca
615 */ 615 */
616 port->ops->init_state(tmp, tmp->state); 616 port->ops->init_state(tmp, tmp->state);
617 parport_device_proc_register(tmp); 617 if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
618 port->proc_device = tmp;
619 parport_device_proc_register(tmp);
620 }
618 return tmp; 621 return tmp;
619 622
620 out_free_all: 623 out_free_all:
@@ -646,10 +649,14 @@ void parport_unregister_device(struct pardevice *dev)
646 } 649 }
647#endif 650#endif
648 651
649 parport_device_proc_unregister(dev);
650
651 port = dev->port->physport; 652 port = dev->port->physport;
652 653
654 if (port->proc_device == dev) {
655 port->proc_device = NULL;
656 clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
657 parport_device_proc_unregister(dev);
658 }
659
653 if (port->cad == dev) { 660 if (port->cad == dev) {
654 printk(KERN_DEBUG "%s: %s forgot to release port\n", 661 printk(KERN_DEBUG "%s: %s forgot to release port\n",
655 port->name, dev->name); 662 port->name, dev->name);
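The two share.c hunks above close a /proc registration race: every pardevice used to register its own proc entry, so a second device on the same port could collide with the first, and unregistering could tear down an entry the device did not own. The fix keeps one PARPORT_DEVPROC_REGISTERED bit per port and records the owning device in port->proc_device. The guard pattern in isolation, using the names from the diff:

	/* register the proc node only for the first device on this port */
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = tmp;
		parport_device_proc_register(tmp);
	}

	/* ... and only the owning device tears it down again */
	if (port->proc_device == dev) {
		port->proc_device = NULL;
		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
		parport_device_proc_unregister(dev);
	}

test_and_set_bit() is atomic, so two devices registering concurrently cannot both see the bit clear.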
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 4fc168b7009..e68d5f20ffb 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -129,7 +129,6 @@ struct acpiphp_func {
129 struct acpiphp_bridge *bridge; /* Ejectable PCI-to-PCI bridge */ 129 struct acpiphp_bridge *bridge; /* Ejectable PCI-to-PCI bridge */
130 130
131 struct list_head sibling; 131 struct list_head sibling;
132 struct pci_dev *pci_dev;
133 struct notifier_block nb; 132 struct notifier_block nb;
134 acpi_handle handle; 133 acpi_handle handle;
135 134
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a33794d9e0d..3a6064bce56 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -32,9 +32,6 @@
32 32
33/* 33/*
34 * Lifetime rules for pci_dev: 34 * Lifetime rules for pci_dev:
35 * - The one in acpiphp_func has its refcount elevated by pci_get_slot()
36 * when the driver is loaded or when an insertion event occurs. It loses
37 * a refcount when its ejected or the driver unloads.
38 * - The one in acpiphp_bridge has its refcount elevated by pci_get_slot() 35 * - The one in acpiphp_bridge has its refcount elevated by pci_get_slot()
39 * when the bridge is scanned and it loses a refcount when the bridge 36 * when the bridge is scanned and it loses a refcount when the bridge
40 * is removed. 37 * is removed.
@@ -130,6 +127,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
130 unsigned long long adr, sun; 127 unsigned long long adr, sun;
131 int device, function, retval; 128 int device, function, retval;
132 struct pci_bus *pbus = bridge->pci_bus; 129 struct pci_bus *pbus = bridge->pci_bus;
130 struct pci_dev *pdev;
133 131
134 if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle)) 132 if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
135 return AE_OK; 133 return AE_OK;
@@ -213,10 +211,10 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
213 newfunc->slot = slot; 211 newfunc->slot = slot;
214 list_add_tail(&newfunc->sibling, &slot->funcs); 212 list_add_tail(&newfunc->sibling, &slot->funcs);
215 213
216 /* associate corresponding pci_dev */ 214 pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
217 newfunc->pci_dev = pci_get_slot(pbus, PCI_DEVFN(device, function)); 215 if (pdev) {
218 if (newfunc->pci_dev) {
219 slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON); 216 slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
217 pci_dev_put(pdev);
220 } 218 }
221 219
222 if (is_dock_device(handle)) { 220 if (is_dock_device(handle)) {
@@ -617,7 +615,6 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
617 if (ACPI_FAILURE(status)) 615 if (ACPI_FAILURE(status))
618 err("failed to remove notify handler\n"); 616 err("failed to remove notify handler\n");
619 } 617 }
620 pci_dev_put(func->pci_dev);
621 list_del(list); 618 list_del(list);
622 kfree(func); 619 kfree(func);
623 } 620 }
@@ -1101,22 +1098,24 @@ static int __ref enable_device(struct acpiphp_slot *slot)
1101 pci_enable_bridges(bus); 1098 pci_enable_bridges(bus);
1102 pci_bus_add_devices(bus); 1099 pci_bus_add_devices(bus);
1103 1100
1104 /* associate pci_dev to our representation */
1105 list_for_each (l, &slot->funcs) { 1101 list_for_each (l, &slot->funcs) {
1106 func = list_entry(l, struct acpiphp_func, sibling); 1102 func = list_entry(l, struct acpiphp_func, sibling);
1107 func->pci_dev = pci_get_slot(bus, PCI_DEVFN(slot->device, 1103 dev = pci_get_slot(bus, PCI_DEVFN(slot->device,
1108 func->function)); 1104 func->function));
1109 if (!func->pci_dev) 1105 if (!dev)
1110 continue; 1106 continue;
1111 1107
1112 if (func->pci_dev->hdr_type != PCI_HEADER_TYPE_BRIDGE && 1108 if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE &&
1113 func->pci_dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) 1109 dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) {
1110 pci_dev_put(dev);
1114 continue; 1111 continue;
1112 }
1115 1113
1116 status = find_p2p_bridge(func->handle, (u32)1, bus, NULL); 1114 status = find_p2p_bridge(func->handle, (u32)1, bus, NULL);
1117 if (ACPI_FAILURE(status)) 1115 if (ACPI_FAILURE(status))
1118 warn("find_p2p_bridge failed (error code = 0x%x)\n", 1116 warn("find_p2p_bridge failed (error code = 0x%x)\n",
1119 status); 1117 status);
1118 pci_dev_put(dev);
1120 } 1119 }
1121 1120
1122 slot->flags |= SLOT_ENABLED; 1121 slot->flags |= SLOT_ENABLED;
@@ -1142,17 +1141,14 @@ static void disable_bridges(struct pci_bus *bus)
1142 */ 1141 */
1143static int disable_device(struct acpiphp_slot *slot) 1142static int disable_device(struct acpiphp_slot *slot)
1144{ 1143{
1145 int retval = 0;
1146 struct acpiphp_func *func; 1144 struct acpiphp_func *func;
1147 struct list_head *l; 1145 struct pci_dev *pdev;
1148 1146
1149 /* is this slot already disabled? */ 1147 /* is this slot already disabled? */
1150 if (!(slot->flags & SLOT_ENABLED)) 1148 if (!(slot->flags & SLOT_ENABLED))
1151 goto err_exit; 1149 goto err_exit;
1152 1150
1153 list_for_each (l, &slot->funcs) { 1151 list_for_each_entry(func, &slot->funcs, sibling) {
1154 func = list_entry(l, struct acpiphp_func, sibling);
1155
1156 if (func->bridge) { 1152 if (func->bridge) {
1157 /* cleanup p2p bridges under this P2P bridge */ 1153 /* cleanup p2p bridges under this P2P bridge */
1158 cleanup_p2p_bridge(func->bridge->handle, 1154 cleanup_p2p_bridge(func->bridge->handle,
@@ -1160,35 +1156,28 @@ static int disable_device(struct acpiphp_slot *slot)
1160 func->bridge = NULL; 1156 func->bridge = NULL;
1161 } 1157 }
1162 1158
1163 if (func->pci_dev) { 1159 pdev = pci_get_slot(slot->bridge->pci_bus,
1164 pci_stop_bus_device(func->pci_dev); 1160 PCI_DEVFN(slot->device, func->function));
1165 if (func->pci_dev->subordinate) { 1161 if (pdev) {
1166 disable_bridges(func->pci_dev->subordinate); 1162 pci_stop_bus_device(pdev);
1167 pci_disable_device(func->pci_dev); 1163 if (pdev->subordinate) {
1164 disable_bridges(pdev->subordinate);
1165 pci_disable_device(pdev);
1168 } 1166 }
1167 pci_remove_bus_device(pdev);
1168 pci_dev_put(pdev);
1169 } 1169 }
1170 } 1170 }
1171 1171
1172 list_for_each (l, &slot->funcs) { 1172 list_for_each_entry(func, &slot->funcs, sibling) {
1173 func = list_entry(l, struct acpiphp_func, sibling);
1174
1175 acpiphp_unconfigure_ioapics(func->handle); 1173 acpiphp_unconfigure_ioapics(func->handle);
1176 acpiphp_bus_trim(func->handle); 1174 acpiphp_bus_trim(func->handle);
1177 /* try to remove anyway.
1178 * acpiphp_bus_add might have been failed */
1179
1180 if (!func->pci_dev)
1181 continue;
1182
1183 pci_remove_bus_device(func->pci_dev);
1184 pci_dev_put(func->pci_dev);
1185 func->pci_dev = NULL;
1186 } 1175 }
1187 1176
1188 slot->flags &= (~SLOT_ENABLED); 1177 slot->flags &= (~SLOT_ENABLED);
1189 1178
1190 err_exit: 1179err_exit:
1191 return retval; 1180 return 0;
1192} 1181}
1193 1182
1194 1183
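The acpiphp changes above drop the cached struct pci_dev pointer from acpiphp_func. pci_get_slot() returns a device with its reference count raised, so holding the pointer across hotplug events meant a long-lived reference that was easy to leak or double-put; each path now looks the device up, uses it, and releases it immediately. The pattern in isolation:

	struct pci_dev *pdev;

	pdev = pci_get_slot(bus, PCI_DEVFN(device, function));
	if (pdev) {
		/* ... use pdev while this reference is held ... */
		pci_dev_put(pdev);	/* balance the get from pci_get_slot() */
	}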
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index dd18f857dfb..42e4260c3b1 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -153,45 +153,47 @@ int ibmphp_init_devno(struct slot **cur_slot)
153 return -1; 153 return -1;
154 } 154 }
155 for (loop = 0; loop < len; loop++) { 155 for (loop = 0; loop < len; loop++) {
156 if ((*cur_slot)->number == rtable->slots[loop].slot) { 156 if ((*cur_slot)->number == rtable->slots[loop].slot &&
157 if ((*cur_slot)->bus == rtable->slots[loop].bus) { 157 (*cur_slot)->bus == rtable->slots[loop].bus) {
158 struct io_apic_irq_attr irq_attr;
159
158 (*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn); 160 (*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn);
159 for (i = 0; i < 4; i++) 161 for (i = 0; i < 4; i++)
160 (*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus, 162 (*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus,
161 (int) (*cur_slot)->device, i); 163 (int) (*cur_slot)->device, i,
162 164 &irq_attr);
163 debug("(*cur_slot)->irq[0] = %x\n", 165
164 (*cur_slot)->irq[0]); 166 debug("(*cur_slot)->irq[0] = %x\n",
165 debug("(*cur_slot)->irq[1] = %x\n", 167 (*cur_slot)->irq[0]);
166 (*cur_slot)->irq[1]); 168 debug("(*cur_slot)->irq[1] = %x\n",
167 debug("(*cur_slot)->irq[2] = %x\n", 169 (*cur_slot)->irq[1]);
168 (*cur_slot)->irq[2]); 170 debug("(*cur_slot)->irq[2] = %x\n",
169 debug("(*cur_slot)->irq[3] = %x\n", 171 (*cur_slot)->irq[2]);
170 (*cur_slot)->irq[3]); 172 debug("(*cur_slot)->irq[3] = %x\n",
171 173 (*cur_slot)->irq[3]);
172 debug("rtable->exlusive_irqs = %x\n", 174
175 debug("rtable->exlusive_irqs = %x\n",
173 rtable->exclusive_irqs); 176 rtable->exclusive_irqs);
174 debug("rtable->slots[loop].irq[0].bitmap = %x\n", 177 debug("rtable->slots[loop].irq[0].bitmap = %x\n",
175 rtable->slots[loop].irq[0].bitmap); 178 rtable->slots[loop].irq[0].bitmap);
176 debug("rtable->slots[loop].irq[1].bitmap = %x\n", 179 debug("rtable->slots[loop].irq[1].bitmap = %x\n",
177 rtable->slots[loop].irq[1].bitmap); 180 rtable->slots[loop].irq[1].bitmap);
178 debug("rtable->slots[loop].irq[2].bitmap = %x\n", 181 debug("rtable->slots[loop].irq[2].bitmap = %x\n",
179 rtable->slots[loop].irq[2].bitmap); 182 rtable->slots[loop].irq[2].bitmap);
180 debug("rtable->slots[loop].irq[3].bitmap = %x\n", 183 debug("rtable->slots[loop].irq[3].bitmap = %x\n",
181 rtable->slots[loop].irq[3].bitmap); 184 rtable->slots[loop].irq[3].bitmap);
182 185
183 debug("rtable->slots[loop].irq[0].link = %x\n", 186 debug("rtable->slots[loop].irq[0].link = %x\n",
184 rtable->slots[loop].irq[0].link); 187 rtable->slots[loop].irq[0].link);
185 debug("rtable->slots[loop].irq[1].link = %x\n", 188 debug("rtable->slots[loop].irq[1].link = %x\n",
186 rtable->slots[loop].irq[1].link); 189 rtable->slots[loop].irq[1].link);
187 debug("rtable->slots[loop].irq[2].link = %x\n", 190 debug("rtable->slots[loop].irq[2].link = %x\n",
188 rtable->slots[loop].irq[2].link); 191 rtable->slots[loop].irq[2].link);
189 debug("rtable->slots[loop].irq[3].link = %x\n", 192 debug("rtable->slots[loop].irq[3].link = %x\n",
190 rtable->slots[loop].irq[3].link); 193 rtable->slots[loop].irq[3].link);
191 debug("end of init_devno\n"); 194 debug("end of init_devno\n");
192 kfree(rtable); 195 kfree(rtable);
193 return 0; 196 return 0;
194 }
195 } 197 }
196 } 198 }
197 199
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 3eee70928d4..2d6da78fddb 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -679,7 +679,7 @@ alloc_err:
679 return rc; 679 return rc;
680} 680}
681 681
682static int sn_pci_hotplug_init(void) 682static int __init sn_pci_hotplug_init(void)
683{ 683{
684 struct pci_bus *pci_bus = NULL; 684 struct pci_bus *pci_bus = NULL;
685 int rc; 685 int rc;
@@ -716,7 +716,7 @@ static int sn_pci_hotplug_init(void)
716 return registered == 1 ? 0 : -ENODEV; 716 return registered == 1 ? 0 : -ENODEV;
717} 717}
718 718
719static void sn_pci_hotplug_exit(void) 719static void __exit sn_pci_hotplug_exit(void)
720{ 720{
721 struct hotplug_slot *bss_hotplug_slot; 721 struct hotplug_slot *bss_hotplug_slot;
722 722
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
index 6808d8333ec..737a1c44b07 100644
--- a/drivers/pci/htirq.c
+++ b/drivers/pci/htirq.c
@@ -98,6 +98,7 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
98 int max_irq; 98 int max_irq;
99 int pos; 99 int pos;
100 int irq; 100 int irq;
101 int node;
101 102
102 pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ); 103 pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
103 if (!pos) 104 if (!pos)
@@ -125,7 +126,8 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
125 cfg->msg.address_lo = 0xffffffff; 126 cfg->msg.address_lo = 0xffffffff;
126 cfg->msg.address_hi = 0xffffffff; 127 cfg->msg.address_hi = 0xffffffff;
127 128
128 irq = create_irq(); 129 node = dev_to_node(&dev->dev);
130 irq = create_irq_nr(0, node);
129 131
130 if (irq <= 0) { 132 if (irq <= 0) {
131 kfree(cfg); 133 kfree(cfg);
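__ht_create_irq() now asks for its irq_desc on the NUMA node of the device instead of wherever the boot CPU happens to live; create_irq_nr(0, node) returns the first free irq number, or 0 on failure in this kernel generation. A hedged sketch of the same idiom:

#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/pci.h>

/* Sketch: allocate an irq descriptor close to the device's memory.
 * dev_to_node() yields -1 when the node is unknown, which the
 * allocator treats as "no preference". */
static int alloc_irq_near_device(struct pci_dev *dev)
{
	int node = dev_to_node(&dev->dev);
	int irq = create_irq_nr(0, node);

	return irq > 0 ? irq : -ENOSPC;
}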
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index a563fbe559d..cd389162735 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1972,15 +1972,6 @@ static int __init init_dmars(void)
1972 } 1972 }
1973 } 1973 }
1974 1974
1975#ifdef CONFIG_INTR_REMAP
1976 if (!intr_remapping_enabled) {
1977 ret = enable_intr_remapping(0);
1978 if (ret)
1979 printk(KERN_ERR
1980 "IOMMU: enable interrupt remapping failed\n");
1981 }
1982#endif
1983
1984 /* 1975 /*
1985 * For each rmrr 1976 * For each rmrr
1986 * for each dev attached to rmrr 1977 * for each dev attached to rmrr
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index f5e0ea724a6..3a0cb0bb059 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -15,6 +15,14 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
15static int ir_ioapic_num; 15static int ir_ioapic_num;
16int intr_remapping_enabled; 16int intr_remapping_enabled;
17 17
18static int disable_intremap;
19static __init int setup_nointremap(char *str)
20{
21 disable_intremap = 1;
22 return 0;
23}
24early_param("nointremap", setup_nointremap);
25
18struct irq_2_iommu { 26struct irq_2_iommu {
19 struct intel_iommu *iommu; 27 struct intel_iommu *iommu;
20 u16 irte_index; 28 u16 irte_index;
@@ -23,15 +31,12 @@ struct irq_2_iommu {
23}; 31};
24 32
25#ifdef CONFIG_GENERIC_HARDIRQS 33#ifdef CONFIG_GENERIC_HARDIRQS
26static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu) 34static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
27{ 35{
28 struct irq_2_iommu *iommu; 36 struct irq_2_iommu *iommu;
29 int node;
30
31 node = cpu_to_node(cpu);
32 37
33 iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node); 38 iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
34 printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node); 39 printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);
35 40
36 return iommu; 41 return iommu;
37} 42}
@@ -48,7 +53,7 @@ static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
48 return desc->irq_2_iommu; 53 return desc->irq_2_iommu;
49} 54}
50 55
51static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu) 56static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
52{ 57{
53 struct irq_desc *desc; 58 struct irq_desc *desc;
54 struct irq_2_iommu *irq_iommu; 59 struct irq_2_iommu *irq_iommu;
@@ -56,7 +61,7 @@ static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
56 /* 61 /*
57 * alloc irq desc if not allocated already. 62 * alloc irq desc if not allocated already.
58 */ 63 */
59 desc = irq_to_desc_alloc_cpu(irq, cpu); 64 desc = irq_to_desc_alloc_node(irq, node);
60 if (!desc) { 65 if (!desc) {
61 printk(KERN_INFO "can not get irq_desc for %d\n", irq); 66 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
62 return NULL; 67 return NULL;
@@ -65,14 +70,14 @@ static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
65 irq_iommu = desc->irq_2_iommu; 70 irq_iommu = desc->irq_2_iommu;
66 71
67 if (!irq_iommu) 72 if (!irq_iommu)
68 desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu); 73 desc->irq_2_iommu = get_one_free_irq_2_iommu(node);
69 74
70 return desc->irq_2_iommu; 75 return desc->irq_2_iommu;
71} 76}
72 77
73static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) 78static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
74{ 79{
75 return irq_2_iommu_alloc_cpu(irq, boot_cpu_id); 80 return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id));
76} 81}
77 82
78#else /* !CONFIG_SPARSE_IRQ */ 83#else /* !CONFIG_SPARSE_IRQ */
@@ -423,20 +428,6 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
423 readl, (sts & DMA_GSTS_IRTPS), sts); 428 readl, (sts & DMA_GSTS_IRTPS), sts);
424 spin_unlock_irqrestore(&iommu->register_lock, flags); 429 spin_unlock_irqrestore(&iommu->register_lock, flags);
425 430
426 if (mode == 0) {
427 spin_lock_irqsave(&iommu->register_lock, flags);
428
 429 /* enable compatibility format interrupt pass through */
430 cmd = iommu->gcmd | DMA_GCMD_CFI;
431 iommu->gcmd |= DMA_GCMD_CFI;
432 writel(cmd, iommu->reg + DMAR_GCMD_REG);
433
434 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
435 readl, (sts & DMA_GSTS_CFIS), sts);
436
437 spin_unlock_irqrestore(&iommu->register_lock, flags);
438 }
439
440 /* 431 /*
441 * global invalidation of interrupt entry cache before enabling 432 * global invalidation of interrupt entry cache before enabling
442 * interrupt-remapping. 433 * interrupt-remapping.
@@ -516,6 +507,23 @@ end:
516 spin_unlock_irqrestore(&iommu->register_lock, flags); 507 spin_unlock_irqrestore(&iommu->register_lock, flags);
517} 508}
518 509
510int __init intr_remapping_supported(void)
511{
512 struct dmar_drhd_unit *drhd;
513
514 if (disable_intremap)
515 return 0;
516
517 for_each_drhd_unit(drhd) {
518 struct intel_iommu *iommu = drhd->iommu;
519
520 if (!ecap_ir_support(iommu->ecap))
521 return 0;
522 }
523
524 return 1;
525}
526
519int __init enable_intr_remapping(int eim) 527int __init enable_intr_remapping(int eim)
520{ 528{
521 struct dmar_drhd_unit *drhd; 529 struct dmar_drhd_unit *drhd;
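Together with the deletion in intel-iommu.c above, this moves the "can we remap at all?" question into one place: intr_remapping_supported() fails if the user passed nointremap or if any DRHD unit lacks the ecap IR bit. A sketch of how a caller might consume it; the wrapper name is hypothetical:

/* Sketch: probe capability before committing.  enable_intr_remapping(0)
 * asks for xapic (non-EIM) mode; pass 1 for x2apic/EIM. */
static int __init maybe_enable_intr_remapping(void)
{
	if (!intr_remapping_supported())
		return -ENODEV;	/* "nointremap" given, or hardware lacks IR */

	return enable_intr_remapping(0);
}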
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 34bf0fdf504..1a91bf9687a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -557,7 +557,8 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
557 } else { 557 } else {
558 error = -ENODEV; 558 error = -ENODEV;
559 /* Fall back to PCI_D0 if native PM is not supported */ 559 /* Fall back to PCI_D0 if native PM is not supported */
560 pci_update_current_state(dev, PCI_D0); 560 if (!dev->pm_cap)
561 dev->current_state = PCI_D0;
561 } 562 }
562 563
563 return error; 564 return error;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e3c3e081b83..f1ae2475fff 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -745,6 +745,8 @@ int pci_setup_device(struct pci_dev *dev)
745 745
746 /* Early fixups, before probing the BARs */ 746 /* Early fixups, before probing the BARs */
747 pci_fixup_device(pci_fixup_early, dev); 747 pci_fixup_device(pci_fixup_early, dev);
748 /* device class may be changed after fixup */
749 class = dev->class >> 8;
748 750
749 switch (dev->hdr_type) { /* header type */ 751 switch (dev->hdr_type) { /* header type */
750 case PCI_HEADER_TYPE_NORMAL: /* standard header */ 752 case PCI_HEADER_TYPE_NORMAL: /* standard header */
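pci_setup_device() re-reads the class code because an early fixup is allowed to change it, and the switch below dispatches on the cached value. A hedged sketch of such a fixup; the vendor/device IDs are placeholders, not a real quirk:

#include <linux/pci.h>

/* Sketch: an early fixup that reclassifies a device before the
 * header-type switch in pci_setup_device() looks at dev->class. */
static void quirk_reclassify_example(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_STORAGE_SATA_AHCI;	/* 0x010601 */
}
DECLARE_PCI_FIXUP_EARLY(0x1234, 0x5678, quirk_reclassify_example);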
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 27647354398..fbf965b31c1 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -217,7 +217,7 @@ config PCMCIA_PXA2XX
217 depends on ARM && ARCH_PXA && PCMCIA 217 depends on ARM && ARCH_PXA && PCMCIA
218 depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \ 218 depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
219 || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \ 219 || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \
220 || ARCH_VIPER || ARCH_PXA_ESERIES) 220 || ARCH_VIPER || ARCH_PXA_ESERIES || MACH_STARGATE2)
221 help 221 help
222 Say Y here to include support for the PXA2xx PCMCIA controller 222 Say Y here to include support for the PXA2xx PCMCIA controller
223 223
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index bbac4632722..047394d98ac 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -73,5 +73,6 @@ pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o
73pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o 73pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
74pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o 74pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
75pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o 75pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
76pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o
76 77
77obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_core.o $(pxa2xx-obj-y) 78obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_core.o $(pxa2xx-obj-y)
diff --git a/drivers/pcmcia/pxa2xx_stargate2.c b/drivers/pcmcia/pxa2xx_stargate2.c
new file mode 100644
index 00000000000..490749ea677
--- /dev/null
+++ b/drivers/pcmcia/pxa2xx_stargate2.c
@@ -0,0 +1,174 @@
1/*
2 * linux/drivers/pcmcia/pxa2xx_stargate2.c
3 *
4 * Stargate 2 PCMCIA specific routines.
5 *
6 * Created: December 6, 2005
7 * Author: Ed C. Epp
8 * Copyright: Intel Corp 2005
9 * Jonathan Cameron <jic23@cam.ac.uk> 2009
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <linux/interrupt.h>
20#include <linux/delay.h>
21#include <linux/platform_device.h>
22#include <linux/gpio.h>
23
24#include <pcmcia/ss.h>
25
26#include <asm/irq.h>
27#include <asm/mach-types.h>
28
29#include "soc_common.h"
30
31#define SG2_S0_BUFF_CTL 120
32#define SG2_S0_POWER_CTL 108
33#define SG2_S0_GPIO_RESET 82
34#define SG2_S0_GPIO_DETECT 53
35#define SG2_S0_GPIO_READY 81
36
37static struct pcmcia_irqs irqs[] = {
38 { 0, IRQ_GPIO(SG2_S0_GPIO_DETECT), "PCMCIA0 CD" },
39};
40
41static int sg2_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
42{
43 skt->irq = IRQ_GPIO(SG2_S0_GPIO_READY);
44 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
45}
46
47static void sg2_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
48{
49 soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs));
50}
51
52static void sg2_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
53 struct pcmcia_state *state)
54{
55 state->detect = !gpio_get_value(SG2_S0_GPIO_DETECT);
56 state->ready = !!gpio_get_value(SG2_S0_GPIO_READY);
57 state->bvd1 = 0; /* not available - battery detect on card */
58 state->bvd2 = 0; /* not available */
59 state->vs_3v = 1; /* not available - voltage detect for card */
60 state->vs_Xv = 0; /* not available */
61 state->wrprot = 0; /* not available - write protect */
62}
63
64static int sg2_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
65 const socket_state_t *state)
66{
67 /* Enable card power */
68 switch (state->Vcc) {
69 case 0:
70 /* sets power ctl register high */
71 gpio_set_value(SG2_S0_POWER_CTL, 1);
72 break;
73 case 33:
74 case 50:
75 /* sets power control register low (clear) */
76 gpio_set_value(SG2_S0_POWER_CTL, 0);
77 msleep(100);
78 break;
79 default:
80 pr_err("%s(): bad Vcc %u\n",
81 __func__, state->Vcc);
82 return -1;
83 }
84
85 /* reset */
86 gpio_set_value(SG2_S0_GPIO_RESET, !!(state->flags & SS_RESET));
87
88 return 0;
89}
90
91static void sg2_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
92{
93 soc_pcmcia_enable_irqs(skt, irqs, ARRAY_SIZE(irqs));
94}
95
96static void sg2_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
97{
98 soc_pcmcia_disable_irqs(skt, irqs, ARRAY_SIZE(irqs));
99}
100
101static struct pcmcia_low_level sg2_pcmcia_ops __initdata = {
102 .owner = THIS_MODULE,
103 .hw_init = sg2_pcmcia_hw_init,
104 .hw_shutdown = sg2_pcmcia_hw_shutdown,
105 .socket_state = sg2_pcmcia_socket_state,
106 .configure_socket = sg2_pcmcia_configure_socket,
107 .socket_init = sg2_pcmcia_socket_init,
108 .socket_suspend = sg2_pcmcia_socket_suspend,
109 .nr = 1,
110};
111
112static struct platform_device *sg2_pcmcia_device;
113
114static int __init sg2_pcmcia_init(void)
115{
116 int ret;
117
118 if (!machine_is_stargate2())
119 return -ENODEV;
120
121 sg2_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
122 if (!sg2_pcmcia_device)
123 return -ENOMEM;
124
125 ret = gpio_request(SG2_S0_BUFF_CTL, "SG2 CF buff ctl");
126 if (ret)
127 goto error_put_platform_device;
128 ret = gpio_request(SG2_S0_POWER_CTL, "SG2 CF power ctl");
129 if (ret)
130 goto error_free_gpio_buff_ctl;
131 ret = gpio_request(SG2_S0_GPIO_RESET, "SG2 CF reset");
132 if (ret)
133 goto error_free_gpio_power_ctl;
134 /* Set gpio directions */
135 gpio_direction_output(SG2_S0_BUFF_CTL, 0);
136 gpio_direction_output(SG2_S0_POWER_CTL, 1);
137 gpio_direction_output(SG2_S0_GPIO_RESET, 1);
138
139 ret = platform_device_add_data(sg2_pcmcia_device,
140 &sg2_pcmcia_ops,
141 sizeof(sg2_pcmcia_ops));
142 if (ret)
143 goto error_free_gpio_reset;
144
145 ret = platform_device_add(sg2_pcmcia_device);
146 if (ret)
147 goto error_free_gpio_reset;
148
149 return 0;
150error_free_gpio_reset:
151 gpio_free(SG2_S0_GPIO_RESET);
152error_free_gpio_power_ctl:
153 gpio_free(SG2_S0_POWER_CTL);
154error_free_gpio_buff_ctl:
155 gpio_free(SG2_S0_BUFF_CTL);
156error_put_platform_device:
157 platform_device_put(sg2_pcmcia_device);
158
159 return ret;
160}
161
162static void __exit sg2_pcmcia_exit(void)
163{
164 platform_device_unregister(sg2_pcmcia_device);
165 gpio_free(SG2_S0_BUFF_CTL);
166 gpio_free(SG2_S0_POWER_CTL);
167 gpio_free(SG2_S0_GPIO_RESET);
168}
169
170fs_initcall(sg2_pcmcia_init);
171module_exit(sg2_pcmcia_exit);
172
173MODULE_LICENSE("GPL");
174MODULE_ALIAS("platform:pxa2xx-pcmcia");
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index adf17856bac..7f207f335be 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -123,7 +123,7 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev,
123 } 123 }
124 124
125 flags = irq_flags(triggering, polarity, shareable); 125 flags = irq_flags(triggering, polarity, shareable);
126 irq = acpi_register_gsi(gsi, triggering, polarity); 126 irq = acpi_register_gsi(&dev->dev, gsi, triggering, polarity);
127 if (irq >= 0) 127 if (irq >= 0)
128 pcibios_penalize_isa_irq(irq, 1); 128 pcibios_penalize_isa_irq(irq, 1);
129 else 129 else
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index f604061d2bb..ba976542788 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -638,6 +638,24 @@ int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t start,
638} 638}
639EXPORT_SYMBOL(pnp_possible_config); 639EXPORT_SYMBOL(pnp_possible_config);
640 640
641int pnp_range_reserved(resource_size_t start, resource_size_t end)
642{
643 struct pnp_dev *dev;
644 struct pnp_resource *pnp_res;
645 resource_size_t *dev_start, *dev_end;
646
647 pnp_for_each_dev(dev) {
648 list_for_each_entry(pnp_res, &dev->resources, list) {
649 dev_start = &pnp_res->res.start;
650 dev_end = &pnp_res->res.end;
651 if (ranged_conflict(&start, &end, dev_start, dev_end))
652 return 1;
653 }
654 }
655 return 0;
656}
657EXPORT_SYMBOL(pnp_range_reserved);
658
641/* format is: pnp_reserve_irq=irq1[,irq2] .... */ 659/* format is: pnp_reserve_irq=irq1[,irq2] .... */
642static int __init pnp_setup_reserve_irq(char *str) 660static int __init pnp_setup_reserve_irq(char *str)
643{ 661{
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index 72b15495183..c6628f5a0af 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -497,7 +497,7 @@ static struct platform_driver da903x_regulator_driver = {
497 .owner = THIS_MODULE, 497 .owner = THIS_MODULE,
498 }, 498 },
499 .probe = da903x_regulator_probe, 499 .probe = da903x_regulator_probe,
500 .remove = da903x_regulator_remove, 500 .remove = __devexit_p(da903x_regulator_remove),
501}; 501};
502 502
503static int __init da903x_regulator_init(void) 503static int __init da903x_regulator_init(void)
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4e9851fc174..277d35d232f 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -692,7 +692,7 @@ config RTC_DRV_GENERIC
692 tristate "Generic RTC support" 692 tristate "Generic RTC support"
693 # Please consider writing a new RTC driver instead of using the generic 693 # Please consider writing a new RTC driver instead of using the generic
694 # RTC abstraction 694 # RTC abstraction
695 depends on PARISC || M68K || PPC 695 depends on PARISC || M68K || PPC || SUPERH32
696 help 696 help
697 Say Y or M here to enable RTC support on systems using the generic 697 Say Y or M here to enable RTC support on systems using the generic
698 RTC abstraction. If you do not know what you are doing, you should 698 RTC abstraction. If you do not know what you are doing, you should
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index f7a3283dd02..551332e4ed0 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -12,32 +12,56 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/rtc.h> 13#include <linux/rtc.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <mach/hardware.h> 15#include <linux/io.h>
16
17#define EP93XX_RTC_DATA 0x000
18#define EP93XX_RTC_MATCH 0x004
19#define EP93XX_RTC_STATUS 0x008
20#define EP93XX_RTC_STATUS_INTR (1<<0)
21#define EP93XX_RTC_LOAD 0x00C
22#define EP93XX_RTC_CONTROL 0x010
23#define EP93XX_RTC_CONTROL_MIE (1<<0)
24#define EP93XX_RTC_SWCOMP 0x108
25#define EP93XX_RTC_SWCOMP_DEL_MASK 0x001f0000
26#define EP93XX_RTC_SWCOMP_DEL_SHIFT 16
27#define EP93XX_RTC_SWCOMP_INT_MASK 0x0000ffff
28#define EP93XX_RTC_SWCOMP_INT_SHIFT 0
29
30#define DRV_VERSION "0.3"
16 31
17#define EP93XX_RTC_REG(x) (EP93XX_RTC_BASE + (x)) 32/*
18#define EP93XX_RTC_DATA EP93XX_RTC_REG(0x0000) 33 * struct device dev.platform_data is used to store our private data
19#define EP93XX_RTC_LOAD EP93XX_RTC_REG(0x000C) 34 * because struct rtc_device does not have a variable to hold it.
20#define EP93XX_RTC_SWCOMP EP93XX_RTC_REG(0x0108) 35 */
21 36struct ep93xx_rtc {
22#define DRV_VERSION "0.2" 37 void __iomem *mmio_base;
38};
23 39
24static int ep93xx_get_swcomp(struct device *dev, unsigned short *preload, 40static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
25 unsigned short *delete) 41 unsigned short *delete)
26{ 42{
27 unsigned short comp = __raw_readl(EP93XX_RTC_SWCOMP); 43 struct ep93xx_rtc *ep93xx_rtc = dev->platform_data;
44 unsigned long comp;
45
46 comp = __raw_readl(ep93xx_rtc->mmio_base + EP93XX_RTC_SWCOMP);
28 47
29 if (preload) 48 if (preload)
30 *preload = comp & 0xffff; 49 *preload = (comp & EP93XX_RTC_SWCOMP_INT_MASK)
50 >> EP93XX_RTC_SWCOMP_INT_SHIFT;
31 51
32 if (delete) 52 if (delete)
33 *delete = (comp >> 16) & 0x1f; 53 *delete = (comp & EP93XX_RTC_SWCOMP_DEL_MASK)
54 >> EP93XX_RTC_SWCOMP_DEL_SHIFT;
34 55
35 return 0; 56 return 0;
36} 57}
37 58
38static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm) 59static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm)
39{ 60{
40 unsigned long time = __raw_readl(EP93XX_RTC_DATA); 61 struct ep93xx_rtc *ep93xx_rtc = dev->platform_data;
62 unsigned long time;
63
64 time = __raw_readl(ep93xx_rtc->mmio_base + EP93XX_RTC_DATA);
41 65
42 rtc_time_to_tm(time, tm); 66 rtc_time_to_tm(time, tm);
43 return 0; 67 return 0;
@@ -45,7 +69,9 @@ static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm)
45 69
46static int ep93xx_rtc_set_mmss(struct device *dev, unsigned long secs) 70static int ep93xx_rtc_set_mmss(struct device *dev, unsigned long secs)
47{ 71{
48 __raw_writel(secs + 1, EP93XX_RTC_LOAD); 72 struct ep93xx_rtc *ep93xx_rtc = dev->platform_data;
73
74 __raw_writel(secs + 1, ep93xx_rtc->mmio_base + EP93XX_RTC_LOAD);
49 return 0; 75 return 0;
50} 76}
51 77
@@ -53,7 +79,7 @@ static int ep93xx_rtc_proc(struct device *dev, struct seq_file *seq)
53{ 79{
54 unsigned short preload, delete; 80 unsigned short preload, delete;
55 81
56 ep93xx_get_swcomp(dev, &preload, &delete); 82 ep93xx_rtc_get_swcomp(dev, &preload, &delete);
57 83
58 seq_printf(seq, "preload\t\t: %d\n", preload); 84 seq_printf(seq, "preload\t\t: %d\n", preload);
59 seq_printf(seq, "delete\t\t: %d\n", delete); 85 seq_printf(seq, "delete\t\t: %d\n", delete);
@@ -67,54 +93,104 @@ static const struct rtc_class_ops ep93xx_rtc_ops = {
67 .proc = ep93xx_rtc_proc, 93 .proc = ep93xx_rtc_proc,
68}; 94};
69 95
70static ssize_t ep93xx_sysfs_show_comp_preload(struct device *dev, 96static ssize_t ep93xx_rtc_show_comp_preload(struct device *dev,
71 struct device_attribute *attr, char *buf) 97 struct device_attribute *attr, char *buf)
72{ 98{
73 unsigned short preload; 99 unsigned short preload;
74 100
75 ep93xx_get_swcomp(dev, &preload, NULL); 101 ep93xx_rtc_get_swcomp(dev, &preload, NULL);
76 102
77 return sprintf(buf, "%d\n", preload); 103 return sprintf(buf, "%d\n", preload);
78} 104}
79static DEVICE_ATTR(comp_preload, S_IRUGO, ep93xx_sysfs_show_comp_preload, NULL); 105static DEVICE_ATTR(comp_preload, S_IRUGO, ep93xx_rtc_show_comp_preload, NULL);
80 106
81static ssize_t ep93xx_sysfs_show_comp_delete(struct device *dev, 107static ssize_t ep93xx_rtc_show_comp_delete(struct device *dev,
82 struct device_attribute *attr, char *buf) 108 struct device_attribute *attr, char *buf)
83{ 109{
84 unsigned short delete; 110 unsigned short delete;
85 111
86 ep93xx_get_swcomp(dev, NULL, &delete); 112 ep93xx_rtc_get_swcomp(dev, NULL, &delete);
87 113
88 return sprintf(buf, "%d\n", delete); 114 return sprintf(buf, "%d\n", delete);
89} 115}
90static DEVICE_ATTR(comp_delete, S_IRUGO, ep93xx_sysfs_show_comp_delete, NULL); 116static DEVICE_ATTR(comp_delete, S_IRUGO, ep93xx_rtc_show_comp_delete, NULL);
91 117
92 118
93static int __devinit ep93xx_rtc_probe(struct platform_device *dev) 119static int __init ep93xx_rtc_probe(struct platform_device *pdev)
94{ 120{
95 struct rtc_device *rtc = rtc_device_register("ep93xx", 121 struct ep93xx_rtc *ep93xx_rtc;
96 &dev->dev, &ep93xx_rtc_ops, THIS_MODULE); 122 struct resource *res;
123 struct rtc_device *rtc;
124 int err;
125
126 ep93xx_rtc = kzalloc(sizeof(struct ep93xx_rtc), GFP_KERNEL);
127 if (ep93xx_rtc == NULL)
128 return -ENOMEM;
129
130 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
131 if (res == NULL)
132 return -ENXIO;
133
134 res = request_mem_region(res->start, resource_size(res), pdev->name);
135 if (res == NULL)
136 return -EBUSY;
137
138 ep93xx_rtc->mmio_base = ioremap(res->start, resource_size(res));
139 if (ep93xx_rtc->mmio_base == NULL) {
140 err = -ENXIO;
141 goto fail;
142 }
97 143
144 pdev->dev.platform_data = ep93xx_rtc;
145
146 rtc = rtc_device_register(pdev->name,
147 &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE);
98 if (IS_ERR(rtc)) { 148 if (IS_ERR(rtc)) {
99 return PTR_ERR(rtc); 149 err = PTR_ERR(rtc);
150 goto fail;
100 } 151 }
101 152
102 platform_set_drvdata(dev, rtc); 153 platform_set_drvdata(pdev, rtc);
103 154
104 device_create_file(&dev->dev, &dev_attr_comp_preload); 155 err = device_create_file(&pdev->dev, &dev_attr_comp_preload);
105 device_create_file(&dev->dev, &dev_attr_comp_delete); 156 if (err)
157 goto fail;
158 err = device_create_file(&pdev->dev, &dev_attr_comp_delete);
159 if (err) {
160 device_remove_file(&pdev->dev, &dev_attr_comp_preload);
161 goto fail;
162 }
106 163
107 return 0; 164 return 0;
165
166fail:
167 if (ep93xx_rtc->mmio_base) {
168 iounmap(ep93xx_rtc->mmio_base);
169 pdev->dev.platform_data = NULL;
170 }
171 release_mem_region(res->start, resource_size(res));
172 return err;
108} 173}
109 174
110static int __devexit ep93xx_rtc_remove(struct platform_device *dev) 175static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
111{ 176{
112 struct rtc_device *rtc = platform_get_drvdata(dev); 177 struct rtc_device *rtc = platform_get_drvdata(pdev);
178 struct ep93xx_rtc *ep93xx_rtc = pdev->dev.platform_data;
179 struct resource *res;
180
181 /* cleanup sysfs */
182 device_remove_file(&pdev->dev, &dev_attr_comp_delete);
183 device_remove_file(&pdev->dev, &dev_attr_comp_preload);
184
185 rtc_device_unregister(rtc);
186
187 iounmap(ep93xx_rtc->mmio_base);
188 pdev->dev.platform_data = NULL;
113 189
114 if (rtc) 190 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
115 rtc_device_unregister(rtc); 191 release_mem_region(res->start, resource_size(res));
116 192
117 platform_set_drvdata(dev, NULL); 193 platform_set_drvdata(pdev, NULL);
118 194
119 return 0; 195 return 0;
120} 196}
@@ -122,23 +198,22 @@ static int __devexit ep93xx_rtc_remove(struct platform_device *dev)
122/* work with hotplug and coldplug */ 198/* work with hotplug and coldplug */
123MODULE_ALIAS("platform:ep93xx-rtc"); 199MODULE_ALIAS("platform:ep93xx-rtc");
124 200
125static struct platform_driver ep93xx_rtc_platform_driver = { 201static struct platform_driver ep93xx_rtc_driver = {
126 .driver = { 202 .driver = {
127 .name = "ep93xx-rtc", 203 .name = "ep93xx-rtc",
128 .owner = THIS_MODULE, 204 .owner = THIS_MODULE,
129 }, 205 },
130 .probe = ep93xx_rtc_probe, 206 .remove = __exit_p(ep93xx_rtc_remove),
131 .remove = __devexit_p(ep93xx_rtc_remove),
132}; 207};
133 208
134static int __init ep93xx_rtc_init(void) 209static int __init ep93xx_rtc_init(void)
135{ 210{
136 return platform_driver_register(&ep93xx_rtc_platform_driver); 211 return platform_driver_probe(&ep93xx_rtc_driver, ep93xx_rtc_probe);
137} 212}
138 213
139static void __exit ep93xx_rtc_exit(void) 214static void __exit ep93xx_rtc_exit(void)
140{ 215{
141 platform_driver_unregister(&ep93xx_rtc_platform_driver); 216 platform_driver_unregister(&ep93xx_rtc_driver);
142} 217}
143 218
144MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>"); 219MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
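Two shape changes in the ep93xx driver are worth noting: probe is now __init, and the driver registers through platform_driver_probe(), which runs the probe exactly once at registration and lets its code be discarded after boot; the cost is that late-appearing devices can never bind. A minimal sketch of the pattern with hypothetical names:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Sketch: one-shot binding for a non-hotpluggable platform device.
 * foo_probe() may live in .init.text because platform_driver_probe()
 * calls it only during registration. */
static int __init foo_probe(struct platform_device *pdev)
{
	return 0;	/* map resources, register the class device, ... */
}

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.owner = THIS_MODULE,
	},
	/* no .probe on purpose */
};

static int __init foo_init(void)
{
	return platform_driver_probe(&foo_driver, foo_probe);
}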
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index 82615355215..457231bb102 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -102,7 +102,7 @@ static const struct rtc_class_ops pl030_ops = {
102 .set_alarm = pl030_set_alarm, 102 .set_alarm = pl030_set_alarm,
103}; 103};
104 104
105static int pl030_probe(struct amba_device *dev, void *id) 105static int pl030_probe(struct amba_device *dev, struct amba_id *id)
106{ 106{
107 struct pl030_rtc *rtc; 107 struct pl030_rtc *rtc;
108 int ret; 108 int ret;
@@ -117,7 +117,7 @@ static int pl030_probe(struct amba_device *dev, void *id)
117 goto err_rtc; 117 goto err_rtc;
118 } 118 }
119 119
120 rtc->base = ioremap(dev->res.start, SZ_4K); 120 rtc->base = ioremap(dev->res.start, resource_size(&dev->res));
121 if (!rtc->base) { 121 if (!rtc->base) {
122 ret = -ENOMEM; 122 ret = -ENOMEM;
123 goto err_map; 123 goto err_map;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 333eec689d2..f41873f98f6 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -127,7 +127,7 @@ static int pl031_remove(struct amba_device *adev)
127 return 0; 127 return 0;
128} 128}
129 129
130static int pl031_probe(struct amba_device *adev, void *id) 130static int pl031_probe(struct amba_device *adev, struct amba_id *id)
131{ 131{
132 int ret; 132 int ret;
133 struct pl031_local *ldata; 133 struct pl031_local *ldata;
@@ -142,8 +142,7 @@ static int pl031_probe(struct amba_device *adev, void *id)
142 goto out; 142 goto out;
143 } 143 }
144 144
145 ldata->base = ioremap(adev->res.start, 145 ldata->base = ioremap(adev->res.start, resource_size(&adev->res));
146 adev->res.end - adev->res.start + 1);
147 if (!ldata->base) { 146 if (!ldata->base) {
148 ret = -ENOMEM; 147 ret = -ENOMEM;
149 goto out_no_remap; 148 goto out_no_remap;
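Both PrimeCell RTC drivers now size the ioremap() from the AMBA resource with resource_size(), replacing the open-coded end - start + 1 arithmetic. The helper, from <linux/ioport.h>, is simply:

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;	/* resource ranges are inclusive */
}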
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d1815272c43..442bb98a282 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -603,7 +603,7 @@ static void dasd_profile_end(struct dasd_block *block,
603 if (dasd_profile_level != DASD_PROFILE_ON) 603 if (dasd_profile_level != DASD_PROFILE_ON)
604 return; 604 return;
605 605
606 sectors = req->nr_sectors; 606 sectors = blk_rq_sectors(req);
607 if (!cqr->buildclk || !cqr->startclk || 607 if (!cqr->buildclk || !cqr->startclk ||
608 !cqr->stopclk || !cqr->endclk || 608 !cqr->stopclk || !cqr->endclk ||
609 !sectors) 609 !sectors)
@@ -851,8 +851,10 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
851 851
852 /* Check the cqr */ 852 /* Check the cqr */
853 rc = dasd_check_cqr(cqr); 853 rc = dasd_check_cqr(cqr);
854 if (rc) 854 if (rc) {
855 cqr->intrc = rc;
855 return rc; 856 return rc;
857 }
856 device = (struct dasd_device *) cqr->startdev; 858 device = (struct dasd_device *) cqr->startdev;
857 if (cqr->retries < 0) { 859 if (cqr->retries < 0) {
858 /* internal error 14 - start_IO run out of retries */ 860 /* internal error 14 - start_IO run out of retries */
@@ -915,6 +917,7 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
915 BUG(); 917 BUG();
916 break; 918 break;
917 } 919 }
920 cqr->intrc = rc;
918 return rc; 921 return rc;
919} 922}
920 923
@@ -1454,8 +1457,12 @@ int dasd_sleep_on(struct dasd_ccw_req *cqr)
1454 dasd_add_request_tail(cqr); 1457 dasd_add_request_tail(cqr);
1455 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 1458 wait_event(generic_waitq, _wait_for_wakeup(cqr));
1456 1459
1457 /* Request status is either done or failed. */ 1460 if (cqr->status == DASD_CQR_DONE)
1458 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; 1461 rc = 0;
1462 else if (cqr->intrc)
1463 rc = cqr->intrc;
1464 else
1465 rc = -EIO;
1459 return rc; 1466 return rc;
1460} 1467}
1461 1468
@@ -1477,8 +1484,15 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
1477 dasd_cancel_req(cqr); 1484 dasd_cancel_req(cqr);
1478 /* wait (non-interruptible) for final status */ 1485 /* wait (non-interruptible) for final status */
1479 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 1486 wait_event(generic_waitq, _wait_for_wakeup(cqr));
1487 cqr->intrc = rc;
1480 } 1488 }
1481 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; 1489
1490 if (cqr->status == DASD_CQR_DONE)
1491 rc = 0;
1492 else if (cqr->intrc)
1493 rc = cqr->intrc;
1494 else
1495 rc = -EIO;
1482 return rc; 1496 return rc;
1483} 1497}
1484 1498
@@ -1523,8 +1537,12 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
1523 1537
1524 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 1538 wait_event(generic_waitq, _wait_for_wakeup(cqr));
1525 1539
1526 /* Request status is either done or failed. */ 1540 if (cqr->status == DASD_CQR_DONE)
1527 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; 1541 rc = 0;
1542 else if (cqr->intrc)
1543 rc = cqr->intrc;
1544 else
1545 rc = -EIO;
1528 return rc; 1546 return rc;
1529} 1547}
1530 1548
@@ -1614,15 +1632,6 @@ void dasd_block_clear_timer(struct dasd_block *block)
1614} 1632}
1615 1633
1616/* 1634/*
1617 * posts the buffer_cache about a finalized request
1618 */
1619static inline void dasd_end_request(struct request *req, int error)
1620{
1621 if (__blk_end_request(req, error, blk_rq_bytes(req)))
1622 BUG();
1623}
1624
1625/*
1626 * Process finished error recovery ccw. 1635 * Process finished error recovery ccw.
1627 */ 1636 */
1628static inline void __dasd_block_process_erp(struct dasd_block *block, 1637static inline void __dasd_block_process_erp(struct dasd_block *block,
@@ -1665,18 +1674,14 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1665 if (basedev->state < DASD_STATE_READY) 1674 if (basedev->state < DASD_STATE_READY)
1666 return; 1675 return;
1667 /* Now we try to fetch requests from the request queue */ 1676 /* Now we try to fetch requests from the request queue */
1668 while (!blk_queue_plugged(queue) && 1677 while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
1669 elv_next_request(queue)) {
1670
1671 req = elv_next_request(queue);
1672
1673 if (basedev->features & DASD_FEATURE_READONLY && 1678 if (basedev->features & DASD_FEATURE_READONLY &&
1674 rq_data_dir(req) == WRITE) { 1679 rq_data_dir(req) == WRITE) {
1675 DBF_DEV_EVENT(DBF_ERR, basedev, 1680 DBF_DEV_EVENT(DBF_ERR, basedev,
1676 "Rejecting write request %p", 1681 "Rejecting write request %p",
1677 req); 1682 req);
1678 blkdev_dequeue_request(req); 1683 blk_start_request(req);
1679 dasd_end_request(req, -EIO); 1684 __blk_end_request_all(req, -EIO);
1680 continue; 1685 continue;
1681 } 1686 }
1682 cqr = basedev->discipline->build_cp(basedev, block, req); 1687 cqr = basedev->discipline->build_cp(basedev, block, req);
@@ -1704,8 +1709,8 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1704 "CCW creation failed (rc=%ld) " 1709 "CCW creation failed (rc=%ld) "
1705 "on request %p", 1710 "on request %p",
1706 PTR_ERR(cqr), req); 1711 PTR_ERR(cqr), req);
1707 blkdev_dequeue_request(req); 1712 blk_start_request(req);
1708 dasd_end_request(req, -EIO); 1713 __blk_end_request_all(req, -EIO);
1709 continue; 1714 continue;
1710 } 1715 }
1711 /* 1716 /*
@@ -1714,7 +1719,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1714 */ 1719 */
1715 cqr->callback_data = (void *) req; 1720 cqr->callback_data = (void *) req;
1716 cqr->status = DASD_CQR_FILLED; 1721 cqr->status = DASD_CQR_FILLED;
1717 blkdev_dequeue_request(req); 1722 blk_start_request(req);
1718 list_add_tail(&cqr->blocklist, &block->ccw_queue); 1723 list_add_tail(&cqr->blocklist, &block->ccw_queue);
1719 dasd_profile_start(block, cqr, req); 1724 dasd_profile_start(block, cqr, req);
1720 } 1725 }
@@ -1731,7 +1736,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1731 status = cqr->block->base->discipline->free_cp(cqr, req); 1736 status = cqr->block->base->discipline->free_cp(cqr, req);
1732 if (status <= 0) 1737 if (status <= 0)
1733 error = status ? status : -EIO; 1738 error = status ? status : -EIO;
1734 dasd_end_request(req, error); 1739 __blk_end_request_all(req, error);
1735} 1740}
1736 1741
1737/* 1742/*
@@ -2003,7 +2008,7 @@ static void dasd_setup_queue(struct dasd_block *block)
2003{ 2008{
2004 int max; 2009 int max;
2005 2010
2006 blk_queue_hardsect_size(block->request_queue, block->bp_block); 2011 blk_queue_logical_block_size(block->request_queue, block->bp_block);
2007 max = block->base->discipline->max_blocks << block->s2b_shift; 2012 max = block->base->discipline->max_blocks << block->s2b_shift;
2008 blk_queue_max_sectors(block->request_queue, max); 2013 blk_queue_max_sectors(block->request_queue, max);
2009 blk_queue_max_phys_segments(block->request_queue, -1L); 2014 blk_queue_max_phys_segments(block->request_queue, -1L);
@@ -2038,10 +2043,8 @@ static void dasd_flush_request_queue(struct dasd_block *block)
2038 return; 2043 return;
2039 2044
2040 spin_lock_irq(&block->request_queue_lock); 2045 spin_lock_irq(&block->request_queue_lock);
2041 while ((req = elv_next_request(block->request_queue))) { 2046 while ((req = blk_fetch_request(block->request_queue)))
2042 blkdev_dequeue_request(req); 2047 __blk_end_request_all(req, -EIO);
2043 dasd_end_request(req, -EIO);
2044 }
2045 spin_unlock_irq(&block->request_queue_lock); 2048 spin_unlock_irq(&block->request_queue_lock);
2046} 2049}
2047 2050
@@ -2442,12 +2445,12 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2442 2445
2443 2446
2444int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic, 2447int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
2445 void **rdc_buffer, int rdc_buffer_size) 2448 void *rdc_buffer, int rdc_buffer_size)
2446{ 2449{
2447 int ret; 2450 int ret;
2448 struct dasd_ccw_req *cqr; 2451 struct dasd_ccw_req *cqr;
2449 2452
2450 cqr = dasd_generic_build_rdc(device, *rdc_buffer, rdc_buffer_size, 2453 cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
2451 magic); 2454 magic);
2452 if (IS_ERR(cqr)) 2455 if (IS_ERR(cqr))
2453 return PTR_ERR(cqr); 2456 return PTR_ERR(cqr);
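The dasd conversion tracks the block-layer API change running through this series: drivers stop calling elv_next_request()/blkdev_dequeue_request() and instead peek, start, and finish requests explicitly. A condensed sketch of the two idioms used above; both assume the caller holds the queue lock, as the dasd code does:

#include <linux/blkdev.h>

/* Sketch: peek-then-start, for drivers that may refuse a request
 * before dequeueing it (__dasd_process_request_queue() style). */
static void drain_checked(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q))) {
		blk_start_request(req);		/* dequeue */
		__blk_end_request_all(req, -EIO);
	}
}

/* Sketch: blk_fetch_request() folds peek + start together, for
 * unconditional draining (dasd_flush_request_queue() style). */
static void drain_all(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)))
		__blk_end_request_all(req, -EIO);
}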
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index b9a7f773344..644086ba2ed 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -202,6 +202,7 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
202 rc = -EIO; 202 rc = -EIO;
203 break; 203 break;
204 } 204 }
205 cqr->intrc = rc;
205 return rc; 206 return rc;
206} 207}
207 208
@@ -505,8 +506,9 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
505 return ERR_PTR(-EINVAL); 506 return ERR_PTR(-EINVAL);
506 blksize = block->bp_block; 507 blksize = block->bp_block;
507 /* Calculate record id of first and last block. */ 508 /* Calculate record id of first and last block. */
508 first_rec = req->sector >> block->s2b_shift; 509 first_rec = blk_rq_pos(req) >> block->s2b_shift;
509 last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 510 last_rec =
511 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
510 /* Check struct bio and count the number of blocks for the request. */ 512 /* Check struct bio and count the number of blocks for the request. */
511 count = 0; 513 count = 0;
512 rq_for_each_segment(bv, req, iter) { 514 rq_for_each_segment(bv, req, iter) {
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index cb52da033f0..cf0cfdba124 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1097,20 +1097,20 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1097{ 1097{
1098 struct dasd_eckd_private *private; 1098 struct dasd_eckd_private *private;
1099 struct dasd_block *block; 1099 struct dasd_block *block;
1100 void *rdc_data;
1101 int is_known, rc; 1100 int is_known, rc;
1102 1101
1103 private = (struct dasd_eckd_private *) device->private; 1102 private = (struct dasd_eckd_private *) device->private;
1104 if (private == NULL) { 1103 if (!private) {
1105 private = kzalloc(sizeof(struct dasd_eckd_private), 1104 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
1106 GFP_KERNEL | GFP_DMA); 1105 if (!private) {
1107 if (private == NULL) {
1108 dev_warn(&device->cdev->dev, 1106 dev_warn(&device->cdev->dev,
1109 "Allocating memory for private DASD data " 1107 "Allocating memory for private DASD data "
1110 "failed\n"); 1108 "failed\n");
1111 return -ENOMEM; 1109 return -ENOMEM;
1112 } 1110 }
1113 device->private = (void *) private; 1111 device->private = (void *) private;
1112 } else {
1113 memset(private, 0, sizeof(*private));
1114 } 1114 }
1115 /* Invalidate status of initial analysis. */ 1115 /* Invalidate status of initial analysis. */
1116 private->init_cqr_status = -1; 1116 private->init_cqr_status = -1;
@@ -1161,9 +1161,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1161 goto out_err3; 1161 goto out_err3;
1162 1162
1163 /* Read Device Characteristics */ 1163 /* Read Device Characteristics */
1164 rdc_data = (void *) &(private->rdc_data); 1164 rc = dasd_generic_read_dev_chars(device, "ECKD", &private->rdc_data,
1165 memset(rdc_data, 0, sizeof(rdc_data)); 1165 64);
1166 rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64);
1167 if (rc) { 1166 if (rc) {
1168 DBF_EVENT(DBF_WARNING, 1167 DBF_EVENT(DBF_WARNING,
1169 "Read device characteristics failed, rc=%d for " 1168 "Read device characteristics failed, rc=%d for "
@@ -1183,7 +1182,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1183 private->rdc_data.dev_model, 1182 private->rdc_data.dev_model,
1184 private->rdc_data.cu_type, 1183 private->rdc_data.cu_type,
1185 private->rdc_data.cu_model.model, 1184 private->rdc_data.cu_model.model,
1186 private->real_cyl, 1185 private->real_cyl,
1187 private->rdc_data.trk_per_cyl, 1186 private->rdc_data.trk_per_cyl,
1188 private->rdc_data.sec_per_trk); 1187 private->rdc_data.sec_per_trk);
1189 return 0; 1188 return 0;
@@ -2336,9 +2335,10 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2336{ 2335{
2337 int tpm, cmdrtd, cmdwtd; 2336 int tpm, cmdrtd, cmdwtd;
2338 int use_prefix; 2337 int use_prefix;
2339 2338#if defined(CONFIG_64BIT)
2340 struct dasd_eckd_private *private;
2341 int fcx_in_css, fcx_in_gneq, fcx_in_features; 2339 int fcx_in_css, fcx_in_gneq, fcx_in_features;
2340#endif
2341 struct dasd_eckd_private *private;
2342 struct dasd_device *basedev; 2342 struct dasd_device *basedev;
2343 sector_t first_rec, last_rec; 2343 sector_t first_rec, last_rec;
2344 sector_t first_trk, last_trk; 2344 sector_t first_trk, last_trk;
@@ -2354,18 +2354,22 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2354 blksize = block->bp_block; 2354 blksize = block->bp_block;
2355 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2355 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2356 /* Calculate record id of first and last block. */ 2356 /* Calculate record id of first and last block. */
2357 first_rec = first_trk = req->sector >> block->s2b_shift; 2357 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
2358 first_offs = sector_div(first_trk, blk_per_trk); 2358 first_offs = sector_div(first_trk, blk_per_trk);
2359 last_rec = last_trk = 2359 last_rec = last_trk =
2360 (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 2360 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
2361 last_offs = sector_div(last_trk, blk_per_trk); 2361 last_offs = sector_div(last_trk, blk_per_trk);
2362 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); 2362 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
2363 2363
2364 /* is transport mode supported ? */ 2364 /* is transport mode supported? */
2365#if defined(CONFIG_64BIT)
2365 fcx_in_css = css_general_characteristics.fcx; 2366 fcx_in_css = css_general_characteristics.fcx;
2366 fcx_in_gneq = private->gneq->reserved2[7] & 0x04; 2367 fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
2367 fcx_in_features = private->features.feature[40] & 0x80; 2368 fcx_in_features = private->features.feature[40] & 0x80;
2368 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features; 2369 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
2370#else
2371 tpm = 0;
2372#endif
2369 2373
2370 /* is read track data and write track data in command mode supported? */ 2374 /* is read track data and write track data in command mode supported? */
2371 cmdrtd = private->features.feature[9] & 0x20; 2375 cmdrtd = private->features.feature[9] & 0x20;
@@ -2420,7 +2424,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
2420 private = (struct dasd_eckd_private *) cqr->block->base->private; 2424 private = (struct dasd_eckd_private *) cqr->block->base->private;
2421 blksize = cqr->block->bp_block; 2425 blksize = cqr->block->bp_block;
2422 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2426 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2423 recid = req->sector >> cqr->block->s2b_shift; 2427 recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
2424 ccw = cqr->cpaddr; 2428 ccw = cqr->cpaddr;
2425 /* Skip over define extent & locate record. */ 2429 /* Skip over define extent & locate record. */
2426 ccw++; 2430 ccw++;
@@ -3013,8 +3017,9 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3013 " I/O status report for device %s:\n", 3017 " I/O status report for device %s:\n",
3014 dev_name(&device->cdev->dev)); 3018 dev_name(&device->cdev->dev));
3015 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3019 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3016 " in req: %p CS: 0x%02X DS: 0x%02X\n", req, 3020 " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
3017 scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw)); 3021 req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
3022 scsw_cc(&irb->scsw), req->intrc);
3018 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3023 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3019 " device %s: Failing CCW: %p\n", 3024 " device %s: Failing CCW: %p\n",
3020 dev_name(&device->cdev->dev), 3025 dev_name(&device->cdev->dev),
@@ -3115,9 +3120,10 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3115 " I/O status report for device %s:\n", 3120 " I/O status report for device %s:\n",
3116 dev_name(&device->cdev->dev)); 3121 dev_name(&device->cdev->dev));
3117 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3122 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3118 " in req: %p CS: 0x%02X DS: 0x%02X " 3123 " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d "
3119 "fcxs: 0x%02X schxs: 0x%02X\n", req, 3124 "fcxs: 0x%02X schxs: 0x%02X\n", req,
3120 scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), 3125 scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
3126 scsw_cc(&irb->scsw), req->intrc,
3121 irb->scsw.tm.fcxs, irb->scsw.tm.schxs); 3127 irb->scsw.tm.fcxs, irb->scsw.tm.schxs);
3122 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3128 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3123 " device %s: Failing TCW: %p\n", 3129 " device %s: Failing TCW: %p\n",
@@ -3273,8 +3279,14 @@ static struct dasd_discipline dasd_eckd_discipline = {
3273static int __init 3279static int __init
3274dasd_eckd_init(void) 3280dasd_eckd_init(void)
3275{ 3281{
3282 int ret;
3283
3276 ASCEBC(dasd_eckd_discipline.ebcname, 4); 3284 ASCEBC(dasd_eckd_discipline.ebcname, 4);
3277 return ccw_driver_register(&dasd_eckd_driver); 3285 ret = ccw_driver_register(&dasd_eckd_driver);
3286 if (!ret)
3287 wait_for_device_probe();
3288
3289 return ret;
3278} 3290}
3279 3291
3280static void __exit 3292static void __exit
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index a3eb6fd1467..597c6ffdb9f 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -122,20 +122,20 @@ dasd_fba_check_characteristics(struct dasd_device *device)
122 struct dasd_block *block; 122 struct dasd_block *block;
123 struct dasd_fba_private *private; 123 struct dasd_fba_private *private;
124 struct ccw_device *cdev = device->cdev; 124 struct ccw_device *cdev = device->cdev;
125 void *rdc_data;
126 int rc; 125 int rc;
127 126
128 private = (struct dasd_fba_private *) device->private; 127 private = (struct dasd_fba_private *) device->private;
129 if (private == NULL) { 128 if (!private) {
130 private = kzalloc(sizeof(struct dasd_fba_private), 129 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
131 GFP_KERNEL | GFP_DMA); 130 if (!private) {
132 if (private == NULL) {
133 dev_warn(&device->cdev->dev, 131 dev_warn(&device->cdev->dev,
134 "Allocating memory for private DASD " 132 "Allocating memory for private DASD "
135 "data failed\n"); 133 "data failed\n");
136 return -ENOMEM; 134 return -ENOMEM;
137 } 135 }
138 device->private = (void *) private; 136 device->private = (void *) private;
137 } else {
138 memset(private, 0, sizeof(*private));
139 } 139 }
140 block = dasd_alloc_block(); 140 block = dasd_alloc_block();
141 if (IS_ERR(block)) { 141 if (IS_ERR(block)) {
@@ -150,8 +150,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
150 block->base = device; 150 block->base = device;
151 151
152 /* Read Device Characteristics */ 152 /* Read Device Characteristics */
153 rdc_data = (void *) &(private->rdc_data); 153 rc = dasd_generic_read_dev_chars(device, "FBA ", &private->rdc_data,
154 rc = dasd_generic_read_dev_chars(device, "FBA ", &rdc_data, 32); 154 32);
155 if (rc) { 155 if (rc) {
156 DBF_EVENT(DBF_WARNING, "Read device characteristics returned " 156 DBF_EVENT(DBF_WARNING, "Read device characteristics returned "
157 "error %d for device: %s", 157 "error %d for device: %s",
@@ -270,8 +270,9 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
270 return ERR_PTR(-EINVAL); 270 return ERR_PTR(-EINVAL);
271 blksize = block->bp_block; 271 blksize = block->bp_block;
272 /* Calculate record id of first and last block. */ 272 /* Calculate record id of first and last block. */
273 first_rec = req->sector >> block->s2b_shift; 273 first_rec = blk_rq_pos(req) >> block->s2b_shift;
274 last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 274 last_rec =
275 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
275 /* Check struct bio and count the number of blocks for the request. */ 276 /* Check struct bio and count the number of blocks for the request. */
276 count = 0; 277 count = 0;
277 cidaw = 0; 278 cidaw = 0;
@@ -309,7 +310,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
309 ccw = cqr->cpaddr; 310 ccw = cqr->cpaddr;
310 /* First ccw is define extent. */ 311 /* First ccw is define extent. */
311 define_extent(ccw++, cqr->data, rq_data_dir(req), 312 define_extent(ccw++, cqr->data, rq_data_dir(req),
312 block->bp_block, req->sector, req->nr_sectors); 313 block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
313 /* Build locate_record + read/write ccws. */ 314 /* Build locate_record + read/write ccws. */
314 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data)); 315 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
315 LO_data = (struct LO_fba_data *) (idaws + cidaw); 316 LO_data = (struct LO_fba_data *) (idaws + cidaw);
@@ -603,8 +604,14 @@ static struct dasd_discipline dasd_fba_discipline = {
603static int __init 604static int __init
604dasd_fba_init(void) 605dasd_fba_init(void)
605{ 606{
607 int ret;
608
606 ASCEBC(dasd_fba_discipline.ebcname, 4); 609 ASCEBC(dasd_fba_discipline.ebcname, 4);
607 return ccw_driver_register(&dasd_fba_driver); 610 ret = ccw_driver_register(&dasd_fba_driver);
611 if (!ret)
612 wait_for_device_probe();
613
614 return ret;
608} 615}
609 616
610static void __exit 617static void __exit
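All three DASD disciplines switch from poking struct request fields to the new accessors: blk_rq_pos() is the start sector (formerly req->sector) and blk_rq_sectors() the request length in sectors (formerly req->nr_sectors). The recurring record-id computation, extracted as a sketch:

#include <linux/blkdev.h>

/* Sketch: map a request onto DASD record ids, as the build_cp
 * routines above do; s2b_shift converts sectors to blocks. */
static void request_to_records(struct request *req, int s2b_shift,
			       sector_t *first, sector_t *last)
{
	*first = blk_rq_pos(req) >> s2b_shift;
	*last = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> s2b_shift;
}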
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index c1e487f774c..f97ceb79507 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -173,6 +173,7 @@ struct dasd_ccw_req {
173 void *data; /* pointer to data area */ 173 void *data; /* pointer to data area */
174 174
175 /* these are important for recovering erroneous requests */ 175 /* these are important for recovering erroneous requests */
176 int intrc; /* internal error, e.g. from start_IO */
176 struct irb irb; /* device status in case of an error */ 177 struct irb irb; /* device status in case of an error */
177 struct dasd_ccw_req *refers; /* ERP-chain queueing. */ 178 struct dasd_ccw_req *refers; /* ERP-chain queueing. */
178 void *function; /* originating ERP action */ 179 void *function; /* originating ERP action */
@@ -578,7 +579,7 @@ int dasd_generic_set_offline (struct ccw_device *cdev);
578int dasd_generic_notify(struct ccw_device *, int); 579int dasd_generic_notify(struct ccw_device *, int);
579void dasd_generic_handle_state_change(struct dasd_device *); 580void dasd_generic_handle_state_change(struct dasd_device *);
580 581
581int dasd_generic_read_dev_chars(struct dasd_device *, char *, void **, int); 582int dasd_generic_read_dev_chars(struct dasd_device *, char *, void *, int);
582char *dasd_get_sense(struct irb *); 583char *dasd_get_sense(struct irb *);
583 584
584/* externals in dasd_devmap.c */ 585/* externals in dasd_devmap.c */
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index cfdcf1aed33..b21caf177e3 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -127,7 +127,7 @@ dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
127 found = 0; 127 found = 0;
128 // test if minor available 128 // test if minor available
129 list_for_each_entry(entry, &dcssblk_devices, lh) 129 list_for_each_entry(entry, &dcssblk_devices, lh)
130 if (minor == MINOR(disk_devt(entry->gd))) 130 if (minor == entry->gd->first_minor)
131 found++; 131 found++;
132 if (!found) break; // got unused minor 132 if (!found) break; // got unused minor
133 } 133 }
@@ -602,7 +602,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
602 dev_info->gd->private_data = dev_info; 602 dev_info->gd->private_data = dev_info;
603 dev_info->gd->driverfs_dev = &dev_info->dev; 603 dev_info->gd->driverfs_dev = &dev_info->dev;
604 blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); 604 blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
605 blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096); 605 blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
606 606
607 seg_byte_size = (dev_info->end - dev_info->start + 1); 607 seg_byte_size = (dev_info->end - dev_info->start + 1);
608 set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors 608 set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
@@ -625,7 +625,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
625 if (rc) 625 if (rc)
626 goto release_gd; 626 goto release_gd;
627 sprintf(dev_info->gd->disk_name, "dcssblk%d", 627 sprintf(dev_info->gd->disk_name, "dcssblk%d",
628 MINOR(disk_devt(dev_info->gd))); 628 dev_info->gd->first_minor);
629 list_add_tail(&dev_info->lh, &dcssblk_devices); 629 list_add_tail(&dev_info->lh, &dcssblk_devices);
630 630
631 if (!try_module_get(THIS_MODULE)) { 631 if (!try_module_get(THIS_MODULE)) {
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 76814f3e898..0ae0c83ef87 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -343,7 +343,7 @@ static int __init xpram_setup_blkdev(void)
343 goto out; 343 goto out;
344 } 344 }
345 blk_queue_make_request(xpram_queues[i], xpram_make_request); 345 blk_queue_make_request(xpram_queues[i], xpram_make_request);
346 blk_queue_hardsect_size(xpram_queues[i], 4096); 346 blk_queue_logical_block_size(xpram_queues[i], 4096);
347 } 347 }
348 348
349 /* 349 /*
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index d028d2ee83d..ed5396dae58 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -64,7 +64,7 @@ static struct con3270 *condev;
64#define CON_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */ 64#define CON_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */
65#define CON_UPDATE_LIST 2 /* Update lines in tty3270->update. */ 65#define CON_UPDATE_LIST 2 /* Update lines in tty3270->update. */
66#define CON_UPDATE_STATUS 4 /* Update status line. */ 66#define CON_UPDATE_STATUS 4 /* Update status line. */
67#define CON_UPDATE_ALL 7 67#define CON_UPDATE_ALL 8 /* Recreate screen. */
68 68
69static void con3270_update(struct con3270 *); 69static void con3270_update(struct con3270 *);
70 70
@@ -73,18 +73,10 @@ static void con3270_update(struct con3270 *);
73 */ 73 */
74static void con3270_set_timer(struct con3270 *cp, int expires) 74static void con3270_set_timer(struct con3270 *cp, int expires)
75{ 75{
76 if (expires == 0) { 76 if (expires == 0)
77 if (timer_pending(&cp->timer)) 77 del_timer(&cp->timer);
78 del_timer(&cp->timer); 78 else
79 return; 79 mod_timer(&cp->timer, jiffies + expires);
80 }
81 if (timer_pending(&cp->timer) &&
82 mod_timer(&cp->timer, jiffies + expires))
83 return;
84 cp->timer.function = (void (*)(unsigned long)) con3270_update;
85 cp->timer.data = (unsigned long) cp;
86 cp->timer.expires = jiffies + expires;
87 add_timer(&cp->timer);
88} 80}
89 81
90/* 82/*
@@ -225,6 +217,12 @@ con3270_update(struct con3270 *cp)
225 217
226 spin_lock_irqsave(&cp->view.lock, flags); 218 spin_lock_irqsave(&cp->view.lock, flags);
227 updated = 0; 219 updated = 0;
220 if (cp->update_flags & CON_UPDATE_ALL) {
221 con3270_rebuild_update(cp);
222 con3270_update_status(cp);
223 cp->update_flags = CON_UPDATE_ERASE | CON_UPDATE_LIST |
224 CON_UPDATE_STATUS;
225 }
228 if (cp->update_flags & CON_UPDATE_ERASE) { 226 if (cp->update_flags & CON_UPDATE_ERASE) {
229 /* Use erase write alternate to initialize display. */ 227 /* Use erase write alternate to initialize display. */
230 raw3270_request_set_cmd(wrq, TC_EWRITEA); 228 raw3270_request_set_cmd(wrq, TC_EWRITEA);
@@ -302,7 +300,6 @@ con3270_read_tasklet(struct raw3270_request *rrq)
302 deactivate = 1; 300 deactivate = 1;
303 break; 301 break;
304 case 0x6d: /* clear: start from scratch. */ 302 case 0x6d: /* clear: start from scratch. */
305 con3270_rebuild_update(cp);
306 cp->update_flags = CON_UPDATE_ALL; 303 cp->update_flags = CON_UPDATE_ALL;
307 con3270_set_timer(cp, 1); 304 con3270_set_timer(cp, 1);
308 break; 305 break;
@@ -382,30 +379,21 @@ con3270_issue_read(struct con3270 *cp)
382static int 379static int
383con3270_activate(struct raw3270_view *view) 380con3270_activate(struct raw3270_view *view)
384{ 381{
385 unsigned long flags;
386 struct con3270 *cp; 382 struct con3270 *cp;
387 383
388 cp = (struct con3270 *) view; 384 cp = (struct con3270 *) view;
389 spin_lock_irqsave(&cp->view.lock, flags);
390 cp->nr_up = 0;
391 con3270_rebuild_update(cp);
392 con3270_update_status(cp);
393 cp->update_flags = CON_UPDATE_ALL; 385 cp->update_flags = CON_UPDATE_ALL;
394 con3270_set_timer(cp, 1); 386 con3270_set_timer(cp, 1);
395 spin_unlock_irqrestore(&cp->view.lock, flags);
396 return 0; 387 return 0;
397} 388}
398 389
399static void 390static void
400con3270_deactivate(struct raw3270_view *view) 391con3270_deactivate(struct raw3270_view *view)
401{ 392{
402 unsigned long flags;
403 struct con3270 *cp; 393 struct con3270 *cp;
404 394
405 cp = (struct con3270 *) view; 395 cp = (struct con3270 *) view;
406 spin_lock_irqsave(&cp->view.lock, flags);
407 del_timer(&cp->timer); 396 del_timer(&cp->timer);
408 spin_unlock_irqrestore(&cp->view.lock, flags);
409} 397}
410 398
411static int 399static int
@@ -504,6 +492,7 @@ con3270_write(struct console *co, const char *str, unsigned int count)
504 con3270_cline_end(cp); 492 con3270_cline_end(cp);
505 } 493 }
506 /* Setup timer to output current console buffer after 1/10 second */ 494 /* Setup timer to output current console buffer after 1/10 second */
495 cp->nr_up = 0;
507 if (cp->view.dev && !timer_pending(&cp->timer)) 496 if (cp->view.dev && !timer_pending(&cp->timer))
508 con3270_set_timer(cp, HZ/10); 497 con3270_set_timer(cp, HZ/10);
509 spin_unlock_irqrestore(&cp->view.lock,flags); 498 spin_unlock_irqrestore(&cp->view.lock,flags);
@@ -624,7 +613,8 @@ con3270_init(void)
624 613
625 INIT_LIST_HEAD(&condev->lines); 614 INIT_LIST_HEAD(&condev->lines);
626 INIT_LIST_HEAD(&condev->update); 615 INIT_LIST_HEAD(&condev->update);
627 init_timer(&condev->timer); 616 setup_timer(&condev->timer, (void (*)(unsigned long)) con3270_update,
617 (unsigned long) condev);
628 tasklet_init(&condev->readlet, 618 tasklet_init(&condev->readlet,
629 (void (*)(unsigned long)) con3270_read_tasklet, 619 (void (*)(unsigned long)) con3270_read_tasklet,
630 (unsigned long) condev->read); 620 (unsigned long) condev->read);
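The con3270 timer rework relies on two properties of the timer API: setup_timer() fixes the callback and data once at init time, and mod_timer() activates an inactive timer as well as re-arming a pending one, so the old timer_pending()/add_timer() dance is redundant. The resulting pattern, condensed from the hunks above:

	/* init, once: */
	setup_timer(&cp->timer, (void (*)(unsigned long)) con3270_update,
		    (unsigned long) cp);

	/* arm or re-arm; mod_timer() also starts an inactive timer: */
	mod_timer(&cp->timer, jiffies + expires);

	/* cancel: */
	del_timer(&cp->timer);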
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 5f8e8ef43dd..2d00a383a47 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1134,7 +1134,7 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
1134 /* Setup ccws. */ 1134 /* Setup ccws. */
1135 request->op = TO_BLOCK; 1135 request->op = TO_BLOCK;
1136 start_block = (struct tape_34xx_block_id *) request->cpdata; 1136 start_block = (struct tape_34xx_block_id *) request->cpdata;
1137 start_block->block = req->sector >> TAPEBLOCK_HSEC_S2B; 1137 start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
1138 DBF_EVENT(6, "start_block = %i\n", start_block->block); 1138 DBF_EVENT(6, "start_block = %i\n", start_block->block);
1139 1139
1140 ccw = request->cpaddr; 1140 ccw = request->cpaddr;
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 823b05bd0dd..c453b2f3e9f 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -633,7 +633,7 @@ tape_3590_bread(struct tape_device *device, struct request *req)
633 struct req_iterator iter; 633 struct req_iterator iter;
634 634
635 DBF_EVENT(6, "xBREDid:"); 635 DBF_EVENT(6, "xBREDid:");
636 start_block = req->sector >> TAPEBLOCK_HSEC_S2B; 636 start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
637 DBF_EVENT(6, "start_block = %i\n", start_block); 637 DBF_EVENT(6, "start_block = %i\n", start_block);
638 638
639 rq_for_each_segment(bv, req, iter) 639 rq_for_each_segment(bv, req, iter)
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index f32e89e7c4f..47ff695255e 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -74,13 +74,6 @@ tapeblock_trigger_requeue(struct tape_device *device)
74 * Post finished request. 74 * Post finished request.
75 */ 75 */
76static void 76static void
77tapeblock_end_request(struct request *req, int error)
78{
79 if (blk_end_request(req, error, blk_rq_bytes(req)))
80 BUG();
81}
82
83static void
84__tapeblock_end_request(struct tape_request *ccw_req, void *data) 77__tapeblock_end_request(struct tape_request *ccw_req, void *data)
85{ 78{
86 struct tape_device *device; 79 struct tape_device *device;
@@ -90,17 +83,17 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
90 83
91 device = ccw_req->device; 84 device = ccw_req->device;
92 req = (struct request *) data; 85 req = (struct request *) data;
93 tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO); 86 blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);
94 if (ccw_req->rc == 0) 87 if (ccw_req->rc == 0)
95 /* Update position. */ 88 /* Update position. */
96 device->blk_data.block_position = 89 device->blk_data.block_position =
97 (req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B; 90 (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
98 else 91 else
99 /* We lost the position information due to an error. */ 92 /* We lost the position information due to an error. */
100 device->blk_data.block_position = -1; 93 device->blk_data.block_position = -1;
101 device->discipline->free_bread(ccw_req); 94 device->discipline->free_bread(ccw_req);
102 if (!list_empty(&device->req_queue) || 95 if (!list_empty(&device->req_queue) ||
103 elv_next_request(device->blk_data.request_queue)) 96 blk_peek_request(device->blk_data.request_queue))
104 tapeblock_trigger_requeue(device); 97 tapeblock_trigger_requeue(device);
105} 98}
106 99
@@ -118,7 +111,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
118 ccw_req = device->discipline->bread(device, req); 111 ccw_req = device->discipline->bread(device, req);
119 if (IS_ERR(ccw_req)) { 112 if (IS_ERR(ccw_req)) {
120 DBF_EVENT(1, "TBLOCK: bread failed\n"); 113 DBF_EVENT(1, "TBLOCK: bread failed\n");
121 tapeblock_end_request(req, -EIO); 114 blk_end_request_all(req, -EIO);
122 return PTR_ERR(ccw_req); 115 return PTR_ERR(ccw_req);
123 } 116 }
124 ccw_req->callback = __tapeblock_end_request; 117 ccw_req->callback = __tapeblock_end_request;
@@ -131,7 +124,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
131 * Start/enqueueing failed. No retries in 124 * Start/enqueueing failed. No retries in
132 * this case. 125 * this case.
133 */ 126 */
134 tapeblock_end_request(req, -EIO); 127 blk_end_request_all(req, -EIO);
135 device->discipline->free_bread(ccw_req); 128 device->discipline->free_bread(ccw_req);
136 } 129 }
137 130
@@ -169,19 +162,16 @@ tapeblock_requeue(struct work_struct *work) {
169 spin_lock_irq(&device->blk_data.request_queue_lock); 162 spin_lock_irq(&device->blk_data.request_queue_lock);
170 while ( 163 while (
171 !blk_queue_plugged(queue) && 164 !blk_queue_plugged(queue) &&
172 elv_next_request(queue) && 165 (req = blk_fetch_request(queue)) &&
173 nr_queued < TAPEBLOCK_MIN_REQUEUE 166 nr_queued < TAPEBLOCK_MIN_REQUEUE
174 ) { 167 ) {
175 req = elv_next_request(queue);
176 if (rq_data_dir(req) == WRITE) { 168 if (rq_data_dir(req) == WRITE) {
177 DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); 169 DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
178 blkdev_dequeue_request(req);
179 spin_unlock_irq(&device->blk_data.request_queue_lock); 170 spin_unlock_irq(&device->blk_data.request_queue_lock);
180 tapeblock_end_request(req, -EIO); 171 blk_end_request_all(req, -EIO);
181 spin_lock_irq(&device->blk_data.request_queue_lock); 172 spin_lock_irq(&device->blk_data.request_queue_lock);
182 continue; 173 continue;
183 } 174 }
184 blkdev_dequeue_request(req);
185 nr_queued++; 175 nr_queued++;
186 spin_unlock_irq(&device->blk_data.request_queue_lock); 176 spin_unlock_irq(&device->blk_data.request_queue_lock);
187 rc = tapeblock_start_request(device, req); 177 rc = tapeblock_start_request(device, req);
@@ -232,7 +222,7 @@ tapeblock_setup_device(struct tape_device * device)
232 if (rc) 222 if (rc)
233 goto cleanup_queue; 223 goto cleanup_queue;
234 224
235 blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE); 225 blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
236 blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC); 226 blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
237 blk_queue_max_phys_segments(blkdat->request_queue, -1L); 227 blk_queue_max_phys_segments(blkdat->request_queue, -1L);
238 blk_queue_max_hw_segments(blkdat->request_queue, -1L); 228 blk_queue_max_hw_segments(blkdat->request_queue, -1L);
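The tape_block conversion tracks the block-layer API shuffle: blk_fetch_request() returns the next request already dequeued (folding elv_next_request() plus blkdev_dequeue_request()), blk_end_request_all() completes a request for all of its bytes in one call (replacing the local tapeblock_end_request() wrapper), and request geometry is read through the blk_rq_pos()/blk_rq_sectors() accessors instead of ->sector/->nr_sectors. A minimal sketch of the fetch loop, locking omitted and start_request() standing in for the real submission helper:

	while (!blk_queue_plugged(q) && (req = blk_fetch_request(q))) {
		if (rq_data_dir(req) == WRITE) {
			blk_end_request_all(req, -EIO);	/* finish every byte */
			continue;
		}
		start_request(device, req);	/* hypothetical helper */
	}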
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index a7fe6302c98..38385677c65 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -112,7 +112,7 @@ struct tty3270 {
112#define TTY_UPDATE_LIST 2 /* Update lines in tty3270->update. */ 112#define TTY_UPDATE_LIST 2 /* Update lines in tty3270->update. */
113#define TTY_UPDATE_INPUT 4 /* Update input line. */ 113#define TTY_UPDATE_INPUT 4 /* Update input line. */
114#define TTY_UPDATE_STATUS 8 /* Update status line. */ 114#define TTY_UPDATE_STATUS 8 /* Update status line. */
115#define TTY_UPDATE_ALL 15 115#define TTY_UPDATE_ALL 16 /* Recreate screen. */
116 116
117static void tty3270_update(struct tty3270 *); 117static void tty3270_update(struct tty3270 *);
118 118
@@ -121,19 +121,10 @@ static void tty3270_update(struct tty3270 *);
121 */ 121 */
122static void tty3270_set_timer(struct tty3270 *tp, int expires) 122static void tty3270_set_timer(struct tty3270 *tp, int expires)
123{ 123{
124 if (expires == 0) { 124 if (expires == 0)
125 if (timer_pending(&tp->timer) && del_timer(&tp->timer)) 125 del_timer(&tp->timer);
126 raw3270_put_view(&tp->view); 126 else
127 return; 127 mod_timer(&tp->timer, jiffies + expires);
128 }
129 if (timer_pending(&tp->timer) &&
130 mod_timer(&tp->timer, jiffies + expires))
131 return;
132 raw3270_get_view(&tp->view);
133 tp->timer.function = (void (*)(unsigned long)) tty3270_update;
134 tp->timer.data = (unsigned long) tp;
135 tp->timer.expires = jiffies + expires;
136 add_timer(&tp->timer);
137} 128}
138 129
139/* 130/*
@@ -337,7 +328,6 @@ tty3270_write_callback(struct raw3270_request *rq, void *data)
337 tp = (struct tty3270 *) rq->view; 328 tp = (struct tty3270 *) rq->view;
338 if (rq->rc != 0) { 329 if (rq->rc != 0) {
 339 /* Write wasn't successful. Refresh all. */ 330 /* Write wasn't successful. Refresh all. */
	339 /* Write wasn't successful. Refresh all. */ 330 /* Write wasn't successful. Refresh all. */
340 tty3270_rebuild_update(tp);
341 tp->update_flags = TTY_UPDATE_ALL; 331 tp->update_flags = TTY_UPDATE_ALL;
342 tty3270_set_timer(tp, 1); 332 tty3270_set_timer(tp, 1);
343 } 333 }
@@ -366,6 +356,12 @@ tty3270_update(struct tty3270 *tp)
366 356
367 spin_lock(&tp->view.lock); 357 spin_lock(&tp->view.lock);
368 updated = 0; 358 updated = 0;
359 if (tp->update_flags & TTY_UPDATE_ALL) {
360 tty3270_rebuild_update(tp);
361 tty3270_update_status(tp);
362 tp->update_flags = TTY_UPDATE_ERASE | TTY_UPDATE_LIST |
363 TTY_UPDATE_INPUT | TTY_UPDATE_STATUS;
364 }
369 if (tp->update_flags & TTY_UPDATE_ERASE) { 365 if (tp->update_flags & TTY_UPDATE_ERASE) {
370 /* Use erase write alternate to erase display. */ 366 /* Use erase write alternate to erase display. */
371 raw3270_request_set_cmd(wrq, TC_EWRITEA); 367 raw3270_request_set_cmd(wrq, TC_EWRITEA);
@@ -425,7 +421,6 @@ tty3270_update(struct tty3270 *tp)
425 xchg(&tp->write, wrq); 421 xchg(&tp->write, wrq);
426 } 422 }
427 spin_unlock(&tp->view.lock); 423 spin_unlock(&tp->view.lock);
428 raw3270_put_view(&tp->view);
429} 424}
430 425
431/* 426/*
@@ -570,7 +565,6 @@ tty3270_read_tasklet(struct raw3270_request *rrq)
570 tty3270_set_timer(tp, 1); 565 tty3270_set_timer(tp, 1);
571 } else if (tp->input->string[0] == 0x6d) { 566 } else if (tp->input->string[0] == 0x6d) {
572 /* Display has been cleared. Redraw. */ 567 /* Display has been cleared. Redraw. */
573 tty3270_rebuild_update(tp);
574 tp->update_flags = TTY_UPDATE_ALL; 568 tp->update_flags = TTY_UPDATE_ALL;
575 tty3270_set_timer(tp, 1); 569 tty3270_set_timer(tp, 1);
576 } 570 }
@@ -641,22 +635,20 @@ static int
641tty3270_activate(struct raw3270_view *view) 635tty3270_activate(struct raw3270_view *view)
642{ 636{
643 struct tty3270 *tp; 637 struct tty3270 *tp;
644 unsigned long flags;
645 638
646 tp = (struct tty3270 *) view; 639 tp = (struct tty3270 *) view;
647 spin_lock_irqsave(&tp->view.lock, flags);
648 tp->nr_up = 0;
649 tty3270_rebuild_update(tp);
650 tty3270_update_status(tp);
651 tp->update_flags = TTY_UPDATE_ALL; 640 tp->update_flags = TTY_UPDATE_ALL;
652 tty3270_set_timer(tp, 1); 641 tty3270_set_timer(tp, 1);
653 spin_unlock_irqrestore(&tp->view.lock, flags);
654 return 0; 642 return 0;
655} 643}
656 644
657static void 645static void
658tty3270_deactivate(struct raw3270_view *view) 646tty3270_deactivate(struct raw3270_view *view)
659{ 647{
648 struct tty3270 *tp;
649
650 tp = (struct tty3270 *) view;
651 del_timer(&tp->timer);
660} 652}
661 653
662static int 654static int
@@ -743,6 +735,7 @@ tty3270_free_view(struct tty3270 *tp)
743{ 735{
744 int pages; 736 int pages;
745 737
738 del_timer_sync(&tp->timer);
746 kbd_free(tp->kbd); 739 kbd_free(tp->kbd);
747 raw3270_request_free(tp->kreset); 740 raw3270_request_free(tp->kreset);
748 raw3270_request_free(tp->read); 741 raw3270_request_free(tp->read);
@@ -889,7 +882,8 @@ tty3270_open(struct tty_struct *tty, struct file * filp)
889 INIT_LIST_HEAD(&tp->update); 882 INIT_LIST_HEAD(&tp->update);
890 INIT_LIST_HEAD(&tp->rcl_lines); 883 INIT_LIST_HEAD(&tp->rcl_lines);
891 tp->rcl_max = 20; 884 tp->rcl_max = 20;
892 init_timer(&tp->timer); 885 setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
886 (unsigned long) tp);
893 tasklet_init(&tp->readlet, 887 tasklet_init(&tp->readlet,
894 (void (*)(unsigned long)) tty3270_read_tasklet, 888 (void (*)(unsigned long)) tty3270_read_tasklet,
895 (unsigned long) tp->read); 889 (unsigned long) tp->read);
@@ -1754,14 +1748,6 @@ static const struct tty_operations tty3270_ops = {
1754 .set_termios = tty3270_set_termios 1748 .set_termios = tty3270_set_termios
1755}; 1749};
1756 1750
1757static void tty3270_notifier(int index, int active)
1758{
1759 if (active)
1760 tty_register_device(tty3270_driver, index, NULL);
1761 else
1762 tty_unregister_device(tty3270_driver, index);
1763}
1764
1765/* 1751/*
1766 * 3270 tty registration code called from tty_init(). 1752 * 3270 tty registration code called from tty_init().
1767 * Most kernel services (incl. kmalloc) are available at this point. 1753 * Most kernel services (incl. kmalloc) are available at this point.
@@ -1796,12 +1782,6 @@ static int __init tty3270_init(void)
1796 return ret; 1782 return ret;
1797 } 1783 }
1798 tty3270_driver = driver; 1784 tty3270_driver = driver;
1799 ret = raw3270_register_notifier(tty3270_notifier);
1800 if (ret) {
1801 put_tty_driver(driver);
1802 return ret;
1803
1804 }
1805 return 0; 1785 return 0;
1806} 1786}
1807 1787
@@ -1810,7 +1790,6 @@ tty3270_exit(void)
1810{ 1790{
1811 struct tty_driver *driver; 1791 struct tty_driver *driver;
1812 1792
1813 raw3270_unregister_notifier(tty3270_notifier);
1814 driver = tty3270_driver; 1793 driver = tty3270_driver;
1815 tty3270_driver = NULL; 1794 tty3270_driver = NULL;
1816 tty_unregister_driver(driver); 1795 tty_unregister_driver(driver);
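As with CON_UPDATE_ALL above, TTY_UPDATE_ALL becomes a distinct bit (16) rather than the OR of the individual flags (15). Call sites that previously had to invoke tty3270_rebuild_update() themselves now only set the bit; tty3270_update() expands it under the view lock, which is what lets the activate/deactivate paths drop their own irqsave locking. Condensed:

	/* caller, e.g. after the display was cleared: */
	tp->update_flags = TTY_UPDATE_ALL;
	tty3270_set_timer(tp, 1);

	/* tty3270_update(), under tp->view.lock: */
	if (tp->update_flags & TTY_UPDATE_ALL) {
		tty3270_rebuild_update(tp);
		tty3270_update_status(tp);
		tp->update_flags = TTY_UPDATE_ERASE | TTY_UPDATE_LIST |
			TTY_UPDATE_INPUT | TTY_UPDATE_STATUS;
	}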
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 2aebb982304..5ec7789bd9d 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -12,6 +12,7 @@
12#define KMSG_COMPONENT "cio" 12#define KMSG_COMPONENT "cio"
13#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14 14
15#include <linux/ftrace.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <linux/init.h> 17#include <linux/init.h>
17#include <linux/slab.h> 18#include <linux/slab.h>
@@ -28,7 +29,7 @@
28#include <asm/chpid.h> 29#include <asm/chpid.h>
29#include <asm/airq.h> 30#include <asm/airq.h>
30#include <asm/isc.h> 31#include <asm/isc.h>
31#include <asm/cpu.h> 32#include <asm/cputime.h>
32#include <asm/fcx.h> 33#include <asm/fcx.h>
33#include <asm/nmi.h> 34#include <asm/nmi.h>
34#include <asm/crw.h> 35#include <asm/crw.h>
@@ -626,8 +627,7 @@ out:
626 * handlers). 627 * handlers).
627 * 628 *
628 */ 629 */
629void 630void __irq_entry do_IRQ(struct pt_regs *regs)
630do_IRQ (struct pt_regs *regs)
631{ 631{
632 struct tpi_info *tpi_info; 632 struct tpi_info *tpi_info;
633 struct subchannel *sch; 633 struct subchannel *sch;
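The __irq_entry annotation (hence the new <linux/ftrace.h> include) places do_IRQ in the .irqentry.text section so the function-graph tracer can recognize hardware-interrupt entry points. Approximately what the annotation expands to in the ftrace headers of this era, shown only for orientation:

	#define __irq_entry __attribute__((__section__(".irqentry.text")))

	void __irq_entry do_IRQ(struct pt_regs *regs)
	{
		/* ... unchanged interrupt dispatch ... */
	}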
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 151754d5474..bf0a24af39a 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -114,7 +114,7 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
114 struct subchannel *sch; 114 struct subchannel *sch;
115 int ret; 115 int ret;
116 116
117 if (!cdev) 117 if (!cdev || !cdev->dev.parent)
118 return -ENODEV; 118 return -ENODEV;
119 if (cdev->private->state == DEV_STATE_NOT_OPER) 119 if (cdev->private->state == DEV_STATE_NOT_OPER)
120 return -ENODEV; 120 return -ENODEV;
@@ -122,8 +122,6 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
122 cdev->private->state != DEV_STATE_W4SENSE) 122 cdev->private->state != DEV_STATE_W4SENSE)
123 return -EINVAL; 123 return -EINVAL;
124 sch = to_subchannel(cdev->dev.parent); 124 sch = to_subchannel(cdev->dev.parent);
125 if (!sch)
126 return -ENODEV;
127 ret = cio_clear(sch); 125 ret = cio_clear(sch);
128 if (ret == 0) 126 if (ret == 0)
129 cdev->private->intparm = intparm; 127 cdev->private->intparm = intparm;
@@ -161,11 +159,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
161 struct subchannel *sch; 159 struct subchannel *sch;
162 int ret; 160 int ret;
163 161
164 if (!cdev) 162 if (!cdev || !cdev->dev.parent)
165 return -ENODEV; 163 return -ENODEV;
166 sch = to_subchannel(cdev->dev.parent); 164 sch = to_subchannel(cdev->dev.parent);
167 if (!sch)
168 return -ENODEV;
169 if (cdev->private->state == DEV_STATE_NOT_OPER) 165 if (cdev->private->state == DEV_STATE_NOT_OPER)
170 return -ENODEV; 166 return -ENODEV;
171 if (cdev->private->state == DEV_STATE_VERIFY || 167 if (cdev->private->state == DEV_STATE_VERIFY ||
@@ -339,7 +335,7 @@ int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
339 struct subchannel *sch; 335 struct subchannel *sch;
340 int ret; 336 int ret;
341 337
342 if (!cdev) 338 if (!cdev || !cdev->dev.parent)
343 return -ENODEV; 339 return -ENODEV;
344 if (cdev->private->state == DEV_STATE_NOT_OPER) 340 if (cdev->private->state == DEV_STATE_NOT_OPER)
345 return -ENODEV; 341 return -ENODEV;
@@ -347,8 +343,6 @@ int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
347 cdev->private->state != DEV_STATE_W4SENSE) 343 cdev->private->state != DEV_STATE_W4SENSE)
348 return -EINVAL; 344 return -EINVAL;
349 sch = to_subchannel(cdev->dev.parent); 345 sch = to_subchannel(cdev->dev.parent);
350 if (!sch)
351 return -ENODEV;
352 ret = cio_halt(sch); 346 ret = cio_halt(sch);
353 if (ret == 0) 347 if (ret == 0)
354 cdev->private->intparm = intparm; 348 cdev->private->intparm = intparm;
@@ -372,11 +366,9 @@ int ccw_device_resume(struct ccw_device *cdev)
372{ 366{
373 struct subchannel *sch; 367 struct subchannel *sch;
374 368
375 if (!cdev) 369 if (!cdev || !cdev->dev.parent)
376 return -ENODEV; 370 return -ENODEV;
377 sch = to_subchannel(cdev->dev.parent); 371 sch = to_subchannel(cdev->dev.parent);
378 if (!sch)
379 return -ENODEV;
380 if (cdev->private->state == DEV_STATE_NOT_OPER) 372 if (cdev->private->state == DEV_STATE_NOT_OPER)
381 return -ENODEV; 373 return -ENODEV;
382 if (cdev->private->state != DEV_STATE_ONLINE || 374 if (cdev->private->state != DEV_STATE_ONLINE ||
@@ -471,11 +463,11 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
471{ 463{
472 struct subchannel *sch; 464 struct subchannel *sch;
473 465
474 sch = to_subchannel(cdev->dev.parent); 466 if (!cdev->dev.parent)
475 if (!sch)
476 return 0; 467 return 0;
477 else 468
478 return sch->lpm; 469 sch = to_subchannel(cdev->dev.parent);
470 return sch->lpm;
479} 471}
480 472
481/* 473/*
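The dropped "if (!sch)" tests were dead code: to_subchannel() is a container_of() wrapper, which only offsets a pointer and can never yield NULL for a non-NULL argument. The real hazard is a ccw device without a parent, so that is what the consolidated check guards:

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);	/* container_of(): never NULL */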
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index accd957454e..d79cf5bf0e6 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -881,42 +881,26 @@ no_handler:
881 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 881 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
882} 882}
883 883
884static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat, 884static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
885 int dstat) 885 int dstat)
886{ 886{
887 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 887 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
888 888
889 if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { 889 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
890 DBF_ERROR("EQ:ck con");
891 goto error;
892 }
893 890
894 if (!(dstat & DEV_STAT_DEV_END)) { 891 if (cstat)
895 DBF_ERROR("EQ:no dev");
896 goto error; 892 goto error;
897 } 893 if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
898
899 if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
900 DBF_ERROR("EQ: bad io");
901 goto error; 894 goto error;
902 } 895 if (!(dstat & DEV_STAT_DEV_END))
903 return 0; 896 goto error;
897 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
898 return;
899
904error: 900error:
905 DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no); 901 DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
906 DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); 902 DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
907
908 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 903 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
909 return 1;
910}
911
912static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
913 int dstat)
914{
915 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
916
917 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
918 if (!qdio_establish_check_errors(cdev, cstat, dstat))
919 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
920} 904}
921 905
922/* qdio interrupt handler */ 906/* qdio interrupt handler */
@@ -946,7 +930,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
946 } 930 }
947 } 931 }
948 qdio_irq_check_sense(irq_ptr, irb); 932 qdio_irq_check_sense(irq_ptr, irb);
949
950 cstat = irb->scsw.cmd.cstat; 933 cstat = irb->scsw.cmd.cstat;
951 dstat = irb->scsw.cmd.dstat; 934 dstat = irb->scsw.cmd.dstat;
952 935
@@ -954,22 +937,19 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
954 case QDIO_IRQ_STATE_INACTIVE: 937 case QDIO_IRQ_STATE_INACTIVE:
955 qdio_establish_handle_irq(cdev, cstat, dstat); 938 qdio_establish_handle_irq(cdev, cstat, dstat);
956 break; 939 break;
957
958 case QDIO_IRQ_STATE_CLEANUP: 940 case QDIO_IRQ_STATE_CLEANUP:
959 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 941 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
960 break; 942 break;
961
962 case QDIO_IRQ_STATE_ESTABLISHED: 943 case QDIO_IRQ_STATE_ESTABLISHED:
963 case QDIO_IRQ_STATE_ACTIVE: 944 case QDIO_IRQ_STATE_ACTIVE:
964 if (cstat & SCHN_STAT_PCI) { 945 if (cstat & SCHN_STAT_PCI) {
965 qdio_int_handler_pci(irq_ptr); 946 qdio_int_handler_pci(irq_ptr);
966 return; 947 return;
967 } 948 }
968 if ((cstat & ~SCHN_STAT_PCI) || dstat) { 949 if (cstat || dstat)
969 qdio_handle_activate_check(cdev, intparm, cstat, 950 qdio_handle_activate_check(cdev, intparm, cstat,
970 dstat); 951 dstat);
971 break; 952 break;
972 }
973 default: 953 default:
974 WARN_ON(1); 954 WARN_ON(1);
975 } 955 }
@@ -1514,7 +1494,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1514 1494
1515 if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || 1495 if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
1516 (count > QDIO_MAX_BUFFERS_PER_Q) || 1496 (count > QDIO_MAX_BUFFERS_PER_Q) ||
1517 (q_nr > QDIO_MAX_QUEUES_PER_IRQ)) 1497 (q_nr >= QDIO_MAX_QUEUES_PER_IRQ))
1518 return -EINVAL; 1498 return -EINVAL;
1519 1499
1520 if (!count) 1500 if (!count)
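The do_QDIO() change is an off-by-one fix: queue numbers index a zero-based array of at most QDIO_MAX_QUEUES_PER_IRQ queues, so an index equal to the limit is already out of bounds and must be rejected:

	/* valid queue numbers are 0 .. QDIO_MAX_QUEUES_PER_IRQ - 1 */
	if (q_nr >= QDIO_MAX_QUEUES_PER_IRQ)
		return -EINVAL;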
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
index 136d0f0b1e9..eff943923c6 100644
--- a/drivers/s390/cio/qdio_perf.c
+++ b/drivers/s390/cio/qdio_perf.c
@@ -25,18 +25,6 @@ struct qdio_perf_stats perf_stats;
25static struct proc_dir_entry *qdio_perf_pde; 25static struct proc_dir_entry *qdio_perf_pde;
26#endif 26#endif
27 27
28inline void qdio_perf_stat_inc(atomic_long_t *count)
29{
30 if (qdio_performance_stats)
31 atomic_long_inc(count);
32}
33
34inline void qdio_perf_stat_dec(atomic_long_t *count)
35{
36 if (qdio_performance_stats)
37 atomic_long_dec(count);
38}
39
40/* 28/*
41 * procfs functions 29 * procfs functions
42 */ 30 */
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
index 7821ac4fa51..ff4504ce1e3 100644
--- a/drivers/s390/cio/qdio_perf.h
+++ b/drivers/s390/cio/qdio_perf.h
@@ -9,7 +9,6 @@
9#define QDIO_PERF_H 9#define QDIO_PERF_H
10 10
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/device.h>
13#include <asm/atomic.h> 12#include <asm/atomic.h>
14 13
15struct qdio_perf_stats { 14struct qdio_perf_stats {
@@ -50,10 +49,13 @@ struct qdio_perf_stats {
50extern struct qdio_perf_stats perf_stats; 49extern struct qdio_perf_stats perf_stats;
51extern int qdio_performance_stats; 50extern int qdio_performance_stats;
52 51
52static inline void qdio_perf_stat_inc(atomic_long_t *count)
53{
54 if (qdio_performance_stats)
55 atomic_long_inc(count);
56}
57
53int qdio_setup_perf_stats(void); 58int qdio_setup_perf_stats(void);
54void qdio_remove_perf_stats(void); 59void qdio_remove_perf_stats(void);
55 60
56extern void qdio_perf_stat_inc(atomic_long_t *count);
57extern void qdio_perf_stat_dec(atomic_long_t *count);
58
59#endif 61#endif
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index cbc8566fab7..e38e5d306fa 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -173,8 +173,9 @@ static void kvm_notify(struct virtqueue *vq)
173 * this device and sets it up. 173 * this device and sets it up.
174 */ 174 */
175static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, 175static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
176 unsigned index, 176 unsigned index,
177 void (*callback)(struct virtqueue *vq)) 177 void (*callback)(struct virtqueue *vq),
178 const char *name)
178{ 179{
179 struct kvm_device *kdev = to_kvmdev(vdev); 180 struct kvm_device *kdev = to_kvmdev(vdev);
180 struct kvm_vqconfig *config; 181 struct kvm_vqconfig *config;
@@ -194,7 +195,7 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
194 195
195 vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN, 196 vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN,
196 vdev, (void *) config->address, 197 vdev, (void *) config->address,
197 kvm_notify, callback); 198 kvm_notify, callback, name);
198 if (!vq) { 199 if (!vq) {
199 err = -ENOMEM; 200 err = -ENOMEM;
200 goto unmap; 201 goto unmap;
@@ -226,6 +227,38 @@ static void kvm_del_vq(struct virtqueue *vq)
226 KVM_S390_VIRTIO_RING_ALIGN)); 227 KVM_S390_VIRTIO_RING_ALIGN));
227} 228}
228 229
230static void kvm_del_vqs(struct virtio_device *vdev)
231{
232 struct virtqueue *vq, *n;
233
234 list_for_each_entry_safe(vq, n, &vdev->vqs, list)
235 kvm_del_vq(vq);
236}
237
238static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
239 struct virtqueue *vqs[],
240 vq_callback_t *callbacks[],
241 const char *names[])
242{
243 struct kvm_device *kdev = to_kvmdev(vdev);
244 int i;
245
246 /* We must have this many virtqueues. */
247 if (nvqs > kdev->desc->num_vq)
248 return -ENOENT;
249
250 for (i = 0; i < nvqs; ++i) {
251 vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]);
252 if (IS_ERR(vqs[i]))
253 goto error;
254 }
255 return 0;
256
257error:
258 kvm_del_vqs(vdev);
259 return PTR_ERR(vqs[i]);
260}
261
229/* 262/*
230 * The config ops structure as defined by virtio config 263 * The config ops structure as defined by virtio config
231 */ 264 */
@@ -237,8 +270,8 @@ static struct virtio_config_ops kvm_vq_configspace_ops = {
237 .get_status = kvm_get_status, 270 .get_status = kvm_get_status,
238 .set_status = kvm_set_status, 271 .set_status = kvm_set_status,
239 .reset = kvm_reset, 272 .reset = kvm_reset,
240 .find_vq = kvm_find_vq, 273 .find_vqs = kvm_find_vqs,
241 .del_vq = kvm_del_vq, 274 .del_vqs = kvm_del_vqs,
242}; 275};
243 276
244/* 277/*
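This converts the s390 KVM virtio transport from the single-queue find_vq/del_vq ops to the batched find_vqs/del_vqs interface, threading the new per-queue name down into vring_new_virtqueue(). A hedged sketch of how a virtio driver consumes the new op; the queue count, callbacks and names are illustrative, not from this diff:

	struct virtqueue *vqs[2];
	vq_callback_t *callbacks[] = { recv_done, xmit_done };
	const char *names[] = { "input", "output" };
	int err;

	err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
	if (err)
		return err;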
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index a7745c82b4a..cb909a5b504 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -8,7 +8,7 @@ config LCS
8 Select this option if you want to use LCS networking on IBM System z. 8 Select this option if you want to use LCS networking on IBM System z.
9 This device driver supports Token Ring (IEEE 802.5), 9 This device driver supports Token Ring (IEEE 802.5),
10 FDDI (IEEE 802.7) and Ethernet. 10 FDDI (IEEE 802.7) and Ethernet.
11 To compile as a module, choose M. The module name is lcs.ko. 11 To compile as a module, choose M. The module name is lcs.
12 If you do not know what it is, it's safe to choose Y. 12 If you do not know what it is, it's safe to choose Y.
13 13
14config CTCM 14config CTCM
@@ -21,7 +21,7 @@ config CTCM
21 It also supports virtual CTCs when running under VM. 21 It also supports virtual CTCs when running under VM.
22 This driver also supports channel-to-channel MPC SNA devices. 22 This driver also supports channel-to-channel MPC SNA devices.
23 MPC is an SNA protocol device used by Communication Server for Linux. 23 MPC is an SNA protocol device used by Communication Server for Linux.
24 To compile as a module, choose M. The module name is ctcm.ko. 24 To compile as a module, choose M. The module name is ctcm.
25 To compile into the kernel, choose Y. 25 To compile into the kernel, choose Y.
26 If you do not need any channel-to-channel connection, choose N. 26 If you do not need any channel-to-channel connection, choose N.
27 27
@@ -34,7 +34,7 @@ config NETIUCV
34 link between VM guests. Using ifconfig a point-to-point connection 34 link between VM guests. Using ifconfig a point-to-point connection
35 can be established to the Linux on IBM System z 35 can be established to the Linux on IBM System z
36 running on the other VM guest. To compile as a module, choose M. 36 running on the other VM guest. To compile as a module, choose M.
37 The module name is netiucv.ko. If unsure, choose Y. 37 The module name is netiucv. If unsure, choose Y.
38 38
39config SMSGIUCV 39config SMSGIUCV
40 tristate "IUCV special message support (VM only)" 40 tristate "IUCV special message support (VM only)"
@@ -50,7 +50,7 @@ config CLAW
50 This driver supports channel attached CLAW devices. 50 This driver supports channel attached CLAW devices.
51 CLAW is Common Link Access for Workstation. Common devices 51 CLAW is Common Link Access for Workstation. Common devices
52 that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices. 52 that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices.
53 To compile as a module, choose M. The module name is claw.ko. 53 To compile as a module, choose M. The module name is claw.
54 To compile into the kernel, choose Y. 54 To compile into the kernel, choose Y.
55 55
56config QETH 56config QETH
@@ -65,14 +65,14 @@ config QETH
65 <http://www.ibm.com/developerworks/linux/linux390> 65 <http://www.ibm.com/developerworks/linux/linux390>
66 66
67 To compile this driver as a module, choose M. 67 To compile this driver as a module, choose M.
68 The module name is qeth.ko. 68 The module name is qeth.
69 69
70config QETH_L2 70config QETH_L2
71 tristate "qeth layer 2 device support" 71 tristate "qeth layer 2 device support"
72 depends on QETH 72 depends on QETH
73 help 73 help
74 Select this option to be able to run qeth devices in layer 2 mode. 74 Select this option to be able to run qeth devices in layer 2 mode.
75 To compile as a module, choose M. The module name is qeth_l2.ko. 75 To compile as a module, choose M. The module name is qeth_l2.
76 If unsure, choose y. 76 If unsure, choose y.
77 77
78config QETH_L3 78config QETH_L3
@@ -80,7 +80,7 @@ config QETH_L3
80 depends on QETH 80 depends on QETH
81 help 81 help
82 Select this option to be able to run qeth devices in layer 3 mode. 82 Select this option to be able to run qeth devices in layer 3 mode.
83 To compile as a module choose M. The module name is qeth_l3.ko. 83 To compile as a module choose M. The module name is qeth_l3.
84 If unsure, choose Y. 84 If unsure, choose Y.
85 85
86config QETH_IPV6 86config QETH_IPV6
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 733fe3bf628..b2fe5cdbcae 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -11,6 +11,24 @@
11 11
12#include "zfcp_ext.h" 12#include "zfcp_ext.h"
13 13
14#define ZFCP_MODEL_PRIV 0x4
15
16static struct ccw_device_id zfcp_ccw_device_id[] = {
17 { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
18 { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) },
19 {},
20};
21MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
22
23/**
24 * zfcp_ccw_priv_sch - check if subchannel is privileged
25 * @adapter: Adapter/Subchannel to check
26 */
27int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter)
28{
29 return adapter->ccw_device->id.dev_model == ZFCP_MODEL_PRIV;
30}
31
14/** 32/**
15 * zfcp_ccw_probe - probe function of zfcp driver 33 * zfcp_ccw_probe - probe function of zfcp driver
16 * @ccw_device: pointer to belonging ccw device 34 * @ccw_device: pointer to belonging ccw device
@@ -176,8 +194,8 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
176 "ccnoti4", NULL); 194 "ccnoti4", NULL);
177 break; 195 break;
178 case CIO_BOXED: 196 case CIO_BOXED:
179 dev_warn(&adapter->ccw_device->dev, 197 dev_warn(&adapter->ccw_device->dev, "The FCP device "
180 "The ccw device did not respond in time.\n"); 198 "did not respond within the specified time\n");
181 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL); 199 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
182 break; 200 break;
183 } 201 }
@@ -199,14 +217,6 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
199 up(&zfcp_data.config_sema); 217 up(&zfcp_data.config_sema);
200} 218}
201 219
202static struct ccw_device_id zfcp_ccw_device_id[] = {
203 { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
204 { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x4) }, /* priv. */
205 {},
206};
207
208MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
209
210static struct ccw_driver zfcp_ccw_driver = { 220static struct ccw_driver zfcp_ccw_driver = {
211 .owner = THIS_MODULE, 221 .owner = THIS_MODULE,
212 .name = "zfcp", 222 .name = "zfcp",
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 0a1a5dd8d01..b99b87ce5a3 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -163,7 +163,7 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
163 } 163 }
164 164
165 response->fsf_command = fsf_req->fsf_command; 165 response->fsf_command = fsf_req->fsf_command;
166 response->fsf_reqid = (unsigned long)fsf_req; 166 response->fsf_reqid = fsf_req->req_id;
167 response->fsf_seqno = fsf_req->seq_no; 167 response->fsf_seqno = fsf_req->seq_no;
168 response->fsf_issued = fsf_req->issued; 168 response->fsf_issued = fsf_req->issued;
169 response->fsf_prot_status = qtcb->prefix.prot_status; 169 response->fsf_prot_status = qtcb->prefix.prot_status;
@@ -737,7 +737,7 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
737 spin_lock_irqsave(&adapter->san_dbf_lock, flags); 737 spin_lock_irqsave(&adapter->san_dbf_lock, flags);
738 memset(r, 0, sizeof(*r)); 738 memset(r, 0, sizeof(*r));
739 strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); 739 strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
740 r->fsf_reqid = (unsigned long)fsf_req; 740 r->fsf_reqid = fsf_req->req_id;
741 r->fsf_seqno = fsf_req->seq_no; 741 r->fsf_seqno = fsf_req->seq_no;
742 r->s_id = fc_host_port_id(adapter->scsi_host); 742 r->s_id = fc_host_port_id(adapter->scsi_host);
743 r->d_id = wka_port->d_id; 743 r->d_id = wka_port->d_id;
@@ -773,7 +773,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
773 spin_lock_irqsave(&adapter->san_dbf_lock, flags); 773 spin_lock_irqsave(&adapter->san_dbf_lock, flags);
774 memset(r, 0, sizeof(*r)); 774 memset(r, 0, sizeof(*r));
775 strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); 775 strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
776 r->fsf_reqid = (unsigned long)fsf_req; 776 r->fsf_reqid = fsf_req->req_id;
777 r->fsf_seqno = fsf_req->seq_no; 777 r->fsf_seqno = fsf_req->seq_no;
778 r->s_id = wka_port->d_id; 778 r->s_id = wka_port->d_id;
779 r->d_id = fc_host_port_id(adapter->scsi_host); 779 r->d_id = fc_host_port_id(adapter->scsi_host);
@@ -803,7 +803,7 @@ static void zfcp_san_dbf_event_els(const char *tag, int level,
803 spin_lock_irqsave(&adapter->san_dbf_lock, flags); 803 spin_lock_irqsave(&adapter->san_dbf_lock, flags);
804 memset(rec, 0, sizeof(*rec)); 804 memset(rec, 0, sizeof(*rec));
805 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); 805 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
806 rec->fsf_reqid = (unsigned long)fsf_req; 806 rec->fsf_reqid = fsf_req->req_id;
807 rec->fsf_seqno = fsf_req->seq_no; 807 rec->fsf_seqno = fsf_req->seq_no;
808 rec->s_id = s_id; 808 rec->s_id = s_id;
809 rec->d_id = d_id; 809 rec->d_id = d_id;
@@ -965,7 +965,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
965 ZFCP_DBF_SCSI_FCP_SNS_INFO); 965 ZFCP_DBF_SCSI_FCP_SNS_INFO);
966 } 966 }
967 967
968 rec->fsf_reqid = (unsigned long)fsf_req; 968 rec->fsf_reqid = fsf_req->req_id;
969 rec->fsf_seqno = fsf_req->seq_no; 969 rec->fsf_seqno = fsf_req->seq_no;
970 rec->fsf_issued = fsf_req->issued; 970 rec->fsf_issued = fsf_req->issued;
971 } 971 }
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 4c362a9069f..2074d45dbf6 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -47,13 +47,6 @@
47 47
48/********************* CIO/QDIO SPECIFIC DEFINES *****************************/ 48/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
49 49
50/* Adapter Identification Parameters */
51#define ZFCP_CONTROL_UNIT_TYPE 0x1731
52#define ZFCP_CONTROL_UNIT_MODEL 0x03
53#define ZFCP_DEVICE_TYPE 0x1732
54#define ZFCP_DEVICE_MODEL 0x03
55#define ZFCP_DEVICE_MODEL_PRIV 0x04
56
57/* DMQ bug workaround: don't use last SBALE */ 50/* DMQ bug workaround: don't use last SBALE */
58#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1) 51#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
59 52
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index fdc9b4352a6..e50ea465bc2 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -880,6 +880,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
880 zfcp_port_put(port); 880 zfcp_port_put(port);
881 return ZFCP_ERP_CONTINUES; 881 return ZFCP_ERP_CONTINUES;
882 } 882 }
883 /* fall through */
883 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP: 884 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
884 if (!port->d_id) 885 if (!port->d_id)
885 return ZFCP_ERP_FAILED; 886 return ZFCP_ERP_FAILED;
@@ -894,8 +895,13 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
894 act->step = ZFCP_ERP_STEP_PORT_CLOSING; 895 act->step = ZFCP_ERP_STEP_PORT_CLOSING;
895 return ZFCP_ERP_CONTINUES; 896 return ZFCP_ERP_CONTINUES;
896 } 897 }
897 /* fall through otherwise */
898 } 898 }
899 if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
900 port->d_id = 0;
901 _zfcp_erp_port_reopen(port, 0, "erpsoc1", NULL);
902 return ZFCP_ERP_EXIT;
903 }
904 /* fall through otherwise */
899 } 905 }
900 return ZFCP_ERP_FAILED; 906 return ZFCP_ERP_FAILED;
901} 907}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 2e31b536548..120a9a1c81f 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -27,6 +27,7 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int);
27 27
28/* zfcp_ccw.c */ 28/* zfcp_ccw.c */
29extern int zfcp_ccw_register(void); 29extern int zfcp_ccw_register(void);
30extern int zfcp_ccw_priv_sch(struct zfcp_adapter *);
30extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *); 31extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
31 32
32/* zfcp_cfdc.c */ 33/* zfcp_cfdc.c */
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 19ae0842047..35493a82d2a 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -116,7 +116,7 @@ static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port)
116{ 116{
117 if (atomic_dec_return(&wka_port->refcount) != 0) 117 if (atomic_dec_return(&wka_port->refcount) != 0)
118 return; 118 return;
119 /* wait 10 miliseconds, other reqs might pop in */ 119 /* wait 10 milliseconds, other reqs might pop in */
120 schedule_delayed_work(&wka_port->work, HZ / 100); 120 schedule_delayed_work(&wka_port->work, HZ / 100);
121} 121}
122 122
@@ -150,9 +150,14 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
150 struct zfcp_port *port; 150 struct zfcp_port *port;
151 151
152 read_lock_irqsave(&zfcp_data.config_lock, flags); 152 read_lock_irqsave(&zfcp_data.config_lock, flags);
153 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) 153 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
154 if ((port->d_id & range) == (elem->nport_did & range)) 154 if ((port->d_id & range) == (elem->nport_did & range))
155 zfcp_test_link(port); 155 zfcp_test_link(port);
156 if (!port->d_id)
157 zfcp_erp_port_reopen(port,
158 ZFCP_STATUS_COMMON_ERP_FAILED,
159 "fcrscn1", NULL);
160 }
156 161
157 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 162 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
158} 163}
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 74dee32afba..e6dae3744e7 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -526,6 +526,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
526 break; 526 break;
527 case FSF_TOPO_AL: 527 case FSF_TOPO_AL:
528 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 528 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
529 /* fall through */
529 default: 530 default:
530 dev_err(&adapter->ccw_device->dev, 531 dev_err(&adapter->ccw_device->dev,
531 "Unknown or unsupported arbitrated loop " 532 "Unknown or unsupported arbitrated loop "
@@ -897,6 +898,7 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
897 switch (fsq->word[0]) { 898 switch (fsq->word[0]) {
898 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 899 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
899 zfcp_test_link(unit->port); 900 zfcp_test_link(unit->port);
901 /* fall through */
900 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 902 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
901 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 903 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
902 break; 904 break;
@@ -993,6 +995,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
993 break; 995 break;
994 case FSF_PORT_HANDLE_NOT_VALID: 996 case FSF_PORT_HANDLE_NOT_VALID:
995 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req); 997 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
998 /* fall through */
996 case FSF_GENERIC_COMMAND_REJECTED: 999 case FSF_GENERIC_COMMAND_REJECTED:
997 case FSF_PAYLOAD_SIZE_MISMATCH: 1000 case FSF_PAYLOAD_SIZE_MISMATCH:
998 case FSF_REQUEST_SIZE_TOO_LARGE: 1001 case FSF_REQUEST_SIZE_TOO_LARGE:
@@ -1399,7 +1402,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1399 struct fsf_plogi *plogi; 1402 struct fsf_plogi *plogi;
1400 1403
1401 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1404 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1402 return; 1405 goto out;
1403 1406
1404 switch (header->fsf_status) { 1407 switch (header->fsf_status) {
1405 case FSF_PORT_ALREADY_OPEN: 1408 case FSF_PORT_ALREADY_OPEN:
@@ -1461,6 +1464,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1461 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1464 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1462 break; 1465 break;
1463 } 1466 }
1467
1468out:
1469 zfcp_port_put(port);
1464} 1470}
1465 1471
1466/** 1472/**
@@ -1473,6 +1479,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1473 struct qdio_buffer_element *sbale; 1479 struct qdio_buffer_element *sbale;
1474 struct zfcp_adapter *adapter = erp_action->adapter; 1480 struct zfcp_adapter *adapter = erp_action->adapter;
1475 struct zfcp_fsf_req *req; 1481 struct zfcp_fsf_req *req;
1482 struct zfcp_port *port = erp_action->port;
1476 int retval = -EIO; 1483 int retval = -EIO;
1477 1484
1478 spin_lock_bh(&adapter->req_q_lock); 1485 spin_lock_bh(&adapter->req_q_lock);
@@ -1493,16 +1500,18 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1493 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1500 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1494 1501
1495 req->handler = zfcp_fsf_open_port_handler; 1502 req->handler = zfcp_fsf_open_port_handler;
1496 req->qtcb->bottom.support.d_id = erp_action->port->d_id; 1503 req->qtcb->bottom.support.d_id = port->d_id;
1497 req->data = erp_action->port; 1504 req->data = port;
1498 req->erp_action = erp_action; 1505 req->erp_action = erp_action;
1499 erp_action->fsf_req = req; 1506 erp_action->fsf_req = req;
1507 zfcp_port_get(port);
1500 1508
1501 zfcp_fsf_start_erp_timer(req); 1509 zfcp_fsf_start_erp_timer(req);
1502 retval = zfcp_fsf_req_send(req); 1510 retval = zfcp_fsf_req_send(req);
1503 if (retval) { 1511 if (retval) {
1504 zfcp_fsf_req_free(req); 1512 zfcp_fsf_req_free(req);
1505 erp_action->fsf_req = NULL; 1513 erp_action->fsf_req = NULL;
1514 zfcp_port_put(port);
1506 } 1515 }
1507out: 1516out:
1508 spin_unlock_bh(&adapter->req_q_lock); 1517 spin_unlock_bh(&adapter->req_q_lock);
@@ -1590,8 +1599,10 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1590 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1599 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1591 dev_warn(&req->adapter->ccw_device->dev, 1600 dev_warn(&req->adapter->ccw_device->dev,
1592 "Opening WKA port 0x%x failed\n", wka_port->d_id); 1601 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1602 /* fall through */
1593 case FSF_ADAPTER_STATUS_AVAILABLE: 1603 case FSF_ADAPTER_STATUS_AVAILABLE:
1594 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1604 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1605 /* fall through */
1595 case FSF_ACCESS_DENIED: 1606 case FSF_ACCESS_DENIED:
1596 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 1607 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1597 break; 1608 break;
@@ -1876,7 +1887,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1876 1887
1877 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) && 1888 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
1878 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) && 1889 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
1879 (adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) { 1890 !zfcp_ccw_priv_sch(adapter)) {
1880 exclusive = (bottom->lun_access_info & 1891 exclusive = (bottom->lun_access_info &
1881 FSF_UNIT_ACCESS_EXCLUSIVE); 1892 FSF_UNIT_ACCESS_EXCLUSIVE);
1882 readwrite = (bottom->lun_access_info & 1893 readwrite = (bottom->lun_access_info &
@@ -2314,7 +2325,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2314{ 2325{
2315 struct zfcp_fsf_req *req; 2326 struct zfcp_fsf_req *req;
2316 struct fcp_cmnd_iu *fcp_cmnd_iu; 2327 struct fcp_cmnd_iu *fcp_cmnd_iu;
2317 unsigned int sbtype; 2328 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
2318 int real_bytes, retval = -EIO; 2329 int real_bytes, retval = -EIO;
2319 struct zfcp_adapter *adapter = unit->port->adapter; 2330 struct zfcp_adapter *adapter = unit->port->adapter;
2320 2331
@@ -2356,11 +2367,9 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2356 switch (scsi_cmnd->sc_data_direction) { 2367 switch (scsi_cmnd->sc_data_direction) {
2357 case DMA_NONE: 2368 case DMA_NONE:
2358 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; 2369 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2359 sbtype = SBAL_FLAGS0_TYPE_READ;
2360 break; 2370 break;
2361 case DMA_FROM_DEVICE: 2371 case DMA_FROM_DEVICE:
2362 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ; 2372 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
2363 sbtype = SBAL_FLAGS0_TYPE_READ;
2364 fcp_cmnd_iu->rddata = 1; 2373 fcp_cmnd_iu->rddata = 1;
2365 break; 2374 break;
2366 case DMA_TO_DEVICE: 2375 case DMA_TO_DEVICE:
@@ -2369,8 +2378,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2369 fcp_cmnd_iu->wddata = 1; 2378 fcp_cmnd_iu->wddata = 1;
2370 break; 2379 break;
2371 case DMA_BIDIRECTIONAL: 2380 case DMA_BIDIRECTIONAL:
2372 default:
2373 retval = -EIO;
2374 goto failed_scsi_cmnd; 2381 goto failed_scsi_cmnd;
2375 } 2382 }
2376 2383
@@ -2394,9 +2401,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2394 scsi_sglist(scsi_cmnd), 2401 scsi_sglist(scsi_cmnd),
2395 FSF_MAX_SBALS_PER_REQ); 2402 FSF_MAX_SBALS_PER_REQ);
2396 if (unlikely(real_bytes < 0)) { 2403 if (unlikely(real_bytes < 0)) {
2397 if (req->sbal_number < FSF_MAX_SBALS_PER_REQ) 2404 if (req->sbal_number >= FSF_MAX_SBALS_PER_REQ) {
2398 retval = -EIO;
2399 else {
2400 dev_err(&adapter->ccw_device->dev, 2405 dev_err(&adapter->ccw_device->dev,
2401 "Oversize data package, unit 0x%016Lx " 2406 "Oversize data package, unit 0x%016Lx "
2402 "on port 0x%016Lx closed\n", 2407 "on port 0x%016Lx closed\n",
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index e8fbeaeb5fb..7d0da230eb6 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -12,6 +12,10 @@
12#include "zfcp_ext.h" 12#include "zfcp_ext.h"
13#include <asm/atomic.h> 13#include <asm/atomic.h>
14 14
15static unsigned int default_depth = 32;
16module_param_named(queue_depth, default_depth, uint, 0600);
17MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
18
15/* Find start of Sense Information in FCP response unit*/ 19/* Find start of Sense Information in FCP response unit*/
16char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) 20char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
17{ 21{
@@ -24,6 +28,12 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
24 return fcp_sns_info_ptr; 28 return fcp_sns_info_ptr;
25} 29}
26 30
31static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth)
32{
33 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
34 return sdev->queue_depth;
35}
36
27static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) 37static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
28{ 38{
29 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 39 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
@@ -34,7 +44,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
34static int zfcp_scsi_slave_configure(struct scsi_device *sdp) 44static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
35{ 45{
36 if (sdp->tagged_supported) 46 if (sdp->tagged_supported)
37 scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, 32); 47 scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth);
38 else 48 else
39 scsi_adjust_queue_depth(sdp, 0, 1); 49 scsi_adjust_queue_depth(sdp, 0, 1);
40 return 0; 50 return 0;
@@ -647,6 +657,7 @@ struct zfcp_data zfcp_data = {
647 .name = "zfcp", 657 .name = "zfcp",
648 .module = THIS_MODULE, 658 .module = THIS_MODULE,
649 .proc_name = "zfcp", 659 .proc_name = "zfcp",
660 .change_queue_depth = zfcp_scsi_change_queue_depth,
650 .slave_alloc = zfcp_scsi_slave_alloc, 661 .slave_alloc = zfcp_scsi_slave_alloc,
651 .slave_configure = zfcp_scsi_slave_configure, 662 .slave_configure = zfcp_scsi_slave_configure,
652 .slave_destroy = zfcp_scsi_slave_destroy, 663 .slave_destroy = zfcp_scsi_slave_destroy,
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index a85ad05e854..6d465168468 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -186,31 +186,31 @@ static void jsfd_do_request(struct request_queue *q)
 {
 	struct request *req;
 
-	while ((req = elv_next_request(q)) != NULL) {
+	req = blk_fetch_request(q);
+	while (req) {
 		struct jsfd_part *jdp = req->rq_disk->private_data;
-		unsigned long offset = req->sector << 9;
-		size_t len = req->current_nr_sectors << 9;
+		unsigned long offset = blk_rq_pos(req) << 9;
+		size_t len = blk_rq_cur_bytes(req);
+		int err = -EIO;
 
-		if ((offset + len) > jdp->dsize) {
-			end_request(req, 0);
-			continue;
-		}
+		if ((offset + len) > jdp->dsize)
+			goto end;
 
 		if (rq_data_dir(req) != READ) {
 			printk(KERN_ERR "jsfd: write\n");
-			end_request(req, 0);
-			continue;
+			goto end;
 		}
 
 		if ((jdp->dbase & 0xff000000) != 0x20000000) {
 			printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase);
-			end_request(req, 0);
-			continue;
+			goto end;
 		}
 
 		jsfd_read(req->buffer, jdp->dbase + offset, len);
-
-		end_request(req, 1);
+		err = 0;
+	end:
+		if (!__blk_end_request_cur(req, err))
+			req = blk_fetch_request(q);
 	}
 }
 
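
The conversion above replaces the old elv_next_request()/end_request() loop with the blk_fetch_request()/__blk_end_request_cur() idiom: a request is fetched once and re-fetched only after the current one reports full completion. A userspace sketch of that control flow, with fake_* helpers standing in for the block-layer calls (illustrative only):

#include <stdio.h>
#include <stdbool.h>

struct fake_req { int chunks_left; };

static struct fake_req reqs[2] = { { .chunks_left = 2 }, { .chunks_left = 1 } };
static int next_req;

static struct fake_req *fake_fetch_request(void)	/* like blk_fetch_request() */
{
	return next_req < 2 ? &reqs[next_req++] : NULL;
}

/* Like __blk_end_request_cur(): finish the current chunk; return true while
 * the request still has work left, false once it is fully completed. */
static bool fake_end_request_cur(struct fake_req *req, int err)
{
	if (err)
		req->chunks_left = 0;
	else
		req->chunks_left--;
	return req->chunks_left > 0;
}

int main(void)
{
	struct fake_req *req = fake_fetch_request();

	while (req) {
		int err = 0;	/* ... perform the transfer for one chunk ... */

		printf("serviced a chunk of request %ld\n", (long)(req - reqs));
		if (!fake_end_request_cur(req, err))
			req = fake_fetch_request();
	}
	return 0;
}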
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 8b7983aba8f..36c21b19e5d 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1978,7 +1978,8 @@ static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
 {
 	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
 
-	scsi_dma_unmap(cmd);
+	if (cmd->SCp.phase == TW_PHASE_SGLIST)
+		scsi_dma_unmap(cmd);
 } /* End twa_unmap_scsi_data() */
 
 /* scsi_host_template initializer */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index c03f1d2c9e2..faa0fcfed71 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -6,7 +6,7 @@
    Arnaldo Carvalho de Melo <acme@conectiva.com.br>
    Brad Strand <linux@3ware.com>
 
-   Copyright (C) 1999-2007  3ware Inc.
+   Copyright (C) 1999-2009  3ware Inc.
 
    Kernel compatiblity By:	Andre Hedrick <andre@suse.com>
    Non-Copyright (C) 2000	Andre Hedrick <andre@suse.com>
@@ -1294,7 +1294,8 @@ static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
 {
 	dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
 
-	scsi_dma_unmap(cmd);
+	if (cmd->SCp.phase == TW_PHASE_SGLIST)
+		scsi_dma_unmap(cmd);
 } /* End tw_unmap_scsi_data() */
 
 /* This function will reset a device extension */
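
Both 3ware hunks add the same guard: scsi_dma_unmap() is called only for commands whose SCp.phase shows a scatter-gather list was actually mapped, so a command that failed before mapping never triggers an unbalanced unmap. A minimal sketch of that guard-flag pattern (the names and structures here are illustrative, not the driver's):

#include <stdio.h>

enum { PHASE_INITIAL, PHASE_SGLIST };

struct fake_cmd {
	int phase;
	int mapped;
};

static void fake_map(struct fake_cmd *cmd)
{
	cmd->mapped = 1;
	cmd->phase = PHASE_SGLIST;	/* record that a mapping now exists */
}

static void fake_unmap(struct fake_cmd *cmd)
{
	if (cmd->phase == PHASE_SGLIST)	/* the guard the patches introduce */
		cmd->mapped = 0;
}

int main(void)
{
	struct fake_cmd ok = { 0 }, early_fail = { 0 };

	fake_map(&ok);
	fake_unmap(&ok);		/* balanced: unmaps */
	fake_unmap(&early_fail);	/* never mapped: guard skips the unmap */
	printf("ok.mapped=%d early_fail.mapped=%d\n", ok.mapped, early_fail.mapped);
	return 0;
}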
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 8e71e5e122b..a5a2ba2561d 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -6,7 +6,7 @@
    Arnaldo Carvalho de Melo <acme@conectiva.com.br>
    Brad Strand <linux@3ware.com>
 
-   Copyright (C) 1999-2007  3ware Inc.
+   Copyright (C) 1999-2009  3ware Inc.
 
    Kernel compatiblity By:	Andre Hedrick <andre@suse.com>
    Non-Copyright (C) 2000	Andre Hedrick <andre@suse.com>
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8ed2990c826..6a19ed9a119 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -191,20 +191,19 @@ config SCSI_ENCLOSURE
 	  it has an enclosure device.  Selecting this option will just allow
 	  certain enclosure conditions to be reported and is not required.
 
-comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs"
-	depends on SCSI
-
 config SCSI_MULTI_LUN
 	bool "Probe all LUNs on each SCSI device"
 	depends on SCSI
 	help
-	  If you have a SCSI device that supports more than one LUN (Logical
-	  Unit Number), e.g. a CD jukebox, and only one LUN is detected, you
-	  can say Y here to force the SCSI driver to probe for multiple LUNs.
-	  A SCSI device with multiple LUNs acts logically like multiple SCSI
-	  devices. The vast majority of SCSI devices have only one LUN, and
-	  so most people can say N here. The max_luns boot/module parameter
-	  allows to override this setting.
+	  Some devices support more than one LUN (Logical Unit Number) in
+	  order to allow access to several media, e.g. CD jukebox, USB card
+	  reader, mobile phone in mass storage mode. This option forces the
+	  kernel to probe for all LUNs by default. This setting can be
+	  overridden by the max_luns boot/module parameter. Note that this
+	  option does not affect devices conforming to SCSI-3 or higher as
+	  they can explicitly report their number of LUNs. It is safe to say
+	  Y here unless you have one of those rare devices which reacts in
+	  an unexpected way when probed for multiple LUNs.
 
 config SCSI_CONSTANTS
 	bool "Verbose SCSI error reporting (kernel size +=12K)"
@@ -355,6 +354,7 @@ config ISCSI_TCP
 	  http://open-iscsi.org
 
 source "drivers/scsi/cxgb3i/Kconfig"
+source "drivers/scsi/bnx2i/Kconfig"
 
 config SGIWD93_SCSI
 	tristate "SGI WD93C93 SCSI Driver"
@@ -508,6 +508,7 @@ config SCSI_AIC7XXX_OLD
 
 source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
 source "drivers/scsi/aic94xx/Kconfig"
+source "drivers/scsi/mvsas/Kconfig"
 
 config SCSI_DPT_I2O
 	tristate "Adaptec I2O RAID support "
@@ -628,6 +629,17 @@ config FCOE
 	---help---
 	  Fibre Channel over Ethernet module
 
+config FCOE_FNIC
+	tristate "Cisco FNIC Driver"
+	depends on PCI && X86
+	select LIBFC
+	help
+	  This is support for the Cisco PCI-Express FCoE HBA.
+
+	  To compile this driver as a module, choose M here and read
+	  <file:Documentation/scsi/scsi.txt>.
+	  The module will be called fnic.
+
 config SCSI_DMX3191D
 	tristate "DMX3191D SCSI support"
 	depends on PCI && SCSI
@@ -1039,16 +1051,6 @@ config SCSI_IZIP_SLOW_CTR
 
 	  Generally, saying N is fine.
 
-config SCSI_MVSAS
-	tristate "Marvell 88SE6440 SAS/SATA support"
-	depends on PCI && SCSI
-	select SCSI_SAS_LIBSAS
-	help
-	  This driver supports Marvell SAS/SATA PCI devices.
-
-	  To compiler this driver as a module, choose M here: the module
-	  will be called mvsas.
-
 config SCSI_NCR53C406A
 	tristate "NCR53c406a SCSI support"
 	depends on ISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e7c861ac417..25429ea63d0 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_SCSI_DH) += device_handler/
 obj-$(CONFIG_LIBFC)		+= libfc/
 obj-$(CONFIG_LIBFCOE)		+= fcoe/
 obj-$(CONFIG_FCOE)		+= fcoe/
+obj-$(CONFIG_FCOE_FNIC)	+= fnic/
 obj-$(CONFIG_ISCSI_TCP)	+= libiscsi.o libiscsi_tcp.o iscsi_tcp.o
 obj-$(CONFIG_INFINIBAND_ISER)	+= libiscsi.o
 obj-$(CONFIG_SCSI_A4000T)	+= 53c700.o a4000t.o
@@ -125,9 +126,10 @@ obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
 obj-$(CONFIG_SCSI_IBMVFC)	+= ibmvscsi/
 obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
 obj-$(CONFIG_SCSI_STEX)	+= stex.o
-obj-$(CONFIG_SCSI_MVSAS)	+= mvsas.o
+obj-$(CONFIG_SCSI_MVSAS)	+= mvsas/
 obj-$(CONFIG_PS3_ROM)		+= ps3rom.o
 obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgb3i/
+obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
 
 obj-$(CONFIG_ARM)		+= arm/
 
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index c889d845868..1cdf09a4779 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -224,7 +224,7 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
 	return ret;
 }
 
-static int
+static irqreturn_t
 NCR_D700_intr(int irq, void *data)
 {
 	struct NCR_D700_private *p = (struct NCR_D700_private *)data;
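
The return-type fix above matters because the generic IRQ core interprets the handler's irqreturn_t value to tell handled interrupts from spurious ones. A rough userspace sketch of that convention, with a fake enum imitating (not taken from) <linux/irqreturn.h> and a fake device check:

#include <stdio.h>

typedef enum { FAKE_IRQ_NONE, FAKE_IRQ_HANDLED } fake_irqreturn_t;

static fake_irqreturn_t fake_intr(int irq, void *data)
{
	int *device_raised_irq = data;

	if (!*device_raised_irq)
		return FAKE_IRQ_NONE;	/* not ours: lets the core detect stuck lines */
	/* ... service the device here ... */
	return FAKE_IRQ_HANDLED;
}

int main(void)
{
	int raised = 1;

	printf("handler returned %d\n", fake_intr(7, &raised));
	return 0;
}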
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
new file mode 100644
index 00000000000..2fceb19eb27
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -0,0 +1,155 @@
1/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 */
11#ifndef __57XX_ISCSI_CONSTANTS_H_
12#define __57XX_ISCSI_CONSTANTS_H_
13
14/**
15* This file defines HSI constants for the iSCSI flows
16*/
17
18/* iSCSI request op codes */
19#define ISCSI_OPCODE_CLEANUP_REQUEST (7)
20
21/* iSCSI response/messages op codes */
22#define ISCSI_OPCODE_CLEANUP_RESPONSE (0x27)
23#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION (0)
24
25/* iSCSI task types */
26#define ISCSI_TASK_TYPE_READ (0)
27#define ISCSI_TASK_TYPE_WRITE (1)
28#define ISCSI_TASK_TYPE_MPATH (2)
29
30/* initial CQ sequence numbers */
31#define ISCSI_INITIAL_SN (1)
32
33/* KWQ (kernel work queue) layer codes */
34#define ISCSI_KWQE_LAYER_CODE (6)
35
36/* KWQ (kernel work queue) request op codes */
37#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0)
38#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1)
39#define ISCSI_KWQE_OPCODE_UPDATE_CONN (2)
40#define ISCSI_KWQE_OPCODE_DESTROY_CONN (3)
41#define ISCSI_KWQE_OPCODE_INIT1 (4)
42#define ISCSI_KWQE_OPCODE_INIT2 (5)
43
44/* KCQ (kernel completion queue) response op codes */
45#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN (0x10)
46#define ISCSI_KCQE_OPCODE_UPDATE_CONN (0x12)
47#define ISCSI_KCQE_OPCODE_DESTROY_CONN (0x13)
48#define ISCSI_KCQE_OPCODE_INIT (0x14)
49#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK (0x15)
50#define ISCSI_KCQE_OPCODE_TCP_RESET (0x16)
51#define ISCSI_KCQE_OPCODE_TCP_SYN (0x17)
52#define ISCSI_KCQE_OPCODE_TCP_FIN (0X18)
53#define ISCSI_KCQE_OPCODE_TCP_ERROR (0x19)
54#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
55#define ISCSI_KCQE_OPCODE_ISCSI_ERROR (0x21)
56
57/* KCQ (kernel completion queue) completion status */
58#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS (0x0)
59#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x1)
60#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x2)
61#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x3)
62#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR (0x4)
63
64#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR (0x5)
65#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR (0x6)
66
67#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE (0xa)
68#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE (0xb)
69#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN (0xc)
70#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT (0xd)
71#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN (0xe)
72
73/* Response */
74#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN (0xf)
75#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T (0x10)
76#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO (0x2c)
77#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG (0x2d)
78#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0 (0x11)
79#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1 (0x12)
80#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2 (0x13)
81#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3 (0x14)
82#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4 (0x15)
83#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5 (0x16)
84#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6 (0x17)
85
86/* Data-In */
87#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN (0x18)
88#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN (0x19)
89#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO (0x1a)
90#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV (0x1b)
91#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN (0x1c)
92#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN (0x1d)
93
94/* R2T */
95#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF (0x1f)
96#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN (0x20)
97#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN (0x21)
98#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22)
99#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23)
100#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED (0x24)
101#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV (0x25)
102#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN (0x26)
103#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27)
104
105/* TMF */
106#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN (0x28)
107#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN (0x29)
108#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN (0x2a)
109#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP (0x2b)
110
111/* IP/TCP processing errors: */
112#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT (0x40)
113#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS (0x41)
114#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG (0x42)
115#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS (0x43)
116
117/* iSCSI licensing errors */
118/* general iSCSI license not installed */
119#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED (0x50)
120/* additional LOM specific iSCSI license not installed */
121#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51)
122
123/* SQ/RQ/CQ DB structure sizes */
124#define ISCSI_SQ_DB_SIZE (16)
125#define ISCSI_RQ_DB_SIZE (16)
126#define ISCSI_CQ_DB_SIZE (80)
127
128#define ISCSI_SQN_TO_NOTIFY_NOT_VALID 0xFFFF
129
130/* Page size codes (for flags field in connection offload request) */
131#define ISCSI_PAGE_SIZE_256 (0)
132#define ISCSI_PAGE_SIZE_512 (1)
133#define ISCSI_PAGE_SIZE_1K (2)
134#define ISCSI_PAGE_SIZE_2K (3)
135#define ISCSI_PAGE_SIZE_4K (4)
136#define ISCSI_PAGE_SIZE_8K (5)
137#define ISCSI_PAGE_SIZE_16K (6)
138#define ISCSI_PAGE_SIZE_32K (7)
139#define ISCSI_PAGE_SIZE_64K (8)
140#define ISCSI_PAGE_SIZE_128K (9)
141#define ISCSI_PAGE_SIZE_256K (10)
142#define ISCSI_PAGE_SIZE_512K (11)
143#define ISCSI_PAGE_SIZE_1M (12)
144#define ISCSI_PAGE_SIZE_2M (13)
145#define ISCSI_PAGE_SIZE_4M (14)
146#define ISCSI_PAGE_SIZE_8M (15)
147
148/* Iscsi PDU related defines */
149#define ISCSI_HEADER_SIZE (48)
150#define ISCSI_DIGEST_SHIFT (2)
151#define ISCSI_DIGEST_SIZE (4)
152
153#define B577XX_ISCSI_CONNECTION_TYPE 3
154
155#endif /*__57XX_ISCSI_CONSTANTS_H_ */
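
The page-size codes above follow a regular pattern: each code is log2(page_size / 256), so 256 bytes maps to 0, 512 to 1, and so on up to 8M at 15. A small illustrative encoder (not driver code) that derives the code from a byte count:

#include <stdio.h>

static int iscsi_page_size_code(unsigned long page_size)
{
	int code = 0;

	for (unsigned long sz = 256; sz < page_size; sz <<= 1)
		code++;		/* ISCSI_PAGE_SIZE_256 is 0, _512 is 1, ... */
	return code;
}

int main(void)
{
	printf("4K  -> %d\n", iscsi_page_size_code(4096));	/* ISCSI_PAGE_SIZE_4K  (4) */
	printf("64K -> %d\n", iscsi_page_size_code(65536));	/* ISCSI_PAGE_SIZE_64K (8) */
	return 0;
}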
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
new file mode 100644
index 00000000000..36af1afef9b
--- /dev/null
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -0,0 +1,1509 @@
1/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 */
11#ifndef __57XX_ISCSI_HSI_LINUX_LE__
12#define __57XX_ISCSI_HSI_LINUX_LE__
13
14/*
15 * iSCSI Async CQE
16 */
17struct bnx2i_async_msg {
18#if defined(__BIG_ENDIAN)
19 u8 op_code;
20 u8 reserved1;
21 u16 reserved0;
22#elif defined(__LITTLE_ENDIAN)
23 u16 reserved0;
24 u8 reserved1;
25 u8 op_code;
26#endif
27 u32 reserved2;
28 u32 exp_cmd_sn;
29 u32 max_cmd_sn;
30 u32 reserved3[2];
31#if defined(__BIG_ENDIAN)
32 u16 reserved5;
33 u8 err_code;
34 u8 reserved4;
35#elif defined(__LITTLE_ENDIAN)
36 u8 reserved4;
37 u8 err_code;
38 u16 reserved5;
39#endif
40 u32 reserved6;
41 u32 lun[2];
42#if defined(__BIG_ENDIAN)
43 u8 async_event;
44 u8 async_vcode;
45 u16 param1;
46#elif defined(__LITTLE_ENDIAN)
47 u16 param1;
48 u8 async_vcode;
49 u8 async_event;
50#endif
51#if defined(__BIG_ENDIAN)
52 u16 param2;
53 u16 param3;
54#elif defined(__LITTLE_ENDIAN)
55 u16 param3;
56 u16 param2;
57#endif
58 u32 reserved7[3];
59 u32 cq_req_sn;
60};
61
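
Every structure in this header is declared twice because the firmware interface is defined in 32-bit words: listing sub-word fields in opposite order on big- and little-endian hosts keeps each field in the same bits of every word. A short demo for a little-endian host, mirroring the first word of bnx2i_async_msg (illustrative only):

#include <stdio.h>
#include <stdint.h>

union word0 {
	struct {		/* the __LITTLE_ENDIAN branch above */
		uint16_t reserved0;
		uint8_t  reserved1;
		uint8_t  op_code;
	} f;
	uint32_t raw;
};

int main(void)
{
	union word0 w = { .f = { .op_code = 0x32 } };

	/* On x86 this prints 0x32000000: op_code lands in bits 31:24 of the
	 * word, exactly where the __BIG_ENDIAN declaration (op_code first)
	 * would also place it. */
	printf("raw word = 0x%08x\n", w.raw);
	return 0;
}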
62
63/*
64 * iSCSI Buffer Descriptor (BD)
65 */
66struct iscsi_bd {
67 u32 buffer_addr_hi;
68 u32 buffer_addr_lo;
69#if defined(__BIG_ENDIAN)
70 u16 reserved0;
71 u16 buffer_length;
72#elif defined(__LITTLE_ENDIAN)
73 u16 buffer_length;
74 u16 reserved0;
75#endif
76#if defined(__BIG_ENDIAN)
77 u16 reserved3;
78 u16 flags;
79#define ISCSI_BD_RESERVED1 (0x3F<<0)
80#define ISCSI_BD_RESERVED1_SHIFT 0
81#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
82#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
83#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
84#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
85#define ISCSI_BD_RESERVED2 (0xFF<<8)
86#define ISCSI_BD_RESERVED2_SHIFT 8
87#elif defined(__LITTLE_ENDIAN)
88 u16 flags;
89#define ISCSI_BD_RESERVED1 (0x3F<<0)
90#define ISCSI_BD_RESERVED1_SHIFT 0
91#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
92#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
93#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
94#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
95#define ISCSI_BD_RESERVED2 (0xFF<<8)
96#define ISCSI_BD_RESERVED2_SHIFT 8
97 u16 reserved3;
98#endif
99};
100
101
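
A sketch of how a buffer-descriptor chain with the iscsi_bd layout above would be filled: each 64-bit DMA address is split into hi/lo words and the first and last entries are flagged. The helper and addresses are illustrative, not code from this patch:

#include <stdio.h>
#include <stdint.h>

struct sketch_bd {	/* mirrors iscsi_bd, little-endian branch */
	uint32_t buffer_addr_hi;
	uint32_t buffer_addr_lo;
	uint16_t buffer_length;
	uint16_t reserved0;
	uint16_t flags;
	uint16_t reserved3;
};

#define BD_LAST_IN_BD_CHAIN	(0x1 << 6)	/* values copied from above */
#define BD_FIRST_IN_BD_CHAIN	(0x1 << 7)

static void fill_bd_chain(struct sketch_bd *bd, const uint64_t *addr,
			  const uint16_t *len, int nbds)
{
	for (int i = 0; i < nbds; i++) {
		bd[i].buffer_addr_hi = (uint32_t)(addr[i] >> 32);
		bd[i].buffer_addr_lo = (uint32_t)addr[i];
		bd[i].buffer_length = len[i];
		bd[i].flags = 0;
		if (i == 0)
			bd[i].flags |= BD_FIRST_IN_BD_CHAIN;
		if (i == nbds - 1)
			bd[i].flags |= BD_LAST_IN_BD_CHAIN;
	}
}

int main(void)
{
	struct sketch_bd bds[2];
	uint64_t addr[2] = { 0x1234567890ull, 0xabcdef000ull };
	uint16_t len[2] = { 4096, 512 };

	fill_bd_chain(bds, addr, len, 2);
	printf("bd0: hi=0x%x lo=0x%x flags=0x%x\n",
	       bds[0].buffer_addr_hi, bds[0].buffer_addr_lo, bds[0].flags);
	return 0;
}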
102/*
103 * iSCSI Cleanup SQ WQE
104 */
105struct bnx2i_cleanup_request {
106#if defined(__BIG_ENDIAN)
107 u8 op_code;
108 u8 reserved1;
109 u16 reserved0;
110#elif defined(__LITTLE_ENDIAN)
111 u16 reserved0;
112 u8 reserved1;
113 u8 op_code;
114#endif
115 u32 reserved2[3];
116#if defined(__BIG_ENDIAN)
117 u16 reserved3;
118 u16 itt;
119#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
120#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
121#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
122#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
123#elif defined(__LITTLE_ENDIAN)
124 u16 itt;
125#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
126#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
127#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
128#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
129 u16 reserved3;
130#endif
131 u32 reserved4[10];
132#if defined(__BIG_ENDIAN)
133 u8 cq_index;
134 u8 reserved6;
135 u16 reserved5;
136#elif defined(__LITTLE_ENDIAN)
137 u16 reserved5;
138 u8 reserved6;
139 u8 cq_index;
140#endif
141};
142
143
144/*
145 * iSCSI Cleanup CQE
146 */
147struct bnx2i_cleanup_response {
148#if defined(__BIG_ENDIAN)
149 u8 op_code;
150 u8 status;
151 u16 reserved0;
152#elif defined(__LITTLE_ENDIAN)
153 u16 reserved0;
154 u8 status;
155 u8 op_code;
156#endif
157 u32 reserved1[3];
158 u32 reserved2[2];
159#if defined(__BIG_ENDIAN)
160 u16 reserved4;
161 u8 err_code;
162 u8 reserved3;
163#elif defined(__LITTLE_ENDIAN)
164 u8 reserved3;
165 u8 err_code;
166 u16 reserved4;
167#endif
168 u32 reserved5[7];
169#if defined(__BIG_ENDIAN)
170 u16 reserved6;
171 u16 itt;
172#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
173#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
174#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
175#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
176#elif defined(__LITTLE_ENDIAN)
177 u16 itt;
178#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
179#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
180#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
181#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
182 u16 reserved6;
183#endif
184 u32 cq_req_sn;
185};
186
187
188/*
189 * SCSI read/write SQ WQE
190 */
191struct bnx2i_cmd_request {
192#if defined(__BIG_ENDIAN)
193 u8 op_code;
194 u8 op_attr;
195#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
196#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
197#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
198#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
199#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
200#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
201#define ISCSI_CMD_REQUEST_READ (0x1<<6)
202#define ISCSI_CMD_REQUEST_READ_SHIFT 6
203#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
204#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
205 u16 reserved0;
206#elif defined(__LITTLE_ENDIAN)
207 u16 reserved0;
208 u8 op_attr;
209#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
210#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
211#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
212#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
213#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
214#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
215#define ISCSI_CMD_REQUEST_READ (0x1<<6)
216#define ISCSI_CMD_REQUEST_READ_SHIFT 6
217#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
218#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
219 u8 op_code;
220#endif
221#if defined(__BIG_ENDIAN)
222 u16 ud_buffer_offset;
223 u16 sd_buffer_offset;
224#elif defined(__LITTLE_ENDIAN)
225 u16 sd_buffer_offset;
226 u16 ud_buffer_offset;
227#endif
228 u32 lun[2];
229#if defined(__BIG_ENDIAN)
230 u16 reserved2;
231 u16 itt;
232#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
233#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
234#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
235#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
236#elif defined(__LITTLE_ENDIAN)
237 u16 itt;
238#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
239#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
240#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
241#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
242 u16 reserved2;
243#endif
244 u32 total_data_transfer_length;
245 u32 cmd_sn;
246 u32 reserved3;
247 u32 cdb[4];
248 u32 zero_fill;
249 u32 bd_list_addr_lo;
250 u32 bd_list_addr_hi;
251#if defined(__BIG_ENDIAN)
252 u8 cq_index;
253 u8 sd_start_bd_index;
254 u8 ud_start_bd_index;
255 u8 num_bds;
256#elif defined(__LITTLE_ENDIAN)
257 u8 num_bds;
258 u8 ud_start_bd_index;
259 u8 sd_start_bd_index;
260 u8 cq_index;
261#endif
262};
263
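
The itt field of the command WQE packs a 14-bit task index and a 2-bit type using the paired MASK/SHIFT macros. A standalone sketch of the packing and unpacking (macros copied from the definition above; pack_itt() is a hypothetical helper):

#include <stdio.h>
#include <stdint.h>

#define ISCSI_CMD_REQUEST_INDEX		(0x3FFF<<0)
#define ISCSI_CMD_REQUEST_INDEX_SHIFT	0
#define ISCSI_CMD_REQUEST_TYPE		(0x3<<14)
#define ISCSI_CMD_REQUEST_TYPE_SHIFT	14

static uint16_t pack_itt(uint16_t index, uint16_t type)
{
	return (uint16_t)(((index << ISCSI_CMD_REQUEST_INDEX_SHIFT) &
			   ISCSI_CMD_REQUEST_INDEX) |
			  ((type << ISCSI_CMD_REQUEST_TYPE_SHIFT) &
			   ISCSI_CMD_REQUEST_TYPE));
}

int main(void)
{
	uint16_t itt = pack_itt(0x00a5, 1);

	printf("itt=0x%04x index=0x%x type=%d\n", itt,
	       (itt & ISCSI_CMD_REQUEST_INDEX) >> ISCSI_CMD_REQUEST_INDEX_SHIFT,
	       (itt & ISCSI_CMD_REQUEST_TYPE) >> ISCSI_CMD_REQUEST_TYPE_SHIFT);
	return 0;	/* prints itt=0x40a5 index=0xa5 type=1 */
}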
264
265/*
266 * task statistics for write response
267 */
268struct bnx2i_write_resp_task_stat {
269 u32 num_data_ins;
270};
271
272/*
273 * task statistics for read response
274 */
275struct bnx2i_read_resp_task_stat {
276#if defined(__BIG_ENDIAN)
277 u16 num_data_outs;
278 u16 num_r2ts;
279#elif defined(__LITTLE_ENDIAN)
280 u16 num_r2ts;
281 u16 num_data_outs;
282#endif
283};
284
285/*
286 * task statistics for iSCSI cmd response
287 */
288union bnx2i_cmd_resp_task_stat {
289 struct bnx2i_write_resp_task_stat write_stat;
290 struct bnx2i_read_resp_task_stat read_stat;
291};
292
293/*
294 * SCSI Command CQE
295 */
296struct bnx2i_cmd_response {
297#if defined(__BIG_ENDIAN)
298 u8 op_code;
299 u8 response_flags;
300#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
301#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
302#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
303#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
304#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
305#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
306#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
307#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
308#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
309#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
310#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
311#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
312 u8 response;
313 u8 status;
314#elif defined(__LITTLE_ENDIAN)
315 u8 status;
316 u8 response;
317 u8 response_flags;
318#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
319#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
320#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
321#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
322#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
323#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
324#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
325#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
326#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
327#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
328#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
329#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
330 u8 op_code;
331#endif
332 u32 data_length;
333 u32 exp_cmd_sn;
334 u32 max_cmd_sn;
335 u32 reserved2;
336 u32 residual_count;
337#if defined(__BIG_ENDIAN)
338 u16 reserved4;
339 u8 err_code;
340 u8 reserved3;
341#elif defined(__LITTLE_ENDIAN)
342 u8 reserved3;
343 u8 err_code;
344 u16 reserved4;
345#endif
346 u32 reserved5[5];
347 union bnx2i_cmd_resp_task_stat task_stat;
348 u32 reserved6;
349#if defined(__BIG_ENDIAN)
350 u16 reserved7;
351 u16 itt;
352#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
353#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
354#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
355#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
356#elif defined(__LITTLE_ENDIAN)
357 u16 itt;
358#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
359#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
360#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
361#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
362 u16 reserved7;
363#endif
364 u32 cq_req_sn;
365};
366
367
368
369/*
370 * firmware middle-path request SQ WQE
371 */
372struct bnx2i_fw_mp_request {
373#if defined(__BIG_ENDIAN)
374 u8 op_code;
375 u8 op_attr;
376 u16 hdr_opaque1;
377#elif defined(__LITTLE_ENDIAN)
378 u16 hdr_opaque1;
379 u8 op_attr;
380 u8 op_code;
381#endif
382 u32 data_length;
383 u32 hdr_opaque2[2];
384#if defined(__BIG_ENDIAN)
385 u16 reserved0;
386 u16 itt;
387#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
388#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
389#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
390#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
391#elif defined(__LITTLE_ENDIAN)
392 u16 itt;
393#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
394#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
395#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
396#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
397 u16 reserved0;
398#endif
399 u32 hdr_opaque3[4];
400 u32 resp_bd_list_addr_lo;
401 u32 resp_bd_list_addr_hi;
402 u32 resp_buffer;
403#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
404#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
405#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24)
406#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24
407#if defined(__BIG_ENDIAN)
408 u16 reserved4;
409 u8 reserved3;
410 u8 flags;
411#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
412#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
413#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
414#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
415#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
416#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
417#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
418#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
419#elif defined(__LITTLE_ENDIAN)
420 u8 flags;
421#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
422#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
423#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
424#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
425#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
426#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
427#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
428#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
429 u8 reserved3;
430 u16 reserved4;
431#endif
432 u32 bd_list_addr_lo;
433 u32 bd_list_addr_hi;
434#if defined(__BIG_ENDIAN)
435 u8 cq_index;
436 u8 reserved6;
437 u8 reserved5;
438 u8 num_bds;
439#elif defined(__LITTLE_ENDIAN)
440 u8 num_bds;
441 u8 reserved5;
442 u8 reserved6;
443 u8 cq_index;
444#endif
445};
446
447
448/*
449 * firmware response - CQE: used only by firmware
450 */
451struct bnx2i_fw_response {
452 u32 hdr_dword1[2];
453 u32 hdr_exp_cmd_sn;
454 u32 hdr_max_cmd_sn;
455 u32 hdr_ttt;
456 u32 hdr_res_cnt;
457 u32 cqe_flags;
458#define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0)
459#define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0
460#define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8)
461#define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8
462#define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16)
463#define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16
464 u32 stat_sn;
465 u32 hdr_dword2[2];
466 u32 hdr_dword3[2];
467 u32 task_stat;
468 u32 reserved0;
469 u32 hdr_itt;
470 u32 cq_req_sn;
471};
472
473
474/*
475 * iSCSI KCQ CQE parameters
476 */
477union iscsi_kcqe_params {
478 u32 reserved0[4];
479};
480
481/*
482 * iSCSI KCQ CQE
483 */
484struct iscsi_kcqe {
485 u32 iscsi_conn_id;
486 u32 completion_status;
487 u32 iscsi_conn_context_id;
488 union iscsi_kcqe_params params;
489#if defined(__BIG_ENDIAN)
490 u8 flags;
491#define ISCSI_KCQE_RESERVED0 (0xF<<0)
492#define ISCSI_KCQE_RESERVED0_SHIFT 0
493#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
494#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
495#define ISCSI_KCQE_RESERVED1 (0x1<<7)
496#define ISCSI_KCQE_RESERVED1_SHIFT 7
497 u8 op_code;
498 u16 qe_self_seq;
499#elif defined(__LITTLE_ENDIAN)
500 u16 qe_self_seq;
501 u8 op_code;
502 u8 flags;
503#define ISCSI_KCQE_RESERVED0 (0xF<<0)
504#define ISCSI_KCQE_RESERVED0_SHIFT 0
505#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
506#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
507#define ISCSI_KCQE_RESERVED1 (0x1<<7)
508#define ISCSI_KCQE_RESERVED1_SHIFT 7
509#endif
510};
511
512
513
514/*
515 * iSCSI KWQE header
516 */
517struct iscsi_kwqe_header {
518#if defined(__BIG_ENDIAN)
519 u8 flags;
520#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
521#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
522#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
523#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
524#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
525#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
526 u8 op_code;
527#elif defined(__LITTLE_ENDIAN)
528 u8 op_code;
529 u8 flags;
530#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
531#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
532#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
533#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
534#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
535#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
536#endif
537};
538
539/*
540 * iSCSI firmware init request 1
541 */
542struct iscsi_kwqe_init1 {
543#if defined(__BIG_ENDIAN)
544 struct iscsi_kwqe_header hdr;
545 u8 reserved0;
546 u8 num_cqs;
547#elif defined(__LITTLE_ENDIAN)
548 u8 num_cqs;
549 u8 reserved0;
550 struct iscsi_kwqe_header hdr;
551#endif
552 u32 dummy_buffer_addr_lo;
553 u32 dummy_buffer_addr_hi;
554#if defined(__BIG_ENDIAN)
555 u16 num_ccells_per_conn;
556 u16 num_tasks_per_conn;
557#elif defined(__LITTLE_ENDIAN)
558 u16 num_tasks_per_conn;
559 u16 num_ccells_per_conn;
560#endif
561#if defined(__BIG_ENDIAN)
562 u16 sq_wqes_per_page;
563 u16 sq_num_wqes;
564#elif defined(__LITTLE_ENDIAN)
565 u16 sq_num_wqes;
566 u16 sq_wqes_per_page;
567#endif
568#if defined(__BIG_ENDIAN)
569 u8 cq_log_wqes_per_page;
570 u8 flags;
571#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
572#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
573#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
574#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
575#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
576#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
577#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
578#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
579 u16 cq_num_wqes;
580#elif defined(__LITTLE_ENDIAN)
581 u16 cq_num_wqes;
582 u8 flags;
583#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
584#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
585#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
586#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
587#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
588#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
589#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
590#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
591 u8 cq_log_wqes_per_page;
592#endif
593#if defined(__BIG_ENDIAN)
594 u16 cq_num_pages;
595 u16 sq_num_pages;
596#elif defined(__LITTLE_ENDIAN)
597 u16 sq_num_pages;
598 u16 cq_num_pages;
599#endif
600#if defined(__BIG_ENDIAN)
601 u16 rq_buffer_size;
602 u16 rq_num_wqes;
603#elif defined(__LITTLE_ENDIAN)
604 u16 rq_num_wqes;
605 u16 rq_buffer_size;
606#endif
607};
608
609/*
610 * iSCSI firmware init request 2
611 */
612struct iscsi_kwqe_init2 {
613#if defined(__BIG_ENDIAN)
614 struct iscsi_kwqe_header hdr;
615 u16 max_cq_sqn;
616#elif defined(__LITTLE_ENDIAN)
617 u16 max_cq_sqn;
618 struct iscsi_kwqe_header hdr;
619#endif
620 u32 error_bit_map[2];
621 u32 reserved1[5];
622};
623
624/*
625 * Initial iSCSI connection offload request 1
626 */
627struct iscsi_kwqe_conn_offload1 {
628#if defined(__BIG_ENDIAN)
629 struct iscsi_kwqe_header hdr;
630 u16 iscsi_conn_id;
631#elif defined(__LITTLE_ENDIAN)
632 u16 iscsi_conn_id;
633 struct iscsi_kwqe_header hdr;
634#endif
635 u32 sq_page_table_addr_lo;
636 u32 sq_page_table_addr_hi;
637 u32 cq_page_table_addr_lo;
638 u32 cq_page_table_addr_hi;
639 u32 reserved0[3];
640};
641
642/*
643 * iSCSI Page Table Entry (PTE)
644 */
645struct iscsi_pte {
646 u32 hi;
647 u32 lo;
648};
649
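
A queue's page table is presumably an array of these PTEs, one per queue page, each holding the halves of that page's 64-bit DMA address. A brief illustrative sketch (the real driver's setup code is not part of this patch):

#include <stdio.h>
#include <stdint.h>

struct sketch_pte { uint32_t hi, lo; };	/* mirrors struct iscsi_pte */

static void fill_page_table(struct sketch_pte *pt, const uint64_t *pg, int n)
{
	for (int i = 0; i < n; i++) {
		pt[i].hi = (uint32_t)(pg[i] >> 32);
		pt[i].lo = (uint32_t)pg[i];
	}
}

int main(void)
{
	uint64_t pages[2] = { 0x100000000ull, 0x100001000ull };
	struct sketch_pte pt[2];

	fill_page_table(pt, pages, 2);
	printf("pte0 hi=0x%x lo=0x%x\n", pt[0].hi, pt[0].lo);
	return 0;
}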
650/*
651 * Initial iSCSI connection offload request 2
652 */
653struct iscsi_kwqe_conn_offload2 {
654#if defined(__BIG_ENDIAN)
655 struct iscsi_kwqe_header hdr;
656 u16 reserved0;
657#elif defined(__LITTLE_ENDIAN)
658 u16 reserved0;
659 struct iscsi_kwqe_header hdr;
660#endif
661 u32 rq_page_table_addr_lo;
662 u32 rq_page_table_addr_hi;
663 struct iscsi_pte sq_first_pte;
664 struct iscsi_pte cq_first_pte;
665 u32 num_additional_wqes;
666};
667
668
669/*
670 * Initial iSCSI connection offload request 3
671 */
672struct iscsi_kwqe_conn_offload3 {
673#if defined(__BIG_ENDIAN)
674 struct iscsi_kwqe_header hdr;
675 u16 reserved0;
676#elif defined(__LITTLE_ENDIAN)
677 u16 reserved0;
678 struct iscsi_kwqe_header hdr;
679#endif
680 u32 reserved1;
681 struct iscsi_pte qp_first_pte[3];
682};
683
684
685/*
686 * iSCSI connection update request
687 */
688struct iscsi_kwqe_conn_update {
689#if defined(__BIG_ENDIAN)
690 struct iscsi_kwqe_header hdr;
691 u16 reserved0;
692#elif defined(__LITTLE_ENDIAN)
693 u16 reserved0;
694 struct iscsi_kwqe_header hdr;
695#endif
696#if defined(__BIG_ENDIAN)
697 u8 session_error_recovery_level;
698 u8 max_outstanding_r2ts;
699 u8 reserved2;
700 u8 conn_flags;
701#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
702#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
703#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
704#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
705#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
706#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
707#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
708#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
709#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
710#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
711#elif defined(__LITTLE_ENDIAN)
712 u8 conn_flags;
713#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
714#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
715#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
716#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
717#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
718#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
719#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
720#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
721#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
722#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
723 u8 reserved2;
724 u8 max_outstanding_r2ts;
725 u8 session_error_recovery_level;
726#endif
727 u32 context_id;
728 u32 max_send_pdu_length;
729 u32 max_recv_pdu_length;
730 u32 first_burst_length;
731 u32 max_burst_length;
732 u32 exp_stat_sn;
733};
734
735/*
736 * iSCSI destroy connection request
737 */
738struct iscsi_kwqe_conn_destroy {
739#if defined(__BIG_ENDIAN)
740 struct iscsi_kwqe_header hdr;
741 u16 reserved0;
742#elif defined(__LITTLE_ENDIAN)
743 u16 reserved0;
744 struct iscsi_kwqe_header hdr;
745#endif
746 u32 context_id;
747 u32 reserved1[6];
748};
749
750/*
751 * iSCSI KWQ WQE
752 */
753union iscsi_kwqe {
754 struct iscsi_kwqe_init1 init1;
755 struct iscsi_kwqe_init2 init2;
756 struct iscsi_kwqe_conn_offload1 conn_offload1;
757 struct iscsi_kwqe_conn_offload2 conn_offload2;
758 struct iscsi_kwqe_conn_update conn_update;
759 struct iscsi_kwqe_conn_destroy conn_destroy;
760};
761
762/*
763 * iSCSI Login SQ WQE
764 */
765struct bnx2i_login_request {
766#if defined(__BIG_ENDIAN)
767 u8 op_code;
768 u8 op_attr;
769#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
770#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
771#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
772#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
773#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
774#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
775#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
776#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
777#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
778#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
779 u8 version_max;
780 u8 version_min;
781#elif defined(__LITTLE_ENDIAN)
782 u8 version_min;
783 u8 version_max;
784 u8 op_attr;
785#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
786#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
787#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
788#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
789#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
790#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
791#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
792#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
793#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
794#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
795 u8 op_code;
796#endif
797 u32 data_length;
798 u32 isid_lo;
799#if defined(__BIG_ENDIAN)
800 u16 isid_hi;
801 u16 tsih;
802#elif defined(__LITTLE_ENDIAN)
803 u16 tsih;
804 u16 isid_hi;
805#endif
806#if defined(__BIG_ENDIAN)
807 u16 reserved2;
808 u16 itt;
809#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
810#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
811#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
812#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
813#elif defined(__LITTLE_ENDIAN)
814 u16 itt;
815#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
816#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
817#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
818#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
819 u16 reserved2;
820#endif
821#if defined(__BIG_ENDIAN)
822 u16 cid;
823 u16 reserved3;
824#elif defined(__LITTLE_ENDIAN)
825 u16 reserved3;
826 u16 cid;
827#endif
828 u32 cmd_sn;
829 u32 exp_stat_sn;
830 u32 reserved4;
831 u32 resp_bd_list_addr_lo;
832 u32 resp_bd_list_addr_hi;
833 u32 resp_buffer;
834#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
835#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
836#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24)
837#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24
838#if defined(__BIG_ENDIAN)
839 u16 reserved8;
840 u8 reserved7;
841 u8 flags;
842#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
843#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
844#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
845#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
846#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
847#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
848#elif defined(__LITTLE_ENDIAN)
849 u8 flags;
850#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
851#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
852#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
853#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
854#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
855#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
856 u8 reserved7;
857 u16 reserved8;
858#endif
859 u32 bd_list_addr_lo;
860 u32 bd_list_addr_hi;
861#if defined(__BIG_ENDIAN)
862 u8 cq_index;
863 u8 reserved10;
864 u8 reserved9;
865 u8 num_bds;
866#elif defined(__LITTLE_ENDIAN)
867 u8 num_bds;
868 u8 reserved9;
869 u8 reserved10;
870 u8 cq_index;
871#endif
872};
873
874
875/*
876 * iSCSI Login CQE
877 */
878struct bnx2i_login_response {
879#if defined(__BIG_ENDIAN)
880 u8 op_code;
881 u8 response_flags;
882#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
883#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
884#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
885#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
886#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
887#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
888#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
889#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
890#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
891#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
892 u8 version_max;
893 u8 version_active;
894#elif defined(__LITTLE_ENDIAN)
895 u8 version_active;
896 u8 version_max;
897 u8 response_flags;
898#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
899#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
900#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
901#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
902#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
903#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
904#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
905#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
906#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
907#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
908 u8 op_code;
909#endif
910 u32 data_length;
911 u32 exp_cmd_sn;
912 u32 max_cmd_sn;
913 u32 reserved1[2];
914#if defined(__BIG_ENDIAN)
915 u16 reserved3;
916 u8 err_code;
917 u8 reserved2;
918#elif defined(__LITTLE_ENDIAN)
919 u8 reserved2;
920 u8 err_code;
921 u16 reserved3;
922#endif
923 u32 stat_sn;
924 u32 isid_lo;
925#if defined(__BIG_ENDIAN)
926 u16 isid_hi;
927 u16 tsih;
928#elif defined(__LITTLE_ENDIAN)
929 u16 tsih;
930 u16 isid_hi;
931#endif
932#if defined(__BIG_ENDIAN)
933 u8 status_class;
934 u8 status_detail;
935 u16 reserved4;
936#elif defined(__LITTLE_ENDIAN)
937 u16 reserved4;
938 u8 status_detail;
939 u8 status_class;
940#endif
941 u32 reserved5[3];
942#if defined(__BIG_ENDIAN)
943 u16 reserved6;
944 u16 itt;
945#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
946#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
947#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
948#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
949#elif defined(__LITTLE_ENDIAN)
950 u16 itt;
951#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
952#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
953#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
954#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
955 u16 reserved6;
956#endif
957 u32 cq_req_sn;
958};
959
960
961/*
962 * iSCSI Logout SQ WQE
963 */
964struct bnx2i_logout_request {
965#if defined(__BIG_ENDIAN)
966 u8 op_code;
967 u8 op_attr;
968#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
969#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
970#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
971#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
972 u16 reserved0;
973#elif defined(__LITTLE_ENDIAN)
974 u16 reserved0;
975 u8 op_attr;
976#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
977#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
978#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
979#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
980 u8 op_code;
981#endif
982 u32 data_length;
983 u32 reserved1[2];
984#if defined(__BIG_ENDIAN)
985 u16 reserved2;
986 u16 itt;
987#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
988#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
989#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
990#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
991#elif defined(__LITTLE_ENDIAN)
992 u16 itt;
993#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
994#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
995#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
996#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
997 u16 reserved2;
998#endif
999#if defined(__BIG_ENDIAN)
1000 u16 cid;
1001 u16 reserved3;
1002#elif defined(__LITTLE_ENDIAN)
1003 u16 reserved3;
1004 u16 cid;
1005#endif
1006 u32 cmd_sn;
1007 u32 reserved4[5];
1008 u32 zero_fill;
1009 u32 bd_list_addr_lo;
1010 u32 bd_list_addr_hi;
1011#if defined(__BIG_ENDIAN)
1012 u8 cq_index;
1013 u8 reserved6;
1014 u8 reserved5;
1015 u8 num_bds;
1016#elif defined(__LITTLE_ENDIAN)
1017 u8 num_bds;
1018 u8 reserved5;
1019 u8 reserved6;
1020 u8 cq_index;
1021#endif
1022};
1023
1024
1025/*
1026 * iSCSI Logout CQE
1027 */
1028struct bnx2i_logout_response {
1029#if defined(__BIG_ENDIAN)
1030 u8 op_code;
1031 u8 reserved1;
1032 u8 response;
1033 u8 reserved0;
1034#elif defined(__LITTLE_ENDIAN)
1035 u8 reserved0;
1036 u8 response;
1037 u8 reserved1;
1038 u8 op_code;
1039#endif
1040 u32 reserved2;
1041 u32 exp_cmd_sn;
1042 u32 max_cmd_sn;
1043 u32 reserved3[2];
1044#if defined(__BIG_ENDIAN)
1045 u16 reserved5;
1046 u8 err_code;
1047 u8 reserved4;
1048#elif defined(__LITTLE_ENDIAN)
1049 u8 reserved4;
1050 u8 err_code;
1051 u16 reserved5;
1052#endif
1053 u32 reserved6[3];
1054#if defined(__BIG_ENDIAN)
1055 u16 time_to_wait;
1056 u16 time_to_retain;
1057#elif defined(__LITTLE_ENDIAN)
1058 u16 time_to_retain;
1059 u16 time_to_wait;
1060#endif
1061 u32 reserved7[3];
1062#if defined(__BIG_ENDIAN)
1063 u16 reserved8;
1064 u16 itt;
1065#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
1066#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
1067#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
1068#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
1069#elif defined(__LITTLE_ENDIAN)
1070 u16 itt;
1071#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
1072#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
1073#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
1074#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
1075 u16 reserved8;
1076#endif
1077 u32 cq_req_sn;
1078};
1079
1080
1081/*
1082 * iSCSI Nop-In CQE
1083 */
1084struct bnx2i_nop_in_msg {
1085#if defined(__BIG_ENDIAN)
1086 u8 op_code;
1087 u8 reserved1;
1088 u16 reserved0;
1089#elif defined(__LITTLE_ENDIAN)
1090 u16 reserved0;
1091 u8 reserved1;
1092 u8 op_code;
1093#endif
1094 u32 data_length;
1095 u32 exp_cmd_sn;
1096 u32 max_cmd_sn;
1097 u32 ttt;
1098 u32 reserved2;
1099#if defined(__BIG_ENDIAN)
1100 u16 reserved4;
1101 u8 err_code;
1102 u8 reserved3;
1103#elif defined(__LITTLE_ENDIAN)
1104 u8 reserved3;
1105 u8 err_code;
1106 u16 reserved4;
1107#endif
1108 u32 reserved5;
1109 u32 lun[2];
1110 u32 reserved6[4];
1111#if defined(__BIG_ENDIAN)
1112 u16 reserved7;
1113 u16 itt;
1114#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
1115#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
1116#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
1117#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
1118#elif defined(__LITTLE_ENDIAN)
1119 u16 itt;
1120#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
1121#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
1122#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
1123#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
1124 u16 reserved7;
1125#endif
1126 u32 cq_req_sn;
1127};
1128
1129
1130/*
1131 * iSCSI NOP-OUT SQ WQE
1132 */
1133struct bnx2i_nop_out_request {
1134#if defined(__BIG_ENDIAN)
1135 u8 op_code;
1136 u8 op_attr;
1137#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
1138#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
1139#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
1140#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
1141 u16 reserved0;
1142#elif defined(__LITTLE_ENDIAN)
1143 u16 reserved0;
1144 u8 op_attr;
1145#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
1146#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
1147#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
1148#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
1149 u8 op_code;
1150#endif
1151 u32 data_length;
1152 u32 lun[2];
1153#if defined(__BIG_ENDIAN)
1154 u16 reserved2;
1155 u16 itt;
1156#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
1157#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
1158#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
1159#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
1160#elif defined(__LITTLE_ENDIAN)
1161 u16 itt;
1162#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
1163#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
1164#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
1165#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
1166 u16 reserved2;
1167#endif
1168 u32 ttt;
1169 u32 cmd_sn;
1170 u32 reserved3[2];
1171 u32 resp_bd_list_addr_lo;
1172 u32 resp_bd_list_addr_hi;
1173 u32 resp_buffer;
1174#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
1175#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
1176#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24)
1177#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24
1178#if defined(__BIG_ENDIAN)
1179 u16 reserved7;
1180 u8 reserved6;
1181 u8 flags;
1182#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
1183#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
1184#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
1185#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
1186#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
1187#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
1188#elif defined(__LITTLE_ENDIAN)
1189 u8 flags;
1190#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
1191#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
1192#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
1193#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
1194#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
1195#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
1196 u8 reserved6;
1197 u16 reserved7;
1198#endif
1199 u32 bd_list_addr_lo;
1200 u32 bd_list_addr_hi;
1201#if defined(__BIG_ENDIAN)
1202 u8 cq_index;
1203 u8 reserved9;
1204 u8 reserved8;
1205 u8 num_bds;
1206#elif defined(__LITTLE_ENDIAN)
1207 u8 num_bds;
1208 u8 reserved8;
1209 u8 reserved9;
1210 u8 cq_index;
1211#endif
1212};
1213
1214/*
1215 * iSCSI Reject CQE
1216 */
1217struct bnx2i_reject_msg {
1218#if defined(__BIG_ENDIAN)
1219 u8 op_code;
1220 u8 reserved1;
1221 u8 reason;
1222 u8 reserved0;
1223#elif defined(__LITTLE_ENDIAN)
1224 u8 reserved0;
1225 u8 reason;
1226 u8 reserved1;
1227 u8 op_code;
1228#endif
1229 u32 data_length;
1230 u32 exp_cmd_sn;
1231 u32 max_cmd_sn;
1232 u32 reserved2[2];
1233#if defined(__BIG_ENDIAN)
1234 u16 reserved4;
1235 u8 err_code;
1236 u8 reserved3;
1237#elif defined(__LITTLE_ENDIAN)
1238 u8 reserved3;
1239 u8 err_code;
1240 u16 reserved4;
1241#endif
1242 u32 reserved5[8];
1243 u32 cq_req_sn;
1244};
1245
1246/*
1247 * bnx2i iSCSI TMF SQ WQE
1248 */
1249struct bnx2i_tmf_request {
1250#if defined(__BIG_ENDIAN)
1251 u8 op_code;
1252 u8 op_attr;
1253#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
1254#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
1255#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
1256#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
1257 u16 reserved0;
1258#elif defined(__LITTLE_ENDIAN)
1259 u16 reserved0;
1260 u8 op_attr;
1261#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
1262#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
1263#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
1264#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
1265 u8 op_code;
1266#endif
1267 u32 data_length;
1268 u32 lun[2];
1269#if defined(__BIG_ENDIAN)
1270 u16 reserved1;
1271 u16 itt;
1272#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
1273#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
1274#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
1275#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
1276#elif defined(__LITTLE_ENDIAN)
1277 u16 itt;
1278#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
1279#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
1280#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
1281#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
1282 u16 reserved1;
1283#endif
1284 u32 ref_itt;
1285 u32 cmd_sn;
1286 u32 reserved2;
1287 u32 ref_cmd_sn;
1288 u32 reserved3[3];
1289 u32 zero_fill;
1290 u32 bd_list_addr_lo;
1291 u32 bd_list_addr_hi;
1292#if defined(__BIG_ENDIAN)
1293 u8 cq_index;
1294 u8 reserved5;
1295 u8 reserved4;
1296 u8 num_bds;
1297#elif defined(__LITTLE_ENDIAN)
1298 u8 num_bds;
1299 u8 reserved4;
1300 u8 reserved5;
1301 u8 cq_index;
1302#endif
1303};
1304
1305/*
1306 * iSCSI Text SQ WQE
1307 */
1308struct bnx2i_text_request {
1309#if defined(__BIG_ENDIAN)
1310 u8 op_code;
1311 u8 op_attr;
1312#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
1313#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
1314#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
1315#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
1316#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
1317#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
1318 u16 reserved0;
1319#elif defined(__LITTLE_ENDIAN)
1320 u16 reserved0;
1321 u8 op_attr;
1322#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
1323#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
1324#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
1325#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
1326#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
1327#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
1328 u8 op_code;
1329#endif
1330 u32 data_length;
1331 u32 lun[2];
1332#if defined(__BIG_ENDIAN)
1333 u16 reserved3;
1334 u16 itt;
1335#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
1336#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
1337#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
1338#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
1339#elif defined(__LITTLE_ENDIAN)
1340 u16 itt;
1341#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
1342#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
1343#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
1344#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
1345 u16 reserved3;
1346#endif
1347 u32 ttt;
1348 u32 cmd_sn;
1349 u32 reserved4[2];
1350 u32 resp_bd_list_addr_lo;
1351 u32 resp_bd_list_addr_hi;
1352 u32 resp_buffer;
1353#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
1354#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
1355#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24)
1356#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24
1357 u32 zero_fill;
1358 u32 bd_list_addr_lo;
1359 u32 bd_list_addr_hi;
1360#if defined(__BIG_ENDIAN)
1361 u8 cq_index;
1362 u8 reserved7;
1363 u8 reserved6;
1364 u8 num_bds;
1365#elif defined(__LITTLE_ENDIAN)
1366 u8 num_bds;
1367 u8 reserved6;
1368 u8 reserved7;
1369 u8 cq_index;
1370#endif
1371};
1372
1373/*
1374 * iSCSI SQ WQE
1375 */
1376union iscsi_request {
1377 struct bnx2i_cmd_request cmd;
1378 struct bnx2i_tmf_request tmf;
1379 struct bnx2i_nop_out_request nop_out;
1380 struct bnx2i_login_request login_req;
1381 struct bnx2i_text_request text;
1382 struct bnx2i_logout_request logout_req;
1383 struct bnx2i_cleanup_request cleanup;
1384};
1385
1386
1387/*
1388 * iSCSI TMF CQE
1389 */
1390struct bnx2i_tmf_response {
1391#if defined(__BIG_ENDIAN)
1392 u8 op_code;
1393 u8 reserved1;
1394 u8 response;
1395 u8 reserved0;
1396#elif defined(__LITTLE_ENDIAN)
1397 u8 reserved0;
1398 u8 response;
1399 u8 reserved1;
1400 u8 op_code;
1401#endif
1402 u32 reserved2;
1403 u32 exp_cmd_sn;
1404 u32 max_cmd_sn;
1405 u32 reserved3[2];
1406#if defined(__BIG_ENDIAN)
1407 u16 reserved5;
1408 u8 err_code;
1409 u8 reserved4;
1410#elif defined(__LITTLE_ENDIAN)
1411 u8 reserved4;
1412 u8 err_code;
1413 u16 reserved5;
1414#endif
1415 u32 reserved6[7];
1416#if defined(__BIG_ENDIAN)
1417 u16 reserved7;
1418 u16 itt;
1419#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
1420#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
1421#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
1422#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
1423#elif defined(__LITTLE_ENDIAN)
1424 u16 itt;
1425#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
1426#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
1427#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
1428#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
1429 u16 reserved7;
1430#endif
1431 u32 cq_req_sn;
1432};
1433
1434/*
1435 * iSCSI Text CQE
1436 */
1437struct bnx2i_text_response {
1438#if defined(__BIG_ENDIAN)
1439 u8 op_code;
1440 u8 response_flags;
1441#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
1442#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
1443#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
1444#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
1445#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
1446#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
1447 u16 reserved0;
1448#elif defined(__LITTLE_ENDIAN)
1449 u16 reserved0;
1450 u8 response_flags;
1451#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
1452#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
1453#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
1454#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
1455#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
1456#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
1457 u8 op_code;
1458#endif
1459 u32 data_length;
1460 u32 exp_cmd_sn;
1461 u32 max_cmd_sn;
1462 u32 ttt;
1463 u32 reserved2;
1464#if defined(__BIG_ENDIAN)
1465 u16 reserved4;
1466 u8 err_code;
1467 u8 reserved3;
1468#elif defined(__LITTLE_ENDIAN)
1469 u8 reserved3;
1470 u8 err_code;
1471 u16 reserved4;
1472#endif
1473 u32 reserved5;
1474 u32 lun[2];
1475 u32 reserved6[4];
1476#if defined(__BIG_ENDIAN)
1477 u16 reserved7;
1478 u16 itt;
1479#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
1480#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
1481#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
1482#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
1483#elif defined(__LITTLE_ENDIAN)
1484 u16 itt;
1485#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
1486#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
1487#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
1488#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
1489 u16 reserved7;
1490#endif
1491 u32 cq_req_sn;
1492};
1493
1494/*
1495 * iSCSI CQE
1496 */
1497union iscsi_response {
1498 struct bnx2i_cmd_response cmd;
1499 struct bnx2i_tmf_response tmf;
1500 struct bnx2i_login_response login_resp;
1501 struct bnx2i_text_response text;
1502 struct bnx2i_logout_response logout_resp;
1503 struct bnx2i_cleanup_response cleanup;
1504 struct bnx2i_reject_msg reject;
1505 struct bnx2i_async_msg async;
1506 struct bnx2i_nop_in_msg nop_in;
1507};
1508
1509#endif /* __57XX_ISCSI_HSI_LINUX_LE__ */
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
new file mode 100644
index 00000000000..820d428ae83
--- /dev/null
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -0,0 +1,7 @@
1config SCSI_BNX2_ISCSI
2 tristate "Broadcom NetXtreme II iSCSI support"
3 select SCSI_ISCSI_ATTRS
4 select CNIC
5 ---help---
6 This driver supports iSCSI offload for the Broadcom NetXtreme II
7 devices.
diff --git a/drivers/scsi/bnx2i/Makefile b/drivers/scsi/bnx2i/Makefile
new file mode 100644
index 00000000000..b5802bd2e76
--- /dev/null
+++ b/drivers/scsi/bnx2i/Makefile
@@ -0,0 +1,3 @@
1bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o
2
3obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
new file mode 100644
index 00000000000..d7576f28c6e
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -0,0 +1,771 @@
1/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 */
13
14#ifndef _BNX2I_H_
15#define _BNX2I_H_
16
17#include <linux/module.h>
18#include <linux/moduleparam.h>
19
20#include <linux/errno.h>
21#include <linux/pci.h>
22#include <linux/spinlock.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/in.h>
26#include <linux/kfifo.h>
27#include <linux/netdevice.h>
28#include <linux/completion.h>
29
30#include <scsi/scsi_cmnd.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_eh.h>
33#include <scsi/scsi_host.h>
34#include <scsi/scsi.h>
35#include <scsi/iscsi_proto.h>
36#include <scsi/libiscsi.h>
37#include <scsi/scsi_transport_iscsi.h>
38
39#include "../../net/cnic_if.h"
40#include "57xx_iscsi_hsi.h"
41#include "57xx_iscsi_constants.h"
42
43#define BNX2_ISCSI_DRIVER_NAME "bnx2i"
44
45#define BNX2I_MAX_ADAPTERS 8
46
47#define ISCSI_MAX_CONNS_PER_HBA 128
48#define ISCSI_MAX_SESS_PER_HBA ISCSI_MAX_CONNS_PER_HBA
49#define ISCSI_MAX_CMDS_PER_SESS 128
50
51/* Total active commands across all connections supported by devices */
52#define ISCSI_MAX_CMDS_PER_HBA_5708 (28 * (ISCSI_MAX_CMDS_PER_SESS - 1))
53#define ISCSI_MAX_CMDS_PER_HBA_5709 (128 * (ISCSI_MAX_CMDS_PER_SESS - 1))
54#define ISCSI_MAX_CMDS_PER_HBA_57710 (256 * (ISCSI_MAX_CMDS_PER_SESS - 1))
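
/*
 * Worked example of the totals above: with ISCSI_MAX_CMDS_PER_SESS = 128,
 * (128 - 1) = 127 commands count against each session, giving
 * 28 * 127 = 3556 (5708), 128 * 127 = 16256 (5709) and
 * 256 * 127 = 32512 (57710) active commands per adapter.
 */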
55
56#define ISCSI_MAX_BDS_PER_CMD 32
57
58#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
59#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4
60
61/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
62#define MAX_BD_LENGTH 65535
63#define BD_SPLIT_SIZE 32768
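
/*
 * Illustration: a single 70000 byte buffer exceeds MAX_BD_LENGTH on
 * 5706/08 and would be carved into BD_SPLIT_SIZE chunks, i.e.
 * DIV_ROUND_UP(70000, 32768) = 3 buffer descriptors.
 */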
64
 65/* min, max & default values for SQ/RQ/CQ size, configurable via modparam */
66#define BNX2I_SQ_WQES_MIN 16
67#define BNX2I_570X_SQ_WQES_MAX 128
68#define BNX2I_5770X_SQ_WQES_MAX 512
69#define BNX2I_570X_SQ_WQES_DEFAULT 128
70#define BNX2I_5770X_SQ_WQES_DEFAULT 256
71
72#define BNX2I_570X_CQ_WQES_MAX 128
73#define BNX2I_5770X_CQ_WQES_MAX 512
74
75#define BNX2I_RQ_WQES_MIN 16
76#define BNX2I_RQ_WQES_MAX 32
77#define BNX2I_RQ_WQES_DEFAULT 16
78
79/* CCELLs per conn */
80#define BNX2I_CCELLS_MIN 16
81#define BNX2I_CCELLS_MAX 96
82#define BNX2I_CCELLS_DEFAULT 64
83
84#define ITT_INVALID_SIGNATURE 0xFFFF
85
86#define ISCSI_CMD_CLEANUP_TIMEOUT 100
87
88#define BNX2I_CONN_CTX_BUF_SIZE 16384
89
90#define BNX2I_SQ_WQE_SIZE 64
91#define BNX2I_RQ_WQE_SIZE 256
92#define BNX2I_CQE_SIZE 64
93
94#define MB_KERNEL_CTX_SHIFT 8
95#define MB_KERNEL_CTX_SIZE (1 << MB_KERNEL_CTX_SHIFT)
96
97#define CTX_SHIFT 7
98#define GET_CID_NUM(cid_addr) ((cid_addr) >> CTX_SHIFT)
99
100#define CTX_OFFSET 0x10000
101#define MAX_CID_CNT 0x4000
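
/*
 * Example: each context is 1 << CTX_SHIFT = 128 bytes wide, so
 * GET_CID_NUM(0x1280) yields 0x1280 >> 7 = 0x25.
 */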
102
103/* 5709 context registers */
104#define BNX2_MQ_CONFIG2 0x00003d00
105#define BNX2_MQ_CONFIG2_CONT_SZ (0x7L<<4)
106#define BNX2_MQ_CONFIG2_FIRST_L4L5 (0x1fL<<8)
107
108/* 57710's BAR2 is mapped to doorbell registers */
109#define BNX2X_DOORBELL_PCI_BAR 2
110#define BNX2X_MAX_CQS 8
111
112#define CNIC_ARM_CQE 1
113#define CNIC_DISARM_CQE 0
114
115#define REG_RD(__hba, offset) \
116 readl(__hba->regview + offset)
117#define REG_WR(__hba, offset, val) \
118 writel(val, __hba->regview + offset)
119
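/*
 * Usage sketch (illustrative only) of the accessors above, doing a
 * read-modify-write of the 5709 context register defined earlier:
 *
 *	u32 val = REG_RD(hba, BNX2_MQ_CONFIG2);
 *	val &= ~BNX2_MQ_CONFIG2_CONT_SZ;
 *	REG_WR(hba, BNX2_MQ_CONFIG2, val);
 */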
120
121/**
122 * struct generic_pdu_resc - login pdu resource structure
123 *
124 * @req_buf: driver buffer used to stage payload associated with
125 * the login request
126 * @req_dma_addr: dma address for iscsi login request payload buffer
127 * @req_buf_size: actual login request payload length
 128 * @req_wr_ptr: pointer into login request buffer where next data is
 129 * to be written
 130 * @resp_hdr: iscsi header where iscsi login response header is to
 131 * be recreated
 132 * @resp_buf: buffer to stage login response payload
 133 * @resp_dma_addr: login response payload buffer dma address
 134 * @resp_buf_size: login response payload length
 135 * @resp_wr_ptr: pointer into login response buffer where next data is
 136 * to be written
137 * @req_bd_tbl: iscsi login request payload BD table
138 * @req_bd_dma: login request BD table dma address
139 * @resp_bd_tbl: iscsi login response payload BD table
 140 * @resp_bd_dma: login response BD table dma address
141 *
 142 * The following structure defines buffer info for generic PDUs such as iSCSI Login,
143 * Logout and NOP
144 */
145struct generic_pdu_resc {
146 char *req_buf;
147 dma_addr_t req_dma_addr;
148 u32 req_buf_size;
149 char *req_wr_ptr;
150 struct iscsi_hdr resp_hdr;
151 char *resp_buf;
152 dma_addr_t resp_dma_addr;
153 u32 resp_buf_size;
154 char *resp_wr_ptr;
155 char *req_bd_tbl;
156 dma_addr_t req_bd_dma;
157 char *resp_bd_tbl;
158 dma_addr_t resp_bd_dma;
159};
160
161
162/**
163 * struct bd_resc_page - tracks DMA'able memory allocated for BD tables
164 *
165 * @link: list head to link elements
 166 * @max_ptrs: maximum number of pointers that can be stored in this page
 167 * @num_valid: number of valid pointers in this page
 168 * @page: base address for page pointer array
169 *
170 * structure to track DMA'able memory allocated for command BD tables
171 */
172struct bd_resc_page {
173 struct list_head link;
174 u32 max_ptrs;
175 u32 num_valid;
176 void *page[1];
177};
178
179
180/**
 181 * struct io_bdt - I/O buffer descriptor table
182 *
183 * @bd_tbl: BD table's virtual address
184 * @bd_tbl_dma: BD table's dma address
185 * @bd_valid: num valid BD entries
186 *
187 * IO BD table
188 */
189struct io_bdt {
190 struct iscsi_bd *bd_tbl;
191 dma_addr_t bd_tbl_dma;
192 u16 bd_valid;
193};
194
195
196/**
 197 * struct bnx2i_cmd - iscsi command structure
198 *
199 * @scsi_cmd: SCSI-ML task pointer corresponding to this iscsi cmd
200 * @sg: SG list
201 * @io_tbl: buffer descriptor (BD) table
202 * @bd_tbl_dma: buffer descriptor (BD) table's dma address
203 */
204struct bnx2i_cmd {
205 struct iscsi_hdr hdr;
206 struct bnx2i_conn *conn;
207 struct scsi_cmnd *scsi_cmd;
208 struct scatterlist *sg;
209 struct io_bdt io_tbl;
210 dma_addr_t bd_tbl_dma;
211 struct bnx2i_cmd_request req;
212};
213
214
215/**
216 * struct bnx2i_conn - iscsi connection structure
217 *
218 * @cls_conn: pointer to iscsi cls conn
219 * @hba: adapter structure pointer
220 * @iscsi_conn_cid: iscsi conn id
221 * @fw_cid: firmware iscsi context id
222 * @ep: endpoint structure pointer
223 * @gen_pdu: login/nopout/logout pdu resources
224 * @violation_notified: bit mask used to track iscsi error/warning messages
225 * already printed out
226 *
227 * iSCSI connection structure
228 */
229struct bnx2i_conn {
230 struct iscsi_cls_conn *cls_conn;
231 struct bnx2i_hba *hba;
232 struct completion cmd_cleanup_cmpl;
233 int is_bound;
234
235 u32 iscsi_conn_cid;
236#define BNX2I_CID_RESERVED 0x5AFF
237 u32 fw_cid;
238
239 struct timer_list poll_timer;
240 /*
241 * Queue Pair (QP) related structure elements.
242 */
243 struct bnx2i_endpoint *ep;
244
245 /*
246 * Buffer for login negotiation process
247 */
248 struct generic_pdu_resc gen_pdu;
249 u64 violation_notified;
250};
251
252
253
254/**
255 * struct iscsi_cid_queue - Per adapter iscsi cid queue
256 *
257 * @cid_que_base: queue base memory
258 * @cid_que: queue memory pointer
 259 * @cid_q_prod_idx: producer index
 260 * @cid_q_cons_idx: consumer index
 261 * @cid_q_max_idx: max index, used to detect wrap-around condition
262 * @cid_free_cnt: queue size
263 * @conn_cid_tbl: iscsi cid to conn structure mapping table
264 *
265 * Per adapter iSCSI CID Queue
266 */
267struct iscsi_cid_queue {
268 void *cid_que_base;
269 u32 *cid_que;
270 u32 cid_q_prod_idx;
271 u32 cid_q_cons_idx;
272 u32 cid_q_max_idx;
273 u32 cid_free_cnt;
274 struct bnx2i_conn **conn_cid_tbl;
275};
276
277/**
278 * struct bnx2i_hba - bnx2i adapter structure
279 *
280 * @link: list head to link elements
281 * @cnic: pointer to cnic device
282 * @pcidev: pointer to pci dev
283 * @netdev: pointer to netdev structure
284 * @regview: mapped PCI register space
 285 * @age: age, incremented on every recovery
 286 * @cnic_dev_type: cnic device type, 5706/5708/5709/57710
 287 * @mail_queue_access: mailbox queue access mode, applicable to 5709 only
 288 * @reg_with_cnic: indicates whether the device is registered with CNIC
289 * @adapter_state: adapter state, UP, GOING_DOWN, LINK_DOWN
290 * @mtu_supported: Ethernet MTU supported
291 * @shost: scsi host pointer
292 * @max_sqes: SQ size
293 * @max_rqes: RQ size
294 * @max_cqes: CQ size
295 * @num_ccell: number of command cells per connection
296 * @ofld_conns_active: active connection list
297 * @max_active_conns: max offload connections supported by this device
298 * @cid_que: iscsi cid queue
299 * @ep_rdwr_lock: read / write lock to synchronize various ep lists
300 * @ep_ofld_list: connection list for pending offload completion
 301 * @ep_destroy_list: connection list for pending destroy completion
302 * @mp_bd_tbl: BD table to be used with middle path requests
303 * @mp_bd_dma: DMA address of 'mp_bd_tbl' memory buffer
304 * @dummy_buffer: Dummy buffer to be used with zero length scsicmd reqs
305 * @dummy_buf_dma: DMA address of 'dummy_buffer' memory buffer
 306 * @lock: lock to synchronize access to hba structure
307 * @pci_did: PCI device ID
308 * @pci_vid: PCI vendor ID
309 * @pci_sdid: PCI subsystem device ID
310 * @pci_svid: PCI subsystem vendor ID
311 * @pci_func: PCI function number in system pci tree
312 * @pci_devno: PCI device number in system pci tree
313 * @num_wqe_sent: statistic counter, total wqe's sent
314 * @num_cqe_rcvd: statistic counter, total cqe's received
315 * @num_intr_claimed: statistic counter, total interrupts claimed
316 * @link_changed_count: statistic counter, num of link change notifications
317 * received
318 * @ipaddr_changed_count: statistic counter, num times IP address changed while
319 * at least one connection is offloaded
320 * @num_sess_opened: statistic counter, total num sessions opened
321 * @num_conn_opened: statistic counter, total num conns opened on this hba
322 * @ctx_ccell_tasks: captures number of ccells and tasks supported by
323 * currently offloaded connection, used to decode
324 * context memory
325 *
326 * Adapter Data Structure
327 */
328struct bnx2i_hba {
329 struct list_head link;
330 struct cnic_dev *cnic;
331 struct pci_dev *pcidev;
332 struct net_device *netdev;
333 void __iomem *regview;
334
335 u32 age;
336 unsigned long cnic_dev_type;
337 #define BNX2I_NX2_DEV_5706 0x0
338 #define BNX2I_NX2_DEV_5708 0x1
339 #define BNX2I_NX2_DEV_5709 0x2
340 #define BNX2I_NX2_DEV_57710 0x3
341 u32 mail_queue_access;
342 #define BNX2I_MQ_KERNEL_MODE 0x0
343 #define BNX2I_MQ_KERNEL_BYPASS_MODE 0x1
344 #define BNX2I_MQ_BIN_MODE 0x2
345 unsigned long reg_with_cnic;
346 #define BNX2I_CNIC_REGISTERED 1
347
348 unsigned long adapter_state;
349 #define ADAPTER_STATE_UP 0
350 #define ADAPTER_STATE_GOING_DOWN 1
351 #define ADAPTER_STATE_LINK_DOWN 2
352 #define ADAPTER_STATE_INIT_FAILED 31
353 unsigned int mtu_supported;
354 #define BNX2I_MAX_MTU_SUPPORTED 1500
355
356 struct Scsi_Host *shost;
357
358 u32 max_sqes;
359 u32 max_rqes;
360 u32 max_cqes;
361 u32 num_ccell;
362
363 int ofld_conns_active;
364
365 int max_active_conns;
366 struct iscsi_cid_queue cid_que;
367
368 rwlock_t ep_rdwr_lock;
369 struct list_head ep_ofld_list;
370 struct list_head ep_destroy_list;
371
372 /*
 373	 * BD table to be used with MP (Middle Path) requests.
374 */
375 char *mp_bd_tbl;
376 dma_addr_t mp_bd_dma;
377 char *dummy_buffer;
378 dma_addr_t dummy_buf_dma;
379
380 spinlock_t lock; /* protects hba structure access */
381 struct mutex net_dev_lock;/* sync net device access */
382
383 /*
384 * PCI related info.
385 */
386 u16 pci_did;
387 u16 pci_vid;
388 u16 pci_sdid;
389 u16 pci_svid;
390 u16 pci_func;
391 u16 pci_devno;
392
393 /*
394 * Following are a bunch of statistics useful during development
 395	 * and at a later stage for scoreboarding.
396 */
397 u32 num_wqe_sent;
398 u32 num_cqe_rcvd;
399 u32 num_intr_claimed;
400 u32 link_changed_count;
401 u32 ipaddr_changed_count;
402 u32 num_sess_opened;
403 u32 num_conn_opened;
404 unsigned int ctx_ccell_tasks;
405};
406
407
408/*******************************************************************************
409 * QP [ SQ / RQ / CQ ] info.
410 ******************************************************************************/
411
412/*
413 * SQ/RQ/CQ generic structure definition
414 */
415struct sqe {
416 u8 sqe_byte[BNX2I_SQ_WQE_SIZE];
417};
418
419struct rqe {
420 u8 rqe_byte[BNX2I_RQ_WQE_SIZE];
421};
422
423struct cqe {
424 u8 cqe_byte[BNX2I_CQE_SIZE];
425};
426
427
428enum {
429#if defined(__LITTLE_ENDIAN)
430 CNIC_EVENT_COAL_INDEX = 0x0,
431 CNIC_SEND_DOORBELL = 0x4,
432 CNIC_EVENT_CQ_ARM = 0x7,
433 CNIC_RECV_DOORBELL = 0x8
434#elif defined(__BIG_ENDIAN)
435 CNIC_EVENT_COAL_INDEX = 0x2,
436 CNIC_SEND_DOORBELL = 0x6,
437 CNIC_EVENT_CQ_ARM = 0x4,
438 CNIC_RECV_DOORBELL = 0xa
439#endif
440};
441
442
443/*
444 * CQ DB
445 */
446struct bnx2x_iscsi_cq_pend_cmpl {
447 /* CQ producer, updated by Ustorm */
448 u16 ustrom_prod;
449 /* CQ pending completion counter */
450 u16 pend_cntr;
451};
452
453
454struct bnx2i_5771x_cq_db {
455 struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS];
456 /* CQ pending completion ITT array */
457 u16 itt[BNX2X_MAX_CQS];
 458	/* Cstorm CQ sequence to notify array, updated by driver */
459 u16 sqn[BNX2X_MAX_CQS];
 460	u32 reserved[4]; /* 16 byte alignment */
461};
462
463
464struct bnx2i_5771x_sq_rq_db {
465 u16 prod_idx;
466 u8 reserved0[14]; /* Pad structure size to 16 bytes */
467};
468
469
470struct bnx2i_5771x_dbell_hdr {
471 u8 header;
472 /* 1 for rx doorbell, 0 for tx doorbell */
473#define B577XX_DOORBELL_HDR_RX (0x1<<0)
474#define B577XX_DOORBELL_HDR_RX_SHIFT 0
475 /* 0 for normal doorbell, 1 for advertise wnd doorbell */
476#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1)
477#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1
478 /* rdma tx only: DPM transaction size specifier (64/128/256/512B) */
479#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2)
480#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2
481 /* connection type */
482#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4)
483#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4
484};
485
486struct bnx2i_5771x_dbell {
487 struct bnx2i_5771x_dbell_hdr dbell;
488 u8 pad[3];
489
490};
491
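/*
 * Sketch (illustrative only): composing the doorbell header byte from
 * the bit-field masks above; 'conn_type' stands in for a connection
 * type constant such as the one bnx2i_ring_577xx_doorbell() uses.
 *
 *	struct bnx2i_5771x_dbell db;
 *
 *	memset(&db, 0, sizeof(db));
 *	db.dbell.header = B577XX_DOORBELL_HDR_RX |
 *			  (conn_type << B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);
 */
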
492/**
 493 * struct qp_info - QP (shared queue region) attributes structure
494 *
495 * @ctx_base: ioremapped pci register base to access doorbell register
496 * pertaining to this offloaded connection
497 * @sq_virt: virtual address of send queue (SQ) region
498 * @sq_phys: DMA address of SQ memory region
499 * @sq_mem_size: SQ size
500 * @sq_prod_qe: SQ producer entry pointer
501 * @sq_cons_qe: SQ consumer entry pointer
 502 * @sq_first_qe: virtual address of first entry in SQ
 503 * @sq_last_qe: virtual address of last entry in SQ
 504 * @sq_prod_idx: SQ producer index
 505 * @sq_cons_idx: SQ consumer index
 506 * @sqe_left: number of SQ entries left
 507 * @sq_pgtbl_virt: page table describing buffer constituting SQ region
508 * @sq_pgtbl_phys: dma address of 'sq_pgtbl_virt'
509 * @sq_pgtbl_size: SQ page table size
510 * @cq_virt: virtual address of completion queue (CQ) region
 511 * @cq_phys: DMA address of CQ memory region
 512 * @cq_mem_size: CQ size
 513 * @cq_prod_qe: CQ producer entry pointer
 514 * @cq_cons_qe: CQ consumer entry pointer
 515 * @cq_first_qe: virtual address of first entry in CQ
 516 * @cq_last_qe: virtual address of last entry in CQ
 517 * @cq_prod_idx: CQ producer index
 518 * @cq_cons_idx: CQ consumer index
 519 * @cqe_left: number of CQ entries left
 520 * @cqe_size: size of each CQ entry
 521 * @cqe_exp_seq_sn: next expected CQE sequence number
 522 * @cq_pgtbl_virt: page table describing buffer constituting CQ region
523 * @cq_pgtbl_phys: dma address of 'cq_pgtbl_virt'
524 * @cq_pgtbl_size: CQ page table size
525 * @rq_virt: virtual address of receive queue (RQ) region
526 * @rq_phys: DMA address of RQ memory region
527 * @rq_mem_size: RQ size
528 * @rq_prod_qe: RQ producer entry pointer
529 * @rq_cons_qe: RQ consumer entry pointer
 530 * @rq_first_qe: virtual address of first entry in RQ
 531 * @rq_last_qe: virtual address of last entry in RQ
 532 * @rq_prod_idx: RQ producer index
 533 * @rq_cons_idx: RQ consumer index
 534 * @rqe_left: number of RQ entries left
 535 * @rq_pgtbl_virt: page table describing buffer constituting RQ region
536 * @rq_pgtbl_phys: dma address of 'rq_pgtbl_virt'
537 * @rq_pgtbl_size: RQ page table size
538 *
539 * queue pair (QP) is a per connection shared data structure which is used
540 * to send work requests (SQ), receive completion notifications (CQ)
 541 * and receive asynchronous / scsi sense info (RQ). 'qp_info' structure
542 * below holds queue memory, consumer/producer indexes and page table
543 * information
544 */
545struct qp_info {
546 void __iomem *ctx_base;
547#define DPM_TRIGER_TYPE 0x40
548
549#define BNX2I_570x_QUE_DB_SIZE 0
550#define BNX2I_5771x_QUE_DB_SIZE 16
551 struct sqe *sq_virt;
552 dma_addr_t sq_phys;
553 u32 sq_mem_size;
554
555 struct sqe *sq_prod_qe;
556 struct sqe *sq_cons_qe;
557 struct sqe *sq_first_qe;
558 struct sqe *sq_last_qe;
559 u16 sq_prod_idx;
560 u16 sq_cons_idx;
561 u32 sqe_left;
562
563 void *sq_pgtbl_virt;
564 dma_addr_t sq_pgtbl_phys;
565 u32 sq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
566
567 struct cqe *cq_virt;
568 dma_addr_t cq_phys;
569 u32 cq_mem_size;
570
571 struct cqe *cq_prod_qe;
572 struct cqe *cq_cons_qe;
573 struct cqe *cq_first_qe;
574 struct cqe *cq_last_qe;
575 u16 cq_prod_idx;
576 u16 cq_cons_idx;
577 u32 cqe_left;
578 u32 cqe_size;
579 u32 cqe_exp_seq_sn;
580
581 void *cq_pgtbl_virt;
582 dma_addr_t cq_pgtbl_phys;
583 u32 cq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
584
585 struct rqe *rq_virt;
586 dma_addr_t rq_phys;
587 u32 rq_mem_size;
588
589 struct rqe *rq_prod_qe;
590 struct rqe *rq_cons_qe;
591 struct rqe *rq_first_qe;
592 struct rqe *rq_last_qe;
593 u16 rq_prod_idx;
594 u16 rq_cons_idx;
595 u32 rqe_left;
596
597 void *rq_pgtbl_virt;
598 dma_addr_t rq_pgtbl_phys;
599 u32 rq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */
600};
601
602
603
604/*
605 * CID handles
606 */
607struct ep_handles {
608 u32 fw_cid;
609 u32 drv_iscsi_cid;
610 u16 pg_cid;
611 u16 rsvd;
612};
613
614
615enum {
616 EP_STATE_IDLE = 0x0,
617 EP_STATE_PG_OFLD_START = 0x1,
618 EP_STATE_PG_OFLD_COMPL = 0x2,
619 EP_STATE_OFLD_START = 0x4,
620 EP_STATE_OFLD_COMPL = 0x8,
621 EP_STATE_CONNECT_START = 0x10,
622 EP_STATE_CONNECT_COMPL = 0x20,
623 EP_STATE_ULP_UPDATE_START = 0x40,
624 EP_STATE_ULP_UPDATE_COMPL = 0x80,
625 EP_STATE_DISCONN_START = 0x100,
626 EP_STATE_DISCONN_COMPL = 0x200,
627 EP_STATE_CLEANUP_START = 0x400,
628 EP_STATE_CLEANUP_CMPL = 0x800,
629 EP_STATE_TCP_FIN_RCVD = 0x1000,
630 EP_STATE_TCP_RST_RCVD = 0x2000,
631 EP_STATE_PG_OFLD_FAILED = 0x1000000,
632 EP_STATE_ULP_UPDATE_FAILED = 0x2000000,
633 EP_STATE_CLEANUP_FAILED = 0x4000000,
634 EP_STATE_OFLD_FAILED = 0x8000000,
635 EP_STATE_CONNECT_FAILED = 0x10000000,
636 EP_STATE_DISCONN_TIMEDOUT = 0x20000000,
637};
638
639/**
640 * struct bnx2i_endpoint - representation of tcp connection in NX2 world
641 *
642 * @link: list head to link elements
643 * @hba: adapter to which this connection belongs
644 * @conn: iscsi connection this EP is linked to
646 * @cm_sk: cnic sock struct
647 * @hba_age: age to detect if 'iscsid' issues ep_disconnect()
648 * after HBA reset is completed by bnx2i/cnic/bnx2
649 * modules
650 * @state: tracks offload connection state machine
652 * @qp: QP information
653 * @ids: contains chip allocated *context id* & driver assigned
654 * *iscsi cid*
655 * @ofld_timer: offload timer to detect timeout
656 * @ofld_wait: wait queue
657 *
658 * Endpoint Structure - equivalent of tcp socket structure
659 */
660struct bnx2i_endpoint {
661 struct list_head link;
662 struct bnx2i_hba *hba;
663 struct bnx2i_conn *conn;
664 struct cnic_sock *cm_sk;
665 u32 hba_age;
666 u32 state;
667 unsigned long timestamp;
668 int num_active_cmds;
669
670 struct qp_info qp;
671 struct ep_handles ids;
672 #define ep_iscsi_cid ids.drv_iscsi_cid
673 #define ep_cid ids.fw_cid
674 #define ep_pg_cid ids.pg_cid
675 struct timer_list ofld_timer;
676 wait_queue_head_t ofld_wait;
677};
678
679
680
681/* Global variables */
682extern unsigned int error_mask1, error_mask2;
683extern u64 iscsi_error_mask;
684extern unsigned int en_tcp_dack;
685extern unsigned int event_coal_div;
686
687extern struct scsi_transport_template *bnx2i_scsi_xport_template;
688extern struct iscsi_transport bnx2i_iscsi_transport;
689extern struct cnic_ulp_ops bnx2i_cnic_cb;
690
691extern unsigned int sq_size;
692extern unsigned int rq_size;
693
694extern struct device_attribute *bnx2i_dev_attributes[];
695
696
697
698/*
699 * Function Prototypes
700 */
701extern void bnx2i_identify_device(struct bnx2i_hba *hba);
702extern void bnx2i_register_device(struct bnx2i_hba *hba);
703
704extern void bnx2i_ulp_init(struct cnic_dev *dev);
705extern void bnx2i_ulp_exit(struct cnic_dev *dev);
706extern void bnx2i_start(void *handle);
707extern void bnx2i_stop(void *handle);
708extern void bnx2i_reg_dev_all(void);
709extern void bnx2i_unreg_dev_all(void);
710extern struct bnx2i_hba *get_adapter_list_head(void);
711
712struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
713 u16 iscsi_cid);
714
715int bnx2i_alloc_ep_pool(void);
716void bnx2i_release_ep_pool(void);
717struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
718struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
719
720struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
721
722struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
723void bnx2i_free_hba(struct bnx2i_hba *hba);
724
725void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len);
726void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count);
727
728void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd);
729
730void bnx2i_drop_session(struct iscsi_cls_session *session);
731
732extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba);
733extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
734 struct iscsi_task *mtask);
735extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
736 struct iscsi_task *mtask);
737extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
738 struct bnx2i_cmd *cmnd);
739extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
740 struct iscsi_task *mtask, u32 ttt,
741 char *datap, int data_len, int unsol);
742extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
743 struct iscsi_task *mtask);
744extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba,
745 struct bnx2i_cmd *cmd);
746extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
747 struct bnx2i_endpoint *ep);
748extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
749extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
750 struct bnx2i_endpoint *ep);
751
752extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
753 struct bnx2i_endpoint *ep);
754extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba,
755 struct bnx2i_endpoint *ep);
756extern void bnx2i_ep_ofld_timer(unsigned long data);
757extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list(
758 struct bnx2i_hba *hba, u32 iscsi_cid);
759extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
760 struct bnx2i_hba *hba, u32 iscsi_cid);
761
762extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
763extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
764
765/* Debug related function prototypes */
766extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
767extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
768extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
769extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
770
771#endif
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
new file mode 100644
index 00000000000..906cef5cda8
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -0,0 +1,2405 @@
1/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 */
13
14#include <scsi/scsi_tcq.h>
15#include <scsi/libiscsi.h>
16#include "bnx2i.h"
17
18/**
19 * bnx2i_get_cid_num - get cid from ep
20 * @ep: endpoint pointer
21 *
22 * Only applicable to 57710 family of devices
23 */
24static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep)
25{
26 u32 cid;
27
28 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
29 cid = ep->ep_cid;
30 else
31 cid = GET_CID_NUM(ep->ep_cid);
32 return cid;
33}
34
35
36/**
37 * bnx2i_adjust_qp_size - Adjust SQ/RQ/CQ size for 57710 device type
 38 * @hba: Adapter for which adjustments are to be made
39 *
40 * Only applicable to 57710 family of devices
41 */
42static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
43{
44 u32 num_elements_per_pg;
45
46 if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) ||
47 test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) ||
48 test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
49 if (!is_power_of_2(hba->max_sqes))
50 hba->max_sqes = rounddown_pow_of_two(hba->max_sqes);
51
52 if (!is_power_of_2(hba->max_rqes))
53 hba->max_rqes = rounddown_pow_of_two(hba->max_rqes);
54 }
55
56 /* Adjust each queue size if the user selection does not
57 * yield integral num of page buffers
58 */
59 /* adjust SQ */
60 num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
61 if (hba->max_sqes < num_elements_per_pg)
62 hba->max_sqes = num_elements_per_pg;
63 else if (hba->max_sqes % num_elements_per_pg)
64 hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) &
65 ~(num_elements_per_pg - 1);
66
67 /* adjust CQ */
68 num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
69 if (hba->max_cqes < num_elements_per_pg)
70 hba->max_cqes = num_elements_per_pg;
71 else if (hba->max_cqes % num_elements_per_pg)
72 hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) &
73 ~(num_elements_per_pg - 1);
74
75 /* adjust RQ */
76 num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
77 if (hba->max_rqes < num_elements_per_pg)
78 hba->max_rqes = num_elements_per_pg;
79 else if (hba->max_rqes % num_elements_per_pg)
80 hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) &
81 ~(num_elements_per_pg - 1);
82}
83
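/*
 * Example of the rounding above with 4K pages: BNX2I_SQ_WQE_SIZE = 64
 * gives 64 SQ entries per page, so a requested max_sqes of 100 becomes
 * (100 + 63) & ~63 = 128, while anything below 64 is raised to 64.
 */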
84
85/**
86 * bnx2i_get_link_state - get network interface link state
87 * @hba: adapter instance pointer
88 *
89 * updates adapter structure flag based on netdev state
90 */
91static void bnx2i_get_link_state(struct bnx2i_hba *hba)
92{
93 if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
94 set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
95 else
96 clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
97}
98
99
100/**
101 * bnx2i_iscsi_license_error - displays iscsi license related error message
102 * @hba: adapter instance pointer
103 * @error_code: error classification
104 *
105 * Puts out an error log when driver is unable to offload iscsi connection
106 * due to license restrictions
107 */
108static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
109{
110 if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED)
111 /* iSCSI offload not supported on this device */
112 printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n",
113 hba->netdev->name);
114 if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED)
115 /* iSCSI offload not supported on this LOM device */
 116		printk(KERN_ERR "bnx2i: LOM is not enabled to "
117 "offload iSCSI connections, dev=%s\n",
118 hba->netdev->name);
119 set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state);
120}
121
122
123/**
124 * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
 125 * @ep: endpoint (transport identifier) structure
126 * @action: action, ARM or DISARM. For now only ARM_CQE is used
127 *
 128 * Arming the CQ will enable the chip to generate global EQ events in order
 129 * to interrupt the driver. An EQ event is generated when the CQ index is hit
 130 * or when at least 1 CQE is outstanding and the on-chip timer expires
131 */
132void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
133{
134 struct bnx2i_5771x_cq_db *cq_db;
135 u16 cq_index;
136
137 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
138 return;
139
140 if (action == CNIC_ARM_CQE) {
141 cq_index = ep->qp.cqe_exp_seq_sn +
142 ep->num_active_cmds / event_coal_div;
143 cq_index %= (ep->qp.cqe_size * 2 + 1);
144 if (!cq_index) {
145 cq_index = 1;
146 cq_db = (struct bnx2i_5771x_cq_db *)
147 ep->qp.cq_pgtbl_virt;
148 cq_db->sqn[0] = cq_index;
149 }
150 }
151}
152
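/*
 * Numeric example of the arming computation above: with
 * cqe_exp_seq_sn = 10, 32 active commands and event_coal_div = 1, the
 * index computed is (10 + 32) % (cqe_size * 2 + 1); a wrap to 0 is
 * bumped to 1 because 0 is not a valid CQ sequence number.
 */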
153
154/**
155 * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer
 156 * @conn: iscsi connection on which RQ event occurred
 157 * @ptr: driver buffer to which RQ buffer contents are to
 158 * be copied
 159 * @len: length of valid data inside RQ buf
 160 *
 161 * Copies RQ buffer contents from shared (DMA'able) memory region to
 162 * driver buffer. RQ is used to DMA unsolicited iscsi pdu's and
 163 * scsi sense info
164 */
165void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len)
166{
167 if (!bnx2i_conn->ep->qp.rqe_left)
168 return;
169
170 bnx2i_conn->ep->qp.rqe_left--;
171 memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len);
172 if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) {
173 bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe;
174 bnx2i_conn->ep->qp.rq_cons_idx = 0;
175 } else {
176 bnx2i_conn->ep->qp.rq_cons_qe++;
177 bnx2i_conn->ep->qp.rq_cons_idx++;
178 }
179}
180
181
182static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn)
183{
184 struct bnx2i_5771x_dbell dbell;
185 u32 msg;
186
187 memset(&dbell, 0, sizeof(dbell));
188 dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE <<
189 B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);
190 msg = *((u32 *)&dbell);
191 /* TODO : get doorbell register mapping */
192 writel(cpu_to_le32(msg), conn->ep->qp.ctx_base);
193}
194
195
196/**
 197 * bnx2i_put_rq_buf - Replenish RQ buffers and, if required, ring the chip doorbell
 198 * @conn: iscsi connection on which event to post
 199 * @count: number of RQ buffers being posted to chip
200 *
201 * No need to ring hardware doorbell for 57710 family of devices
202 */
203void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
204{
205 struct bnx2i_5771x_sq_rq_db *rq_db;
206 u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000);
207 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
208
209 ep->qp.rqe_left += count;
210 ep->qp.rq_prod_idx &= 0x7FFF;
211 ep->qp.rq_prod_idx += count;
212
213 if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) {
214 ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes;
215 if (!hi_bit)
216 ep->qp.rq_prod_idx |= 0x8000;
217 } else
218 ep->qp.rq_prod_idx |= hi_bit;
219
220 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
221 rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt;
222 rq_db->prod_idx = ep->qp.rq_prod_idx;
223 /* no need to ring hardware doorbell for 57710 */
224 } else {
225 writew(ep->qp.rq_prod_idx,
226 ep->qp.ctx_base + CNIC_RECV_DOORBELL);
227 }
228 mmiowb();
229}
230
231
232/**
 233 * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake up the processing engine
234 * @conn: iscsi connection to which new SQ entries belong
235 * @count: number of SQ WQEs to post
236 *
237 * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family
238 * of devices. For 5706/5708/5709 new SQ WQE count is written into the
239 * doorbell register
240 */
241static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
242{
243 struct bnx2i_5771x_sq_rq_db *sq_db;
244 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
245
246 ep->num_active_cmds++;
247 wmb(); /* flush SQ WQE memory before the doorbell is rung */
248 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
249 sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
250 sq_db->prod_idx = ep->qp.sq_prod_idx;
251 bnx2i_ring_577xx_doorbell(bnx2i_conn);
252 } else
253 writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
254
255 mmiowb(); /* flush posted PCI writes */
256}
257
258
259/**
260 * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters
261 * @conn: iscsi connection to which new SQ entries belong
262 * @count: number of SQ WQEs to post
263 *
264 * this routine will update SQ driver parameters and ring the doorbell
265 */
266static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
267 int count)
268{
269 int tmp_cnt;
270
271 if (count == 1) {
272 if (bnx2i_conn->ep->qp.sq_prod_qe ==
273 bnx2i_conn->ep->qp.sq_last_qe)
274 bnx2i_conn->ep->qp.sq_prod_qe =
275 bnx2i_conn->ep->qp.sq_first_qe;
276 else
277 bnx2i_conn->ep->qp.sq_prod_qe++;
278 } else {
279 if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <=
280 bnx2i_conn->ep->qp.sq_last_qe)
281 bnx2i_conn->ep->qp.sq_prod_qe += count;
282 else {
283 tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe -
284 bnx2i_conn->ep->qp.sq_prod_qe;
285 bnx2i_conn->ep->qp.sq_prod_qe =
286 &bnx2i_conn->ep->qp.sq_first_qe[count -
287 (tmp_cnt + 1)];
288 }
289 }
290 bnx2i_conn->ep->qp.sq_prod_idx += count;
291 /* Ring the doorbell */
292 bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx);
293}
294
295
296/**
297 * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware
298 * @conn: iscsi connection
299 * @cmd: driver command structure which is requesting
 300 * a WQE to be sent to the chip for further processing
301 *
302 * prepare and post an iSCSI Login request WQE to CNIC firmware
303 */
304int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
305 struct iscsi_task *task)
306{
307 struct bnx2i_cmd *bnx2i_cmd;
308 struct bnx2i_login_request *login_wqe;
309 struct iscsi_login *login_hdr;
310 u32 dword;
311
312 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
313 login_hdr = (struct iscsi_login *)task->hdr;
314 login_wqe = (struct bnx2i_login_request *)
315 bnx2i_conn->ep->qp.sq_prod_qe;
316
317 login_wqe->op_code = login_hdr->opcode;
318 login_wqe->op_attr = login_hdr->flags;
319 login_wqe->version_max = login_hdr->max_version;
320 login_wqe->version_min = login_hdr->min_version;
321 login_wqe->data_length = ntoh24(login_hdr->dlength);
322 login_wqe->isid_lo = *((u32 *) login_hdr->isid);
323 login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2);
324 login_wqe->tsih = login_hdr->tsih;
325 login_wqe->itt = task->itt |
326 (ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT);
327 login_wqe->cid = login_hdr->cid;
328
329 login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
330 login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
331
332 login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
333 login_wqe->resp_bd_list_addr_hi =
334 (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
335
336 dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) |
337 (bnx2i_conn->gen_pdu.resp_buf_size <<
338 ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
339 login_wqe->resp_buffer = dword;
340 login_wqe->flags = 0;
341 login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
342 login_wqe->bd_list_addr_hi =
343 (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
344 login_wqe->num_bds = 1;
345 login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
346
347 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
348 return 0;
349}
350
351/**
352 * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware
353 * @conn: iscsi connection
354 * @mtask: driver command structure which is requesting
 355 * a WQE to be sent to the chip for further processing
 356 *
 357 * prepare and post an iSCSI task management request WQE to CNIC firmware
358 */
359int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
360 struct iscsi_task *mtask)
361{
362 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
363 struct iscsi_tm *tmfabort_hdr;
364 struct scsi_cmnd *ref_sc;
365 struct iscsi_task *ctask;
366 struct bnx2i_cmd *bnx2i_cmd;
367 struct bnx2i_tmf_request *tmfabort_wqe;
368 u32 dword;
369
370 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
371 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
372 tmfabort_wqe = (struct bnx2i_tmf_request *)
373 bnx2i_conn->ep->qp.sq_prod_qe;
374
375 tmfabort_wqe->op_code = tmfabort_hdr->opcode;
376 tmfabort_wqe->op_attr = 0;
377 tmfabort_wqe->op_attr =
378 ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
379 tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
380 tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
381
382 tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
383 tmfabort_wqe->reserved2 = 0;
384 tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
385
386 ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
 387	if (!ctask || !ctask->sc)
388 /*
389 * the iscsi layer must have completed the cmd while this
390 * was starting up.
391 */
392 return 0;
393 ref_sc = ctask->sc;
394
395 if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
396 dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
397 else
398 dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
399 tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt);
400 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
401
402 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
403 tmfabort_wqe->bd_list_addr_hi = (u32)
404 ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
405 tmfabort_wqe->num_bds = 1;
406 tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
407
408 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
409 return 0;
410}
411
412/**
413 * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
414 * @conn: iscsi connection
415 * @cmd: driver command structure which is requesting
 416 * a WQE to be sent to the chip for further processing
417 *
418 * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware
419 */
420int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
421 struct bnx2i_cmd *cmd)
422{
423 struct bnx2i_cmd_request *scsi_cmd_wqe;
424
425 scsi_cmd_wqe = (struct bnx2i_cmd_request *)
426 bnx2i_conn->ep->qp.sq_prod_qe;
427 memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request));
428 scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
429
430 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
431 return 0;
432}
433
434/**
435 * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware
436 * @conn: iscsi connection
437 * @cmd: driver command structure which is requesting
 438 * a WQE to be sent to the chip for further processing
439 * @ttt: TTT to be used when building pdu header
440 * @datap: payload buffer pointer
441 * @data_len: payload data length
 442 * @unsol: indicates whether nopout pdu is unsolicited pdu or
443 * in response to target's NOPIN w/ TTT != FFFFFFFF
444 *
445 * prepare and post a nopout request WQE to CNIC firmware
446 */
447int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
448 struct iscsi_task *task, u32 ttt,
449 char *datap, int data_len, int unsol)
450{
451 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
452 struct bnx2i_cmd *bnx2i_cmd;
453 struct bnx2i_nop_out_request *nopout_wqe;
454 struct iscsi_nopout *nopout_hdr;
455
456 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
457 nopout_hdr = (struct iscsi_nopout *)task->hdr;
458 nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
459 nopout_wqe->op_code = nopout_hdr->opcode;
460 nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
461 memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
462
 463	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
 464		u32 tmp = nopout_wqe->lun[0];
 465		/* 57710 requires the LUN field in the WQE to be swapped */
 466		nopout_wqe->lun[0] = nopout_wqe->lun[1];
 467		nopout_wqe->lun[1] = tmp;
 468	}
469
470 nopout_wqe->itt = ((u16)task->itt |
471 (ISCSI_TASK_TYPE_MPATH <<
472 ISCSI_TMF_REQUEST_TYPE_SHIFT));
473 nopout_wqe->ttt = ttt;
474 nopout_wqe->flags = 0;
475 if (!unsol)
476 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
477 else if (nopout_hdr->itt == RESERVED_ITT)
478 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
479
480 nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
481 nopout_wqe->data_length = data_len;
482 if (data_len) {
483 /* handle payload data, not required in first release */
484 printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n");
485 } else {
486 nopout_wqe->bd_list_addr_lo = (u32)
487 bnx2i_conn->hba->mp_bd_dma;
488 nopout_wqe->bd_list_addr_hi =
489 (u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
490 nopout_wqe->num_bds = 1;
491 }
492 nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
493
494 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
495 return 0;
496}
497
498
499/**
500 * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware
501 * @conn: iscsi connection
502 * @cmd: driver command structure which is requesting
 503 * a WQE to be sent to the chip for further processing
504 *
505 * prepare and post logout request WQE to CNIC firmware
506 */
507int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
508 struct iscsi_task *task)
509{
510 struct bnx2i_cmd *bnx2i_cmd;
511 struct bnx2i_logout_request *logout_wqe;
512 struct iscsi_logout *logout_hdr;
513
514 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
515 logout_hdr = (struct iscsi_logout *)task->hdr;
516
517 logout_wqe = (struct bnx2i_logout_request *)
518 bnx2i_conn->ep->qp.sq_prod_qe;
519 memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request));
520
521 logout_wqe->op_code = logout_hdr->opcode;
522 logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
523 logout_wqe->op_attr =
524 logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE;
525 logout_wqe->itt = ((u16)task->itt |
526 (ISCSI_TASK_TYPE_MPATH <<
527 ISCSI_LOGOUT_REQUEST_TYPE_SHIFT));
528 logout_wqe->data_length = 0;
529 logout_wqe->cid = 0;
530
531 logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
532 logout_wqe->bd_list_addr_hi = (u32)
533 ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
534 logout_wqe->num_bds = 1;
535 logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
536
537 bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
538 return 0;
539}
540
541
542/**
 543 * bnx2i_update_iscsi_conn - updates iscsi connection parameters on the chip
544 * @conn: iscsi connection which requires iscsi parameter update
545 *
546 * sends down iSCSI Conn Update request to move iSCSI conn to FFP
547 */
548void bnx2i_update_iscsi_conn(struct iscsi_conn *conn)
549{
550 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
551 struct bnx2i_hba *hba = bnx2i_conn->hba;
552 struct kwqe *kwqe_arr[2];
553 struct iscsi_kwqe_conn_update *update_wqe;
554 struct iscsi_kwqe_conn_update conn_update_kwqe;
555
556 update_wqe = &conn_update_kwqe;
557
558 update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN;
559 update_wqe->hdr.flags =
560 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
561
562 /* 5771x requires conn context id to be passed as is */
563 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type))
564 update_wqe->context_id = bnx2i_conn->ep->ep_cid;
565 else
566 update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7);
567 update_wqe->conn_flags = 0;
568 if (conn->hdrdgst_en)
569 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST;
570 if (conn->datadgst_en)
571 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST;
572 if (conn->session->initial_r2t_en)
573 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T;
574 if (conn->session->imm_data_en)
575 update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA;
576
577 update_wqe->max_send_pdu_length = conn->max_xmit_dlength;
578 update_wqe->max_recv_pdu_length = conn->max_recv_dlength;
579 update_wqe->first_burst_length = conn->session->first_burst;
580 update_wqe->max_burst_length = conn->session->max_burst;
581 update_wqe->exp_stat_sn = conn->exp_statsn;
582 update_wqe->max_outstanding_r2ts = conn->session->max_r2t;
583 update_wqe->session_error_recovery_level = conn->session->erl;
584 iscsi_conn_printk(KERN_ALERT, conn,
 585			  "bnx2i: conn update - MBL 0x%x FBL 0x%x "
 586			  "MRDSL_I 0x%x MRDSL_T 0x%x\n",
587 update_wqe->max_burst_length,
588 update_wqe->first_burst_length,
589 update_wqe->max_recv_pdu_length,
590 update_wqe->max_send_pdu_length);
591
592 kwqe_arr[0] = (struct kwqe *) update_wqe;
593 if (hba->cnic && hba->cnic->submit_kwqes)
594 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
595}
596
597
598/**
 599 * bnx2i_ep_ofld_timer - connection offload/destroy request timeout handler
600 * @data: endpoint (transport handle) structure pointer
601 *
602 * routine to handle connection offload/destroy request timeout
603 */
604void bnx2i_ep_ofld_timer(unsigned long data)
605{
606 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;
607
608 if (ep->state == EP_STATE_OFLD_START) {
609 printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
610 ep->state = EP_STATE_OFLD_FAILED;
611 } else if (ep->state == EP_STATE_DISCONN_START) {
612 printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n");
613 ep->state = EP_STATE_DISCONN_TIMEDOUT;
614 } else if (ep->state == EP_STATE_CLEANUP_START) {
615 printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n");
616 ep->state = EP_STATE_CLEANUP_FAILED;
617 }
618
619 wake_up_interruptible(&ep->ofld_wait);
620}
621
622
623static int bnx2i_power_of2(u32 val)
624{
625 u32 power = 0;
626 if (val & (val - 1))
627 return power;
628 val--;
629 while (val) {
630 val = val >> 1;
631 power++;
632 }
633 return power;
634}
635
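/*
 * bnx2i_power_of2() returns log2(val) for powers of two and 0 otherwise,
 * e.g. bnx2i_power_of2(64) == 6 while bnx2i_power_of2(100) == 0. Note
 * that val == 1 also returns 0, making that error value ambiguous.
 */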
636
637/**
638 * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request
639 * @hba: adapter structure pointer
640 * @cmd: driver command structure which is requesting
 641 * a WQE to be sent to the chip for further processing
 642 *
 643 * prepares and posts an iSCSI command cleanup request SQ WQE
644 */
645void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
646{
647 struct bnx2i_cleanup_request *cmd_cleanup;
648
649 cmd_cleanup =
650 (struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe;
651 memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request));
652
653 cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST;
654 cmd_cleanup->itt = cmd->req.itt;
655 cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */
656
657 bnx2i_ring_dbell_update_sq_params(cmd->conn, 1);
658}
659
660
661/**
662 * bnx2i_send_conn_destroy - initiates iscsi connection teardown process
663 * @hba: adapter structure pointer
 664 * @ep: endpoint (transport identifier) structure
665 *
 666 * this routine prepares and posts a DESTROY_CONN KWQE to initiate
 667 * the iscsi connection context clean-up process
668 */
669void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
670{
671 struct kwqe *kwqe_arr[2];
672 struct iscsi_kwqe_conn_destroy conn_cleanup;
673
674 memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));
675
676 conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN;
677 conn_cleanup.hdr.flags =
678 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
679 /* 5771x requires conn context id to be passed as is */
680 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
681 conn_cleanup.context_id = ep->ep_cid;
682 else
683 conn_cleanup.context_id = (ep->ep_cid >> 7);
684
685 conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid;
686
687 kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
688 if (hba->cnic && hba->cnic->submit_kwqes)
689 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
690}
691
692
693/**
694 * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process
695 * @hba: adapter structure pointer
 696 * @ep: endpoint (transport identifier) structure
697 *
698 * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
699 */
700static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
701 struct bnx2i_endpoint *ep)
702{
703 struct kwqe *kwqe_arr[2];
704 struct iscsi_kwqe_conn_offload1 ofld_req1;
705 struct iscsi_kwqe_conn_offload2 ofld_req2;
706 dma_addr_t dma_addr;
707 int num_kwqes = 2;
708 u32 *ptbl;
709
710 ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
711 ofld_req1.hdr.flags =
712 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
713
714 ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
715
716 dma_addr = ep->qp.sq_pgtbl_phys;
717 ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
718 ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
719
720 dma_addr = ep->qp.cq_pgtbl_phys;
721 ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
722 ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
723
724 ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
725 ofld_req2.hdr.flags =
726 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
727
728 dma_addr = ep->qp.rq_pgtbl_phys;
729 ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
730 ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
731
732 ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
733
734 ofld_req2.sq_first_pte.hi = *ptbl++;
735 ofld_req2.sq_first_pte.lo = *ptbl;
736
737 ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
738 ofld_req2.cq_first_pte.hi = *ptbl++;
739 ofld_req2.cq_first_pte.lo = *ptbl;
740
741 kwqe_arr[0] = (struct kwqe *) &ofld_req1;
742 kwqe_arr[1] = (struct kwqe *) &ofld_req2;
743 ofld_req2.num_additional_wqes = 0;
744
745 if (hba->cnic && hba->cnic->submit_kwqes)
746 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
747}
748
749
750/**
751 * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
752 * @hba: adapter structure pointer
 753 * @ep: endpoint (transport identifier) structure
754 *
755 * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
756 */
757static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
758 struct bnx2i_endpoint *ep)
759{
760 struct kwqe *kwqe_arr[5];
761 struct iscsi_kwqe_conn_offload1 ofld_req1;
762 struct iscsi_kwqe_conn_offload2 ofld_req2;
763 struct iscsi_kwqe_conn_offload3 ofld_req3[1];
764 dma_addr_t dma_addr;
765 int num_kwqes = 2;
766 u32 *ptbl;
767
768 ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
769 ofld_req1.hdr.flags =
770 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
771
772 ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
773
774 dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
775 ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
776 ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
777
778 dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
779 ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
780 ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
781
782 ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
783 ofld_req2.hdr.flags =
784 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
785
786 dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
787 ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
788 ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
789
790 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
791 ofld_req2.sq_first_pte.hi = *ptbl++;
792 ofld_req2.sq_first_pte.lo = *ptbl;
793
794 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
795 ofld_req2.cq_first_pte.hi = *ptbl++;
796 ofld_req2.cq_first_pte.lo = *ptbl;
797
798 kwqe_arr[0] = (struct kwqe *) &ofld_req1;
799 kwqe_arr[1] = (struct kwqe *) &ofld_req2;
800
801 ofld_req2.num_additional_wqes = 1;
802 memset(ofld_req3, 0x00, sizeof(ofld_req3[0]));
803 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
804 ofld_req3[0].qp_first_pte[0].hi = *ptbl++;
805 ofld_req3[0].qp_first_pte[0].lo = *ptbl;
806
807 kwqe_arr[2] = (struct kwqe *) ofld_req3;
 808	/* needed if we decide to go with multiple KCQEs per conn */
809 num_kwqes += 1;
810
811 if (hba->cnic && hba->cnic->submit_kwqes)
812 hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
813}
814
815/**
816 * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
817 *
818 * @hba: adapter structure pointer
 819 * @ep: endpoint (transport identifier) structure
820 *
821 * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
822 */
823void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
824{
825 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
826 bnx2i_5771x_send_conn_ofld_req(hba, ep);
827 else
828 bnx2i_570x_send_conn_ofld_req(hba, ep);
829}
830
831
832/**
833 * setup_qp_page_tables - iscsi QP page table setup function
 834 * @ep: endpoint (transport identifier) structure
835 *
 836 * Sets up page tables for SQ/RQ/CQ. 1G (5706/5708/5709) devices require
 837 * 64-bit addresses in big endian format, whereas 10G (57710) devices
 838 * require PTEs in little endian format
839 */
840static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
841{
842 int num_pages;
843 u32 *ptbl;
844 dma_addr_t page;
845 int cnic_dev_10g;
846
847 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
848 cnic_dev_10g = 1;
849 else
850 cnic_dev_10g = 0;
851
852 /* SQ page table */
853 memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
854 num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
855 page = ep->qp.sq_phys;
856
857 if (cnic_dev_10g)
858 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
859 else
860 ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
861 while (num_pages--) {
862 if (cnic_dev_10g) {
863 /* PTE is written in little endian format for 57710 */
864 *ptbl = (u32) page;
865 ptbl++;
866 *ptbl = (u32) ((u64) page >> 32);
867 ptbl++;
868 page += PAGE_SIZE;
869 } else {
870 /* PTE is written in big endian format for
871 * 5706/5708/5709 devices */
872 *ptbl = (u32) ((u64) page >> 32);
873 ptbl++;
874 *ptbl = (u32) page;
875 ptbl++;
876 page += PAGE_SIZE;
877 }
878 }
879
880 /* RQ page table */
881 memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
882 num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
883 page = ep->qp.rq_phys;
884
885 if (cnic_dev_10g)
886 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
887 else
888 ptbl = (u32 *) ep->qp.rq_pgtbl_virt;
889 while (num_pages--) {
890 if (cnic_dev_10g) {
891 /* PTE is written in little endian format for 57710 */
892 *ptbl = (u32) page;
893 ptbl++;
894 *ptbl = (u32) ((u64) page >> 32);
895 ptbl++;
896 page += PAGE_SIZE;
897 } else {
898 /* PTE is written in big endian format for
899 * 5706/5708/5709 devices */
900 *ptbl = (u32) ((u64) page >> 32);
901 ptbl++;
902 *ptbl = (u32) page;
903 ptbl++;
904 page += PAGE_SIZE;
905 }
906 }
907
908 /* CQ page table */
909 memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
910 num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
911 page = ep->qp.cq_phys;
912
913 if (cnic_dev_10g)
914 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
915 else
916 ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
917 while (num_pages--) {
918 if (cnic_dev_10g) {
919 /* PTE is written in little endian format for 57710 */
920 *ptbl = (u32) page;
921 ptbl++;
922 *ptbl = (u32) ((u64) page >> 32);
923 ptbl++;
924 page += PAGE_SIZE;
925 } else {
926 /* PTE is written in big endian format for
927 * 5706/5708/5709 devices */
928 *ptbl = (u32) ((u64) page >> 32);
929 ptbl++;
930 *ptbl = (u32) page;
931 ptbl++;
932 page += PAGE_SIZE;
933 }
934 }
935}
936
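/*
 * A minimal standalone sketch (userspace C, not driver code) of the
 * page-table word ordering used above: the same 64-bit DMA page address
 * is stored low-word-first for the 57710 and high-word-first for the
 * 5706/5708/5709 parts.  The sample address is arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

static void write_pte(uint32_t *ptbl, uint64_t page, int cnic_dev_10g)
{
	if (cnic_dev_10g) {
		/* 57710: little endian PTE - low 32 bits first */
		ptbl[0] = (uint32_t) page;
		ptbl[1] = (uint32_t) (page >> 32);
	} else {
		/* 5706/5708/5709: big endian PTE - high 32 bits first */
		ptbl[0] = (uint32_t) (page >> 32);
		ptbl[1] = (uint32_t) page;
	}
}

int main(void)
{
	uint32_t pte[2];

	write_pte(pte, 0x00000001a0003000ULL, 1);
	printf("57710    pte: %08x %08x\n", pte[0], pte[1]);
	write_pte(pte, 0x00000001a0003000ULL, 0);
	printf("5706/8/9 pte: %08x %08x\n", pte[0], pte[1]);
	return 0;	/* prints a0003000 00000001, then 00000001 a0003000 */
}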
937
938/**
939 * bnx2i_alloc_qp_resc - allocates required resources for QP.
940 * @hba: adapter structure pointer
941 * @ep: endpoint (transport identifier) structure
942 *
943 * Allocate QP (transport layer for iSCSI connection) resources, DMA'able
944 * memory for SQ/RQ/CQ and page tables. EP structure elements such
945 * as producer/consumer indexes/pointers, queue sizes and page table
946 * contents are set up
947 */
948int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
949{
950 struct bnx2i_5771x_cq_db *cq_db;
951
952 ep->hba = hba;
953 ep->conn = NULL;
954 ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0;
955
956 /* Allocate page table memory for SQ which is page aligned */
957 ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
958 ep->qp.sq_mem_size =
959 (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
960 ep->qp.sq_pgtbl_size =
961 (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
962 ep->qp.sq_pgtbl_size =
963 (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
964
965 ep->qp.sq_pgtbl_virt =
966 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
967 &ep->qp.sq_pgtbl_phys, GFP_KERNEL);
968 if (!ep->qp.sq_pgtbl_virt) {
969 printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n",
970 ep->qp.sq_pgtbl_size);
971 goto mem_alloc_err;
972 }
973
974 /* Allocate memory area for actual SQ element */
975 ep->qp.sq_virt =
976 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
977 &ep->qp.sq_phys, GFP_KERNEL);
978 if (!ep->qp.sq_virt) {
979 printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
980 ep->qp.sq_mem_size);
981 goto mem_alloc_err;
982 }
983
984 memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
985 ep->qp.sq_first_qe = ep->qp.sq_virt;
986 ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
987 ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
988 ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1];
989 ep->qp.sq_prod_idx = 0;
990 ep->qp.sq_cons_idx = 0;
991 ep->qp.sqe_left = hba->max_sqes;
992
993 /* Allocate page table memory for CQ which is page aligned */
994 ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
995 ep->qp.cq_mem_size =
996 (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
997 ep->qp.cq_pgtbl_size =
998 (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
999 ep->qp.cq_pgtbl_size =
1000 (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
1001
1002 ep->qp.cq_pgtbl_virt =
1003 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
1004 &ep->qp.cq_pgtbl_phys, GFP_KERNEL);
1005 if (!ep->qp.cq_pgtbl_virt) {
1006 printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n",
1007 ep->qp.cq_pgtbl_size);
1008 goto mem_alloc_err;
1009 }
1010
1011 /* Allocate memory area for actual CQ element */
1012 ep->qp.cq_virt =
1013 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
1014 &ep->qp.cq_phys, GFP_KERNEL);
1015 if (!ep->qp.cq_virt) {
1016 printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
1017 ep->qp.cq_mem_size);
1018 goto mem_alloc_err;
1019 }
1020 memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
1021
1022 ep->qp.cq_first_qe = ep->qp.cq_virt;
1023 ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
1024 ep->qp.cq_cons_qe = ep->qp.cq_first_qe;
1025 ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1];
1026 ep->qp.cq_prod_idx = 0;
1027 ep->qp.cq_cons_idx = 0;
1028 ep->qp.cqe_left = hba->max_cqes;
1029 ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN;
1030 ep->qp.cqe_size = hba->max_cqes;
1031
1032 /* Invalidate all EQ CQE index, req only for 57710 */
1033 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
1034 memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS);
1035
1036 /* Allocate page table memory for RQ which is page aligned */
1037 ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
1038 ep->qp.rq_mem_size =
1039 (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
1040 ep->qp.rq_pgtbl_size =
1041 (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
1042 ep->qp.rq_pgtbl_size =
1043 (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
1044
1045 ep->qp.rq_pgtbl_virt =
1046 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
1047 &ep->qp.rq_pgtbl_phys, GFP_KERNEL);
1048 if (!ep->qp.rq_pgtbl_virt) {
1049 printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n",
1050 ep->qp.rq_pgtbl_size);
1051 goto mem_alloc_err;
1052 }
1053
1054 /* Allocate memory area for actual RQ element */
1055 ep->qp.rq_virt =
1056 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
1057 &ep->qp.rq_phys, GFP_KERNEL);
1058 if (!ep->qp.rq_virt) {
1059 printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n",
1060 ep->qp.rq_mem_size);
1061 goto mem_alloc_err;
1062 }
1063
1064 ep->qp.rq_first_qe = ep->qp.rq_virt;
1065 ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
1066 ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
1067 ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1];
1068 ep->qp.rq_prod_idx = 0x8000;
1069 ep->qp.rq_cons_idx = 0;
1070 ep->qp.rqe_left = hba->max_rqes;
1071
1072 setup_qp_page_tables(ep);
1073
1074 return 0;
1075
1076mem_alloc_err:
1077 bnx2i_free_qp_resc(hba, ep);
1078 return -ENOMEM;
1079}
1080
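/*
 * A standalone sketch of the size-rounding idiom used throughout
 * bnx2i_alloc_qp_resc(): PAGE_MASK is ~(PAGE_SIZE - 1), so adding
 * PAGE_SIZE - 1 and masking rounds a byte count up to a whole number
 * of pages.  A 4K page size is assumed purely for illustration.
 */
#include <stdio.h>

#define EX_PAGE_SIZE	4096UL
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))

int main(void)
{
	unsigned long sizes[] = { 1, 4096, 4097, 12288 };
	int i;

	for (i = 0; i < 4; i++)
		printf("%5lu -> %5lu\n", sizes[i],
		       (sizes[i] + (EX_PAGE_SIZE - 1)) & EX_PAGE_MASK);
	return 0;	/* 1->4096, 4096->4096, 4097->8192, 12288->12288 */
}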
1081
1082
1083/**
1084 * bnx2i_free_qp_resc - free memory resources held by QP
1085 * @hba: adapter structure pointer
1086 * @ep: endpoint (transport identifier) structure
1087 *
1088 * Free QP resources - SQ/RQ/CQ memory and page tables.
1089 */
1090void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
1091{
1092 if (ep->qp.ctx_base) {
1093 iounmap(ep->qp.ctx_base);
1094 ep->qp.ctx_base = NULL;
1095 }
1096 /* Free SQ mem */
1097 if (ep->qp.sq_pgtbl_virt) {
1098 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
1099 ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys);
1100 ep->qp.sq_pgtbl_virt = NULL;
1101 ep->qp.sq_pgtbl_phys = 0;
1102 }
1103 if (ep->qp.sq_virt) {
1104 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
1105 ep->qp.sq_virt, ep->qp.sq_phys);
1106 ep->qp.sq_virt = NULL;
1107 ep->qp.sq_phys = 0;
1108 }
1109
1110 /* Free RQ mem */
1111 if (ep->qp.rq_pgtbl_virt) {
1112 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
1113 ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys);
1114 ep->qp.rq_pgtbl_virt = NULL;
1115 ep->qp.rq_pgtbl_phys = 0;
1116 }
1117 if (ep->qp.rq_virt) {
1118 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
1119 ep->qp.rq_virt, ep->qp.rq_phys);
1120 ep->qp.rq_virt = NULL;
1121 ep->qp.rq_phys = 0;
1122 }
1123
1124 /* Free CQ mem */
1125 if (ep->qp.cq_pgtbl_virt) {
1126 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
1127 ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys);
1128 ep->qp.cq_pgtbl_virt = NULL;
1129 ep->qp.cq_pgtbl_phys = 0;
1130 }
1131 if (ep->qp.cq_virt) {
1132 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
1133 ep->qp.cq_virt, ep->qp.cq_phys);
1134 ep->qp.cq_virt = NULL;
1135 ep->qp.cq_phys = 0;
1136 }
1137}
1138
1139
1140/**
1141 * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w
1142 * @hba: adapter structure pointer
1143 *
1144 * Send down iscsi_init KWQEs which initiate the initial handshake with the f/w.
1145 * This results in iSCSI support validation and on-chip context manager
1146 * initialization. Firmware completes this handshake with a CQE carrying
1147 * the result of iscsi support validation. Parameters carried by the
1148 * iscsi init request determine the number of offloaded connections and the
1149 * tolerance level for iscsi protocol violations this hba/chip can support
1150 */
1151int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
1152{
1153 struct kwqe *kwqe_arr[3];
1154 struct iscsi_kwqe_init1 iscsi_init;
1155 struct iscsi_kwqe_init2 iscsi_init2;
1156 int rc = 0;
1157 u64 mask64;
1158
1159 bnx2i_adjust_qp_size(hba);
1160
1161 iscsi_init.flags =
1162 ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
1163 if (en_tcp_dack)
1164 iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
1165 iscsi_init.reserved0 = 0;
1166 iscsi_init.num_cqs = 1;
1167 iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1;
1168 iscsi_init.hdr.flags =
1169 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
1170
1171 iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
1172 iscsi_init.dummy_buffer_addr_hi =
1173 (u32) ((u64) hba->dummy_buf_dma >> 32);
1174
1175 hba->ctx_ccell_tasks =
1176 ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
1177 iscsi_init.num_ccells_per_conn = hba->num_ccell;
1178 iscsi_init.num_tasks_per_conn = hba->max_sqes;
1179 iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
1180 iscsi_init.sq_num_wqes = hba->max_sqes;
1181 iscsi_init.cq_log_wqes_per_page =
1182 (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
1183 iscsi_init.cq_num_wqes = hba->max_cqes;
1184 iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
1185 (PAGE_SIZE - 1)) / PAGE_SIZE;
1186 iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
1187 (PAGE_SIZE - 1)) / PAGE_SIZE;
1188 iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
1189 iscsi_init.rq_num_wqes = hba->max_rqes;
1190
1191
1192 iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2;
1193 iscsi_init2.hdr.flags =
1194 (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
1195 iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1;
1196 mask64 = 0x0ULL;
1197 mask64 |= (
1198 /* CISCO MDS */
1199 (1UL <<
1200 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) |
1201 /* HP MSA1510i */
1202 (1UL <<
1203 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
1204 /* EMC */
1205 (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
1206 if (error_mask1)
1207 iscsi_init2.error_bit_map[0] = error_mask1;
1208 else
1209 iscsi_init2.error_bit_map[0] = (u32) mask64;
1210
1211 if (error_mask2)
1212 iscsi_init2.error_bit_map[1] = error_mask2;
1213 else
1214 iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);
1215
1216 iscsi_error_mask = mask64;
1217
1218 kwqe_arr[0] = (struct kwqe *) &iscsi_init;
1219 kwqe_arr[1] = (struct kwqe *) &iscsi_init2;
1220
1221 if (hba->cnic && hba->cnic->submit_kwqes)
1222 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2);
1223 return rc;
1224}
1225
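/*
 * A standalone sketch of the two bit-packing idioms in
 * bnx2i_send_fw_iscsi_init_msg(): packing two 16-bit counts into one
 * 32-bit word (ctx_ccell_tasks) and splitting the 64-bit error mask
 * into the two 32-bit halves of error_bit_map[].  The sample counts
 * and bit positions are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t num_ccell = 64, max_sqes = 128;
	uint32_t ctx_ccell_tasks = (num_ccell & 0xFFFF) | (max_sqes << 16);

	uint64_t mask64 = (1ULL << 9) | (1ULL << 12) | (1ULL << 44);
	uint32_t error_bit_map[2];

	error_bit_map[0] = (uint32_t) mask64;		/* bits  0..31 */
	error_bit_map[1] = (uint32_t) (mask64 >> 32);	/* bits 32..63 */

	printf("ctx_ccell_tasks = 0x%08x\n", ctx_ccell_tasks);
	printf("error_bit_map   = 0x%08x 0x%08x\n",
	       error_bit_map[0], error_bit_map[1]);
	return 0;
}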
1226
1227/**
1228 * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion
1229 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
1230 * @cqe: pointer to newly DMA'ed CQE entry for processing
1231 *
1232 * process SCSI CMD Response CQE & complete the request to SCSI-ML
1233 */
1234static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
1235 struct bnx2i_conn *bnx2i_conn,
1236 struct cqe *cqe)
1237{
1238 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1239 struct bnx2i_cmd_response *resp_cqe;
1240 struct bnx2i_cmd *bnx2i_cmd;
1241 struct iscsi_task *task;
1242 struct iscsi_cmd_rsp *hdr;
1243 u32 datalen = 0;
1244
1245 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1246 spin_lock(&session->lock);
1247 task = iscsi_itt_to_task(conn,
1248 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
1249 if (!task)
1250 goto fail;
1251
1252 bnx2i_cmd = task->dd_data;
1253
1254 if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
1255 conn->datain_pdus_cnt +=
1256 resp_cqe->task_stat.read_stat.num_data_outs;
1257 conn->rxdata_octets +=
1258 bnx2i_cmd->req.total_data_transfer_length;
1259 } else {
1260 conn->dataout_pdus_cnt +=
1261 resp_cqe->task_stat.read_stat.num_data_outs;
1262 conn->r2t_pdus_cnt +=
1263 resp_cqe->task_stat.read_stat.num_r2ts;
1264 conn->txdata_octets +=
1265 bnx2i_cmd->req.total_data_transfer_length;
1266 }
1267 bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
1268
1269 hdr = (struct iscsi_cmd_rsp *)task->hdr;
1270 resp_cqe = (struct bnx2i_cmd_response *)cqe;
1271 hdr->opcode = resp_cqe->op_code;
1272 hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
1273 hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn);
1274 hdr->response = resp_cqe->response;
1275 hdr->cmd_status = resp_cqe->status;
1276 hdr->flags = resp_cqe->response_flags;
1277 hdr->residual_count = cpu_to_be32(resp_cqe->residual_count);
1278
1279 if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN)
1280 goto done;
1281
1282 if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) {
1283 datalen = resp_cqe->data_length;
1284 if (datalen < 2)
1285 goto done;
1286
1287 if (datalen > BNX2I_RQ_WQE_SIZE) {
1288 iscsi_conn_printk(KERN_ERR, conn,
1289 "sense data len %d > RQ sz\n",
1290 datalen);
1291 datalen = BNX2I_RQ_WQE_SIZE;
1292 } else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) {
1293 iscsi_conn_printk(KERN_ERR, conn,
1294 "sense data len %d > conn data\n",
1295 datalen);
1296 datalen = ISCSI_DEF_MAX_RECV_SEG_LEN;
1297 }
1298
1299 bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen);
1300 bnx2i_put_rq_buf(bnx2i_cmd->conn, 1);
1301 }
1302
1303done:
1304 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
1305 conn->data, datalen);
1306fail:
1307 spin_unlock(&session->lock);
1308 return 0;
1309}
1310
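/*
 * A standalone sketch of the ITT masking done above: the firmware may
 * set flag bits in the upper part of the completed ITT, so the driver
 * masks them off before looking up the task.  The 14-bit mask below is
 * a hypothetical stand-in for the ISCSI_*_RESPONSE_INDEX constants.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_INDEX_MASK	0x3FFF	/* hypothetical index mask */

int main(void)
{
	uint16_t itt = 0x8007;	/* flag bit set above the task index */

	printf("task index = %u\n", itt & EX_INDEX_MASK);	/* 7 */
	return 0;
}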
1311
1312/**
1313 * bnx2i_process_login_resp - this function handles iscsi login response
1314 * @session: iscsi session pointer
1315 * @bnx2i_conn: iscsi connection pointer
1316 * @cqe: pointer to newly DMA'ed CQE entry for processing
1317 *
1318 * process Login Response CQE & complete it to open-iscsi user daemon
1319 */
1320static int bnx2i_process_login_resp(struct iscsi_session *session,
1321 struct bnx2i_conn *bnx2i_conn,
1322 struct cqe *cqe)
1323{
1324 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1325 struct iscsi_task *task;
1326 struct bnx2i_login_response *login;
1327 struct iscsi_login_rsp *resp_hdr;
1328 int pld_len;
1329 int pad_len;
1330
1331 login = (struct bnx2i_login_response *) cqe;
1332 spin_lock(&session->lock);
1333 task = iscsi_itt_to_task(conn,
1334 login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
1335 if (!task)
1336 goto done;
1337
1338 resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1339 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1340 resp_hdr->opcode = login->op_code;
1341 resp_hdr->flags = login->response_flags;
1342 resp_hdr->max_version = login->version_max;
1343	resp_hdr->active_version = login->version_active;
1344 resp_hdr->hlength = 0;
1345
1346 hton24(resp_hdr->dlength, login->data_length);
1347 memcpy(resp_hdr->isid, &login->isid_lo, 6);
1348 resp_hdr->tsih = cpu_to_be16(login->tsih);
1349 resp_hdr->itt = task->hdr->itt;
1350 resp_hdr->statsn = cpu_to_be32(login->stat_sn);
1351 resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn);
1352 resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn);
1353 resp_hdr->status_class = login->status_class;
1354 resp_hdr->status_detail = login->status_detail;
1355 pld_len = login->data_length;
1356 bnx2i_conn->gen_pdu.resp_wr_ptr =
1357 bnx2i_conn->gen_pdu.resp_buf + pld_len;
1358
1359 pad_len = 0;
1360 if (pld_len & 0x3)
1361 pad_len = 4 - (pld_len % 4);
1362
1363 if (pad_len) {
1364 int i = 0;
1365 for (i = 0; i < pad_len; i++) {
1366 bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
1367 bnx2i_conn->gen_pdu.resp_wr_ptr++;
1368 }
1369 }
1370
1371 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
1372 bnx2i_conn->gen_pdu.resp_buf,
1373 bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
1374done:
1375 spin_unlock(&session->lock);
1376 return 0;
1377}
1378
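/*
 * A standalone sketch of the padding rule applied above: iSCSI PDU
 * payloads are padded to a 4-byte boundary, so pad_len is non-zero
 * only when the payload length is not already a multiple of four.
 */
#include <stdio.h>

int main(void)
{
	int pld_len, pad_len;

	for (pld_len = 0; pld_len < 8; pld_len++) {
		pad_len = (pld_len & 0x3) ? 4 - (pld_len % 4) : 0;
		printf("payload %d -> pad %d (total %d)\n",
		       pld_len, pad_len, pld_len + pad_len);
	}
	return 0;	/* totals: 0 4 4 4 4 8 8 8 */
}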
1379/**
1380 * bnx2i_process_tmf_resp - this function handles iscsi TMF response
1381 * @session: iscsi session pointer
1382 * @bnx2i_conn: iscsi connection pointer
1383 * @cqe: pointer to newly DMA'ed CQE entry for processing
1384 *
1385 * process iSCSI TMF Response CQE and wake up the driver eh thread.
1386 */
1387static int bnx2i_process_tmf_resp(struct iscsi_session *session,
1388 struct bnx2i_conn *bnx2i_conn,
1389 struct cqe *cqe)
1390{
1391 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1392 struct iscsi_task *task;
1393 struct bnx2i_tmf_response *tmf_cqe;
1394 struct iscsi_tm_rsp *resp_hdr;
1395
1396 tmf_cqe = (struct bnx2i_tmf_response *)cqe;
1397 spin_lock(&session->lock);
1398 task = iscsi_itt_to_task(conn,
1399 tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
1400 if (!task)
1401 goto done;
1402
1403 resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1404 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1405 resp_hdr->opcode = tmf_cqe->op_code;
1406 resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn);
1407 resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn);
1408 resp_hdr->itt = task->hdr->itt;
1409 resp_hdr->response = tmf_cqe->response;
1410
1411 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
1412done:
1413 spin_unlock(&session->lock);
1414 return 0;
1415}
1416
1417/**
1418 * bnx2i_process_logout_resp - this function handles iscsi logout response
1419 * @session: iscsi session pointer
1420 * @bnx2i_conn: iscsi connection pointer
1421 * @cqe: pointer to newly DMA'ed CQE entry for processing
1422 *
1423 * process iSCSI Logout Response CQE & make function call to
1424 * notify the user daemon.
1425 */
1426static int bnx2i_process_logout_resp(struct iscsi_session *session,
1427 struct bnx2i_conn *bnx2i_conn,
1428 struct cqe *cqe)
1429{
1430 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1431 struct iscsi_task *task;
1432 struct bnx2i_logout_response *logout;
1433 struct iscsi_logout_rsp *resp_hdr;
1434
1435 logout = (struct bnx2i_logout_response *) cqe;
1436 spin_lock(&session->lock);
1437 task = iscsi_itt_to_task(conn,
1438 logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
1439 if (!task)
1440 goto done;
1441
1442 resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
1443 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1444 resp_hdr->opcode = logout->op_code;
1445 resp_hdr->flags = logout->response;
1446 resp_hdr->hlength = 0;
1447
1448 resp_hdr->itt = task->hdr->itt;
1449 resp_hdr->statsn = task->hdr->exp_statsn;
1450 resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn);
1451 resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn);
1452
1453 resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait);
1454 resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain);
1455
1456 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
1457done:
1458 spin_unlock(&session->lock);
1459 return 0;
1460}
1461
1462/**
1463 * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE
1464 * @session: iscsi session pointer
1465 * @bnx2i_conn: iscsi connection pointer
1466 * @cqe: pointer to newly DMA'ed CQE entry for processing
1467 *
1468 * process iSCSI NOPIN local completion CQE, frees ITT and command structures
1469 */
1470static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
1471 struct bnx2i_conn *bnx2i_conn,
1472 struct cqe *cqe)
1473{
1474 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1475 struct bnx2i_nop_in_msg *nop_in;
1476 struct iscsi_task *task;
1477
1478 nop_in = (struct bnx2i_nop_in_msg *)cqe;
1479 spin_lock(&session->lock);
1480 task = iscsi_itt_to_task(conn,
1481 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
1482 if (task)
1483 iscsi_put_task(task);
1484 spin_unlock(&session->lock);
1485}
1486
1487/**
1488 * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
1489 * @bnx2i_conn: iscsi connection
1490 *
1491 * Firmware advances RQ producer index for every unsolicited PDU even if
1492 * payload data length is '0'. This function makes corresponding
1493 * adjustments on the driver side to match this f/w behavior
1494 */
1495static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn)
1496{
1497 char dummy_rq_data[2];
1498 bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1);
1499 bnx2i_put_rq_buf(bnx2i_conn, 1);
1500}
1501
1502
1503/**
1504 * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE
1505 * @session: iscsi session pointer
1506 * @bnx2i_conn: iscsi connection pointer
1507 * @cqe: pointer to newly DMA'ed CQE entry for processing
1508 *
1509 * process iSCSI target's proactive iSCSI NOPIN request
1510 */
1511static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
1512 struct bnx2i_conn *bnx2i_conn,
1513 struct cqe *cqe)
1514{
1515 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1516 struct iscsi_task *task;
1517 struct bnx2i_nop_in_msg *nop_in;
1518 struct iscsi_nopin *hdr;
1519 u32 itt;
1520 int tgt_async_nop = 0;
1521
1522 nop_in = (struct bnx2i_nop_in_msg *)cqe;
1523 itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX;
1524
1525 spin_lock(&session->lock);
1526 hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
1527 memset(hdr, 0, sizeof(struct iscsi_hdr));
1528 hdr->opcode = nop_in->op_code;
1529 hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn);
1530 hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
1531 hdr->ttt = cpu_to_be32(nop_in->ttt);
1532
1533 if (itt == (u16) RESERVED_ITT) {
1534 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1535 hdr->itt = RESERVED_ITT;
1536 tgt_async_nop = 1;
1537 goto done;
1538 }
1539
1540 /* this is a response to one of our nop-outs */
1541 task = iscsi_itt_to_task(conn, itt);
1542 if (task) {
1543 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1544 hdr->itt = task->hdr->itt;
1545 hdr->ttt = cpu_to_be32(nop_in->ttt);
1546 memcpy(hdr->lun, nop_in->lun, 8);
1547 }
1548done:
1549 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1550 spin_unlock(&session->lock);
1551
1552 return tgt_async_nop;
1553}
1554
1555
1556/**
1557 * bnx2i_process_async_mesg - this function handles iscsi async message
1558 * @session: iscsi session pointer
1559 * @bnx2i_conn: iscsi connection pointer
1560 * @cqe: pointer to newly DMA'ed CQE entry for processing
1561 *
1562 * process iSCSI ASYNC Message
1563 */
1564static void bnx2i_process_async_mesg(struct iscsi_session *session,
1565 struct bnx2i_conn *bnx2i_conn,
1566 struct cqe *cqe)
1567{
1568 struct bnx2i_async_msg *async_cqe;
1569 struct iscsi_async *resp_hdr;
1570 u8 async_event;
1571
1572 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1573
1574 async_cqe = (struct bnx2i_async_msg *)cqe;
1575 async_event = async_cqe->async_event;
1576
1577 if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) {
1578 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
1579 "async: scsi events not supported\n");
1580 return;
1581 }
1582
1583 spin_lock(&session->lock);
1584 resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
1585 memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
1586 resp_hdr->opcode = async_cqe->op_code;
1587 resp_hdr->flags = 0x80;
1588
1589 memcpy(resp_hdr->lun, async_cqe->lun, 8);
1590 resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
1591 resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);
1592
1593 resp_hdr->async_event = async_cqe->async_event;
1594 resp_hdr->async_vcode = async_cqe->async_vcode;
1595
1596 resp_hdr->param1 = cpu_to_be16(async_cqe->param1);
1597 resp_hdr->param2 = cpu_to_be16(async_cqe->param2);
1598 resp_hdr->param3 = cpu_to_be16(async_cqe->param3);
1599
1600 __iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
1601 (struct iscsi_hdr *)resp_hdr, NULL, 0);
1602 spin_unlock(&session->lock);
1603}
1604
1605
1606/**
1607 * bnx2i_process_reject_mesg - process iscsi reject pdu
1608 * @session: iscsi session pointer
1609 * @bnx2i_conn: iscsi connection pointer
1610 * @cqe: pointer to newly DMA'ed CQE entry for processing
1611 *
1612 * process iSCSI REJECT message
1613 */
1614static void bnx2i_process_reject_mesg(struct iscsi_session *session,
1615 struct bnx2i_conn *bnx2i_conn,
1616 struct cqe *cqe)
1617{
1618 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1619 struct bnx2i_reject_msg *reject;
1620 struct iscsi_reject *hdr;
1621
1622 reject = (struct bnx2i_reject_msg *) cqe;
1623 if (reject->data_length) {
1624 bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length);
1625 bnx2i_put_rq_buf(bnx2i_conn, 1);
1626 } else
1627 bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
1628
1629 spin_lock(&session->lock);
1630 hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
1631 memset(hdr, 0, sizeof(struct iscsi_hdr));
1632 hdr->opcode = reject->op_code;
1633 hdr->reason = reject->reason;
1634 hton24(hdr->dlength, reject->data_length);
1635 hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn);
1636 hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn);
1637 hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
1638 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
1639 reject->data_length);
1640 spin_unlock(&session->lock);
1641}
1642
1643/**
1644 * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion
1645 * @session: iscsi session pointer
1646 * @bnx2i_conn: iscsi connection pointer
1647 * @cqe: pointer to newly DMA'ed CQE entry for processing
1648 *
1649 * process command cleanup response CQE during conn shutdown or error recovery
1650 */
1651static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
1652 struct bnx2i_conn *bnx2i_conn,
1653 struct cqe *cqe)
1654{
1655 struct bnx2i_cleanup_response *cmd_clean_rsp;
1656 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1657 struct iscsi_task *task;
1658
1659 cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
1660 spin_lock(&session->lock);
1661 task = iscsi_itt_to_task(conn,
1662 cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
1663 if (!task)
1664 printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
1665 cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
1666 spin_unlock(&session->lock);
1667 complete(&bnx2i_conn->cmd_cleanup_cmpl);
1668}
1669
1670
1671
1672/**
1673 * bnx2i_process_new_cqes - process newly DMA'ed CQEs
1674 * @bnx2i_conn: iscsi connection
1675 *
1676 * this function is called by generic KCQ handler to process all pending CQEs
1677 */
1678static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
1679{
1680 struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
1681 struct iscsi_session *session = conn->session;
1682 struct qp_info *qp = &bnx2i_conn->ep->qp;
1683 struct bnx2i_nop_in_msg *nopin;
1684 int tgt_async_msg;
1685
1686 while (1) {
1687 nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
1688 if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
1689 break;
1690
1691 if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx)))
1692 break;
1693
1694 tgt_async_msg = 0;
1695
1696 switch (nopin->op_code) {
1697 case ISCSI_OP_SCSI_CMD_RSP:
1698 case ISCSI_OP_SCSI_DATA_IN:
1699 bnx2i_process_scsi_cmd_resp(session, bnx2i_conn,
1700 qp->cq_cons_qe);
1701 break;
1702 case ISCSI_OP_LOGIN_RSP:
1703 bnx2i_process_login_resp(session, bnx2i_conn,
1704 qp->cq_cons_qe);
1705 break;
1706 case ISCSI_OP_SCSI_TMFUNC_RSP:
1707 bnx2i_process_tmf_resp(session, bnx2i_conn,
1708 qp->cq_cons_qe);
1709 break;
1710 case ISCSI_OP_LOGOUT_RSP:
1711 bnx2i_process_logout_resp(session, bnx2i_conn,
1712 qp->cq_cons_qe);
1713 break;
1714 case ISCSI_OP_NOOP_IN:
1715 if (bnx2i_process_nopin_mesg(session, bnx2i_conn,
1716 qp->cq_cons_qe))
1717 tgt_async_msg = 1;
1718 break;
1719 case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION:
1720 bnx2i_process_nopin_local_cmpl(session, bnx2i_conn,
1721 qp->cq_cons_qe);
1722 break;
1723 case ISCSI_OP_ASYNC_EVENT:
1724 bnx2i_process_async_mesg(session, bnx2i_conn,
1725 qp->cq_cons_qe);
1726 tgt_async_msg = 1;
1727 break;
1728 case ISCSI_OP_REJECT:
1729 bnx2i_process_reject_mesg(session, bnx2i_conn,
1730 qp->cq_cons_qe);
1731 break;
1732 case ISCSI_OPCODE_CLEANUP_RESPONSE:
1733 bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn,
1734 qp->cq_cons_qe);
1735 break;
1736 default:
1737 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
1738 nopin->op_code);
1739 }
1740
1741 if (!tgt_async_msg)
1742 bnx2i_conn->ep->num_active_cmds--;
1743
1744		/* keep the opcode field intact until beta, it helps in
1745		 * debugging (context dump); production version only should
1746		 * clear it: nopin->op_code = 0;
1747		 */
1748 qp->cqe_exp_seq_sn++;
1749 if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
1750 qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;
1751
1752 if (qp->cq_cons_qe == qp->cq_last_qe) {
1753 qp->cq_cons_qe = qp->cq_first_qe;
1754 qp->cq_cons_idx = 0;
1755 } else {
1756 qp->cq_cons_qe++;
1757 qp->cq_cons_idx++;
1758 }
1759 }
1760 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
1761}
1762
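/*
 * A standalone sketch of the cqe_exp_seq_sn wrap-around above: the
 * expected sequence number runs from ISCSI_INITIAL_SN through
 * cqe_size * 2 before wrapping, i.e. two full passes over the CQ ring
 * per sequence cycle (matching max_cq_sqn = max_cqes * 2 + 1 in the
 * init KWQE).  ISCSI_INITIAL_SN is assumed to be 1 here.
 */
#include <stdio.h>

#define EX_INITIAL_SN	1

int main(void)
{
	unsigned int cqe_size = 4;	/* tiny ring for illustration */
	unsigned int sn = EX_INITIAL_SN;
	int i;

	for (i = 0; i < 10; i++) {
		printf("%u ", sn);
		sn++;
		if (sn == cqe_size * 2 + 1)
			sn = EX_INITIAL_SN;
	}
	printf("\n");	/* 1 2 3 4 5 6 7 8 1 2 */
	return 0;
}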
1763/**
1764 * bnx2i_fastpath_notification - process global event queue (KCQ)
1765 * @hba: adapter structure pointer
1766 * @new_cqe_kcqe: pointer to newly DMA'ed KCQE entry
1767 *
1768 * Fast path event notification handler, KCQ entry carries context id
1769 * of the connection that has 1 or more pending CQ entries
1770 */
1771static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
1772 struct iscsi_kcqe *new_cqe_kcqe)
1773{
1774 struct bnx2i_conn *conn;
1775 u32 iscsi_cid;
1776
1777 iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
1778 conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1779
1780 if (!conn) {
1781 printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
1782 return;
1783 }
1784 if (!conn->ep) {
1785 printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
1786 return;
1787 }
1788
1789 bnx2i_process_new_cqes(conn);
1790}
1791
1792
1793/**
1794 * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE
1795 * @hba: adapter structure pointer
1796 * @update_kcqe: kcqe pointer
1797 *
1798 * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration
1799 */
1800static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba,
1801 struct iscsi_kcqe *update_kcqe)
1802{
1803 struct bnx2i_conn *conn;
1804 u32 iscsi_cid;
1805
1806 iscsi_cid = update_kcqe->iscsi_conn_id;
1807 conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1808
1809 if (!conn) {
1810 printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid);
1811 return;
1812 }
1813 if (!conn->ep) {
1814 printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid);
1815 return;
1816 }
1817
1818 if (update_kcqe->completion_status) {
1819 printk(KERN_ALERT "request failed cid %x\n", iscsi_cid);
1820 conn->ep->state = EP_STATE_ULP_UPDATE_FAILED;
1821 } else
1822 conn->ep->state = EP_STATE_ULP_UPDATE_COMPL;
1823
1824 wake_up_interruptible(&conn->ep->ofld_wait);
1825}
1826
1827
1828/**
1829 * bnx2i_recovery_que_add_conn - add connection to recovery queue
1830 * @hba: adapter structure pointer
1831 * @bnx2i_conn: iscsi connection
1832 *
1833 * Add connection to recovery queue and schedule adapter eh worker
1834 */
1835static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba,
1836 struct bnx2i_conn *bnx2i_conn)
1837{
1838 iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data,
1839 ISCSI_ERR_CONN_FAILED);
1840}
1841
1842
1843/**
1844 * bnx2i_process_tcp_error - process error notification on a given connection
1845 *
1846 * @hba: adapter structure pointer
1847 * @tcp_err: tcp error kcqe pointer
1848 *
1849 * handles tcp level error notifications from FW.
1850 */
1851static void bnx2i_process_tcp_error(struct bnx2i_hba *hba,
1852 struct iscsi_kcqe *tcp_err)
1853{
1854 struct bnx2i_conn *bnx2i_conn;
1855 u32 iscsi_cid;
1856
1857 iscsi_cid = tcp_err->iscsi_conn_id;
1858 bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1859
1860 if (!bnx2i_conn) {
1861 printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
1862 return;
1863 }
1864
1865 printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n",
1866 iscsi_cid, tcp_err->completion_status);
1867 bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
1868}
1869
1870
1871/**
1872 * bnx2i_process_iscsi_error - process error notification on a given connection
1873 * @hba: adapter structure pointer
1874 * @iscsi_err: iscsi error kcqe pointer
1875 *
1876 * handles iscsi error notifications from the FW. Firmware, based on the
1877 * initial handshake, classifies iscsi protocol / TCP RFC violations into
1878 * either warning or error indications. If the indication is of "Error"
1879 * type, the driver will initiate session recovery for that
1880 * connection/session. For a "Warning" type indication, the driver will put
1881 * out a system log message (only one message for each type for the life
1882 * of the session, to avoid unnecessarily overloading the system)
1883 */
1884static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
1885 struct iscsi_kcqe *iscsi_err)
1886{
1887 struct bnx2i_conn *bnx2i_conn;
1888 u32 iscsi_cid;
1889 char warn_notice[] = "iscsi_warning";
1890 char error_notice[] = "iscsi_error";
1891 char additional_notice[64];
1892 char *message;
1893 int need_recovery;
1894 u64 err_mask64;
1895
1896 iscsi_cid = iscsi_err->iscsi_conn_id;
1897 bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
1898 if (!bnx2i_conn) {
1899 printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
1900 return;
1901 }
1902
1903 err_mask64 = (0x1ULL << iscsi_err->completion_status);
1904
1905 if (err_mask64 & iscsi_error_mask) {
1906 need_recovery = 0;
1907 message = warn_notice;
1908 } else {
1909 need_recovery = 1;
1910 message = error_notice;
1911 }
1912
1913 switch (iscsi_err->completion_status) {
1914 case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
1915 strcpy(additional_notice, "hdr digest err");
1916 break;
1917 case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
1918 strcpy(additional_notice, "data digest err");
1919 break;
1920 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
1921 strcpy(additional_notice, "wrong opcode rcvd");
1922 break;
1923 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
1924 strcpy(additional_notice, "AHS len > 0 rcvd");
1925 break;
1926 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
1927 strcpy(additional_notice, "invalid ITT rcvd");
1928 break;
1929 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
1930 strcpy(additional_notice, "wrong StatSN rcvd");
1931 break;
1932 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
1933 strcpy(additional_notice, "wrong DataSN rcvd");
1934 break;
1935 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
1936 strcpy(additional_notice, "pend R2T violation");
1937 break;
1938 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
1939 strcpy(additional_notice, "ERL0, UO");
1940 break;
1941 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
1942 strcpy(additional_notice, "ERL0, U1");
1943 break;
1944 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
1945 strcpy(additional_notice, "ERL0, U2");
1946 break;
1947 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
1948 strcpy(additional_notice, "ERL0, U3");
1949 break;
1950 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
1951 strcpy(additional_notice, "ERL0, U4");
1952 break;
1953 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
1954 strcpy(additional_notice, "ERL0, U5");
1955 break;
1956 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
1957 strcpy(additional_notice, "ERL0, U6");
1958 break;
1959 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
1960 strcpy(additional_notice, "invalid resi len");
1961 break;
1962 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
1963 strcpy(additional_notice, "MRDSL violation");
1964 break;
1965 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
1966 strcpy(additional_notice, "F-bit not set");
1967 break;
1968 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
1969 strcpy(additional_notice, "invalid TTT");
1970 break;
1971 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
1972 strcpy(additional_notice, "invalid DataSN");
1973 break;
1974 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
1975 strcpy(additional_notice, "burst len violation");
1976 break;
1977 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
1978 strcpy(additional_notice, "buf offset violation");
1979 break;
1980 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
1981 strcpy(additional_notice, "invalid LUN field");
1982 break;
1983 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
1984 strcpy(additional_notice, "invalid R2TSN field");
1985 break;
1986#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 \
1987 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
1988 case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
1989 strcpy(additional_notice, "invalid cmd len1");
1990 break;
1991#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 \
1992 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
1993 case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
1994 strcpy(additional_notice, "invalid cmd len2");
1995 break;
1996 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
1997 strcpy(additional_notice,
1998 "pend r2t exceeds MaxOutstandingR2T value");
1999 break;
2000 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
2001 strcpy(additional_notice, "TTT is rsvd");
2002 break;
2003 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
2004 strcpy(additional_notice, "MBL violation");
2005 break;
2006#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO \
2007 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
2008 case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
2009 strcpy(additional_notice, "data seg len != 0");
2010 break;
2011 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
2012 strcpy(additional_notice, "reject pdu len error");
2013 break;
2014 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
2015 strcpy(additional_notice, "async pdu len error");
2016 break;
2017 case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
2018 strcpy(additional_notice, "nopin pdu len error");
2019 break;
2020#define BNX2_ERR_PEND_R2T_IN_CLEANUP \
2021 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
2022 case BNX2_ERR_PEND_R2T_IN_CLEANUP:
2023 strcpy(additional_notice, "pend r2t in cleanup");
2024 break;
2025
2026 case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
2027 strcpy(additional_notice, "IP fragments rcvd");
2028 break;
2029 case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
2030 strcpy(additional_notice, "IP options error");
2031 break;
2032 case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
2033 strcpy(additional_notice, "urgent flag error");
2034 break;
2035 default:
2036 printk(KERN_ALERT "iscsi_err - unknown err %x\n",
2037 iscsi_err->completion_status);
2038 }
2039
2040 if (need_recovery) {
2041 iscsi_conn_printk(KERN_ALERT,
2042 bnx2i_conn->cls_conn->dd_data,
2043 "bnx2i: %s - %s\n",
2044 message, additional_notice);
2045
2046 iscsi_conn_printk(KERN_ALERT,
2047 bnx2i_conn->cls_conn->dd_data,
2048 "conn_err - hostno %d conn %p, "
2049 "iscsi_cid %x cid %x\n",
2050 bnx2i_conn->hba->shost->host_no,
2051 bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid,
2052 bnx2i_conn->ep->ep_cid);
2053 bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
2054 } else
2055 if (!test_and_set_bit(iscsi_err->completion_status,
2056 (void *) &bnx2i_conn->violation_notified))
2057 iscsi_conn_printk(KERN_ALERT,
2058 bnx2i_conn->cls_conn->dd_data,
2059 "bnx2i: %s - %s\n",
2060 message, additional_notice);
2061}
2062
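/*
 * A standalone sketch of the "warn once per violation type" logic
 * above: each completion status indexes one bit in a per-connection
 * bitmap, and only the first offender of each type produces a log
 * message.  test_and_set() stands in for the kernel's
 * test_and_set_bit(); the status values are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

static int test_and_set(uint64_t *map, unsigned int bit)
{
	int old = (*map >> bit) & 1;

	*map |= 1ULL << bit;
	return old;
}

int main(void)
{
	uint64_t violation_notified = 0;
	unsigned int statuses[] = { 5, 9, 5, 5, 9 };
	int i;

	for (i = 0; i < 5; i++)
		if (!test_and_set(&violation_notified, statuses[i]))
			printf("warning logged for status %u\n", statuses[i]);
	return 0;	/* logs exactly twice: for 5 and for 9 */
}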
2063
2064/**
2065 * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion
2066 * @hba: adapter structure pointer
2067 * @conn_destroy: conn destroy kcqe pointer
2068 *
2069 * handles connection destroy completion request.
2070 */
2071static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
2072 struct iscsi_kcqe *conn_destroy)
2073{
2074 struct bnx2i_endpoint *ep;
2075
2076 ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
2077 if (!ep) {
2078 printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
2079		       "offload request, unexpected completion\n");
2080 return;
2081 }
2082
2083 if (hba != ep->hba) {
2084 printk(KERN_ALERT "conn destroy- error hba mis-match\n");
2085 return;
2086 }
2087
2088 if (conn_destroy->completion_status) {
2089 printk(KERN_ALERT "conn_destroy_cmpl: op failed\n");
2090 ep->state = EP_STATE_CLEANUP_FAILED;
2091 } else
2092 ep->state = EP_STATE_CLEANUP_CMPL;
2093 wake_up_interruptible(&ep->ofld_wait);
2094}
2095
2096
2097/**
2098 * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion
2099 * @hba: adapter structure pointer
2100 * @ofld_kcqe: conn offload kcqe pointer
2101 *
2102 * handles initial connection offload completion, the ep_connect() thread
2103 * is woken up to continue with the LLP connect process
2104 */
2105static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
2106 struct iscsi_kcqe *ofld_kcqe)
2107{
2108 u32 cid_addr;
2109 struct bnx2i_endpoint *ep;
2110 u32 cid_num;
2111
2112 ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
2113 if (!ep) {
2114 printk(KERN_ALERT "ofld_cmpl: no pend offload request\n");
2115 return;
2116 }
2117
2118 if (hba != ep->hba) {
2119 printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
2120 return;
2121 }
2122
2123 if (ofld_kcqe->completion_status) {
2124 if (ofld_kcqe->completion_status ==
2125 ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
2126 printk(KERN_ALERT "bnx2i: unable to allocate"
2127 " iSCSI context resources\n");
2128 ep->state = EP_STATE_OFLD_FAILED;
2129 } else {
2130 ep->state = EP_STATE_OFLD_COMPL;
2131 cid_addr = ofld_kcqe->iscsi_conn_context_id;
2132 cid_num = bnx2i_get_cid_num(ep);
2133 ep->ep_cid = cid_addr;
2134 ep->qp.ctx_base = NULL;
2135 }
2136 wake_up_interruptible(&ep->ofld_wait);
2137}
2138
2139/**
2140 * bnx2i_indicate_kcqe - generic KCQ event handler/dispatcher
2141 * @context: adapter structure pointer
2142 * @kcqe: array of pending KCQE entries
 * @num_cqe: number of KCQE entries in the array
2143 *
2144 * Walks the KCQ and routes each KCQE to its specific completion handler
2145 */
2146static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
2147 u32 num_cqe)
2148{
2149 struct bnx2i_hba *hba = context;
2150 int i = 0;
2151 struct iscsi_kcqe *ikcqe = NULL;
2152
2153 while (i < num_cqe) {
2154 ikcqe = (struct iscsi_kcqe *) kcqe[i++];
2155
2156 if (ikcqe->op_code ==
2157 ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION)
2158 bnx2i_fastpath_notification(hba, ikcqe);
2159 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN)
2160 bnx2i_process_ofld_cmpl(hba, ikcqe);
2161 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN)
2162 bnx2i_process_update_conn_cmpl(hba, ikcqe);
2163 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) {
2164 if (ikcqe->completion_status !=
2165 ISCSI_KCQE_COMPLETION_STATUS_SUCCESS)
2166				bnx2i_iscsi_license_error(hba,
2167						ikcqe->completion_status);
2168 else {
2169 set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
2170 bnx2i_get_link_state(hba);
2171 printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: "
2172 "ISCSI_INIT passed\n",
2173 (u8)hba->pcidev->bus->number,
2174 hba->pci_devno,
2175 (u8)hba->pci_func);
2178 }
2179 } else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN)
2180 bnx2i_process_conn_destroy_cmpl(hba, ikcqe);
2181 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR)
2182 bnx2i_process_iscsi_error(hba, ikcqe);
2183 else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR)
2184 bnx2i_process_tcp_error(hba, ikcqe);
2185 else
2186 printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
2187 ikcqe->op_code);
2188 }
2189}
2190
2191
2192/**
2193 * bnx2i_indicate_netevent - Generic netdev event handler
2194 * @context: adapter structure pointer
2195 * @event: event type
2196 *
2197 * Handles four netdev events: NETDEV_UP, NETDEV_DOWN,
2198 * NETDEV_GOING_DOWN and NETDEV_CHANGE
2199 */
2200static void bnx2i_indicate_netevent(void *context, unsigned long event)
2201{
2202 struct bnx2i_hba *hba = context;
2203
2204 switch (event) {
2205 case NETDEV_UP:
2206 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
2207 bnx2i_send_fw_iscsi_init_msg(hba);
2208 break;
2209 case NETDEV_DOWN:
2210 clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
2211 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
2212 break;
2213 case NETDEV_GOING_DOWN:
2214 set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
2215 iscsi_host_for_each_session(hba->shost,
2216 bnx2i_drop_session);
2217 break;
2218 case NETDEV_CHANGE:
2219 bnx2i_get_link_state(hba);
2220 break;
2221 default:
2222 ;
2223 }
2224}
2225
2226
2227/**
2228 * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion
2229 * @cm_sk: cnic sock structure pointer
2230 *
2231 * function callback exported via bnx2i - cnic driver interface to
2232 * indicate completion of option-2 TCP connect request.
2233 */
2234static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk)
2235{
2236 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2237
2238 if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
2239 ep->state = EP_STATE_CONNECT_FAILED;
2240 else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags))
2241 ep->state = EP_STATE_CONNECT_COMPL;
2242 else
2243 ep->state = EP_STATE_CONNECT_FAILED;
2244
2245 wake_up_interruptible(&ep->ofld_wait);
2246}
2247
2248
2249/**
2250 * bnx2i_cm_close_cmpl - process tcp conn close completion
2251 * @cm_sk: cnic sock structure pointer
2252 *
2253 * function callback exported via bnx2i - cnic driver interface to
2254 * indicate completion of option-2 graceful TCP connect shutdown
2255 */
2256static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk)
2257{
2258 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2259
2260 ep->state = EP_STATE_DISCONN_COMPL;
2261 wake_up_interruptible(&ep->ofld_wait);
2262}
2263
2264
2265/**
2266 * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion
2267 * @cm_sk: cnic sock structure pointer
2268 *
2269 * function callback exported via bnx2i - cnic driver interface to
2270 * indicate completion of option-2 abortive TCP connect termination
2271 */
2272static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk)
2273{
2274 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2275
2276 ep->state = EP_STATE_DISCONN_COMPL;
2277 wake_up_interruptible(&ep->ofld_wait);
2278}
2279
2280
2281/**
2282 * bnx2i_cm_remote_close - process received TCP FIN
2283 * @cm_sk: cnic sock structure pointer
2285 *
2286 * function callback exported via bnx2i - cnic driver interface to indicate
2287 * async TCP events such as FIN
2288 */
2289static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
2290{
2291 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2292
2293 ep->state = EP_STATE_TCP_FIN_RCVD;
2294 if (ep->conn)
2295 bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
2296}
2297
2298/**
2299 * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup
2300 * @cm_sk: cnic sock structure pointer
2302 *
2303 * function callback exported via bnx2i - cnic driver interface to
2304 * indicate async TCP events (RST) sent by the peer.
2305 */
2306static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
2307{
2308 struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
2309
2310 ep->state = EP_STATE_TCP_RST_RCVD;
2311 if (ep->conn)
2312 bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
2313}
2314
2315
2316static void bnx2i_send_nl_mesg(struct cnic_dev *dev, u32 msg_type,
2317 char *buf, u16 buflen)
2318{
2319 struct bnx2i_hba *hba;
2320
2321 hba = bnx2i_find_hba_for_cnic(dev);
2322 if (!hba)
2323 return;
2324
2325 if (iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport,
2326 msg_type, buf, buflen))
2327 printk(KERN_ALERT "bnx2i: private nl message send error\n");
2328
2329}
2330
2331
2332/**
2333 * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure
2334 * carrying callback function pointers
2335 *
2336 */
2337struct cnic_ulp_ops bnx2i_cnic_cb = {
2338 .cnic_init = bnx2i_ulp_init,
2339 .cnic_exit = bnx2i_ulp_exit,
2340 .cnic_start = bnx2i_start,
2341 .cnic_stop = bnx2i_stop,
2342 .indicate_kcqes = bnx2i_indicate_kcqe,
2343 .indicate_netevent = bnx2i_indicate_netevent,
2344 .cm_connect_complete = bnx2i_cm_connect_cmpl,
2345 .cm_close_complete = bnx2i_cm_close_cmpl,
2346 .cm_abort_complete = bnx2i_cm_abort_cmpl,
2347 .cm_remote_close = bnx2i_cm_remote_close,
2348 .cm_remote_abort = bnx2i_cm_remote_abort,
2349 .iscsi_nl_send_msg = bnx2i_send_nl_mesg,
2350 .owner = THIS_MODULE
2351};
2352
2353
2354/**
2355 * bnx2i_map_ep_dbell_regs - map connection doorbell registers
2356 * @ep: bnx2i endpoint
2357 *
2358 * Maps the connection's SQ and RQ doorbell registers. 5706/5708/5709 devices
2359 * host these registers in BAR #0, whereas on 57710 these registers are
2360 * accessed by mapping BAR #1
2361 */
2362int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
2363{
2364 u32 cid_num;
2365 u32 reg_off;
2366 u32 first_l4l5;
2367 u32 ctx_sz;
2368 u32 config2;
2369 resource_size_t reg_base;
2370
2371 cid_num = bnx2i_get_cid_num(ep);
2372
2373 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
2374 reg_base = pci_resource_start(ep->hba->pcidev,
2375 BNX2X_DOORBELL_PCI_BAR);
2376 reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE;
2377 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
2378 goto arm_cq;
2379 }
2380
2381 reg_base = ep->hba->netdev->base_addr;
2382 if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
2383 (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
2384 config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
2385 first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5;
2386 ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3;
2387 if (ctx_sz)
2388 reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE
2389 + PAGE_SIZE *
2390 (((cid_num - first_l4l5) / ctx_sz) + 256);
2391 else
2392 reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
2393 } else
2394		/* 5709 device in normal mode and 5706/5708 devices */
2395 reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
2396
2397 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
2398 MB_KERNEL_CTX_SIZE);
2399 if (!ep->qp.ctx_base)
2400 return -ENOMEM;
2401
2402arm_cq:
2403 bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE);
2404 return 0;
2405}
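/*
 * A standalone sketch of the 57710 doorbell offset computed above:
 * each connection's doorbell page is found by multiplying the low 17
 * bits of the context id by the page size and adding the DPM trigger
 * offset.  The cid and trigger values below are arbitrary stand-ins
 * (DPM_TRIGER_TYPE's real value is defined elsewhere in the driver).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cid_num = 0x12345;	/* arbitrary sample cid */
	uint32_t page_size = 4096;
	uint32_t dpm_trigger = 0x40;	/* hypothetical DPM_TRIGER_TYPE */
	uint64_t reg_off;

	reg_off = (uint64_t) page_size * (cid_num & 0x1FFFF) + dpm_trigger;
	printf("doorbell offset = 0x%llx\n", (unsigned long long) reg_off);
	return 0;	/* prints 0x12345040 */
}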
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
new file mode 100644
index 00000000000..ae4b2d588fd
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -0,0 +1,438 @@
1/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2006 - 2009 Broadcom Corporation
4 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
5 * Copyright (c) 2007, 2008 Mike Christie
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
12 */
13
14#include "bnx2i.h"
15
16static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
17static u32 adapter_count;
18static int bnx2i_reg_device;
19
20#define DRV_MODULE_NAME "bnx2i"
21#define DRV_MODULE_VERSION "2.0.1d"
22#define DRV_MODULE_RELDATE "Mar 25, 2009"
23
24static char version[] __devinitdata =
25 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
26 " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
27
28
29MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
30MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
31MODULE_LICENSE("GPL");
32MODULE_VERSION(DRV_MODULE_VERSION);
33
34static DEFINE_RWLOCK(bnx2i_dev_lock);
35
36unsigned int event_coal_div = 1;
37module_param(event_coal_div, int, 0664);
38MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
39
40unsigned int en_tcp_dack = 1;
41module_param(en_tcp_dack, int, 0664);
42MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");
43
44unsigned int error_mask1 = 0x00;
45module_param(error_mask1, int, 0664);
46MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");
47
48unsigned int error_mask2 = 0x00;
49module_param(error_mask2, int, 0664);
50MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");
51
52unsigned int sq_size;
53module_param(sq_size, int, 0664);
54MODULE_PARM_DESC(sq_size, "Configure SQ size");
55
56unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT;
57module_param(rq_size, int, 0664);
58MODULE_PARM_DESC(rq_size, "Configure RQ size");
59
60u64 iscsi_error_mask = 0x00;
61
62static void bnx2i_unreg_one_device(struct bnx2i_hba *hba);
63
64
65/**
66 * bnx2i_identify_device - identifies NetXtreme II device type
67 * @hba: Adapter structure pointer
68 *
69 * This function identifies the NX2 device type and sets appropriate
70 * queue mailbox register access method, 5709 requires driver to
71 * access MBOX regs using *bin* mode
72 */
73void bnx2i_identify_device(struct bnx2i_hba *hba)
74{
75 hba->cnic_dev_type = 0;
76 if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) ||
77 (hba->pci_did == PCI_DEVICE_ID_NX2_5706S))
78 set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
79 else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) ||
80 (hba->pci_did == PCI_DEVICE_ID_NX2_5708S))
81 set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
82 else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) ||
83 (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
84 set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
85 hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
86 } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
87 hba->pci_did == PCI_DEVICE_ID_NX2_57711)
88 set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
89}
90
91
92/**
93 * get_adapter_list_head - returns head of adapter list
94 */
95struct bnx2i_hba *get_adapter_list_head(void)
96{
97 struct bnx2i_hba *hba = NULL;
98 struct bnx2i_hba *tmp_hba;
99
100 if (!adapter_count)
101 goto hba_not_found;
102
103 read_lock(&bnx2i_dev_lock);
104 list_for_each_entry(tmp_hba, &adapter_list, link) {
105 if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) {
106 hba = tmp_hba;
107 break;
108 }
109 }
110 read_unlock(&bnx2i_dev_lock);
111hba_not_found:
112 return hba;
113}
114
115
116/**
117 * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance
118 * @cnic: pointer to cnic device instance
119 *
120 */
121struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
122{
123 struct bnx2i_hba *hba, *temp;
124
125 read_lock(&bnx2i_dev_lock);
126 list_for_each_entry_safe(hba, temp, &adapter_list, link) {
127 if (hba->cnic == cnic) {
128 read_unlock(&bnx2i_dev_lock);
129 return hba;
130 }
131 }
132 read_unlock(&bnx2i_dev_lock);
133 return NULL;
134}
135
136
137/**
138 * bnx2i_start - cnic callback to initialize & start adapter instance
139 * @handle: transparent handle pointing to adapter structure
140 *
141 * This function maps the adapter structure to the pcidev structure and
142 * initiates the firmware handshake to enable/initialize on-chip iscsi
143 * components. This bnx2i - cnic interface api callback is issued after
144 * the following 2 conditions are met -
145 * a) underlying network interface is up (marked by event 'NETDEV_UP'
146 *    from netdev)
147 * b) bnx2i adapter instance is registered
148 */
149void bnx2i_start(void *handle)
150{
151#define BNX2I_INIT_POLL_TIME (1000 / HZ)
152 struct bnx2i_hba *hba = handle;
153 int i = HZ;
154
155 bnx2i_send_fw_iscsi_init_msg(hba);
156 while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
157 msleep(BNX2I_INIT_POLL_TIME);
158}
159
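/*
 * A standalone sketch of the polling budget above: bnx2i_start() polls
 * up to HZ times at BNX2I_INIT_POLL_TIME = 1000/HZ milliseconds per
 * poll, so the total wait for ADAPTER_STATE_UP is bounded by roughly
 * one second regardless of the kernel's HZ setting (250 is assumed
 * below purely for illustration).
 */
#include <stdio.h>

int main(void)
{
	int hz = 250;			/* assumed CONFIG_HZ */
	int poll_ms = 1000 / hz;	/* BNX2I_INIT_POLL_TIME */

	printf("up to %d polls x %d ms = %d ms total\n",
	       hz, poll_ms, hz * poll_ms);
	return 0;
}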
160
161/**
162 * bnx2i_stop - cnic callback to shutdown adapter instance
163 * @handle: transparent handle pointing to adapter structure
164 *
165 * driver checks if adapter is already in shutdown mode, if not start
166 * the shutdown process
167 */
168void bnx2i_stop(void *handle)
169{
170 struct bnx2i_hba *hba = handle;
171
172 /* check if cleanup happened in GOING_DOWN context */
173 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
174 if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
175 &hba->adapter_state))
176 iscsi_host_for_each_session(hba->shost,
177 bnx2i_drop_session);
178}
179
180/**
181 * bnx2i_register_device - register bnx2i adapter instance with the cnic driver
182 * @hba: Adapter instance to register
183 *
184 * registers bnx2i adapter instance with the cnic driver while holding the
185 * adapter structure lock
186 */
187void bnx2i_register_device(struct bnx2i_hba *hba)
188{
189 if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
190 test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
191 return;
192 }
193
194 hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
195
196 spin_lock(&hba->lock);
197 bnx2i_reg_device++;
198 spin_unlock(&hba->lock);
199
200 set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
201}
202
203
204/**
205 * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver
206 *
207 * registers all bnx2i adapter instances with the cnic driver while holding
208 * the global resource lock
209 */
210void bnx2i_reg_dev_all(void)
211{
212 struct bnx2i_hba *hba, *temp;
213
214 read_lock(&bnx2i_dev_lock);
215 list_for_each_entry_safe(hba, temp, &adapter_list, link)
216 bnx2i_register_device(hba);
217 read_unlock(&bnx2i_dev_lock);
218}
219
220
221/**
222 * bnx2i_unreg_one_device - unregister adapter instance with the cnic driver
223 * @hba: Adapter instance to unregister
224 *
225 * unregisters the bnx2i adapter instance from the cnic driver while
226 * holding the adapter structure lock
227 */
228static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
229{
230 if (hba->ofld_conns_active ||
231 !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) ||
232 test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state))
233 return;
234
235 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
236
237 spin_lock(&hba->lock);
238 bnx2i_reg_device--;
239 spin_unlock(&hba->lock);
240
241 /* ep_disconnect could come before NETDEV_DOWN, driver won't
242 * see NETDEV_DOWN as it already unregistered itself.
243 */
244 hba->adapter_state = 0;
245 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
246}
247
248/**
249 * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver
250 *
251 * unregisters all bnx2i adapter instances with the cnic driver while holding
252 * the global resource lock
253 */
254void bnx2i_unreg_dev_all(void)
255{
256 struct bnx2i_hba *hba, *temp;
257
258 read_lock(&bnx2i_dev_lock);
259 list_for_each_entry_safe(hba, temp, &adapter_list, link)
260 bnx2i_unreg_one_device(hba);
261 read_unlock(&bnx2i_dev_lock);
262}
263
264
265/**
266 * bnx2i_init_one - initialize an adapter instance and allocate memory resources
267 * @hba: bnx2i adapter instance
268 * @cnic: cnic device handle
269 *
270 * The global resource lock and host adapter lock are held during critical
271 * sections below. This routine is called from cnic_register_driver() context
272 * and from the worker thread that does most device specific initialization
273 */
274static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
275{
276 int rc;
277
278 read_lock(&bnx2i_dev_lock);
279 if (bnx2i_reg_device &&
280 !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
281 rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
282 if (rc) /* duplicate registration */
283 printk(KERN_ERR "bnx2i- dev reg failed\n");
284
285 spin_lock(&hba->lock);
286 bnx2i_reg_device++;
287 hba->age++;
288 spin_unlock(&hba->lock);
289
290 set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
291 }
292 read_unlock(&bnx2i_dev_lock);
293
294 write_lock(&bnx2i_dev_lock);
295 list_add_tail(&hba->link, &adapter_list);
296 adapter_count++;
297 write_unlock(&bnx2i_dev_lock);
298 return 0;
299}
300
301
302/**
303 * bnx2i_ulp_init - initialize an adapter instance
304 * @dev: cnic device handle
305 *
306 * Called from cnic_register_driver() context to initialize all enumerated
307 * cnic devices. This routine allocates the adapter structure and other
308 * device specific resources.
309 */
310void bnx2i_ulp_init(struct cnic_dev *dev)
311{
312 struct bnx2i_hba *hba;
313
314 /* Allocate a HBA structure for this device */
315 hba = bnx2i_alloc_hba(dev);
316 if (!hba) {
317 printk(KERN_ERR "bnx2i init: hba initialization failed\n");
318 return;
319 }
320
321 /* Get PCI related information and update hba struct members */
322 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
323 if (bnx2i_init_one(hba, dev)) {
324 printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
325 bnx2i_free_hba(hba);
326 } else
327 hba->cnic = dev;
328}
329
330
331/**
332 * bnx2i_ulp_exit - shuts down adapter instance and frees all resources
333 * @dev: cnic device handle
334 *
335 */
336void bnx2i_ulp_exit(struct cnic_dev *dev)
337{
338 struct bnx2i_hba *hba;
339
340 hba = bnx2i_find_hba_for_cnic(dev);
341 if (!hba) {
342 printk(KERN_INFO "bnx2i_ulp_exit: hba not "
343 "found, dev 0x%p\n", dev);
344 return;
345 }
346 write_lock(&bnx2i_dev_lock);
347 list_del_init(&hba->link);
348 adapter_count--;
349
350 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
351 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
352 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
353
354 spin_lock(&hba->lock);
355 bnx2i_reg_device--;
356 spin_unlock(&hba->lock);
357 }
358 write_unlock(&bnx2i_dev_lock);
359
360 bnx2i_free_hba(hba);
361}
362
363
364/**
365 * bnx2i_mod_init - module init entry point
366 *
367 * initializes driver wide global data structures such as the endpoint pool,
368 * the tcp port manager/queue and sysfs. Finally the driver registers itself
369 * with the cnic module
370 */
371static int __init bnx2i_mod_init(void)
372{
373 int err;
374
375 printk(KERN_INFO "%s", version);
376
377 if (!is_power_of_2(sq_size))
378 sq_size = roundup_pow_of_two(sq_size);
379
380 bnx2i_scsi_xport_template =
381 iscsi_register_transport(&bnx2i_iscsi_transport);
382 if (!bnx2i_scsi_xport_template) {
383 printk(KERN_ERR "Could not register bnx2i transport.\n");
384 err = -ENOMEM;
385 goto out;
386 }
387
388 err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb);
389 if (err) {
390 printk(KERN_ERR "Could not register bnx2i cnic driver.\n");
391 goto unreg_xport;
392 }
393
394 return 0;
395
396unreg_xport:
397 iscsi_unregister_transport(&bnx2i_iscsi_transport);
398out:
399 return err;
400}
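/*
 * Worked example for the sq_size fixup above: the sq_size module
 * parameter must be a power of two, so a value such as 100 fails
 * is_power_of_2() and is rounded up by roundup_pow_of_two() to 128
 * before any queue is sized with it.
 */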
401
402
403/**
404 * bnx2i_mod_exit - module cleanup/exit entry point
405 *
406 * The global resource lock and host adapter lock are held during critical
407 * sections in this function. The driver browses through the adapter list,
408 * cleans up each instance, unregisters the iscsi transport name and finally
409 * unregisters itself with the cnic module
410 */
411static void __exit bnx2i_mod_exit(void)
412{
413 struct bnx2i_hba *hba;
414
415 write_lock(&bnx2i_dev_lock);
416 while (!list_empty(&adapter_list)) {
417 hba = list_entry(adapter_list.next, struct bnx2i_hba, link);
418 list_del(&hba->link);
419 adapter_count--;
420
421 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
422 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
423 clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
424 bnx2i_reg_device--;
425 }
426
427 write_unlock(&bnx2i_dev_lock);
428 bnx2i_free_hba(hba);
429 write_lock(&bnx2i_dev_lock);
430 }
431 write_unlock(&bnx2i_dev_lock);
432
433 iscsi_unregister_transport(&bnx2i_iscsi_transport);
434 cnic_unregister_driver(CNIC_ULP_ISCSI);
435}
436
437module_init(bnx2i_mod_init);
438module_exit(bnx2i_mod_exit);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
new file mode 100644
index 00000000000..f7412196f2f
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -0,0 +1,2064 @@
1/*
2 * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
3 *
4 * Copyright (c) 2006 - 2009 Broadcom Corporation
5 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
6 * Copyright (c) 2007, 2008 Mike Christie
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 *
12 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
13 */
14
15#include <scsi/scsi_tcq.h>
16#include <scsi/libiscsi.h>
17#include "bnx2i.h"
18
19struct scsi_transport_template *bnx2i_scsi_xport_template;
20struct iscsi_transport bnx2i_iscsi_transport;
21static struct scsi_host_template bnx2i_host_template;
22
23/*
24 * Global endpoint resource info
25 */
26static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
27
28
29static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
30{
31 int retval = 0;
32
33 if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
34 test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
35 test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
36 retval = -EPERM;
37 return retval;
38}
39
40/**
41 * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
42 * @cmd: iscsi cmd struct pointer
43 * @buf_off: absolute buffer offset
44 * @start_bd_off: u32 pointer to return the offset within the BD
45 * indicated by 'start_bd_idx' on which 'buf_off' falls
46 * @start_bd_idx: index of the BD on which 'buf_off' falls
47 *
48 * identifies & marks various bd info for scsi command's imm data,
49 * unsolicited data and the first solicited data seq.
50 */
51static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
52 u32 *start_bd_off, u32 *start_bd_idx)
53{
54 struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
55 u32 cur_offset = 0;
56 u32 cur_bd_idx = 0;
57
58 if (buf_off) {
59 while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
60 cur_offset += bd_tbl->buffer_length;
61 cur_bd_idx++;
62 bd_tbl++;
63 }
64 }
65
66 *start_bd_off = buf_off - cur_offset;
67 *start_bd_idx = cur_bd_idx;
68}
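/*
 * Worked example for the BD walk above: with BDs of 4096, 4096 and 2048
 * bytes and buf_off = 5000, the loop skips the first BD (cur_offset
 * becomes 4096) and stops at the second, so *start_bd_idx = 1 and
 * *start_bd_off = 5000 - 4096 = 904.
 */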
69
70/**
71 * bnx2i_setup_write_cmd_bd_info - sets up BD various information
72 * @task: transport layer's cmd struct pointer
73 *
74 * identifies & marks various bd info for the scsi command's immediate data,
75 * unsolicited data and first solicited data seq, which includes BD start
76 * index & BD buf offset. This function takes into account iscsi parameters
77 * such as immediate and unsolicited data support on this connection.
78 */
79static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
80{
81 struct bnx2i_cmd *cmd = task->dd_data;
82 u32 start_bd_offset;
83 u32 start_bd_idx;
84 u32 buffer_offset = 0;
85 u32 cmd_len = cmd->req.total_data_transfer_length;
86
87 /* if ImmediateData is turned off & InitialR2T is turned on,
88 * there will be no immediate or unsolicited data, just return.
89 */
90 if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
91 return;
92
93 /* Immediate data */
94 buffer_offset += task->imm_count;
95 if (task->imm_count == cmd_len)
96 return;
97
98 if (iscsi_task_has_unsol_data(task)) {
99 bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
100 &start_bd_offset, &start_bd_idx);
101 cmd->req.ud_buffer_offset = start_bd_offset;
102 cmd->req.ud_start_bd_index = start_bd_idx;
103 buffer_offset += task->unsol_r2t.data_length;
104 }
105
106 if (buffer_offset != cmd_len) {
107 bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
108 &start_bd_offset, &start_bd_idx);
109 if ((start_bd_offset > task->conn->session->first_burst) ||
110 (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
111 int i = 0;
112
113 iscsi_conn_printk(KERN_ALERT, task->conn,
114 "bnx2i- error, buf offset 0x%x "
115 "bd_valid %d use_sg %d\n",
116 buffer_offset, cmd->io_tbl.bd_valid,
117 scsi_sg_count(cmd->scsi_cmd));
118 for (i = 0; i < cmd->io_tbl.bd_valid; i++)
119 iscsi_conn_printk(KERN_ALERT, task->conn,
120 "bnx2i err, bd[%d]: len %x\n",
121 i, cmd->io_tbl.bd_tbl[i].\
122 buffer_length);
123 }
124 cmd->req.sd_buffer_offset = start_bd_offset;
125 cmd->req.sd_start_bd_index = start_bd_idx;
126 }
127}
128
129
130
131/**
132 * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
133 * @hba: adapter instance
134 * @cmd: iscsi cmd struct pointer
135 *
136 * map SG list
137 */
138static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
139{
140 struct scsi_cmnd *sc = cmd->scsi_cmd;
141 struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
142 struct scatterlist *sg;
143 int byte_count = 0;
144 int bd_count = 0;
145 int sg_count;
146 int sg_len;
147 u64 addr;
148 int i;
149
150 BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);
151
152 sg_count = scsi_dma_map(sc);
153
154 scsi_for_each_sg(sc, sg, sg_count, i) {
155 sg_len = sg_dma_len(sg);
156 addr = (u64) sg_dma_address(sg);
157 bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
158 bd[bd_count].buffer_addr_hi = addr >> 32;
159 bd[bd_count].buffer_length = sg_len;
160 bd[bd_count].flags = 0;
161 if (bd_count == 0)
162 bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;
163
164 byte_count += sg_len;
165 bd_count++;
166 }
167
168 if (bd_count)
169 bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
170
171 BUG_ON(byte_count != scsi_bufflen(sc));
172 return bd_count;
173}
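/*
 * The lo/hi split above packs a 64-bit DMA address into two 32-bit BD
 * fields: for addr = 0x0000000123456000, buffer_addr_lo is set to
 * 0x23456000 (addr & 0xffffffff) and buffer_addr_hi to 0x1 (addr >> 32);
 * the chip reassembles the full address from the pair.
 */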
174
175/**
176 * bnx2i_iscsi_map_sg_list - maps SG list
177 * @cmd: iscsi cmd struct pointer
178 *
179 * creates BD list table for the command
180 */
181static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
182{
183 int bd_count;
184
185 bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
186 if (!bd_count) {
187 struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
188
189 bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
190 bd[0].buffer_length = bd[0].flags = 0;
191 }
192 cmd->io_tbl.bd_valid = bd_count;
193}
194
195
196/**
197 * bnx2i_iscsi_unmap_sg_list - unmaps SG list
198 * @cmd: iscsi cmd struct pointer
199 *
200 * unmap IO buffers and invalidate the BD table
201 */
202void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
203{
204 struct scsi_cmnd *sc = cmd->scsi_cmd;
205
206 if (cmd->io_tbl.bd_valid && sc) {
207 scsi_dma_unmap(sc);
208 cmd->io_tbl.bd_valid = 0;
209 }
210}
211
212static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
213{
214 memset(&cmd->req, 0x00, sizeof(cmd->req));
215 cmd->req.op_code = 0xFF;
216 cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
217 cmd->req.bd_list_addr_hi =
218 (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
219
220}
221
222
223/**
224 * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
225 * @hba: pointer to adapter instance
226 * @bnx2i_conn: pointer to bnx2i connection structure
227 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
228 *
229 * update iscsi cid table entry with connection pointer. This enables
230 * driver to quickly get hold of connection structure pointer in
231 * completion/interrupt thread using iscsi context ID
232 */
233static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
234 struct bnx2i_conn *bnx2i_conn,
235 u32 iscsi_cid)
236{
237 if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
238 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
239 "conn bind - entry #%d not free\n", iscsi_cid);
240 return -EBUSY;
241 }
242
243 hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
244 return 0;
245}
246
247
248/**
249 * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
250 * @hba: pointer to adapter instance
251 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
252 */
253struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
254 u16 iscsi_cid)
255{
256 if (!hba->cid_que.conn_cid_tbl) {
257 printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
258 return NULL;
259
260 } else if (iscsi_cid >= hba->max_active_conns) {
261 printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
262 return NULL;
263 }
264 return hba->cid_que.conn_cid_tbl[iscsi_cid];
265}
266
267
268/**
269 * bnx2i_alloc_iscsi_cid - allocates an iscsi_cid from the free pool
270 * @hba: pointer to adapter instance
271 */
272static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
273{
274 int idx;
275
276 if (!hba->cid_que.cid_free_cnt)
277 return -1;
278
279 idx = hba->cid_que.cid_q_cons_idx;
280 hba->cid_que.cid_q_cons_idx++;
281 if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
282 hba->cid_que.cid_q_cons_idx = 0;
283
284 hba->cid_que.cid_free_cnt--;
285 return hba->cid_que.cid_que[idx];
286}
287
288
289/**
290 * bnx2i_free_iscsi_cid - returns an iscsi_cid to the free pool
291 * @hba: pointer to adapter instance
292 * @iscsi_cid: iscsi context ID to free
293 */
294static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
295{
296 int idx;
297
298 if (iscsi_cid == (u16) -1)
299 return;
300
301 hba->cid_que.cid_free_cnt++;
302
303 idx = hba->cid_que.cid_q_prod_idx;
304 hba->cid_que.cid_que[idx] = iscsi_cid;
305 hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
306 hba->cid_que.cid_q_prod_idx++;
307 if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
308 hba->cid_que.cid_q_prod_idx = 0;
309}
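/*
 * The alloc/free pair above treats cid_que[] as a circular FIFO:
 * bnx2i_alloc_iscsi_cid() pops at cid_q_cons_idx, bnx2i_free_iscsi_cid()
 * pushes at cid_q_prod_idx, and both indices wrap to 0 on reaching
 * cid_q_max_idx. For example, with max_active_conns of 128 and all CIDs
 * free, 128 back-to-back allocations walk the consumer index once around
 * the ring while cid_free_cnt drains from 128 to 0.
 */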
310
311
312/**
313 * bnx2i_setup_free_cid_que - sets up free iscsi cid queue
314 * @hba: pointer to adapter instance
315 *
316 * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table,
317 * and initializes table attributes
318 */
319static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
320{
321 int mem_size;
322 int i;
323
324 mem_size = hba->max_active_conns * sizeof(u32);
325 mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
326
327 hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
328 if (!hba->cid_que.cid_que_base)
329 return -ENOMEM;
330
331 mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
332 mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
333 hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
334 if (!hba->cid_que.conn_cid_tbl) {
335 kfree(hba->cid_que.cid_que_base);
336 hba->cid_que.cid_que_base = NULL;
337 return -ENOMEM;
338 }
339
340 hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
341 hba->cid_que.cid_q_prod_idx = 0;
342 hba->cid_que.cid_q_cons_idx = 0;
343 hba->cid_que.cid_q_max_idx = hba->max_active_conns;
344 hba->cid_que.cid_free_cnt = hba->max_active_conns;
345
346 for (i = 0; i < hba->max_active_conns; i++) {
347 hba->cid_que.cid_que[i] = i;
348 hba->cid_que.conn_cid_tbl[i] = NULL;
349 }
350 return 0;
351}
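/*
 * The mem_size arithmetic above rounds each allocation up to a whole
 * number of pages via (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK. For
 * example, on a 4 KB page system, 128 connections * sizeof(u32) = 512
 * bytes rounds up to 4096, so both tables occupy whole pages.
 */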
352
353
354/**
355 * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
356 * @hba: pointer to adapter instance
357 */
358static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
359{
360 kfree(hba->cid_que.cid_que_base);
361 hba->cid_que.cid_que_base = NULL;
362
363 kfree(hba->cid_que.conn_cid_tbl);
364 hba->cid_que.conn_cid_tbl = NULL;
365}
366
367
368/**
369 * bnx2i_alloc_ep - allocates ep structure from global pool
370 * @hba: pointer to adapter instance
371 *
372 * routine allocates a free endpoint structure from global pool and
373 * a tcp port to be used for this connection. Global resource lock,
374 * 'bnx2i_resc_lock' is held while accessing shared global data structures
375 */
376static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
377{
378 struct iscsi_endpoint *ep;
379 struct bnx2i_endpoint *bnx2i_ep;
380
381 ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
382 if (!ep) {
383 printk(KERN_ERR "bnx2i: Could not allocate ep\n");
384 return NULL;
385 }
386
387 bnx2i_ep = ep->dd_data;
388 INIT_LIST_HEAD(&bnx2i_ep->link);
389 bnx2i_ep->state = EP_STATE_IDLE;
390 bnx2i_ep->hba = hba;
391 bnx2i_ep->hba_age = hba->age;
392 hba->ofld_conns_active++;
393 init_waitqueue_head(&bnx2i_ep->ofld_wait);
394 return ep;
395}
396
397
398/**
399 * bnx2i_free_ep - free endpoint
400 * @ep: pointer to iscsi endpoint structure
401 */
402static void bnx2i_free_ep(struct iscsi_endpoint *ep)
403{
404 struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
405 unsigned long flags;
406
407 spin_lock_irqsave(&bnx2i_resc_lock, flags);
408 bnx2i_ep->state = EP_STATE_IDLE;
409 bnx2i_ep->hba->ofld_conns_active--;
410
411 bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
412 if (bnx2i_ep->conn) {
413 bnx2i_ep->conn->ep = NULL;
414 bnx2i_ep->conn = NULL;
415 }
416
417 bnx2i_ep->hba = NULL;
418 spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
419 iscsi_destroy_endpoint(ep);
420}
421
422
423/**
424 * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command
425 * @hba: adapter instance pointer
426 * @session: iscsi session pointer
427 * @cmd: iscsi command structure
428 */
429static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
430 struct bnx2i_cmd *cmd)
431{
432 struct io_bdt *io = &cmd->io_tbl;
433 struct iscsi_bd *bd;
434
435 io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
436 ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
437 &io->bd_tbl_dma, GFP_KERNEL);
438 if (!io->bd_tbl) {
439 iscsi_session_printk(KERN_ERR, session, "Could not "
440 "allocate bdt.\n");
441 return -ENOMEM;
442 }
443 io->bd_valid = 0;
444 return 0;
445}
446
447/**
448 * bnx2i_destroy_cmd_pool - destroys iscsi command pool and releases the BD
449 * table of every command in the pool
450 * @hba: adapter instance pointer
451 * @session: iscsi session pointer
452 */
453static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
454 struct iscsi_session *session)
455{
456 int i;
457
458 for (i = 0; i < session->cmds_max; i++) {
459 struct iscsi_task *task = session->cmds[i];
460 struct bnx2i_cmd *cmd = task->dd_data;
461
462 if (cmd->io_tbl.bd_tbl)
463 dma_free_coherent(&hba->pcidev->dev,
464 ISCSI_MAX_BDS_PER_CMD *
465 sizeof(struct iscsi_bd),
466 cmd->io_tbl.bd_tbl,
467 cmd->io_tbl.bd_tbl_dma);
468 }
469
470}
471
472
473/**
474 * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session
475 * @hba: adapter instance pointer
476 * @session: iscsi session pointer
477 */
478static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
479 struct iscsi_session *session)
480{
481 int i;
482
483 for (i = 0; i < session->cmds_max; i++) {
484 struct iscsi_task *task = session->cmds[i];
485 struct bnx2i_cmd *cmd = task->dd_data;
486
487 /* Anil */
488 task->hdr = &cmd->hdr;
489 task->hdr_max = sizeof(struct iscsi_hdr);
490
491 if (bnx2i_alloc_bdt(hba, session, cmd))
492 goto free_bdts;
493 }
494
495 return 0;
496
497free_bdts:
498 bnx2i_destroy_cmd_pool(hba, session);
499 return -ENOMEM;
500}
501
502
503/**
504 * bnx2i_setup_mp_bdt - allocate BD table resources
505 * @hba: pointer to adapter structure
506 *
507 * Allocate memory for dummy buffer and associated BD
508 * table to be used by middle path (MP) requests
509 */
510static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
511{
512 int rc = 0;
513 struct iscsi_bd *mp_bdt;
514 u64 addr;
515
516 hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
517 &hba->mp_bd_dma, GFP_KERNEL);
518 if (!hba->mp_bd_tbl) {
519 printk(KERN_ERR "unable to allocate Middle Path BDT\n");
520 rc = -1;
521 goto out;
522 }
523
524 hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
525 &hba->dummy_buf_dma, GFP_KERNEL);
526 if (!hba->dummy_buffer) {
527 printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
528 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
529 hba->mp_bd_tbl, hba->mp_bd_dma);
530 hba->mp_bd_tbl = NULL;
531 rc = -1;
532 goto out;
533 }
534
535 mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
536 addr = (unsigned long) hba->dummy_buf_dma;
537 mp_bdt->buffer_addr_lo = addr & 0xffffffff;
538 mp_bdt->buffer_addr_hi = addr >> 32;
539 mp_bdt->buffer_length = PAGE_SIZE;
540 mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
541 ISCSI_BD_FIRST_IN_BD_CHAIN;
542out:
543 return rc;
544}
545
546
547/**
548 * bnx2i_free_mp_bdt - releases middle path (MP) BD table resources
549 * @hba: pointer to adapter instance
550 *
551 * free MP dummy buffer and associated BD table
552 */
553static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
554{
555 if (hba->mp_bd_tbl) {
556 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
557 hba->mp_bd_tbl, hba->mp_bd_dma);
558 hba->mp_bd_tbl = NULL;
559 }
560 if (hba->dummy_buffer) {
561 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
562 hba->dummy_buffer, hba->dummy_buf_dma);
563 hba->dummy_buffer = NULL;
564 }
565 return;
566}
567
568/**
569 * bnx2i_drop_session - notifies iscsid of connection error.
570 * @hba: adapter instance pointer
571 * @session: iscsi session pointer
572 *
573 * This notifies iscsid that there is an error, so it can initiate
574 * recovery.
575 *
576 * This relies on the caller using the iscsi class iterator so the object
577 * is refcounted and does not disappear from under us.
578 */
579void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
580{
581 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
582}
583
584/**
585 * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
586 * @hba: pointer to adapter instance
587 * @ep: pointer to endpoint (transport identifier) structure
588 *
589 * EP destroy queue manager
590 */
591static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
592 struct bnx2i_endpoint *ep)
593{
594 write_lock_bh(&hba->ep_rdwr_lock);
595 list_add_tail(&ep->link, &hba->ep_destroy_list);
596 write_unlock_bh(&hba->ep_rdwr_lock);
597 return 0;
598}
599
600/**
601 * bnx2i_ep_destroy_list_del - removes an entry from the EP destroy list
602 *
603 * @hba: pointer to adapter instance
604 * @ep: pointer to endpoint (transport identifier) structure
605 *
606 * EP destroy queue manager
607 */
608static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
609 struct bnx2i_endpoint *ep)
610{
611 write_lock_bh(&hba->ep_rdwr_lock);
612 list_del_init(&ep->link);
613 write_unlock_bh(&hba->ep_rdwr_lock);
614
615 return 0;
616}
617
618/**
619 * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
620 * @hba: pointer to adapter instance
621 * @ep: pointer to endpoint (transport identifier) structure
622 *
623 * pending conn offload completion queue manager
624 */
625static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
626 struct bnx2i_endpoint *ep)
627{
628 write_lock_bh(&hba->ep_rdwr_lock);
629 list_add_tail(&ep->link, &hba->ep_ofld_list);
630 write_unlock_bh(&hba->ep_rdwr_lock);
631 return 0;
632}
633
634/**
635 * bnx2i_ep_ofld_list_del - removes an entry from the ep offload pending list
636 * @hba: pointer to adapter instance
637 * @ep: pointer to endpoint (transport identifier) structure
638 *
639 * pending conn offload completion queue manager
640 */
641static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
642 struct bnx2i_endpoint *ep)
643{
644 write_lock_bh(&hba->ep_rdwr_lock);
645 list_del_init(&ep->link);
646 write_unlock_bh(&hba->ep_rdwr_lock);
647 return 0;
648}
649
650
651/**
652 * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
653 *
654 * @hba: pointer to adapter instance
655 * @iscsi_cid: iscsi context ID to find
656 *
657 */
658struct bnx2i_endpoint *
659bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
660{
661 struct list_head *list;
662 struct list_head *tmp;
663 struct bnx2i_endpoint *ep = NULL;
664
665 read_lock_bh(&hba->ep_rdwr_lock);
666 list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
667 ep = (struct bnx2i_endpoint *)list;
668
669 if (ep->ep_iscsi_cid == iscsi_cid)
670 break;
671 ep = NULL;
672 }
673 read_unlock_bh(&hba->ep_rdwr_lock);
674
675 if (!ep)
676 printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
677 return ep;
678}
679
680
681/**
682 * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
683 * @hba: pointer to adapter instance
684 * @iscsi_cid: iscsi context ID to find
685 *
686 */
687struct bnx2i_endpoint *
688bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
689{
690 struct list_head *list;
691 struct list_head *tmp;
692 struct bnx2i_endpoint *ep = NULL;
693
694 read_lock_bh(&hba->ep_rdwr_lock);
695 list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
696 ep = (struct bnx2i_endpoint *)list;
697
698 if (ep->ep_iscsi_cid == iscsi_cid)
699 break;
700 ep = NULL;
701 }
702 read_unlock_bh(&hba->ep_rdwr_lock);
703
704 if (!ep)
705 printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
706
707 return ep;
708}
709
710/**
711 * bnx2i_setup_host_queue_size - assigns shost->can_queue param
712 * @hba: pointer to adapter instance
713 * @shost: scsi host pointer
714 *
715 * Initializes the 'can_queue' parameter based on how many outstanding
716 * commands the device can handle. Each device (5708/5709/57710) has
717 * different capabilities
718 */
719static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
720 struct Scsi_Host *shost)
721{
722 if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
723 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
724 else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
725 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
726 else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
727 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
728 else
729 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
730}
731
732
733/**
734 * bnx2i_alloc_hba - allocate and init adapter instance
735 * @cnic: cnic device pointer
736 *
737 * allocate & initialize adapter structure and call other
738 * support routines to do per adapter initialization
739 */
740struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
741{
742 struct Scsi_Host *shost;
743 struct bnx2i_hba *hba;
744
745 shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
746 if (!shost)
747 return NULL;
748 shost->dma_boundary = cnic->pcidev->dma_mask;
749 shost->transportt = bnx2i_scsi_xport_template;
750 shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
751 shost->max_channel = 0;
752 shost->max_lun = 512;
753 shost->max_cmd_len = 16;
754
755 hba = iscsi_host_priv(shost);
756 hba->shost = shost;
757 hba->netdev = cnic->netdev;
758 /* Get PCI related information and update hba struct members */
759 hba->pcidev = cnic->pcidev;
760 pci_dev_get(hba->pcidev);
761 hba->pci_did = hba->pcidev->device;
762 hba->pci_vid = hba->pcidev->vendor;
763 hba->pci_sdid = hba->pcidev->subsystem_device;
764 hba->pci_svid = hba->pcidev->subsystem_vendor;
765 hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
766 hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
767 bnx2i_identify_device(hba);
768
770 bnx2i_setup_host_queue_size(hba, shost);
771
772 if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
773 hba->regview = ioremap_nocache(hba->netdev->base_addr,
774 BNX2_MQ_CONFIG2);
775 if (!hba->regview)
776 goto ioreg_map_err;
777 } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
778 hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
779 if (!hba->regview)
780 goto ioreg_map_err;
781 }
782
783 if (bnx2i_setup_mp_bdt(hba))
784 goto mp_bdt_mem_err;
785
786 INIT_LIST_HEAD(&hba->ep_ofld_list);
787 INIT_LIST_HEAD(&hba->ep_destroy_list);
788 rwlock_init(&hba->ep_rdwr_lock);
789
790 hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
791
792 /* different values for 5708/5709/57710 */
793 hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
794
795 if (bnx2i_setup_free_cid_que(hba))
796 goto cid_que_err;
797
798 /* SQ/RQ/CQ size can be changed via sysfs interface */
799 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
800 if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
801 hba->max_sqes = sq_size;
802 else
803 hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
804 } else { /* 5706/5708/5709 */
805 if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
806 hba->max_sqes = sq_size;
807 else
808 hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
809 }
810
811 hba->max_rqes = rq_size;
812 hba->max_cqes = hba->max_sqes + rq_size;
813 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
814 if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
815 hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
816 } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
817 hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;
818
819 hba->num_ccell = hba->max_sqes / 2;
820
821 spin_lock_init(&hba->lock);
822 mutex_init(&hba->net_dev_lock);
823
824 if (iscsi_host_add(shost, &hba->pcidev->dev))
825 goto free_dump_mem;
826 return hba;
827
828free_dump_mem:
829 bnx2i_release_free_cid_que(hba);
830cid_que_err:
831 bnx2i_free_mp_bdt(hba);
832mp_bdt_mem_err:
833 if (hba->regview) {
834 iounmap(hba->regview);
835 hba->regview = NULL;
836 }
837ioreg_map_err:
838 pci_dev_put(hba->pcidev);
839 scsi_host_put(shost);
840 return NULL;
841}
842
843/**
844 * bnx2i_free_hba - releases hba structure and resources held by the adapter
845 * @hba: pointer to adapter instance
846 *
847 * frees the adapter structure and calls various cleanup routines.
848 */
849void bnx2i_free_hba(struct bnx2i_hba *hba)
850{
851 struct Scsi_Host *shost = hba->shost;
852
853 iscsi_host_remove(shost);
854 INIT_LIST_HEAD(&hba->ep_ofld_list);
855 INIT_LIST_HEAD(&hba->ep_destroy_list);
856 pci_dev_put(hba->pcidev);
857
858 if (hba->regview) {
859 iounmap(hba->regview);
860 hba->regview = NULL;
861 }
862 bnx2i_free_mp_bdt(hba);
863 bnx2i_release_free_cid_que(hba);
864 iscsi_host_free(shost);
865}
866
867/**
868 * bnx2i_conn_free_login_resources - free DMA resources used for login process
869 * @hba: pointer to adapter instance
870 * @bnx2i_conn: iscsi connection pointer
871 *
872 * Login related resources, mostly BDT & payload DMA memory, are freed
873 */
874static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
875 struct bnx2i_conn *bnx2i_conn)
876{
877 if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
878 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
879 bnx2i_conn->gen_pdu.resp_bd_tbl,
880 bnx2i_conn->gen_pdu.resp_bd_dma);
881 bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
882 }
883
884 if (bnx2i_conn->gen_pdu.req_bd_tbl) {
885 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
886 bnx2i_conn->gen_pdu.req_bd_tbl,
887 bnx2i_conn->gen_pdu.req_bd_dma);
888 bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
889 }
890
891 if (bnx2i_conn->gen_pdu.resp_buf) {
892 dma_free_coherent(&hba->pcidev->dev,
893 ISCSI_DEF_MAX_RECV_SEG_LEN,
894 bnx2i_conn->gen_pdu.resp_buf,
895 bnx2i_conn->gen_pdu.resp_dma_addr);
896 bnx2i_conn->gen_pdu.resp_buf = NULL;
897 }
898
899 if (bnx2i_conn->gen_pdu.req_buf) {
900 dma_free_coherent(&hba->pcidev->dev,
901 ISCSI_DEF_MAX_RECV_SEG_LEN,
902 bnx2i_conn->gen_pdu.req_buf,
903 bnx2i_conn->gen_pdu.req_dma_addr);
904 bnx2i_conn->gen_pdu.req_buf = NULL;
905 }
906}
907
908/**
909 * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
910 * @hba: pointer to adapter instance
911 * @bnx2i_conn: iscsi connection pointer
912 *
913 * Mgmt task DMA resources are allocated in this routine.
914 */
915static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
916 struct bnx2i_conn *bnx2i_conn)
917{
918 /* Allocate memory for login request/response buffers */
919 bnx2i_conn->gen_pdu.req_buf =
920 dma_alloc_coherent(&hba->pcidev->dev,
921 ISCSI_DEF_MAX_RECV_SEG_LEN,
922 &bnx2i_conn->gen_pdu.req_dma_addr,
923 GFP_KERNEL);
924 if (bnx2i_conn->gen_pdu.req_buf == NULL)
925 goto login_req_buf_failure;
926
927 bnx2i_conn->gen_pdu.req_buf_size = 0;
928 bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;
929
930 bnx2i_conn->gen_pdu.resp_buf =
931 dma_alloc_coherent(&hba->pcidev->dev,
932 ISCSI_DEF_MAX_RECV_SEG_LEN,
933 &bnx2i_conn->gen_pdu.resp_dma_addr,
934 GFP_KERNEL);
935 if (bnx2i_conn->gen_pdu.resp_buf == NULL)
936 goto login_resp_buf_failure;
937
938 bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
939 bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
940
941 bnx2i_conn->gen_pdu.req_bd_tbl =
942 dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
943 &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
944 if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
945 goto login_req_bd_tbl_failure;
946
947 bnx2i_conn->gen_pdu.resp_bd_tbl =
948 dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
949 &bnx2i_conn->gen_pdu.resp_bd_dma,
950 GFP_KERNEL);
951 if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
952 goto login_resp_bd_tbl_failure;
953
954 return 0;
955
956login_resp_bd_tbl_failure:
957 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
958 bnx2i_conn->gen_pdu.req_bd_tbl,
959 bnx2i_conn->gen_pdu.req_bd_dma);
960 bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
961
962login_req_bd_tbl_failure:
963 dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
964 bnx2i_conn->gen_pdu.resp_buf,
965 bnx2i_conn->gen_pdu.resp_dma_addr);
966 bnx2i_conn->gen_pdu.resp_buf = NULL;
967login_resp_buf_failure:
968 dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
969 bnx2i_conn->gen_pdu.req_buf,
970 bnx2i_conn->gen_pdu.req_dma_addr);
971 bnx2i_conn->gen_pdu.req_buf = NULL;
972login_req_buf_failure:
973 iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
974 "login resource alloc failed!!\n");
975 return -ENOMEM;
976
977}
978
979
980/**
981 * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
982 * @bnx2i_conn: iscsi connection pointer
983 *
984 * Prepares the pre-allocated buffers' BD tables before shipping requests
985 * to cnic for PDUs prepared by the 'iscsid' daemon
986 */
987static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
988{
989 struct iscsi_bd *bd_tbl;
990
991 bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;
992
993 bd_tbl->buffer_addr_hi =
994 (u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
995 bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
996 bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
997 bnx2i_conn->gen_pdu.req_buf;
998 bd_tbl->reserved0 = 0;
999 bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1000 ISCSI_BD_FIRST_IN_BD_CHAIN;
1001
1002 bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl;
1003 bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
1004 bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
1005 bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
1006 bd_tbl->reserved0 = 0;
1007 bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1008 ISCSI_BD_FIRST_IN_BD_CHAIN;
1009}
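/*
 * Both generic-PDU BDs above are single-entry chains and are therefore
 * tagged ISCSI_BD_FIRST_IN_BD_CHAIN | ISCSI_BD_LAST_IN_BD_CHAIN. The
 * request BD length covers only what 'iscsid' actually wrote into
 * req_buf (req_wr_ptr - req_buf), while the response BD always exposes
 * the full ISCSI_DEF_MAX_RECV_SEG_LEN receive buffer.
 */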
1010
1011
1012/**
1013 * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
1014 * @task: transport layer task pointer
1015 *
1016 * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
1017 * Nop-out and Logout requests flow through this path.
1018 */
1019static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
1020{
1021 struct bnx2i_cmd *cmd = task->dd_data;
1022 struct bnx2i_conn *bnx2i_conn = cmd->conn;
1023 int rc = 0;
1024 char *buf;
1025 int data_len;
1026
1027 bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
1028 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
1029 case ISCSI_OP_LOGIN:
1030 bnx2i_send_iscsi_login(bnx2i_conn, task);
1031 break;
1032 case ISCSI_OP_NOOP_OUT:
1033 data_len = bnx2i_conn->gen_pdu.req_buf_size;
1034 buf = bnx2i_conn->gen_pdu.req_buf;
1035 if (data_len)
1036 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1037 RESERVED_ITT,
1038 buf, data_len, 1);
1039 else
1040 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1041 RESERVED_ITT,
1042 NULL, 0, 1);
1043 break;
1044 case ISCSI_OP_LOGOUT:
1045 rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
1046 break;
1047 case ISCSI_OP_SCSI_TMFUNC:
1048 rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
1049 break;
1050 default:
1051 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
1052 "send_gen: unsupported op 0x%x\n",
1053 task->hdr->opcode);
1054 }
1055 return rc;
1056}
1057
1058
1059/**********************************************************************
1060 * SCSI-ML Interface
1061 **********************************************************************/
1062
1063/**
1064 * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
1065 * @sc: SCSI-ML command pointer
1066 * @cmd: iscsi cmd pointer
1067 */
1068static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
1069{
1070 u32 dword;
1071 int lpcnt;
1072 u8 *srcp;
1073 u32 *dstp;
1074 u32 scsi_lun[2];
1075
1076 int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
1077 cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
1078 cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);
1079
1080 lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
1081 srcp = (u8 *) sc->cmnd;
1082 dstp = (u32 *) cmd->req.cdb;
1083 while (lpcnt--) {
1084 memcpy(&dword, (const void *) srcp, 4);
1085 *dstp = cpu_to_be32(dword);
1086 srcp += 4;
1087 dstp++;
1088 }
1089 if (sc->cmd_len & 0x3) {
1090 dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
1091 *dstp = cpu_to_be32(dword);
1092 }
1093}
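/*
 * Worked example for the copy above: a 10-byte CDB is shipped as
 * big-endian dwords. lpcnt = 10 / sizeof(dword) = 2, so bytes 0-7 are
 * copied as two byte-swapped dwords; 10 & 0x3 is non-zero, so the
 * trailing bytes 8-9 are packed into one final dword via
 * srcp[0] | (srcp[1] << 8).
 */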
1094
1095static void bnx2i_cleanup_task(struct iscsi_task *task)
1096{
1097 struct iscsi_conn *conn = task->conn;
1098 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1099 struct bnx2i_hba *hba = bnx2i_conn->hba;
1100
1101 /*
1102 * mgmt task or cmd was never sent to us to transmit.
1103 */
1104 if (!task->sc || task->state == ISCSI_TASK_PENDING)
1105 return;
1106 /*
1107 * need to clean-up task context to claim dma buffers
1108 */
1109 if (task->state == ISCSI_TASK_ABRT_TMF) {
1110 bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
1111
1112 spin_unlock_bh(&conn->session->lock);
1113 wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
1114 msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
1115 spin_lock_bh(&conn->session->lock);
1116 }
1117 bnx2i_iscsi_unmap_sg_list(task->dd_data);
1118}
1119
1120/**
1121 * bnx2i_mtask_xmit - transmit mtask to chip for further processing
1122 * @conn: transport layer conn structure pointer
1123 * @task: transport layer command structure pointer
1124 */
1125static int
1126bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
1127{
1128 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1129 struct bnx2i_cmd *cmd = task->dd_data;
1130
1131 memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
1132
1133 bnx2i_setup_cmd_wqe_template(cmd);
1134 bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
1135 if (task->data_count) {
1136 memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
1137 task->data_count);
1138 bnx2i_conn->gen_pdu.req_wr_ptr =
1139 bnx2i_conn->gen_pdu.req_buf + task->data_count;
1140 }
1141 cmd->conn = conn->dd_data;
1142 cmd->scsi_cmd = NULL;
1143 return bnx2i_iscsi_send_generic_request(task);
1144}
1145
1146/**
1147 * bnx2i_task_xmit - transmit iscsi command to chip for further processing
1148 * @task: transport layer command structure pointer
1149 *
1150 * maps SG buffers and sends the request to the chip/firmware as an SQ WQE
1151 */
1152static int bnx2i_task_xmit(struct iscsi_task *task)
1153{
1154 struct iscsi_conn *conn = task->conn;
1155 struct iscsi_session *session = conn->session;
1156 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
1157 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1158 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1159 struct scsi_cmnd *sc = task->sc;
1160 struct bnx2i_cmd *cmd = task->dd_data;
1161 struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
1162
1163 if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
1164 return -ENOTCONN;
1165
1166 if (!bnx2i_conn->is_bound)
1167 return -ENOTCONN;
1168
1169 /*
1170 * If there is no scsi_cmnd this must be a mgmt task
1171 */
1172 if (!sc)
1173 return bnx2i_mtask_xmit(conn, task);
1174
1175 bnx2i_setup_cmd_wqe_template(cmd);
1176 cmd->req.op_code = ISCSI_OP_SCSI_CMD;
1177 cmd->conn = bnx2i_conn;
1178 cmd->scsi_cmd = sc;
1179 cmd->req.total_data_transfer_length = scsi_bufflen(sc);
1180 cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);
1181
1182 bnx2i_iscsi_map_sg_list(cmd);
1183 bnx2i_cpy_scsi_cdb(sc, cmd);
1184
1185 cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
1186 if (sc->sc_data_direction == DMA_TO_DEVICE) {
1187 cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
1188 cmd->req.itt = task->itt |
1189 (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1190 bnx2i_setup_write_cmd_bd_info(task);
1191 } else {
1192 if (scsi_bufflen(sc))
1193 cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
1194 cmd->req.itt = task->itt |
1195 (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1196 }
1197
1198 cmd->req.num_bds = cmd->io_tbl.bd_valid;
1199 if (!cmd->io_tbl.bd_valid) {
1200 cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
1201 cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
1202 cmd->req.num_bds = 1;
1203 }
1204
1205 bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
1206 return 0;
1207}
1208
1209/**
1210 * bnx2i_session_create - create a new iscsi session
1211 * @cmds_max: max commands supported
1212 * @qdepth: scsi queue depth to support
1213 * @initial_cmdsn: initial iscsi CMDSN to be used for this session
1214 *
1215 * Creates a new iSCSI session instance on given device.
1216 */
1217static struct iscsi_cls_session *
1218bnx2i_session_create(struct iscsi_endpoint *ep,
1219 uint16_t cmds_max, uint16_t qdepth,
1220 uint32_t initial_cmdsn)
1221{
1222 struct Scsi_Host *shost;
1223 struct iscsi_cls_session *cls_session;
1224 struct bnx2i_hba *hba;
1225 struct bnx2i_endpoint *bnx2i_ep;
1226
1227 if (!ep) {
1228 printk(KERN_ERR "bnx2i: missing ep.\n");
1229 return NULL;
1230 }
1231
1232 bnx2i_ep = ep->dd_data;
1233 shost = bnx2i_ep->hba->shost;
1234 hba = iscsi_host_priv(shost);
1235 if (bnx2i_adapter_ready(hba))
1236 return NULL;
1237
1238 /*
1239 * user can override hw limit as long as it is within
1240 * the min/max.
1241 */
1242 if (cmds_max > hba->max_sqes)
1243 cmds_max = hba->max_sqes;
1244 else if (cmds_max < BNX2I_SQ_WQES_MIN)
1245 cmds_max = BNX2I_SQ_WQES_MIN;
1246
1247 cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
1248 cmds_max, sizeof(struct bnx2i_cmd),
1249 initial_cmdsn, ISCSI_MAX_TARGET);
1250 if (!cls_session)
1251 return NULL;
1252
1253 if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
1254 goto session_teardown;
1255 return cls_session;
1256
1257session_teardown:
1258 iscsi_session_teardown(cls_session);
1259 return NULL;
1260}
1261
1262
1263/**
1264 * bnx2i_session_destroy - destroys iscsi session
1265 * @cls_session: pointer to iscsi cls session
1266 *
1267 * Destroys previously created iSCSI session instance and releases
1268 * all resources held by it
1269 */
1270static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
1271{
1272 struct iscsi_session *session = cls_session->dd_data;
1273 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1274 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1275
1276 bnx2i_destroy_cmd_pool(hba, session);
1277 iscsi_session_teardown(cls_session);
1278}
1279
1280
1281/**
1282 * bnx2i_conn_create - create iscsi connection instance
1283 * @cls_session: pointer to iscsi cls session
1284 * @cid: iscsi cid as per rfc (not NX2's CID terminology)
1285 *
1286 * Creates a new iSCSI connection instance for a given session
1287 */
1288static struct iscsi_cls_conn *
1289bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
1290{
1291 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1292 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1293 struct bnx2i_conn *bnx2i_conn;
1294 struct iscsi_cls_conn *cls_conn;
1295 struct iscsi_conn *conn;
1296
1297 cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
1298 cid);
1299 if (!cls_conn)
1300 return NULL;
1301 conn = cls_conn->dd_data;
1302
1303 bnx2i_conn = conn->dd_data;
1304 bnx2i_conn->cls_conn = cls_conn;
1305 bnx2i_conn->hba = hba;
1306 /* 'ep' ptr will be assigned in bind() call */
1307 bnx2i_conn->ep = NULL;
1308 init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
1309
1310 if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
1311 iscsi_conn_printk(KERN_ALERT, conn,
1312 "conn_new: login resc alloc failed!!\n");
1313 goto free_conn;
1314 }
1315
1316 return cls_conn;
1317
1318free_conn:
1319 iscsi_conn_teardown(cls_conn);
1320 return NULL;
1321}
1322
1323/**
1324 * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
1325 * @cls_session: pointer to iscsi cls session
1326 * @cls_conn: pointer to iscsi cls conn
1327 * @transport_fd: 64-bit EP handle
1328 * @is_leading: leading connection on this session?
1329 *
1330 * Binds together iSCSI session instance, iSCSI connection instance
1331 * and the TCP connection. This routine returns an error code if the
1332 * TCP connection does not belong to the device the iSCSI sess/conn
1333 * is bound to
1334 */
1335static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
1336 struct iscsi_cls_conn *cls_conn,
1337 uint64_t transport_fd, int is_leading)
1338{
1339 struct iscsi_conn *conn = cls_conn->dd_data;
1340 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1341 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1342 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1343 struct bnx2i_endpoint *bnx2i_ep;
1344 struct iscsi_endpoint *ep;
1345 int ret_code;
1346
1347 ep = iscsi_lookup_endpoint(transport_fd);
1348 if (!ep)
1349 return -EINVAL;
1350
1351 bnx2i_ep = ep->dd_data;
1352 if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
1353 (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
1354 /* Peer disconnected via FIN or RST */
1355 return -EINVAL;
1356
1357 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1358 return -EINVAL;
1359
1360 if (bnx2i_ep->hba != hba) {
1361 /* Error - TCP connection does not belong to this device
1362 */
1363 iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
1364 "conn bind, ep=0x%p (%s) does not",
1365 bnx2i_ep, bnx2i_ep->hba->netdev->name);
1366 iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
1367 "belong to hba (%s)\n",
1368 hba->netdev->name);
1369 return -EEXIST;
1370 }
1371
1372 bnx2i_ep->conn = bnx2i_conn;
1373 bnx2i_conn->ep = bnx2i_ep;
1374 bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
1375 bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
1376 bnx2i_conn->is_bound = 1;
1377
1378 ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
1379 bnx2i_ep->ep_iscsi_cid);
1380
1381 /* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
1382 * driver needs to explicitly replenish RQ index during setup.
1383 */
1384 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1385 bnx2i_put_rq_buf(bnx2i_conn, 0);
1386
1387 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
1388 return ret_code;
1389}
1390
1391
1392/**
1393 * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
1394 * @cls_conn: pointer to iscsi cls conn
1395 *
1396 * Destroy an iSCSI connection instance and release memory resources held by
1397 * this connection
1398 */
1399static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
1400{
1401 struct iscsi_conn *conn = cls_conn->dd_data;
1402 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1403 struct Scsi_Host *shost;
1404 struct bnx2i_hba *hba;
1405
1406 shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
1407 hba = iscsi_host_priv(shost);
1408
1409 bnx2i_conn_free_login_resources(hba, bnx2i_conn);
1410 iscsi_conn_teardown(cls_conn);
1411}
1412
1413
1414/**
1415 * bnx2i_conn_get_param - return iscsi connection parameter to caller
1416 * @cls_conn: pointer to iscsi cls conn
1417 * @param: parameter type identifier
1418 * @buf: buffer pointer
1419 *
1420 * returns iSCSI connection parameters
1421 */
1422static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
1423 enum iscsi_param param, char *buf)
1424{
1425 struct iscsi_conn *conn = cls_conn->dd_data;
1426 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1427 int len = 0;
1428
1429 switch (param) {
1430 case ISCSI_PARAM_CONN_PORT:
1431 if (bnx2i_conn->ep)
1432 len = sprintf(buf, "%hu\n",
1433 bnx2i_conn->ep->cm_sk->dst_port);
1434 break;
1435 case ISCSI_PARAM_CONN_ADDRESS:
1436 if (bnx2i_conn->ep)
1437 len = sprintf(buf, NIPQUAD_FMT "\n",
1438 NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
1439 break;
1440 default:
1441 return iscsi_conn_get_param(cls_conn, param, buf);
1442 }
1443
1444 return len;
1445}
1446
1447/**
1448 * bnx2i_host_get_param - returns host (adapter) related parameters
1449 * @shost: scsi host pointer
1450 * @param: parameter type identifier
1451 * @buf: buffer pointer
1452 */
1453static int bnx2i_host_get_param(struct Scsi_Host *shost,
1454 enum iscsi_host_param param, char *buf)
1455{
1456 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1457 int len = 0;
1458
1459 switch (param) {
1460 case ISCSI_HOST_PARAM_HWADDRESS:
1461 len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
1462 break;
1463 case ISCSI_HOST_PARAM_NETDEV_NAME:
1464 len = sprintf(buf, "%s\n", hba->netdev->name);
1465 break;
1466 default:
1467 return iscsi_host_get_param(shost, param, buf);
1468 }
1469 return len;
1470}
1471
1472/**
1473 * bnx2i_conn_start - completes iscsi connection migration to FFP
1474 * @cls_conn: pointer to iscsi cls conn
1475 *
1476 * last call in FFP migration to handover iscsi conn to the driver
1477 */
1478static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
1479{
1480 struct iscsi_conn *conn = cls_conn->dd_data;
1481 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1482
1483 bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
1484 bnx2i_update_iscsi_conn(conn);
1485
1486 /*
1487 * this should normally not sleep for a long time so it should
1488 * not disrupt the caller.
1489 */
1490 bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
1491 bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1492 bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
1493 add_timer(&bnx2i_conn->ep->ofld_timer);
1494 /* update iSCSI context for this conn, wait for CNIC to complete */
1495 wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
1496 bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);
1497
1498 if (signal_pending(current))
1499 flush_signals(current);
1500 del_timer_sync(&bnx2i_conn->ep->ofld_timer);
1501
1502 iscsi_conn_start(cls_conn);
1503 return 0;
1504}
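/*
 * Sketch of the bounded-wait pattern used above (and by the offload and
 * teardown paths): a one-shot timer guarantees the wait queue is woken
 * even if the CNIC completion never arrives. Assuming bnx2i_ep_ofld_timer
 * flips ep->state and wakes ep->ofld_wait on expiry (WAIT_STATE below
 * stands for whichever EP_STATE_* value the caller armed):
 *
 *	ep->ofld_timer.expires = timeout + jiffies;
 *	ep->ofld_timer.function = bnx2i_ep_ofld_timer;
 *	ep->ofld_timer.data = (unsigned long) ep;
 *	add_timer(&ep->ofld_timer);
 *	wait_event_interruptible(ep->ofld_wait, ep->state != WAIT_STATE);
 *	del_timer_sync(&ep->ofld_timer);
 */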
1505
1506
1507/**
1508 * bnx2i_conn_get_stats - returns iSCSI stats
1509 * @cls_conn: pointer to iscsi cls conn
1510 * @stats: pointer to iscsi statistic struct
1511 */
1512static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1513 struct iscsi_stats *stats)
1514{
1515 struct iscsi_conn *conn = cls_conn->dd_data;
1516
1517 stats->txdata_octets = conn->txdata_octets;
1518 stats->rxdata_octets = conn->rxdata_octets;
1519 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
1520 stats->dataout_pdus = conn->dataout_pdus_cnt;
1521 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
1522 stats->datain_pdus = conn->datain_pdus_cnt;
1523 stats->r2t_pdus = conn->r2t_pdus_cnt;
1524 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
1525 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
1526 stats->digest_err = 0;
1527 stats->timeout_err = 0;
1528 /* export a single custom stat; index 0 must match custom_length */
1529 strcpy(stats->custom[0].desc, "eh_abort_cnt");
1530 stats->custom[0].value = conn->eh_abort_cnt;
1531 stats->custom_length = 1;
1532}
1533
1534
1535/**
1536 * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices
1537 * @dst_addr: target IP address
1538 *
1539 * check if route resolves to BNX2 device
1540 */
1541static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
1542{
1543 struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
1544 struct bnx2i_hba *hba;
1545 struct cnic_dev *cnic = NULL;
1546
1547 bnx2i_reg_dev_all();
1548
1549 hba = get_adapter_list_head();
1550 if (hba && hba->cnic)
1551 cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
1552 if (!cnic) {
1553 printk(KERN_ALERT "bnx2i: no route, "
1554 "can't connect using cnic\n");
1555 goto no_nx2_route;
1556 }
1557 hba = bnx2i_find_hba_for_cnic(cnic);
1558 if (!hba)
1559 goto no_nx2_route;
1560
1561 if (bnx2i_adapter_ready(hba)) {
1562 printk(KERN_ALERT "bnx2i: check route, hba not ready\n");
1563 goto no_nx2_route;
1564 }
1565 if (hba->netdev->mtu > hba->mtu_supported) {
1566 printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
1567 hba->netdev->name, hba->netdev->mtu);
1568 printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
1569 hba->mtu_supported);
1570 goto no_nx2_route;
1571 }
1572 return hba;
1573no_nx2_route:
1574 return NULL;
1575}
1576
1577
1578/**
1579 * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
1580 * @hba: pointer to adapter instance
1581 * @ep: endpoint (transport identifier) structure
1582 *
1583 * destroys cm_sock structure and on chip iscsi context
1584 */
1585static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
1586 struct bnx2i_endpoint *ep)
1587{
1588 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
1589 hba->cnic->cm_destroy(ep->cm_sk);
1590
1591 if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
1592 ep->state = EP_STATE_DISCONN_COMPL;
1593
1594 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
1595 ep->state == EP_STATE_DISCONN_TIMEDOUT) {
1596 printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump,"
1597 " NW/PCIe trace, driver msgs to developers"
1598 " for analysis\n");
1599 return 1;
1600 }
1601
1602 ep->state = EP_STATE_CLEANUP_START;
1603 init_timer(&ep->ofld_timer);
1604 ep->ofld_timer.expires = 10*HZ + jiffies;
1605 ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1606 ep->ofld_timer.data = (unsigned long) ep;
1607 add_timer(&ep->ofld_timer);
1608
1609 bnx2i_ep_destroy_list_add(hba, ep);
1610
1611 /* destroy iSCSI context, wait for it to complete */
1612 bnx2i_send_conn_destroy(hba, ep);
1613 wait_event_interruptible(ep->ofld_wait,
1614 (ep->state != EP_STATE_CLEANUP_START));
1615
1616 if (signal_pending(current))
1617 flush_signals(current);
1618 del_timer_sync(&ep->ofld_timer);
1619
1620 bnx2i_ep_destroy_list_del(hba, ep);
1621
1622 if (ep->state != EP_STATE_CLEANUP_CMPL)
1623 /* should never happen */
1624 printk(KERN_ALERT "bnx2i - conn destroy failed\n");
1625
1626 return 0;
1627}
1628
1629
1630/**
1631 * bnx2i_ep_connect - establish TCP connection to target portal
1632 * @shost: scsi host
1633 * @dst_addr: target IP address
1634 * @non_blocking: blocking or non-blocking call
1635 *
1636 * this routine initiates the TCP/IP connection by invoking the Option-2 i/f
1637 * with l5_core and the CNIC. This is a multi-step process of resolving the
1638 * route to the target, creating an iscsi connection context, handshaking
1639 * with the CNIC module to create/initialize the socket struct and finally
1640 * sending down the option-2 request to complete the TCP 3-way handshake
1641 */
1642static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1643 struct sockaddr *dst_addr,
1644 int non_blocking)
1645{
1646 u32 iscsi_cid = BNX2I_CID_RESERVED;
1647 struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
1648 struct sockaddr_in6 *desti6;
1649 struct bnx2i_endpoint *bnx2i_ep;
1650 struct bnx2i_hba *hba;
1651 struct cnic_dev *cnic;
1652 struct cnic_sockaddr saddr;
1653 struct iscsi_endpoint *ep;
1654 int rc = 0;
1655
1656 if (shost)
1657 /* driver is given scsi host to work with */
1658 hba = iscsi_host_priv(shost);
1659 else
1660 /*
1661 * check if the given destination can be reached through
1662 * a iscsi capable NetXtreme2 device
1663 */
1664 hba = bnx2i_check_route(dst_addr);
1665 if (!hba) {
1666 rc = -ENOMEM;
1667 goto check_busy;
1668 }
1669
1670 cnic = hba->cnic;
1671 ep = bnx2i_alloc_ep(hba);
1672 if (!ep) {
1673 rc = -ENOMEM;
1674 goto check_busy;
1675 }
1676 bnx2i_ep = ep->dd_data;
1677
1678 mutex_lock(&hba->net_dev_lock);
1679 if (bnx2i_adapter_ready(hba)) {
1680 rc = -EPERM;
1681 goto net_if_down;
1682 }
1683
1684 bnx2i_ep->state = EP_STATE_IDLE;
1685 bnx2i_ep->ep_iscsi_cid = (u16) -1;
1686 bnx2i_ep->num_active_cmds = 0;
1687 iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
1688 if (iscsi_cid == -1) {
1689 printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n");
1690 rc = -ENOMEM;
1691 goto iscsi_cid_err;
1692 }
1693 bnx2i_ep->hba_age = hba->age;
1694
1695 rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
1696 if (rc != 0) {
1697 printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n");
1698 rc = -ENOMEM;
1699 goto qp_resc_err;
1700 }
1701
1702 bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
1703 bnx2i_ep->state = EP_STATE_OFLD_START;
1704 bnx2i_ep_ofld_list_add(hba, bnx2i_ep);
1705
1706 init_timer(&bnx2i_ep->ofld_timer);
1707 bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
1708 bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1709 bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
1710 add_timer(&bnx2i_ep->ofld_timer);
1711
1712 bnx2i_send_conn_ofld_req(hba, bnx2i_ep);
1713
1714 /* Wait for CNIC hardware to setup conn context and return 'cid' */
1715 wait_event_interruptible(bnx2i_ep->ofld_wait,
1716 bnx2i_ep->state != EP_STATE_OFLD_START);
1717
1718 if (signal_pending(current))
1719 flush_signals(current);
1720 del_timer_sync(&bnx2i_ep->ofld_timer);
1721
1722 bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
1723
1724 if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
1725 rc = -ENOSPC;
1726 goto conn_failed;
1727 }
1728
1729 rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
1730 iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
1731 if (rc) {
1732 rc = -EINVAL;
1733 goto conn_failed;
1734 }
1735
1736 bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
1737 bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
1738 clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);
1739
1740 memset(&saddr, 0, sizeof(saddr));
1741 if (dst_addr->sa_family == AF_INET) {
1742 desti = (struct sockaddr_in *) dst_addr;
1743 saddr.remote.v4 = *desti;
1744 saddr.local.v4.sin_family = desti->sin_family;
1745 } else if (dst_addr->sa_family == AF_INET6) {
1746 desti6 = (struct sockaddr_in6 *) dst_addr;
1747 saddr.remote.v6 = *desti6;
1748 saddr.local.v6.sin6_family = desti6->sin6_family;
1749 }
1750
1751 bnx2i_ep->timestamp = jiffies;
1752 bnx2i_ep->state = EP_STATE_CONNECT_START;
1753 if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
1754 rc = -EINVAL;
1755 goto conn_failed;
1756 } else
1757 rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
1758
1759 if (rc)
1760 goto release_ep;
1761
1762 if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
1763 goto release_ep;
1764 mutex_unlock(&hba->net_dev_lock);
1765 return ep;
1766
1767release_ep:
1768 if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
1769 mutex_unlock(&hba->net_dev_lock);
1770 return ERR_PTR(rc);
1771 }
1772conn_failed:
1773net_if_down:
1774iscsi_cid_err:
1775 bnx2i_free_qp_resc(hba, bnx2i_ep);
1776qp_resc_err:
1777 bnx2i_free_ep(ep);
1778 mutex_unlock(&hba->net_dev_lock);
1779check_busy:
1780 bnx2i_unreg_dev_all();
1781 return ERR_PTR(rc);
1782}
1783
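For reference, the AF_INET/AF_INET6 branch near the end of the function only has to copy the caller's sockaddr into the matching slot of the cnic_sockaddr union. Below is a self-contained sketch of that dispatch; the conn_addr union and fill_conn_addr helper are made-up stand-ins for illustration, not the driver's types.

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

/* stand-in for struct cnic_sockaddr: one slot per address family */
union conn_addr {
	struct sockaddr_in  v4;
	struct sockaddr_in6 v6;
};

static int fill_conn_addr(union conn_addr *dst, const struct sockaddr *sa)
{
	memset(dst, 0, sizeof(*dst));
	if (sa->sa_family == AF_INET)
		dst->v4 = *(const struct sockaddr_in *)sa;
	else if (sa->sa_family == AF_INET6)
		dst->v6 = *(const struct sockaddr_in6 *)sa;
	else
		return -1;	/* unsupported address family */
	return 0;
}

int main(void)
{
	struct sockaddr_in target = { .sin_family = AF_INET,
				      .sin_port   = htons(3260) };
	union conn_addr addr;

	inet_pton(AF_INET, "192.0.2.10", &target.sin_addr);
	if (!fill_conn_addr(&addr, (struct sockaddr *)&target))
		printf("copied IPv4 portal, port %u\n",
		       ntohs(addr.v4.sin_port));
	return 0;
}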
1784
1785/**
1786 * bnx2i_ep_poll - polls for TCP connection establishment
1787 * @ep:			TCP connection (endpoint) handle
1788 * @timeout_ms:		timeout value in milliseconds
1789 *
1790 * polls for TCP connect request to complete
1791 */
1792static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1793{
1794 struct bnx2i_endpoint *bnx2i_ep;
1795 int rc = 0;
1796
1797 bnx2i_ep = ep->dd_data;
1798 if ((bnx2i_ep->state == EP_STATE_IDLE) ||
1799 (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
1800 (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
1801 return -1;
1802 if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
1803 return 1;
1804
1805 rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
1806 ((bnx2i_ep->state ==
1807 EP_STATE_OFLD_FAILED) ||
1808 (bnx2i_ep->state ==
1809 EP_STATE_CONNECT_FAILED) ||
1810 (bnx2i_ep->state ==
1811 EP_STATE_CONNECT_COMPL)),
1812 msecs_to_jiffies(timeout_ms));
1813 if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
1814 rc = -1;
1815
1816 if (rc > 0)
1817 return 1;
1818 else if (!rc)
1819 return 0; /* timeout */
1820 else
1821 return rc;
1822}
1823
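The mapping of the wait result onto the transport's poll contract is worth spelling out: a positive return means connected and a negative return means failure, while the "return 0" timeout branch above is unreachable, since a timeout (!rc) has already been folded into rc = -1 two lines earlier. A hedged restatement of the actual behaviour as a pure function, with a small driver program:

#include <stdio.h>

/* rc: result of a wait_event_interruptible_timeout()-style call
 *     (>0 condition met in time, 0 timed out, <0 signal pending);
 * ofld_failed: endpoint ended in EP_STATE_OFLD_FAILED.
 */
static int poll_result(long rc, int ofld_failed)
{
	if (rc == 0 || ofld_failed)
		return -1;	/* timeout or offload failure: give up */
	if (rc > 0)
		return 1;	/* woke up in time: report success */
	return (int)rc;		/* interrupted: propagate the error */
}

int main(void)
{
	printf("%d %d %d\n",
	       poll_result(5, 0),	/* 1: connected */
	       poll_result(0, 0),	/* -1: timed out */
	       poll_result(-512, 0));	/* -512: interrupted by signal */
	return 0;
}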
1824
1825/**
1826 * bnx2i_ep_tcp_conn_active - check EP state transition
1827 * @bnx2i_ep: endpoint pointer
1828 *
1829 * check if underlying TCP connection is active
1830 */
1831static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
1832{
1833 int ret;
1834 int cnic_dev_10g = 0;
1835
1836 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1837 cnic_dev_10g = 1;
1838
1839 switch (bnx2i_ep->state) {
1840 case EP_STATE_CONNECT_START:
1841 case EP_STATE_CLEANUP_FAILED:
1842 case EP_STATE_OFLD_FAILED:
1843 case EP_STATE_DISCONN_TIMEDOUT:
1844 ret = 0;
1845 break;
1846 case EP_STATE_CONNECT_COMPL:
1847 case EP_STATE_ULP_UPDATE_START:
1848 case EP_STATE_ULP_UPDATE_COMPL:
1849 case EP_STATE_TCP_FIN_RCVD:
1850 case EP_STATE_ULP_UPDATE_FAILED:
1851 ret = 1;
1852 break;
1853 case EP_STATE_TCP_RST_RCVD:
1854 ret = 0;
1855 break;
1856 case EP_STATE_CONNECT_FAILED:
1857 if (cnic_dev_10g)
1858 ret = 1;
1859 else
1860 ret = 0;
1861 break;
1862 default:
1863 ret = 0;
1864 }
1865
1866 return ret;
1867}
1868
1869
1870/**
1871 * bnx2i_ep_disconnect - executes TCP connection teardown process
1872 * @ep: TCP connection (endpoint) handle
1873 *
1874 * executes TCP connection teardown process
1875 */
1876static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
1877{
1878 struct bnx2i_endpoint *bnx2i_ep;
1879 struct bnx2i_conn *bnx2i_conn = NULL;
1880 struct iscsi_session *session = NULL;
1881 struct iscsi_conn *conn;
1882 struct cnic_dev *cnic;
1883 struct bnx2i_hba *hba;
1884
1885 bnx2i_ep = ep->dd_data;
1886
1887	/* driver should not attempt connection cleanup until TCP_CONNECT
1888	 * completes either successfully or fails. Timeout is 9 secs, so
1889	 * wait for it to complete
1890	 */
1891 while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
1892 !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
1893 msleep(250);
1894
1895 if (bnx2i_ep->conn) {
1896 bnx2i_conn = bnx2i_ep->conn;
1897 conn = bnx2i_conn->cls_conn->dd_data;
1898 session = conn->session;
1899
1900 spin_lock_bh(&session->lock);
1901 bnx2i_conn->is_bound = 0;
1902 spin_unlock_bh(&session->lock);
1903 }
1904
1905 hba = bnx2i_ep->hba;
1906 if (bnx2i_ep->state == EP_STATE_IDLE)
1907 goto return_bnx2i_ep;
1908 cnic = hba->cnic;
1909
1910 mutex_lock(&hba->net_dev_lock);
1911
1912 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
1913 goto free_resc;
1914 if (bnx2i_ep->hba_age != hba->age)
1915 goto free_resc;
1916
1917 if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
1918		goto destroy_conn;
1919
1920 bnx2i_ep->state = EP_STATE_DISCONN_START;
1921
1922 init_timer(&bnx2i_ep->ofld_timer);
1923 bnx2i_ep->ofld_timer.expires = 10*HZ + jiffies;
1924 bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
1925 bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
1926 add_timer(&bnx2i_ep->ofld_timer);
1927
1928 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
1929 int close = 0;
1930
1931 if (session) {
1932 spin_lock_bh(&session->lock);
1933 if (session->state == ISCSI_STATE_LOGGING_OUT)
1934 close = 1;
1935 spin_unlock_bh(&session->lock);
1936 }
1937 if (close)
1938 cnic->cm_close(bnx2i_ep->cm_sk);
1939 else
1940 cnic->cm_abort(bnx2i_ep->cm_sk);
1941 } else
1942 goto free_resc;
1943
1944 /* wait for option-2 conn teardown */
1945 wait_event_interruptible(bnx2i_ep->ofld_wait,
1946 bnx2i_ep->state != EP_STATE_DISCONN_START);
1947
1948 if (signal_pending(current))
1949 flush_signals(current);
1950 del_timer_sync(&bnx2i_ep->ofld_timer);
1951
1952destroy_conn:
1953 if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
1954 mutex_unlock(&hba->net_dev_lock);
1955 return;
1956 }
1957free_resc:
1958 mutex_unlock(&hba->net_dev_lock);
1959 bnx2i_free_qp_resc(hba, bnx2i_ep);
1960return_bnx2i_ep:
1961 if (bnx2i_conn)
1962 bnx2i_conn->ep = NULL;
1963
1964 bnx2i_free_ep(ep);
1965
1966 if (!hba->ofld_conns_active)
1967 bnx2i_unreg_dev_all();
1968}
1969
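The branch above chooses between a graceful close (cnic->cm_close, taken only when the session is logging out) and an abortive teardown (cnic->cm_abort). For ordinary TCP sockets the same graceful-versus-abortive choice is made with SO_LINGER; the sketch below is purely an analogy to illustrate the distinction, not the cnic API.

#include <sys/socket.h>
#include <unistd.h>

/*
 * graceful == 1: ordinary close(), FIN handshake -- like cm_close().
 * graceful == 0: linger time 0 forces an RST on close() -- loosely
 *                like cm_abort().
 */
static int tcp_disconnect(int fd, int graceful)
{
	if (!graceful) {
		struct linger lg = { .l_onoff = 1, .l_linger = 0 };

		if (setsockopt(fd, SOL_SOCKET, SO_LINGER,
			       &lg, sizeof(lg)) < 0)
			return -1;
	}
	return close(fd);
}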
1970
1971/**
1972 * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
1973 * @shost: scsi host pointer
1974 * @params: pointer to buffer containing iscsi path message
1975 */
1976static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
1977{
1978 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1979 char *buf = (char *) params;
1980 u16 len = sizeof(*params);
1981
1982 /* handled by cnic driver */
1983 hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
1984 len);
1985
1986 return 0;
1987}
1988
1989
1990/*
1991 * 'Scsi_Host_Template' structure and 'iscsi_transport' structure template
1992 * used while registering with the scsi host and iSCSI transport module.
1993 */
1994static struct scsi_host_template bnx2i_host_template = {
1995 .module = THIS_MODULE,
1996 .name = "Broadcom Offload iSCSI Initiator",
1997 .proc_name = "bnx2i",
1998 .queuecommand = iscsi_queuecommand,
1999 .eh_abort_handler = iscsi_eh_abort,
2000 .eh_device_reset_handler = iscsi_eh_device_reset,
2001 .eh_target_reset_handler = iscsi_eh_target_reset,
2002 .can_queue = 1024,
2003 .max_sectors = 127,
2004 .cmd_per_lun = 32,
2005 .this_id = -1,
2006 .use_clustering = ENABLE_CLUSTERING,
2007 .sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
2008 .shost_attrs = bnx2i_dev_attributes,
2009};
2010
2011struct iscsi_transport bnx2i_iscsi_transport = {
2012 .owner = THIS_MODULE,
2013 .name = "bnx2i",
2014 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
2015 CAP_MULTI_R2T | CAP_DATADGST |
2016 CAP_DATA_PATH_OFFLOAD,
2017 .param_mask = ISCSI_MAX_RECV_DLENGTH |
2018 ISCSI_MAX_XMIT_DLENGTH |
2019 ISCSI_HDRDGST_EN |
2020 ISCSI_DATADGST_EN |
2021 ISCSI_INITIAL_R2T_EN |
2022 ISCSI_MAX_R2T |
2023 ISCSI_IMM_DATA_EN |
2024 ISCSI_FIRST_BURST |
2025 ISCSI_MAX_BURST |
2026 ISCSI_PDU_INORDER_EN |
2027 ISCSI_DATASEQ_INORDER_EN |
2028 ISCSI_ERL |
2029 ISCSI_CONN_PORT |
2030 ISCSI_CONN_ADDRESS |
2031 ISCSI_EXP_STATSN |
2032 ISCSI_PERSISTENT_PORT |
2033 ISCSI_PERSISTENT_ADDRESS |
2034 ISCSI_TARGET_NAME | ISCSI_TPGT |
2035 ISCSI_USERNAME | ISCSI_PASSWORD |
2036 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
2037 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
2038 ISCSI_LU_RESET_TMO |
2039 ISCSI_PING_TMO | ISCSI_RECV_TMO |
2040 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
2041 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME,
2042 .create_session = bnx2i_session_create,
2043 .destroy_session = bnx2i_session_destroy,
2044 .create_conn = bnx2i_conn_create,
2045 .bind_conn = bnx2i_conn_bind,
2046 .destroy_conn = bnx2i_conn_destroy,
2047 .set_param = iscsi_set_param,
2048 .get_conn_param = bnx2i_conn_get_param,
2049 .get_session_param = iscsi_session_get_param,
2050 .get_host_param = bnx2i_host_get_param,
2051 .start_conn = bnx2i_conn_start,
2052 .stop_conn = iscsi_conn_stop,
2053 .send_pdu = iscsi_conn_send_pdu,
2054 .xmit_task = bnx2i_task_xmit,
2055 .get_stats = bnx2i_conn_get_stats,
2056 /* TCP connect - disconnect - option-2 interface calls */
2057 .ep_connect = bnx2i_ep_connect,
2058 .ep_poll = bnx2i_ep_poll,
2059 .ep_disconnect = bnx2i_ep_disconnect,
2060 .set_path = bnx2i_nl_set_path,
2061 /* Error recovery timeout call */
2062 .session_recovery_timedout = iscsi_session_recovery_timedout,
2063 .cleanup_task = bnx2i_cleanup_task,
2064};
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
new file mode 100644
index 00000000000..96426b751eb
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -0,0 +1,142 @@
1/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
2 *
3 * Copyright (c) 2004 - 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
10 */
11
12#include "bnx2i.h"
13
14/**
15 * bnx2i_dev_to_hba - maps dev pointer to adapter struct
16 * @dev: device pointer
17 *
18 * Map device to hba structure
19 */
20static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev)
21{
22 struct Scsi_Host *shost = class_to_shost(dev);
23 return iscsi_host_priv(shost);
24}
25
26
27/**
28 * bnx2i_show_sq_info - returns currently configured send queue (SQ) size
29 * @dev: device pointer
30 * @buf: buffer to return current SQ size parameter
31 *
32 * Returns the current SQ size parameter; this parameter determines the
33 * number of outstanding iSCSI commands supported on a connection
34 */
35static ssize_t bnx2i_show_sq_info(struct device *dev,
36 struct device_attribute *attr, char *buf)
37{
38 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
39
40 return sprintf(buf, "0x%x\n", hba->max_sqes);
41}
42
43
44/**
45 * bnx2i_set_sq_info - update send queue (SQ) size parameter
46 * @dev: device pointer
47 * @buf: buffer containing new SQ size parameter
48 * @count: parameter buffer size
49 *
50 * Interface for user to change shared queue size allocated for each conn.
51 * Must be within SQ limits and a power of 2. For the latter this is needed
52 * because of how libiscsi preallocates tasks.
53 */
54static ssize_t bnx2i_set_sq_info(struct device *dev,
55 struct device_attribute *attr,
56 const char *buf, size_t count)
57{
58 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
59 u32 val;
60 int max_sq_size;
61
62 if (hba->ofld_conns_active)
63 goto skip_config;
64
65 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
66 max_sq_size = BNX2I_5770X_SQ_WQES_MAX;
67 else
68 max_sq_size = BNX2I_570X_SQ_WQES_MAX;
69
70 if (sscanf(buf, " 0x%x ", &val) > 0) {
71 if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) &&
72 (is_power_of_2(val)))
73 hba->max_sqes = val;
74 }
75
76 return count;
77
78skip_config:
79 printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n");
80 return 0;
81}
82
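The store handler above accepts a new SQ size only when it parses as hex, lies within the device limits, and is a power of two (libiscsi preallocates a power-of-two task array). The usual constant-time power-of-two test is v && !(v & (v - 1)); a small standalone sketch of the same validation follows, where the SQ_WQES_* limit macros are illustrative values, not the driver's real BNX2I_* constants.

#include <stdbool.h>
#include <stdio.h>

#define SQ_WQES_MIN 16		/* illustrative bounds only */
#define SQ_WQES_MAX 2048

static bool is_power_of_2(unsigned int v)
{
	return v && !(v & (v - 1));
}

static bool sq_size_valid(const char *buf, unsigned int *out)
{
	unsigned int val;

	if (sscanf(buf, " %x ", &val) != 1)	/* %x accepts a 0x prefix */
		return false;
	if (val < SQ_WQES_MIN || val > SQ_WQES_MAX || !is_power_of_2(val))
		return false;
	*out = val;
	return true;
}

int main(void)
{
	unsigned int v;

	printf("0x80 -> %d\n", sq_size_valid("0x80", &v)); /* 1, v = 128 */
	printf("0x90 -> %d\n", sq_size_valid("0x90", &v)); /* 0: not pow2 */
	return 0;
}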
83
84/**
85 * bnx2i_show_ccell_info - returns command cell (HQ) size
86 * @dev: device pointer
87 * @buf: buffer to return current CCELL (HQ) size parameter
88 *
89 * returns per-connection TCP history queue size parameter
90 */
91static ssize_t bnx2i_show_ccell_info(struct device *dev,
92 struct device_attribute *attr, char *buf)
93{
94 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
95
96 return sprintf(buf, "0x%x\n", hba->num_ccell);
97}
98
99
100/**
101 * bnx2i_set_ccell_info - set command cell (HQ) size
102 * @dev: device pointer
103 * @buf: buffer containing new CCELL size parameter
104 * @count: parameter buffer size
105 *
106 * updates per-connection TCP history queue size parameter
107 */
108static ssize_t bnx2i_set_ccell_info(struct device *dev,
109 struct device_attribute *attr,
110 const char *buf, size_t count)
111{
112 u32 val;
113 struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
114
115 if (hba->ofld_conns_active)
116 goto skip_config;
117
118 if (sscanf(buf, " 0x%x ", &val) > 0) {
119 if ((val >= BNX2I_CCELLS_MIN) &&
120 (val <= BNX2I_CCELLS_MAX)) {
121 hba->num_ccell = val;
122 }
123 }
124
125 return count;
126
127skip_config:
128 printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n");
129 return 0;
130}
131
132
133static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
134 bnx2i_show_sq_info, bnx2i_set_sq_info);
135static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
136 bnx2i_show_ccell_info, bnx2i_set_ccell_info);
137
138struct device_attribute *bnx2i_dev_attributes[] = {
139 &dev_attr_sq_size,
140 &dev_attr_num_ccell,
141 NULL
142};
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h
index 59b0958d2d1..e3133b58e59 100644
--- a/drivers/scsi/cxgb3i/cxgb3i.h
+++ b/drivers/scsi/cxgb3i/cxgb3i.h
@@ -144,7 +144,6 @@ struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *);
144void cxgb3i_adapter_open(struct t3cdev *); 144void cxgb3i_adapter_open(struct t3cdev *);
145void cxgb3i_adapter_close(struct t3cdev *); 145void cxgb3i_adapter_close(struct t3cdev *);
146 146
147struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *);
148struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *, 147struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *,
149 struct net_device *); 148 struct net_device *);
150void cxgb3i_hba_host_remove(struct cxgb3i_hba *); 149void cxgb3i_hba_host_remove(struct cxgb3i_hba *);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 9212400b9b1..74369a3f963 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/inet.h> 14#include <linux/inet.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <net/dst.h>
16#include <net/tcp.h> 17#include <net/tcp.h>
17#include <scsi/scsi_cmnd.h> 18#include <scsi/scsi_cmnd.h>
18#include <scsi/scsi_device.h> 19#include <scsi/scsi_device.h>
@@ -178,7 +179,7 @@ void cxgb3i_adapter_close(struct t3cdev *t3dev)
178 * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device 179 * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device
179 * @t3dev: t3cdev adapter 180 * @t3dev: t3cdev adapter
180 */ 181 */
181struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) 182static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
182{ 183{
183 struct cxgb3i_adapter *snic; 184 struct cxgb3i_adapter *snic;
184 int i; 185 int i;
@@ -261,20 +262,27 @@ void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
261 262
262/** 263/**
263 * cxgb3i_ep_connect - establish TCP connection to target portal 264 * cxgb3i_ep_connect - establish TCP connection to target portal
265 * @shost: scsi host to use
264 * @dst_addr: target IP address 266 * @dst_addr: target IP address
265 * @non_blocking: blocking or non-blocking call 267 * @non_blocking: blocking or non-blocking call
266 * 268 *
267 * Initiates a TCP/IP connection to the dst_addr 269 * Initiates a TCP/IP connection to the dst_addr
268 */ 270 */
269static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr, 271static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost,
272 struct sockaddr *dst_addr,
270 int non_blocking) 273 int non_blocking)
271{ 274{
272 struct iscsi_endpoint *ep; 275 struct iscsi_endpoint *ep;
273 struct cxgb3i_endpoint *cep; 276 struct cxgb3i_endpoint *cep;
274 struct cxgb3i_hba *hba; 277 struct cxgb3i_hba *hba = NULL;
275 struct s3_conn *c3cn = NULL; 278 struct s3_conn *c3cn = NULL;
276 int err = 0; 279 int err = 0;
277 280
281 if (shost)
282 hba = iscsi_host_priv(shost);
283
284 cxgb3i_api_debug("shost 0x%p, hba 0x%p.\n", shost, hba);
285
278 c3cn = cxgb3i_c3cn_create(); 286 c3cn = cxgb3i_c3cn_create();
279 if (!c3cn) { 287 if (!c3cn) {
280 cxgb3i_log_info("ep connect OOM.\n"); 288 cxgb3i_log_info("ep connect OOM.\n");
@@ -282,17 +290,27 @@ static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
282 goto release_conn; 290 goto release_conn;
283 } 291 }
284 292
285 err = cxgb3i_c3cn_connect(c3cn, (struct sockaddr_in *)dst_addr); 293 err = cxgb3i_c3cn_connect(hba ? hba->ndev : NULL, c3cn,
294 (struct sockaddr_in *)dst_addr);
286 if (err < 0) { 295 if (err < 0) {
287 cxgb3i_log_info("ep connect failed.\n"); 296 cxgb3i_log_info("ep connect failed.\n");
288 goto release_conn; 297 goto release_conn;
289 } 298 }
299
290 hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev); 300 hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
291 if (!hba) { 301 if (!hba) {
292 err = -ENOSPC; 302 err = -ENOSPC;
293 cxgb3i_log_info("NOT going through cxgbi device.\n"); 303 cxgb3i_log_info("NOT going through cxgbi device.\n");
294 goto release_conn; 304 goto release_conn;
295 } 305 }
306
307 if (shost && hba != iscsi_host_priv(shost)) {
308 err = -ENOSPC;
309 cxgb3i_log_info("Could not connect through request host%u\n",
310 shost->host_no);
311 goto release_conn;
312 }
313
296 if (c3cn_is_closing(c3cn)) { 314 if (c3cn_is_closing(c3cn)) {
297 err = -ENOSPC; 315 err = -ENOSPC;
298 cxgb3i_log_info("ep connect unable to connect.\n"); 316 cxgb3i_log_info("ep connect unable to connect.\n");
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index e11c9c180f3..c1d5be4adf9 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -1479,12 +1479,13 @@ static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
1479 return NULL; 1479 return NULL;
1480} 1480}
1481 1481
1482static struct rtable *find_route(__be32 saddr, __be32 daddr, 1482static struct rtable *find_route(struct net_device *dev,
1483 __be32 saddr, __be32 daddr,
1483 __be16 sport, __be16 dport) 1484 __be16 sport, __be16 dport)
1484{ 1485{
1485 struct rtable *rt; 1486 struct rtable *rt;
1486 struct flowi fl = { 1487 struct flowi fl = {
1487 .oif = 0, 1488 .oif = dev ? dev->ifindex : 0,
1488 .nl_u = { 1489 .nl_u = {
1489 .ip4_u = { 1490 .ip4_u = {
1490 .daddr = daddr, 1491 .daddr = daddr,
@@ -1573,36 +1574,40 @@ out_err:
1573 * 1574 *
1574 * return 0 if active open request is sent, < 0 otherwise. 1575 * return 0 if active open request is sent, < 0 otherwise.
1575 */ 1576 */
1576int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin) 1577int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
1578 struct sockaddr_in *usin)
1577{ 1579{
1578 struct rtable *rt; 1580 struct rtable *rt;
1579 struct net_device *dev;
1580 struct cxgb3i_sdev_data *cdata; 1581 struct cxgb3i_sdev_data *cdata;
1581 struct t3cdev *cdev; 1582 struct t3cdev *cdev;
1582 __be32 sipv4; 1583 __be32 sipv4;
1583 int err; 1584 int err;
1584 1585
1586 c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);
1587
1585 if (usin->sin_family != AF_INET) 1588 if (usin->sin_family != AF_INET)
1586 return -EAFNOSUPPORT; 1589 return -EAFNOSUPPORT;
1587 1590
1588 c3cn->daddr.sin_port = usin->sin_port; 1591 c3cn->daddr.sin_port = usin->sin_port;
1589 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr; 1592 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
1590 1593
1591 rt = find_route(c3cn->saddr.sin_addr.s_addr, 1594 rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
1592 c3cn->daddr.sin_addr.s_addr, 1595 c3cn->daddr.sin_addr.s_addr,
1593 c3cn->saddr.sin_port, 1596 c3cn->saddr.sin_port,
1594 c3cn->daddr.sin_port); 1597 c3cn->daddr.sin_port);
1595 if (rt == NULL) { 1598 if (rt == NULL) {
1596 c3cn_conn_debug("NO route to 0x%x, port %u.\n", 1599 c3cn_conn_debug("NO route to 0x%x, port %u, dev %s.\n",
1597 c3cn->daddr.sin_addr.s_addr, 1600 c3cn->daddr.sin_addr.s_addr,
1598 ntohs(c3cn->daddr.sin_port)); 1601 ntohs(c3cn->daddr.sin_port),
1602 dev ? dev->name : "any");
1599 return -ENETUNREACH; 1603 return -ENETUNREACH;
1600 } 1604 }
1601 1605
1602 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { 1606 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
1603 c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n", 1607 c3cn_conn_debug("multi-cast route to 0x%x, port %u, dev %s.\n",
1604 c3cn->daddr.sin_addr.s_addr, 1608 c3cn->daddr.sin_addr.s_addr,
1605 ntohs(c3cn->daddr.sin_port)); 1609 ntohs(c3cn->daddr.sin_port),
1610 dev ? dev->name : "any");
1606 ip_rt_put(rt); 1611 ip_rt_put(rt);
1607 return -ENETUNREACH; 1612 return -ENETUNREACH;
1608 } 1613 }
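The find_route() change above pins the route lookup to the requested device by setting flowi.oif to dev->ifindex, so the kernel will only pick routes that egress through that interface. For an ordinary socket the closest userspace analogue is SO_BINDTODEVICE before connect(); a minimal hedged sketch (Linux-specific, requires CAP_NET_RAW/root):

#include <string.h>
#include <sys/socket.h>

/* pin a socket's route/egress choice to one interface, e.g. "eth0" */
static int bind_to_device(int fd, const char *ifname)
{
	return setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
			  ifname, strlen(ifname) + 1);
}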
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h
index ebfca960c0a..6a1d86b1faf 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h
@@ -169,7 +169,8 @@ void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *);
169void cxgb3i_sdev_remove(struct t3cdev *); 169void cxgb3i_sdev_remove(struct t3cdev *);
170 170
171struct s3_conn *cxgb3i_c3cn_create(void); 171struct s3_conn *cxgb3i_c3cn_create(void);
172int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *); 172int cxgb3i_c3cn_connect(struct net_device *, struct s3_conn *,
173 struct sockaddr_in *);
173void cxgb3i_c3cn_rx_credits(struct s3_conn *, int); 174void cxgb3i_c3cn_rx_credits(struct s3_conn *, int);
174int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *); 175int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *);
175void cxgb3i_c3cn_release(struct s3_conn *); 176void cxgb3i_c3cn_release(struct s3_conn *);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 43b8c51e98d..fd0544f7da8 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -561,6 +561,12 @@ static int rdac_check_sense(struct scsi_device *sdev,
561 struct rdac_dh_data *h = get_rdac_data(sdev); 561 struct rdac_dh_data *h = get_rdac_data(sdev);
562 switch (sense_hdr->sense_key) { 562 switch (sense_hdr->sense_key) {
563 case NOT_READY: 563 case NOT_READY:
564 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
565 /* LUN Not Ready - Logical Unit Not Ready and is in
566 * the process of becoming ready
567 * Just retry.
568 */
569 return ADD_TO_MLQUEUE;
564 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81) 570 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
565 /* LUN Not Ready - Storage firmware incompatible 571 /* LUN Not Ready - Storage firmware incompatible
566 * Manual code synchronisation required. 572 * Manual code synchronisation required.
diff --git a/drivers/scsi/dpt/osd_util.h b/drivers/scsi/dpt/osd_util.h
index 4b56c0436ba..b2613c2eaac 100644
--- a/drivers/scsi/dpt/osd_util.h
+++ b/drivers/scsi/dpt/osd_util.h
@@ -342,7 +342,7 @@ uLONG osdGetThreadID(void);
342/* wakes up the specified thread */ 342/* wakes up the specified thread */
343void osdWakeThread(uLONG); 343void osdWakeThread(uLONG);
344 344
345/* osd sleep for x miliseconds */ 345/* osd sleep for x milliseconds */
346void osdSleep(uLONG); 346void osdSleep(uLONG);
347 347
348#define DPT_THREAD_PRIORITY_LOWEST 0x00 348#define DPT_THREAD_PRIORITY_LOWEST 0x00
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index be5099dd94b..c7076ce25e2 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1825,7 +1825,7 @@ static int eata2x_queuecommand(struct scsi_cmnd *SCpnt,
1825 if (linked_comm && SCpnt->device->queue_depth > 2 1825 if (linked_comm && SCpnt->device->queue_depth > 2
1826 && TLDEV(SCpnt->device->type)) { 1826 && TLDEV(SCpnt->device->type)) {
1827 ha->cp_stat[i] = READY; 1827 ha->cp_stat[i] = READY;
1828 flush_dev(SCpnt->device, SCpnt->request->sector, ha, 0); 1828 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 0);
1829 return 0; 1829 return 0;
1830 } 1830 }
1831 1831
@@ -2144,13 +2144,13 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2144 if (!cpp->din) 2144 if (!cpp->din)
2145 input_only = 0; 2145 input_only = 0;
2146 2146
2147 if (SCpnt->request->sector < minsec) 2147 if (blk_rq_pos(SCpnt->request) < minsec)
2148 minsec = SCpnt->request->sector; 2148 minsec = blk_rq_pos(SCpnt->request);
2149 if (SCpnt->request->sector > maxsec) 2149 if (blk_rq_pos(SCpnt->request) > maxsec)
2150 maxsec = SCpnt->request->sector; 2150 maxsec = blk_rq_pos(SCpnt->request);
2151 2151
2152 sl[n] = SCpnt->request->sector; 2152 sl[n] = blk_rq_pos(SCpnt->request);
2153 ioseek += SCpnt->request->nr_sectors; 2153 ioseek += blk_rq_sectors(SCpnt->request);
2154 2154
2155 if (!n) 2155 if (!n)
2156 continue; 2156 continue;
@@ -2190,7 +2190,7 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2190 k = il[n]; 2190 k = il[n];
2191 cpp = &ha->cp[k]; 2191 cpp = &ha->cp[k];
2192 SCpnt = cpp->SCpnt; 2192 SCpnt = cpp->SCpnt;
2193 ll[n] = SCpnt->request->nr_sectors; 2193 ll[n] = blk_rq_sectors(SCpnt->request);
2194 pl[n] = SCpnt->serial_number; 2194 pl[n] = SCpnt->serial_number;
2195 2195
2196 if (!n) 2196 if (!n)
@@ -2236,12 +2236,12 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2236 cpp = &ha->cp[k]; 2236 cpp = &ha->cp[k];
2237 SCpnt = cpp->SCpnt; 2237 SCpnt = cpp->SCpnt;
2238 scmd_printk(KERN_INFO, SCpnt, 2238 scmd_printk(KERN_INFO, SCpnt,
2239 "%s pid %ld mb %d fc %d nr %d sec %ld ns %ld" 2239 "%s pid %ld mb %d fc %d nr %d sec %ld ns %u"
2240 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 2240 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
2241 (ihdlr ? "ihdlr" : "qcomm"), 2241 (ihdlr ? "ihdlr" : "qcomm"),
2242 SCpnt->serial_number, k, flushcount, 2242 SCpnt->serial_number, k, flushcount,
2243 n_ready, SCpnt->request->sector, 2243 n_ready, blk_rq_pos(SCpnt->request),
2244 SCpnt->request->nr_sectors, cursec, YESNO(s), 2244 blk_rq_sectors(SCpnt->request), cursec, YESNO(s),
2245 YESNO(r), YESNO(rev), YESNO(input_only), 2245 YESNO(r), YESNO(rev), YESNO(input_only),
2246 YESNO(overlap), cpp->din); 2246 YESNO(overlap), cpp->din);
2247 } 2247 }
@@ -2408,7 +2408,7 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
2408 2408
2409 if (linked_comm && SCpnt->device->queue_depth > 2 2409 if (linked_comm && SCpnt->device->queue_depth > 2
2410 && TLDEV(SCpnt->device->type)) 2410 && TLDEV(SCpnt->device->type))
2411 flush_dev(SCpnt->device, SCpnt->request->sector, ha, 1); 2411 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 1);
2412 2412
2413 tstatus = status_byte(spp->target_status); 2413 tstatus = status_byte(spp->target_status);
2414 2414
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index f791348871f..c15878e8815 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -54,7 +54,6 @@ MODULE_LICENSE("GPL v2");
54/* fcoe host list */ 54/* fcoe host list */
55LIST_HEAD(fcoe_hostlist); 55LIST_HEAD(fcoe_hostlist);
56DEFINE_RWLOCK(fcoe_hostlist_lock); 56DEFINE_RWLOCK(fcoe_hostlist_lock);
57DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
58DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); 57DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
59 58
60/* Function Prototypes */ 59/* Function Prototypes */
@@ -71,7 +70,7 @@ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
71static int fcoe_hostlist_add(const struct fc_lport *); 70static int fcoe_hostlist_add(const struct fc_lport *);
72static int fcoe_hostlist_remove(const struct fc_lport *); 71static int fcoe_hostlist_remove(const struct fc_lport *);
73 72
74static int fcoe_check_wait_queue(struct fc_lport *); 73static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
75static int fcoe_device_notification(struct notifier_block *, ulong, void *); 74static int fcoe_device_notification(struct notifier_block *, ulong, void *);
76static void fcoe_dev_setup(void); 75static void fcoe_dev_setup(void);
77static void fcoe_dev_cleanup(void); 76static void fcoe_dev_cleanup(void);
@@ -198,6 +197,7 @@ static int fcoe_lport_config(struct fc_lport *lp)
198 lp->link_up = 0; 197 lp->link_up = 0;
199 lp->qfull = 0; 198 lp->qfull = 0;
200 lp->max_retry_count = 3; 199 lp->max_retry_count = 3;
200 lp->max_rport_retry_count = 3;
201 lp->e_d_tov = 2 * 1000; /* FC-FS default */ 201 lp->e_d_tov = 2 * 1000; /* FC-FS default */
202 lp->r_a_tov = 2 * 2 * 1000; 202 lp->r_a_tov = 2 * 2 * 1000;
203 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | 203 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
@@ -243,6 +243,18 @@ void fcoe_netdev_cleanup(struct fcoe_softc *fc)
243} 243}
244 244
245/** 245/**
246 * fcoe_queue_timer() - fcoe queue timer
247 * @lp: the fc_lport pointer
248 *
249 * Calls fcoe_check_wait_queue on timeout
250 *
251 */
252static void fcoe_queue_timer(ulong lp)
253{
254 fcoe_check_wait_queue((struct fc_lport *)lp, NULL);
255}
256
257/**
246 * fcoe_netdev_config() - Set up netdev for SW FCoE 258 * fcoe_netdev_config() - Set up netdev for SW FCoE
247 * @lp : ptr to the fc_lport 259 * @lp : ptr to the fc_lport
248 * @netdev : ptr to the associated netdevice struct 260 * @netdev : ptr to the associated netdevice struct
@@ -313,6 +325,7 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
313 } 325 }
314 skb_queue_head_init(&fc->fcoe_pending_queue); 326 skb_queue_head_init(&fc->fcoe_pending_queue);
315 fc->fcoe_pending_queue_active = 0; 327 fc->fcoe_pending_queue_active = 0;
328 setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp);
316 329
317 /* look for SAN MAC address, if multiple SAN MACs exist, only 330 /* look for SAN MAC address, if multiple SAN MACs exist, only
318 * use the first one for SPMA */ 331 * use the first one for SPMA */
@@ -474,6 +487,9 @@ static int fcoe_if_destroy(struct net_device *netdev)
474 /* Free existing skbs */ 487 /* Free existing skbs */
475 fcoe_clean_pending_queue(lp); 488 fcoe_clean_pending_queue(lp);
476 489
490 /* Stop the timer */
491 del_timer_sync(&fc->timer);
492
477 /* Free memory used by statistical counters */ 493 /* Free memory used by statistical counters */
478 fc_lport_free_stats(lp); 494 fc_lport_free_stats(lp);
479 495
@@ -1021,7 +1037,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
1021 */ 1037 */
1022int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) 1038int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1023{ 1039{
1024 int wlen, rc = 0; 1040 int wlen;
1025 u32 crc; 1041 u32 crc;
1026 struct ethhdr *eh; 1042 struct ethhdr *eh;
1027 struct fcoe_crc_eof *cp; 1043 struct fcoe_crc_eof *cp;
@@ -1054,8 +1070,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1054 sof = fr_sof(fp); 1070 sof = fr_sof(fp);
1055 eof = fr_eof(fp); 1071 eof = fr_eof(fp);
1056 1072
1057 elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ? 1073 elen = sizeof(struct ethhdr);
1058 sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
1059 hlen = sizeof(struct fcoe_hdr); 1074 hlen = sizeof(struct fcoe_hdr);
1060 tlen = sizeof(struct fcoe_crc_eof); 1075 tlen = sizeof(struct fcoe_crc_eof);
1061 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; 1076 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
@@ -1140,18 +1155,9 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1140 /* send down to lld */ 1155 /* send down to lld */
1141 fr_dev(fp) = lp; 1156 fr_dev(fp) = lp;
1142 if (fc->fcoe_pending_queue.qlen) 1157 if (fc->fcoe_pending_queue.qlen)
1143 rc = fcoe_check_wait_queue(lp); 1158 fcoe_check_wait_queue(lp, skb);
1144 1159 else if (fcoe_start_io(skb))
1145 if (rc == 0) 1160 fcoe_check_wait_queue(lp, skb);
1146 rc = fcoe_start_io(skb);
1147
1148 if (rc) {
1149 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1150 __skb_queue_tail(&fc->fcoe_pending_queue, skb);
1151 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1152 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1153 lp->qfull = 1;
1154 }
1155 1161
1156 return 0; 1162 return 0;
1157} 1163}
@@ -1301,32 +1307,6 @@ int fcoe_percpu_receive_thread(void *arg)
1301} 1307}
1302 1308
1303/** 1309/**
1304 * fcoe_watchdog() - fcoe timer callback
1305 * @vp:
1306 *
1307 * This checks the pending queue length for fcoe and set lport qfull
1308 * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
1309 * fcoe_hostlist.
1310 *
1311 * Returns: 0 for success
1312 */
1313void fcoe_watchdog(ulong vp)
1314{
1315 struct fcoe_softc *fc;
1316
1317 read_lock(&fcoe_hostlist_lock);
1318 list_for_each_entry(fc, &fcoe_hostlist, list) {
1319 if (fc->ctlr.lp)
1320 fcoe_check_wait_queue(fc->ctlr.lp);
1321 }
1322 read_unlock(&fcoe_hostlist_lock);
1323
1324 fcoe_timer.expires = jiffies + (1 * HZ);
1325 add_timer(&fcoe_timer);
1326}
1327
1328
1329/**
1330 * fcoe_check_wait_queue() - attempt to clear the transmit backlog 1310 * fcoe_check_wait_queue() - attempt to clear the transmit backlog
1331 * @lp: the fc_lport 1311 * @lp: the fc_lport
1332 * 1312 *
@@ -1338,16 +1318,17 @@ void fcoe_watchdog(ulong vp)
1338 * The wait_queue is used when the skb transmit fails. skb will go 1318 * The wait_queue is used when the skb transmit fails. skb will go
1339 * in the wait_queue which will be emptied by the timer function or 1319 * in the wait_queue which will be emptied by the timer function or
1340 * by the next skb transmit. 1320 * by the next skb transmit.
1341 *
1342 * Returns: 0 for success
1343 */ 1321 */
1344static int fcoe_check_wait_queue(struct fc_lport *lp) 1322static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
1345{ 1323{
1346 struct fcoe_softc *fc = lport_priv(lp); 1324 struct fcoe_softc *fc = lport_priv(lp);
1347 struct sk_buff *skb; 1325 int rc;
1348 int rc = -1;
1349 1326
1350 spin_lock_bh(&fc->fcoe_pending_queue.lock); 1327 spin_lock_bh(&fc->fcoe_pending_queue.lock);
1328
1329 if (skb)
1330 __skb_queue_tail(&fc->fcoe_pending_queue, skb);
1331
1351 if (fc->fcoe_pending_queue_active) 1332 if (fc->fcoe_pending_queue_active)
1352 goto out; 1333 goto out;
1353 fc->fcoe_pending_queue_active = 1; 1334 fc->fcoe_pending_queue_active = 1;
@@ -1373,23 +1354,26 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
1373 1354
1374 if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) 1355 if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
1375 lp->qfull = 0; 1356 lp->qfull = 0;
1357 if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer))
1358 mod_timer(&fc->timer, jiffies + 2);
1376 fc->fcoe_pending_queue_active = 0; 1359 fc->fcoe_pending_queue_active = 0;
1377 rc = fc->fcoe_pending_queue.qlen;
1378out: 1360out:
1361 if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1362 lp->qfull = 1;
1379 spin_unlock_bh(&fc->fcoe_pending_queue.lock); 1363 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1380 return rc; 1364 return;
1381} 1365}
1382 1366
1383/** 1367/**
1384 * fcoe_dev_setup() - setup link change notification interface 1368 * fcoe_dev_setup() - setup link change notification interface
1385 */ 1369 */
1386static void fcoe_dev_setup() 1370static void fcoe_dev_setup(void)
1387{ 1371{
1388 register_netdevice_notifier(&fcoe_notifier); 1372 register_netdevice_notifier(&fcoe_notifier);
1389} 1373}
1390 1374
1391/** 1375/**
1392 * fcoe_dev_setup() - cleanup link change notification interface 1376 * fcoe_dev_cleanup() - cleanup link change notification interface
1393 */ 1377 */
1394static void fcoe_dev_cleanup(void) 1378static void fcoe_dev_cleanup(void)
1395{ 1379{
@@ -1848,10 +1832,6 @@ static int __init fcoe_init(void)
1848 /* Setup link change notification */ 1832 /* Setup link change notification */
1849 fcoe_dev_setup(); 1833 fcoe_dev_setup();
1850 1834
1851 setup_timer(&fcoe_timer, fcoe_watchdog, 0);
1852
1853 mod_timer(&fcoe_timer, jiffies + (10 * HZ));
1854
1855 fcoe_if_init(); 1835 fcoe_if_init();
1856 1836
1857 return 0; 1837 return 0;
@@ -1877,9 +1857,6 @@ static void __exit fcoe_exit(void)
1877 1857
1878 fcoe_dev_cleanup(); 1858 fcoe_dev_cleanup();
1879 1859
1880 /* Stop the timer */
1881 del_timer_sync(&fcoe_timer);
1882
1883 /* releases the associated fcoe hosts */ 1860 /* releases the associated fcoe hosts */
1884 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) 1861 list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
1885 fcoe_if_destroy(fc->real_dev); 1862 fcoe_if_destroy(fc->real_dev);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 917aae88689..a1eb8c1988b 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -61,6 +61,7 @@ struct fcoe_softc {
61 struct packet_type fip_packet_type; 61 struct packet_type fip_packet_type;
62 struct sk_buff_head fcoe_pending_queue; 62 struct sk_buff_head fcoe_pending_queue;
63 u8 fcoe_pending_queue_active; 63 u8 fcoe_pending_queue_active;
64 struct timer_list timer; /* queue timer */
64 struct fcoe_ctlr ctlr; 65 struct fcoe_ctlr ctlr;
65}; 66};
66 67
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index b9aa280100b..2f5bc7fd3fa 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -215,7 +215,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
215 sol->desc.size.fd_size = htons(fcoe_size); 215 sol->desc.size.fd_size = htons(fcoe_size);
216 216
217 skb_put(skb, sizeof(*sol)); 217 skb_put(skb, sizeof(*sol));
218 skb->protocol = htons(ETH_P_802_3); 218 skb->protocol = htons(ETH_P_FIP);
219 skb_reset_mac_header(skb); 219 skb_reset_mac_header(skb);
220 skb_reset_network_header(skb); 220 skb_reset_network_header(skb);
221 fip->send(fip, skb); 221 fip->send(fip, skb);
@@ -369,7 +369,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
369 } 369 }
370 370
371 skb_put(skb, len); 371 skb_put(skb, len);
372 skb->protocol = htons(ETH_P_802_3); 372 skb->protocol = htons(ETH_P_FIP);
373 skb_reset_mac_header(skb); 373 skb_reset_mac_header(skb);
374 skb_reset_network_header(skb); 374 skb_reset_network_header(skb);
375 fip->send(fip, skb); 375 fip->send(fip, skb);
@@ -432,7 +432,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
432 else if (fip->spma) 432 else if (fip->spma)
433 memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN); 433 memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN);
434 434
435 skb->protocol = htons(ETH_P_802_3); 435 skb->protocol = htons(ETH_P_FIP);
436 skb_reset_mac_header(skb); 436 skb_reset_mac_header(skb);
437 skb_reset_network_header(skb); 437 skb_reset_network_header(skb);
438 return 0; 438 return 0;
@@ -455,14 +455,10 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
455 u16 old_xid; 455 u16 old_xid;
456 u8 op; 456 u8 op;
457 457
458 if (fip->state == FIP_ST_NON_FIP)
459 return 0;
460
461 fh = (struct fc_frame_header *)skb->data; 458 fh = (struct fc_frame_header *)skb->data;
462 op = *(u8 *)(fh + 1); 459 op = *(u8 *)(fh + 1);
463 460
464 switch (op) { 461 if (op == ELS_FLOGI) {
465 case ELS_FLOGI:
466 old_xid = fip->flogi_oxid; 462 old_xid = fip->flogi_oxid;
467 fip->flogi_oxid = ntohs(fh->fh_ox_id); 463 fip->flogi_oxid = ntohs(fh->fh_ox_id);
468 if (fip->state == FIP_ST_AUTO) { 464 if (fip->state == FIP_ST_AUTO) {
@@ -474,6 +470,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
474 fip->map_dest = 1; 470 fip->map_dest = 1;
475 return 0; 471 return 0;
476 } 472 }
473 if (fip->state == FIP_ST_NON_FIP)
474 fip->map_dest = 1;
475 }
476
477 if (fip->state == FIP_ST_NON_FIP)
478 return 0;
479
480 switch (op) {
481 case ELS_FLOGI:
477 op = FIP_DT_FLOGI; 482 op = FIP_DT_FLOGI;
478 break; 483 break;
479 case ELS_FDISC: 484 case ELS_FDISC:
diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile
new file mode 100644
index 00000000000..37c3440bc17
--- /dev/null
+++ b/drivers/scsi/fnic/Makefile
@@ -0,0 +1,15 @@
1obj-$(CONFIG_FCOE_FNIC) += fnic.o
2
3fnic-y := \
4 fnic_attrs.o \
5 fnic_isr.o \
6 fnic_main.o \
7 fnic_res.o \
8 fnic_fcs.o \
9 fnic_scsi.o \
10 vnic_cq.o \
11 vnic_dev.o \
12 vnic_intr.o \
13 vnic_rq.o \
14 vnic_wq_copy.o \
15 vnic_wq.o
diff --git a/drivers/scsi/fnic/cq_desc.h b/drivers/scsi/fnic/cq_desc.h
new file mode 100644
index 00000000000..d1225cf6320
--- /dev/null
+++ b/drivers/scsi/fnic/cq_desc.h
@@ -0,0 +1,78 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _CQ_DESC_H_
19#define _CQ_DESC_H_
20
21/*
22 * Completion queue descriptor types
23 */
24enum cq_desc_types {
25 CQ_DESC_TYPE_WQ_ENET = 0,
26 CQ_DESC_TYPE_DESC_COPY = 1,
27 CQ_DESC_TYPE_WQ_EXCH = 2,
28 CQ_DESC_TYPE_RQ_ENET = 3,
29 CQ_DESC_TYPE_RQ_FCP = 4,
30};
31
32/* Completion queue descriptor: 16B
33 *
34 * All completion queues have this basic layout. The
35 * type_specfic area is unique for each completion
36 * queue type.
37 */
38struct cq_desc {
39 __le16 completed_index;
40 __le16 q_number;
41 u8 type_specfic[11];
42 u8 type_color;
43};
44
45#define CQ_DESC_TYPE_BITS 4
46#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
47#define CQ_DESC_COLOR_MASK 1
48#define CQ_DESC_COLOR_SHIFT 7
49#define CQ_DESC_Q_NUM_BITS 10
50#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
51#define CQ_DESC_COMP_NDX_BITS 12
52#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
53
54static inline void cq_desc_dec(const struct cq_desc *desc_arg,
55 u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
56{
57 const struct cq_desc *desc = desc_arg;
58 const u8 type_color = desc->type_color;
59
60 *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
61
62 /*
63 * Make sure color bit is read from desc *before* other fields
64 * are read from desc. Hardware guarantees color bit is last
65 * bit (byte) written. Adding the rmb() prevents the compiler
66 * and/or CPU from reordering the reads which would potentially
67 * result in reading stale values.
68 */
69
70 rmb();
71
72 *type = type_color & CQ_DESC_TYPE_MASK;
73 *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
74 *completed_index = le16_to_cpu(desc->completed_index) &
75 CQ_DESC_COMP_NDX_MASK;
76}
77
78#endif /* _CQ_DESC_H_ */
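The rmb() in cq_desc_dec() enforces a producer/consumer ordering: the hardware writes the color byte last, so the driver must read it first and only then trust the other descriptor fields. In portable C11 the same idiom is usually written as an acquire load of the flag byte followed by plain reads. The userspace sketch below assumes a little-endian host (the kernel code uses le16_to_cpu instead); the struct and helper are illustrative mirrors, not the fnic API.

#include <stdatomic.h>
#include <stdint.h>

/* software mirror of struct cq_desc; type_color is published last */
struct cq_desc_sw {
	uint16_t completed_index;	/* kernel reads via le16_to_cpu() */
	uint16_t q_number;
	uint8_t  type_specific[11];
	_Atomic uint8_t type_color;
};

/*
 * Acquire-load the color byte first; the loads that follow cannot be
 * reordered before it. This is the C11 analogue of reading type_color
 * and then issuing rmb() as cq_desc_dec() does.
 */
static int cq_desc_ready(const struct cq_desc_sw *desc, uint8_t expect_color,
			 uint16_t *q_number, uint16_t *completed_index)
{
	uint8_t tc = atomic_load_explicit(&desc->type_color,
					  memory_order_acquire);

	if (((tc >> 7) & 1) != expect_color)	/* CQ_DESC_COLOR_SHIFT */
		return 0;	/* descriptor not published yet */

	*q_number = desc->q_number & ((1 << 10) - 1);	    /* Q_NUM_MASK */
	*completed_index = desc->completed_index & ((1 << 12) - 1);
	return 1;
}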
diff --git a/drivers/scsi/fnic/cq_enet_desc.h b/drivers/scsi/fnic/cq_enet_desc.h
new file mode 100644
index 00000000000..a9fa26f82dd
--- /dev/null
+++ b/drivers/scsi/fnic/cq_enet_desc.h
@@ -0,0 +1,167 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _CQ_ENET_DESC_H_
19#define _CQ_ENET_DESC_H_
20
21#include "cq_desc.h"
22
23/* Ethernet completion queue descriptor: 16B */
24struct cq_enet_wq_desc {
25 __le16 completed_index;
26 __le16 q_number;
27 u8 reserved[11];
28 u8 type_color;
29};
30
31static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
32 u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
33{
34 cq_desc_dec((struct cq_desc *)desc, type,
35 color, q_number, completed_index);
36}
37
38/* Completion queue descriptor: Ethernet receive queue, 16B */
39struct cq_enet_rq_desc {
40 __le16 completed_index_flags;
41 __le16 q_number_rss_type_flags;
42 __le32 rss_hash;
43 __le16 bytes_written_flags;
44 __le16 vlan;
45 __le16 checksum_fcoe;
46 u8 flags;
47 u8 type_color;
48};
49
50#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
51#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
52#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
53#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15)
54
55#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4
56#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
57 ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
58#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0
59#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1
60#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2
61#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3
62#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4
63#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5
64#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6
65
66#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14)
67
68#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14
69#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
70 ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
71#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14)
72#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15)
73
74#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 4
75#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
76 ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
77#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8
78#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
79 ((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
80#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8
81
82#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0)
83#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0)
84#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1)
85#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1)
86#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2)
87#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3)
88#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4)
89#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5)
90#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
91#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
92
93static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
94 u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
95 u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
96 u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
97 u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof,
98 u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
99 u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
100 u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
101{
102 u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
103 u16 q_number_rss_type_flags =
104 le16_to_cpu(desc->q_number_rss_type_flags);
105 u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
106
107 cq_desc_dec((struct cq_desc *)desc, type,
108 color, q_number, completed_index);
109
110 *ingress_port = (completed_index_flags &
111 CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
112 *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
113 1 : 0;
114 *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
115 1 : 0;
116 *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
117 1 : 0;
118
119 *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
120 CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
121 *csum_not_calc = (q_number_rss_type_flags &
122 CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
123
124 *rss_hash = le32_to_cpu(desc->rss_hash);
125
126 *bytes_written = bytes_written_flags &
127 CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
128 *packet_error = (bytes_written_flags &
129 CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
130 *vlan_stripped = (bytes_written_flags &
131 CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
132
133 *vlan = le16_to_cpu(desc->vlan);
134
135 if (*fcoe) {
136 *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
137 CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
138 *fcoe_fc_crc_ok = (desc->flags &
139 CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
140 *fcoe_enc_error = (desc->flags &
141 CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
142 *fcoe_eof = (u8)((desc->checksum_fcoe >>
143 CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
144 CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
145 *checksum = 0;
146 } else {
147 *fcoe_sof = 0;
148 *fcoe_fc_crc_ok = 0;
149 *fcoe_enc_error = 0;
150 *fcoe_eof = 0;
151 *checksum = le16_to_cpu(desc->checksum_fcoe);
152 }
153
154 *tcp_udp_csum_ok =
155 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
156 *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
157 *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
158 *ipv4_csum_ok =
159 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
160 *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
161 *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
162 *ipv4_fragment =
163 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
164 *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
165}
166
167#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/scsi/fnic/cq_exch_desc.h b/drivers/scsi/fnic/cq_exch_desc.h
new file mode 100644
index 00000000000..501660cfe22
--- /dev/null
+++ b/drivers/scsi/fnic/cq_exch_desc.h
@@ -0,0 +1,182 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _CQ_EXCH_DESC_H_
19#define _CQ_EXCH_DESC_H_
20
21#include "cq_desc.h"
22
23/* Exchange completion queue descriptor: 16B */
24struct cq_exch_wq_desc {
25 u16 completed_index;
26 u16 q_number;
27 u16 exchange_id;
28 u8 tmpl;
29 u8 reserved0;
30 u32 reserved1;
31 u8 exch_status;
32 u8 reserved2[2];
33 u8 type_color;
34};
35
36#define CQ_EXCH_WQ_STATUS_BITS 2
37#define CQ_EXCH_WQ_STATUS_MASK ((1 << CQ_EXCH_WQ_STATUS_BITS) - 1)
38
39enum cq_exch_status_types {
40 CQ_EXCH_WQ_STATUS_TYPE_COMPLETE = 0,
41 CQ_EXCH_WQ_STATUS_TYPE_ABORT = 1,
42 CQ_EXCH_WQ_STATUS_TYPE_SGL_EOF = 2,
43 CQ_EXCH_WQ_STATUS_TYPE_TMPL_ERR = 3,
44};
45
46static inline void cq_exch_wq_desc_dec(struct cq_exch_wq_desc *desc_ptr,
47 u8 *type,
48 u8 *color,
49 u16 *q_number,
50 u16 *completed_index,
51 u8 *exch_status)
52{
53 cq_desc_dec((struct cq_desc *)desc_ptr, type,
54 color, q_number, completed_index);
55 *exch_status = desc_ptr->exch_status & CQ_EXCH_WQ_STATUS_MASK;
56}
57
58struct cq_fcp_rq_desc {
59 u16 completed_index_eop_sop_prt;
60 u16 q_number;
61 u16 exchange_id;
62 u16 tmpl;
63 u16 bytes_written;
64 u16 vlan;
65 u8 sof;
66 u8 eof;
67 u8 fcs_fer_fck;
68 u8 type_color;
69};
70
71#define CQ_FCP_RQ_DESC_FLAGS_SOP (1 << 15)
72#define CQ_FCP_RQ_DESC_FLAGS_EOP (1 << 14)
73#define CQ_FCP_RQ_DESC_FLAGS_PRT (1 << 12)
74#define CQ_FCP_RQ_DESC_TMPL_MASK 0x1f
75#define CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK 0x3fff
76#define CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT 14
77#define CQ_FCP_RQ_DESC_PACKET_ERR_MASK (1 << CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT)
78#define CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT 15
79#define CQ_FCP_RQ_DESC_VS_STRIPPED_MASK (1 << CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT)
80#define CQ_FCP_RQ_DESC_FC_CRC_OK_MASK 0x1
81#define CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT 1
82#define CQ_FCP_RQ_DESC_FCOE_ERR_MASK (1 << CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT)
83#define CQ_FCP_RQ_DESC_FCS_OK_SHIFT 7
84#define CQ_FCP_RQ_DESC_FCS_OK_MASK (1 << CQ_FCP_RQ_DESC_FCS_OK_SHIFT)
85
86static inline void cq_fcp_rq_desc_dec(struct cq_fcp_rq_desc *desc_ptr,
87 u8 *type,
88 u8 *color,
89 u16 *q_number,
90 u16 *completed_index,
91 u8 *eop,
92 u8 *sop,
93 u8 *fck,
94 u16 *exchange_id,
95 u16 *tmpl,
96 u32 *bytes_written,
97 u8 *sof,
98 u8 *eof,
99 u8 *ingress_port,
100 u8 *packet_err,
101 u8 *fcoe_err,
102 u8 *fcs_ok,
103 u8 *vlan_stripped,
104 u16 *vlan)
105{
106 cq_desc_dec((struct cq_desc *)desc_ptr, type,
107 color, q_number, completed_index);
108 *eop = (desc_ptr->completed_index_eop_sop_prt &
109 CQ_FCP_RQ_DESC_FLAGS_EOP) ? 1 : 0;
110 *sop = (desc_ptr->completed_index_eop_sop_prt &
111 CQ_FCP_RQ_DESC_FLAGS_SOP) ? 1 : 0;
112 *ingress_port =
113 (desc_ptr->completed_index_eop_sop_prt &
114 CQ_FCP_RQ_DESC_FLAGS_PRT) ? 1 : 0;
115 *exchange_id = desc_ptr->exchange_id;
116 *tmpl = desc_ptr->tmpl & CQ_FCP_RQ_DESC_TMPL_MASK;
117 *bytes_written =
118 desc_ptr->bytes_written & CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK;
119 *packet_err =
120 (desc_ptr->bytes_written & CQ_FCP_RQ_DESC_PACKET_ERR_MASK) >>
121 CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT;
122 *vlan_stripped =
123 (desc_ptr->bytes_written & CQ_FCP_RQ_DESC_VS_STRIPPED_MASK) >>
124 CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT;
125 *vlan = desc_ptr->vlan;
126 *sof = desc_ptr->sof;
127 *fck = desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FC_CRC_OK_MASK;
128 *fcoe_err = (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCOE_ERR_MASK) >>
129 CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT;
130 *eof = desc_ptr->eof;
131 *fcs_ok =
132 (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCS_OK_MASK) >>
133 CQ_FCP_RQ_DESC_FCS_OK_SHIFT;
134}
135
136struct cq_sgl_desc {
137 u16 exchange_id;
138 u16 q_number;
139 u32 active_burst_offset;
140 u32 tot_data_bytes;
141 u16 tmpl;
142 u8 sgl_err;
143 u8 type_color;
144};
145
146enum cq_sgl_err_types {
147 CQ_SGL_ERR_NO_ERROR = 0,
148 CQ_SGL_ERR_OVERFLOW, /* data ran beyond end of SGL */
149 CQ_SGL_ERR_SGL_LCL_ADDR_ERR, /* sgl access to local vnic addr illegal*/
150 CQ_SGL_ERR_ADDR_RSP_ERR, /* sgl address error */
151 CQ_SGL_ERR_DATA_RSP_ERR, /* sgl data rsp error */
152 CQ_SGL_ERR_CNT_ZERO_ERR, /* SGL count is 0 */
153 CQ_SGL_ERR_CNT_MAX_ERR, /* SGL count is larger than supported */
154 CQ_SGL_ERR_ORDER_ERR, /* frames recv on both ports, order err */
155 CQ_SGL_ERR_DATA_LCL_ADDR_ERR,/* sgl data buf to local vnic addr ill */
156 CQ_SGL_ERR_HOST_CQ_ERR, /* host cq entry to local vnic addr ill */
157};
158
159#define CQ_SGL_SGL_ERR_MASK 0x1f
160#define CQ_SGL_TMPL_MASK 0x1f
161
162static inline void cq_sgl_desc_dec(struct cq_sgl_desc *desc_ptr,
163 u8 *type,
164 u8 *color,
165 u16 *q_number,
166 u16 *exchange_id,
167 u32 *active_burst_offset,
168 u32 *tot_data_bytes,
169 u16 *tmpl,
170 u8 *sgl_err)
171{
172 /* Cheat a little by assuming exchange_id is the same as completed
173 index */
174 cq_desc_dec((struct cq_desc *)desc_ptr, type, color, q_number,
175 exchange_id);
176 *active_burst_offset = desc_ptr->active_burst_offset;
177 *tot_data_bytes = desc_ptr->tot_data_bytes;
178 *tmpl = desc_ptr->tmpl & CQ_SGL_TMPL_MASK;
179 *sgl_err = desc_ptr->sgl_err & CQ_SGL_SGL_ERR_MASK;
180}
181
182#endif /* _CQ_EXCH_DESC_H_ */
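
The decode helpers in this header all follow one pattern: cast the raw completion entry to the queue-specific descriptor, let cq_desc_dec() extract the common type/color/queue fields, then mask out the type-specific bits. A minimal caller sketch, modeled on the fnic_rq_cmpl_frame_recv() receive path that appears later in this patch (the function name and the drop policy shown here are illustrative only):

/* Illustrative sketch; mirrors the decode call made by the rx path */
static void example_decode_fcp_rq(struct cq_desc *cq_desc)
{
	u8 type, color, eop, sop, fck, sof, eof, prt;
	u8 pkt_err, fcoe_err, fcs_ok, vlan_stripped;
	u16 q_number, completed_index, exchange_id, tmpl, vlan;
	u32 bytes_written;

	cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
			   &type, &color, &q_number, &completed_index,
			   &eop, &sop, &fck, &exchange_id, &tmpl,
			   &bytes_written, &sof, &eof, &prt,
			   &pkt_err, &fcoe_err, &fcs_ok,
			   &vlan_stripped, &vlan);

	if (!fcs_ok || pkt_err || !fck || fcoe_err)
		return;	/* drop: FCS, packet, FC CRC or encap error */
}
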
diff --git a/drivers/scsi/fnic/fcpio.h b/drivers/scsi/fnic/fcpio.h
new file mode 100644
index 00000000000..12d770d885c
--- /dev/null
+++ b/drivers/scsi/fnic/fcpio.h
@@ -0,0 +1,780 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _FCPIO_H_
19#define _FCPIO_H_
20
21#include <linux/if_ether.h>
22
23/*
24 * This header file includes all of the data structures used for
25 * communication between the host driver and the fcp firmware.
26 */
27
28/*
29 * Exchange and sequence id space allocated to the host driver
30 */
31#define FCPIO_HOST_EXCH_RANGE_START 0x1000
32#define FCPIO_HOST_EXCH_RANGE_END 0x1fff
33#define FCPIO_HOST_SEQ_ID_RANGE_START 0x80
34#define FCPIO_HOST_SEQ_ID_RANGE_END 0xff
35
36/*
37 * Command entry type
38 */
39enum fcpio_type {
40 /*
41 * Initiator request types
42 */
43 FCPIO_ICMND_16 = 0x1,
44 FCPIO_ICMND_32,
45 FCPIO_ICMND_CMPL,
46 FCPIO_ITMF,
47 FCPIO_ITMF_CMPL,
48
49 /*
50 * Target request types
51 */
52 FCPIO_TCMND_16 = 0x11,
53 FCPIO_TCMND_32,
54 FCPIO_TDATA,
55 FCPIO_TXRDY,
56 FCPIO_TRSP,
57 FCPIO_TDRSP_CMPL,
58 FCPIO_TTMF,
59 FCPIO_TTMF_ACK,
60 FCPIO_TABORT,
61 FCPIO_TABORT_CMPL,
62
63 /*
64 * Misc request types
65 */
66 FCPIO_ACK = 0x20,
67 FCPIO_RESET,
68 FCPIO_RESET_CMPL,
69 FCPIO_FLOGI_REG,
70 FCPIO_FLOGI_REG_CMPL,
71 FCPIO_ECHO,
72 FCPIO_ECHO_CMPL,
73 FCPIO_LUNMAP_CHNG,
74 FCPIO_LUNMAP_REQ,
75 FCPIO_LUNMAP_REQ_CMPL,
76 FCPIO_FLOGI_FIP_REG,
77 FCPIO_FLOGI_FIP_REG_CMPL,
78};
79
80/*
81 * Header status codes from the firmware
82 */
83enum fcpio_status {
84 FCPIO_SUCCESS = 0, /* request was successful */
85
86 /*
87 * If a request to the firmware is rejected, the original request
88 * header will be returned with the status set to one of the following:
89 */
90 FCPIO_INVALID_HEADER, /* header contains invalid data */
91 FCPIO_OUT_OF_RESOURCE, /* out of resources to complete request */
92 FCPIO_INVALID_PARAM, /* some parameter in request is invalid */
93 FCPIO_REQ_NOT_SUPPORTED, /* request type is not supported */
94 FCPIO_IO_NOT_FOUND, /* requested I/O was not found */
95
96 /*
97 * Once a request is processed, the firmware will usually return
98 * a cmpl message type. In cases where errors occurred,
99 * the header status field will be filled in with one of the following:
100 */
101 FCPIO_ABORTED = 0x41, /* request was aborted */
102 FCPIO_TIMEOUT, /* request was timed out */
103 FCPIO_SGL_INVALID, /* request was aborted due to sgl error */
104 FCPIO_MSS_INVALID, /* request was aborted due to mss error */
105 FCPIO_DATA_CNT_MISMATCH, /* recv/sent more/less data than exp. */
106 FCPIO_FW_ERR, /* request was terminated due to fw error */
107 FCPIO_ITMF_REJECTED, /* itmf req was rejected by remote node */
108 FCPIO_ITMF_FAILED, /* itmf req was failed by remote node */
109 FCPIO_ITMF_INCORRECT_LUN, /* itmf req targeted incorrect LUN */
110 FCPIO_CMND_REJECTED, /* request was invalid and rejected */
111	FCPIO_NO_PATH_AVAIL,      /* no path to the lun was available */
112 FCPIO_PATH_FAILED, /* i/o sent to current path failed */
113 FCPIO_LUNMAP_CHNG_PEND, /* i/o rejected due to lunmap change */
114};
115
116/*
117 * The header command tag. All host requests will use the "tag" field
118 * to mark commands with a unique tag. When the firmware responds to
119 * a host request, it will copy the tag field into the response.
120 *
121 * The only firmware requests that will use the rx_id/ox_id fields instead
122 * of the tag field will be the target command and target task management
123 * requests. These two requests do not have corresponding host requests
124 * since they come directly from the FC initiator on the network.
125 */
126struct fcpio_tag {
127 union {
128 u32 req_id;
129 struct {
130 u16 rx_id;
131 u16 ox_id;
132 } ex_id;
133 } u;
134};
135
136static inline void
137fcpio_tag_id_enc(struct fcpio_tag *tag, u32 id)
138{
139 tag->u.req_id = id;
140}
141
142static inline void
143fcpio_tag_id_dec(struct fcpio_tag *tag, u32 *id)
144{
145 *id = tag->u.req_id;
146}
147
148static inline void
149fcpio_tag_exid_enc(struct fcpio_tag *tag, u16 ox_id, u16 rx_id)
150{
151 tag->u.ex_id.rx_id = rx_id;
152 tag->u.ex_id.ox_id = ox_id;
153}
154
155static inline void
156fcpio_tag_exid_dec(struct fcpio_tag *tag, u16 *ox_id, u16 *rx_id)
157{
158 *rx_id = tag->u.ex_id.rx_id;
159 *ox_id = tag->u.ex_id.ox_id;
160}
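
The tag union above is the correlation mechanism between host and firmware: host-originated requests carry an opaque 32-bit id that the firmware echoes back in its completion, while target-mode traffic carries the FC exchange pair instead. A round-trip sketch (the value 42 is arbitrary):

/* Illustrative sketch only */
static inline void example_tag_roundtrip(void)
{
	struct fcpio_tag tag;
	u32 id;

	fcpio_tag_id_enc(&tag, 42);	/* host marks its request */
	fcpio_tag_id_dec(&tag, &id);	/* cmpl echoes it back: id == 42 */
	/* target command/TMF traffic would use fcpio_tag_exid_enc/dec
	 * with the FC ox_id/rx_id pair instead */
}
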
161
162/*
163 * The header for an fcpio request, whether from the firmware or from the
164 * host driver
165 */
166struct fcpio_header {
167 u8 type; /* enum fcpio_type */
168 u8 status; /* header status entry */
169 u16 _resvd; /* reserved */
170 struct fcpio_tag tag; /* header tag */
171};
172
173static inline void
174fcpio_header_enc(struct fcpio_header *hdr,
175 u8 type, u8 status,
176 struct fcpio_tag tag)
177{
178 hdr->type = type;
179 hdr->status = status;
180 hdr->_resvd = 0;
181 hdr->tag = tag;
182}
183
184static inline void
185fcpio_header_dec(struct fcpio_header *hdr,
186 u8 *type, u8 *status,
187 struct fcpio_tag *tag)
188{
189 *type = hdr->type;
190 *status = hdr->status;
191 *tag = hdr->tag;
192}
193
194#define CDB_16 16
195#define CDB_32 32
196#define LUN_ADDRESS 8
197
198/*
199 * fcpio_icmnd_16: host -> firmware request
200 *
201 * used for sending out an initiator SCSI 16-byte command
202 */
203struct fcpio_icmnd_16 {
204 u32 lunmap_id; /* index into lunmap table */
205 u8 special_req_flags; /* special exchange request flags */
206 u8 _resvd0[3]; /* reserved */
207 u32 sgl_cnt; /* scatter-gather list count */
208 u32 sense_len; /* sense buffer length */
209 u64 sgl_addr; /* scatter-gather list addr */
210 u64 sense_addr; /* sense buffer address */
211 u8 crn; /* SCSI Command Reference No. */
212 u8 pri_ta; /* SCSI Priority and Task attribute */
213 u8 _resvd1; /* reserved: should be 0 */
214 u8 flags; /* command flags */
215 u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */
216 u32 data_len; /* length of data expected */
217 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
218 u8 _resvd2; /* reserved */
219 u8 d_id[3]; /* FC vNIC only: Target D_ID */
220 u16 mss; /* FC vNIC only: max burst */
221 u16 _resvd3; /* reserved */
222 u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */
223 u32 e_d_tov; /* FC vNIC only: Err Detect Timeout */
224};
225
226/*
227 * Special request flags
228 */
229#define FCPIO_ICMND_SRFLAG_RETRY 0x01 /* Enable Retry handling on exchange */
230
231/*
232 * Priority/Task Attribute settings
233 */
234#define FCPIO_ICMND_PTA_SIMPLE 0 /* simple task attribute */
235#define FCPIO_ICMND_PTA_HEADQ 1 /* head of queue task attribute */
236#define FCPIO_ICMND_PTA_ORDERED 2 /* ordered task attribute */
237#define FCPIO_ICMND_PTA_ACA 4 /* auto contingent allegiance */
238#define FCPIO_ICMND_PRI_SHIFT 3 /* priority field starts in bit 3 */
239
240/*
241 * Command flags
242 */
243#define FCPIO_ICMND_RDDATA 0x02 /* read data */
244#define FCPIO_ICMND_WRDATA 0x01 /* write data */
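
Per the defines above, pri_ta packs the SCSI task attribute into the low bits with the priority field above FCPIO_ICMND_PRI_SHIFT, and flags selects the data direction. A sketch of filling those two fields for one read command (the priority value 5 is a hypothetical choice, not anything the patch mandates):

/* Illustrative sketch only: hypothetical values for one read command */
static inline void example_icmnd_attrs(struct fcpio_icmnd_16 *icmnd)
{
	icmnd->pri_ta = FCPIO_ICMND_PTA_SIMPLE |
			(5 << FCPIO_ICMND_PRI_SHIFT);	/* priority 5 */
	icmnd->flags = FCPIO_ICMND_RDDATA;		/* data-in command */
}
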
245
246/*
247 * fcpio_icmnd_32: host -> firmware request
248 *
249 * used for sending out an initiator SCSI 32-byte command
250 */
251struct fcpio_icmnd_32 {
252 u32 lunmap_id; /* index into lunmap table */
253 u8 special_req_flags; /* special exchange request flags */
254 u8 _resvd0[3]; /* reserved */
255 u32 sgl_cnt; /* scatter-gather list count */
256 u32 sense_len; /* sense buffer length */
257 u64 sgl_addr; /* scatter-gather list addr */
258 u64 sense_addr; /* sense buffer address */
259 u8 crn; /* SCSI Command Reference No. */
260 u8 pri_ta; /* SCSI Priority and Task attribute */
261 u8 _resvd1; /* reserved: should be 0 */
262 u8 flags; /* command flags */
263 u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */
264 u32 data_len; /* length of data expected */
265 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
266 u8 _resvd2; /* reserved */
267 u8 d_id[3]; /* FC vNIC only: Target D_ID */
268 u16 mss; /* FC vNIC only: max burst */
269 u16 _resvd3; /* reserved */
270 u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */
271 u32 e_d_tov; /* FC vNIC only: Error Detect Timeout */
272};
273
274/*
275 * fcpio_itmf: host -> firmware request
276 *
277 * used for requesting the firmware to abort a request and/or send out
278 * a task management function
279 *
280 * The t_tag field is only needed when the request type is ABT_TASK.
281 */
282struct fcpio_itmf {
283 u32 lunmap_id; /* index into lunmap table */
284 u32 tm_req; /* SCSI Task Management request */
285 u32 t_tag; /* header tag of fcpio to be aborted */
286	u32 _resvd;                    /* reserved */
287 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
288 u8 _resvd1; /* reserved */
289 u8 d_id[3]; /* FC vNIC only: Target D_ID */
290 u32 r_a_tov; /* FC vNIC only: R_A_TOV in msec */
291 u32 e_d_tov; /* FC vNIC only: E_D_TOV in msec */
292};
293
294/*
295 * Task Management request
296 */
297enum fcpio_itmf_tm_req_type {
298 FCPIO_ITMF_ABT_TASK_TERM = 0x01, /* abort task and terminate */
299 FCPIO_ITMF_ABT_TASK, /* abort task and issue abts */
300 FCPIO_ITMF_ABT_TASK_SET, /* abort task set */
301 FCPIO_ITMF_CLR_TASK_SET, /* clear task set */
302 FCPIO_ITMF_LUN_RESET, /* logical unit reset task mgmt */
303 FCPIO_ITMF_CLR_ACA, /* Clear ACA condition */
304};
305
306/*
307 * fcpio_tdata: host -> firmware request
308 *
309 * used for requesting the firmware to send out a read data transfer for a
310 * target command
311 */
312struct fcpio_tdata {
313 u16 rx_id; /* FC rx_id of target command */
314 u16 flags; /* command flags */
315 u32 rel_offset; /* data sequence relative offset */
316 u32 sgl_cnt; /* scatter-gather list count */
317 u32 data_len; /* length of data expected to send */
318 u64 sgl_addr; /* scatter-gather list address */
319};
320
321/*
322 * Command flags
323 */
324#define FCPIO_TDATA_SCSI_RSP 0x01 /* send a scsi resp. after last frame */
325
326/*
327 * fcpio_txrdy: host -> firmware request
328 *
329 * used for requesting the firmware to send out a write data transfer for a
330 * target command
331 */
332struct fcpio_txrdy {
333 u16 rx_id; /* FC rx_id of target command */
334 u16 _resvd0; /* reserved */
335 u32 rel_offset; /* data sequence relative offset */
336 u32 sgl_cnt; /* scatter-gather list count */
337 u32 data_len; /* length of data expected to send */
338 u64 sgl_addr; /* scatter-gather list address */
339};
340
341/*
342 * fcpio_trsp: host -> firmware request
343 *
344 * used for requesting the firmware to send out a response for a target
345 * command
346 */
347struct fcpio_trsp {
348 u16 rx_id; /* FC rx_id of target command */
349 u16 _resvd0; /* reserved */
350 u32 sense_len; /* sense data buffer length */
351 u64 sense_addr; /* sense data buffer address */
352 u16 _resvd1; /* reserved */
353 u8 flags; /* response request flags */
354 u8 scsi_status; /* SCSI status */
355 u32 residual; /* SCSI data residual value of I/O */
356};
357
358/*
359 * response request flags
360 */
361#define FCPIO_TRSP_RESID_UNDER 0x08 /* residual is valid and is underflow */
362#define FCPIO_TRSP_RESID_OVER 0x04 /* residual is valid and is overflow */
363
364/*
365 * fcpio_ttmf_ack: host -> firmware response
366 *
367 * used by the host to indicate to the firmware it has received and processed
368 * the target tmf request
369 */
370struct fcpio_ttmf_ack {
371 u16 rx_id; /* FC rx_id of target command */
372 u16 _resvd0; /* reserved */
373 u32 tmf_status; /* SCSI task management status */
374};
375
376/*
377 * fcpio_tabort: host -> firmware request
378 *
379 * used by the host to request the firmware to abort a target request that was
380 * received by the firmware
381 */
382struct fcpio_tabort {
383 u16 rx_id; /* rx_id of the target request */
384};
385
386/*
387 * fcpio_reset: host -> firmware request
388 *
389 * used by the host to signal a reset of the driver to the firmware
390 * and to request firmware to clean up all outstanding I/O
391 */
392struct fcpio_reset {
393 u32 _resvd;
394};
395
396enum fcpio_flogi_reg_format_type {
397 FCPIO_FLOGI_REG_DEF_DEST = 0, /* Use the oui | s_id mac format */
398 FCPIO_FLOGI_REG_GW_DEST, /* Use the fixed gateway mac */
399};
400
401/*
402 * fcpio_flogi_reg: host -> firmware request
403 *
404 * fc vnic only
405 * used by the host to notify the firmware of the lif's s_id
406 * and destination mac address format
407 */
408struct fcpio_flogi_reg {
409 u8 format;
410 u8 s_id[3]; /* FC vNIC only: Source S_ID */
411 u8 gateway_mac[ETH_ALEN]; /* Destination gateway mac */
412 u16 _resvd;
413 u32 r_a_tov; /* R_A_TOV in msec */
414 u32 e_d_tov; /* E_D_TOV in msec */
415};
416
417/*
418 * fcpio_echo: host -> firmware request
419 *
420 * sends a heartbeat echo request to the firmware
421 */
422struct fcpio_echo {
423 u32 _resvd;
424};
425
426/*
427 * fcpio_lunmap_req: host -> firmware request
428 *
429 * scsi vnic only
430 * sends a request to retrieve the lunmap table for scsi vnics
431 */
432struct fcpio_lunmap_req {
433 u64 addr; /* address of the buffer */
434 u32 len; /* len of the buffer */
435};
436
437/*
438 * fcpio_flogi_fip_reg: host -> firmware request
439 *
440 * fc vnic only
441 * used by the host to notify the firmware of the lif's s_id
442 * and destination mac address format
443 */
444struct fcpio_flogi_fip_reg {
445 u8 _resvd0;
446 u8 s_id[3]; /* FC vNIC only: Source S_ID */
447 u8 fcf_mac[ETH_ALEN]; /* FCF Target destination mac */
448 u16 _resvd1;
449 u32 r_a_tov; /* R_A_TOV in msec */
450 u32 e_d_tov; /* E_D_TOV in msec */
451 u8 ha_mac[ETH_ALEN]; /* Host adapter source mac */
452 u16 _resvd2;
453};
454
455/*
456 * Basic structure for all fcpio structures that are sent from the host to the
457 * firmware. They are 128 bytes per structure.
458 */
459#define FCPIO_HOST_REQ_LEN 128 /* expected length of host requests */
460
461struct fcpio_host_req {
462 struct fcpio_header hdr;
463
464 union {
465 /*
466 * Defines space needed for request
467 */
468 u8 buf[FCPIO_HOST_REQ_LEN - sizeof(struct fcpio_header)];
469
470 /*
471 * Initiator host requests
472 */
473 struct fcpio_icmnd_16 icmnd_16;
474 struct fcpio_icmnd_32 icmnd_32;
475 struct fcpio_itmf itmf;
476
477 /*
478 * Target host requests
479 */
480 struct fcpio_tdata tdata;
481 struct fcpio_txrdy txrdy;
482 struct fcpio_trsp trsp;
483 struct fcpio_ttmf_ack ttmf_ack;
484 struct fcpio_tabort tabort;
485
486 /*
487 * Misc requests
488 */
489 struct fcpio_reset reset;
490 struct fcpio_flogi_reg flogi_reg;
491 struct fcpio_echo echo;
492 struct fcpio_lunmap_req lunmap_req;
493 struct fcpio_flogi_fip_reg flogi_fip_reg;
494 } u;
495};
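
The buf[] member exists purely to pad the union so that every host request is exactly FCPIO_HOST_REQ_LEN bytes, which is what lets the copy WQ treat entries as fixed-size slots; the firmware-to-host descriptor later in this file obeys the analogous 64-byte rule via FCPIO_FW_REQ_LEN. A compile-time assert would make the invariant explicit; this check is not in the original patch, just a sketch using the kernel's BUILD_BUG_ON():

/* Sketch only, not part of the original patch */
static inline void fcpio_host_req_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct fcpio_host_req) != FCPIO_HOST_REQ_LEN);
}
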
496
497/*
498 * fcpio_icmnd_cmpl: firmware -> host response
499 *
500 * used for sending the host a response to an initiator command
501 */
502struct fcpio_icmnd_cmpl {
503 u8 _resvd0[6]; /* reserved */
504 u8 flags; /* response flags */
505 u8 scsi_status; /* SCSI status */
506 u32 residual; /* SCSI data residual length */
507 u32 sense_len; /* SCSI sense length */
508};
509
510/*
511 * response flags
512 */
513#define FCPIO_ICMND_CMPL_RESID_UNDER 0x08 /* resid under and valid */
514#define FCPIO_ICMND_CMPL_RESID_OVER 0x04 /* resid over and valid */
515
516/*
517 * fcpio_itmf_cmpl: firmware -> host response
518 *
519 * used for sending the host a response for a itmf request
520 */
521struct fcpio_itmf_cmpl {
522 u32 _resvd; /* reserved */
523};
524
525/*
526 * fcpio_tcmnd_16: firmware -> host request
527 *
528 * used by the firmware to notify the host of an incoming target SCSI 16-Byte
529 * request
530 */
531struct fcpio_tcmnd_16 {
532 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
533 u8 crn; /* SCSI Command Reference No. */
534 u8 pri_ta; /* SCSI Priority and Task attribute */
535 u8 _resvd2; /* reserved: should be 0 */
536 u8 flags; /* command flags */
537 u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */
538 u32 data_len; /* length of data expected */
539 u8 _resvd1; /* reserved */
540 u8 s_id[3]; /* FC vNIC only: Source S_ID */
541};
542
543/*
544 * Priority/Task Attribute settings
545 */
546#define FCPIO_TCMND_PTA_SIMPLE 0 /* simple task attribute */
547#define FCPIO_TCMND_PTA_HEADQ 1 /* head of queue task attribute */
548#define FCPIO_TCMND_PTA_ORDERED 2 /* ordered task attribute */
549#define FCPIO_TCMND_PTA_ACA 4 /* auto contingent allegiance */
550#define FCPIO_TCMND_PRI_SHIFT 3 /* priority field starts in bit 3 */
551
552/*
553 * Command flags
554 */
555#define FCPIO_TCMND_RDDATA 0x02 /* read data */
556#define FCPIO_TCMND_WRDATA 0x01 /* write data */
557
558/*
559 * fcpio_tcmnd_32: firmware -> host request
560 *
561 * used by the firmware to notify the host of an incoming target SCSI 32-Byte
562 * request
563 */
564struct fcpio_tcmnd_32 {
565 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
566 u8 crn; /* SCSI Command Reference No. */
567 u8 pri_ta; /* SCSI Priority and Task attribute */
568 u8 _resvd2; /* reserved: should be 0 */
569 u8 flags; /* command flags */
570 u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */
571 u32 data_len; /* length of data expected */
572 u8 _resvd0; /* reserved */
573 u8 s_id[3]; /* FC vNIC only: Source S_ID */
574};
575
576/*
577 * fcpio_tdrsp_cmpl: firmware -> host response
578 *
579 * used by the firmware to notify the host of a response to a host target
580 * command
581 */
582struct fcpio_tdrsp_cmpl {
583 u16 rx_id; /* rx_id of the target request */
584 u16 _resvd0; /* reserved */
585};
586
587/*
588 * fcpio_ttmf: firmware -> host request
589 *
590 * used by the firmware to notify the host of an incoming task management
591 * function request
592 */
593struct fcpio_ttmf {
594 u8 _resvd0; /* reserved */
595 u8 s_id[3]; /* FC vNIC only: Source S_ID */
596 u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
597 u8 crn; /* SCSI Command Reference No. */
598 u8 _resvd2[3]; /* reserved */
599 u32 tmf_type; /* task management request type */
600};
601
602/*
603 * Task Management request
604 */
605#define FCPIO_TTMF_CLR_ACA 0x40 /* Clear ACA condition */
606#define FCPIO_TTMF_LUN_RESET 0x10 /* logical unit reset task mgmt */
607#define FCPIO_TTMF_CLR_TASK_SET 0x04 /* clear task set */
608#define FCPIO_TTMF_ABT_TASK_SET 0x02 /* abort task set */
609#define FCPIO_TTMF_ABT_TASK 0x01 /* abort task */
610
611/*
612 * fcpio_tabort_cmpl: firmware -> host response
613 *
614 * used by the firmware to respond to a host's tabort request
615 */
616struct fcpio_tabort_cmpl {
617 u16 rx_id; /* rx_id of the target request */
618 u16 _resvd0; /* reserved */
619};
620
621/*
622 * fcpio_ack: firmware -> host response
623 *
624 * used by firmware to notify the host of the last work request received
625 */
626struct fcpio_ack {
627 u16 request_out; /* last host entry received */
628 u16 _resvd;
629};
630
631/*
632 * fcpio_reset_cmpl: firmware -> host response
633 *
634 * used by firmware to respond to the host's reset request
635 */
636struct fcpio_reset_cmpl {
637 u16 vnic_id;
638};
639
640/*
641 * fcpio_flogi_reg_cmpl: firmware -> host response
642 *
643 * fc vnic only
644 * response to the fcpio_flogi_reg request
645 */
646struct fcpio_flogi_reg_cmpl {
647 u32 _resvd;
648};
649
650/*
651 * fcpio_echo_cmpl: firmware -> host response
652 *
653 * response to the fcpio_echo request
654 */
655struct fcpio_echo_cmpl {
656 u32 _resvd;
657};
658
659/*
660 * fcpio_lunmap_chng: firmware -> host notification
661 *
662 * scsi vnic only
663 * notifies the host that the lunmap tables have changed
664 */
665struct fcpio_lunmap_chng {
666 u32 _resvd;
667};
668
669/*
670 * fcpio_lunmap_req_cmpl: firmware -> host response
671 *
672 * scsi vnic only
673 * response for lunmap table request from the host
674 */
675struct fcpio_lunmap_req_cmpl {
676 u32 _resvd;
677};
678
679/*
680 * Basic structure for all fcpio structures that are sent from the firmware to
681 * the host. They are 64 bytes per structure.
682 */
683#define FCPIO_FW_REQ_LEN 64 /* expected length of fw requests */
684struct fcpio_fw_req {
685 struct fcpio_header hdr;
686
687 union {
688 /*
689 * Defines space needed for request
690 */
691 u8 buf[FCPIO_FW_REQ_LEN - sizeof(struct fcpio_header)];
692
693 /*
694 * Initiator firmware responses
695 */
696 struct fcpio_icmnd_cmpl icmnd_cmpl;
697 struct fcpio_itmf_cmpl itmf_cmpl;
698
699 /*
700 * Target firmware new requests
701 */
702 struct fcpio_tcmnd_16 tcmnd_16;
703 struct fcpio_tcmnd_32 tcmnd_32;
704
705 /*
706 * Target firmware responses
707 */
708 struct fcpio_tdrsp_cmpl tdrsp_cmpl;
709 struct fcpio_ttmf ttmf;
710 struct fcpio_tabort_cmpl tabort_cmpl;
711
712 /*
713 * Firmware response to work received
714 */
715 struct fcpio_ack ack;
716
717 /*
718 * Misc requests
719 */
720 struct fcpio_reset_cmpl reset_cmpl;
721 struct fcpio_flogi_reg_cmpl flogi_reg_cmpl;
722 struct fcpio_echo_cmpl echo_cmpl;
723 struct fcpio_lunmap_chng lunmap_chng;
724 struct fcpio_lunmap_req_cmpl lunmap_req_cmpl;
725 } u;
726};
727
728/*
729 * Access routines to encode and decode the color bit, which is the most
730 * significant bit of the MSB of the structure
731 */
732static inline void fcpio_color_enc(struct fcpio_fw_req *fw_req, u8 color)
733{
734 u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1;
735
736 if (color)
737 *c |= 0x80;
738 else
739 *c &= ~0x80;
740}
741
742static inline void fcpio_color_dec(struct fcpio_fw_req *fw_req, u8 *color)
743{
744 u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1;
745
746 *color = *c >> 7;
747
748 /*
749 * Make sure color bit is read from desc *before* other fields
750 * are read from desc. Hardware guarantees color bit is last
751 * bit (byte) written. Adding the rmb() prevents the compiler
752 * and/or CPU from reordering the reads which would potentially
753 * result in reading stale values.
754 */
755
756 rmb();
757
758}
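
The color protocol lets the host detect new completion entries without reading a produced-index register: the firmware flips the bit each time it wraps the ring, so an entry is fresh exactly when its color matches the value the host currently expects. A consumer sketch modeled on the vnic_cq_service() pattern used elsewhere in this driver family (the handle() callback and the assumption that the firmware writes color=1 on its first pass are illustrative):

/* Illustrative sketch of a color-based poll loop */
static void example_poll(struct fcpio_fw_req *ring, unsigned int count,
			 void (*handle)(struct fcpio_fw_req *))
{
	unsigned int to_clean = 0;
	u8 expected = 1;	/* assumption: fw writes color=1 on pass 1 */
	u8 color;

	fcpio_color_dec(&ring[to_clean], &color);
	while (color == expected) {
		handle(&ring[to_clean]);
		if (++to_clean == count) {
			to_clean = 0;
			expected = !expected;	/* fw flips color per wrap */
		}
		fcpio_color_dec(&ring[to_clean], &color);
	}
}
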
759
760/*
761 * Lunmap table entry for scsi vnics
762 */
763#define FCPIO_LUNMAP_TABLE_SIZE 256
764#define FCPIO_FLAGS_LUNMAP_VALID 0x80
765#define FCPIO_FLAGS_BOOT 0x01
766struct fcpio_lunmap_entry {
767 u8 bus;
768 u8 target;
769 u8 lun;
770 u8 path_cnt;
771 u16 flags;
772 u16 update_cnt;
773};
774
775struct fcpio_lunmap_tbl {
776 u32 update_cnt;
777 struct fcpio_lunmap_entry lunmaps[FCPIO_LUNMAP_TABLE_SIZE];
778};
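
Given a buffer filled in response to fcpio_lunmap_req, a consumer would walk the fixed-size table and skip entries that do not have FCPIO_FLAGS_LUNMAP_VALID set. A sketch (the function name is illustrative):

/* Illustrative sketch: walking a table returned via fcpio_lunmap_req */
static void example_lunmap_scan(struct fcpio_lunmap_tbl *tbl)
{
	unsigned int i;

	for (i = 0; i < FCPIO_LUNMAP_TABLE_SIZE; i++) {
		struct fcpio_lunmap_entry *e = &tbl->lunmaps[i];

		if (!(e->flags & FCPIO_FLAGS_LUNMAP_VALID))
			continue;
		/* e->bus, e->target and e->lun identify one mapped path */
	}
}
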
779
780#endif /* _FCPIO_H_ */
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
new file mode 100644
index 00000000000..e4c0a3d7d87
--- /dev/null
+++ b/drivers/scsi/fnic/fnic.h
@@ -0,0 +1,265 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _FNIC_H_
19#define _FNIC_H_
20
21#include <linux/interrupt.h>
22#include <linux/netdevice.h>
23#include <linux/workqueue.h>
24#include <scsi/libfc.h>
25#include "fnic_io.h"
26#include "fnic_res.h"
27#include "vnic_dev.h"
28#include "vnic_wq.h"
29#include "vnic_rq.h"
30#include "vnic_cq.h"
31#include "vnic_wq_copy.h"
32#include "vnic_intr.h"
33#include "vnic_stats.h"
34#include "vnic_scsi.h"
35
36#define DRV_NAME "fnic"
37#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
38#define DRV_VERSION "1.0.0.1121"
39#define PFX DRV_NAME ": "
40#define DFX DRV_NAME "%d: "
41
42#define DESC_CLEAN_LOW_WATERMARK 8
43#define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */
44#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
45#define FNIC_DFLT_QUEUE_DEPTH 32
46#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
47
48/*
49 * Tag bits used for special requests.
50 */
51#define BIT(nr) (1UL << (nr))
52#define FNIC_TAG_ABORT BIT(30) /* tag bit indicating abort */
53#define FNIC_TAG_DEV_RST BIT(29) /* indicates device reset */
54#define FNIC_TAG_MASK (BIT(24) - 1) /* mask for lookup */
55#define FNIC_NO_TAG -1
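
Special requests are tracked in the same tag space as regular commands: the high bits mark the request class and FNIC_TAG_MASK recovers the original command index. A sketch (the helper is illustrative, not part of the patch):

/* Illustrative sketch: recovering a command index from a special tag */
static inline u32 example_tag_to_index(u32 tag)
{
	if (tag & FNIC_TAG_ABORT)		/* abort for some command */
		return tag & FNIC_TAG_MASK;	/* original command index */
	return tag;				/* plain command tag */
}
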
56
57/*
58 * Usage of the scsi_cmnd scratchpad.
59 * These fields are locked by the hashed io_req_lock.
60 */
61#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
62#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase)
63#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message)
64#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
65#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command)
66
67#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
68
69#define FNIC_LUN_RESET_TIMEOUT 10000 /* mSec */
70#define FNIC_HOST_RESET_TIMEOUT 10000 /* mSec */
71#define FNIC_RMDEVICE_TIMEOUT 1000 /* mSec */
72#define FNIC_HOST_RESET_SETTLE_TIME 30 /* Sec */
73
74#define FNIC_MAX_FCP_TARGET 256
75
76extern unsigned int fnic_log_level;
77
78#define FNIC_MAIN_LOGGING 0x01
79#define FNIC_FCS_LOGGING 0x02
80#define FNIC_SCSI_LOGGING 0x04
81#define FNIC_ISR_LOGGING 0x08
82
83#define FNIC_CHECK_LOGGING(LEVEL, CMD) \
84do { \
85 if (unlikely(fnic_log_level & LEVEL)) \
86 do { \
87 CMD; \
88 } while (0); \
89} while (0)
90
91#define FNIC_MAIN_DBG(kern_level, host, fmt, args...) \
92 FNIC_CHECK_LOGGING(FNIC_MAIN_LOGGING, \
93 shost_printk(kern_level, host, fmt, ##args);)
94
95#define FNIC_FCS_DBG(kern_level, host, fmt, args...) \
96 FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \
97 shost_printk(kern_level, host, fmt, ##args);)
98
99#define FNIC_SCSI_DBG(kern_level, host, fmt, args...) \
100 FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \
101 shost_printk(kern_level, host, fmt, ##args);)
102
103#define FNIC_ISR_DBG(kern_level, host, fmt, args...) \
104 FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \
105 shost_printk(kern_level, host, fmt, ##args);)
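
Each wrapper expands to a shost_printk() guarded by an unlikely() test on fnic_log_level, so a disabled logging class costs one predicted branch per call site. A hypothetical call site (tag and hdr_status are assumed locals):

/* Illustrative sketch of a typical call site */
static void example_io_done(struct fnic *fnic, int tag, u32 hdr_status)
{
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "io cmpl, tag %d status 0x%x\n", tag, hdr_status);
}
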
106
107extern const char *fnic_state_str[];
108
109enum fnic_intx_intr_index {
110 FNIC_INTX_WQ_RQ_COPYWQ,
111 FNIC_INTX_ERR,
112 FNIC_INTX_NOTIFY,
113 FNIC_INTX_INTR_MAX,
114};
115
116enum fnic_msix_intr_index {
117 FNIC_MSIX_RQ,
118 FNIC_MSIX_WQ,
119 FNIC_MSIX_WQ_COPY,
120 FNIC_MSIX_ERR_NOTIFY,
121 FNIC_MSIX_INTR_MAX,
122};
123
124struct fnic_msix_entry {
125 int requested;
126 char devname[IFNAMSIZ];
127 irqreturn_t (*isr)(int, void *);
128 void *devid;
129};
130
131enum fnic_state {
132 FNIC_IN_FC_MODE = 0,
133 FNIC_IN_FC_TRANS_ETH_MODE,
134 FNIC_IN_ETH_MODE,
135 FNIC_IN_ETH_TRANS_FC_MODE,
136};
137
138#define FNIC_WQ_COPY_MAX 1
139#define FNIC_WQ_MAX 1
140#define FNIC_RQ_MAX 1
141#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX)
142
143struct mempool;
144
145/* Per-instance private data structure */
146struct fnic {
147 struct fc_lport *lport;
148 struct vnic_dev_bar bar0;
149
150 struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX];
151 struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX];
152
153 struct vnic_stats *stats;
154 unsigned long stats_time; /* time of stats update */
155 struct vnic_nic_cfg *nic_cfg;
156 char name[IFNAMSIZ];
157 struct timer_list notify_timer; /* used for MSI interrupts */
158
159 unsigned int err_intr_offset;
160 unsigned int link_intr_offset;
161
162 unsigned int wq_count;
163 unsigned int cq_count;
164
165	u32 fcoui_mode:1;		/* use fcoui address */
166 u32 vlan_hw_insert:1; /* let hw insert the tag */
167 u32 in_remove:1; /* fnic device in removal */
168 u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */
169
170 struct completion *remove_wait; /* device remove thread blocks */
171
172 struct fc_frame *flogi;
173 struct fc_frame *flogi_resp;
174 u16 flogi_oxid;
175 unsigned long s_id;
176 enum fnic_state state;
177 spinlock_t fnic_lock;
178
179 u16 vlan_id; /* VLAN tag including priority */
180 u8 mac_addr[ETH_ALEN];
181 u8 dest_addr[ETH_ALEN];
182 u8 data_src_addr[ETH_ALEN];
183 u64 fcp_input_bytes; /* internal statistic */
184 u64 fcp_output_bytes; /* internal statistic */
185 u32 link_down_cnt;
186 int link_status;
187
188 struct list_head list;
189 struct pci_dev *pdev;
190 struct vnic_fc_config config;
191 struct vnic_dev *vdev;
192 unsigned int raw_wq_count;
193 unsigned int wq_copy_count;
194 unsigned int rq_count;
195 int fw_ack_index[FNIC_WQ_COPY_MAX];
196 unsigned short fw_ack_recd[FNIC_WQ_COPY_MAX];
197 unsigned short wq_copy_desc_low[FNIC_WQ_COPY_MAX];
198 unsigned int intr_count;
199 u32 __iomem *legacy_pba;
200 struct fnic_host_tag *tags;
201 mempool_t *io_req_pool;
202 mempool_t *io_sgl_pool[FNIC_SGL_NUM_CACHES];
203 spinlock_t io_req_lock[FNIC_IO_LOCKS]; /* locks for scsi cmnds */
204
205 struct work_struct link_work;
206 struct work_struct frame_work;
207 struct sk_buff_head frame_queue;
208
209 /* copy work queue cache line section */
210 ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
211 /* completion queue cache line section */
212 ____cacheline_aligned struct vnic_cq cq[FNIC_CQ_MAX];
213
214 spinlock_t wq_copy_lock[FNIC_WQ_COPY_MAX];
215
216 /* work queue cache line section */
217 ____cacheline_aligned struct vnic_wq wq[FNIC_WQ_MAX];
218 spinlock_t wq_lock[FNIC_WQ_MAX];
219
220 /* receive queue cache line section */
221 ____cacheline_aligned struct vnic_rq rq[FNIC_RQ_MAX];
222
223 /* interrupt resource cache line section */
224 ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX];
225};
226
227extern struct workqueue_struct *fnic_event_queue;
228extern struct device_attribute *fnic_attrs[];
229
230void fnic_clear_intr_mode(struct fnic *fnic);
231int fnic_set_intr_mode(struct fnic *fnic);
232void fnic_free_intr(struct fnic *fnic);
233int fnic_request_intr(struct fnic *fnic);
234
235int fnic_send(struct fc_lport *, struct fc_frame *);
236void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
237void fnic_handle_frame(struct work_struct *work);
238void fnic_handle_link(struct work_struct *work);
239int fnic_rq_cmpl_handler(struct fnic *fnic, int);
240int fnic_alloc_rq_frame(struct vnic_rq *rq);
241void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
242int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp);
243
244int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
245int fnic_abort_cmd(struct scsi_cmnd *);
246int fnic_device_reset(struct scsi_cmnd *);
247int fnic_host_reset(struct scsi_cmnd *);
248int fnic_reset(struct Scsi_Host *);
249void fnic_scsi_cleanup(struct fc_lport *);
250void fnic_scsi_abort_io(struct fc_lport *);
251void fnic_empty_scsi_cleanup(struct fc_lport *);
252void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
253int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int);
254int fnic_wq_cmpl_handler(struct fnic *fnic, int);
255int fnic_flogi_reg_handler(struct fnic *fnic);
256void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
257 struct fcpio_host_req *desc);
258int fnic_fw_reset_handler(struct fnic *fnic);
259void fnic_terminate_rport_io(struct fc_rport *);
260const char *fnic_state_to_str(unsigned int state);
261
262void fnic_log_q_error(struct fnic *fnic);
263void fnic_handle_link_event(struct fnic *fnic);
264
265#endif /* _FNIC_H_ */
diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c
new file mode 100644
index 00000000000..aea0c3becfd
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_attrs.c
@@ -0,0 +1,56 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/string.h>
19#include <linux/device.h>
20#include <scsi/scsi_host.h>
21#include "fnic.h"
22
23static ssize_t fnic_show_state(struct device *dev,
24 struct device_attribute *attr, char *buf)
25{
26 struct fc_lport *lp = shost_priv(class_to_shost(dev));
27 struct fnic *fnic = lport_priv(lp);
28
29 return snprintf(buf, PAGE_SIZE, "%s\n", fnic_state_str[fnic->state]);
30}
31
32static ssize_t fnic_show_drv_version(struct device *dev,
33 struct device_attribute *attr, char *buf)
34{
35 return snprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
36}
37
38static ssize_t fnic_show_link_state(struct device *dev,
39 struct device_attribute *attr, char *buf)
40{
41 struct fc_lport *lp = shost_priv(class_to_shost(dev));
42
43 return snprintf(buf, PAGE_SIZE, "%s\n", (lp->link_up)
44 ? "Link Up" : "Link Down");
45}
46
47static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
48static DEVICE_ATTR(drv_version, S_IRUGO, fnic_show_drv_version, NULL);
49static DEVICE_ATTR(link_state, S_IRUGO, fnic_show_link_state, NULL);
50
51struct device_attribute *fnic_attrs[] = {
52 &dev_attr_fnic_state,
53 &dev_attr_drv_version,
54 &dev_attr_link_state,
55 NULL,
56};
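
The NULL-terminated array above is the standard SCSI-host sysfs hook; it would be wired into the driver's scsi_host_template, which lives in fnic_main.c and is not shown in this portion of the patch. A sketch of the hookup (field values abbreviated):

/* Sketch only; the real template is defined in fnic_main.c */
static struct scsi_host_template fnic_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_NAME,
	.shost_attrs	= fnic_attrs,
	/* ... remaining fields elided ... */
};

Once the host is registered, the three attributes appear as read-only files under the scsi_host class directory for that host.
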
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
new file mode 100644
index 00000000000..07e6eedb83c
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -0,0 +1,742 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/errno.h>
19#include <linux/pci.h>
20#include <linux/skbuff.h>
21#include <linux/interrupt.h>
22#include <linux/spinlock.h>
23#include <linux/if_ether.h>
24#include <linux/if_vlan.h>
25#include <linux/workqueue.h>
26#include <scsi/fc/fc_els.h>
27#include <scsi/fc/fc_fcoe.h>
28#include <scsi/fc_frame.h>
29#include <scsi/libfc.h>
30#include "fnic_io.h"
31#include "fnic.h"
32#include "cq_enet_desc.h"
33#include "cq_exch_desc.h"
34
35struct workqueue_struct *fnic_event_queue;
36
37void fnic_handle_link(struct work_struct *work)
38{
39 struct fnic *fnic = container_of(work, struct fnic, link_work);
40 unsigned long flags;
41 int old_link_status;
42 u32 old_link_down_cnt;
43
44 spin_lock_irqsave(&fnic->fnic_lock, flags);
45
46 if (fnic->stop_rx_link_events) {
47 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
48 return;
49 }
50
51 old_link_down_cnt = fnic->link_down_cnt;
52 old_link_status = fnic->link_status;
53 fnic->link_status = vnic_dev_link_status(fnic->vdev);
54 fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
55
56 if (old_link_status == fnic->link_status) {
57 if (!fnic->link_status)
58 /* DOWN -> DOWN */
59 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
60 else {
61 if (old_link_down_cnt != fnic->link_down_cnt) {
62 /* UP -> DOWN -> UP */
63 fnic->lport->host_stats.link_failure_count++;
64 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
65 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
66 "link down\n");
67 fc_linkdown(fnic->lport);
68 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
69 "link up\n");
70 fc_linkup(fnic->lport);
71 } else
72 /* UP -> UP */
73 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
74 }
75 } else if (fnic->link_status) {
76 /* DOWN -> UP */
77 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
78 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
79 fc_linkup(fnic->lport);
80 } else {
81 /* UP -> DOWN */
82 fnic->lport->host_stats.link_failure_count++;
83 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
84 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
85 fc_linkdown(fnic->lport);
86 }
87
88}
89
90/*
91 * This function passes incoming fabric frames to libFC
92 */
93void fnic_handle_frame(struct work_struct *work)
94{
95 struct fnic *fnic = container_of(work, struct fnic, frame_work);
96 struct fc_lport *lp = fnic->lport;
97 unsigned long flags;
98 struct sk_buff *skb;
99 struct fc_frame *fp;
100
101 while ((skb = skb_dequeue(&fnic->frame_queue))) {
102
103 spin_lock_irqsave(&fnic->fnic_lock, flags);
104 if (fnic->stop_rx_link_events) {
105 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
106 dev_kfree_skb(skb);
107 return;
108 }
109 fp = (struct fc_frame *)skb;
110 /* if Flogi resp frame, register the address */
111 if (fr_flags(fp)) {
112 vnic_dev_add_addr(fnic->vdev,
113 fnic->data_src_addr);
114 fr_flags(fp) = 0;
115 }
116 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
117
118 fc_exch_recv(lp, lp->emp, fp);
119 }
120
121}
122
123static inline void fnic_import_rq_fc_frame(struct sk_buff *skb,
124 u32 len, u8 sof, u8 eof)
125{
126 struct fc_frame *fp = (struct fc_frame *)skb;
127
128 skb_trim(skb, len);
129 fr_eof(fp) = eof;
130 fr_sof(fp) = sof;
131}
132
133
134static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len)
135{
136 struct fc_frame *fp;
137 struct ethhdr *eh;
138 struct vlan_ethhdr *vh;
139 struct fcoe_hdr *fcoe_hdr;
140 struct fcoe_crc_eof *ft;
141 u32 transport_len = 0;
142
143 eh = (struct ethhdr *)skb->data;
144 vh = (struct vlan_ethhdr *)skb->data;
145 if (vh->h_vlan_proto == htons(ETH_P_8021Q) &&
146 vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) {
147 skb_pull(skb, sizeof(struct vlan_ethhdr));
148 transport_len += sizeof(struct vlan_ethhdr);
149 } else if (eh->h_proto == htons(ETH_P_FCOE)) {
150 transport_len += sizeof(struct ethhdr);
151 skb_pull(skb, sizeof(struct ethhdr));
152 } else
153 return -1;
154
155 fcoe_hdr = (struct fcoe_hdr *)skb->data;
156 if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
157 return -1;
158
159 fp = (struct fc_frame *)skb;
160 fc_frame_init(fp);
161 fr_sof(fp) = fcoe_hdr->fcoe_sof;
162 skb_pull(skb, sizeof(struct fcoe_hdr));
163 transport_len += sizeof(struct fcoe_hdr);
164
165 ft = (struct fcoe_crc_eof *)(skb->data + len -
166 transport_len - sizeof(*ft));
167 fr_eof(fp) = ft->fcoe_eof;
168 skb_trim(skb, len - transport_len - sizeof(*ft));
169 return 0;
170}
171
172static inline int fnic_handle_flogi_resp(struct fnic *fnic,
173 struct fc_frame *fp)
174{
175 u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
176 struct ethhdr *eth_hdr;
177 struct fc_frame_header *fh;
178 int ret = 0;
179 unsigned long flags;
180 struct fc_frame *old_flogi_resp = NULL;
181
182 fh = (struct fc_frame_header *)fr_hdr(fp);
183
184 spin_lock_irqsave(&fnic->fnic_lock, flags);
185
186 if (fnic->state == FNIC_IN_ETH_MODE) {
187
188 /*
189 * Check if oxid matches on taking the lock. A new Flogi
190 * issued by libFC might have changed the fnic cached oxid
191 */
192 if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) {
193 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
194 "Flogi response oxid not"
195 " matching cached oxid, dropping frame"
196 "\n");
197 ret = -1;
198 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
199 dev_kfree_skb_irq(fp_skb(fp));
200 goto handle_flogi_resp_end;
201 }
202
203 /* Drop older cached flogi response frame, cache this frame */
204 old_flogi_resp = fnic->flogi_resp;
205 fnic->flogi_resp = fp;
206 fnic->flogi_oxid = FC_XID_UNKNOWN;
207
208 /*
209	 * This frame is part of flogi; get the src mac addr from
210	 * the frame. If the src mac is fcoui based, mark the
211	 * address mode flag to use the fcoui base for the dst mac
212	 * addr; otherwise store the fcoe gateway addr.
213 */
214 eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp));
215 memcpy(mac, eth_hdr->h_source, ETH_ALEN);
216
217 if (ntoh24(mac) == FC_FCOE_OUI)
218 fnic->fcoui_mode = 1;
219 else {
220 fnic->fcoui_mode = 0;
221 memcpy(fnic->dest_addr, mac, ETH_ALEN);
222 }
223
224 /*
225 * Except for Flogi frame, all outbound frames from us have the
226	 * Eth Src address as FC_FCOE_OUI | our_sid. Flogi frame uses
227 * the vnic MAC address as the Eth Src address
228 */
229 fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id);
230
231 /* We get our s_id from the d_id of the flogi resp frame */
232 fnic->s_id = ntoh24(fh->fh_d_id);
233
234 /* Change state to reflect transition from Eth to FC mode */
235 fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
236
237 } else {
238 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
239 "Unexpected fnic state %s while"
240 " processing flogi resp\n",
241 fnic_state_to_str(fnic->state));
242 ret = -1;
243 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
244 dev_kfree_skb_irq(fp_skb(fp));
245 goto handle_flogi_resp_end;
246 }
247
248 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
249
250 /* Drop older cached frame */
251 if (old_flogi_resp)
252 dev_kfree_skb_irq(fp_skb(old_flogi_resp));
253
254 /*
255	 * send flogi reg request to firmware; this will put the fnic
256	 * in FC mode
257 */
258 ret = fnic_flogi_reg_handler(fnic);
259
260 if (ret < 0) {
261 int free_fp = 1;
262 spin_lock_irqsave(&fnic->fnic_lock, flags);
263 /*
264		 * free the frame if no other thread is
265		 * pointing to it
266 */
267 if (fnic->flogi_resp != fp)
268 free_fp = 0;
269 else
270 fnic->flogi_resp = NULL;
271
272 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
273 fnic->state = FNIC_IN_ETH_MODE;
274 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
275 if (free_fp)
276 dev_kfree_skb_irq(fp_skb(fp));
277 }
278
279 handle_flogi_resp_end:
280 return ret;
281}
282
283/* Returns 1 for a response that matches cached flogi oxid */
284static inline int is_matching_flogi_resp_frame(struct fnic *fnic,
285 struct fc_frame *fp)
286{
287 struct fc_frame_header *fh;
288 int ret = 0;
289 u32 f_ctl;
290
291 fh = fc_frame_header_get(fp);
292 f_ctl = ntoh24(fh->fh_f_ctl);
293
294 if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) &&
295 fh->fh_r_ctl == FC_RCTL_ELS_REP &&
296 (f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX &&
297 fh->fh_type == FC_TYPE_ELS)
298 ret = 1;
299
300 return ret;
301}
302
303static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
304 *cq_desc, struct vnic_rq_buf *buf,
305 int skipped __attribute__((unused)),
306 void *opaque)
307{
308 struct fnic *fnic = vnic_dev_priv(rq->vdev);
309 struct sk_buff *skb;
310 struct fc_frame *fp;
311 unsigned int eth_hdrs_stripped;
312 u8 type, color, eop, sop, ingress_port, vlan_stripped;
313 u8 fcoe = 0, fcoe_sof, fcoe_eof;
314 u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
315 u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
316 u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
317 u8 fcs_ok = 1, packet_error = 0;
318 u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
319 u32 rss_hash;
320 u16 exchange_id, tmpl;
321 u8 sof = 0;
322 u8 eof = 0;
323 u32 fcp_bytes_written = 0;
324 unsigned long flags;
325
326 pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
327 PCI_DMA_FROMDEVICE);
328 skb = buf->os_buf;
329 buf->os_buf = NULL;
330
331 cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
332 if (type == CQ_DESC_TYPE_RQ_FCP) {
333 cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
334 &type, &color, &q_number, &completed_index,
335 &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
336 &tmpl, &fcp_bytes_written, &sof, &eof,
337 &ingress_port, &packet_error,
338 &fcoe_enc_error, &fcs_ok, &vlan_stripped,
339 &vlan);
340 eth_hdrs_stripped = 1;
341
342 } else if (type == CQ_DESC_TYPE_RQ_ENET) {
343 cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
344 &type, &color, &q_number, &completed_index,
345 &ingress_port, &fcoe, &eop, &sop,
346 &rss_type, &csum_not_calc, &rss_hash,
347 &bytes_written, &packet_error,
348 &vlan_stripped, &vlan, &checksum,
349 &fcoe_sof, &fcoe_fc_crc_ok,
350 &fcoe_enc_error, &fcoe_eof,
351 &tcp_udp_csum_ok, &udp, &tcp,
352 &ipv4_csum_ok, &ipv6, &ipv4,
353 &ipv4_fragment, &fcs_ok);
354 eth_hdrs_stripped = 0;
355
356 } else {
357		/* wrong CQ type */
358 shost_printk(KERN_ERR, fnic->lport->host,
359 "fnic rq_cmpl wrong cq type x%x\n", type);
360 goto drop;
361 }
362
363 if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
364 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
365 "fnic rq_cmpl fcoe x%x fcsok x%x"
366 " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
367 " x%x\n",
368 fcoe, fcs_ok, packet_error,
369 fcoe_fc_crc_ok, fcoe_enc_error);
370 goto drop;
371 }
372
373 if (eth_hdrs_stripped)
374 fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof);
375 else if (fnic_import_rq_eth_pkt(skb, bytes_written))
376 goto drop;
377
378 fp = (struct fc_frame *)skb;
379
380 /*
381 * If frame is an ELS response that matches the cached FLOGI OX_ID,
382 * and is accept, issue flogi_reg_request copy wq request to firmware
383 * to register the S_ID and determine whether FC_OUI mode or GW mode.
384 */
385 if (is_matching_flogi_resp_frame(fnic, fp)) {
386 if (!eth_hdrs_stripped) {
387 if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
388 fnic_handle_flogi_resp(fnic, fp);
389 return;
390 }
391 /*
392			 * Received a Flogi reject. No point registering
393			 * with fw, but forward it to libFC
394 */
395 goto forward;
396 }
397 goto drop;
398 }
399 if (!eth_hdrs_stripped)
400 goto drop;
401
402forward:
403 spin_lock_irqsave(&fnic->fnic_lock, flags);
404 if (fnic->stop_rx_link_events) {
405 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
406 goto drop;
407 }
408	/* Use fr_flags to indicate whether this is a successful flogi resp */
409 fr_flags(fp) = 0;
410 fr_dev(fp) = fnic->lport;
411 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
412
413 skb_queue_tail(&fnic->frame_queue, skb);
414 queue_work(fnic_event_queue, &fnic->frame_work);
415
416 return;
417drop:
418 dev_kfree_skb_irq(skb);
419}
420
421static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
422 struct cq_desc *cq_desc, u8 type,
423 u16 q_number, u16 completed_index,
424 void *opaque)
425{
426 struct fnic *fnic = vnic_dev_priv(vdev);
427
428 vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
429 VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
430 NULL);
431 return 0;
432}
433
434int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
435{
436 unsigned int tot_rq_work_done = 0, cur_work_done;
437 unsigned int i;
438 int err;
439
440 for (i = 0; i < fnic->rq_count; i++) {
441 cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
442 fnic_rq_cmpl_handler_cont,
443 NULL);
444 if (cur_work_done) {
445 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
446 if (err)
447 shost_printk(KERN_ERR, fnic->lport->host,
448				     "fnic_alloc_rq_frame can't alloc"
449 " frame\n");
450 }
451 tot_rq_work_done += cur_work_done;
452 }
453
454 return tot_rq_work_done;
455}
456
457/*
458 * This function is called once at init time to allocate and fill RQ
459 * buffers. Subsequently, it is called in the interrupt context after RQ
460 * buffer processing to replenish the buffers in the RQ
461 */
462int fnic_alloc_rq_frame(struct vnic_rq *rq)
463{
464 struct fnic *fnic = vnic_dev_priv(rq->vdev);
465 struct sk_buff *skb;
466 u16 len;
467 dma_addr_t pa;
468
469 len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
470 skb = dev_alloc_skb(len);
471 if (!skb) {
472 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
473 "Unable to allocate RQ sk_buff\n");
474 return -ENOMEM;
475 }
476 skb_reset_mac_header(skb);
477 skb_reset_transport_header(skb);
478 skb_reset_network_header(skb);
479 skb_put(skb, len);
480 pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
481 fnic_queue_rq_desc(rq, skb, pa, len);
482 return 0;
483}
484
485void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
486{
487 struct fc_frame *fp = buf->os_buf;
488 struct fnic *fnic = vnic_dev_priv(rq->vdev);
489
490 pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
491 PCI_DMA_FROMDEVICE);
492
493 dev_kfree_skb(fp_skb(fp));
494 buf->os_buf = NULL;
495}
496
497static inline int is_flogi_frame(struct fc_frame_header *fh)
498{
499 return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI;
500}
501
502int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
503{
504 struct vnic_wq *wq = &fnic->wq[0];
505 struct sk_buff *skb;
506 dma_addr_t pa;
507 struct ethhdr *eth_hdr;
508 struct vlan_ethhdr *vlan_hdr;
509 struct fcoe_hdr *fcoe_hdr;
510 struct fc_frame_header *fh;
511 u32 tot_len, eth_hdr_len;
512 int ret = 0;
513 unsigned long flags;
514
515 fh = fc_frame_header_get(fp);
516 skb = fp_skb(fp);
517
518 if (!fnic->vlan_hw_insert) {
519 eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
520 vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
521 eth_hdr = (struct ethhdr *)vlan_hdr;
522 vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
523 vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
524 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
525 fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
526 } else {
527 eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
528 eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
529 eth_hdr->h_proto = htons(ETH_P_FCOE);
530 fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
531 }
532
533 if (is_flogi_frame(fh)) {
534 fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
535 memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN);
536 } else {
537 if (fnic->fcoui_mode)
538 fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
539 else
540 memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN);
541 memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
542 }
543
544 tot_len = skb->len;
545 BUG_ON(tot_len % 4);
546
547 memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
548 fcoe_hdr->fcoe_sof = fr_sof(fp);
549 if (FC_FCOE_VER)
550 FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
551
552 pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
553
554 spin_lock_irqsave(&fnic->wq_lock[0], flags);
555
556 if (!vnic_wq_desc_avail(wq)) {
557 pci_unmap_single(fnic->pdev, pa,
558 tot_len, PCI_DMA_TODEVICE);
559 ret = -1;
560 goto fnic_send_frame_end;
561 }
562
563 fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
564 fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
565fnic_send_frame_end:
566 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
567
568 if (ret)
569 dev_kfree_skb_any(fp_skb(fp));
570
571 return ret;
572}
573
574/*
575 * fnic_send
576 * Routine to send a raw frame
577 */
578int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
579{
580 struct fnic *fnic = lport_priv(lp);
581 struct fc_frame_header *fh;
582 int ret = 0;
583 enum fnic_state old_state;
584 unsigned long flags;
585 struct fc_frame *old_flogi = NULL;
586 struct fc_frame *old_flogi_resp = NULL;
587
588 if (fnic->in_remove) {
589 dev_kfree_skb(fp_skb(fp));
590 ret = -1;
591 goto fnic_send_end;
592 }
593
594 fh = fc_frame_header_get(fp);
595 /* if not an Flogi frame, send it out, this is the common case */
596 if (!is_flogi_frame(fh))
597 return fnic_send_frame(fnic, fp);
598
599 /* Flogi frame, now enter the state machine */
600
601 spin_lock_irqsave(&fnic->fnic_lock, flags);
602again:
603 /* Get any old cached frames, free them after dropping lock */
604 old_flogi = fnic->flogi;
605 fnic->flogi = NULL;
606 old_flogi_resp = fnic->flogi_resp;
607 fnic->flogi_resp = NULL;
608
609 fnic->flogi_oxid = FC_XID_UNKNOWN;
610
611 old_state = fnic->state;
612 switch (old_state) {
613 case FNIC_IN_FC_MODE:
614 case FNIC_IN_ETH_TRANS_FC_MODE:
615 default:
616 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
617 vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
618 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
619
620 if (old_flogi) {
621 dev_kfree_skb(fp_skb(old_flogi));
622 old_flogi = NULL;
623 }
624 if (old_flogi_resp) {
625 dev_kfree_skb(fp_skb(old_flogi_resp));
626 old_flogi_resp = NULL;
627 }
628
629 ret = fnic_fw_reset_handler(fnic);
630
631 spin_lock_irqsave(&fnic->fnic_lock, flags);
632 if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
633 goto again;
634 if (ret) {
635 fnic->state = old_state;
636 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
637 dev_kfree_skb(fp_skb(fp));
638 goto fnic_send_end;
639 }
640 old_flogi = fnic->flogi;
641 fnic->flogi = fp;
642 fnic->flogi_oxid = ntohs(fh->fh_ox_id);
643 old_flogi_resp = fnic->flogi_resp;
644 fnic->flogi_resp = NULL;
645 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
646 break;
647
648 case FNIC_IN_FC_TRANS_ETH_MODE:
649 /*
650 * A reset is pending with the firmware. Store the flogi
651 * and its oxid. The transition out of this state happens
652		 * only when the firmware completes the reset, either with
653		 * success or failure. On success, transition to
654		 * FNIC_IN_ETH_MODE; on failure, transition to
655		 * FNIC_IN_FC_MODE
656 */
657 fnic->flogi = fp;
658 fnic->flogi_oxid = ntohs(fh->fh_ox_id);
659 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
660 break;
661
662 case FNIC_IN_ETH_MODE:
663 /*
664 * The fw/hw is already in eth mode. Store the oxid,
665 * and send the flogi frame out. The transition out of this
666		 * state happens only when we receive a flogi response from the
667 * network, and the oxid matches the cached oxid when the
668 * flogi frame was sent out. If they match, then we issue
669 * a flogi_reg request and transition to state
670 * FNIC_IN_ETH_TRANS_FC_MODE
671 */
672 fnic->flogi_oxid = ntohs(fh->fh_ox_id);
673 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
674 ret = fnic_send_frame(fnic, fp);
675 break;
676 }
677
678fnic_send_end:
679 if (old_flogi)
680 dev_kfree_skb(fp_skb(old_flogi));
681 if (old_flogi_resp)
682 dev_kfree_skb(fp_skb(old_flogi_resp));
683 return ret;
684}
685
686static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
687 struct cq_desc *cq_desc,
688 struct vnic_wq_buf *buf, void *opaque)
689{
690 struct sk_buff *skb = buf->os_buf;
691 struct fc_frame *fp = (struct fc_frame *)skb;
692 struct fnic *fnic = vnic_dev_priv(wq->vdev);
693
694 pci_unmap_single(fnic->pdev, buf->dma_addr,
695 buf->len, PCI_DMA_TODEVICE);
696 dev_kfree_skb_irq(fp_skb(fp));
697 buf->os_buf = NULL;
698}
699
700static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
701 struct cq_desc *cq_desc, u8 type,
702 u16 q_number, u16 completed_index,
703 void *opaque)
704{
705 struct fnic *fnic = vnic_dev_priv(vdev);
706 unsigned long flags;
707
708 spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
709 vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
710 fnic_wq_complete_frame_send, NULL);
711 spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
712
713 return 0;
714}
715
716int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
717{
718 unsigned int wq_work_done = 0;
719 unsigned int i;
720
721 for (i = 0; i < fnic->raw_wq_count; i++) {
722 wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
723 work_to_do,
724 fnic_wq_cmpl_handler_cont,
725 NULL);
726 }
727
728 return wq_work_done;
729}
730
731
732void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
733{
734 struct fc_frame *fp = buf->os_buf;
735 struct fnic *fnic = vnic_dev_priv(wq->vdev);
736
737 pci_unmap_single(fnic->pdev, buf->dma_addr,
738 buf->len, PCI_DMA_TODEVICE);
739
740 dev_kfree_skb(fp_skb(fp));
741 buf->os_buf = NULL;
742}
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
new file mode 100644
index 00000000000..f0b896988cd
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -0,0 +1,67 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _FNIC_IO_H_
19#define _FNIC_IO_H_
20
21#include <scsi/fc/fc_fcp.h>
22
23#define FNIC_DFLT_SG_DESC_CNT 32
24#define FNIC_MAX_SG_DESC_CNT 1024 /* Maximum descriptors per sgl */
25#define FNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */
26
27struct host_sg_desc {
28 __le64 addr;
29 __le32 len;
30 u32 _resvd;
31};
32
33struct fnic_dflt_sgl_list {
34 struct host_sg_desc sg_desc[FNIC_DFLT_SG_DESC_CNT];
35};
36
37struct fnic_sgl_list {
38 struct host_sg_desc sg_desc[FNIC_MAX_SG_DESC_CNT];
39};
40
41enum fnic_sgl_list_type {
42 FNIC_SGL_CACHE_DFLT = 0, /* cache with default size sgl */
43 FNIC_SGL_CACHE_MAX, /* cache with max size sgl */
44 FNIC_SGL_NUM_CACHES /* number of sgl caches */
45};
46
47enum fnic_ioreq_state {
48 FNIC_IOREQ_CMD_PENDING = 0,
49 FNIC_IOREQ_ABTS_PENDING,
50 FNIC_IOREQ_ABTS_COMPLETE,
51 FNIC_IOREQ_CMD_COMPLETE,
52};
53
54struct fnic_io_req {
55 struct host_sg_desc *sgl_list; /* sgl list */
56 void *sgl_list_alloc; /* sgl list address used for free */
57 dma_addr_t sense_buf_pa; /* dma address for sense buffer*/
58 dma_addr_t sgl_list_pa; /* dma address for sgl list */
59 u16 sgl_cnt;
60 u8 sgl_type; /* device DMA descriptor list type */
61 u8 io_completed:1; /* set to 1 when fw completes IO */
62 u32 port_id; /* remote port DID */
63 struct completion *abts_done; /* completion for abts */
64 struct completion *dr_done; /* completion for device reset */
65};
66
67#endif /* _FNIC_IO_H_ */
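The __le64/__le32 fields above mean callers must convert addresses and lengths to little-endian when filling a descriptor list (a no-op on little-endian hosts). A minimal sketch of how a DMA-mapped scatterlist might be encoded into this format; fnic_fill_sgl() is hypothetical, not part of the driver:

#include <linux/scatterlist.h>

/* Hypothetical: encode a mapped scatterlist into host_sg_desc format. */
static void fnic_fill_sgl(struct host_sg_desc *desc,
			  struct scatterlist *sgl, int sg_count)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, sg_count, i) {
		desc[i].addr = cpu_to_le64(sg_dma_address(sg));
		desc[i].len = cpu_to_le32(sg_dma_len(sg));
		desc[i]._resvd = 0;
	}
}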
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
new file mode 100644
index 00000000000..2b3064828ae
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -0,0 +1,332 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/string.h>
19#include <linux/errno.h>
20#include <linux/pci.h>
21#include <linux/interrupt.h>
22#include <scsi/libfc.h>
23#include <scsi/fc_frame.h>
24#include "vnic_dev.h"
25#include "vnic_intr.h"
26#include "vnic_stats.h"
27#include "fnic_io.h"
28#include "fnic.h"
29
30static irqreturn_t fnic_isr_legacy(int irq, void *data)
31{
32 struct fnic *fnic = data;
33 u32 pba;
34 unsigned long work_done = 0;
35
36 pba = vnic_intr_legacy_pba(fnic->legacy_pba);
37 if (!pba)
38 return IRQ_NONE;
39
40 if (pba & (1 << FNIC_INTX_NOTIFY)) {
41 vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);
42 fnic_handle_link_event(fnic);
43 }
44
45 if (pba & (1 << FNIC_INTX_ERR)) {
46 vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_ERR]);
47 fnic_log_q_error(fnic);
48 }
49
50 if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
51 work_done += fnic_wq_copy_cmpl_handler(fnic, 8);
52 work_done += fnic_wq_cmpl_handler(fnic, 4);
53 work_done += fnic_rq_cmpl_handler(fnic, 4);
54
55 vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ],
56 work_done,
57 1 /* unmask intr */,
58 1 /* reset intr timer */);
59 }
60
61 return IRQ_HANDLED;
62}
63
64static irqreturn_t fnic_isr_msi(int irq, void *data)
65{
66 struct fnic *fnic = data;
67 unsigned long work_done = 0;
68
69 work_done += fnic_wq_copy_cmpl_handler(fnic, 8);
70 work_done += fnic_wq_cmpl_handler(fnic, 4);
71 work_done += fnic_rq_cmpl_handler(fnic, 4);
72
73 vnic_intr_return_credits(&fnic->intr[0],
74 work_done,
75 1 /* unmask intr */,
76 1 /* reset intr timer */);
77
78 return IRQ_HANDLED;
79}
80
81static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
82{
83 struct fnic *fnic = data;
84 unsigned long rq_work_done = 0;
85
86 rq_work_done = fnic_rq_cmpl_handler(fnic, 4);
87 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
88 rq_work_done,
89 1 /* unmask intr */,
90 1 /* reset intr timer */);
91
92 return IRQ_HANDLED;
93}
94
95static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
96{
97 struct fnic *fnic = data;
98 unsigned long wq_work_done = 0;
99
100 wq_work_done = fnic_wq_cmpl_handler(fnic, 4);
101 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
102 wq_work_done,
103 1 /* unmask intr */,
104 1 /* reset intr timer */);
105 return IRQ_HANDLED;
106}
107
108static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
109{
110 struct fnic *fnic = data;
111 unsigned long wq_copy_work_done = 0;
112
113 wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, 8);
114 vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
115 wq_copy_work_done,
116 1 /* unmask intr */,
117 1 /* reset intr timer */);
118 return IRQ_HANDLED;
119}
120
121static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
122{
123 struct fnic *fnic = data;
124
125 vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
126 fnic_log_q_error(fnic);
127 fnic_handle_link_event(fnic);
128
129 return IRQ_HANDLED;
130}
131
132void fnic_free_intr(struct fnic *fnic)
133{
134 int i;
135
136 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
137 case VNIC_DEV_INTR_MODE_INTX:
138 case VNIC_DEV_INTR_MODE_MSI:
139 free_irq(fnic->pdev->irq, fnic);
140 break;
141
142 case VNIC_DEV_INTR_MODE_MSIX:
143 for (i = 0; i < ARRAY_SIZE(fnic->msix); i++)
144 if (fnic->msix[i].requested)
145 free_irq(fnic->msix_entry[i].vector,
146 fnic->msix[i].devid);
147 break;
148
149 default:
150 break;
151 }
152}
153
154int fnic_request_intr(struct fnic *fnic)
155{
156 int err = 0;
157 int i;
158
159 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
160
161 case VNIC_DEV_INTR_MODE_INTX:
162 err = request_irq(fnic->pdev->irq, &fnic_isr_legacy,
163 IRQF_SHARED, DRV_NAME, fnic);
164 break;
165
166 case VNIC_DEV_INTR_MODE_MSI:
167 err = request_irq(fnic->pdev->irq, &fnic_isr_msi,
168 0, fnic->name, fnic);
169 break;
170
171 case VNIC_DEV_INTR_MODE_MSIX:
172
173 sprintf(fnic->msix[FNIC_MSIX_RQ].devname,
174 "%.11s-fcs-rq", fnic->name);
175 fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq;
176 fnic->msix[FNIC_MSIX_RQ].devid = fnic;
177
178 sprintf(fnic->msix[FNIC_MSIX_WQ].devname,
179 "%.11s-fcs-wq", fnic->name);
180 fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq;
181 fnic->msix[FNIC_MSIX_WQ].devid = fnic;
182
183 sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname,
184 "%.11s-scsi-wq", fnic->name);
185 fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy;
186 fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic;
187
188 sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname,
189 "%.11s-err-notify", fnic->name);
190 fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr =
191 fnic_isr_msix_err_notify;
192 fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic;
193
194 for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) {
195 err = request_irq(fnic->msix_entry[i].vector,
196 fnic->msix[i].isr, 0,
197 fnic->msix[i].devname,
198 fnic->msix[i].devid);
199 if (err) {
200 shost_printk(KERN_ERR, fnic->lport->host,
201 "MSIX: request_irq"
202 " failed %d\n", err);
203 fnic_free_intr(fnic);
204 break;
205 }
206 fnic->msix[i].requested = 1;
207 }
208 break;
209
210 default:
211 break;
212 }
213
214 return err;
215}
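The per-vector requested flag lets fnic_free_intr() release exactly the vectors that were successfully requested when request_irq() fails partway through the loop. The fnic->msix[] entry is defined in fnic.h, which is outside this hunk; a plausible shape, inferred only from the fields used above:

/* Assumed field layout, inferred from usage; the real definition
 * lives in fnic.h and may differ. */
struct fnic_msix_entry {
	int requested;			/* request_irq() succeeded */
	char devname[24];		/* name handed to request_irq */
	irqreturn_t (*isr)(int, void *);/* vector handler */
	void *devid;			/* cookie for request/free_irq */
};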
216
217int fnic_set_intr_mode(struct fnic *fnic)
218{
219 unsigned int n = ARRAY_SIZE(fnic->rq);
220 unsigned int m = ARRAY_SIZE(fnic->wq);
221 unsigned int o = ARRAY_SIZE(fnic->wq_copy);
222 unsigned int i;
223
224 /*
225 * Set interrupt mode (INTx, MSI, MSI-X) depending on
226 * system capabilities.
227 *
228 * Try MSI-X first
229 *
230 * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
231 * (last INTR is used for WQ/RQ errors and notification area)
232 */
233
234 BUG_ON(ARRAY_SIZE(fnic->msix_entry) < n + m + o + 1);
235 for (i = 0; i < n + m + o + 1; i++)
236 fnic->msix_entry[i].entry = i;
237
238 if (fnic->rq_count >= n &&
239 fnic->raw_wq_count >= m &&
240 fnic->wq_copy_count >= o &&
241 fnic->cq_count >= n + m + o) {
242 if (!pci_enable_msix(fnic->pdev, fnic->msix_entry,
243 n + m + o + 1)) {
244 fnic->rq_count = n;
245 fnic->raw_wq_count = m;
246 fnic->wq_copy_count = o;
247 fnic->wq_count = m + o;
248 fnic->cq_count = n + m + o;
249 fnic->intr_count = n + m + o + 1;
250 fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY;
251
252 FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
253 "Using MSI-X Interrupts\n");
254 vnic_dev_set_intr_mode(fnic->vdev,
255 VNIC_DEV_INTR_MODE_MSIX);
256 return 0;
257 }
258 }
259
260 /*
261 * Next try MSI
262 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 1 INTR
263 */
264 if (fnic->rq_count >= 1 &&
265 fnic->raw_wq_count >= 1 &&
266 fnic->wq_copy_count >= 1 &&
267 fnic->cq_count >= 3 &&
268 fnic->intr_count >= 1 &&
269 !pci_enable_msi(fnic->pdev)) {
270
271 fnic->rq_count = 1;
272 fnic->raw_wq_count = 1;
273 fnic->wq_copy_count = 1;
274 fnic->wq_count = 2;
275 fnic->cq_count = 3;
276 fnic->intr_count = 1;
277 fnic->err_intr_offset = 0;
278
279 FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
280 "Using MSI Interrupts\n");
281 vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);
282
283 return 0;
284 }
285
286 /*
287 * Next try INTx
288 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 3 INTRs
289 * 1 INTR is used for all 3 queues, 1 INTR for queue errors
290 * 1 INTR for notification area
291 */
292
293 if (fnic->rq_count >= 1 &&
294 fnic->raw_wq_count >= 1 &&
295 fnic->wq_copy_count >= 1 &&
296 fnic->cq_count >= 3 &&
297 fnic->intr_count >= 3) {
298
299 fnic->rq_count = 1;
300 fnic->raw_wq_count = 1;
301 fnic->wq_copy_count = 1;
302 fnic->cq_count = 3;
303 fnic->intr_count = 3;
304
305 FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
306 "Using Legacy Interrupts\n");
307 vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
308
309 return 0;
310 }
311
312 vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
313
314 return -EINVAL;
315}
316
317void fnic_clear_intr_mode(struct fnic *fnic)
318{
319 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
320 case VNIC_DEV_INTR_MODE_MSIX:
321 pci_disable_msix(fnic->pdev);
322 break;
323 case VNIC_DEV_INTR_MODE_MSI:
324 pci_disable_msi(fnic->pdev);
325 break;
326 default:
327 break;
328 }
329
330 vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
331}
332
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
new file mode 100644
index 00000000000..a84072865fc
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -0,0 +1,943 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/module.h>
19#include <linux/mempool.h>
20#include <linux/string.h>
21#include <linux/errno.h>
22#include <linux/init.h>
23#include <linux/pci.h>
24#include <linux/skbuff.h>
25#include <linux/interrupt.h>
26#include <linux/spinlock.h>
27#include <linux/workqueue.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport.h>
30#include <scsi/scsi_transport_fc.h>
31#include <scsi/scsi_tcq.h>
32#include <scsi/libfc.h>
33#include <scsi/fc_frame.h>
34
35#include "vnic_dev.h"
36#include "vnic_intr.h"
37#include "vnic_stats.h"
38#include "fnic_io.h"
39#include "fnic.h"
40
41#define PCI_DEVICE_ID_CISCO_FNIC 0x0045
42
43/* Timer to poll notification area for events. Used for MSI interrupts */
44#define FNIC_NOTIFY_TIMER_PERIOD (2 * HZ)
45
46static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
47static struct kmem_cache *fnic_io_req_cache;
48LIST_HEAD(fnic_list);
49DEFINE_SPINLOCK(fnic_list_lock);
50
51/* Devices supported by the fnic module */
52static struct pci_device_id fnic_id_table[] = {
53 { PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) },
54 { 0, }
55};
56
57MODULE_DESCRIPTION(DRV_DESCRIPTION);
58MODULE_AUTHOR("Abhijeet Joglekar <abjoglek@cisco.com>, "
59 "Joseph R. Eykholt <jeykholt@cisco.com>");
60MODULE_LICENSE("GPL v2");
61MODULE_VERSION(DRV_VERSION);
62MODULE_DEVICE_TABLE(pci, fnic_id_table);
63
64unsigned int fnic_log_level;
65module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
66MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
67
68
69static struct libfc_function_template fnic_transport_template = {
70 .frame_send = fnic_send,
71 .fcp_abort_io = fnic_empty_scsi_cleanup,
72 .fcp_cleanup = fnic_empty_scsi_cleanup,
73 .exch_mgr_reset = fnic_exch_mgr_reset
74};
75
76static int fnic_slave_alloc(struct scsi_device *sdev)
77{
78 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
79 struct fc_lport *lp = shost_priv(sdev->host);
80 struct fnic *fnic = lport_priv(lp);
81
82 sdev->tagged_supported = 1;
83
84 if (!rport || fc_remote_port_chkready(rport))
85 return -ENXIO;
86
87 scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH);
88 rport->dev_loss_tmo = fnic->config.port_down_timeout / 1000;
89
90 return 0;
91}
92
93static struct scsi_host_template fnic_host_template = {
94 .module = THIS_MODULE,
95 .name = DRV_NAME,
96 .queuecommand = fnic_queuecommand,
97 .eh_abort_handler = fnic_abort_cmd,
98 .eh_device_reset_handler = fnic_device_reset,
99 .eh_host_reset_handler = fnic_host_reset,
100 .slave_alloc = fnic_slave_alloc,
101 .change_queue_depth = fc_change_queue_depth,
102 .change_queue_type = fc_change_queue_type,
103 .this_id = -1,
104 .cmd_per_lun = 3,
105 .can_queue = FNIC_MAX_IO_REQ,
106 .use_clustering = ENABLE_CLUSTERING,
107 .sg_tablesize = FNIC_MAX_SG_DESC_CNT,
108 .max_sectors = 0xffff,
109 .shost_attrs = fnic_attrs,
110};
111
112static void fnic_get_host_speed(struct Scsi_Host *shost);
113static struct scsi_transport_template *fnic_fc_transport;
114static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
115
116static struct fc_function_template fnic_fc_functions = {
117
118 .show_host_node_name = 1,
119 .show_host_port_name = 1,
120 .show_host_supported_classes = 1,
121 .show_host_supported_fc4s = 1,
122 .show_host_active_fc4s = 1,
123 .show_host_maxframe_size = 1,
124 .show_host_port_id = 1,
125 .show_host_supported_speeds = 1,
126 .get_host_speed = fnic_get_host_speed,
127 .show_host_speed = 1,
128 .show_host_port_type = 1,
129 .get_host_port_state = fc_get_host_port_state,
130 .show_host_port_state = 1,
131 .show_host_symbolic_name = 1,
132 .show_rport_maxframe_size = 1,
133 .show_rport_supported_classes = 1,
134 .show_host_fabric_name = 1,
135 .show_starget_node_name = 1,
136 .show_starget_port_name = 1,
137 .show_starget_port_id = 1,
138 .show_rport_dev_loss_tmo = 1,
139 .issue_fc_host_lip = fnic_reset,
140 .get_fc_host_stats = fnic_get_stats,
141 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
142 .terminate_rport_io = fnic_terminate_rport_io,
143};
144
145static void fnic_get_host_speed(struct Scsi_Host *shost)
146{
147 struct fc_lport *lp = shost_priv(shost);
148 struct fnic *fnic = lport_priv(lp);
149 u32 port_speed = vnic_dev_port_speed(fnic->vdev);
150
151 /* Add in other values as they get defined in fw */
152 switch (port_speed) {
153 case 10000:
154 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
155 break;
156 default:
157 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
158 break;
159 }
160}
161
162static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
163{
164 int ret;
165 struct fc_lport *lp = shost_priv(host);
166 struct fnic *fnic = lport_priv(lp);
167 struct fc_host_statistics *stats = &lp->host_stats;
168 struct vnic_stats *vs;
169 unsigned long flags;
170
171 if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
172 return stats;
173 fnic->stats_time = jiffies;
174
175 spin_lock_irqsave(&fnic->fnic_lock, flags);
176 ret = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
177 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
178
179 if (ret) {
180 FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
181 "fnic: Get vnic stats failed"
182 " 0x%x", ret);
183 return stats;
184 }
185 vs = fnic->stats;
186 stats->tx_frames = vs->tx.tx_unicast_frames_ok;
187 stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4;
188 stats->rx_frames = vs->rx.rx_unicast_frames_ok;
189 stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4;
190 stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
191 stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
192 stats->invalid_crc_count = vs->rx.rx_crc_errors;
193 stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ;
194 stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
195 stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
196
197 return stats;
198}
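The time_before() guard above caps firmware stats dumps at FNIC_STATS_RATE_LIMIT calls per second, handing back the previously cached numbers in between. The jiffies idiom on its own, as a minimal sketch with an illustrative helper name:

#include <linux/jiffies.h>

static unsigned long last_run;		/* jiffies stamp of last pass */

/* Return true at most per_sec times per second (per_sec <= HZ). */
static bool ratelimited(unsigned int per_sec)
{
	if (time_before(jiffies, last_run + HZ / per_sec))
		return false;
	last_run = jiffies;
	return true;
}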
199
200void fnic_log_q_error(struct fnic *fnic)
201{
202 unsigned int i;
203 u32 error_status;
204
205 for (i = 0; i < fnic->raw_wq_count; i++) {
206 error_status = ioread32(&fnic->wq[i].ctrl->error_status);
207 if (error_status)
208 shost_printk(KERN_ERR, fnic->lport->host,
209 "WQ[%d] error_status"
210 " %d\n", i, error_status);
211 }
212
213 for (i = 0; i < fnic->rq_count; i++) {
214 error_status = ioread32(&fnic->rq[i].ctrl->error_status);
215 if (error_status)
216 shost_printk(KERN_ERR, fnic->lport->host,
217 "RQ[%d] error_status"
218 " %d\n", i, error_status);
219 }
220
221 for (i = 0; i < fnic->wq_copy_count; i++) {
222 error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status);
223 if (error_status)
224 shost_printk(KERN_ERR, fnic->lport->host,
225 "CWQ[%d] error_status"
226 " %d\n", i, error_status);
227 }
228}
229
230void fnic_handle_link_event(struct fnic *fnic)
231{
232 unsigned long flags;
233
234 spin_lock_irqsave(&fnic->fnic_lock, flags);
235 if (fnic->stop_rx_link_events) {
236 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
237 return;
238 }
239 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
240
241 queue_work(fnic_event_queue, &fnic->link_work);
242
243}
244
245static int fnic_notify_set(struct fnic *fnic)
246{
247 int err;
248
249 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
250 case VNIC_DEV_INTR_MODE_INTX:
251 err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY);
252 break;
253 case VNIC_DEV_INTR_MODE_MSI:
254 err = vnic_dev_notify_set(fnic->vdev, -1);
255 break;
256 case VNIC_DEV_INTR_MODE_MSIX:
257 err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY);
258 break;
259 default:
260 shost_printk(KERN_ERR, fnic->lport->host,
261 "Interrupt mode should be set up"
262 " before devcmd notify set %d\n",
263 vnic_dev_get_intr_mode(fnic->vdev));
264 err = -1;
265 break;
266 }
267
268 return err;
269}
270
271static void fnic_notify_timer(unsigned long data)
272{
273 struct fnic *fnic = (struct fnic *)data;
274
275 fnic_handle_link_event(fnic);
276 mod_timer(&fnic->notify_timer,
277 round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
278}
279
280static void fnic_notify_timer_start(struct fnic *fnic)
281{
282 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
283 case VNIC_DEV_INTR_MODE_MSI:
284 /*
285 * Schedule first timeout immediately. The driver is
286 * initialized and ready to look for link up notifications
287 */
288 mod_timer(&fnic->notify_timer, jiffies);
289 break;
290 default:
291 /* Using intr for notification for INTx/MSI-X */
292 break;
293 }
294}
295
296static int fnic_dev_wait(struct vnic_dev *vdev,
297 int (*start)(struct vnic_dev *, int),
298 int (*finished)(struct vnic_dev *, int *),
299 int arg)
300{
301 unsigned long time;
302 int done;
303 int err;
304
305 err = start(vdev, arg);
306 if (err)
307 return err;
308
309 /* Wait for func to complete...2 seconds max */
310 time = jiffies + (HZ * 2);
311 do {
312 err = finished(vdev, &done);
313 if (err)
314 return err;
315 if (done)
316 return 0;
317 schedule_timeout_uninterruptible(HZ / 10);
318 } while (time_after(time, jiffies));
319
320 return -ETIMEDOUT;
321}
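fnic_dev_wait() packages the common "kick off a devcmd, then poll its completion with a two-second ceiling" sequence behind a pair of function pointers. fnic_probe() below drives it like this:

	/* vnic_dev_open starts the command; vnic_dev_open_done polls it. */
	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
			    vnic_dev_open_done, 0);
	if (err)
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev open failed, aborting.\n");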
322
323static int fnic_cleanup(struct fnic *fnic)
324{
325 unsigned int i;
326 int err;
327 unsigned long flags;
328 struct fc_frame *flogi = NULL;
329 struct fc_frame *flogi_resp = NULL;
330
331 vnic_dev_disable(fnic->vdev);
332 for (i = 0; i < fnic->intr_count; i++)
333 vnic_intr_mask(&fnic->intr[i]);
334
335 for (i = 0; i < fnic->rq_count; i++) {
336 err = vnic_rq_disable(&fnic->rq[i]);
337 if (err)
338 return err;
339 }
340 for (i = 0; i < fnic->raw_wq_count; i++) {
341 err = vnic_wq_disable(&fnic->wq[i]);
342 if (err)
343 return err;
344 }
345 for (i = 0; i < fnic->wq_copy_count; i++) {
346 err = vnic_wq_copy_disable(&fnic->wq_copy[i]);
347 if (err)
348 return err;
349 }
350
351 /* Clean up completed IOs and FCS frames */
352 fnic_wq_copy_cmpl_handler(fnic, -1);
353 fnic_wq_cmpl_handler(fnic, -1);
354 fnic_rq_cmpl_handler(fnic, -1);
355
356 /* Clean up the IOs and FCS frames that have not completed */
357 for (i = 0; i < fnic->raw_wq_count; i++)
358 vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
359 for (i = 0; i < fnic->rq_count; i++)
360 vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
361 for (i = 0; i < fnic->wq_copy_count; i++)
362 vnic_wq_copy_clean(&fnic->wq_copy[i],
363 fnic_wq_copy_cleanup_handler);
364
365 for (i = 0; i < fnic->cq_count; i++)
366 vnic_cq_clean(&fnic->cq[i]);
367 for (i = 0; i < fnic->intr_count; i++)
368 vnic_intr_clean(&fnic->intr[i]);
369
370 /*
371 * Remove cached flogi and flogi resp frames, if any.
372 * These frames are not on any queue, so the queue cleanup
373 * above does not free them; free them explicitly
374 */
375 spin_lock_irqsave(&fnic->fnic_lock, flags);
376 flogi = fnic->flogi;
377 fnic->flogi = NULL;
378 flogi_resp = fnic->flogi_resp;
379 fnic->flogi_resp = NULL;
380 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
381
382 if (flogi)
383 dev_kfree_skb(fp_skb(flogi));
384
385 if (flogi_resp)
386 dev_kfree_skb(fp_skb(flogi_resp));
387
388 mempool_destroy(fnic->io_req_pool);
389 for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
390 mempool_destroy(fnic->io_sgl_pool[i]);
391
392 return 0;
393}
394
395static void fnic_iounmap(struct fnic *fnic)
396{
397 if (fnic->bar0.vaddr)
398 iounmap(fnic->bar0.vaddr);
399}
400
401/*
402 * Allocate element for mempools requiring GFP_DMA flag.
403 * Otherwise, checks in kmem_flagcheck() hit BUG_ON().
404 */
405static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data)
406{
407 struct kmem_cache *mem = pool_data;
408
409 return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA);
410}
411
412static int __devinit fnic_probe(struct pci_dev *pdev,
413 const struct pci_device_id *ent)
414{
415 struct Scsi_Host *host;
416 struct fc_lport *lp;
417 struct fnic *fnic;
418 mempool_t *pool;
419 int err;
420 int i;
421 unsigned long flags;
422
423 /*
424 * Allocate SCSI Host and set up association between host,
425 * local port, and fnic
426 */
427 host = scsi_host_alloc(&fnic_host_template,
428 sizeof(struct fc_lport) + sizeof(struct fnic));
429 if (!host) {
430 printk(KERN_ERR PFX "Unable to alloc SCSI host\n");
431 err = -ENOMEM;
432 goto err_out;
433 }
434 lp = shost_priv(host);
435 lp->host = host;
436 fnic = lport_priv(lp);
437 fnic->lport = lp;
438
439 snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
440 host->host_no);
441
442 host->transportt = fnic_fc_transport;
443
444 err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
445 if (err) {
446 shost_printk(KERN_ERR, fnic->lport->host,
447 "Unable to alloc shared tag map\n");
448 goto err_out_free_hba;
449 }
450
451 /* Setup PCI resources */
452 pci_set_drvdata(pdev, fnic);
453
454 fnic->pdev = pdev;
455
456 err = pci_enable_device(pdev);
457 if (err) {
458 shost_printk(KERN_ERR, fnic->lport->host,
459 "Cannot enable PCI device, aborting.\n");
460 goto err_out_free_hba;
461 }
462
463 err = pci_request_regions(pdev, DRV_NAME);
464 if (err) {
465 shost_printk(KERN_ERR, fnic->lport->host,
466 "Cannot enable PCI resources, aborting\n");
467 goto err_out_disable_device;
468 }
469
470 pci_set_master(pdev);
471
472 /* Query PCI controller on system for DMA addressing
473 * limitation for the device. Try 40-bit first, and
474 * fall back to 32-bit.
475 */
476 err = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
477 if (err) {
478 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
479 if (err) {
480 shost_printk(KERN_ERR, fnic->lport->host,
481 "No usable DMA configuration "
482 "aborting\n");
483 goto err_out_release_regions;
484 }
485 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
486 if (err) {
487 shost_printk(KERN_ERR, fnic->lport->host,
488 "Unable to obtain 32-bit DMA "
489 "for consistent allocations, aborting.\n");
490 goto err_out_release_regions;
491 }
492 } else {
493 err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK);
494 if (err) {
495 shost_printk(KERN_ERR, fnic->lport->host,
496 "Unable to obtain 40-bit DMA "
497 "for consistent allocations, aborting.\n");
498 goto err_out_release_regions;
499 }
500 }
501
502 /* Map vNIC resources from BAR0 */
503 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
504 shost_printk(KERN_ERR, fnic->lport->host,
505 "BAR0 not memory-map'able, aborting.\n");
506 err = -ENODEV;
507 goto err_out_release_regions;
508 }
509
510 fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
511 fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
512 fnic->bar0.len = pci_resource_len(pdev, 0);
513
514 if (!fnic->bar0.vaddr) {
515 shost_printk(KERN_ERR, fnic->lport->host,
516 "Cannot memory-map BAR0 res hdr, "
517 "aborting.\n");
518 err = -ENODEV;
519 goto err_out_release_regions;
520 }
521
522 fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
523 if (!fnic->vdev) {
524 shost_printk(KERN_ERR, fnic->lport->host,
525 "vNIC registration failed, "
526 "aborting.\n");
527 err = -ENODEV;
528 goto err_out_iounmap;
529 }
530
531 err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
532 vnic_dev_open_done, 0);
533 if (err) {
534 shost_printk(KERN_ERR, fnic->lport->host,
535 "vNIC dev open failed, aborting.\n");
536 goto err_out_vnic_unregister;
537 }
538
539 err = vnic_dev_init(fnic->vdev, 0);
540 if (err) {
541 shost_printk(KERN_ERR, fnic->lport->host,
542 "vNIC dev init failed, aborting.\n");
543 goto err_out_dev_close;
544 }
545
546 err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr);
547 if (err) {
548 shost_printk(KERN_ERR, fnic->lport->host,
549 "vNIC get MAC addr failed \n");
550 goto err_out_dev_close;
551 }
552
553 /* Get vNIC configuration */
554 err = fnic_get_vnic_config(fnic);
555 if (err) {
556 shost_printk(KERN_ERR, fnic->lport->host,
557 "Get vNIC configuration failed, "
558 "aborting.\n");
559 goto err_out_dev_close;
560 }
561 host->max_lun = fnic->config.luns_per_tgt;
562 host->max_id = FNIC_MAX_FCP_TARGET;
563
564 fnic_get_res_counts(fnic);
565
566 err = fnic_set_intr_mode(fnic);
567 if (err) {
568 shost_printk(KERN_ERR, fnic->lport->host,
569 "Failed to set intr mode, "
570 "aborting.\n");
571 goto err_out_dev_close;
572 }
573
574 err = fnic_request_intr(fnic);
575 if (err) {
576 shost_printk(KERN_ERR, fnic->lport->host,
577 "Unable to request irq.\n");
578 goto err_out_clear_intr;
579 }
580
581 err = fnic_alloc_vnic_resources(fnic);
582 if (err) {
583 shost_printk(KERN_ERR, fnic->lport->host,
584 "Failed to alloc vNIC resources, "
585 "aborting.\n");
586 goto err_out_free_intr;
587 }
588
589
590 /* initialize all fnic locks */
591 spin_lock_init(&fnic->fnic_lock);
592
593 for (i = 0; i < FNIC_WQ_MAX; i++)
594 spin_lock_init(&fnic->wq_lock[i]);
595
596 for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
597 spin_lock_init(&fnic->wq_copy_lock[i]);
598 fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
599 fnic->fw_ack_recd[i] = 0;
600 fnic->fw_ack_index[i] = -1;
601 }
602
603 for (i = 0; i < FNIC_IO_LOCKS; i++)
604 spin_lock_init(&fnic->io_req_lock[i]);
605
606 fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
607 if (!fnic->io_req_pool)
608 goto err_out_free_resources;
609
610 pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
611 fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
612 if (!pool)
613 goto err_out_free_ioreq_pool;
614 fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;
615
616 pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
617 fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
618 if (!pool)
619 goto err_out_free_dflt_pool;
620 fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
621
622 /* setup vlan config, hw inserts vlan header */
623 fnic->vlan_hw_insert = 1;
624 fnic->vlan_id = 0;
625
626 fnic->flogi_oxid = FC_XID_UNKNOWN;
627 fnic->flogi = NULL;
628 fnic->flogi_resp = NULL;
629 fnic->state = FNIC_IN_FC_MODE;
630
631 /* Enable hardware stripping of vlan header on ingress */
632 fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
633
634 /* Setup notification buffer area */
635 err = fnic_notify_set(fnic);
636 if (err) {
637 shost_printk(KERN_ERR, fnic->lport->host,
638 "Failed to alloc notify buffer, aborting.\n");
639 goto err_out_free_max_pool;
640 }
641
642 /* Setup notify timer when using MSI interrupts */
643 if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
644 setup_timer(&fnic->notify_timer,
645 fnic_notify_timer, (unsigned long)fnic);
646
647 /* allocate RQ buffers and post them to RQ */
648 for (i = 0; i < fnic->rq_count; i++) {
649 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
650 if (err) {
651 shost_printk(KERN_ERR, fnic->lport->host,
652 "fnic_alloc_rq_frame can't alloc "
653 "frame\n");
654 goto err_out_free_rq_buf;
655 }
656 }
657
658 /*
659 * Initialization done with PCI system, hardware, firmware.
660 * Add host to SCSI
661 */
662 err = scsi_add_host(lp->host, &pdev->dev);
663 if (err) {
664 shost_printk(KERN_ERR, fnic->lport->host,
665 "fnic: scsi_add_host failed...exiting\n");
666 goto err_out_free_rq_buf;
667 }
668
669 /* Start local port initialization */
670
671 lp->link_up = 0;
672 lp->tt = fnic_transport_template;
673
674 lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
675 FCPIO_HOST_EXCH_RANGE_START,
676 FCPIO_HOST_EXCH_RANGE_END);
677 if (!lp->emp) {
678 err = -ENOMEM;
679 goto err_out_remove_scsi_host;
680 }
681
682 lp->max_retry_count = fnic->config.flogi_retries;
683 lp->max_rport_retry_count = fnic->config.plogi_retries;
684 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
685 FCP_SPPF_CONF_COMPL);
686 if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
687 lp->service_params |= FCP_SPPF_RETRY;
688
689 lp->boot_time = jiffies;
690 lp->e_d_tov = fnic->config.ed_tov;
691 lp->r_a_tov = fnic->config.ra_tov;
692 lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
693 fc_set_wwnn(lp, fnic->config.node_wwn);
694 fc_set_wwpn(lp, fnic->config.port_wwn);
695
696 fc_exch_init(lp);
697 fc_lport_init(lp);
698 fc_elsct_init(lp);
699 fc_rport_init(lp);
700 fc_disc_init(lp);
701
702 fc_lport_config(lp);
703
704 if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
705 sizeof(struct fc_frame_header))) {
706 err = -EINVAL;
707 goto err_out_free_exch_mgr;
708 }
709 fc_host_maxframe_size(lp->host) = lp->mfs;
710
711 sprintf(fc_host_symbolic_name(lp->host),
712 DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
713
714 spin_lock_irqsave(&fnic_list_lock, flags);
715 list_add_tail(&fnic->list, &fnic_list);
716 spin_unlock_irqrestore(&fnic_list_lock, flags);
717
718 INIT_WORK(&fnic->link_work, fnic_handle_link);
719 INIT_WORK(&fnic->frame_work, fnic_handle_frame);
720 skb_queue_head_init(&fnic->frame_queue);
721
722 /* Enable all queues */
723 for (i = 0; i < fnic->raw_wq_count; i++)
724 vnic_wq_enable(&fnic->wq[i]);
725 for (i = 0; i < fnic->rq_count; i++)
726 vnic_rq_enable(&fnic->rq[i]);
727 for (i = 0; i < fnic->wq_copy_count; i++)
728 vnic_wq_copy_enable(&fnic->wq_copy[i]);
729
730 fc_fabric_login(lp);
731
732 vnic_dev_enable(fnic->vdev);
733 for (i = 0; i < fnic->intr_count; i++)
734 vnic_intr_unmask(&fnic->intr[i]);
735
736 fnic_notify_timer_start(fnic);
737
738 return 0;
739
740err_out_free_exch_mgr:
741 fc_exch_mgr_free(lp->emp);
742err_out_remove_scsi_host:
743 fc_remove_host(fnic->lport->host);
744 scsi_remove_host(fnic->lport->host);
745err_out_free_rq_buf:
746 for (i = 0; i < fnic->rq_count; i++)
747 vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
748 vnic_dev_notify_unset(fnic->vdev);
749err_out_free_max_pool:
750 mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
751err_out_free_dflt_pool:
752 mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
753err_out_free_ioreq_pool:
754 mempool_destroy(fnic->io_req_pool);
755err_out_free_resources:
756 fnic_free_vnic_resources(fnic);
757err_out_free_intr:
758 fnic_free_intr(fnic);
759err_out_clear_intr:
760 fnic_clear_intr_mode(fnic);
761err_out_dev_close:
762 vnic_dev_close(fnic->vdev);
763err_out_vnic_unregister:
764 vnic_dev_unregister(fnic->vdev);
765err_out_iounmap:
766 fnic_iounmap(fnic);
767err_out_release_regions:
768 pci_release_regions(pdev);
769err_out_disable_device:
770 pci_disable_device(pdev);
771err_out_free_hba:
772 scsi_host_put(lp->host);
773err_out:
774 return err;
775}
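fnic_probe() follows the standard kernel goto-unwind convention: every acquisition that can fail jumps to a label that releases only what has already succeeded, with labels stacked in reverse acquisition order. The idiom reduced to two resources; the acquire/release helpers are illustrative stubs:

static int acquire_a(void) { return 0; }	/* stub */
static void release_a(void) { }			/* stub */
static int acquire_b(void) { return 0; }	/* stub */

static int example_probe(void)
{
	int err;

	err = acquire_a();		/* first resource */
	if (err)
		goto err_out;

	err = acquire_b();		/* second resource */
	if (err)
		goto err_release_a;	/* undo only what succeeded */

	return 0;

err_release_a:
	release_a();
err_out:
	return err;
}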
776
777static void __devexit fnic_remove(struct pci_dev *pdev)
778{
779 struct fnic *fnic = pci_get_drvdata(pdev);
780 unsigned long flags;
781
782 /*
783 * Mark state so that the workqueue thread stops forwarding
784 * received frames and link events to the local port. ISR and
785 * other threads that can queue work items will also stop
786 * creating work items on the fnic workqueue
787 */
788 spin_lock_irqsave(&fnic->fnic_lock, flags);
789 fnic->stop_rx_link_events = 1;
790 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
791
792 if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
793 del_timer_sync(&fnic->notify_timer);
794
795 /*
796 * Flush the fnic event queue. After this call, there should
797 * be no event queued for this fnic device in the workqueue
798 */
799 flush_workqueue(fnic_event_queue);
800 skb_queue_purge(&fnic->frame_queue);
801
802 /*
803 * Log off the fabric. This stops all remote ports and the
804 * dns port, and flushes all rport, disc, and lport work
805 * before returning
806 */
807 fc_fabric_logoff(fnic->lport);
808
809 spin_lock_irqsave(&fnic->fnic_lock, flags);
810 fnic->in_remove = 1;
811 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
812
813 fc_lport_destroy(fnic->lport);
814
815 /*
816 * This stops the fnic device, masks all interrupts. Completed
817 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
818 * cleaned up
819 */
820 fnic_cleanup(fnic);
821
822 BUG_ON(!skb_queue_empty(&fnic->frame_queue));
823
824 spin_lock_irqsave(&fnic_list_lock, flags);
825 list_del(&fnic->list);
826 spin_unlock_irqrestore(&fnic_list_lock, flags);
827
828 fc_remove_host(fnic->lport->host);
829 scsi_remove_host(fnic->lport->host);
830 fc_exch_mgr_free(fnic->lport->emp);
831 vnic_dev_notify_unset(fnic->vdev);
832 fnic_free_vnic_resources(fnic);
833 fnic_free_intr(fnic);
834 fnic_clear_intr_mode(fnic);
835 vnic_dev_close(fnic->vdev);
836 vnic_dev_unregister(fnic->vdev);
837 fnic_iounmap(fnic);
838 pci_release_regions(pdev);
839 pci_disable_device(pdev);
840 pci_set_drvdata(pdev, NULL);
841 scsi_host_put(fnic->lport->host);
842}
843
844static struct pci_driver fnic_driver = {
845 .name = DRV_NAME,
846 .id_table = fnic_id_table,
847 .probe = fnic_probe,
848 .remove = __devexit_p(fnic_remove),
849};
850
851static int __init fnic_init_module(void)
852{
853 size_t len;
854 int err = 0;
855
856 printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
857
858 /* Create a cache for allocation of default size sgls */
859 len = sizeof(struct fnic_dflt_sgl_list);
860 fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
861 ("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
862 SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
863 NULL);
864 if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
865 printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
866 err = -ENOMEM;
867 goto err_create_fnic_sgl_slab_dflt;
868 }
869
870 /* Create a cache for allocation of max size sgls*/
871 len = sizeof(struct fnic_sgl_list);
872 fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
873 ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
874 SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
875 NULL);
876 if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
877 printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
878 err = -ENOMEM;
879 goto err_create_fnic_sgl_slab_max;
880 }
881
882 /* Create a cache of io_req structs for use via mempool */
883 fnic_io_req_cache = kmem_cache_create("fnic_io_req",
884 sizeof(struct fnic_io_req),
885 0, SLAB_HWCACHE_ALIGN, NULL);
886 if (!fnic_io_req_cache) {
887 printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
888 err = -ENOMEM;
889 goto err_create_fnic_ioreq_slab;
890 }
891
892 fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
893 if (!fnic_event_queue) {
894 printk(KERN_ERR PFX "fnic work queue create failed\n");
895 err = -ENOMEM;
896 goto err_create_fnic_workq;
897 }
898
899 spin_lock_init(&fnic_list_lock);
900 INIT_LIST_HEAD(&fnic_list);
901
902 fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
903 if (!fnic_fc_transport) {
904 printk(KERN_ERR PFX "fc_attach_transport error\n");
905 err = -ENOMEM;
906 goto err_fc_transport;
907 }
908
909 /* register the driver with PCI system */
910 err = pci_register_driver(&fnic_driver);
911 if (err < 0) {
912 printk(KERN_ERR PFX "pci register error\n");
913 goto err_pci_register;
914 }
915 return err;
916
917err_pci_register:
918 fc_release_transport(fnic_fc_transport);
919err_fc_transport:
920 destroy_workqueue(fnic_event_queue);
921err_create_fnic_workq:
922 kmem_cache_destroy(fnic_io_req_cache);
923err_create_fnic_ioreq_slab:
924 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
925err_create_fnic_sgl_slab_max:
926 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
927err_create_fnic_sgl_slab_dflt:
928 return err;
929}
930
931static void __exit fnic_cleanup_module(void)
932{
933 pci_unregister_driver(&fnic_driver);
934 destroy_workqueue(fnic_event_queue);
935 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
936 kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
937 kmem_cache_destroy(fnic_io_req_cache);
938 fc_release_transport(fnic_fc_transport);
939}
940
941module_init(fnic_init_module);
942module_exit(fnic_cleanup_module);
943
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
new file mode 100644
index 00000000000..7ba61ec715d
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -0,0 +1,444 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/pci.h>
21#include "wq_enet_desc.h"
22#include "rq_enet_desc.h"
23#include "cq_enet_desc.h"
24#include "vnic_resource.h"
25#include "vnic_dev.h"
26#include "vnic_wq.h"
27#include "vnic_rq.h"
28#include "vnic_cq.h"
29#include "vnic_intr.h"
30#include "vnic_stats.h"
31#include "vnic_nic.h"
32#include "fnic.h"
33
34int fnic_get_vnic_config(struct fnic *fnic)
35{
36 struct vnic_fc_config *c = &fnic->config;
37 int err;
38
39#define GET_CONFIG(m) \
40 do { \
41 err = vnic_dev_spec(fnic->vdev, \
42 offsetof(struct vnic_fc_config, m), \
43 sizeof(c->m), &c->m); \
44 if (err) { \
45 shost_printk(KERN_ERR, fnic->lport->host, \
46 "Error getting %s, %d\n", #m, \
47 err); \
48 return err; \
49 } \
50 } while (0);
51
52 GET_CONFIG(node_wwn);
53 GET_CONFIG(port_wwn);
54 GET_CONFIG(wq_enet_desc_count);
55 GET_CONFIG(wq_copy_desc_count);
56 GET_CONFIG(rq_desc_count);
57 GET_CONFIG(maxdatafieldsize);
58 GET_CONFIG(ed_tov);
59 GET_CONFIG(ra_tov);
60 GET_CONFIG(intr_timer);
61 GET_CONFIG(intr_timer_type);
62 GET_CONFIG(flags);
63 GET_CONFIG(flogi_retries);
64 GET_CONFIG(flogi_timeout);
65 GET_CONFIG(plogi_retries);
66 GET_CONFIG(plogi_timeout);
67 GET_CONFIG(io_throttle_count);
68 GET_CONFIG(link_down_timeout);
69 GET_CONFIG(port_down_timeout);
70 GET_CONFIG(port_down_io_retries);
71 GET_CONFIG(luns_per_tgt);
72
73 c->wq_enet_desc_count =
74 min_t(u32, VNIC_FNIC_WQ_DESCS_MAX,
75 max_t(u32, VNIC_FNIC_WQ_DESCS_MIN,
76 c->wq_enet_desc_count));
77 c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16);
78
79 c->wq_copy_desc_count =
80 min_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MAX,
81 max_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MIN,
82 c->wq_copy_desc_count));
83 c->wq_copy_desc_count = ALIGN(c->wq_copy_desc_count, 16);
84
85 c->rq_desc_count =
86 min_t(u32, VNIC_FNIC_RQ_DESCS_MAX,
87 max_t(u32, VNIC_FNIC_RQ_DESCS_MIN,
88 c->rq_desc_count));
89 c->rq_desc_count = ALIGN(c->rq_desc_count, 16);
90
91 c->maxdatafieldsize =
92 min_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MAX,
93 max_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MIN,
94 c->maxdatafieldsize));
95 c->ed_tov =
96 min_t(u32, VNIC_FNIC_EDTOV_MAX,
97 max_t(u32, VNIC_FNIC_EDTOV_MIN,
98 c->ed_tov));
99
100 c->ra_tov =
101 min_t(u32, VNIC_FNIC_RATOV_MAX,
102 max_t(u32, VNIC_FNIC_RATOV_MIN,
103 c->ra_tov));
104
105 c->flogi_retries =
106 min_t(u32, VNIC_FNIC_FLOGI_RETRIES_MAX, c->flogi_retries);
107
108 c->flogi_timeout =
109 min_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MAX,
110 max_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MIN,
111 c->flogi_timeout));
112
113 c->plogi_retries =
114 min_t(u32, VNIC_FNIC_PLOGI_RETRIES_MAX, c->plogi_retries);
115
116 c->plogi_timeout =
117 min_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MAX,
118 max_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MIN,
119 c->plogi_timeout));
120
121 c->io_throttle_count =
122 min_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MAX,
123 max_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MIN,
124 c->io_throttle_count));
125
126 c->link_down_timeout =
127 min_t(u32, VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX,
128 c->link_down_timeout);
129
130 c->port_down_timeout =
131 min_t(u32, VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX,
132 c->port_down_timeout);
133
134 c->port_down_io_retries =
135 min_t(u32, VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX,
136 c->port_down_io_retries);
137
138 c->luns_per_tgt =
139 min_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MAX,
140 max_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MIN,
141 c->luns_per_tgt));
142
143 c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
144 c->intr_timer_type = c->intr_timer_type;
145
146 shost_printk(KERN_INFO, fnic->lport->host,
147 "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
148 "wq/wq_copy/rq %d/%d/%d\n",
149 fnic->mac_addr[0], fnic->mac_addr[1], fnic->mac_addr[2],
150 fnic->mac_addr[3], fnic->mac_addr[4], fnic->mac_addr[5],
151 c->wq_enet_desc_count, c->wq_copy_desc_count,
152 c->rq_desc_count);
153 shost_printk(KERN_INFO, fnic->lport->host,
154 "vNIC node wwn %llx port wwn %llx\n",
155 c->node_wwn, c->port_wwn);
156 shost_printk(KERN_INFO, fnic->lport->host,
157 "vNIC ed_tov %d ra_tov %d\n",
158 c->ed_tov, c->ra_tov);
159 shost_printk(KERN_INFO, fnic->lport->host,
160 "vNIC mtu %d intr timer %d\n",
161 c->maxdatafieldsize, c->intr_timer);
162 shost_printk(KERN_INFO, fnic->lport->host,
163 "vNIC flags 0x%x luns per tgt %d\n",
164 c->flags, c->luns_per_tgt);
165 shost_printk(KERN_INFO, fnic->lport->host,
166 "vNIC flogi_retries %d flogi timeout %d\n",
167 c->flogi_retries, c->flogi_timeout);
168 shost_printk(KERN_INFO, fnic->lport->host,
169 "vNIC plogi retries %d plogi timeout %d\n",
170 c->plogi_retries, c->plogi_timeout);
171 shost_printk(KERN_INFO, fnic->lport->host,
172 "vNIC io throttle count %d link dn timeout %d\n",
173 c->io_throttle_count, c->link_down_timeout);
174 shost_printk(KERN_INFO, fnic->lport->host,
175 "vNIC port dn io retries %d port dn timeout %d\n",
176 c->port_down_io_retries, c->port_down_timeout);
177
178 return 0;
179}
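Each min_t/max_t pair above pins a firmware-supplied value inside its supported window; clamp_t() from <linux/kernel.h> expresses the same operation in one call. One field rewritten that way, as a sketch using the same bounds:

	c->rq_desc_count = clamp_t(u32, c->rq_desc_count,
				   VNIC_FNIC_RQ_DESCS_MIN,
				   VNIC_FNIC_RQ_DESCS_MAX);
	c->rq_desc_count = ALIGN(c->rq_desc_count, 16);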
180
181int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
182 u8 rss_hash_type,
183 u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable,
184 u8 tso_ipid_split_en, u8 ig_vlan_strip_en)
185{
186 u64 a0, a1;
187 u32 nic_cfg;
188 int wait = 1000;
189
190 vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
191 rss_hash_type, rss_hash_bits, rss_base_cpu,
192 rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
193
194 a0 = nic_cfg;
195 a1 = 0;
196
197 return vnic_dev_cmd(fnic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
198}
199
200void fnic_get_res_counts(struct fnic *fnic)
201{
202 fnic->wq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_WQ);
203 fnic->raw_wq_count = fnic->wq_count - 1;
204 fnic->wq_copy_count = fnic->wq_count - fnic->raw_wq_count;
205 fnic->rq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_RQ);
206 fnic->cq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_CQ);
207 fnic->intr_count = vnic_dev_get_res_count(fnic->vdev,
208 RES_TYPE_INTR_CTRL);
209}
210
211void fnic_free_vnic_resources(struct fnic *fnic)
212{
213 unsigned int i;
214
215 for (i = 0; i < fnic->raw_wq_count; i++)
216 vnic_wq_free(&fnic->wq[i]);
217
218 for (i = 0; i < fnic->wq_copy_count; i++)
219 vnic_wq_copy_free(&fnic->wq_copy[i]);
220
221 for (i = 0; i < fnic->rq_count; i++)
222 vnic_rq_free(&fnic->rq[i]);
223
224 for (i = 0; i < fnic->cq_count; i++)
225 vnic_cq_free(&fnic->cq[i]);
226
227 for (i = 0; i < fnic->intr_count; i++)
228 vnic_intr_free(&fnic->intr[i]);
229}
230
231int fnic_alloc_vnic_resources(struct fnic *fnic)
232{
233 enum vnic_dev_intr_mode intr_mode;
234 unsigned int mask_on_assertion;
235 unsigned int interrupt_offset;
236 unsigned int error_interrupt_enable;
237 unsigned int error_interrupt_offset;
238 unsigned int i, cq_index;
239 unsigned int wq_copy_cq_desc_count;
240 int err;
241
242 intr_mode = vnic_dev_get_intr_mode(fnic->vdev);
243
244 shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n",
245 intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
246 intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
247 intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
248 "MSI-X" : "unknown");
249
250 shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: "
251 "wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n",
252 fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count,
253 fnic->rq_count, fnic->cq_count, fnic->intr_count);
254
255 /* Allocate Raw WQ used for FCS frames */
256 for (i = 0; i < fnic->raw_wq_count; i++) {
257 err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i,
258 fnic->config.wq_enet_desc_count,
259 sizeof(struct wq_enet_desc));
260 if (err)
261 goto err_out_cleanup;
262 }
263
264 /* Allocate Copy WQs used for SCSI IOs */
265 for (i = 0; i < fnic->wq_copy_count; i++) {
266 err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i],
267 (fnic->raw_wq_count + i),
268 fnic->config.wq_copy_desc_count,
269 sizeof(struct fcpio_host_req));
270 if (err)
271 goto err_out_cleanup;
272 }
273
274 /* RQ for receiving FCS frames */
275 for (i = 0; i < fnic->rq_count; i++) {
276 err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i,
277 fnic->config.rq_desc_count,
278 sizeof(struct rq_enet_desc));
279 if (err)
280 goto err_out_cleanup;
281 }
282
283 /* CQ for each RQ */
284 for (i = 0; i < fnic->rq_count; i++) {
285 cq_index = i;
286 err = vnic_cq_alloc(fnic->vdev,
287 &fnic->cq[cq_index], cq_index,
288 fnic->config.rq_desc_count,
289 sizeof(struct cq_enet_rq_desc));
290 if (err)
291 goto err_out_cleanup;
292 }
293
294 /* CQ for each WQ */
295 for (i = 0; i < fnic->raw_wq_count; i++) {
296 cq_index = fnic->rq_count + i;
297 err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index,
298 fnic->config.wq_enet_desc_count,
299 sizeof(struct cq_enet_wq_desc));
300 if (err)
301 goto err_out_cleanup;
302 }
303
304 /* CQ for each COPY WQ */
305 wq_copy_cq_desc_count = (fnic->config.wq_copy_desc_count * 3);
306 for (i = 0; i < fnic->wq_copy_count; i++) {
307 cq_index = fnic->raw_wq_count + fnic->rq_count + i;
308 err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index],
309 cq_index,
310 wq_copy_cq_desc_count,
311 sizeof(struct fcpio_fw_req));
312 if (err)
313 goto err_out_cleanup;
314 }
315
316 for (i = 0; i < fnic->intr_count; i++) {
317 err = vnic_intr_alloc(fnic->vdev, &fnic->intr[i], i);
318 if (err)
319 goto err_out_cleanup;
320 }
321
322 fnic->legacy_pba = vnic_dev_get_res(fnic->vdev,
323 RES_TYPE_INTR_PBA_LEGACY, 0);
324
325 if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
326 shost_printk(KERN_ERR, fnic->lport->host,
327 "Failed to hook legacy pba resource\n");
328 err = -ENODEV;
329 goto err_out_cleanup;
330 }
331
332 /*
333 * Init RQ/WQ resources.
334 *
335 * RQ[0 to n-1] point to CQ[0 to n-1]
336 * WQ[0 to m-1] point to CQ[n to n+m-1]
337 * WQ_COPY[0 to k-1] points to CQ[n+m to n+m+k-1]
338 *
339 * Note for copy wq we always initialize with cq_index = 0
340 *
341 * Error interrupt is not enabled for MSI.
342 */
343
344 switch (intr_mode) {
345 case VNIC_DEV_INTR_MODE_INTX:
346 case VNIC_DEV_INTR_MODE_MSIX:
347 error_interrupt_enable = 1;
348 error_interrupt_offset = fnic->err_intr_offset;
349 break;
350 default:
351 error_interrupt_enable = 0;
352 error_interrupt_offset = 0;
353 break;
354 }
355
356 for (i = 0; i < fnic->rq_count; i++) {
357 cq_index = i;
358 vnic_rq_init(&fnic->rq[i],
359 cq_index,
360 error_interrupt_enable,
361 error_interrupt_offset);
362 }
363
364 for (i = 0; i < fnic->raw_wq_count; i++) {
365 cq_index = i + fnic->rq_count;
366 vnic_wq_init(&fnic->wq[i],
367 cq_index,
368 error_interrupt_enable,
369 error_interrupt_offset);
370 }
371
372 for (i = 0; i < fnic->wq_copy_count; i++) {
373 vnic_wq_copy_init(&fnic->wq_copy[i],
374 0 /* cq_index 0 - always */,
375 error_interrupt_enable,
376 error_interrupt_offset);
377 }
378
379 for (i = 0; i < fnic->cq_count; i++) {
380
381 switch (intr_mode) {
382 case VNIC_DEV_INTR_MODE_MSIX:
383 interrupt_offset = i;
384 break;
385 default:
386 interrupt_offset = 0;
387 break;
388 }
389
390 vnic_cq_init(&fnic->cq[i],
391 0 /* flow_control_enable */,
392 1 /* color_enable */,
393 0 /* cq_head */,
394 0 /* cq_tail */,
395 1 /* cq_tail_color */,
396 1 /* interrupt_enable */,
397 1 /* cq_entry_enable */,
398 0 /* cq_message_enable */,
399 interrupt_offset,
400 0 /* cq_message_addr */);
401 }
402
403 /*
404 * Init INTR resources
405 *
406 * mask_on_assertion is not used for INTx due to the level-
407 * triggered nature of INTx
408 */
409
410 switch (intr_mode) {
411 case VNIC_DEV_INTR_MODE_MSI:
412 case VNIC_DEV_INTR_MODE_MSIX:
413 mask_on_assertion = 1;
414 break;
415 default:
416 mask_on_assertion = 0;
417 break;
418 }
419
420 for (i = 0; i < fnic->intr_count; i++) {
421 vnic_intr_init(&fnic->intr[i],
422 fnic->config.intr_timer,
423 fnic->config.intr_timer_type,
424 mask_on_assertion);
425 }
426
427 /* init the stats memory by making the first call here */
428 err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
429 if (err) {
430 shost_printk(KERN_ERR, fnic->lport->host,
431 "vnic_dev_stats_dump failed - x%x\n", err);
432 goto err_out_cleanup;
433 }
434
435 /* Clear LIF stats */
436 vnic_dev_stats_clear(fnic->vdev);
437
438 return 0;
439
440err_out_cleanup:
441 fnic_free_vnic_resources(fnic);
442
443 return err;
444}
diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h
new file mode 100644
index 00000000000..b6f31026253
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_res.h
@@ -0,0 +1,197 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _FNIC_RES_H_
19#define _FNIC_RES_H_
20
21#include "wq_enet_desc.h"
22#include "rq_enet_desc.h"
23#include "vnic_wq.h"
24#include "vnic_rq.h"
25#include "fnic_io.h"
26#include "fcpio.h"
27#include "vnic_wq_copy.h"
28#include "vnic_cq_copy.h"
29
30static inline void fnic_queue_wq_desc(struct vnic_wq *wq,
31 void *os_buf, dma_addr_t dma_addr,
32 unsigned int len, unsigned int fc_eof,
33 int vlan_tag_insert,
34 unsigned int vlan_tag,
35 int cq_entry, int sop, int eop)
36{
37 struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
38
39 wq_enet_desc_enc(desc,
40 (u64)dma_addr | VNIC_PADDR_TARGET,
41 (u16)len,
42 0, /* mss_or_csum_offset */
43 (u16)fc_eof,
44 0, /* offload_mode */
45 (u8)eop, (u8)cq_entry,
46 1, /* fcoe_encap */
47 (u8)vlan_tag_insert,
48 (u16)vlan_tag,
49 0 /* loopback */);
50
51 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
52}
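For orientation, a hedged sketch of how fnic_queue_wq_desc() might be called when sending an FCS frame: one DMA-mapped buffer (skb and pa here are illustrative locals) carries the whole frame, so sop and eop are both set, and a completion entry is requested so the skb can be freed in fnic_wq_complete_frame_send():

	fnic_queue_wq_desc(wq, skb, pa, skb->len,
			   fc_eof,		/* EOF type for the frame */
			   fnic->vlan_hw_insert,/* vlan_tag_insert */
			   fnic->vlan_id,	/* vlan_tag */
			   1,			/* cq_entry: post completion */
			   1, 1);		/* sop, eop: single fragment */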
53
54static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
55 u32 req_id,
56 u32 lunmap_id, u8 spl_flags,
57 u32 sgl_cnt, u32 sense_len,
58 u64 sgl_addr, u64 sns_addr,
59 u8 crn, u8 pri_ta,
60 u8 flags, u8 *scsi_cdb,
61 u32 data_len, u8 *lun,
62 u32 d_id, u16 mss,
63 u32 ratov, u32 edtov)
64{
65 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
66
67 desc->hdr.type = FCPIO_ICMND_16; /* enum fcpio_type */
68 desc->hdr.status = 0; /* header status entry */
69 desc->hdr._resvd = 0; /* reserved */
70 desc->hdr.tag.u.req_id = req_id; /* id for this request */
71
72 desc->u.icmnd_16.lunmap_id = lunmap_id; /* index into lunmap table */
73 desc->u.icmnd_16.special_req_flags = spl_flags; /* exch req flags */
74 desc->u.icmnd_16._resvd0[0] = 0; /* reserved */
75 desc->u.icmnd_16._resvd0[1] = 0; /* reserved */
76 desc->u.icmnd_16._resvd0[2] = 0; /* reserved */
77 desc->u.icmnd_16.sgl_cnt = sgl_cnt; /* scatter-gather list count */
78 desc->u.icmnd_16.sense_len = sense_len; /* sense buffer length */
79 desc->u.icmnd_16.sgl_addr = sgl_addr; /* scatter-gather list addr */
80 desc->u.icmnd_16.sense_addr = sns_addr; /* sense buffer address */
81 desc->u.icmnd_16.crn = crn; /* SCSI Command Reference No.*/
82 desc->u.icmnd_16.pri_ta = pri_ta; /* SCSI Pri & Task attribute */
83 desc->u.icmnd_16._resvd1 = 0; /* reserved: should be 0 */
84 desc->u.icmnd_16.flags = flags; /* command flags */
85 memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, CDB_16); /* SCSI CDB */
86 desc->u.icmnd_16.data_len = data_len; /* length of data expected */
87 memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS); /* LUN address */
88 desc->u.icmnd_16._resvd2 = 0; /* reserved */
89 hton24(desc->u.icmnd_16.d_id, d_id); /* FC vNIC only: Target D_ID */
90 desc->u.icmnd_16.mss = mss; /* FC vNIC only: max burst */
91 desc->u.icmnd_16.r_a_tov = ratov; /*FC vNIC only: Res. Alloc Timeout */
92 desc->u.icmnd_16.e_d_tov = edtov; /*FC vNIC only: Err Detect Timeout */
93
94 vnic_wq_copy_post(wq);
95}
96
97static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq,
98 u32 req_id, u32 lunmap_id,
99 u32 tm_req, u32 tm_id, u8 *lun,
100 u32 d_id, u32 r_a_tov,
101 u32 e_d_tov)
102{
103 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
104
105 desc->hdr.type = FCPIO_ITMF; /* enum fcpio_type */
106 desc->hdr.status = 0; /* header status entry */
107 desc->hdr._resvd = 0; /* reserved */
108 desc->hdr.tag.u.req_id = req_id; /* id for this request */
109
110 desc->u.itmf.lunmap_id = lunmap_id; /* index into lunmap table */
111 desc->u.itmf.tm_req = tm_req; /* SCSI Task Management request */
112 desc->u.itmf.t_tag = tm_id; /* tag of fcpio to be aborted */
113 desc->u.itmf._resvd = 0;
114 memcpy(desc->u.itmf.lun, lun, LUN_ADDRESS); /* LUN address */
115 desc->u.itmf._resvd1 = 0;
116 hton24(desc->u.itmf.d_id, d_id); /* FC vNIC only: Target D_ID */
117 desc->u.itmf.r_a_tov = r_a_tov; /* FC vNIC only: R_A_TOV in msec */
118 desc->u.itmf.e_d_tov = e_d_tov; /* FC vNIC only: E_D_TOV in msec */
119
120 vnic_wq_copy_post(wq);
121}
122
123static inline void fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq,
124 u32 req_id, u8 format,
125 u32 s_id, u8 *gw_mac)
126{
127 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
128
129 desc->hdr.type = FCPIO_FLOGI_REG; /* enum fcpio_type */
130 desc->hdr.status = 0; /* header status entry */
131 desc->hdr._resvd = 0; /* reserved */
132 desc->hdr.tag.u.req_id = req_id; /* id for this request */
133
134 desc->u.flogi_reg.format = format;
135 hton24(desc->u.flogi_reg.s_id, s_id);
136 memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN);
137
138 vnic_wq_copy_post(wq);
139}
140
141static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq,
142 u32 req_id)
143{
144 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
145
146 desc->hdr.type = FCPIO_RESET; /* enum fcpio_type */
147 desc->hdr.status = 0; /* header status entry */
148 desc->hdr._resvd = 0; /* reserved */
149 desc->hdr.tag.u.req_id = req_id; /* id for this request */
150
151 vnic_wq_copy_post(wq);
152}
153
154static inline void fnic_queue_wq_copy_desc_lunmap(struct vnic_wq_copy *wq,
155 u32 req_id, u64 lunmap_addr,
156 u32 lunmap_len)
157{
158 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
159
160 desc->hdr.type = FCPIO_LUNMAP_REQ; /* enum fcpio_type */
161 desc->hdr.status = 0; /* header status entry */
162 desc->hdr._resvd = 0; /* reserved */
163 desc->hdr.tag.u.req_id = req_id; /* id for this request */
164
165 desc->u.lunmap_req.addr = lunmap_addr; /* address of the buffer */
166 desc->u.lunmap_req.len = lunmap_len; /* len of the buffer */
167
168 vnic_wq_copy_post(wq);
169}
170
171static inline void fnic_queue_rq_desc(struct vnic_rq *rq,
172 void *os_buf, dma_addr_t dma_addr,
173 u16 len)
174{
175 struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
176
177 rq_enet_desc_enc(desc,
178 (u64)dma_addr | VNIC_PADDR_TARGET,
179 RQ_ENET_TYPE_ONLY_SOP,
180 (u16)len);
181
182 vnic_rq_post(rq, os_buf, 0, dma_addr, len);
183}
184
185
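/*
 * Entry points for vNIC resource and configuration management;
 * struct fnic is used only as an opaque handle in this header.
 */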
186struct fnic;
187
188int fnic_get_vnic_config(struct fnic *);
189int fnic_alloc_vnic_resources(struct fnic *);
190void fnic_free_vnic_resources(struct fnic *);
191void fnic_get_res_counts(struct fnic *);
192int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
193 u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu,
194 u8 rss_enable, u8 tso_ipid_split_en,
195 u8 ig_vlan_strip_en);
196
197#endif /* _FNIC_RES_H_ */
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
new file mode 100644
index 00000000000..eabf3650285
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -0,0 +1,1850 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/mempool.h>
19#include <linux/errno.h>
20#include <linux/init.h>
21#include <linux/workqueue.h>
22#include <linux/pci.h>
23#include <linux/scatterlist.h>
24#include <linux/skbuff.h>
25#include <linux/spinlock.h>
26#include <linux/if_ether.h>
27#include <linux/if_vlan.h>
28#include <linux/delay.h>
29#include <scsi/scsi.h>
30#include <scsi/scsi_host.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_tcq.h>
34#include <scsi/fc/fc_els.h>
35#include <scsi/fc/fc_fcoe.h>
36#include <scsi/libfc.h>
37#include <scsi/fc_frame.h>
38#include "fnic_io.h"
39#include "fnic.h"
40
41const char *fnic_state_str[] = {
42 [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
43 [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
44 [FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
45 [FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
46};
47
48static const char *fnic_ioreq_state_str[] = {
49 [FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
50 [FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
51 [FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
52 [FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
53};
54
55static const char *fcpio_status_str[] = {
56 [FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
57 [FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
58 [FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
 59	[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
60 [FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
61 [FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
62 [FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
63 [FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
64 [FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
65 [FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
66 [FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
67 [FCPIO_FW_ERR] = "FCPIO_FW_ERR",
68 [FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
69 [FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
70 [FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
71 [FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
72 [FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
73 [FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
 74	[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
75};
76
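/*
 * The enums indexing the arrays above are sparse, so the designated
 * initializers leave NULL holes; the *_to_str() helpers below check both
 * the array bound and the entry itself before dereferencing.
 */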
77const char *fnic_state_to_str(unsigned int state)
78{
79 if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
80 return "unknown";
81
82 return fnic_state_str[state];
83}
84
85static const char *fnic_ioreq_state_to_str(unsigned int state)
86{
87 if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
88 !fnic_ioreq_state_str[state])
89 return "unknown";
90
91 return fnic_ioreq_state_str[state];
92}
93
94static const char *fnic_fcpio_status_to_str(unsigned int status)
95{
96 if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
97 return "unknown";
98
99 return fcpio_status_str[status];
100}
101
102static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
103
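/*
 * Per-IO state is protected by a small array of spinlocks; the lock for
 * a command is picked by hashing its block-layer tag. The mask below
 * assumes FNIC_IO_LOCKS is a power of two.
 */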
104static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
105 struct scsi_cmnd *sc)
106{
107 u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);
108
109 return &fnic->io_req_lock[hash];
110}
111
112/*
113 * Unmap the data buffer and sense buffer for an io_req,
114 * also unmap and free the device-private scatter/gather list.
115 */
116static void fnic_release_ioreq_buf(struct fnic *fnic,
117 struct fnic_io_req *io_req,
118 struct scsi_cmnd *sc)
119{
120 if (io_req->sgl_list_pa)
121 pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
122 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
123 PCI_DMA_TODEVICE);
124 scsi_dma_unmap(sc);
125
126 if (io_req->sgl_cnt)
127 mempool_free(io_req->sgl_list_alloc,
128 fnic->io_sgl_pool[io_req->sgl_type]);
129 if (io_req->sense_buf_pa)
130 pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
131 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
132}
133
134/* Free up Copy Wq descriptors. Called with copy_wq lock held */
135static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
136{
137 /* if no Ack received from firmware, then nothing to clean */
138 if (!fnic->fw_ack_recd[0])
139 return 1;
140
141 /*
142 * Update desc_available count based on number of freed descriptors
143 * Account for wraparound
144 */
145 if (wq->to_clean_index <= fnic->fw_ack_index[0])
146 wq->ring.desc_avail += (fnic->fw_ack_index[0]
147 - wq->to_clean_index + 1);
148 else
149 wq->ring.desc_avail += (wq->ring.desc_count
150 - wq->to_clean_index
151 + fnic->fw_ack_index[0] + 1);
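	/*
	 * Worked example (illustrative numbers): with desc_count = 64,
	 * to_clean_index = 60 and fw_ack_index = 3, the else branch
	 * above frees 64 - 60 + 3 + 1 = 8 descriptors (60..63 and
	 * 0..3), and to_clean_index below becomes (3 + 1) % 64 = 4.
	 */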
152
153 /*
154 * just bump clean index to ack_index+1 accounting for wraparound
155 * this will essentially free up all descriptors between
156 * to_clean_index and fw_ack_index, both inclusive
157 */
158 wq->to_clean_index =
159 (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
160
161 /* we have processed the acks received so far */
162 fnic->fw_ack_recd[0] = 0;
163 return 0;
164}
165
166
167/*
168 * fnic_fw_reset_handler
169 * Routine to send reset msg to fw
170 */
171int fnic_fw_reset_handler(struct fnic *fnic)
172{
173 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
174 int ret = 0;
175 unsigned long flags;
176
177 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
178
179 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
180 free_wq_copy_descs(fnic, wq);
181
182 if (!vnic_wq_copy_desc_avail(wq))
183 ret = -EAGAIN;
184 else
185 fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
186
187 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
188
189 if (!ret)
190 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
191 "Issued fw reset\n");
192 else
193 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
194 "Failed to issue fw reset\n");
195 return ret;
196}
197
198
199/*
200 * fnic_flogi_reg_handler
201 * Routine to send flogi register msg to fw
202 */
203int fnic_flogi_reg_handler(struct fnic *fnic)
204{
205 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
206 u8 gw_mac[ETH_ALEN];
207 int ret = 0;
208 unsigned long flags;
209
210 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
211
212 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
213 free_wq_copy_descs(fnic, wq);
214
215 if (!vnic_wq_copy_desc_avail(wq)) {
216 ret = -EAGAIN;
217 goto flogi_reg_ioreq_end;
218 }
219
220 if (fnic->fcoui_mode)
221 memset(gw_mac, 0xff, ETH_ALEN);
222 else
223 memcpy(gw_mac, fnic->dest_addr, ETH_ALEN);
224
225 fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
226 FCPIO_FLOGI_REG_GW_DEST,
227 fnic->s_id,
228 gw_mac);
229
230flogi_reg_ioreq_end:
231 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
232
233 if (!ret)
234 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
235				      "flogi reg issued\n");
236
237 return ret;
238}
239
240/*
241 * fnic_queue_wq_copy_desc
242 * Routine to enqueue a wq copy desc
243 */
244static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
245 struct vnic_wq_copy *wq,
246 struct fnic_io_req *io_req,
247 struct scsi_cmnd *sc,
248 u32 sg_count)
249{
250 struct scatterlist *sg;
251 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
252 struct fc_rport_libfc_priv *rp = rport->dd_data;
253 struct host_sg_desc *desc;
254 u8 pri_tag = 0;
255 unsigned int i;
256 unsigned long intr_flags;
257 int flags;
258 u8 exch_flags;
259 struct scsi_lun fc_lun;
260 char msg[2];
261
262 if (sg_count) {
264 BUG_ON(sg_count > FNIC_MAX_SG_DESC_CNT);
265
266 /* For each SGE, create a device desc entry */
267 desc = io_req->sgl_list;
268 for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
269 desc->addr = cpu_to_le64(sg_dma_address(sg));
270 desc->len = cpu_to_le32(sg_dma_len(sg));
271 desc->_resvd = 0;
272 desc++;
273 }
274
275 io_req->sgl_list_pa = pci_map_single
276 (fnic->pdev,
277 io_req->sgl_list,
278 sizeof(io_req->sgl_list[0]) * sg_count,
279 PCI_DMA_TODEVICE);
280 }
281
282 io_req->sense_buf_pa = pci_map_single(fnic->pdev,
283 sc->sense_buffer,
284 SCSI_SENSE_BUFFERSIZE,
285 PCI_DMA_FROMDEVICE);
286
287 int_to_scsilun(sc->device->lun, &fc_lun);
288
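	/*
	 * Default to the SIMPLE task attribute; scsi_populate_tag_msg()
	 * rewrites msg[] only when the device uses tagged queuing, and
	 * ORDERED is the only other attribute mapped here (head-of-queue
	 * tags fall back to SIMPLE).
	 */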
289 pri_tag = FCPIO_ICMND_PTA_SIMPLE;
290 msg[0] = MSG_SIMPLE_TAG;
291 scsi_populate_tag_msg(sc, msg);
292 if (msg[0] == MSG_ORDERED_TAG)
293 pri_tag = FCPIO_ICMND_PTA_ORDERED;
294
295 /* Enqueue the descriptor in the Copy WQ */
296 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
297
298 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
299 free_wq_copy_descs(fnic, wq);
300
301 if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
302 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
303 return SCSI_MLQUEUE_HOST_BUSY;
304 }
305
306 flags = 0;
307 if (sc->sc_data_direction == DMA_FROM_DEVICE)
308 flags = FCPIO_ICMND_RDDATA;
309 else if (sc->sc_data_direction == DMA_TO_DEVICE)
310 flags = FCPIO_ICMND_WRDATA;
311
312 exch_flags = 0;
313 if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
314 (rp->flags & FC_RP_FLAGS_RETRY))
315 exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
316
317 fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
318 0, exch_flags, io_req->sgl_cnt,
319 SCSI_SENSE_BUFFERSIZE,
320 io_req->sgl_list_pa,
321 io_req->sense_buf_pa,
322 0, /* scsi cmd ref, always 0 */
323 pri_tag, /* scsi pri and tag */
324 flags, /* command flags */
325 sc->cmnd, scsi_bufflen(sc),
326 fc_lun.scsi_lun, io_req->port_id,
327 rport->maxframe_size, rp->r_a_tov,
328 rp->e_d_tov);
329
330 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
331 return 0;
332}
333
334/*
335 * fnic_queuecommand
336 * Routine to send a scsi cdb
337 * Called with host_lock held and interrupts disabled.
338 */
339int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
340{
341 struct fc_lport *lp;
342 struct fc_rport *rport;
343 struct fnic_io_req *io_req;
344 struct fnic *fnic;
345 struct vnic_wq_copy *wq;
346 int ret;
347	int sg_count; /* scsi_dma_map() can return a negative errno */
348 unsigned long flags;
349 unsigned long ptr;
350
351 rport = starget_to_rport(scsi_target(sc->device));
352 ret = fc_remote_port_chkready(rport);
353 if (ret) {
354 sc->result = ret;
355 done(sc);
356 return 0;
357 }
358
359 lp = shost_priv(sc->device->host);
360 if (lp->state != LPORT_ST_READY || !(lp->link_up))
361 return SCSI_MLQUEUE_HOST_BUSY;
362
363 /*
364 * Release host lock, use driver resource specific locks from here.
365	 * Don't re-enable interrupts here; they may have been disabled
366	 * even before the caller took the host lock.
367 */
368 spin_unlock(lp->host->host_lock);
369
370 /* Get a new io_req for this SCSI IO */
371 fnic = lport_priv(lp);
372
373 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
374 if (!io_req) {
375 ret = SCSI_MLQUEUE_HOST_BUSY;
376 goto out;
377 }
378 memset(io_req, 0, sizeof(*io_req));
379
380 /* Map the data buffer */
381 sg_count = scsi_dma_map(sc);
382	if (sg_count < 0) {
		ret = SCSI_MLQUEUE_HOST_BUSY;
383		mempool_free(io_req, fnic->io_req_pool);
384		goto out;
385 }
386
387 /* Determine the type of scatter/gather list we need */
388 io_req->sgl_cnt = sg_count;
389 io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
390 if (sg_count > FNIC_DFLT_SG_DESC_CNT)
391 io_req->sgl_type = FNIC_SGL_CACHE_MAX;
392
393 if (sg_count) {
394 io_req->sgl_list =
395 mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
396 GFP_ATOMIC | GFP_DMA);
397 if (!io_req->sgl_list) {
398 ret = SCSI_MLQUEUE_HOST_BUSY;
399 scsi_dma_unmap(sc);
400 mempool_free(io_req, fnic->io_req_pool);
401 goto out;
402 }
403
404 /* Cache sgl list allocated address before alignment */
405 io_req->sgl_list_alloc = io_req->sgl_list;
406 ptr = (unsigned long) io_req->sgl_list;
407 if (ptr % FNIC_SG_DESC_ALIGN) {
408 io_req->sgl_list = (struct host_sg_desc *)
409 (((unsigned long) ptr
410 + FNIC_SG_DESC_ALIGN - 1)
411 & ~(FNIC_SG_DESC_ALIGN - 1));
412 }
413 }
414
415 /* initialize rest of io_req */
416 io_req->port_id = rport->port_id;
417 CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
418 CMD_SP(sc) = (char *)io_req;
419 sc->scsi_done = done;
420
421 /* create copy wq desc and enqueue it */
422 wq = &fnic->wq_copy[0];
423 ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
424 if (ret) {
425 /*
426 * In case another thread cancelled the request,
427 * refetch the pointer under the lock.
428 */
429 spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);
430
431 spin_lock_irqsave(io_lock, flags);
432 io_req = (struct fnic_io_req *)CMD_SP(sc);
433 CMD_SP(sc) = NULL;
434 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
435 spin_unlock_irqrestore(io_lock, flags);
436 if (io_req) {
437 fnic_release_ioreq_buf(fnic, io_req, sc);
438 mempool_free(io_req, fnic->io_req_pool);
439 }
440 }
441out:
442 /* acquire host lock before returning to SCSI */
443 spin_lock(lp->host->host_lock);
444 return ret;
445}
446
447/*
448 * fnic_fcpio_fw_reset_cmpl_handler
449 * Routine to handle fw reset completion
450 */
451static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
452 struct fcpio_fw_req *desc)
453{
454 u8 type;
455 u8 hdr_status;
456 struct fcpio_tag tag;
457 int ret = 0;
458 struct fc_frame *flogi;
459 unsigned long flags;
460
461 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
462
463 /* Clean up all outstanding io requests */
464 fnic_cleanup_io(fnic, SCSI_NO_TAG);
465
466 spin_lock_irqsave(&fnic->fnic_lock, flags);
467
468 flogi = fnic->flogi;
469 fnic->flogi = NULL;
470
471 /* fnic should be in FC_TRANS_ETH_MODE */
472 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
473 /* Check status of reset completion */
474 if (!hdr_status) {
475 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
476 "reset cmpl success\n");
477 /* Ready to send flogi out */
478 fnic->state = FNIC_IN_ETH_MODE;
479 } else {
480 FNIC_SCSI_DBG(KERN_DEBUG,
481 fnic->lport->host,
482 "fnic fw_reset : failed %s\n",
483 fnic_fcpio_status_to_str(hdr_status));
484
485 /*
486 * Unable to change to eth mode, cannot send out flogi
487 * Change state to fc mode, so that subsequent Flogi
488 * requests from libFC will cause more attempts to
489 * reset the firmware. Free the cached flogi
490 */
491 fnic->state = FNIC_IN_FC_MODE;
492 ret = -1;
493 }
494 } else {
495 FNIC_SCSI_DBG(KERN_DEBUG,
496 fnic->lport->host,
497 "Unexpected state %s while processing"
498 " reset cmpl\n", fnic_state_to_str(fnic->state));
499 ret = -1;
500 }
501
502 /* Thread removing device blocks till firmware reset is complete */
503 if (fnic->remove_wait)
504 complete(fnic->remove_wait);
505
506 /*
507	 * If the fnic is being removed or the fw reset failed,
508	 * free the flogi frame; else, send it out.
509 */
510 if (fnic->remove_wait || ret) {
511 fnic->flogi_oxid = FC_XID_UNKNOWN;
512 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
513 if (flogi)
514 dev_kfree_skb_irq(fp_skb(flogi));
515 goto reset_cmpl_handler_end;
516 }
517
518 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
519
520 if (flogi)
521 ret = fnic_send_frame(fnic, flogi);
522
523 reset_cmpl_handler_end:
524 return ret;
525}
526
527/*
528 * fnic_fcpio_flogi_reg_cmpl_handler
529 * Routine to handle flogi register completion
530 */
531static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
532 struct fcpio_fw_req *desc)
533{
534 u8 type;
535 u8 hdr_status;
536 struct fcpio_tag tag;
537 int ret = 0;
538 struct fc_frame *flogi_resp = NULL;
539 unsigned long flags;
540 struct sk_buff *skb;
541
542 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
543
544 /* Update fnic state based on status of flogi reg completion */
545 spin_lock_irqsave(&fnic->fnic_lock, flags);
546
547 flogi_resp = fnic->flogi_resp;
548 fnic->flogi_resp = NULL;
549
550 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
551
552 /* Check flogi registration completion status */
553 if (!hdr_status) {
554 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
555				      "flogi reg succeeded\n");
556 fnic->state = FNIC_IN_FC_MODE;
557 } else {
558 FNIC_SCSI_DBG(KERN_DEBUG,
559 fnic->lport->host,
560 "fnic flogi reg :failed %s\n",
561 fnic_fcpio_status_to_str(hdr_status));
562 fnic->state = FNIC_IN_ETH_MODE;
563 ret = -1;
564 }
565 } else {
566 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
567 "Unexpected fnic state %s while"
568 " processing flogi reg completion\n",
569 fnic_state_to_str(fnic->state));
570 ret = -1;
571 }
572
573 /* Successful flogi reg cmpl, pass frame to LibFC */
574 if (!ret && flogi_resp) {
575 if (fnic->stop_rx_link_events) {
576 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
577 goto reg_cmpl_handler_end;
578 }
579 skb = (struct sk_buff *)flogi_resp;
580		/* Use fr_flags to mark this frame as a flogi response */
581 fr_flags(flogi_resp) = 1;
582 fr_dev(flogi_resp) = fnic->lport;
583 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
584
585 skb_queue_tail(&fnic->frame_queue, skb);
586 queue_work(fnic_event_queue, &fnic->frame_work);
587
588 } else {
589 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
590 if (flogi_resp)
591 dev_kfree_skb_irq(fp_skb(flogi_resp));
592 }
593
594reg_cmpl_handler_end:
595 return ret;
596}
597
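/*
 * A firmware ack index is valid only if it falls in the half-open ring
 * interval [to_clean_index, to_use_index), modulo wraparound; anything
 * outside that window is a stale ack and must be ignored.
 */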
598static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
599 u16 request_out)
600{
601 if (wq->to_clean_index <= wq->to_use_index) {
602 /* out of range, stale request_out index */
603 if (request_out < wq->to_clean_index ||
604 request_out >= wq->to_use_index)
605 return 0;
606 } else {
607 /* out of range, stale request_out index */
608 if (request_out < wq->to_clean_index &&
609 request_out >= wq->to_use_index)
610 return 0;
611 }
612 /* request_out index is in range */
613 return 1;
614}
615
616
617/*
618 * Mark that the ack was received and store the ack index. If multiple
619 * acks arrive before the Tx thread cleans them up, the latest value
620 * wins, which is the correct behavior. Note: this state should live
621 * in the copy WQ instead of in the fnic.
622 */
623static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
624 unsigned int cq_index,
625 struct fcpio_fw_req *desc)
626{
627 struct vnic_wq_copy *wq;
628 u16 request_out = desc->u.ack.request_out;
629 unsigned long flags;
630
631 /* mark the ack state */
632 wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
633 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
634
635 if (is_ack_index_in_range(wq, request_out)) {
636 fnic->fw_ack_index[0] = request_out;
637 fnic->fw_ack_recd[0] = 1;
638 }
639 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
640}
641
642/*
643 * fnic_fcpio_icmnd_cmpl_handler
644 * Routine to handle icmnd completions
645 */
646static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
647 struct fcpio_fw_req *desc)
648{
649 u8 type;
650 u8 hdr_status;
651 struct fcpio_tag tag;
652 u32 id;
653 u64 xfer_len = 0;
654 struct fcpio_icmnd_cmpl *icmnd_cmpl;
655 struct fnic_io_req *io_req;
656 struct scsi_cmnd *sc;
657 unsigned long flags;
658 spinlock_t *io_lock;
659
660 /* Decode the cmpl description to get the io_req id */
661 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
662 fcpio_tag_id_dec(&tag, &id);
663
664 if (id >= FNIC_MAX_IO_REQ)
665 return;
666
667 sc = scsi_host_find_tag(fnic->lport->host, id);
668 WARN_ON_ONCE(!sc);
669 if (!sc)
670 return;
671
672 io_lock = fnic_io_lock_hash(fnic, sc);
673 spin_lock_irqsave(io_lock, flags);
674 io_req = (struct fnic_io_req *)CMD_SP(sc);
675 WARN_ON_ONCE(!io_req);
676 if (!io_req) {
677 spin_unlock_irqrestore(io_lock, flags);
678 return;
679 }
680
681 /* firmware completed the io */
682 io_req->io_completed = 1;
683
684 /*
685 * if SCSI-ML has already issued abort on this command,
686 * ignore completion of the IO. The abts path will clean it up
687 */
688 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
689 spin_unlock_irqrestore(io_lock, flags);
690 return;
691 }
692
693 /* Mark the IO as complete */
694 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
695
696 icmnd_cmpl = &desc->u.icmnd_cmpl;
697
698 switch (hdr_status) {
699 case FCPIO_SUCCESS:
700 sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
701 xfer_len = scsi_bufflen(sc);
702 scsi_set_resid(sc, icmnd_cmpl->residual);
703
704 if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
705 xfer_len -= icmnd_cmpl->residual;
706
707 /*
708 * If queue_full, then try to reduce queue depth for all
709 * LUNS on the target. Todo: this should be accompanied
710 * by a periodic queue_depth rampup based on successful
711 * IO completion.
712 */
713 if (icmnd_cmpl->scsi_status == QUEUE_FULL) {
714 struct scsi_device *t_sdev;
715 int qd = 0;
716
717 shost_for_each_device(t_sdev, sc->device->host) {
718 if (t_sdev->id != sc->device->id)
719 continue;
720
721 if (t_sdev->queue_depth > 1) {
722 qd = scsi_track_queue_full
723 (t_sdev,
724 t_sdev->queue_depth - 1);
725 if (qd == -1)
726 qd = t_sdev->host->cmd_per_lun;
727 shost_printk(KERN_INFO,
728 fnic->lport->host,
729 "scsi[%d:%d:%d:%d"
730					     "] queue full detected, "
731					     "new depth = %d\n",
732 t_sdev->host->host_no,
733 t_sdev->channel,
734 t_sdev->id, t_sdev->lun,
735 t_sdev->queue_depth);
736 }
737 }
738 }
739 break;
740
741 case FCPIO_TIMEOUT: /* request was timed out */
742 sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
743 break;
744
745 case FCPIO_ABORTED: /* request was aborted */
746 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
747 break;
748
749 case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
750 scsi_set_resid(sc, icmnd_cmpl->residual);
751 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
752 break;
753
754 case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
755 sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
756 break;
757 case FCPIO_INVALID_HEADER: /* header contains invalid data */
758 case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
759 case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
760 case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
761 case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
762 case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
763 case FCPIO_FW_ERR: /* request was terminated due fw error */
764 default:
765 shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
766 fnic_fcpio_status_to_str(hdr_status));
767 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
768 break;
769 }
770
771 /* Break link with the SCSI command */
772 CMD_SP(sc) = NULL;
773
774 spin_unlock_irqrestore(io_lock, flags);
775
776 fnic_release_ioreq_buf(fnic, io_req, sc);
777
778 mempool_free(io_req, fnic->io_req_pool);
779
780 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
781 fnic->lport->host_stats.fcp_input_requests++;
782 fnic->fcp_input_bytes += xfer_len;
783 } else if (sc->sc_data_direction == DMA_TO_DEVICE) {
784 fnic->lport->host_stats.fcp_output_requests++;
785 fnic->fcp_output_bytes += xfer_len;
786 } else
787 fnic->lport->host_stats.fcp_control_requests++;
788
789 /* Call SCSI completion function to complete the IO */
790 if (sc->scsi_done)
791 sc->scsi_done(sc);
792
793}
794
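/*
 * The tag echoed by firmware for ITMF completions encodes both the SCSI
 * command tag (low bits, FNIC_TAG_MASK) and the request type: the
 * FNIC_TAG_ABORT or FNIC_TAG_DEV_RST bit that was set when the ITMF was
 * queued (see fnic_queue_abort_io_req() and fnic_queue_dr_io_req()).
 */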
795/* fnic_fcpio_itmf_cmpl_handler
796 * Routine to handle itmf completions
797 */
798static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
799 struct fcpio_fw_req *desc)
800{
801 u8 type;
802 u8 hdr_status;
803 struct fcpio_tag tag;
804 u32 id;
805 struct scsi_cmnd *sc;
806 struct fnic_io_req *io_req;
807 unsigned long flags;
808 spinlock_t *io_lock;
809
810 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
811 fcpio_tag_id_dec(&tag, &id);
812
813 if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ)
814 return;
815
816 sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
817 WARN_ON_ONCE(!sc);
818 if (!sc)
819 return;
820
821 io_lock = fnic_io_lock_hash(fnic, sc);
822 spin_lock_irqsave(io_lock, flags);
823 io_req = (struct fnic_io_req *)CMD_SP(sc);
824 WARN_ON_ONCE(!io_req);
825 if (!io_req) {
826 spin_unlock_irqrestore(io_lock, flags);
827 return;
828 }
829
830 if (id & FNIC_TAG_ABORT) {
831 /* Completion of abort cmd */
832 if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
833 /* This is a late completion. Ignore it */
834 spin_unlock_irqrestore(io_lock, flags);
835 return;
836 }
837 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
838 CMD_ABTS_STATUS(sc) = hdr_status;
839
840 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
841 "abts cmpl recd. id %d status %s\n",
842 (int)(id & FNIC_TAG_MASK),
843 fnic_fcpio_status_to_str(hdr_status));
844
845 /*
846 * If scsi_eh thread is blocked waiting for abts to complete,
847 * signal completion to it. IO will be cleaned in the thread
848 * else clean it in this context
849 */
850 if (io_req->abts_done) {
851 complete(io_req->abts_done);
852 spin_unlock_irqrestore(io_lock, flags);
853 } else {
854 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
855 "abts cmpl, completing IO\n");
856 CMD_SP(sc) = NULL;
857 sc->result = (DID_ERROR << 16);
858
859 spin_unlock_irqrestore(io_lock, flags);
860
861 fnic_release_ioreq_buf(fnic, io_req, sc);
862 mempool_free(io_req, fnic->io_req_pool);
863 if (sc->scsi_done)
864 sc->scsi_done(sc);
865 }
866
867 } else if (id & FNIC_TAG_DEV_RST) {
868 /* Completion of device reset */
869 CMD_LR_STATUS(sc) = hdr_status;
870 CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
871 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
872 "dev reset cmpl recd. id %d status %s\n",
873 (int)(id & FNIC_TAG_MASK),
874 fnic_fcpio_status_to_str(hdr_status));
875 if (io_req->dr_done)
876 complete(io_req->dr_done);
877 spin_unlock_irqrestore(io_lock, flags);
878
879 } else {
880 shost_printk(KERN_ERR, fnic->lport->host,
881 "Unexpected itmf io state %s tag %x\n",
882 fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
883 spin_unlock_irqrestore(io_lock, flags);
884 }
885
886}
887
888/*
889 * fnic_fcpio_cmpl_handler
890 * Routine to service the cq for wq_copy
891 */
892static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
893 unsigned int cq_index,
894 struct fcpio_fw_req *desc)
895{
896 struct fnic *fnic = vnic_dev_priv(vdev);
897 int ret = 0;
898
899 switch (desc->hdr.type) {
900 case FCPIO_ACK: /* fw copied copy wq desc to its queue */
901 fnic_fcpio_ack_handler(fnic, cq_index, desc);
902 break;
903
904 case FCPIO_ICMND_CMPL: /* fw completed a command */
905 fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
906 break;
907
908 case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
909 fnic_fcpio_itmf_cmpl_handler(fnic, desc);
910 break;
911
912 case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
913 ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
914 break;
915
916 case FCPIO_RESET_CMPL: /* fw completed reset */
917 ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
918 break;
919
920 default:
921 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
922			      "unknown firmware completion type %d\n",
923 desc->hdr.type);
924 break;
925 }
926
927 return ret;
928}
929
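/*
 * Completion queues are laid out as raw WQ CQs, then RQ CQs, then copy
 * WQ CQs, which is why copy WQ i maps to cq[i + raw_wq_count + rq_count]
 * here, and why fnic_fcpio_ack_handler() above subtracts the same offset.
 */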
930/*
931 * fnic_wq_copy_cmpl_handler
932 * Routine to process wq copy
933 */
934int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
935{
936 unsigned int wq_work_done = 0;
937 unsigned int i, cq_index;
938 unsigned int cur_work_done;
939
940 for (i = 0; i < fnic->wq_copy_count; i++) {
941 cq_index = i + fnic->raw_wq_count + fnic->rq_count;
942 cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
943 fnic_fcpio_cmpl_handler,
944 copy_work_to_do);
945 wq_work_done += cur_work_done;
946 }
947 return wq_work_done;
948}
949
950static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
951{
952 unsigned int i;
953 struct fnic_io_req *io_req;
954 unsigned long flags = 0;
955 struct scsi_cmnd *sc;
956 spinlock_t *io_lock;
957
958 for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
959 if (i == exclude_id)
960 continue;
961
962 sc = scsi_host_find_tag(fnic->lport->host, i);
963 if (!sc)
964 continue;
965
966 io_lock = fnic_io_lock_hash(fnic, sc);
967 spin_lock_irqsave(io_lock, flags);
968 io_req = (struct fnic_io_req *)CMD_SP(sc);
969 if (!io_req) {
970 spin_unlock_irqrestore(io_lock, flags);
971 goto cleanup_scsi_cmd;
972 }
973
974 CMD_SP(sc) = NULL;
975
976 spin_unlock_irqrestore(io_lock, flags);
977
978 /*
979 * If there is a scsi_cmnd associated with this io_req, then
980 * free the corresponding state
981 */
982 fnic_release_ioreq_buf(fnic, io_req, sc);
983 mempool_free(io_req, fnic->io_req_pool);
984
985cleanup_scsi_cmd:
986 sc->result = DID_TRANSPORT_DISRUPTED << 16;
987 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
988 " DID_TRANSPORT_DISRUPTED\n");
989
990 /* Complete the command to SCSI */
991 if (sc->scsi_done)
992 sc->scsi_done(sc);
993 }
994}
995
996void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
997 struct fcpio_host_req *desc)
998{
999 u32 id;
1000 struct fnic *fnic = vnic_dev_priv(wq->vdev);
1001 struct fnic_io_req *io_req;
1002 struct scsi_cmnd *sc;
1003 unsigned long flags;
1004 spinlock_t *io_lock;
1005
1006 /* get the tag reference */
1007 fcpio_tag_id_dec(&desc->hdr.tag, &id);
1008 id &= FNIC_TAG_MASK;
1009
1010 if (id >= FNIC_MAX_IO_REQ)
1011 return;
1012
1013 sc = scsi_host_find_tag(fnic->lport->host, id);
1014 if (!sc)
1015 return;
1016
1017 io_lock = fnic_io_lock_hash(fnic, sc);
1018 spin_lock_irqsave(io_lock, flags);
1019
1020 /* Get the IO context which this desc refers to */
1021 io_req = (struct fnic_io_req *)CMD_SP(sc);
1022
1023 /* fnic interrupts are turned off by now */
1024
1025 if (!io_req) {
1026 spin_unlock_irqrestore(io_lock, flags);
1027 goto wq_copy_cleanup_scsi_cmd;
1028 }
1029
1030 CMD_SP(sc) = NULL;
1031
1032 spin_unlock_irqrestore(io_lock, flags);
1033
1034 fnic_release_ioreq_buf(fnic, io_req, sc);
1035 mempool_free(io_req, fnic->io_req_pool);
1036
1037wq_copy_cleanup_scsi_cmd:
1038 sc->result = DID_NO_CONNECT << 16;
1039 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
1040 " DID_NO_CONNECT\n");
1041
1042 if (sc->scsi_done)
1043 sc->scsi_done(sc);
1044}
1045
1046static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
1047 u32 task_req, u8 *fc_lun,
1048 struct fnic_io_req *io_req)
1049{
1050 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1051 unsigned long flags;
1052
1053 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
1054
1055 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
1056 free_wq_copy_descs(fnic, wq);
1057
1058 if (!vnic_wq_copy_desc_avail(wq)) {
1059 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1060 return 1;
1061 }
1062 fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
1063 0, task_req, tag, fc_lun, io_req->port_id,
1064 fnic->config.ra_tov, fnic->config.ed_tov);
1065
1066 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1067 return 0;
1068}
1069
1070void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
1071{
1072 int tag;
1073 struct fnic_io_req *io_req;
1074 spinlock_t *io_lock;
1075 unsigned long flags;
1076 struct scsi_cmnd *sc;
1077 struct scsi_lun fc_lun;
1078 enum fnic_ioreq_state old_ioreq_state;
1079
1080 FNIC_SCSI_DBG(KERN_DEBUG,
1081 fnic->lport->host,
1082		      "fnic_rport_exch_reset called portid 0x%06x\n",
1083 port_id);
1084
1085 if (fnic->in_remove)
1086 return;
1087
1088 for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
1089 sc = scsi_host_find_tag(fnic->lport->host, tag);
1090 if (!sc)
1091 continue;
1092
1093 io_lock = fnic_io_lock_hash(fnic, sc);
1094 spin_lock_irqsave(io_lock, flags);
1095
1096 io_req = (struct fnic_io_req *)CMD_SP(sc);
1097
1098 if (!io_req || io_req->port_id != port_id) {
1099 spin_unlock_irqrestore(io_lock, flags);
1100 continue;
1101 }
1102
1103 /*
1104 * Found IO that is still pending with firmware and
1105 * belongs to rport that went away
1106 */
1107 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1108 spin_unlock_irqrestore(io_lock, flags);
1109 continue;
1110 }
1111 old_ioreq_state = CMD_STATE(sc);
1112 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1113 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1114
1115 BUG_ON(io_req->abts_done);
1116
1117 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1118			      "fnic_rport_exch_reset: Issuing abts\n");
1119
1120 spin_unlock_irqrestore(io_lock, flags);
1121
1122 /* Now queue the abort command to firmware */
1123 int_to_scsilun(sc->device->lun, &fc_lun);
1124
1125 if (fnic_queue_abort_io_req(fnic, tag,
1126 FCPIO_ITMF_ABT_TASK_TERM,
1127 fc_lun.scsi_lun, io_req)) {
1128 /*
1129 * Revert the cmd state back to old state, if
1130			 * it hasn't changed in between. This cmd will get
1131 * aborted later by scsi_eh, or cleaned up during
1132 * lun reset
1133 */
1134 io_lock = fnic_io_lock_hash(fnic, sc);
1135
1136 spin_lock_irqsave(io_lock, flags);
1137 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1138 CMD_STATE(sc) = old_ioreq_state;
1139 spin_unlock_irqrestore(io_lock, flags);
1140 }
1141 }
1142
1143}
1144
1145void fnic_terminate_rport_io(struct fc_rport *rport)
1146{
1147 int tag;
1148 struct fnic_io_req *io_req;
1149 spinlock_t *io_lock;
1150 unsigned long flags;
1151 struct scsi_cmnd *sc;
1152 struct scsi_lun fc_lun;
1153 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1154 struct fc_lport *lport = rdata->local_port;
1155 struct fnic *fnic = lport_priv(lport);
1156 struct fc_rport *cmd_rport;
1157 enum fnic_ioreq_state old_ioreq_state;
1158
1159 FNIC_SCSI_DBG(KERN_DEBUG,
1160 fnic->lport->host, "fnic_terminate_rport_io called"
1161		      " wwpn 0x%llx, wwnn 0x%llx, portid 0x%06x\n",
1162 rport->port_name, rport->node_name,
1163 rport->port_id);
1164
1165 if (fnic->in_remove)
1166 return;
1167
1168 for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
1169 sc = scsi_host_find_tag(fnic->lport->host, tag);
1170 if (!sc)
1171 continue;
1172
1173 cmd_rport = starget_to_rport(scsi_target(sc->device));
1174 if (rport != cmd_rport)
1175 continue;
1176
1177 io_lock = fnic_io_lock_hash(fnic, sc);
1178 spin_lock_irqsave(io_lock, flags);
1179
1180 io_req = (struct fnic_io_req *)CMD_SP(sc);
1181
1182 if (!io_req || rport != cmd_rport) {
1183 spin_unlock_irqrestore(io_lock, flags);
1184 continue;
1185 }
1186
1187 /*
1188 * Found IO that is still pending with firmware and
1189 * belongs to rport that went away
1190 */
1191 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1192 spin_unlock_irqrestore(io_lock, flags);
1193 continue;
1194 }
1195 old_ioreq_state = CMD_STATE(sc);
1196 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1197 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1198
1199 BUG_ON(io_req->abts_done);
1200
1201 FNIC_SCSI_DBG(KERN_DEBUG,
1202 fnic->lport->host,
1203 "fnic_terminate_rport_io: Issuing abts\n");
1204
1205 spin_unlock_irqrestore(io_lock, flags);
1206
1207 /* Now queue the abort command to firmware */
1208 int_to_scsilun(sc->device->lun, &fc_lun);
1209
1210 if (fnic_queue_abort_io_req(fnic, tag,
1211 FCPIO_ITMF_ABT_TASK_TERM,
1212 fc_lun.scsi_lun, io_req)) {
1213 /*
1214 * Revert the cmd state back to old state, if
1215			 * it hasn't changed in between. This cmd will get
1216 * aborted later by scsi_eh, or cleaned up during
1217 * lun reset
1218 */
1219 io_lock = fnic_io_lock_hash(fnic, sc);
1220
1221 spin_lock_irqsave(io_lock, flags);
1222 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
1223 CMD_STATE(sc) = old_ioreq_state;
1224 spin_unlock_irqrestore(io_lock, flags);
1225 }
1226 }
1227
1228}
1229
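/*
 * Poll (once a second) until the fc_rport leaves the BLOCKED state, so
 * that the error handlers don't race with transport-level rport
 * recovery. Called from the SCSI EH callbacks below, where sleeping is
 * allowed.
 */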
1230static void fnic_block_error_handler(struct scsi_cmnd *sc)
1231{
1232 struct Scsi_Host *shost = sc->device->host;
1233 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
1234 unsigned long flags;
1235
1236 spin_lock_irqsave(shost->host_lock, flags);
1237 while (rport->port_state == FC_PORTSTATE_BLOCKED) {
1238 spin_unlock_irqrestore(shost->host_lock, flags);
1239 msleep(1000);
1240 spin_lock_irqsave(shost->host_lock, flags);
1241 }
1242 spin_unlock_irqrestore(shost->host_lock, flags);
1243
1244}
1245
1246/*
1247 * This function is exported to SCSI for sending abort cmnds.
1248 * A SCSI IO is represented by a io_req in the driver.
1249 * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
1250 */
1251int fnic_abort_cmd(struct scsi_cmnd *sc)
1252{
1253 struct fc_lport *lp;
1254 struct fnic *fnic;
1255 struct fnic_io_req *io_req;
1256 struct fc_rport *rport;
1257 spinlock_t *io_lock;
1258 unsigned long flags;
1259 int ret = SUCCESS;
1260 u32 task_req;
1261 struct scsi_lun fc_lun;
1262 DECLARE_COMPLETION_ONSTACK(tm_done);
1263
1264 /* Wait for rport to unblock */
1265 fnic_block_error_handler(sc);
1266
1267 /* Get local-port, check ready and link up */
1268 lp = shost_priv(sc->device->host);
1269
1270 fnic = lport_priv(lp);
1271 FNIC_SCSI_DBG(KERN_DEBUG,
1272 fnic->lport->host,
1273 "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
1274 (starget_to_rport(scsi_target(sc->device)))->port_id,
1275 sc->device->lun, sc->request->tag);
1276
1277 if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
1278 ret = FAILED;
1279 goto fnic_abort_cmd_end;
1280 }
1281
1282 /*
1283 * Avoid a race between SCSI issuing the abort and the device
1284 * completing the command.
1285 *
1286 * If the command is already completed by the fw cmpl code,
1287 * we just return SUCCESS from here. This means that the abort
1288 * succeeded. In the SCSI ML, since the timeout for command has
1289	 * happened, the completion won't actually complete the command
1290 * and it will be considered as an aborted command
1291 *
1292 * The CMD_SP will not be cleared except while holding io_req_lock.
1293 */
1294 io_lock = fnic_io_lock_hash(fnic, sc);
1295 spin_lock_irqsave(io_lock, flags);
1296 io_req = (struct fnic_io_req *)CMD_SP(sc);
1297 if (!io_req) {
1298 spin_unlock_irqrestore(io_lock, flags);
1299 goto fnic_abort_cmd_end;
1300 }
1301
1302 io_req->abts_done = &tm_done;
1303
1304 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1305 spin_unlock_irqrestore(io_lock, flags);
1306 goto wait_pending;
1307 }
1308 /*
1309 * Command is still pending, need to abort it
1310 * If the firmware completes the command after this point,
1311	 * the completion won't be delivered to the mid-layer, since abort
1312 * has already started.
1313 */
1314 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
1315 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1316
1317 spin_unlock_irqrestore(io_lock, flags);
1318
1319 /*
1320 * Check readiness of the remote port. If the path to remote
1321 * port is up, then send abts to the remote port to terminate
1322 * the IO. Else, just locally terminate the IO in the firmware
1323 */
1324 rport = starget_to_rport(scsi_target(sc->device));
1325 if (fc_remote_port_chkready(rport) == 0)
1326 task_req = FCPIO_ITMF_ABT_TASK;
1327 else
1328 task_req = FCPIO_ITMF_ABT_TASK_TERM;
1329
1330 /* Now queue the abort command to firmware */
1331 int_to_scsilun(sc->device->lun, &fc_lun);
1332
1333 if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
1334 fc_lun.scsi_lun, io_req)) {
1335 spin_lock_irqsave(io_lock, flags);
1336 io_req = (struct fnic_io_req *)CMD_SP(sc);
1337 if (io_req)
1338 io_req->abts_done = NULL;
1339 spin_unlock_irqrestore(io_lock, flags);
1340 ret = FAILED;
1341 goto fnic_abort_cmd_end;
1342 }
1343
1344 /*
1345 * We queued an abort IO, wait for its completion.
1346 * Once the firmware completes the abort command, it will
1347 * wake up this thread.
1348 */
1349 wait_pending:
1350 wait_for_completion_timeout(&tm_done,
1351 msecs_to_jiffies
1352 (2 * fnic->config.ra_tov +
1353 fnic->config.ed_tov));
1354
1355 /* Check the abort status */
1356 spin_lock_irqsave(io_lock, flags);
1357
1358 io_req = (struct fnic_io_req *)CMD_SP(sc);
1359 if (!io_req) {
1360 spin_unlock_irqrestore(io_lock, flags);
1361 ret = FAILED;
1362 goto fnic_abort_cmd_end;
1363 }
1364 io_req->abts_done = NULL;
1365
1366 /* fw did not complete abort, timed out */
1367 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1368 spin_unlock_irqrestore(io_lock, flags);
1369 ret = FAILED;
1370 goto fnic_abort_cmd_end;
1371 }
1372
1373 /*
1374 * firmware completed the abort, check the status,
1375 * free the io_req irrespective of failure or success
1376 */
1377 if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS)
1378 ret = FAILED;
1379
1380 CMD_SP(sc) = NULL;
1381
1382 spin_unlock_irqrestore(io_lock, flags);
1383
1384 fnic_release_ioreq_buf(fnic, io_req, sc);
1385 mempool_free(io_req, fnic->io_req_pool);
1386
1387fnic_abort_cmd_end:
1388 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1389 "Returning from abort cmd %s\n",
1390 (ret == SUCCESS) ?
1391 "SUCCESS" : "FAILED");
1392 return ret;
1393}
1394
1395static inline int fnic_queue_dr_io_req(struct fnic *fnic,
1396 struct scsi_cmnd *sc,
1397 struct fnic_io_req *io_req)
1398{
1399 struct vnic_wq_copy *wq = &fnic->wq_copy[0];
1400 struct scsi_lun fc_lun;
1401 int ret = 0;
1402 unsigned long intr_flags;
1403
1404 spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
1405
1406 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
1407 free_wq_copy_descs(fnic, wq);
1408
1409 if (!vnic_wq_copy_desc_avail(wq)) {
1410 ret = -EAGAIN;
1411 goto lr_io_req_end;
1412 }
1413
1414 /* fill in the lun info */
1415 int_to_scsilun(sc->device->lun, &fc_lun);
1416
1417 fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
1418 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
1419 fc_lun.scsi_lun, io_req->port_id,
1420 fnic->config.ra_tov, fnic->config.ed_tov);
1421
1422lr_io_req_end:
1423 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
1424
1425 return ret;
1426}
1427
1428/*
1429 * Clean up any pending aborts on the lun
1430 * For each outstanding IO on this lun, whose abort is not completed by fw,
1431 * issue a local abort. Wait for abort to complete. Return 0 if all commands
1432 * successfully aborted, 1 otherwise
1433 */
1434static int fnic_clean_pending_aborts(struct fnic *fnic,
1435 struct scsi_cmnd *lr_sc)
1436{
1437 int tag;
1438 struct fnic_io_req *io_req;
1439 spinlock_t *io_lock;
1440 unsigned long flags;
1441 int ret = 0;
1442 struct scsi_cmnd *sc;
1443 struct fc_rport *rport;
1444 struct scsi_lun fc_lun;
1445 struct scsi_device *lun_dev = lr_sc->device;
1446 DECLARE_COMPLETION_ONSTACK(tm_done);
1447
1448 for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
1449 sc = scsi_host_find_tag(fnic->lport->host, tag);
1450 /*
1451 * ignore this lun reset cmd or cmds that do not belong to
1452 * this lun
1453 */
1454 if (!sc || sc == lr_sc || sc->device != lun_dev)
1455 continue;
1456
1457 io_lock = fnic_io_lock_hash(fnic, sc);
1458 spin_lock_irqsave(io_lock, flags);
1459
1460 io_req = (struct fnic_io_req *)CMD_SP(sc);
1461
1462 if (!io_req || sc->device != lun_dev) {
1463 spin_unlock_irqrestore(io_lock, flags);
1464 continue;
1465 }
1466
1467 /*
1468 * Found IO that is still pending with firmware and
1469 * belongs to the LUN that we are resetting
1470 */
1471 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1472 "Found IO in %s on lun\n",
1473 fnic_ioreq_state_to_str(CMD_STATE(sc)));
1474
1475 BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING);
1476
1477 CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
1478 io_req->abts_done = &tm_done;
1479 spin_unlock_irqrestore(io_lock, flags);
1480
1481 /* Now queue the abort command to firmware */
1482 int_to_scsilun(sc->device->lun, &fc_lun);
1483 rport = starget_to_rport(scsi_target(sc->device));
1484
1485 if (fnic_queue_abort_io_req(fnic, tag,
1486 FCPIO_ITMF_ABT_TASK_TERM,
1487 fc_lun.scsi_lun, io_req)) {
1488 spin_lock_irqsave(io_lock, flags);
1489 io_req = (struct fnic_io_req *)CMD_SP(sc);
1490 if (io_req)
1491 io_req->abts_done = NULL;
1492 spin_unlock_irqrestore(io_lock, flags);
1493 ret = 1;
1494 goto clean_pending_aborts_end;
1495 }
1496
1497 wait_for_completion_timeout(&tm_done,
1498 msecs_to_jiffies
1499 (fnic->config.ed_tov));
1500
1501 /* Recheck cmd state to check if it is now aborted */
1502 spin_lock_irqsave(io_lock, flags);
1503 io_req = (struct fnic_io_req *)CMD_SP(sc);
1504 if (!io_req) {
1505 spin_unlock_irqrestore(io_lock, flags);
1506 ret = 1;
1507 goto clean_pending_aborts_end;
1508 }
1509
1510 io_req->abts_done = NULL;
1511
1512 /* if abort is still pending with fw, fail */
1513 if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
1514 spin_unlock_irqrestore(io_lock, flags);
1515 ret = 1;
1516 goto clean_pending_aborts_end;
1517 }
1518 CMD_SP(sc) = NULL;
1519 spin_unlock_irqrestore(io_lock, flags);
1520
1521 fnic_release_ioreq_buf(fnic, io_req, sc);
1522 mempool_free(io_req, fnic->io_req_pool);
1523 }
1524
1525clean_pending_aborts_end:
1526 return ret;
1527}
1528
1529/*
1530 * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
1531 * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
1532 * on the LUN.
1533 */
1534int fnic_device_reset(struct scsi_cmnd *sc)
1535{
1536 struct fc_lport *lp;
1537 struct fnic *fnic;
1538 struct fnic_io_req *io_req;
1539 struct fc_rport *rport;
1540 int status;
1541 int ret = FAILED;
1542 spinlock_t *io_lock;
1543 unsigned long flags;
1544 DECLARE_COMPLETION_ONSTACK(tm_done);
1545
1546 /* Wait for rport to unblock */
1547 fnic_block_error_handler(sc);
1548
1549 /* Get local-port, check ready and link up */
1550 lp = shost_priv(sc->device->host);
1551
1552 fnic = lport_priv(lp);
1553 FNIC_SCSI_DBG(KERN_DEBUG,
1554 fnic->lport->host,
1555 "Device reset called FCID 0x%x, LUN 0x%x\n",
1556 (starget_to_rport(scsi_target(sc->device)))->port_id,
1557 sc->device->lun);
1558
1559
1560 if (lp->state != LPORT_ST_READY || !(lp->link_up))
1561 goto fnic_device_reset_end;
1562
1563 /* Check if remote port up */
1564 rport = starget_to_rport(scsi_target(sc->device));
1565 if (fc_remote_port_chkready(rport))
1566 goto fnic_device_reset_end;
1567
1568 io_lock = fnic_io_lock_hash(fnic, sc);
1569 spin_lock_irqsave(io_lock, flags);
1570 io_req = (struct fnic_io_req *)CMD_SP(sc);
1571
1572 /*
1573 * If there is a io_req attached to this command, then use it,
1574 * else allocate a new one.
1575 */
1576 if (!io_req) {
1577 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
1578 if (!io_req) {
1579 spin_unlock_irqrestore(io_lock, flags);
1580 goto fnic_device_reset_end;
1581 }
1582 memset(io_req, 0, sizeof(*io_req));
1583 io_req->port_id = rport->port_id;
1584 CMD_SP(sc) = (char *)io_req;
1585 }
1586 io_req->dr_done = &tm_done;
1587 CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
1588 CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
1589 spin_unlock_irqrestore(io_lock, flags);
1590
1591 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n",
1592 sc->request->tag);
1593
1594 /*
1595 * issue the device reset, if enqueue failed, clean up the ioreq
1596 * and break assoc with scsi cmd
1597 */
1598 if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
1599 spin_lock_irqsave(io_lock, flags);
1600 io_req = (struct fnic_io_req *)CMD_SP(sc);
1601 if (io_req)
1602 io_req->dr_done = NULL;
1603 goto fnic_device_reset_clean;
1604 }
1605
1606 /*
1607 * Wait on the local completion for LUN reset. The io_req may be
1608 * freed while we wait since we hold no lock.
1609 */
1610 wait_for_completion_timeout(&tm_done,
1611 msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
1612
1613 spin_lock_irqsave(io_lock, flags);
1614 io_req = (struct fnic_io_req *)CMD_SP(sc);
1615 if (!io_req) {
1616 spin_unlock_irqrestore(io_lock, flags);
1617 goto fnic_device_reset_end;
1618 }
1619 io_req->dr_done = NULL;
1620
1621 status = CMD_LR_STATUS(sc);
1622 spin_unlock_irqrestore(io_lock, flags);
1623
1624 /*
1625 * If lun reset not completed, bail out with failed. io_req
1626 * gets cleaned up during higher levels of EH
1627 */
1628 if (status == FCPIO_INVALID_CODE) {
1629 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1630 "Device reset timed out\n");
1631 goto fnic_device_reset_end;
1632 }
1633
1634 /* Completed, but not successful, clean up the io_req, return fail */
1635 if (status != FCPIO_SUCCESS) {
1636 spin_lock_irqsave(io_lock, flags);
1637 FNIC_SCSI_DBG(KERN_DEBUG,
1638 fnic->lport->host,
1639 "Device reset completed - failed\n");
1640 io_req = (struct fnic_io_req *)CMD_SP(sc);
1641 goto fnic_device_reset_clean;
1642 }
1643
1644 /*
1645 * Clean up any aborts on this lun that have still not
1646 * completed. If any of these fail, then LUN reset fails.
1647 * clean_pending_aborts cleans all cmds on this lun except
1648 * the lun reset cmd. If all cmds get cleaned, the lun reset
1649 * succeeds
1650 */
1651 if (fnic_clean_pending_aborts(fnic, sc)) {
1652 spin_lock_irqsave(io_lock, flags);
1653 io_req = (struct fnic_io_req *)CMD_SP(sc);
1654 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1655 "Device reset failed"
1656 " since could not abort all IOs\n");
1657 goto fnic_device_reset_clean;
1658 }
1659
1660 /* Clean lun reset command */
1661 spin_lock_irqsave(io_lock, flags);
1662 io_req = (struct fnic_io_req *)CMD_SP(sc);
1663 if (io_req)
1664 /* Completed, and successful */
1665 ret = SUCCESS;
1666
1667fnic_device_reset_clean:
1668 if (io_req)
1669 CMD_SP(sc) = NULL;
1670
1671 spin_unlock_irqrestore(io_lock, flags);
1672
1673 if (io_req) {
1674 fnic_release_ioreq_buf(fnic, io_req, sc);
1675 mempool_free(io_req, fnic->io_req_pool);
1676 }
1677
1678fnic_device_reset_end:
1679 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1680 "Returning from device reset %s\n",
1681 (ret == SUCCESS) ?
1682 "SUCCESS" : "FAILED");
1683 return ret;
1684}
1685
1686/* Clean up all IOs, clean up libFC local port */
1687int fnic_reset(struct Scsi_Host *shost)
1688{
1689 struct fc_lport *lp;
1690 struct fnic *fnic;
1691 int ret = SUCCESS;
1692
1693 lp = shost_priv(shost);
1694 fnic = lport_priv(lp);
1695
1696 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1697 "fnic_reset called\n");
1698
1699 /*
1700 * Reset local port, this will clean up libFC exchanges,
1701 * reset remote port sessions, and if link is up, begin flogi
1702 */
1703 if (lp->tt.lport_reset(lp))
1704 ret = FAILED;
1705
1706 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1707 "Returning from fnic reset %s\n",
1708 (ret == SUCCESS) ?
1709 "SUCCESS" : "FAILED");
1710
1711 return ret;
1712}
1713
1714/*
1715 * SCSI Error handling calls driver's eh_host_reset if all prior
1716 * error handling levels return FAILED. If host reset completes
1717 * successfully, and if link is up, then Fabric login begins.
1718 *
1719 * Host Reset is the highest level of error recovery. If this fails, then
1720 * host is offlined by SCSI.
1721 *
1722 */
1723int fnic_host_reset(struct scsi_cmnd *sc)
1724{
1725 int ret;
1726 unsigned long wait_host_tmo;
1727 struct Scsi_Host *shost = sc->device->host;
1728 struct fc_lport *lp = shost_priv(shost);
1729
1730 /*
1731 * If fnic_reset is successful, wait for fabric login to complete
1732 * scsi-ml tries to send a TUR to every device if host reset is
1733 * successful, so before returning to scsi, fabric should be up
1734 */
1735 ret = fnic_reset(shost);
1736 if (ret == SUCCESS) {
1737 wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
1738 ret = FAILED;
1739 while (time_before(jiffies, wait_host_tmo)) {
1740 if ((lp->state == LPORT_ST_READY) &&
1741 (lp->link_up)) {
1742 ret = SUCCESS;
1743 break;
1744 }
1745 ssleep(1);
1746 }
1747 }
1748
1749 return ret;
1750}
1751
1752/*
1753 * This function is called from libFC when the host is being removed
1754 */
1755void fnic_scsi_abort_io(struct fc_lport *lp)
1756{
1757 int err = 0;
1758 unsigned long flags;
1759 enum fnic_state old_state;
1760 struct fnic *fnic = lport_priv(lp);
1761 DECLARE_COMPLETION_ONSTACK(remove_wait);
1762
1763 /* Issue firmware reset for fnic, wait for reset to complete */
1764 spin_lock_irqsave(&fnic->fnic_lock, flags);
1765 fnic->remove_wait = &remove_wait;
1766 old_state = fnic->state;
1767 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
1768 vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
1769 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1770
1771 err = fnic_fw_reset_handler(fnic);
1772 if (err) {
1773 spin_lock_irqsave(&fnic->fnic_lock, flags);
1774 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
1775 fnic->state = old_state;
1776 fnic->remove_wait = NULL;
1777 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1778 return;
1779 }
1780
1781 /* Wait for firmware reset to complete */
1782 wait_for_completion_timeout(&remove_wait,
1783 msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
1784
1785 spin_lock_irqsave(&fnic->fnic_lock, flags);
1786 fnic->remove_wait = NULL;
1787 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1788 "fnic_scsi_abort_io %s\n",
1789 (fnic->state == FNIC_IN_ETH_MODE) ?
1790 "SUCCESS" : "FAILED");
1791 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1792
1793}
1794
1795/*
1796 * This function is called from libFC to clean up driver IO state on link down
1797 */
1798void fnic_scsi_cleanup(struct fc_lport *lp)
1799{
1800 unsigned long flags;
1801 enum fnic_state old_state;
1802 struct fnic *fnic = lport_priv(lp);
1803
1804 /* issue fw reset */
1805 spin_lock_irqsave(&fnic->fnic_lock, flags);
1806 old_state = fnic->state;
1807 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
1808 vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
1809 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1810
1811 if (fnic_fw_reset_handler(fnic)) {
1812 spin_lock_irqsave(&fnic->fnic_lock, flags);
1813 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
1814 fnic->state = old_state;
1815 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1816 }
1817
1818}
1819
1820void fnic_empty_scsi_cleanup(struct fc_lport *lp)
1821{
1822}
1823
1824void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
1825{
1826 struct fnic *fnic = lport_priv(lp);
1827
1828	/* Non-zero sid, nothing fnic-specific to do */
1829 if (sid)
1830 goto call_fc_exch_mgr_reset;
1831
1832 if (did) {
1833 fnic_rport_exch_reset(fnic, did);
1834 goto call_fc_exch_mgr_reset;
1835 }
1836
1837 /*
1838 * sid = 0, did = 0
1839 * link down or device being removed
1840 */
1841 if (!fnic->in_remove)
1842 fnic_scsi_cleanup(lp);
1843 else
1844 fnic_scsi_abort_io(lp);
1845
1846 /* call libFC exch mgr reset to reset its exchanges */
1847call_fc_exch_mgr_reset:
1848 fc_exch_mgr_reset(lp, sid, did);
1849
1850}
diff --git a/drivers/scsi/fnic/rq_enet_desc.h b/drivers/scsi/fnic/rq_enet_desc.h
new file mode 100644
index 00000000000..92e80ae6b72
--- /dev/null
+++ b/drivers/scsi/fnic/rq_enet_desc.h
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _RQ_ENET_DESC_H_
19#define _RQ_ENET_DESC_H_
20
21/* Ethernet receive queue descriptor: 16B */
22struct rq_enet_desc {
23 __le64 address;
24 __le16 length_type;
25 u8 reserved[6];
26};
27
28enum rq_enet_type_types {
29 RQ_ENET_TYPE_ONLY_SOP = 0,
30 RQ_ENET_TYPE_NOT_SOP = 1,
31 RQ_ENET_TYPE_RESV2 = 2,
32 RQ_ENET_TYPE_RESV3 = 3,
33};
34
35#define RQ_ENET_ADDR_BITS 64
36#define RQ_ENET_LEN_BITS 14
37#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1)
38#define RQ_ENET_TYPE_BITS 2
39#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
40
41static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
42 u64 address, u8 type, u16 length)
43{
44 desc->address = cpu_to_le64(address);
45 desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
46 ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
47}
48
49static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
50 u64 *address, u8 *type, u16 *length)
51{
52 *address = le64_to_cpu(desc->address);
53 *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
54 *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
55 RQ_ENET_TYPE_MASK);
56}
57
58#endif /* _RQ_ENET_DESC_H_ */
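
The descriptor packs a 64-bit buffer address plus a 14-bit length and a 2-bit SOP type into 16 little-endian bytes. A quick round-trip sanity sketch using only the two inline helpers defined above:

	struct rq_enet_desc d;
	u64 addr;
	u16 len;
	u8 type;

	rq_enet_desc_enc(&d, 0x1000, RQ_ENET_TYPE_ONLY_SOP, 1500);
	rq_enet_desc_dec(&d, &addr, &type, &len);
	/* addr == 0x1000, type == RQ_ENET_TYPE_ONLY_SOP, len == 1500 */
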
diff --git a/drivers/scsi/fnic/vnic_cq.c b/drivers/scsi/fnic/vnic_cq.c
new file mode 100644
index 00000000000..c5db32eda5e
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq.c
@@ -0,0 +1,85 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#include <linux/errno.h>
19#include <linux/types.h>
20#include <linux/pci.h>
21#include "vnic_dev.h"
22#include "vnic_cq.h"
23
24void vnic_cq_free(struct vnic_cq *cq)
25{
26 vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
27
28 cq->ctrl = NULL;
29}
30
31int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
32 unsigned int desc_count, unsigned int desc_size)
33{
34 int err;
35
36 cq->index = index;
37 cq->vdev = vdev;
38
39 cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
40 if (!cq->ctrl) {
41 printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
42 return -EINVAL;
43 }
44
45 err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
46 if (err)
47 return err;
48
49 return 0;
50}
51
52void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
53 unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
54 unsigned int cq_tail_color, unsigned int interrupt_enable,
55 unsigned int cq_entry_enable, unsigned int cq_message_enable,
56 unsigned int interrupt_offset, u64 cq_message_addr)
57{
58 u64 paddr;
59
60 paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
61 writeq(paddr, &cq->ctrl->ring_base);
62 iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
63 iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
64 iowrite32(color_enable, &cq->ctrl->color_enable);
65 iowrite32(cq_head, &cq->ctrl->cq_head);
66 iowrite32(cq_tail, &cq->ctrl->cq_tail);
67 iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
68 iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
69 iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
70 iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
71 iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
72 writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
73}
74
75void vnic_cq_clean(struct vnic_cq *cq)
76{
77 cq->to_clean = 0;
78 cq->last_color = 0;
79
80 iowrite32(0, &cq->ctrl->cq_head);
81 iowrite32(0, &cq->ctrl->cq_tail);
82 iowrite32(1, &cq->ctrl->cq_tail_color);
83
84 vnic_dev_clear_desc_ring(&cq->ring);
85}
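
The intended lifecycle is alloc -> init -> service/clean -> free. A sketch of bringing up one completion queue; the parameter choices here are illustrative assumptions (color and interrupts on, message writeback off), not values taken from this patch:

	struct vnic_cq cq;

	if (vnic_cq_alloc(vdev, &cq, 0 /* index */, 64, sizeof(struct cq_desc)))
		return -ENOMEM;

	/* flow ctrl off, color on, head/tail 0, tail color 1, intr on,
	 * entry writes on, message writes off, intr offset 0, msg addr 0 */
	vnic_cq_init(&cq, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0);
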
diff --git a/drivers/scsi/fnic/vnic_cq.h b/drivers/scsi/fnic/vnic_cq.h
new file mode 100644
index 00000000000..4ede6809fb1
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq.h
@@ -0,0 +1,121 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_CQ_H_
19#define _VNIC_CQ_H_
20
21#include "cq_desc.h"
22#include "vnic_dev.h"
23
24/*
25 * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
26 * Driver) when both are built with CONFIG options =y
27 */
28#define vnic_cq_service fnic_cq_service
29#define vnic_cq_free fnic_cq_free
30#define vnic_cq_alloc fnic_cq_alloc
31#define vnic_cq_init fnic_cq_init
32#define vnic_cq_clean fnic_cq_clean
33
34/* Completion queue control */
35struct vnic_cq_ctrl {
36 u64 ring_base; /* 0x00 */
37 u32 ring_size; /* 0x08 */
38 u32 pad0;
39 u32 flow_control_enable; /* 0x10 */
40 u32 pad1;
41 u32 color_enable; /* 0x18 */
42 u32 pad2;
43 u32 cq_head; /* 0x20 */
44 u32 pad3;
45 u32 cq_tail; /* 0x28 */
46 u32 pad4;
47 u32 cq_tail_color; /* 0x30 */
48 u32 pad5;
49 u32 interrupt_enable; /* 0x38 */
50 u32 pad6;
51 u32 cq_entry_enable; /* 0x40 */
52 u32 pad7;
53 u32 cq_message_enable; /* 0x48 */
54 u32 pad8;
55 u32 interrupt_offset; /* 0x50 */
56 u32 pad9;
57 u64 cq_message_addr; /* 0x58 */
58 u32 pad10;
59};
60
61struct vnic_cq {
62 unsigned int index;
63 struct vnic_dev *vdev;
64 struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */
65 struct vnic_dev_ring ring;
66 unsigned int to_clean;
67 unsigned int last_color;
68};
69
70static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
71 unsigned int work_to_do,
72 int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
73 u8 type, u16 q_number, u16 completed_index, void *opaque),
74 void *opaque)
75{
76 struct cq_desc *cq_desc;
77 unsigned int work_done = 0;
78 u16 q_number, completed_index;
79 u8 type, color;
80
81 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
82 cq->ring.desc_size * cq->to_clean);
83 cq_desc_dec(cq_desc, &type, &color,
84 &q_number, &completed_index);
85
86 while (color != cq->last_color) {
87
88 if ((*q_service)(cq->vdev, cq_desc, type,
89 q_number, completed_index, opaque))
90 break;
91
92 cq->to_clean++;
93 if (cq->to_clean == cq->ring.desc_count) {
94 cq->to_clean = 0;
95 cq->last_color = cq->last_color ? 0 : 1;
96 }
97
98 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
99 cq->ring.desc_size * cq->to_clean);
100 cq_desc_dec(cq_desc, &type, &color,
101 &q_number, &completed_index);
102
103 work_done++;
104 if (work_done >= work_to_do)
105 break;
106 }
107
108 return work_done;
109}
110
111void vnic_cq_free(struct vnic_cq *cq);
112int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
113 unsigned int desc_count, unsigned int desc_size);
114void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
115 unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
116 unsigned int cq_tail_color, unsigned int interrupt_enable,
117 unsigned int cq_entry_enable, unsigned int message_enable,
118 unsigned int interrupt_offset, u64 message_addr);
119void vnic_cq_clean(struct vnic_cq *cq);
120
121#endif /* _VNIC_CQ_H_ */
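
vnic_cq_service() implements the color protocol: each completed descriptor carries a color bit that the hardware flips on every lap around the ring, so a descriptor is new exactly when its color differs from last_color, and the fast path needs no head/tail register read. The caller supplies a per-descriptor callback; a non-zero return stops the sweep early. A usage sketch with a hypothetical callback:

	static int my_cq_cb(struct vnic_dev *vdev, struct cq_desc *desc,
			    u8 type, u16 q_number, u16 completed_index,
			    void *opaque)
	{
		/* retire the buffer at completed_index here */
		return 0;			/* 0 = keep servicing */
	}

	/* in the poll/interrupt path, bounded by a work budget: */
	work_done = vnic_cq_service(cq, budget, my_cq_cb, NULL);
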
diff --git a/drivers/scsi/fnic/vnic_cq_copy.h b/drivers/scsi/fnic/vnic_cq_copy.h
new file mode 100644
index 00000000000..7901ce255a8
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_cq_copy.h
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_CQ_COPY_H_
19#define _VNIC_CQ_COPY_H_
20
21#include "fcpio.h"
22
23static inline unsigned int vnic_cq_copy_service(
24 struct vnic_cq *cq,
25 int (*q_service)(struct vnic_dev *vdev,
26 unsigned int index,
27 struct fcpio_fw_req *desc),
28 unsigned int work_to_do)
29
30{
31 struct fcpio_fw_req *desc;
32 unsigned int work_done = 0;
33 u8 color;
34
35 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
36 cq->ring.desc_size * cq->to_clean);
37 fcpio_color_dec(desc, &color);
38
39 while (color != cq->last_color) {
40
41 if ((*q_service)(cq->vdev, cq->index, desc))
42 break;
43
44 cq->to_clean++;
45 if (cq->to_clean == cq->ring.desc_count) {
46 cq->to_clean = 0;
47 cq->last_color = cq->last_color ? 0 : 1;
48 }
49
50 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
51 cq->ring.desc_size * cq->to_clean);
52 fcpio_color_dec(desc, &color);
53
54 work_done++;
55 if (work_done >= work_to_do)
56 break;
57 }
58
59 return work_done;
60}
61
62#endif /* _VNIC_CQ_COPY_H_ */
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
new file mode 100644
index 00000000000..56677064508
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -0,0 +1,690 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/types.h>
22#include <linux/pci.h>
23#include <linux/delay.h>
24#include <linux/if_ether.h>
25#include "vnic_resource.h"
26#include "vnic_devcmd.h"
27#include "vnic_dev.h"
28#include "vnic_stats.h"
29
30struct vnic_res {
31 void __iomem *vaddr;
32 unsigned int count;
33};
34
35struct vnic_dev {
36 void *priv;
37 struct pci_dev *pdev;
38 struct vnic_res res[RES_TYPE_MAX];
39 enum vnic_dev_intr_mode intr_mode;
40 struct vnic_devcmd __iomem *devcmd;
41 struct vnic_devcmd_notify *notify;
42 struct vnic_devcmd_notify notify_copy;
43 dma_addr_t notify_pa;
44 u32 *linkstatus;
45 dma_addr_t linkstatus_pa;
46 struct vnic_stats *stats;
47 dma_addr_t stats_pa;
48 struct vnic_devcmd_fw_info *fw_info;
49 dma_addr_t fw_info_pa;
50};
51
52#define VNIC_MAX_RES_HDR_SIZE \
53 (sizeof(struct vnic_resource_header) + \
54 sizeof(struct vnic_resource) * RES_TYPE_MAX)
55#define VNIC_RES_STRIDE 128
56
57void *vnic_dev_priv(struct vnic_dev *vdev)
58{
59 return vdev->priv;
60}
61
62static int vnic_dev_discover_res(struct vnic_dev *vdev,
63 struct vnic_dev_bar *bar)
64{
65 struct vnic_resource_header __iomem *rh;
66 struct vnic_resource __iomem *r;
67 u8 type;
68
69 if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
70 printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
71 return -EINVAL;
72 }
73
74 rh = bar->vaddr;
75 if (!rh) {
76 printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
77 return -EINVAL;
78 }
79
80 if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
81 ioread32(&rh->version) != VNIC_RES_VERSION) {
82 printk(KERN_ERR "vNIC BAR0 res magic/version error "
83 "exp (%lx/%lx) curr (%x/%x)\n",
84 VNIC_RES_MAGIC, VNIC_RES_VERSION,
85 ioread32(&rh->magic), ioread32(&rh->version));
86 return -EINVAL;
87 }
88
89 r = (struct vnic_resource __iomem *)(rh + 1);
90
91 while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
92
93 u8 bar_num = ioread8(&r->bar);
94 u32 bar_offset = ioread32(&r->bar_offset);
95 u32 count = ioread32(&r->count);
96 u32 len;
97
98 r++;
99
100		if (bar_num != 0)	/* only BAR0 resources are mapped */
101 continue;
102
103 switch (type) {
104 case RES_TYPE_WQ:
105 case RES_TYPE_RQ:
106 case RES_TYPE_CQ:
107 case RES_TYPE_INTR_CTRL:
108 /* each count is stride bytes long */
109 len = count * VNIC_RES_STRIDE;
110 if (len + bar_offset > bar->len) {
111 printk(KERN_ERR "vNIC BAR0 resource %d "
112 "out-of-bounds, offset 0x%x + "
113 "size 0x%x > bar len 0x%lx\n",
114 type, bar_offset,
115 len,
116 bar->len);
117 return -EINVAL;
118 }
119 break;
120 case RES_TYPE_INTR_PBA_LEGACY:
121 case RES_TYPE_DEVCMD:
122 len = count;
123 break;
124 default:
125 continue;
126 }
127
128 vdev->res[type].count = count;
129 vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
130 }
131
132 return 0;
133}
134
135unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
136 enum vnic_res_type type)
137{
138 return vdev->res[type].count;
139}
140
141void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
142 unsigned int index)
143{
144 if (!vdev->res[type].vaddr)
145 return NULL;
146
147 switch (type) {
148 case RES_TYPE_WQ:
149 case RES_TYPE_RQ:
150 case RES_TYPE_CQ:
151 case RES_TYPE_INTR_CTRL:
152 return (char __iomem *)vdev->res[type].vaddr +
153 index * VNIC_RES_STRIDE;
154 default:
155 return (char __iomem *)vdev->res[type].vaddr;
156 }
157}
158
159unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
160 unsigned int desc_count,
161 unsigned int desc_size)
162{
163 /* The base address of the desc rings must be 512 byte aligned.
164 * Descriptor count is aligned to groups of 32 descriptors. A
165 * count of 0 means the maximum 4096 descriptors. Descriptor
166 * size is aligned to 16 bytes.
167 */
168
169 unsigned int count_align = 32;
170 unsigned int desc_align = 16;
171
172 ring->base_align = 512;
173
174 if (desc_count == 0)
175 desc_count = 4096;
176
177 ring->desc_count = ALIGN(desc_count, count_align);
178
179 ring->desc_size = ALIGN(desc_size, desc_align);
180
181 ring->size = ring->desc_count * ring->desc_size;
182 ring->size_unaligned = ring->size + ring->base_align;
183
184 return ring->size_unaligned;
185}
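
Plugging in numbers makes the rounding visible. Asking for 100 descriptors of 11 bytes each, for example:

	struct vnic_dev_ring ring;
	unsigned int bytes = vnic_dev_desc_ring_size(&ring, 100, 11);
	/* ring.desc_count = ALIGN(100, 32) = 128
	 * ring.desc_size  = ALIGN(11, 16)  = 16
	 * ring.size = 128 * 16 = 2048
	 * bytes = ring.size_unaligned = 2048 + 512 = 2560 */
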
186
187void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
188{
189 memset(ring->descs, 0, ring->size);
190}
191
192int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
193 unsigned int desc_count, unsigned int desc_size)
194{
195 vnic_dev_desc_ring_size(ring, desc_count, desc_size);
196
197 ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
198 ring->size_unaligned,
199 &ring->base_addr_unaligned);
200
201 if (!ring->descs_unaligned) {
202 printk(KERN_ERR
203 "Failed to allocate ring (size=%d), aborting\n",
204 (int)ring->size);
205 return -ENOMEM;
206 }
207
208 ring->base_addr = ALIGN(ring->base_addr_unaligned,
209 ring->base_align);
210 ring->descs = (u8 *)ring->descs_unaligned +
211 (ring->base_addr - ring->base_addr_unaligned);
212
213 vnic_dev_clear_desc_ring(ring);
214
215 ring->desc_avail = ring->desc_count - 1;
216
217 return 0;
218}
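
The alignment trick above is generic: over-allocate by base_align, round the bus address up, and shift the CPU pointer by the same delta so both views describe the same bytes. In isolation (raw_pa and raw_va stand for whatever the DMA allocator returned):

	dma_addr_t aligned_pa = ALIGN(raw_pa, 512);
	void *aligned_va = (u8 *)raw_va + (aligned_pa - raw_pa);
	/* aligned_pa is 512-byte aligned; aligned_va maps the same byte */
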
219
220void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
221{
222 if (ring->descs) {
223 pci_free_consistent(vdev->pdev,
224 ring->size_unaligned,
225 ring->descs_unaligned,
226 ring->base_addr_unaligned);
227 ring->descs = NULL;
228 }
229}
230
231int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
232 u64 *a0, u64 *a1, int wait)
233{
234 struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
235 int delay;
236 u32 status;
237 int dev_cmd_err[] = {
238 /* convert from fw's version of error.h to host's version */
239 0, /* ERR_SUCCESS */
240 EINVAL, /* ERR_EINVAL */
241 EFAULT, /* ERR_EFAULT */
242 EPERM, /* ERR_EPERM */
243 EBUSY, /* ERR_EBUSY */
244 };
245 int err;
246
247 status = ioread32(&devcmd->status);
248 if (status & STAT_BUSY) {
249 printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
250 return -EBUSY;
251 }
252
253 if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
254 writeq(*a0, &devcmd->args[0]);
255 writeq(*a1, &devcmd->args[1]);
256 wmb();
257 }
258
259 iowrite32(cmd, &devcmd->cmd);
260
261 if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
262 return 0;
263
264 for (delay = 0; delay < wait; delay++) {
265
266 udelay(100);
267
268 status = ioread32(&devcmd->status);
269 if (!(status & STAT_BUSY)) {
270
271 if (status & STAT_ERROR) {
272 err = dev_cmd_err[(int)readq(&devcmd->args[0])];
273 printk(KERN_ERR "Error %d devcmd %d\n",
274 err, _CMD_N(cmd));
275 return -err;
276 }
277
278 if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
279 rmb();
280 *a0 = readq(&devcmd->args[0]);
281 *a1 = readq(&devcmd->args[1]);
282 }
283
284 return 0;
285 }
286 }
287
288	printk(KERN_ERR "Timed out devcmd %d\n", _CMD_N(cmd));
289 return -ETIMEDOUT;
290}
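
Callers pass arguments in and get results back through a0/a1, with wait acting as a poll budget in 100us steps (wait = 1000 bounds the command at roughly 100ms). A typical read-direction call, mirroring vnic_dev_open_done() below:

	int err, done;
	u64 a0 = 0, a1 = 0;
	int wait = 1000;	/* poll budget: 1000 * 100us = ~100ms */

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (!err)
		done = (a0 == 0);	/* read results come back in a0/a1 */
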
291
292int vnic_dev_fw_info(struct vnic_dev *vdev,
293 struct vnic_devcmd_fw_info **fw_info)
294{
295 u64 a0, a1 = 0;
296 int wait = 1000;
297 int err = 0;
298
299 if (!vdev->fw_info) {
300 vdev->fw_info = pci_alloc_consistent(vdev->pdev,
301 sizeof(struct vnic_devcmd_fw_info),
302 &vdev->fw_info_pa);
303 if (!vdev->fw_info)
304 return -ENOMEM;
305
306 a0 = vdev->fw_info_pa;
307
308 /* only get fw_info once and cache it */
309 err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
310 }
311
312 *fw_info = vdev->fw_info;
313
314 return err;
315}
316
317int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
318 void *value)
319{
320 u64 a0, a1;
321 int wait = 1000;
322 int err;
323
324 a0 = offset;
325 a1 = size;
326
327 err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
328
329 switch (size) {
330 case 1:
331 *(u8 *)value = (u8)a0;
332 break;
333 case 2:
334 *(u16 *)value = (u16)a0;
335 break;
336 case 4:
337 *(u32 *)value = (u32)a0;
338 break;
339 case 8:
340 *(u64 *)value = a0;
341 break;
342 default:
343 BUG();
344 break;
345 }
346
347 return err;
348}
349
350int vnic_dev_stats_clear(struct vnic_dev *vdev)
351{
352 u64 a0 = 0, a1 = 0;
353 int wait = 1000;
354 return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
355}
356
357int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
358{
359 u64 a0, a1;
360 int wait = 1000;
361
362 if (!vdev->stats) {
363 vdev->stats = pci_alloc_consistent(vdev->pdev,
364 sizeof(struct vnic_stats), &vdev->stats_pa);
365 if (!vdev->stats)
366 return -ENOMEM;
367 }
368
369 *stats = vdev->stats;
370 a0 = vdev->stats_pa;
371 a1 = sizeof(struct vnic_stats);
372
373 return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
374}
375
376int vnic_dev_close(struct vnic_dev *vdev)
377{
378 u64 a0 = 0, a1 = 0;
379 int wait = 1000;
380 return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
381}
382
383int vnic_dev_enable(struct vnic_dev *vdev)
384{
385 u64 a0 = 0, a1 = 0;
386 int wait = 1000;
387 return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
388}
389
390int vnic_dev_disable(struct vnic_dev *vdev)
391{
392 u64 a0 = 0, a1 = 0;
393 int wait = 1000;
394 return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
395}
396
397int vnic_dev_open(struct vnic_dev *vdev, int arg)
398{
399 u64 a0 = (u32)arg, a1 = 0;
400 int wait = 1000;
401 return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
402}
403
404int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
405{
406 u64 a0 = 0, a1 = 0;
407 int wait = 1000;
408 int err;
409
410 *done = 0;
411
412 err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
413 if (err)
414 return err;
415
416 *done = (a0 == 0);
417
418 return 0;
419}
420
421int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
422{
423 u64 a0 = (u32)arg, a1 = 0;
424 int wait = 1000;
425 return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
426}
427
428int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
429{
430 u64 a0 = 0, a1 = 0;
431 int wait = 1000;
432 int err;
433
434 *done = 0;
435
436 err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
437 if (err)
438 return err;
439
440 *done = (a0 == 0);
441
442 return 0;
443}
444
445int vnic_dev_hang_notify(struct vnic_dev *vdev)
446{
447 u64 a0, a1;
448 int wait = 1000;
449 return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
450}
451
452int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
453{
454 u64 a0, a1;
455 int wait = 1000;
456 int err, i;
457
458 for (i = 0; i < ETH_ALEN; i++)
459 mac_addr[i] = 0;
460
461 err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
462 if (err)
463 return err;
464
465 for (i = 0; i < ETH_ALEN; i++)
466 mac_addr[i] = ((u8 *)&a0)[i];
467
468 return 0;
469}
470
471void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
472 int broadcast, int promisc, int allmulti)
473{
474 u64 a0, a1 = 0;
475 int wait = 1000;
476 int err;
477
478 a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
479 (multicast ? CMD_PFILTER_MULTICAST : 0) |
480 (broadcast ? CMD_PFILTER_BROADCAST : 0) |
481 (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
482 (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
483
484 err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
485 if (err)
486 printk(KERN_ERR "Can't set packet filter\n");
487}
488
489void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
490{
491 u64 a0 = 0, a1 = 0;
492 int wait = 1000;
493 int err;
494 int i;
495
496 for (i = 0; i < ETH_ALEN; i++)
497 ((u8 *)&a0)[i] = addr[i];
498
499 err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
500 if (err)
501 printk(KERN_ERR
502 "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
503 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
504 err);
505}
506
507void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
508{
509 u64 a0 = 0, a1 = 0;
510 int wait = 1000;
511 int err;
512 int i;
513
514 for (i = 0; i < ETH_ALEN; i++)
515 ((u8 *)&a0)[i] = addr[i];
516
517 err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
518 if (err)
519 printk(KERN_ERR
520 "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
521 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
522 err);
523}
524
525int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
526{
527 u64 a0, a1;
528 int wait = 1000;
529
530 if (!vdev->notify) {
531 vdev->notify = pci_alloc_consistent(vdev->pdev,
532 sizeof(struct vnic_devcmd_notify),
533 &vdev->notify_pa);
534 if (!vdev->notify)
535 return -ENOMEM;
536 }
537
538 a0 = vdev->notify_pa;
539 a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
540 a1 += sizeof(struct vnic_devcmd_notify);
541
542 return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
543}
544
545void vnic_dev_notify_unset(struct vnic_dev *vdev)
546{
547 u64 a0, a1;
548 int wait = 1000;
549
550 a0 = 0; /* paddr = 0 to unset notify buffer */
551 a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
552 a1 += sizeof(struct vnic_devcmd_notify);
553
554 vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
555}
556
557static int vnic_dev_notify_ready(struct vnic_dev *vdev)
558{
559 u32 *words;
560 unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
561 unsigned int i;
562 u32 csum;
563
564 if (!vdev->notify)
565 return 0;
566
567 do {
568 csum = 0;
569 memcpy(&vdev->notify_copy, vdev->notify,
570 sizeof(struct vnic_devcmd_notify));
571 words = (u32 *)&vdev->notify_copy;
572 for (i = 1; i < nwords; i++)
573 csum += words[i];
574 } while (csum != words[0]);
575
576 return 1;
577}
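
This is a lock-free, seqlock-flavored snapshot: the device is expected to keep words[0] equal to the sum of the remaining words, so the driver simply re-copies until the checksum matches, ruling out a torn read. The implied writer-side contract, sketched here as an assumption rather than actual firmware code:

	u32 csum = 0;
	unsigned int i;

	for (i = 1; i < nwords; i++)
		csum += words[i];
	words[0] = csum;		/* publish the checksum last */
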
578
579int vnic_dev_init(struct vnic_dev *vdev, int arg)
580{
581 u64 a0 = (u32)arg, a1 = 0;
582 int wait = 1000;
583 return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
584}
585
586int vnic_dev_link_status(struct vnic_dev *vdev)
587{
588 if (vdev->linkstatus)
589 return *vdev->linkstatus;
590
591 if (!vnic_dev_notify_ready(vdev))
592 return 0;
593
594 return vdev->notify_copy.link_state;
595}
596
597u32 vnic_dev_port_speed(struct vnic_dev *vdev)
598{
599 if (!vnic_dev_notify_ready(vdev))
600 return 0;
601
602 return vdev->notify_copy.port_speed;
603}
604
605u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
606{
607 if (!vnic_dev_notify_ready(vdev))
608 return 0;
609
610 return vdev->notify_copy.msglvl;
611}
612
613u32 vnic_dev_mtu(struct vnic_dev *vdev)
614{
615 if (!vnic_dev_notify_ready(vdev))
616 return 0;
617
618 return vdev->notify_copy.mtu;
619}
620
621u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
622{
623 if (!vnic_dev_notify_ready(vdev))
624 return 0;
625
626 return vdev->notify_copy.link_down_cnt;
627}
628
629void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
630 enum vnic_dev_intr_mode intr_mode)
631{
632 vdev->intr_mode = intr_mode;
633}
634
635enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
636 struct vnic_dev *vdev)
637{
638 return vdev->intr_mode;
639}
640
641void vnic_dev_unregister(struct vnic_dev *vdev)
642{
643 if (vdev) {
644 if (vdev->notify)
645 pci_free_consistent(vdev->pdev,
646 sizeof(struct vnic_devcmd_notify),
647 vdev->notify,
648 vdev->notify_pa);
649 if (vdev->linkstatus)
650 pci_free_consistent(vdev->pdev,
651 sizeof(u32),
652 vdev->linkstatus,
653 vdev->linkstatus_pa);
654 if (vdev->stats)
655 pci_free_consistent(vdev->pdev,
656				sizeof(struct vnic_stats),
657 vdev->stats, vdev->stats_pa);
658 if (vdev->fw_info)
659 pci_free_consistent(vdev->pdev,
660 sizeof(struct vnic_devcmd_fw_info),
661 vdev->fw_info, vdev->fw_info_pa);
662 kfree(vdev);
663 }
664}
665
666struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
667 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
668{
669 if (!vdev) {
670 vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
671 if (!vdev)
672 return NULL;
673 }
674
675 vdev->priv = priv;
676 vdev->pdev = pdev;
677
678 if (vnic_dev_discover_res(vdev, bar))
679 goto err_out;
680
681 vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
682 if (!vdev->devcmd)
683 goto err_out;
684
685 return vdev;
686
687err_out:
688 vnic_dev_unregister(vdev);
689 return NULL;
690}
diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h
new file mode 100644
index 00000000000..f9935a8a5a0
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_dev.h
@@ -0,0 +1,161 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_DEV_H_
19#define _VNIC_DEV_H_
20
21#include "vnic_resource.h"
22#include "vnic_devcmd.h"
23
24/*
25 * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
26 * Driver) when both are built with CONFIG options =y
27 */
28#define vnic_dev_priv fnic_dev_priv
29#define vnic_dev_get_res_count fnic_dev_get_res_count
30#define vnic_dev_get_res fnic_dev_get_res
31#define vnic_dev_desc_ring_size	fnic_dev_desc_ring_size
32#define vnic_dev_clear_desc_ring fnic_dev_clear_desc_ring
33#define vnic_dev_alloc_desc_ring fnic_dev_alloc_desc_ring
34#define vnic_dev_free_desc_ring fnic_dev_free_desc_ring
35#define vnic_dev_cmd fnic_dev_cmd
36#define vnic_dev_fw_info fnic_dev_fw_info
37#define vnic_dev_spec fnic_dev_spec
38#define vnic_dev_stats_clear fnic_dev_stats_clear
39#define vnic_dev_stats_dump fnic_dev_stats_dump
40#define vnic_dev_hang_notify fnic_dev_hang_notify
41#define vnic_dev_packet_filter fnic_dev_packet_filter
42#define vnic_dev_add_addr fnic_dev_add_addr
43#define vnic_dev_del_addr fnic_dev_del_addr
44#define vnic_dev_mac_addr fnic_dev_mac_addr
45#define vnic_dev_notify_set fnic_dev_notify_set
46#define vnic_dev_notify_unset fnic_dev_notify_unset
47#define vnic_dev_link_status fnic_dev_link_status
48#define vnic_dev_port_speed fnic_dev_port_speed
49#define vnic_dev_msg_lvl fnic_dev_msg_lvl
50#define vnic_dev_mtu fnic_dev_mtu
51#define vnic_dev_link_down_cnt fnic_dev_link_down_cnt
52#define vnic_dev_close fnic_dev_close
53#define vnic_dev_enable fnic_dev_enable
54#define vnic_dev_disable fnic_dev_disable
55#define vnic_dev_open fnic_dev_open
56#define vnic_dev_open_done fnic_dev_open_done
57#define vnic_dev_init fnic_dev_init
58#define vnic_dev_soft_reset fnic_dev_soft_reset
59#define vnic_dev_soft_reset_done fnic_dev_soft_reset_done
60#define vnic_dev_set_intr_mode fnic_dev_set_intr_mode
61#define vnic_dev_get_intr_mode fnic_dev_get_intr_mode
62#define vnic_dev_unregister fnic_dev_unregister
63#define vnic_dev_register fnic_dev_register
64
65#ifndef VNIC_PADDR_TARGET
66#define VNIC_PADDR_TARGET 0x0000000000000000ULL
67#endif
68
69#ifndef readq
70static inline u64 readq(void __iomem *reg)
71{
72 return ((u64)readl(reg + 0x4UL) << 32) | (u64)readl(reg);
73}
74
75static inline void writeq(u64 val, void __iomem *reg)
76{
77 writel(val & 0xffffffff, reg);
78 writel(val >> 32, reg + 0x4UL);
79}
80#endif
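
On 32-bit builds without a native readq/writeq, the fallbacks above compose each 64-bit access from two 32-bit ones (low word first on write), which is not atomic; the assumption is that 64-bit registers such as ring bases are only programmed while the queue in question is quiesced, so the momentary torn value is never observed. For example:

	/* expands to writel(low) followed by writel(high) on 32-bit hosts */
	writeq(paddr, &cq->ctrl->ring_base);
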
81
82enum vnic_dev_intr_mode {
83 VNIC_DEV_INTR_MODE_UNKNOWN,
84 VNIC_DEV_INTR_MODE_INTX,
85 VNIC_DEV_INTR_MODE_MSI,
86 VNIC_DEV_INTR_MODE_MSIX,
87};
88
89struct vnic_dev_bar {
90 void __iomem *vaddr;
91 dma_addr_t bus_addr;
92 unsigned long len;
93};
94
95struct vnic_dev_ring {
96 void *descs;
97 size_t size;
98 dma_addr_t base_addr;
99 size_t base_align;
100 void *descs_unaligned;
101 size_t size_unaligned;
102 dma_addr_t base_addr_unaligned;
103 unsigned int desc_size;
104 unsigned int desc_count;
105 unsigned int desc_avail;
106};
107
108struct vnic_dev;
109struct vnic_stats;
110
111void *vnic_dev_priv(struct vnic_dev *vdev);
112unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
113 enum vnic_res_type type);
114void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
115 unsigned int index);
116unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
117 unsigned int desc_count,
118 unsigned int desc_size);
119void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
120int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
121 unsigned int desc_count, unsigned int desc_size);
122void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
123 struct vnic_dev_ring *ring);
124int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
125 u64 *a0, u64 *a1, int wait);
126int vnic_dev_fw_info(struct vnic_dev *vdev,
127 struct vnic_devcmd_fw_info **fw_info);
128int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
129 unsigned int size, void *value);
130int vnic_dev_stats_clear(struct vnic_dev *vdev);
131int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
132int vnic_dev_hang_notify(struct vnic_dev *vdev);
133void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
134 int broadcast, int promisc, int allmulti);
135void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
136void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
137int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
138int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
139void vnic_dev_notify_unset(struct vnic_dev *vdev);
140int vnic_dev_link_status(struct vnic_dev *vdev);
141u32 vnic_dev_port_speed(struct vnic_dev *vdev);
142u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
143u32 vnic_dev_mtu(struct vnic_dev *vdev);
144u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev);
145int vnic_dev_close(struct vnic_dev *vdev);
146int vnic_dev_enable(struct vnic_dev *vdev);
147int vnic_dev_disable(struct vnic_dev *vdev);
148int vnic_dev_open(struct vnic_dev *vdev, int arg);
149int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
150int vnic_dev_init(struct vnic_dev *vdev, int arg);
151int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
152int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
153void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
154 enum vnic_dev_intr_mode intr_mode);
155enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
156void vnic_dev_unregister(struct vnic_dev *vdev);
157struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
158 void *priv, struct pci_dev *pdev,
159 struct vnic_dev_bar *bar);
160
161#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
new file mode 100644
index 00000000000..d62b9061bf1
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -0,0 +1,281 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_DEVCMD_H_
19#define _VNIC_DEVCMD_H_
20
21#define _CMD_NBITS 14
22#define _CMD_VTYPEBITS 10
23#define _CMD_FLAGSBITS 6
24#define _CMD_DIRBITS 2
25
26#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
27#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
28#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
29#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
30
31#define _CMD_NSHIFT 0
32#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
33#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
34#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
35
36/*
37 * Direction bits (from host perspective).
38 */
39#define _CMD_DIR_NONE 0U
40#define _CMD_DIR_WRITE 1U
41#define _CMD_DIR_READ 2U
42#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
43
44/*
45 * Flag bits.
46 */
47#define _CMD_FLAGS_NONE 0U
48#define _CMD_FLAGS_NOWAIT 1U
49
50/*
51 * vNIC type bits.
52 */
53#define _CMD_VTYPE_NONE 0U
54#define _CMD_VTYPE_ENET 1U
55#define _CMD_VTYPE_FC 2U
56#define _CMD_VTYPE_SCSI 4U
57#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
58
59/*
60 * Used to create cmds.
61 */
62#define _CMDCF(dir, flags, vtype, nr) \
63 (((dir) << _CMD_DIRSHIFT) | \
64 ((flags) << _CMD_FLAGSSHIFT) | \
65 ((vtype) << _CMD_VTYPESHIFT) | \
66 ((nr) << _CMD_NSHIFT))
67#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
68#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
69
70/*
71 * Used to decode cmds.
72 */
73#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
74#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
75#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
76#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
77
78enum vnic_devcmd_cmd {
79 CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
80
81 /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
82 CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
83
84 /* dev-specific block member:
85 * in: (u16)a0=offset,(u8)a1=size
86 * out: a0=value */
87 CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
88
89 /* stats clear */
90 CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
91
92 /* stats dump in mem: (u64)a0=paddr to stats area,
93 * (u16)a1=sizeof stats area */
94 CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
95
96 /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
97 CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
98
99 /* hang detection notification */
100 CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
101
102 /* MAC address in (u48)a0 */
103 CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
104 _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
105
106 /* disable/enable promisc mode: (u8)a0=0/1 */
107/***** XXX DEPRECATED *****/
108 CMD_PROMISC_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10),
109
110 /* disable/enable all-multi mode: (u8)a0=0/1 */
111/***** XXX DEPRECATED *****/
112 CMD_ALLMULTI_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11),
113
114 /* add addr from (u48)a0 */
115 CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
116 _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),
117
118 /* del addr from (u48)a0 */
119 CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE,
120 _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
121
122 /* add VLAN id in (u16)a0 */
123 CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
124
125 /* del VLAN id in (u16)a0 */
126 CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
127
128 /* nic_cfg in (u32)a0 */
129 CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
130
131 /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
132 CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
133
134 /* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
135 CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
136
137 /* initiate softreset */
138 CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),
139
140 /* softreset status:
141 * out: a0=0 reset complete, a0=1 reset in progress */
142 CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),
143
144 /* set struct vnic_devcmd_notify buffer in mem:
145 * in:
146 * (u64)a0=paddr to notify (set paddr=0 to unset)
147 * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
148	 * (u16)(a1 >> 32)=intr num (-1 for no intr)
149 * out:
150 * (u32)a1 = effective size
151 */
152 CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
153
154 /* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
155 * (u8)a1=PXENV_UNDI_xxx */
156 CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
157
158 /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
159 CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
160
161 /* open status:
162 * out: a0=0 open complete, a0=1 open in progress */
163 CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
164
165 /* close vnic */
166 CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
167
168 /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
169 CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
170
171 /* variant of CMD_INIT, with provisioning info
172 * (u64)a0=paddr of vnic_devcmd_provinfo
173 * (u32)a1=sizeof provision info */
174 CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
175
176 /* enable virtual link */
177 CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
178
179 /* disable virtual link */
180 CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
181
182 /* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
183 CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
184
185 /* init status:
186 * out: a0=0 init complete, a0=1 init in progress
187 * if a0=0, a1=errno */
188 CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
189
190 /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
191 * (u8)a1=INT13_CMD_xxx */
192 CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
193
194 /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
195 CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
196
197 /* undo initialize of virtual link */
198 CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
199};
200
201/* flags for CMD_OPEN */
202#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
203
204/* flags for CMD_INIT */
205#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
206
207/* flags for CMD_PACKET_FILTER */
208#define CMD_PFILTER_DIRECTED 0x01
209#define CMD_PFILTER_MULTICAST 0x02
210#define CMD_PFILTER_BROADCAST 0x04
211#define CMD_PFILTER_PROMISCUOUS 0x08
212#define CMD_PFILTER_ALL_MULTICAST 0x10
213
214enum vnic_devcmd_status {
215 STAT_NONE = 0,
216 STAT_BUSY = 1 << 0, /* cmd in progress */
217 STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
218};
219
220enum vnic_devcmd_error {
221 ERR_SUCCESS = 0,
222 ERR_EINVAL = 1,
223 ERR_EFAULT = 2,
224 ERR_EPERM = 3,
225 ERR_EBUSY = 4,
226 ERR_ECMDUNKNOWN = 5,
227 ERR_EBADSTATE = 6,
228 ERR_ENOMEM = 7,
229 ERR_ETIMEDOUT = 8,
230 ERR_ELINKDOWN = 9,
231};
232
233struct vnic_devcmd_fw_info {
234 char fw_version[32];
235 char fw_build[32];
236 char hw_version[32];
237 char hw_serial_number[32];
238};
239
240struct vnic_devcmd_notify {
241 u32 csum; /* checksum over following words */
242
243 u32 link_state; /* link up == 1 */
244 u32 port_speed; /* effective port speed (rate limit) */
245 u32 mtu; /* MTU */
246 u32 msglvl; /* requested driver msg lvl */
247 u32 uif; /* uplink interface */
248 u32 status; /* status bits (see VNIC_STF_*) */
249 u32 error; /* error code (see ERR_*) for first ERR */
250 u32 link_down_cnt; /* running count of link down transitions */
251};
252#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
253
254struct vnic_devcmd_provinfo {
255 u8 oui[3];
256 u8 type;
257 u8 data[0];
258};
259
260/*
261 * Writing cmd register causes STAT_BUSY to get set in status register.
262 * When cmd completes, STAT_BUSY will be cleared.
263 *
264 * If cmd completed successfully STAT_ERROR will be clear
265 * and args registers contain cmd-specific results.
266 *
267 * If cmd error, STAT_ERROR will be set and args[0] contains error code.
268 *
269 * status register is read-only. While STAT_BUSY is set,
270 * all other register contents are read-only.
271 */
272
273/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
274#define VNIC_DEVCMD_NARGS 15
275struct vnic_devcmd {
276 u32 status; /* RO */
277 u32 cmd; /* RW */
278 u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
279};
280
281#endif /* _VNIC_DEVCMD_H_ */
diff --git a/drivers/scsi/fnic/vnic_intr.c b/drivers/scsi/fnic/vnic_intr.c
new file mode 100644
index 00000000000..4f4dc8793d2
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_intr.c
@@ -0,0 +1,60 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/types.h>
22#include <linux/pci.h>
23#include <linux/delay.h>
24#include "vnic_dev.h"
25#include "vnic_intr.h"
26
27void vnic_intr_free(struct vnic_intr *intr)
28{
29 intr->ctrl = NULL;
30}
31
32int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
33 unsigned int index)
34{
35 intr->index = index;
36 intr->vdev = vdev;
37
38 intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
39 if (!intr->ctrl) {
40 printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n",
41 index);
42 return -EINVAL;
43 }
44
45 return 0;
46}
47
48void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
49 unsigned int coalescing_type, unsigned int mask_on_assertion)
50{
51 iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
52 iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
53 iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
54 iowrite32(0, &intr->ctrl->int_credits);
55}
56
57void vnic_intr_clean(struct vnic_intr *intr)
58{
59 iowrite32(0, &intr->ctrl->int_credits);
60}
diff --git a/drivers/scsi/fnic/vnic_intr.h b/drivers/scsi/fnic/vnic_intr.h
new file mode 100644
index 00000000000..d5fb40e7c98
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_intr.h
@@ -0,0 +1,118 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_INTR_H_
19#define _VNIC_INTR_H_
20
21#include <linux/pci.h>
22#include "vnic_dev.h"
23
24/*
25 * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
26 * Driver) when both are built with CONFIG options =y
27 */
28#define vnic_intr_unmask fnic_intr_unmask
29#define vnic_intr_mask fnic_intr_mask
30#define vnic_intr_return_credits fnic_intr_return_credits
31#define vnic_intr_credits fnic_intr_credits
32#define vnic_intr_return_all_credits fnic_intr_return_all_credits
33#define vnic_intr_legacy_pba fnic_intr_legacy_pba
34#define vnic_intr_free fnic_intr_free
35#define vnic_intr_alloc fnic_intr_alloc
36#define vnic_intr_init fnic_intr_init
37#define vnic_intr_clean fnic_intr_clean
38
39#define VNIC_INTR_TIMER_MAX 0xffff
40
41#define VNIC_INTR_TIMER_TYPE_ABS 0
42#define VNIC_INTR_TIMER_TYPE_QUIET 1
43
44/* Interrupt control */
45struct vnic_intr_ctrl {
46 u32 coalescing_timer; /* 0x00 */
47 u32 pad0;
48 u32 coalescing_value; /* 0x08 */
49 u32 pad1;
50 u32 coalescing_type; /* 0x10 */
51 u32 pad2;
52 u32 mask_on_assertion; /* 0x18 */
53 u32 pad3;
54 u32 mask; /* 0x20 */
55 u32 pad4;
56 u32 int_credits; /* 0x28 */
57 u32 pad5;
58 u32 int_credit_return; /* 0x30 */
59 u32 pad6;
60};
61
62struct vnic_intr {
63 unsigned int index;
64 struct vnic_dev *vdev;
65 struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */
66};
67
68static inline void vnic_intr_unmask(struct vnic_intr *intr)
69{
70 iowrite32(0, &intr->ctrl->mask);
71}
72
73static inline void vnic_intr_mask(struct vnic_intr *intr)
74{
75 iowrite32(1, &intr->ctrl->mask);
76}
77
78static inline void vnic_intr_return_credits(struct vnic_intr *intr,
79 unsigned int credits, int unmask, int reset_timer)
80{
81#define VNIC_INTR_UNMASK_SHIFT 16
82#define VNIC_INTR_RESET_TIMER_SHIFT 17
83
84 u32 int_credit_return = (credits & 0xffff) |
85 (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
86 (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
87
88 iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
89}
90
91static inline unsigned int vnic_intr_credits(struct vnic_intr *intr)
92{
93 return ioread32(&intr->ctrl->int_credits);
94}
95
96static inline void vnic_intr_return_all_credits(struct vnic_intr *intr)
97{
98 unsigned int credits = vnic_intr_credits(intr);
99 int unmask = 1;
100 int reset_timer = 1;
101
102 vnic_intr_return_credits(intr, credits, unmask, reset_timer);
103}
104
105static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
106{
107 /* read PBA without clearing */
108 return ioread32(legacy_pba);
109}
110
111void vnic_intr_free(struct vnic_intr *intr);
112int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
113 unsigned int index);
114void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
115 unsigned int coalescing_type, unsigned int mask_on_assertion);
116void vnic_intr_clean(struct vnic_intr *intr);
117
118#endif /* _VNIC_INTR_H_ */
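
A typical consumer acks everything it serviced from interrupt context by handing the credits back, optionally unmasking and restarting the coalescing timer in the same register write. A hypothetical MSI handler sketch (the name and wiring are illustrative, not from this patch):

	static irqreturn_t my_msi_handler(int irq, void *data)
	{
		struct vnic_intr *intr = data;

		/* ... drain the associated completion queue(s) ... */

		vnic_intr_return_all_credits(intr);	/* ack + unmask */
		return IRQ_HANDLED;
	}
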
diff --git a/drivers/scsi/fnic/vnic_nic.h b/drivers/scsi/fnic/vnic_nic.h
new file mode 100644
index 00000000000..f15b83eeace
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_nic.h
@@ -0,0 +1,69 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_NIC_H_
19#define _VNIC_NIC_H_
20
21/*
22 * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
23 * Driver) when both are built with CONFIG options =y
24 */
25#define vnic_set_nic_cfg fnic_set_nic_cfg
26
27#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL
28#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0
29#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8)
30#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL
31#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8
32#define NIC_CFG_RSS_HASH_BITS (7UL << 16)
33#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL
34#define NIC_CFG_RSS_HASH_BITS_SHIFT 16
35#define NIC_CFG_RSS_BASE_CPU (7UL << 19)
36#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL
37#define NIC_CFG_RSS_BASE_CPU_SHIFT 19
38#define NIC_CFG_RSS_ENABLE (1UL << 22)
39#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL
40#define NIC_CFG_RSS_ENABLE_SHIFT 22
41#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23)
42#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL
43#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23
44#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24)
45#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
46#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
47
48static inline void vnic_set_nic_cfg(u32 *nic_cfg,
49 u8 rss_default_cpu, u8 rss_hash_type,
50 u8 rss_hash_bits, u8 rss_base_cpu,
51 u8 rss_enable, u8 tso_ipid_split_en,
52 u8 ig_vlan_strip_en)
53{
54 *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
55 ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
56 << NIC_CFG_RSS_HASH_TYPE_SHIFT) |
57 ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
58 << NIC_CFG_RSS_HASH_BITS_SHIFT) |
59 ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
60 << NIC_CFG_RSS_BASE_CPU_SHIFT) |
61 ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
62 << NIC_CFG_RSS_ENABLE_SHIFT) |
63 ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
64 << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
65 ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
66 << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
67}
68
69#endif /* _VNIC_NIC_H_ */
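
vnic_set_nic_cfg() is a pure bitfield packer: each argument is masked to its field width and shifted into place. For instance, enabling only ingress VLAN stripping:

	u32 nic_cfg;

	vnic_set_nic_cfg(&nic_cfg, 0, 0, 0, 0, 0, 0, 1);
	/* nic_cfg == NIC_CFG_IG_VLAN_STRIP_EN == 1UL << 24 */
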
diff --git a/drivers/scsi/fnic/vnic_resource.h b/drivers/scsi/fnic/vnic_resource.h
new file mode 100644
index 00000000000..2d842f79d41
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_resource.h
@@ -0,0 +1,61 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_RESOURCE_H_
19#define _VNIC_RESOURCE_H_
20
21#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
22#define VNIC_RES_VERSION 0x00000000L
23
24/* vNIC resource types */
25enum vnic_res_type {
26 RES_TYPE_EOL, /* End-of-list */
27 RES_TYPE_WQ, /* Work queues */
28 RES_TYPE_RQ, /* Receive queues */
29 RES_TYPE_CQ, /* Completion queues */
30 RES_TYPE_RSVD1,
31 RES_TYPE_NIC_CFG, /* Enet NIC config registers */
32 RES_TYPE_RSVD2,
33 RES_TYPE_RSVD3,
34 RES_TYPE_RSVD4,
35 RES_TYPE_RSVD5,
36 RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
37 RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
38 RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
39 RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */
40 RES_TYPE_RSVD6,
41 RES_TYPE_RSVD7,
42 RES_TYPE_DEVCMD, /* Device command region */
43 RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
44
45 RES_TYPE_MAX, /* Count of resource types */
46};
47
48struct vnic_resource_header {
49 u32 magic;
50 u32 version;
51};
52
53struct vnic_resource {
54 u8 type;
55 u8 bar;
56 u8 pad[2];
57 u32 bar_offset;
58 u32 count;
59};
60
61#endif /* _VNIC_RESOURCE_H_ */
diff --git a/drivers/scsi/fnic/vnic_rq.c b/drivers/scsi/fnic/vnic_rq.c
new file mode 100644
index 00000000000..bedd0d28563
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_rq.c
@@ -0,0 +1,196 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18
19#include <linux/errno.h>
20#include <linux/types.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include "vnic_dev.h"
24#include "vnic_rq.h"
25
26static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
27{
28 struct vnic_rq_buf *buf;
29 struct vnic_dev *vdev;
30 unsigned int i, j, count = rq->ring.desc_count;
31 unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
32
33 vdev = rq->vdev;
34
35 for (i = 0; i < blks; i++) {
36 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
37 if (!rq->bufs[i]) {
38 printk(KERN_ERR "Failed to alloc rq_bufs\n");
39 return -ENOMEM;
40 }
41 }
42
43 for (i = 0; i < blks; i++) {
44 buf = rq->bufs[i];
45 for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
46 buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
47 buf->desc = (u8 *)rq->ring.descs +
48 rq->ring.desc_size * buf->index;
49 if (buf->index + 1 == count) {
50 buf->next = rq->bufs[0];
51 break;
52 } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
53 buf->next = rq->bufs[i + 1];
54 } else {
55 buf->next = buf + 1;
56 buf++;
57 }
58 }
59 }
60
61 rq->to_use = rq->to_clean = rq->bufs[0];
62 rq->buf_index = 0;
63
64 return 0;
65}
66
67void vnic_rq_free(struct vnic_rq *rq)
68{
69 struct vnic_dev *vdev;
70 unsigned int i;
71
72 vdev = rq->vdev;
73
74 vnic_dev_free_desc_ring(vdev, &rq->ring);
75
76 for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
77 kfree(rq->bufs[i]);
78 rq->bufs[i] = NULL;
79 }
80
81 rq->ctrl = NULL;
82}
83
84int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
85 unsigned int desc_count, unsigned int desc_size)
86{
87 int err;
88
89 rq->index = index;
90 rq->vdev = vdev;
91
92 rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
93 if (!rq->ctrl) {
94 printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
95 return -EINVAL;
96 }
97
98 vnic_rq_disable(rq);
99
100 err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
101 if (err)
102 return err;
103
104 err = vnic_rq_alloc_bufs(rq);
105 if (err) {
106 vnic_rq_free(rq);
107 return err;
108 }
109
110 return 0;
111}
112
113void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
114 unsigned int error_interrupt_enable,
115 unsigned int error_interrupt_offset)
116{
117 u64 paddr;
118 u32 fetch_index;
119
120 paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
121 writeq(paddr, &rq->ctrl->ring_base);
122 iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
123 iowrite32(cq_index, &rq->ctrl->cq_index);
124 iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
125 iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
126 iowrite32(0, &rq->ctrl->dropped_packet_count);
127 iowrite32(0, &rq->ctrl->error_status);
128
129 /* Use current fetch_index as the ring starting point */
130 fetch_index = ioread32(&rq->ctrl->fetch_index);
131 rq->to_use = rq->to_clean =
132 &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
133 [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
134 iowrite32(fetch_index, &rq->ctrl->posted_index);
135
136 rq->buf_index = 0;
137}
138
139unsigned int vnic_rq_error_status(struct vnic_rq *rq)
140{
141 return ioread32(&rq->ctrl->error_status);
142}
143
144void vnic_rq_enable(struct vnic_rq *rq)
145{
146 iowrite32(1, &rq->ctrl->enable);
147}
148
149int vnic_rq_disable(struct vnic_rq *rq)
150{
151 unsigned int wait;
152
153 iowrite32(0, &rq->ctrl->enable);
154
155 /* Wait for HW to ACK disable request */
156 for (wait = 0; wait < 100; wait++) {
157 if (!(ioread32(&rq->ctrl->running)))
158 return 0;
159 udelay(1);
160 }
161
162 printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
163
164 return -ETIMEDOUT;
165}
166
167void vnic_rq_clean(struct vnic_rq *rq,
168 void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
169{
170 struct vnic_rq_buf *buf;
171 u32 fetch_index;
172
173 BUG_ON(ioread32(&rq->ctrl->enable));
174
175 buf = rq->to_clean;
176
177 while (vnic_rq_desc_used(rq) > 0) {
178
179 (*buf_clean)(rq, buf);
180
181 buf = rq->to_clean = buf->next;
182 rq->ring.desc_avail++;
183 }
184
185 /* Use current fetch_index as the ring starting point */
186 fetch_index = ioread32(&rq->ctrl->fetch_index);
187 rq->to_use = rq->to_clean =
188 &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
189 [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
190 iowrite32(fetch_index, &rq->ctrl->posted_index);
191
192 rq->buf_index = 0;
193
194 vnic_dev_clear_desc_ring(&rq->ring);
195}
196
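
vnic_rq_alloc_bufs() above chains fixed-size blocks of entries into one circular list: inside a block the next pointer is simply buf + 1, at a block boundary it hops to the next block's first entry, and the final entry points back to bufs[0]. The same construction in a standalone userspace sketch (illustrative only; the caller pre-allocates enough blocks for count > 0 entries):

    #define BLK_ENTRIES 64

    struct buf { struct buf *next; unsigned int index; };

    static struct buf *build_ring(struct buf **blks, unsigned int count)
    {
        unsigned int i, j;

        for (i = 0; ; i++) {
            struct buf *b = blks[i];
            for (j = 0; j < BLK_ENTRIES; j++, b++) {
                b->index = i * BLK_ENTRIES + j;
                if (b->index + 1 == count) {
                    b->next = blks[0]; /* last entry closes the circle */
                    return blks[0];
                }
                /* hop to the next block at a boundary, else step inside */
                b->next = (j + 1 == BLK_ENTRIES) ? blks[i + 1] : b + 1;
            }
        }
    }
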
diff --git a/drivers/scsi/fnic/vnic_rq.h b/drivers/scsi/fnic/vnic_rq.h
new file mode 100644
index 00000000000..aebdfbd6ad3
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_rq.h
@@ -0,0 +1,235 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_RQ_H_
19#define _VNIC_RQ_H_
20
21#include <linux/pci.h>
22#include "vnic_dev.h"
23#include "vnic_cq.h"
24
25/*
26 * These defines avoid a symbol clash between fnic and enic (the Cisco 10G
27 * Ethernet driver) when both drivers are built in (CONFIG options =y).
28 */
29#define vnic_rq_desc_avail fnic_rq_desc_avail
30#define vnic_rq_desc_used fnic_rq_desc_used
31#define vnic_rq_next_desc fnic_rq_next_desc
32#define vnic_rq_next_index fnic_rq_next_index
33#define vnic_rq_next_buf_index fnic_rq_next_buf_index
34#define vnic_rq_post fnic_rq_post
35#define vnic_rq_posting_soon fnic_rq_posting_soon
36#define vnic_rq_return_descs fnic_rq_return_descs
37#define vnic_rq_service fnic_rq_service
38#define vnic_rq_fill fnic_rq_fill
39#define vnic_rq_free fnic_rq_free
40#define vnic_rq_alloc fnic_rq_alloc
41#define vnic_rq_init fnic_rq_init
42#define vnic_rq_error_status fnic_rq_error_status
43#define vnic_rq_enable fnic_rq_enable
44#define vnic_rq_disable fnic_rq_disable
45#define vnic_rq_clean fnic_rq_clean
46
47/* Receive queue control */
48struct vnic_rq_ctrl {
49 u64 ring_base; /* 0x00 */
50 u32 ring_size; /* 0x08 */
51 u32 pad0;
52 u32 posted_index; /* 0x10 */
53 u32 pad1;
54 u32 cq_index; /* 0x18 */
55 u32 pad2;
56 u32 enable; /* 0x20 */
57 u32 pad3;
58 u32 running; /* 0x28 */
59 u32 pad4;
60 u32 fetch_index; /* 0x30 */
61 u32 pad5;
62 u32 error_interrupt_enable; /* 0x38 */
63 u32 pad6;
64 u32 error_interrupt_offset; /* 0x40 */
65 u32 pad7;
66 u32 error_status; /* 0x48 */
67 u32 pad8;
68 u32 dropped_packet_count; /* 0x50 */
69 u32 pad9;
70 u32 dropped_packet_count_rc; /* 0x58 */
71 u32 pad10;
72};
73
74/* Break the vnic_rq_buf allocations into blocks of 64 entries */
75#define VNIC_RQ_BUF_BLK_ENTRIES 64
76#define VNIC_RQ_BUF_BLK_SZ \
77 (VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
78#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
79 DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
80#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
81
82struct vnic_rq_buf {
83 struct vnic_rq_buf *next;
84 dma_addr_t dma_addr;
85 void *os_buf;
86 unsigned int os_buf_index;
87 unsigned int len;
88 unsigned int index;
89 void *desc;
90};
91
92struct vnic_rq {
93 unsigned int index;
94 struct vnic_dev *vdev;
95 struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
96 struct vnic_dev_ring ring;
97 struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
98 struct vnic_rq_buf *to_use;
99 struct vnic_rq_buf *to_clean;
100 void *os_buf_head;
101 unsigned int buf_index;
102 unsigned int pkts_outstanding;
103};
104
105static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
106{
107 /* how many does SW own? */
108 return rq->ring.desc_avail;
109}
110
111static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
112{
113 /* how many does HW own? */
114 return rq->ring.desc_count - rq->ring.desc_avail - 1;
115}
116
117static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
118{
119 return rq->to_use->desc;
120}
121
122static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
123{
124 return rq->to_use->index;
125}
126
127static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
128{
129 return rq->buf_index++;
130}
131
132static inline void vnic_rq_post(struct vnic_rq *rq,
133 void *os_buf, unsigned int os_buf_index,
134 dma_addr_t dma_addr, unsigned int len)
135{
136 struct vnic_rq_buf *buf = rq->to_use;
137
138 buf->os_buf = os_buf;
139 buf->os_buf_index = os_buf_index;
140 buf->dma_addr = dma_addr;
141 buf->len = len;
142
143 buf = buf->next;
144 rq->to_use = buf;
145 rq->ring.desc_avail--;
146
147 /* Write posted_index to hardware only every nth descriptor,
148 * batching doorbell updates (see VNIC_RQ_RETURN_RATE below). */
149
150#ifndef VNIC_RQ_RETURN_RATE
151#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */
152#endif
153
154 if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
155 /* Adding write memory barrier prevents compiler and/or CPU
156 * reordering, thus avoiding descriptor posting before
157 * descriptor is initialized. Otherwise, hardware can read
158 * stale descriptor fields.
159 */
160 wmb();
161 iowrite32(buf->index, &rq->ctrl->posted_index);
162 }
163}
164
165static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
166{
167 return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
168}
169
170static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
171{
172 rq->ring.desc_avail += count;
173}
174
175enum desc_return_options {
176 VNIC_RQ_RETURN_DESC,
177 VNIC_RQ_DEFER_RETURN_DESC,
178};
179
180static inline void vnic_rq_service(struct vnic_rq *rq,
181 struct cq_desc *cq_desc, u16 completed_index,
182 int desc_return, void (*buf_service)(struct vnic_rq *rq,
183 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
184 int skipped, void *opaque), void *opaque)
185{
186 struct vnic_rq_buf *buf;
187 int skipped;
188
189 buf = rq->to_clean;
190 while (1) {
191
192 skipped = (buf->index != completed_index);
193
194 (*buf_service)(rq, cq_desc, buf, skipped, opaque);
195
196 if (desc_return == VNIC_RQ_RETURN_DESC)
197 rq->ring.desc_avail++;
198
199 rq->to_clean = buf->next;
200
201 if (!skipped)
202 break;
203
204 buf = rq->to_clean;
205 }
206}
207
208static inline int vnic_rq_fill(struct vnic_rq *rq,
209 int (*buf_fill)(struct vnic_rq *rq))
210{
211 int err;
212
213 while (vnic_rq_desc_avail(rq) > 1) {
214
215 err = (*buf_fill)(rq);
216 if (err)
217 return err;
218 }
219
220 return 0;
221}
222
223void vnic_rq_free(struct vnic_rq *rq);
224int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
225 unsigned int desc_count, unsigned int desc_size);
226void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
227 unsigned int error_interrupt_enable,
228 unsigned int error_interrupt_offset);
229unsigned int vnic_rq_error_status(struct vnic_rq *rq);
230void vnic_rq_enable(struct vnic_rq *rq);
231int vnic_rq_disable(struct vnic_rq *rq);
232void vnic_rq_clean(struct vnic_rq *rq,
233 void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
234
235#endif /* _VNIC_RQ_H_ */
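
vnic_rq_post() above rings the posted_index doorbell only when buf->index & VNIC_RQ_RETURN_RATE is zero, so with the default 0xf mask one MMIO write covers 16 posted descriptors; the "keep 2^n - 1" note on the mask is what guarantees the AND test fires at an exact stride. A quick standalone check of that cadence:

    #include <stdio.h>

    #define RATE 0xf /* 2^4 - 1, as in VNIC_RQ_RETURN_RATE */

    int main(void)
    {
        unsigned int idx, doorbells = 0;

        for (idx = 0; idx < 64; idx++)
            if ((idx & RATE) == 0)
                doorbells++; /* where vnic_rq_post() does iowrite32() */

        printf("%u doorbells for 64 posts\n", doorbells); /* prints 4 */
        return 0;
    }
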
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
new file mode 100644
index 00000000000..46baa525400
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_SCSI_H_
19#define _VNIC_SCSI_H_
20
21#define VNIC_FNIC_WQ_COPY_COUNT_MIN 1
22#define VNIC_FNIC_WQ_COPY_COUNT_MAX 1
23
24#define VNIC_FNIC_WQ_DESCS_MIN 64
25#define VNIC_FNIC_WQ_DESCS_MAX 128
26
27#define VNIC_FNIC_WQ_COPY_DESCS_MIN 64
28#define VNIC_FNIC_WQ_COPY_DESCS_MAX 512
29
30#define VNIC_FNIC_RQ_DESCS_MIN 64
31#define VNIC_FNIC_RQ_DESCS_MAX 128
32
33#define VNIC_FNIC_EDTOV_MIN 1000
34#define VNIC_FNIC_EDTOV_MAX 255000
35#define VNIC_FNIC_EDTOV_DEF 2000
36
37#define VNIC_FNIC_RATOV_MIN 1000
38#define VNIC_FNIC_RATOV_MAX 255000
39
40#define VNIC_FNIC_MAXDATAFIELDSIZE_MIN 256
41#define VNIC_FNIC_MAXDATAFIELDSIZE_MAX 2112
42
43#define VNIC_FNIC_FLOGI_RETRIES_MIN 0
44#define VNIC_FNIC_FLOGI_RETRIES_MAX 0xffffffff
45#define VNIC_FNIC_FLOGI_RETRIES_DEF 0xffffffff
46
47#define VNIC_FNIC_FLOGI_TIMEOUT_MIN 1000
48#define VNIC_FNIC_FLOGI_TIMEOUT_MAX 255000
49
50#define VNIC_FNIC_PLOGI_RETRIES_MIN 0
51#define VNIC_FNIC_PLOGI_RETRIES_MAX 255
52#define VNIC_FNIC_PLOGI_RETRIES_DEF 8
53
54#define VNIC_FNIC_PLOGI_TIMEOUT_MIN 1000
55#define VNIC_FNIC_PLOGI_TIMEOUT_MAX 255000
56
57#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 256
58#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 4096
59
60#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN 0
61#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX 240000
62
63#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MIN 0
64#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX 240000
65
66#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MIN 0
67#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX 255
68
69#define VNIC_FNIC_LUNS_PER_TARGET_MIN 1
70#define VNIC_FNIC_LUNS_PER_TARGET_MAX 1024
71
72/* Device-specific region: scsi configuration */
73struct vnic_fc_config {
74 u64 node_wwn;
75 u64 port_wwn;
76 u32 flags;
77 u32 wq_enet_desc_count;
78 u32 wq_copy_desc_count;
79 u32 rq_desc_count;
80 u32 flogi_retries;
81 u32 flogi_timeout;
82 u32 plogi_retries;
83 u32 plogi_timeout;
84 u32 io_throttle_count;
85 u32 link_down_timeout;
86 u32 port_down_timeout;
87 u32 port_down_io_retries;
88 u32 luns_per_tgt;
89 u16 maxdatafieldsize;
90 u16 ed_tov;
91 u16 ra_tov;
92 u16 intr_timer;
93 u8 intr_timer_type;
94};
95
96#define VFCF_FCP_SEQ_LVL_ERR 0x1 /* Enable FCP-2 Error Recovery */
97#define VFCF_PERBI 0x2 /* persistent binding info available */
98
99#endif /* _VNIC_SCSI_H_ */
diff --git a/drivers/scsi/fnic/vnic_stats.h b/drivers/scsi/fnic/vnic_stats.h
new file mode 100644
index 00000000000..5372e23c1cb
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_stats.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_STATS_H_
19#define _VNIC_STATS_H_
20
21/* Tx statistics */
22struct vnic_tx_stats {
23 u64 tx_frames_ok;
24 u64 tx_unicast_frames_ok;
25 u64 tx_multicast_frames_ok;
26 u64 tx_broadcast_frames_ok;
27 u64 tx_bytes_ok;
28 u64 tx_unicast_bytes_ok;
29 u64 tx_multicast_bytes_ok;
30 u64 tx_broadcast_bytes_ok;
31 u64 tx_drops;
32 u64 tx_errors;
33 u64 tx_tso;
34 u64 rsvd[16];
35};
36
37/* Rx statistics */
38struct vnic_rx_stats {
39 u64 rx_frames_ok;
40 u64 rx_frames_total;
41 u64 rx_unicast_frames_ok;
42 u64 rx_multicast_frames_ok;
43 u64 rx_broadcast_frames_ok;
44 u64 rx_bytes_ok;
45 u64 rx_unicast_bytes_ok;
46 u64 rx_multicast_bytes_ok;
47 u64 rx_broadcast_bytes_ok;
48 u64 rx_drop;
49 u64 rx_no_bufs;
50 u64 rx_errors;
51 u64 rx_rss;
52 u64 rx_crc_errors;
53 u64 rx_frames_64;
54 u64 rx_frames_127;
55 u64 rx_frames_255;
56 u64 rx_frames_511;
57 u64 rx_frames_1023;
58 u64 rx_frames_1518;
59 u64 rx_frames_to_max;
60 u64 rsvd[16];
61};
62
63struct vnic_stats {
64 struct vnic_tx_stats tx;
65 struct vnic_rx_stats rx;
66};
67
68#endif /* _VNIC_STATS_H_ */
diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c
new file mode 100644
index 00000000000..1f9ea790d13
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq.c
@@ -0,0 +1,182 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18
19#include <linux/errno.h>
20#include <linux/types.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include "vnic_dev.h"
24#include "vnic_wq.h"
25
26static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
27{
28 struct vnic_wq_buf *buf;
29 struct vnic_dev *vdev;
30 unsigned int i, j, count = wq->ring.desc_count;
31 unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
32
33 vdev = wq->vdev;
34
35 for (i = 0; i < blks; i++) {
36 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
37 if (!wq->bufs[i]) {
38 printk(KERN_ERR "Failed to alloc wq_bufs\n");
39 return -ENOMEM;
40 }
41 }
42
43 for (i = 0; i < blks; i++) {
44 buf = wq->bufs[i];
45 for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
46 buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
47 buf->desc = (u8 *)wq->ring.descs +
48 wq->ring.desc_size * buf->index;
49 if (buf->index + 1 == count) {
50 buf->next = wq->bufs[0];
51 break;
52 } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
53 buf->next = wq->bufs[i + 1];
54 } else {
55 buf->next = buf + 1;
56 buf++;
57 }
58 }
59 }
60
61 wq->to_use = wq->to_clean = wq->bufs[0];
62
63 return 0;
64}
65
66void vnic_wq_free(struct vnic_wq *wq)
67{
68 struct vnic_dev *vdev;
69 unsigned int i;
70
71 vdev = wq->vdev;
72
73 vnic_dev_free_desc_ring(vdev, &wq->ring);
74
75 for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
76 kfree(wq->bufs[i]);
77 wq->bufs[i] = NULL;
78 }
79
80 wq->ctrl = NULL;
81
82}
83
84int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
85 unsigned int desc_count, unsigned int desc_size)
86{
87 int err;
88
89 wq->index = index;
90 wq->vdev = vdev;
91
92 wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
93 if (!wq->ctrl) {
94 printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
95 return -EINVAL;
96 }
97
98 vnic_wq_disable(wq);
99
100 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
101 if (err)
102 return err;
103
104 err = vnic_wq_alloc_bufs(wq);
105 if (err) {
106 vnic_wq_free(wq);
107 return err;
108 }
109
110 return 0;
111}
112
113void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
114 unsigned int error_interrupt_enable,
115 unsigned int error_interrupt_offset)
116{
117 u64 paddr;
118
119 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
120 writeq(paddr, &wq->ctrl->ring_base);
121 iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
122 iowrite32(0, &wq->ctrl->fetch_index);
123 iowrite32(0, &wq->ctrl->posted_index);
124 iowrite32(cq_index, &wq->ctrl->cq_index);
125 iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
126 iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
127 iowrite32(0, &wq->ctrl->error_status);
128}
129
130unsigned int vnic_wq_error_status(struct vnic_wq *wq)
131{
132 return ioread32(&wq->ctrl->error_status);
133}
134
135void vnic_wq_enable(struct vnic_wq *wq)
136{
137 iowrite32(1, &wq->ctrl->enable);
138}
139
140int vnic_wq_disable(struct vnic_wq *wq)
141{
142 unsigned int wait;
143
144 iowrite32(0, &wq->ctrl->enable);
145
146 /* Wait for HW to ACK disable request */
147 for (wait = 0; wait < 100; wait++) {
148 if (!(ioread32(&wq->ctrl->running)))
149 return 0;
150 udelay(1);
151 }
152
153 printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
154
155 return -ETIMEDOUT;
156}
157
158void vnic_wq_clean(struct vnic_wq *wq,
159 void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
160{
161 struct vnic_wq_buf *buf;
162
163 BUG_ON(ioread32(&wq->ctrl->enable));
164
165 buf = wq->to_clean;
166
167 while (vnic_wq_desc_used(wq) > 0) {
168
169 (*buf_clean)(wq, buf);
170
171 buf = wq->to_clean = buf->next;
172 wq->ring.desc_avail++;
173 }
174
175 wq->to_use = wq->to_clean = wq->bufs[0];
176
177 iowrite32(0, &wq->ctrl->fetch_index);
178 iowrite32(0, &wq->ctrl->posted_index);
179 iowrite32(0, &wq->ctrl->error_status);
180
181 vnic_dev_clear_desc_ring(&wq->ring);
182}
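
vnic_wq_disable() above, like vnic_rq_disable() earlier and vnic_wq_copy_disable() later in this patch, clears the enable register and then polls the running register for roughly 100us before giving up. A hypothetical shared helper that captures the bounded-wait pattern (not an fnic API; shown only to make the structure explicit):

    /* poll a register until it reads zero or usec_max microseconds pass */
    static int poll_reg_clear(u32 __iomem *reg, unsigned int usec_max)
    {
        unsigned int waited;

        for (waited = 0; waited < usec_max; waited++) {
            if (!ioread32(reg))
                return 0; /* hardware ACKed the request */
            udelay(1);
        }
        return -ETIMEDOUT;
    }

With such a helper, all three disable paths would reduce to iowrite32(0, &ctrl->enable) followed by poll_reg_clear(&ctrl->running, 100).
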
diff --git a/drivers/scsi/fnic/vnic_wq.h b/drivers/scsi/fnic/vnic_wq.h
new file mode 100644
index 00000000000..5cd094f7928
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq.h
@@ -0,0 +1,175 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_WQ_H_
19#define _VNIC_WQ_H_
20
21#include <linux/pci.h>
22#include "vnic_dev.h"
23#include "vnic_cq.h"
24
25/*
26 * These defines avoid a symbol clash between fnic and enic (the Cisco 10G
27 * Ethernet driver) when both drivers are built in (CONFIG options =y).
28 */
29#define vnic_wq_desc_avail fnic_wq_desc_avail
30#define vnic_wq_desc_used fnic_wq_desc_used
31#define vnic_wq_next_desc fnic_wq_next_desc
32#define vnic_wq_post fnic_wq_post
33#define vnic_wq_service fnic_wq_service
34#define vnic_wq_free fnic_wq_free
35#define vnic_wq_alloc fnic_wq_alloc
36#define vnic_wq_init fnic_wq_init
37#define vnic_wq_error_status fnic_wq_error_status
38#define vnic_wq_enable fnic_wq_enable
39#define vnic_wq_disable fnic_wq_disable
40#define vnic_wq_clean fnic_wq_clean
41
42/* Work queue control */
43struct vnic_wq_ctrl {
44 u64 ring_base; /* 0x00 */
45 u32 ring_size; /* 0x08 */
46 u32 pad0;
47 u32 posted_index; /* 0x10 */
48 u32 pad1;
49 u32 cq_index; /* 0x18 */
50 u32 pad2;
51 u32 enable; /* 0x20 */
52 u32 pad3;
53 u32 running; /* 0x28 */
54 u32 pad4;
55 u32 fetch_index; /* 0x30 */
56 u32 pad5;
57 u32 dca_value; /* 0x38 */
58 u32 pad6;
59 u32 error_interrupt_enable; /* 0x40 */
60 u32 pad7;
61 u32 error_interrupt_offset; /* 0x48 */
62 u32 pad8;
63 u32 error_status; /* 0x50 */
64 u32 pad9;
65};
66
67struct vnic_wq_buf {
68 struct vnic_wq_buf *next;
69 dma_addr_t dma_addr;
70 void *os_buf;
71 unsigned int len;
72 unsigned int index;
73 int sop;
74 void *desc;
75};
76
77/* Break the vnic_wq_buf allocations into blocks of 64 entries */
78#define VNIC_WQ_BUF_BLK_ENTRIES 64
79#define VNIC_WQ_BUF_BLK_SZ \
80 (VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
81#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
82 DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
83#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
84
85struct vnic_wq {
86 unsigned int index;
87 struct vnic_dev *vdev;
88 struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
89 struct vnic_dev_ring ring;
90 struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
91 struct vnic_wq_buf *to_use;
92 struct vnic_wq_buf *to_clean;
93 unsigned int pkts_outstanding;
94};
95
96static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
97{
98 /* how many does SW own? */
99 return wq->ring.desc_avail;
100}
101
102static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
103{
104 /* how many does HW own? */
105 return wq->ring.desc_count - wq->ring.desc_avail - 1;
106}
107
108static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
109{
110 return wq->to_use->desc;
111}
112
113static inline void vnic_wq_post(struct vnic_wq *wq,
114 void *os_buf, dma_addr_t dma_addr,
115 unsigned int len, int sop, int eop)
116{
117 struct vnic_wq_buf *buf = wq->to_use;
118
119 buf->sop = sop;
120 buf->os_buf = eop ? os_buf : NULL;
121 buf->dma_addr = dma_addr;
122 buf->len = len;
123
124 buf = buf->next;
125 if (eop) {
126 /* Adding write memory barrier prevents compiler and/or CPU
127 * reordering, thus avoiding descriptor posting before
128 * descriptor is initialized. Otherwise, hardware can read
129 * stale descriptor fields.
130 */
131 wmb();
132 iowrite32(buf->index, &wq->ctrl->posted_index);
133 }
134 wq->to_use = buf;
135
136 wq->ring.desc_avail--;
137}
138
139static inline void vnic_wq_service(struct vnic_wq *wq,
140 struct cq_desc *cq_desc, u16 completed_index,
141 void (*buf_service)(struct vnic_wq *wq,
142 struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
143 void *opaque)
144{
145 struct vnic_wq_buf *buf;
146
147 buf = wq->to_clean;
148 while (1) {
149
150 (*buf_service)(wq, cq_desc, buf, opaque);
151
152 wq->ring.desc_avail++;
153
154 wq->to_clean = buf->next;
155
156 if (buf->index == completed_index)
157 break;
158
159 buf = wq->to_clean;
160 }
161}
162
163void vnic_wq_free(struct vnic_wq *wq);
164int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
165 unsigned int desc_count, unsigned int desc_size);
166void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
167 unsigned int error_interrupt_enable,
168 unsigned int error_interrupt_offset);
169unsigned int vnic_wq_error_status(struct vnic_wq *wq);
170void vnic_wq_enable(struct vnic_wq *wq);
171int vnic_wq_disable(struct vnic_wq *wq);
172void vnic_wq_clean(struct vnic_wq *wq,
173 void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
174
175#endif /* _VNIC_WQ_H_ */
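
The "- 1" in vnic_wq_desc_used() (and its RQ counterpart) is the classic one-slot-reserved ring convention: one descriptor is never handed to hardware, so equal fetch and posted indices always mean "empty", never "full". Assuming the ring allocator starts desc_avail at desc_count - 1 (that code is in vnic_dev.c, outside this patch), the arithmetic works out as:

    static unsigned int used(unsigned int desc_count, unsigned int desc_avail)
    {
        return desc_count - desc_avail - 1;
    }
    /*
     * used(8, 7) == 0 -> freshly initialized ring is empty
     * used(8, 0) == 7 -> a "full" 8-entry ring still keeps one slot back
     */
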
diff --git a/drivers/scsi/fnic/vnic_wq_copy.c b/drivers/scsi/fnic/vnic_wq_copy.c
new file mode 100644
index 00000000000..9eab7e7caf3
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq_copy.c
@@ -0,0 +1,117 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18
19#include <linux/errno.h>
20#include <linux/types.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include "vnic_wq_copy.h"
24
25void vnic_wq_copy_enable(struct vnic_wq_copy *wq)
26{
27 iowrite32(1, &wq->ctrl->enable);
28}
29
30int vnic_wq_copy_disable(struct vnic_wq_copy *wq)
31{
32 unsigned int wait;
33
34 iowrite32(0, &wq->ctrl->enable);
35
36 /* Wait for HW to ACK disable request */
37 for (wait = 0; wait < 100; wait++) {
38 if (!(ioread32(&wq->ctrl->running)))
39 return 0;
40 udelay(1);
41 }
42
43 printk(KERN_ERR "Failed to disable Copy WQ[%d],"
44 " fetch index=%d, posted_index=%d\n",
45 wq->index, ioread32(&wq->ctrl->fetch_index),
46 ioread32(&wq->ctrl->posted_index));
47
48 return -ENODEV;
49}
50
51void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
52 void (*q_clean)(struct vnic_wq_copy *wq,
53 struct fcpio_host_req *wq_desc))
54{
55 BUG_ON(ioread32(&wq->ctrl->enable));
56
57 if (vnic_wq_copy_desc_in_use(wq))
58 vnic_wq_copy_service(wq, -1, q_clean);
59
60 wq->to_use_index = wq->to_clean_index = 0;
61
62 iowrite32(0, &wq->ctrl->fetch_index);
63 iowrite32(0, &wq->ctrl->posted_index);
64 iowrite32(0, &wq->ctrl->error_status);
65
66 vnic_dev_clear_desc_ring(&wq->ring);
67}
68
69void vnic_wq_copy_free(struct vnic_wq_copy *wq)
70{
71 struct vnic_dev *vdev;
72
73 vdev = wq->vdev;
74 vnic_dev_free_desc_ring(vdev, &wq->ring);
75 wq->ctrl = NULL;
76}
77
78int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
79 unsigned int index, unsigned int desc_count,
80 unsigned int desc_size)
81{
82 int err;
83
84 wq->index = index;
85 wq->vdev = vdev;
86 wq->to_use_index = wq->to_clean_index = 0;
87 wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
88 if (!wq->ctrl) {
89 printk(KERN_ERR "Failed to hook COPY WQ[%d] resource\n", index);
90 return -EINVAL;
91 }
92
93 vnic_wq_copy_disable(wq);
94
95 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
96 if (err)
97 return err;
98
99 return 0;
100}
101
102void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
103 unsigned int error_interrupt_enable,
104 unsigned int error_interrupt_offset)
105{
106 u64 paddr;
107
108 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
109 writeq(paddr, &wq->ctrl->ring_base);
110 iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
111 iowrite32(0, &wq->ctrl->fetch_index);
112 iowrite32(0, &wq->ctrl->posted_index);
113 iowrite32(cq_index, &wq->ctrl->cq_index);
114 iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
115 iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
116}
117
diff --git a/drivers/scsi/fnic/vnic_wq_copy.h b/drivers/scsi/fnic/vnic_wq_copy.h
new file mode 100644
index 00000000000..6aff9740c3d
--- /dev/null
+++ b/drivers/scsi/fnic/vnic_wq_copy.h
@@ -0,0 +1,128 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _VNIC_WQ_COPY_H_
19#define _VNIC_WQ_COPY_H_
20
21#include <linux/pci.h>
22#include "vnic_wq.h"
23#include "fcpio.h"
24
25#define VNIC_WQ_COPY_MAX 1
26
27struct vnic_wq_copy {
28 unsigned int index;
29 struct vnic_dev *vdev;
30 struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
31 struct vnic_dev_ring ring;
32 unsigned to_use_index;
33 unsigned to_clean_index;
34};
35
36static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
37{
38 return wq->ring.desc_avail;
39}
40
41static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
42{
43 return wq->ring.desc_count - 1 - wq->ring.desc_avail;
44}
45
46static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
47{
48 struct fcpio_host_req *desc = wq->ring.descs;
49 return &desc[wq->to_use_index];
50}
51
52static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
53{
54
55 ((wq->to_use_index + 1) == wq->ring.desc_count) ?
56 (wq->to_use_index = 0) : (wq->to_use_index++);
57 wq->ring.desc_avail--;
58
59 /* Adding write memory barrier prevents compiler and/or CPU
60 * reordering, thus avoiding descriptor posting before
61 * descriptor is initialized. Otherwise, hardware can read
62 * stale descriptor fields.
63 */
64 wmb();
65
66 iowrite32(wq->to_use_index, &wq->ctrl->posted_index);
67}
68
69static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index)
70{
71 unsigned int cnt;
72
73 if (wq->to_clean_index <= index)
74 cnt = (index - wq->to_clean_index) + 1;
75 else
76 cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;
77
78 wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
79 wq->ring.desc_avail += cnt;
80
81}
82
83static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq,
84 u16 completed_index,
85 void (*q_service)(struct vnic_wq_copy *wq,
86 struct fcpio_host_req *wq_desc))
87{
88 struct fcpio_host_req *wq_desc = wq->ring.descs;
89 unsigned int curr_index;
90
91 while (1) {
92
93 if (q_service)
94 (*q_service)(wq, &wq_desc[wq->to_clean_index]);
95
96 wq->ring.desc_avail++;
97
98 curr_index = wq->to_clean_index;
99
100 /* increment the to-clean index so that we start
101 * with an unprocessed index next time we enter the loop
102 */
103 ((wq->to_clean_index + 1) == wq->ring.desc_count) ?
104 (wq->to_clean_index = 0) : (wq->to_clean_index++);
105
106 if (curr_index == completed_index)
107 break;
108
109 /* a full clean (completed_index == -1) stops once every entry is consumed */
110 if ((completed_index == (u16)-1) &&
111 (wq->to_clean_index == wq->to_use_index))
112 break;
113 }
114}
115
116void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
117int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
118void vnic_wq_copy_free(struct vnic_wq_copy *wq);
119int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
120 unsigned int index, unsigned int desc_count, unsigned int desc_size);
121void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
122 unsigned int error_interrupt_enable,
123 unsigned int error_interrupt_offset);
124void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
125 void (*q_clean)(struct vnic_wq_copy *wq,
126 struct fcpio_host_req *wq_desc));
127
128#endif /* _VNIC_WQ_COPY_H_ */
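
vnic_wq_copy_desc_process() above counts how many descriptors one completion covers, with a second branch for the case where the completed index has wrapped past the end of the ring. Two worked cases on an 8-entry ring, as a standalone check:

    #include <stdio.h>

    static unsigned int completed_cnt(unsigned int desc_count,
                                      unsigned int to_clean, unsigned int index)
    {
        if (to_clean <= index)
            return index - to_clean + 1;          /* no wrap */
        return desc_count - to_clean + index + 1; /* wrapped past slot 0 */
    }

    int main(void)
    {
        printf("%u\n", completed_cnt(8, 2, 5)); /* 4: entries 2,3,4,5 */
        printf("%u\n", completed_cnt(8, 6, 1)); /* 4: entries 6,7,0,1 */
        return 0;
    }
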
diff --git a/drivers/scsi/fnic/wq_enet_desc.h b/drivers/scsi/fnic/wq_enet_desc.h
new file mode 100644
index 00000000000..b121cbad18b
--- /dev/null
+++ b/drivers/scsi/fnic/wq_enet_desc.h
@@ -0,0 +1,96 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18#ifndef _WQ_ENET_DESC_H_
19#define _WQ_ENET_DESC_H_
20
21/* Ethernet work queue descriptor: 16B */
22struct wq_enet_desc {
23 __le64 address;
24 __le16 length;
25 __le16 mss_loopback;
26 __le16 header_length_flags;
27 __le16 vlan_tag;
28};
29
30#define WQ_ENET_ADDR_BITS 64
31#define WQ_ENET_LEN_BITS 14
32#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1)
33#define WQ_ENET_MSS_BITS 14
34#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1)
35#define WQ_ENET_MSS_SHIFT 2
36#define WQ_ENET_LOOPBACK_SHIFT 1
37#define WQ_ENET_HDRLEN_BITS 10
38#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1)
39#define WQ_ENET_FLAGS_OM_BITS 2
40#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
41#define WQ_ENET_FLAGS_EOP_SHIFT 12
42#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13
43#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14
44#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15
45
46#define WQ_ENET_OFFLOAD_MODE_CSUM 0
47#define WQ_ENET_OFFLOAD_MODE_RESERVED 1
48#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2
49#define WQ_ENET_OFFLOAD_MODE_TSO 3
50
51static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
52 u64 address, u16 length, u16 mss, u16 header_length,
53 u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
54 u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
55{
56 desc->address = cpu_to_le64(address);
57 desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
58 desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
59 WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
60 desc->header_length_flags = cpu_to_le16(
61 (header_length & WQ_ENET_HDRLEN_MASK) |
62 (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
63 (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
64 (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
65 (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
66 (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
67 desc->vlan_tag = cpu_to_le16(vlan_tag);
68}
69
70static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
71 u64 *address, u16 *length, u16 *mss, u16 *header_length,
72 u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
73 u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
74{
75 *address = le64_to_cpu(desc->address);
76 *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
77 *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
78 WQ_ENET_MSS_MASK;
79 *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
80 WQ_ENET_LOOPBACK_SHIFT) & 1);
81 *header_length = le16_to_cpu(desc->header_length_flags) &
82 WQ_ENET_HDRLEN_MASK;
83 *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
84 WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
85 *eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
86 WQ_ENET_FLAGS_EOP_SHIFT) & 1);
87 *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
88 WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
89 *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
90 WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
91 *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
92 WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
93 *vlan_tag = le16_to_cpu(desc->vlan_tag);
94}
95
96#endif /* _WQ_ENET_DESC_H_ */
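
wq_enet_desc_enc() and wq_enet_desc_dec() above are exact inverses over the 16-byte little-endian descriptor. A kernel-context round-trip sketch; the field values are arbitrary examples, not anything fnic actually posts:

    static void wq_enet_desc_demo(void)
    {
        struct wq_enet_desc d;
        u64 addr;
        u16 len, mss, hdrlen, vlan;
        u8 om, eop, cq, fcoe, vlan_ins, lb;

        wq_enet_desc_enc(&d, 0x1000, 1500, 0, 0,
                         WQ_ENET_OFFLOAD_MODE_CSUM,
                         1 /* eop */, 1 /* cq_entry */, 1 /* fcoe_encap */,
                         0 /* vlan_tag_insert */, 0 /* vlan_tag */,
                         0 /* loopback */);

        wq_enet_desc_dec(&d, &addr, &len, &mss, &hdrlen,
                         &om, &eop, &cq, &fcoe, &vlan_ins, &vlan, &lb);
        /* addr == 0x1000, len == 1500, eop/cq/fcoe all 1, the rest 0 */
    }
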
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 59349a316e1..1258da34fbc 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -152,6 +152,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
152 struct Scsi_Host *host, gdth_ha_str *ha) 152 struct Scsi_Host *host, gdth_ha_str *ha)
153{ 153{
154 int size = 0,len = 0; 154 int size = 0,len = 0;
155 int hlen;
155 off_t begin = 0,pos = 0; 156 off_t begin = 0,pos = 0;
156 int id, i, j, k, sec, flag; 157 int id, i, j, k, sec, flag;
157 int no_mdrv = 0, drv_no, is_mirr; 158 int no_mdrv = 0, drv_no, is_mirr;
@@ -192,11 +193,11 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
192 if (reserve_list[0] == 0xff) 193 if (reserve_list[0] == 0xff)
193 strcpy(hrec, "--"); 194 strcpy(hrec, "--");
194 else { 195 else {
195 sprintf(hrec, "%d", reserve_list[0]); 196 hlen = sprintf(hrec, "%d", reserve_list[0]);
196 for (i = 1; i < MAX_RES_ARGS; i++) { 197 for (i = 1; i < MAX_RES_ARGS; i++) {
197 if (reserve_list[i] == 0xff) 198 if (reserve_list[i] == 0xff)
198 break; 199 break;
199 sprintf(hrec,"%s,%d", hrec, reserve_list[i]); 200 hlen += snprintf(hrec + hlen , 161 - hlen, ",%d", reserve_list[i]);
200 } 201 }
201 } 202 }
202 size = sprintf(buffer+len, 203 size = sprintf(buffer+len,
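
The hunk above fixes sprintf(hrec, "%s,%d", hrec, ...): appending a buffer to itself overlaps source and destination, which is undefined behavior, and rescanning the whole string each pass is quadratic besides. The replacement keeps a running offset and hands snprintf only the remaining space. The pattern in isolation (the 161-byte size mirrors the patch and is illustrative here; the demo data cannot overflow, but production code should clamp len):

    #include <stdio.h>

    int main(void)
    {
        char buf[161];
        int vals[] = { 3, 7, 42 };
        int len = 0, i;

        for (i = 0; i < 3; i++)
            len += snprintf(buf + len, sizeof(buf) - len,
                            "%s%d", i ? "," : "", vals[i]);

        printf("%s\n", buf); /* prints 3,7,42 */
        return 0;
    }
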
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ea4abee7a2a..b4b805e8d7d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -110,7 +110,7 @@ static const struct {
110 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" }, 110 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
111 { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" }, 111 { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
112 { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" }, 112 { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
113 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" }, 113 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
114 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" }, 114 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
115 115
116 { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" }, 116 { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
@@ -143,6 +143,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *);
143static void ibmvfc_tgt_send_prli(struct ibmvfc_target *); 143static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
144static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *); 144static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
145static void ibmvfc_tgt_query_target(struct ibmvfc_target *); 145static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
146static void ibmvfc_npiv_logout(struct ibmvfc_host *);
146 147
147static const char *unknown_error = "unknown error"; 148static const char *unknown_error = "unknown error";
148 149
@@ -275,7 +276,7 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
275 int fc_rsp_len = rsp->fcp_rsp_len; 276 int fc_rsp_len = rsp->fcp_rsp_len;
276 277
277 if ((rsp->flags & FCP_RSP_LEN_VALID) && 278 if ((rsp->flags & FCP_RSP_LEN_VALID) &&
278 ((!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || 279 ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
279 rsp->data.info.rsp_code)) 280 rsp->data.info.rsp_code))
280 return DID_ERROR << 16; 281 return DID_ERROR << 16;
281 282
@@ -431,6 +432,8 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
431 case IBMVFC_TGT_ACTION_DEL_RPORT: 432 case IBMVFC_TGT_ACTION_DEL_RPORT:
432 break; 433 break;
433 default: 434 default:
435 if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
436 tgt->add_rport = 0;
434 tgt->action = action; 437 tgt->action = action;
435 break; 438 break;
436 } 439 }
@@ -475,6 +478,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
475 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) 478 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
476 vhost->action = action; 479 vhost->action = action;
477 break; 480 break;
481 case IBMVFC_HOST_ACTION_LOGO_WAIT:
482 if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
483 vhost->action = action;
484 break;
478 case IBMVFC_HOST_ACTION_INIT_WAIT: 485 case IBMVFC_HOST_ACTION_INIT_WAIT:
479 if (vhost->action == IBMVFC_HOST_ACTION_INIT) 486 if (vhost->action == IBMVFC_HOST_ACTION_INIT)
480 vhost->action = action; 487 vhost->action = action;
@@ -483,7 +490,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
483 switch (vhost->action) { 490 switch (vhost->action) {
484 case IBMVFC_HOST_ACTION_INIT_WAIT: 491 case IBMVFC_HOST_ACTION_INIT_WAIT:
485 case IBMVFC_HOST_ACTION_NONE: 492 case IBMVFC_HOST_ACTION_NONE:
486 case IBMVFC_HOST_ACTION_TGT_ADD: 493 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
487 vhost->action = action; 494 vhost->action = action;
488 break; 495 break;
489 default: 496 default:
@@ -494,11 +501,11 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
494 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS) 501 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
495 vhost->action = action; 502 vhost->action = action;
496 break; 503 break;
504 case IBMVFC_HOST_ACTION_LOGO:
497 case IBMVFC_HOST_ACTION_INIT: 505 case IBMVFC_HOST_ACTION_INIT:
498 case IBMVFC_HOST_ACTION_TGT_DEL: 506 case IBMVFC_HOST_ACTION_TGT_DEL:
499 case IBMVFC_HOST_ACTION_QUERY_TGTS: 507 case IBMVFC_HOST_ACTION_QUERY_TGTS:
500 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: 508 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
501 case IBMVFC_HOST_ACTION_TGT_ADD:
502 case IBMVFC_HOST_ACTION_NONE: 509 case IBMVFC_HOST_ACTION_NONE:
503 default: 510 default:
504 vhost->action = action; 511 vhost->action = action;
@@ -576,7 +583,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
576 } 583 }
577 584
578 list_for_each_entry(tgt, &vhost->targets, queue) 585 list_for_each_entry(tgt, &vhost->targets, queue)
579 tgt->need_login = 1; 586 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
580 scsi_block_requests(vhost->host); 587 scsi_block_requests(vhost->host);
581 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); 588 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
582 vhost->job_step = ibmvfc_npiv_login; 589 vhost->job_step = ibmvfc_npiv_login;
@@ -646,6 +653,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
646 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 653 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
647 654
648 vhost->state = IBMVFC_NO_CRQ; 655 vhost->state = IBMVFC_NO_CRQ;
656 vhost->logged_in = 0;
649 dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); 657 dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
650 free_page((unsigned long)crq->msgs); 658 free_page((unsigned long)crq->msgs);
651} 659}
@@ -692,6 +700,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
692 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 700 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
693 701
694 vhost->state = IBMVFC_NO_CRQ; 702 vhost->state = IBMVFC_NO_CRQ;
703 vhost->logged_in = 0;
695 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); 704 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
696 705
697 /* Clean out the queue */ 706 /* Clean out the queue */
@@ -807,10 +816,10 @@ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
807} 816}
808 817
809/** 818/**
810 * __ibmvfc_reset_host - Reset the connection to the server (no locking) 819 * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
811 * @vhost: struct ibmvfc host to reset 820 * @vhost: struct ibmvfc host to reset
812 **/ 821 **/
813static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) 822static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
814{ 823{
815 int rc; 824 int rc;
816 825
@@ -826,9 +835,25 @@ static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
826} 835}
827 836
828/** 837/**
829 * ibmvfc_reset_host - Reset the connection to the server 838 * __ibmvfc_reset_host - Reset the connection to the server (no locking)
830 * @vhost: struct ibmvfc host to reset 839 * @vhost: struct ibmvfc host to reset
831 **/ 840 **/
841static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
842{
843 if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
844 !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
845 scsi_block_requests(vhost->host);
846 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
847 vhost->job_step = ibmvfc_npiv_logout;
848 wake_up(&vhost->work_wait_q);
849 } else
850 ibmvfc_hard_reset_host(vhost);
851}
852
853/**
854 * ibmvfc_reset_host - Reset the connection to the server
855 * @vhost: ibmvfc host struct
856 **/
832static void ibmvfc_reset_host(struct ibmvfc_host *vhost) 857static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
833{ 858{
834 unsigned long flags; 859 unsigned long flags;
@@ -842,9 +867,13 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
842 * ibmvfc_retry_host_init - Retry host initialization if allowed 867 * ibmvfc_retry_host_init - Retry host initialization if allowed
843 * @vhost: ibmvfc host struct 868 * @vhost: ibmvfc host struct
844 * 869 *
870 * Returns: 1 if init will be retried / 0 if not
871 *
845 **/ 872 **/
846static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost) 873static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
847{ 874{
875 int retry = 0;
876
848 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { 877 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
849 vhost->delay_init = 1; 878 vhost->delay_init = 1;
850 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) { 879 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
@@ -853,11 +882,14 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
853 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); 882 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
854 } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES) 883 } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
855 __ibmvfc_reset_host(vhost); 884 __ibmvfc_reset_host(vhost);
856 else 885 else {
857 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); 886 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
887 retry = 1;
888 }
858 } 889 }
859 890
860 wake_up(&vhost->work_wait_q); 891 wake_up(&vhost->work_wait_q);
892 return retry;
861} 893}
862 894
863/** 895/**
@@ -1137,8 +1169,9 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1137 login_info->partition_num = vhost->partition_number; 1169 login_info->partition_num = vhost->partition_number;
1138 login_info->vfc_frame_version = 1; 1170 login_info->vfc_frame_version = 1;
1139 login_info->fcp_version = 3; 1171 login_info->fcp_version = 3;
1172 login_info->flags = IBMVFC_FLUSH_ON_HALT;
1140 if (vhost->client_migrated) 1173 if (vhost->client_migrated)
1141 login_info->flags = IBMVFC_CLIENT_MIGRATED; 1174 login_info->flags |= IBMVFC_CLIENT_MIGRATED;
1142 1175
1143 login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ; 1176 login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1144 login_info->capabilities = IBMVFC_CAN_MIGRATE; 1177 login_info->capabilities = IBMVFC_CAN_MIGRATE;
@@ -1452,6 +1485,27 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
1452} 1485}
1453 1486
1454/** 1487/**
1488 * ibmvfc_relogin - Log back into the specified device
1489 * @sdev: scsi device struct
1490 *
1491 **/
1492static void ibmvfc_relogin(struct scsi_device *sdev)
1493{
1494 struct ibmvfc_host *vhost = shost_priv(sdev->host);
1495 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1496 struct ibmvfc_target *tgt;
1497
1498 list_for_each_entry(tgt, &vhost->targets, queue) {
1499 if (rport == tgt->rport) {
1500 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
1501 break;
1502 }
1503 }
1504
1505 ibmvfc_reinit_host(vhost);
1506}
1507
1508/**
1455 * ibmvfc_scsi_done - Handle responses from commands 1509 * ibmvfc_scsi_done - Handle responses from commands
1456 * @evt: ibmvfc event to be handled 1510 * @evt: ibmvfc event to be handled
1457 * 1511 *
@@ -1483,7 +1537,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1483 if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) 1537 if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1484 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); 1538 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1485 if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED)) 1539 if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
1486 ibmvfc_reinit_host(evt->vhost); 1540 ibmvfc_relogin(cmnd->device);
1487 1541
1488 if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER))) 1542 if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1489 cmnd->result = (DID_ERROR << 16); 1543 cmnd->result = (DID_ERROR << 16);
@@ -2148,13 +2202,31 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2148 struct ibmvfc_host *vhost) 2202 struct ibmvfc_host *vhost)
2149{ 2203{
2150 const char *desc = ibmvfc_get_ae_desc(crq->event); 2204 const char *desc = ibmvfc_get_ae_desc(crq->event);
2205 struct ibmvfc_target *tgt;
2151 2206
2152 ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx," 2207 ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx,"
2153 " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); 2208 " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
2154 2209
2155 switch (crq->event) { 2210 switch (crq->event) {
2156 case IBMVFC_AE_LINK_UP:
2157 case IBMVFC_AE_RESUME: 2211 case IBMVFC_AE_RESUME:
2212 switch (crq->link_state) {
2213 case IBMVFC_AE_LS_LINK_DOWN:
2214 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2215 break;
2216 case IBMVFC_AE_LS_LINK_DEAD:
2217 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2218 break;
2219 case IBMVFC_AE_LS_LINK_UP:
2220 case IBMVFC_AE_LS_LINK_BOUNCED:
2221 default:
2222 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2223 vhost->delay_init = 1;
2224 __ibmvfc_reset_host(vhost);
2225 break;
2226 };
2227
2228 break;
2229 case IBMVFC_AE_LINK_UP:
2158 vhost->events_to_log |= IBMVFC_AE_LINKUP; 2230 vhost->events_to_log |= IBMVFC_AE_LINKUP;
2159 vhost->delay_init = 1; 2231 vhost->delay_init = 1;
2160 __ibmvfc_reset_host(vhost); 2232 __ibmvfc_reset_host(vhost);
@@ -2168,9 +2240,23 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2168 case IBMVFC_AE_SCN_NPORT: 2240 case IBMVFC_AE_SCN_NPORT:
2169 case IBMVFC_AE_SCN_GROUP: 2241 case IBMVFC_AE_SCN_GROUP:
2170 vhost->events_to_log |= IBMVFC_AE_RSCN; 2242 vhost->events_to_log |= IBMVFC_AE_RSCN;
2243 ibmvfc_reinit_host(vhost);
2244 break;
2171 case IBMVFC_AE_ELS_LOGO: 2245 case IBMVFC_AE_ELS_LOGO:
2172 case IBMVFC_AE_ELS_PRLO: 2246 case IBMVFC_AE_ELS_PRLO:
2173 case IBMVFC_AE_ELS_PLOGI: 2247 case IBMVFC_AE_ELS_PLOGI:
2248 list_for_each_entry(tgt, &vhost->targets, queue) {
2249 if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
2250 break;
2251 if (crq->scsi_id && tgt->scsi_id != crq->scsi_id)
2252 continue;
2253 if (crq->wwpn && tgt->ids.port_name != crq->wwpn)
2254 continue;
2255 if (crq->node_name && tgt->ids.node_name != crq->node_name)
2256 continue;
2257 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2258 }
2259
2174 ibmvfc_reinit_host(vhost); 2260 ibmvfc_reinit_host(vhost);
2175 break; 2261 break;
2176 case IBMVFC_AE_LINK_DOWN: 2262 case IBMVFC_AE_LINK_DOWN:
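
In the ELS event handling added above, each CRQ field (scsi_id, wwpn, node_name) filters targets only when nonzero, and an all-zero triple carries no target information at all, so the loop bails out immediately and only the ibmvfc_reinit_host() call runs. The per-field rule reduces to a one-line predicate (hypothetical helper, not ibmvfc code):

    /* zero acts as a wildcard; a nonzero CRQ field must match exactly */
    static int field_matches(u64 crq_val, u64 tgt_val)
    {
        return !crq_val || crq_val == tgt_val;
    }
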
@@ -2222,6 +2308,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
2222 return; 2308 return;
2223 case IBMVFC_CRQ_XPORT_EVENT: 2309 case IBMVFC_CRQ_XPORT_EVENT:
2224 vhost->state = IBMVFC_NO_CRQ; 2310 vhost->state = IBMVFC_NO_CRQ;
2311 vhost->logged_in = 0;
2225 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); 2312 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
2226 if (crq->format == IBMVFC_PARTITION_MIGRATED) { 2313 if (crq->format == IBMVFC_PARTITION_MIGRATED) {
2227 /* We need to re-setup the interpartition connection */ 2314 /* We need to re-setup the interpartition connection */
@@ -2299,7 +2386,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2299 done = 1; 2386 done = 1;
2300 } 2387 }
2301 2388
2302 if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE) 2389 if (vhost->scan_complete)
2303 done = 1; 2390 done = 1;
2304 spin_unlock_irqrestore(shost->host_lock, flags); 2391 spin_unlock_irqrestore(shost->host_lock, flags);
2305 return done; 2392 return done;
@@ -2434,14 +2521,6 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
2434 vhost->login_buf->resp.partition_name); 2521 vhost->login_buf->resp.partition_name);
2435} 2522}
2436 2523
2437static struct device_attribute ibmvfc_host_partition_name = {
2438 .attr = {
2439 .name = "partition_name",
2440 .mode = S_IRUGO,
2441 },
2442 .show = ibmvfc_show_host_partition_name,
2443};
2444
2445static ssize_t ibmvfc_show_host_device_name(struct device *dev, 2524static ssize_t ibmvfc_show_host_device_name(struct device *dev,
2446 struct device_attribute *attr, char *buf) 2525 struct device_attribute *attr, char *buf)
2447{ 2526{
@@ -2452,14 +2531,6 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev,
2452 vhost->login_buf->resp.device_name); 2531 vhost->login_buf->resp.device_name);
2453} 2532}
2454 2533
2455static struct device_attribute ibmvfc_host_device_name = {
2456 .attr = {
2457 .name = "device_name",
2458 .mode = S_IRUGO,
2459 },
2460 .show = ibmvfc_show_host_device_name,
2461};
2462
2463static ssize_t ibmvfc_show_host_loc_code(struct device *dev, 2534static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
2464 struct device_attribute *attr, char *buf) 2535 struct device_attribute *attr, char *buf)
2465{ 2536{
@@ -2470,14 +2541,6 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
2470 vhost->login_buf->resp.port_loc_code); 2541 vhost->login_buf->resp.port_loc_code);
2471} 2542}
2472 2543
2473static struct device_attribute ibmvfc_host_loc_code = {
2474 .attr = {
2475 .name = "port_loc_code",
2476 .mode = S_IRUGO,
2477 },
2478 .show = ibmvfc_show_host_loc_code,
2479};
2480
2481static ssize_t ibmvfc_show_host_drc_name(struct device *dev, 2544static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
2482 struct device_attribute *attr, char *buf) 2545 struct device_attribute *attr, char *buf)
2483{ 2546{
@@ -2488,14 +2551,6 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
2488 vhost->login_buf->resp.drc_name); 2551 vhost->login_buf->resp.drc_name);
2489} 2552}
2490 2553
2491static struct device_attribute ibmvfc_host_drc_name = {
2492 .attr = {
2493 .name = "drc_name",
2494 .mode = S_IRUGO,
2495 },
2496 .show = ibmvfc_show_host_drc_name,
2497};
2498
2499static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, 2554static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
2500 struct device_attribute *attr, char *buf) 2555 struct device_attribute *attr, char *buf)
2501{ 2556{
@@ -2504,13 +2559,13 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
2504 return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version); 2559 return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
2505} 2560}
2506 2561
2507static struct device_attribute ibmvfc_host_npiv_version = { 2562static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
2508 .attr = { 2563 struct device_attribute *attr, char *buf)
2509 .name = "npiv_version", 2564{
2510 .mode = S_IRUGO, 2565 struct Scsi_Host *shost = class_to_shost(dev);
2511 }, 2566 struct ibmvfc_host *vhost = shost_priv(shost);
2512 .show = ibmvfc_show_host_npiv_version, 2567 return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities);
2513}; 2568}
2514 2569
2515/** 2570/**
2516 * ibmvfc_show_log_level - Show the adapter's error logging level 2571 * ibmvfc_show_log_level - Show the adapter's error logging level
@@ -2556,14 +2611,14 @@ static ssize_t ibmvfc_store_log_level(struct device *dev,
2556 return strlen(buf); 2611 return strlen(buf);
2557} 2612}
2558 2613
2559static struct device_attribute ibmvfc_log_level_attr = { 2614static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
2560 .attr = { 2615static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
2561 .name = "log_level", 2616static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
2562 .mode = S_IRUGO | S_IWUSR, 2617static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
2563 }, 2618static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
2564 .show = ibmvfc_show_log_level, 2619static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
2565 .store = ibmvfc_store_log_level 2620static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
2566}; 2621 ibmvfc_show_log_level, ibmvfc_store_log_level);
2567 2622
2568#ifdef CONFIG_SCSI_IBMVFC_TRACE 2623#ifdef CONFIG_SCSI_IBMVFC_TRACE
2569/** 2624/**
@@ -2612,12 +2667,13 @@ static struct bin_attribute ibmvfc_trace_attr = {
2612#endif 2667#endif
2613 2668
2614static struct device_attribute *ibmvfc_attrs[] = { 2669static struct device_attribute *ibmvfc_attrs[] = {
2615 &ibmvfc_host_partition_name, 2670 &dev_attr_partition_name,
2616 &ibmvfc_host_device_name, 2671 &dev_attr_device_name,
2617 &ibmvfc_host_loc_code, 2672 &dev_attr_port_loc_code,
2618 &ibmvfc_host_drc_name, 2673 &dev_attr_drc_name,
2619 &ibmvfc_host_npiv_version, 2674 &dev_attr_npiv_version,
2620 &ibmvfc_log_level_attr, 2675 &dev_attr_capabilities,
2676 &dev_attr_log_level,
2621 NULL 2677 NULL
2622}; 2678};
2623 2679
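The attribute hunks above replace hand-rolled struct device_attribute initializers with DEVICE_ATTR(), which is why ibmvfc_attrs[] now lists &dev_attr_* symbols. A simplified userspace model of the name-pasting the kernel macro performs; the real macro in <linux/device.h> builds on __ATTR() and takes typed show/store callbacks:

/* Why DEVICE_ATTR(log_level, ...) pairs with &dev_attr_log_level:
 * the macro pastes the name into a variable.  Simplified stand-in. */
#include <stdio.h>
#include <stddef.h>

struct device_attribute {
	const char *name;
	unsigned int mode;
	void *show;	/* stand-ins for the real show/store callbacks */
	void *store;
};

#define DEVICE_ATTR(_name, _mode, _show, _store) \
	static struct device_attribute dev_attr_##_name = \
		{ #_name, (_mode), (_show), (_store) }

DEVICE_ATTR(log_level, 0644, NULL, NULL);
DEVICE_ATTR(capabilities, 0444, NULL, NULL);

static struct device_attribute *attrs[] = {
	&dev_attr_log_level,	/* same pairing as ibmvfc_attrs[] above */
	&dev_attr_capabilities,
	NULL
};

int main(void)
{
	for (int i = 0; attrs[i]; i++)
		printf("%s mode=%o\n", attrs[i]->name, attrs[i]->mode);
	return 0;
}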
@@ -2774,15 +2830,19 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
2774 * @tgt: ibmvfc target struct 2830 * @tgt: ibmvfc target struct
2775 * @job_step: initialization job step 2831 * @job_step: initialization job step
2776 * 2832 *
2833 * Returns: 1 if step will be retried / 0 if not
2834 *
2777 **/ 2835 **/
2778static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, 2836static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
2779 void (*job_step) (struct ibmvfc_target *)) 2837 void (*job_step) (struct ibmvfc_target *))
2780{ 2838{
2781 if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) { 2839 if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
2782 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2840 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2783 wake_up(&tgt->vhost->work_wait_q); 2841 wake_up(&tgt->vhost->work_wait_q);
2842 return 0;
2784 } else 2843 } else
2785 ibmvfc_init_tgt(tgt, job_step); 2844 ibmvfc_init_tgt(tgt, job_step);
2845 return 1;
2786} 2846}
2787 2847
2788/* Defined in FC-LS */ 2848/* Defined in FC-LS */
@@ -2831,7 +2891,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2831 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; 2891 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
2832 struct ibmvfc_prli_svc_parms *parms = &rsp->parms; 2892 struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
2833 u32 status = rsp->common.status; 2893 u32 status = rsp->common.status;
2834 int index; 2894 int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
2835 2895
2836 vhost->discovery_threads--; 2896 vhost->discovery_threads--;
2837 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 2897 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -2850,7 +2910,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2850 tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET; 2910 tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
2851 if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC) 2911 if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC)
2852 tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; 2912 tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
2853 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT); 2913 tgt->add_rport = 1;
2854 } else 2914 } else
2855 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2915 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2856 } else if (prli_rsp[index].retry) 2916 } else if (prli_rsp[index].retry)
@@ -2867,13 +2927,14 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
2867 break; 2927 break;
2868 case IBMVFC_MAD_FAILED: 2928 case IBMVFC_MAD_FAILED:
2869 default: 2929 default:
2870 tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
2871 ibmvfc_get_cmd_error(rsp->status, rsp->error),
2872 rsp->status, rsp->error, status);
2873 if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 2930 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2874 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); 2931 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
2875 else 2932 else
2876 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 2933 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
2934
2935 tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
2936 ibmvfc_get_cmd_error(rsp->status, rsp->error),
2937 rsp->status, rsp->error, status);
2877 break; 2938 break;
2878 }; 2939 };
2879 2940
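ibmvfc_retry_tgt_init() now reports whether the step will be retried, and its callers add that result to IBMVFC_DEFAULT_LOG_LEVEL before logging, so failures that will be retried stay quiet unless log_level has been raised. A runnable model of the idiom, with simplified names:

/* Userspace model of the "level += retried" logging idiom: a retry
 * bumps the threshold, so only final failures print by default. */
#include <stdio.h>

#define DEFAULT_LOG_LEVEL 2
static int log_level = DEFAULT_LOG_LEVEL;

static void tgt_log(int level, const char *msg)
{
	if (log_level >= level)
		fprintf(stderr, "%s", msg);
}

/* Returns 1 if the init step will be retried, 0 if retries ran out. */
static int retry_tgt_init(int *retries, int max_retries)
{
	if (++(*retries) > max_retries)
		return 0;	/* give up: delete the rport */
	return 1;		/* re-drive the job step */
}

int main(void)
{
	int retries = 0;
	int level = DEFAULT_LOG_LEVEL + retry_tgt_init(&retries, 3);

	tgt_log(level, "PRLI failed, retrying (quiet by default)\n");

	retries = 3;
	level = DEFAULT_LOG_LEVEL + retry_tgt_init(&retries, 3);
	tgt_log(level, "PRLI failed for good (printed by default)\n");
	return 0;
}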
@@ -2932,6 +2993,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
2932 struct ibmvfc_host *vhost = evt->vhost; 2993 struct ibmvfc_host *vhost = evt->vhost;
2933 struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi; 2994 struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
2934 u32 status = rsp->common.status; 2995 u32 status = rsp->common.status;
2996 int level = IBMVFC_DEFAULT_LOG_LEVEL;
2935 2997
2936 vhost->discovery_threads--; 2998 vhost->discovery_threads--;
2937 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 2999 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -2960,15 +3022,15 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
2960 break; 3022 break;
2961 case IBMVFC_MAD_FAILED: 3023 case IBMVFC_MAD_FAILED:
2962 default: 3024 default:
2963 tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
2964 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
2965 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
2966 ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
2967
2968 if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 3025 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
2969 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); 3026 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
2970 else 3027 else
2971 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3028 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3029
3030 tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3031 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3032 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3033 ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
2972 break; 3034 break;
2973 }; 3035 };
2974 3036
@@ -3129,13 +3191,13 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
3129 case IBMVFC_MAD_SUCCESS: 3191 case IBMVFC_MAD_SUCCESS:
3130 tgt_dbg(tgt, "ADISC succeeded\n"); 3192 tgt_dbg(tgt, "ADISC succeeded\n");
3131 if (ibmvfc_adisc_needs_plogi(mad, tgt)) 3193 if (ibmvfc_adisc_needs_plogi(mad, tgt))
3132 tgt->need_login = 1; 3194 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3133 break; 3195 break;
3134 case IBMVFC_MAD_DRIVER_FAILED: 3196 case IBMVFC_MAD_DRIVER_FAILED:
3135 break; 3197 break;
3136 case IBMVFC_MAD_FAILED: 3198 case IBMVFC_MAD_FAILED:
3137 default: 3199 default:
3138 tgt->need_login = 1; 3200 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3139 fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16; 3201 fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
3140 fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8; 3202 fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
3141 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", 3203 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
@@ -3322,6 +3384,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3322 struct ibmvfc_host *vhost = evt->vhost; 3384 struct ibmvfc_host *vhost = evt->vhost;
3323 struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt; 3385 struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
3324 u32 status = rsp->common.status; 3386 u32 status = rsp->common.status;
3387 int level = IBMVFC_DEFAULT_LOG_LEVEL;
3325 3388
3326 vhost->discovery_threads--; 3389 vhost->discovery_threads--;
3327 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 3390 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -3341,19 +3404,19 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3341 break; 3404 break;
3342 case IBMVFC_MAD_FAILED: 3405 case IBMVFC_MAD_FAILED:
3343 default: 3406 default:
3344 tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3345 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3346 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3347 ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
3348
3349 if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && 3407 if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
3350 rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ && 3408 rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
3351 rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG) 3409 rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
3352 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3410 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3353 else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 3411 else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3354 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); 3412 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
3355 else 3413 else
3356 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3414 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3415
3416 tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3417 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
3418 ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
3419 ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
3357 break; 3420 break;
3358 }; 3421 };
3359 3422
@@ -3420,7 +3483,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
3420 } 3483 }
3421 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3484 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3422 3485
3423 tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL); 3486 tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
3424 if (!tgt) { 3487 if (!tgt) {
3425 dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n", 3488 dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
3426 scsi_id); 3489 scsi_id);
@@ -3472,6 +3535,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3472 struct ibmvfc_host *vhost = evt->vhost; 3535 struct ibmvfc_host *vhost = evt->vhost;
3473 struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets; 3536 struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
3474 u32 mad_status = rsp->common.status; 3537 u32 mad_status = rsp->common.status;
3538 int level = IBMVFC_DEFAULT_LOG_LEVEL;
3475 3539
3476 switch (mad_status) { 3540 switch (mad_status) {
3477 case IBMVFC_MAD_SUCCESS: 3541 case IBMVFC_MAD_SUCCESS:
@@ -3480,9 +3544,9 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3480 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS); 3544 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
3481 break; 3545 break;
3482 case IBMVFC_MAD_FAILED: 3546 case IBMVFC_MAD_FAILED:
3483 dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n", 3547 level += ibmvfc_retry_host_init(vhost);
3484 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); 3548 ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
3485 ibmvfc_retry_host_init(vhost); 3549 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3486 break; 3550 break;
3487 case IBMVFC_MAD_DRIVER_FAILED: 3551 case IBMVFC_MAD_DRIVER_FAILED:
3488 break; 3552 break;
@@ -3534,18 +3598,19 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
3534 u32 mad_status = evt->xfer_iu->npiv_login.common.status; 3598 u32 mad_status = evt->xfer_iu->npiv_login.common.status;
3535 struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp; 3599 struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
3536 unsigned int npiv_max_sectors; 3600 unsigned int npiv_max_sectors;
3601 int level = IBMVFC_DEFAULT_LOG_LEVEL;
3537 3602
3538 switch (mad_status) { 3603 switch (mad_status) {
3539 case IBMVFC_MAD_SUCCESS: 3604 case IBMVFC_MAD_SUCCESS:
3540 ibmvfc_free_event(evt); 3605 ibmvfc_free_event(evt);
3541 break; 3606 break;
3542 case IBMVFC_MAD_FAILED: 3607 case IBMVFC_MAD_FAILED:
3543 dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n",
3544 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3545 if (ibmvfc_retry_cmd(rsp->status, rsp->error)) 3608 if (ibmvfc_retry_cmd(rsp->status, rsp->error))
3546 ibmvfc_retry_host_init(vhost); 3609 level += ibmvfc_retry_host_init(vhost);
3547 else 3610 else
3548 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); 3611 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3612 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
3613 ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
3549 ibmvfc_free_event(evt); 3614 ibmvfc_free_event(evt);
3550 return; 3615 return;
3551 case IBMVFC_MAD_CRQ_ERROR: 3616 case IBMVFC_MAD_CRQ_ERROR:
@@ -3578,6 +3643,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
3578 return; 3643 return;
3579 } 3644 }
3580 3645
3646 vhost->logged_in = 1;
3581 npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS); 3647 npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
3582 dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", 3648 dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
3583 rsp->partition_name, rsp->device_name, rsp->port_loc_code, 3649 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
@@ -3636,6 +3702,65 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
3636}; 3702};
3637 3703
3638/** 3704/**
3705 * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
3706 * @evt: ibmvfc event struct
3706 * @evt: ibmvfc event struct
3707 *
3708 **/
3709static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
3710{
3711 struct ibmvfc_host *vhost = evt->vhost;
3712 u32 mad_status = evt->xfer_iu->npiv_logout.common.status;
3713
3714 ibmvfc_free_event(evt);
3715
3716 switch (mad_status) {
3717 case IBMVFC_MAD_SUCCESS:
3718 if (list_empty(&vhost->sent) &&
3719 vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
3720 ibmvfc_init_host(vhost, 0);
3721 return;
3722 }
3723 break;
3724 case IBMVFC_MAD_FAILED:
3725 case IBMVFC_MAD_NOT_SUPPORTED:
3726 case IBMVFC_MAD_CRQ_ERROR:
3727 case IBMVFC_MAD_DRIVER_FAILED:
3728 default:
3729 ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
3730 break;
3731 }
3732
3733 ibmvfc_hard_reset_host(vhost);
3734}
3735
3736/**
3737 * ibmvfc_npiv_logout - Issue an NPIV Logout
3738 * @vhost: ibmvfc host struct
3739 *
3740 **/
3741static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
3742{
3743 struct ibmvfc_npiv_logout_mad *mad;
3744 struct ibmvfc_event *evt;
3745
3746 evt = ibmvfc_get_event(vhost);
3747 ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
3748
3749 mad = &evt->iu.npiv_logout;
3750 memset(mad, 0, sizeof(*mad));
3751 mad->common.version = 1;
3752 mad->common.opcode = IBMVFC_NPIV_LOGOUT;
3753 mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad);
3754
3755 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
3756
3757 if (!ibmvfc_send_event(evt, vhost, default_timeout))
3758 ibmvfc_dbg(vhost, "Sent NPIV logout\n");
3759 else
3760 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3761}
3762
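ibmvfc_npiv_logout() follows the driver's usual MAD construction pattern: zero the request, then set version, opcode and length in the common header. A stand-alone sketch of that framing; the struct layout here is illustrative, not the packed, 8-byte-aligned firmware format in ibmvfc.h:

/* Sketch of MAD framing as used by ibmvfc_npiv_logout(). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mad_common {
	uint32_t version;
	uint32_t opcode;
	uint16_t length;
};

struct npiv_logout_mad {
	struct mad_common common;
};

#define IBMVFC_NPIV_LOGOUT 0x0800	/* matches the new enum value */

int main(void)
{
	struct npiv_logout_mad mad;

	memset(&mad, 0, sizeof(mad));
	mad.common.version = 1;
	mad.common.opcode = IBMVFC_NPIV_LOGOUT;
	mad.common.length = sizeof(struct npiv_logout_mad);

	printf("opcode=0x%04x length=%u\n", (unsigned)mad.common.opcode,
	       (unsigned)mad.common.length);
	return 0;
}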
3763/**
3639 * ibmvfc_dev_init_to_do - Is there target initialization work to do? 3764 * ibmvfc_dev_init_to_do - Is there target initialization work to do?
3640 * @vhost: ibmvfc host struct 3765 * @vhost: ibmvfc host struct
3641 * 3766 *
@@ -3671,6 +3796,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3671 switch (vhost->action) { 3796 switch (vhost->action) {
3672 case IBMVFC_HOST_ACTION_NONE: 3797 case IBMVFC_HOST_ACTION_NONE:
3673 case IBMVFC_HOST_ACTION_INIT_WAIT: 3798 case IBMVFC_HOST_ACTION_INIT_WAIT:
3799 case IBMVFC_HOST_ACTION_LOGO_WAIT:
3674 return 0; 3800 return 0;
3675 case IBMVFC_HOST_ACTION_TGT_INIT: 3801 case IBMVFC_HOST_ACTION_TGT_INIT:
3676 case IBMVFC_HOST_ACTION_QUERY_TGTS: 3802 case IBMVFC_HOST_ACTION_QUERY_TGTS:
@@ -3683,9 +3809,9 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
3683 if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) 3809 if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
3684 return 0; 3810 return 0;
3685 return 1; 3811 return 1;
3812 case IBMVFC_HOST_ACTION_LOGO:
3686 case IBMVFC_HOST_ACTION_INIT: 3813 case IBMVFC_HOST_ACTION_INIT:
3687 case IBMVFC_HOST_ACTION_ALLOC_TGTS: 3814 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
3688 case IBMVFC_HOST_ACTION_TGT_ADD:
3689 case IBMVFC_HOST_ACTION_TGT_DEL: 3815 case IBMVFC_HOST_ACTION_TGT_DEL:
3690 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: 3816 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
3691 case IBMVFC_HOST_ACTION_QUERY: 3817 case IBMVFC_HOST_ACTION_QUERY:
@@ -3740,25 +3866,26 @@ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
3740static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) 3866static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
3741{ 3867{
3742 struct ibmvfc_host *vhost = tgt->vhost; 3868 struct ibmvfc_host *vhost = tgt->vhost;
3743 struct fc_rport *rport = tgt->rport; 3869 struct fc_rport *rport;
3744 unsigned long flags; 3870 unsigned long flags;
3745 3871
3746 if (rport) { 3872 tgt_dbg(tgt, "Adding rport\n");
3747 tgt_dbg(tgt, "Setting rport roles\n"); 3873 rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
3748 fc_remote_port_rolechg(rport, tgt->ids.roles); 3874 spin_lock_irqsave(vhost->host->host_lock, flags);
3749 spin_lock_irqsave(vhost->host->host_lock, flags); 3875
3750 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 3876 if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
3877 tgt_dbg(tgt, "Deleting rport\n");
3878 list_del(&tgt->queue);
3751 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3879 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3880 fc_remote_port_delete(rport);
3881 del_timer_sync(&tgt->timer);
3882 kref_put(&tgt->kref, ibmvfc_release_tgt);
3752 return; 3883 return;
3753 } 3884 }
3754 3885
3755 tgt_dbg(tgt, "Adding rport\n");
3756 rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
3757 spin_lock_irqsave(vhost->host->host_lock, flags);
3758 tgt->rport = rport;
3759 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3760 if (rport) { 3886 if (rport) {
3761 tgt_dbg(tgt, "rport add succeeded\n"); 3887 tgt_dbg(tgt, "rport add succeeded\n");
3888 tgt->rport = rport;
3762 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff; 3889 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
3763 rport->supported_classes = 0; 3890 rport->supported_classes = 0;
3764 tgt->target_id = rport->scsi_target_id; 3891 tgt->target_id = rport->scsi_target_id;
@@ -3789,8 +3916,12 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3789 vhost->events_to_log = 0; 3916 vhost->events_to_log = 0;
3790 switch (vhost->action) { 3917 switch (vhost->action) {
3791 case IBMVFC_HOST_ACTION_NONE: 3918 case IBMVFC_HOST_ACTION_NONE:
3919 case IBMVFC_HOST_ACTION_LOGO_WAIT:
3792 case IBMVFC_HOST_ACTION_INIT_WAIT: 3920 case IBMVFC_HOST_ACTION_INIT_WAIT:
3793 break; 3921 break;
3922 case IBMVFC_HOST_ACTION_LOGO:
3923 vhost->job_step(vhost);
3924 break;
3794 case IBMVFC_HOST_ACTION_INIT: 3925 case IBMVFC_HOST_ACTION_INIT:
3795 BUG_ON(vhost->state != IBMVFC_INITIALIZING); 3926 BUG_ON(vhost->state != IBMVFC_INITIALIZING);
3796 if (vhost->delay_init) { 3927 if (vhost->delay_init) {
@@ -3836,11 +3967,21 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3836 3967
3837 if (vhost->state == IBMVFC_INITIALIZING) { 3968 if (vhost->state == IBMVFC_INITIALIZING) {
3838 if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) { 3969 if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
3839 ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); 3970 if (vhost->reinit) {
3840 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD); 3971 vhost->reinit = 0;
3841 vhost->init_retries = 0; 3972 scsi_block_requests(vhost->host);
3842 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3973 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
3843 scsi_unblock_requests(vhost->host); 3974 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3975 } else {
3976 ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
3977 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3978 wake_up(&vhost->init_wait_q);
3979 schedule_work(&vhost->rport_add_work_q);
3980 vhost->init_retries = 0;
3981 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3982 scsi_unblock_requests(vhost->host);
3983 }
3984
3844 return; 3985 return;
3845 } else { 3986 } else {
3846 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); 3987 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
@@ -3871,24 +4012,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
3871 if (!ibmvfc_dev_init_to_do(vhost)) 4012 if (!ibmvfc_dev_init_to_do(vhost))
3872 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED); 4013 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
3873 break; 4014 break;
3874 case IBMVFC_HOST_ACTION_TGT_ADD:
3875 list_for_each_entry(tgt, &vhost->targets, queue) {
3876 if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) {
3877 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3878 ibmvfc_tgt_add_rport(tgt);
3879 return;
3880 }
3881 }
3882
3883 if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
3884 vhost->reinit = 0;
3885 scsi_block_requests(vhost->host);
3886 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
3887 } else {
3888 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3889 wake_up(&vhost->init_wait_q);
3890 }
3891 break;
3892 default: 4015 default:
3893 break; 4016 break;
3894 }; 4017 };
@@ -4118,6 +4241,56 @@ nomem:
4118} 4241}
4119 4242
4120/** 4243/**
4244 * ibmvfc_rport_add_thread - Worker thread for rport adds
4245 * @work: work struct
4246 *
4247 **/
4248static void ibmvfc_rport_add_thread(struct work_struct *work)
4249{
4250 struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
4251 rport_add_work_q);
4252 struct ibmvfc_target *tgt;
4253 struct fc_rport *rport;
4254 unsigned long flags;
4255 int did_work;
4256
4257 ENTER;
4258 spin_lock_irqsave(vhost->host->host_lock, flags);
4259 do {
4260 did_work = 0;
4261 if (vhost->state != IBMVFC_ACTIVE)
4262 break;
4263
4264 list_for_each_entry(tgt, &vhost->targets, queue) {
4265 if (tgt->add_rport) {
4266 did_work = 1;
4267 tgt->add_rport = 0;
4268 kref_get(&tgt->kref);
4269 rport = tgt->rport;
4270 if (!rport) {
4271 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4272 ibmvfc_tgt_add_rport(tgt);
4273 } else if (get_device(&rport->dev)) {
4274 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4275 tgt_dbg(tgt, "Setting rport roles\n");
4276 fc_remote_port_rolechg(rport, tgt->ids.roles);
4277 put_device(&rport->dev);
4278 }
4279
4280 kref_put(&tgt->kref, ibmvfc_release_tgt);
4281 spin_lock_irqsave(vhost->host->host_lock, flags);
4282 break;
4283 }
4284 }
4285 } while (did_work);
4286
4287 if (vhost->state == IBMVFC_ACTIVE)
4288 vhost->scan_complete = 1;
4289 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4290 LEAVE;
4291}
4292
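ibmvfc_rport_add_thread() drops host_lock around the slow rport operations and then restarts its list scan, since the target list may change while the lock is released. A userspace model of that pattern using a pthread mutex (compile with -pthread):

/* Scan under a lock, drop the lock for slow per-element work, then
 * retake it and restart the scan, because the list may have changed. */
#include <pthread.h>
#include <stdio.h>

#define N 4
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pending[N] = { 1, 0, 1, 1 };

static void slow_add(int i)	/* stands in for fc_remote_port_add() */
{
	printf("adding rport for target %d (lock dropped)\n", i);
}

static void rport_add_worker(void)
{
	int did_work;

	pthread_mutex_lock(&lock);
	do {
		did_work = 0;
		for (int i = 0; i < N; i++) {
			if (pending[i]) {
				did_work = 1;
				pending[i] = 0;
				pthread_mutex_unlock(&lock);
				slow_add(i);
				pthread_mutex_lock(&lock);
				break;	/* restart: list may have changed */
			}
		}
	} while (did_work);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	rport_add_worker();
	return 0;
}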
4293/**
4121 * ibmvfc_probe - Adapter hot plug add entry point 4294 * ibmvfc_probe - Adapter hot plug add entry point
4122 * @vdev: vio device struct 4295 * @vdev: vio device struct
4123 * @id: vio device id struct 4296 * @id: vio device id struct
@@ -4160,6 +4333,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
4160 strcpy(vhost->partition_name, "UNKNOWN"); 4333 strcpy(vhost->partition_name, "UNKNOWN");
4161 init_waitqueue_head(&vhost->work_wait_q); 4334 init_waitqueue_head(&vhost->work_wait_q);
4162 init_waitqueue_head(&vhost->init_wait_q); 4335 init_waitqueue_head(&vhost->init_wait_q);
4336 INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
4163 4337
4164 if ((rc = ibmvfc_alloc_mem(vhost))) 4338 if ((rc = ibmvfc_alloc_mem(vhost)))
4165 goto free_scsi_host; 4339 goto free_scsi_host;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index ca1dcf7a756..c2668d7d67f 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
29#include "viosrp.h" 29#include "viosrp.h"
30 30
31#define IBMVFC_NAME "ibmvfc" 31#define IBMVFC_NAME "ibmvfc"
32#define IBMVFC_DRIVER_VERSION "1.0.5" 32#define IBMVFC_DRIVER_VERSION "1.0.6"
33#define IBMVFC_DRIVER_DATE "(March 19, 2009)" 33#define IBMVFC_DRIVER_DATE "(May 28, 2009)"
34 34
35#define IBMVFC_DEFAULT_TIMEOUT 60 35#define IBMVFC_DEFAULT_TIMEOUT 60
36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45 36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45
@@ -57,9 +57,10 @@
57 * Ensure we have resources for ERP and initialization: 57 * Ensure we have resources for ERP and initialization:
58 * 1 for ERP 58 * 1 for ERP
59 * 1 for initialization 59 * 1 for initialization
60 * 1 for NPIV Logout
60 * 2 for each discovery thread 61 * 2 for each discovery thread
61 */ 62 */
62#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + (disc_threads * 2)) 63#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + (disc_threads * 2))
63 64
64#define IBMVFC_MAD_SUCCESS 0x00 65#define IBMVFC_MAD_SUCCESS 0x00
65#define IBMVFC_MAD_NOT_SUPPORTED 0xF1 66#define IBMVFC_MAD_NOT_SUPPORTED 0xF1
@@ -127,6 +128,7 @@ enum ibmvfc_mad_types {
127 IBMVFC_IMPLICIT_LOGOUT = 0x0040, 128 IBMVFC_IMPLICIT_LOGOUT = 0x0040,
128 IBMVFC_PASSTHRU = 0x0200, 129 IBMVFC_PASSTHRU = 0x0200,
129 IBMVFC_TMF_MAD = 0x0100, 130 IBMVFC_TMF_MAD = 0x0100,
131 IBMVFC_NPIV_LOGOUT = 0x0800,
130}; 132};
131 133
132struct ibmvfc_mad_common { 134struct ibmvfc_mad_common {
@@ -143,6 +145,10 @@ struct ibmvfc_npiv_login_mad {
143 struct srp_direct_buf buffer; 145 struct srp_direct_buf buffer;
144}__attribute__((packed, aligned (8))); 146}__attribute__((packed, aligned (8)));
145 147
148struct ibmvfc_npiv_logout_mad {
149 struct ibmvfc_mad_common common;
150}__attribute__((packed, aligned (8)));
151
146#define IBMVFC_MAX_NAME 256 152#define IBMVFC_MAX_NAME 256
147 153
148struct ibmvfc_npiv_login { 154struct ibmvfc_npiv_login {
@@ -201,7 +207,8 @@ struct ibmvfc_npiv_login_resp {
201#define IBMVFC_NATIVE_FC 0x01 207#define IBMVFC_NATIVE_FC 0x01
202#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 208#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
203 u32 reserved; 209 u32 reserved;
204 u64 capabilites; 210 u64 capabilities;
211#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
205 u32 max_cmds; 212 u32 max_cmds;
206 u32 scsi_id_sz; 213 u32 scsi_id_sz;
207 u64 max_dma_len; 214 u64 max_dma_len;
@@ -541,9 +548,17 @@ struct ibmvfc_crq_queue {
541 dma_addr_t msg_token; 548 dma_addr_t msg_token;
542}; 549};
543 550
551enum ibmvfc_ae_link_state {
552 IBMVFC_AE_LS_LINK_UP = 0x01,
553 IBMVFC_AE_LS_LINK_BOUNCED = 0x02,
554 IBMVFC_AE_LS_LINK_DOWN = 0x04,
555 IBMVFC_AE_LS_LINK_DEAD = 0x08,
556};
557
544struct ibmvfc_async_crq { 558struct ibmvfc_async_crq {
545 volatile u8 valid; 559 volatile u8 valid;
546 u8 pad[3]; 560 u8 link_state;
561 u8 pad[2];
547 u32 pad2; 562 u32 pad2;
548 volatile u64 event; 563 volatile u64 event;
549 volatile u64 scsi_id; 564 volatile u64 scsi_id;
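The ibmvfc_async_crq change carves link_state out of the old pad[3], leaving the CRQ's size and the offsets of the later fields untouched. A compile-time check of that invariant, with an illustrative (not wire-exact) layout, using C11 static_assert:

/* Carving one byte of padding into a named field must not move
 * anything: both layouts are checked at compile time. */
#include <stdint.h>
#include <assert.h>
#include <stddef.h>

struct async_crq_old {
	volatile uint8_t valid;
	uint8_t pad[3];
	uint32_t pad2;
	volatile uint64_t event;
};

struct async_crq_new {
	volatile uint8_t valid;
	uint8_t link_state;	/* carved out of pad[3] */
	uint8_t pad[2];
	uint32_t pad2;
	volatile uint64_t event;
};

int main(void)
{
	static_assert(sizeof(struct async_crq_old) ==
		      sizeof(struct async_crq_new), "size must not change");
	static_assert(offsetof(struct async_crq_old, event) ==
		      offsetof(struct async_crq_new, event),
		      "later offsets must not move");
	return 0;
}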
@@ -561,6 +576,7 @@ struct ibmvfc_async_crq_queue {
561union ibmvfc_iu { 576union ibmvfc_iu {
562 struct ibmvfc_mad_common mad_common; 577 struct ibmvfc_mad_common mad_common;
563 struct ibmvfc_npiv_login_mad npiv_login; 578 struct ibmvfc_npiv_login_mad npiv_login;
579 struct ibmvfc_npiv_logout_mad npiv_logout;
564 struct ibmvfc_discover_targets discover_targets; 580 struct ibmvfc_discover_targets discover_targets;
565 struct ibmvfc_port_login plogi; 581 struct ibmvfc_port_login plogi;
566 struct ibmvfc_process_login prli; 582 struct ibmvfc_process_login prli;
@@ -575,7 +591,6 @@ enum ibmvfc_target_action {
575 IBMVFC_TGT_ACTION_NONE = 0, 591 IBMVFC_TGT_ACTION_NONE = 0,
576 IBMVFC_TGT_ACTION_INIT, 592 IBMVFC_TGT_ACTION_INIT,
577 IBMVFC_TGT_ACTION_INIT_WAIT, 593 IBMVFC_TGT_ACTION_INIT_WAIT,
578 IBMVFC_TGT_ACTION_ADD_RPORT,
579 IBMVFC_TGT_ACTION_DEL_RPORT, 594 IBMVFC_TGT_ACTION_DEL_RPORT,
580}; 595};
581 596
@@ -588,6 +603,7 @@ struct ibmvfc_target {
588 int target_id; 603 int target_id;
589 enum ibmvfc_target_action action; 604 enum ibmvfc_target_action action;
590 int need_login; 605 int need_login;
606 int add_rport;
591 int init_retries; 607 int init_retries;
592 u32 cancel_key; 608 u32 cancel_key;
593 struct ibmvfc_service_parms service_parms; 609 struct ibmvfc_service_parms service_parms;
@@ -627,6 +643,8 @@ struct ibmvfc_event_pool {
627 643
628enum ibmvfc_host_action { 644enum ibmvfc_host_action {
629 IBMVFC_HOST_ACTION_NONE = 0, 645 IBMVFC_HOST_ACTION_NONE = 0,
646 IBMVFC_HOST_ACTION_LOGO,
647 IBMVFC_HOST_ACTION_LOGO_WAIT,
630 IBMVFC_HOST_ACTION_INIT, 648 IBMVFC_HOST_ACTION_INIT,
631 IBMVFC_HOST_ACTION_INIT_WAIT, 649 IBMVFC_HOST_ACTION_INIT_WAIT,
632 IBMVFC_HOST_ACTION_QUERY, 650 IBMVFC_HOST_ACTION_QUERY,
@@ -635,7 +653,6 @@ enum ibmvfc_host_action {
635 IBMVFC_HOST_ACTION_ALLOC_TGTS, 653 IBMVFC_HOST_ACTION_ALLOC_TGTS,
636 IBMVFC_HOST_ACTION_TGT_INIT, 654 IBMVFC_HOST_ACTION_TGT_INIT,
637 IBMVFC_HOST_ACTION_TGT_DEL_FAILED, 655 IBMVFC_HOST_ACTION_TGT_DEL_FAILED,
638 IBMVFC_HOST_ACTION_TGT_ADD,
639}; 656};
640 657
641enum ibmvfc_host_state { 658enum ibmvfc_host_state {
@@ -682,6 +699,8 @@ struct ibmvfc_host {
682 int client_migrated; 699 int client_migrated;
683 int reinit; 700 int reinit;
684 int delay_init; 701 int delay_init;
702 int scan_complete;
703 int logged_in;
685 int events_to_log; 704 int events_to_log;
686#define IBMVFC_AE_LINKUP 0x0001 705#define IBMVFC_AE_LINKUP 0x0001
687#define IBMVFC_AE_LINKDOWN 0x0002 706#define IBMVFC_AE_LINKDOWN 0x0002
@@ -692,6 +711,7 @@ struct ibmvfc_host {
692 void (*job_step) (struct ibmvfc_host *); 711 void (*job_step) (struct ibmvfc_host *);
693 struct task_struct *work_thread; 712 struct task_struct *work_thread;
694 struct tasklet_struct tasklet; 713 struct tasklet_struct tasklet;
714 struct work_struct rport_add_work_q;
695 wait_queue_head_t init_wait_q; 715 wait_queue_head_t init_wait_q;
696 wait_queue_head_t work_wait_q; 716 wait_queue_head_t work_wait_q;
697}; 717};
@@ -707,6 +727,12 @@ struct ibmvfc_host {
707#define tgt_err(t, fmt, ...) \ 727#define tgt_err(t, fmt, ...) \
708 dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__) 728 dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
709 729
730#define tgt_log(t, level, fmt, ...) \
731 do { \
732 if ((t)->vhost->log_level >= level) \
733 tgt_err(t, fmt, ##__VA_ARGS__); \
734 } while (0)
735
710#define ibmvfc_dbg(vhost, ...) \ 736#define ibmvfc_dbg(vhost, ...) \
711 DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__)) 737 DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))
712 738
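The new tgt_log() above is a classic multi-statement macro wrapped in do { ... } while (0) so that the expansion acts as exactly one statement. A small stand-alone demonstration of why that wrapper matters:

/* The do/while(0) wrapper lets the macro compose with an unbraced
 * if/else exactly like a single function call would. */
#include <stdio.h>

static int log_level = 1;

#define tgt_log(level, msg) \
	do { \
		if (log_level >= (level)) \
			puts(msg); \
	} while (0)

int main(void)
{
	if (log_level > 0)
		tgt_log(1, "one statement, so the if binds correctly");
	else
		tgt_log(2, "and the else still pairs with the outer if");
	return 0;
}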
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index c9aa7611e40..11d2602ae88 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -70,6 +70,7 @@
70#include <linux/moduleparam.h> 70#include <linux/moduleparam.h>
71#include <linux/dma-mapping.h> 71#include <linux/dma-mapping.h>
72#include <linux/delay.h> 72#include <linux/delay.h>
73#include <linux/of.h>
73#include <asm/firmware.h> 74#include <asm/firmware.h>
74#include <asm/vio.h> 75#include <asm/vio.h>
75#include <asm/firmware.h> 76#include <asm/firmware.h>
@@ -87,9 +88,15 @@
87 */ 88 */
88static int max_id = 64; 89static int max_id = 64;
89static int max_channel = 3; 90static int max_channel = 3;
90static int init_timeout = 5; 91static int init_timeout = 300;
92static int login_timeout = 60;
93static int info_timeout = 30;
94static int abort_timeout = 60;
95static int reset_timeout = 60;
91static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; 96static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
92static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; 97static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
98static int fast_fail = 1;
99static int client_reserve = 1;
93 100
94static struct scsi_transport_template *ibmvscsi_transport_template; 101static struct scsi_transport_template *ibmvscsi_transport_template;
95 102
@@ -110,6 +117,10 @@ module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
110MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds"); 117MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
111module_param_named(max_requests, max_requests, int, S_IRUGO); 118module_param_named(max_requests, max_requests, int, S_IRUGO);
112MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter"); 119MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
120module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
121MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
122module_param_named(client_reserve, client_reserve, int, S_IRUGO);
123MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
113 124
114/* ------------------------------------------------------------ 125/* ------------------------------------------------------------
115 * Routines for the event pool and event structs 126 * Routines for the event pool and event structs
@@ -781,105 +792,53 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
781/* ------------------------------------------------------------ 792/* ------------------------------------------------------------
782 * Routines for driver initialization 793 * Routines for driver initialization
783 */ 794 */
795
784/** 796/**
785 * adapter_info_rsp: - Handle response to MAD adapter info request 797 * map_persist_bufs: - Pre-map persistent data for adapter logins
786 * @evt_struct: srp_event_struct with the response 798 * @hostdata: ibmvscsi_host_data of host
787 * 799 *
788 * Used as a "done" callback by when sending adapter_info. Gets called 800 * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
789 * by ibmvscsi_handle_crq() 801 * Return 1 on error, 0 on success.
790*/ 802 */
791static void adapter_info_rsp(struct srp_event_struct *evt_struct) 803static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
792{ 804{
793 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
794 dma_unmap_single(hostdata->dev,
795 evt_struct->iu.mad.adapter_info.buffer,
796 evt_struct->iu.mad.adapter_info.common.length,
797 DMA_BIDIRECTIONAL);
798 805
799 if (evt_struct->xfer_iu->mad.adapter_info.common.status) { 806 hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
800 dev_err(hostdata->dev, "error %d getting adapter info\n", 807 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
801 evt_struct->xfer_iu->mad.adapter_info.common.status); 808
802 } else { 809 if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
803 dev_info(hostdata->dev, "host srp version: %s, " 810 dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
804 "host partition %s (%d), OS %d, max io %u\n", 811 return 1;
805 hostdata->madapter_info.srp_version,
806 hostdata->madapter_info.partition_name,
807 hostdata->madapter_info.partition_number,
808 hostdata->madapter_info.os_type,
809 hostdata->madapter_info.port_max_txu[0]);
810
811 if (hostdata->madapter_info.port_max_txu[0])
812 hostdata->host->max_sectors =
813 hostdata->madapter_info.port_max_txu[0] >> 9;
814
815 if (hostdata->madapter_info.os_type == 3 &&
816 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
817 dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
818 hostdata->madapter_info.srp_version);
819 dev_err(hostdata->dev, "limiting scatterlists to %d\n",
820 MAX_INDIRECT_BUFS);
821 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
822 }
823 } 812 }
813
814 hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
815 &hostdata->madapter_info,
816 sizeof(hostdata->madapter_info),
817 DMA_BIDIRECTIONAL);
818 if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
819 dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
820 dma_unmap_single(hostdata->dev, hostdata->caps_addr,
821 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
822 return 1;
823 }
824
825 return 0;
824} 826}
825 827
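map_persist_bufs() maps both login buffers once at probe time and unwinds the first mapping when the second fails, so later logins cannot hit a runtime mapping failure. A userspace model of that acquire/unwind shape, with malloc() standing in for dma_map_single():

/* Acquire two long-lived buffers up front; on partial failure,
 * release what was already acquired and report the error. */
#include <stdio.h>
#include <stdlib.h>

struct host { void *caps; void *adapter_info; };

static int map_persist_bufs(struct host *h)
{
	h->caps = malloc(64);
	if (!h->caps) {
		fprintf(stderr, "Unable to map capabilities buffer!\n");
		return 1;
	}
	h->adapter_info = malloc(256);
	if (!h->adapter_info) {
		fprintf(stderr, "Unable to map adapter info buffer!\n");
		free(h->caps);	/* unwind the first mapping */
		return 1;
	}
	return 0;
}

static void unmap_persist_bufs(struct host *h)
{
	free(h->caps);
	free(h->adapter_info);
}

int main(void)
{
	struct host h;

	if (!map_persist_bufs(&h))
		unmap_persist_bufs(&h);
	return 0;
}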
826/** 828/**
827 * send_mad_adapter_info: - Sends the mad adapter info request 829 * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
828 * and stores the result so it can be retrieved with 830 * @hostdata: ibmvscsi_host_data of host
829 * sysfs. We COULD consider causing a failure if the 831 *
830 * returned SRP version doesn't match ours. 832 * Unmap the capabilities and adapter info DMA buffers
831 * @hostdata: ibmvscsi_host_data of host 833 */
832 * 834static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
833 * Returns zero if successful.
834*/
835static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
836{ 835{
837 struct viosrp_adapter_info *req; 836 dma_unmap_single(hostdata->dev, hostdata->caps_addr,
838 struct srp_event_struct *evt_struct; 837 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
839 unsigned long flags;
840 dma_addr_t addr;
841
842 evt_struct = get_event_struct(&hostdata->pool);
843 if (!evt_struct) {
844 dev_err(hostdata->dev,
845 "couldn't allocate an event for ADAPTER_INFO_REQ!\n");
846 return;
847 }
848
849 init_event_struct(evt_struct,
850 adapter_info_rsp,
851 VIOSRP_MAD_FORMAT,
852 init_timeout);
853
854 req = &evt_struct->iu.mad.adapter_info;
855 memset(req, 0x00, sizeof(*req));
856
857 req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
858 req->common.length = sizeof(hostdata->madapter_info);
859 req->buffer = addr = dma_map_single(hostdata->dev,
860 &hostdata->madapter_info,
861 sizeof(hostdata->madapter_info),
862 DMA_BIDIRECTIONAL);
863 838
864 if (dma_mapping_error(hostdata->dev, req->buffer)) { 839 dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
865 if (!firmware_has_feature(FW_FEATURE_CMO)) 840 sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
866 dev_err(hostdata->dev, 841}
867 "Unable to map request_buffer for "
868 "adapter_info!\n");
869 free_event_struct(&hostdata->pool, evt_struct);
870 return;
871 }
872
873 spin_lock_irqsave(hostdata->host->host_lock, flags);
874 if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
875 dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
876 dma_unmap_single(hostdata->dev,
877 addr,
878 sizeof(hostdata->madapter_info),
879 DMA_BIDIRECTIONAL);
880 }
881 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
882};
883 842
884/** 843/**
885 * login_rsp: - Handle response to SRP login request 844 * login_rsp: - Handle response to SRP login request
@@ -909,9 +868,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
909 } 868 }
910 869
911 dev_info(hostdata->dev, "SRP_LOGIN succeeded\n"); 870 dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
912 871 hostdata->client_migrated = 0;
913 if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
914 dev_err(hostdata->dev, "Invalid request_limit.\n");
915 872
916 /* Now we know what the real request-limit is. 873 /* Now we know what the real request-limit is.
917 * This value is set rather than added to request_limit because 874 * This value is set rather than added to request_limit because
@@ -922,15 +879,12 @@ static void login_rsp(struct srp_event_struct *evt_struct)
922 879
923 /* If we had any pending I/Os, kick them */ 880 /* If we had any pending I/Os, kick them */
924 scsi_unblock_requests(hostdata->host); 881 scsi_unblock_requests(hostdata->host);
925
926 send_mad_adapter_info(hostdata);
927 return;
928} 882}
929 883
930/** 884/**
931 * send_srp_login: - Sends the srp login 885 * send_srp_login: - Sends the srp login
932 * @hostdata: ibmvscsi_host_data of host 886 * @hostdata: ibmvscsi_host_data of host
933 * 887 *
934 * Returns zero if successful. 888 * Returns zero if successful.
935*/ 889*/
936static int send_srp_login(struct ibmvscsi_host_data *hostdata) 890static int send_srp_login(struct ibmvscsi_host_data *hostdata)
@@ -939,22 +893,17 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
939 unsigned long flags; 893 unsigned long flags;
940 struct srp_login_req *login; 894 struct srp_login_req *login;
941 struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); 895 struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
942 if (!evt_struct) {
943 dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
944 return FAILED;
945 }
946 896
947 init_event_struct(evt_struct, 897 BUG_ON(!evt_struct);
948 login_rsp, 898 init_event_struct(evt_struct, login_rsp,
949 VIOSRP_SRP_FORMAT, 899 VIOSRP_SRP_FORMAT, login_timeout);
950 init_timeout);
951 900
952 login = &evt_struct->iu.srp.login_req; 901 login = &evt_struct->iu.srp.login_req;
953 memset(login, 0x00, sizeof(struct srp_login_req)); 902 memset(login, 0, sizeof(*login));
954 login->opcode = SRP_LOGIN_REQ; 903 login->opcode = SRP_LOGIN_REQ;
955 login->req_it_iu_len = sizeof(union srp_iu); 904 login->req_it_iu_len = sizeof(union srp_iu);
956 login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; 905 login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
957 906
958 spin_lock_irqsave(hostdata->host->host_lock, flags); 907 spin_lock_irqsave(hostdata->host->host_lock, flags);
959 /* Start out with a request limit of 0, since this is negotiated in 908 /* Start out with a request limit of 0, since this is negotiated in
960 * the login request we are just sending and login requests always 909 * the login request we are just sending and login requests always
@@ -962,13 +911,241 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
962 */ 911 */
963 atomic_set(&hostdata->request_limit, 0); 912 atomic_set(&hostdata->request_limit, 0);
964 913
965 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); 914 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
966 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 915 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
967 dev_info(hostdata->dev, "sent SRP login\n"); 916 dev_info(hostdata->dev, "sent SRP login\n");
968 return rc; 917 return rc;
969}; 918};
970 919
971/** 920/**
921 * capabilities_rsp: - Handle response to MAD adapter capabilities request
922 * @evt_struct: srp_event_struct with the response
923 *
924 * Used as a "done" callback when sending the capabilities request.
925 */
926static void capabilities_rsp(struct srp_event_struct *evt_struct)
927{
928 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
929
930 if (evt_struct->xfer_iu->mad.capabilities.common.status) {
931 dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
932 evt_struct->xfer_iu->mad.capabilities.common.status);
933 } else {
934 if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
935 dev_info(hostdata->dev, "Partition migration not supported\n");
936
937 if (client_reserve) {
938 if (hostdata->caps.reserve.common.server_support ==
939 SERVER_SUPPORTS_CAP)
940 dev_info(hostdata->dev, "Client reserve enabled\n");
941 else
942 dev_info(hostdata->dev, "Client reserve not supported\n");
943 }
944 }
945
946 send_srp_login(hostdata);
947}
948
949/**
950 * send_mad_capabilities: - Sends the mad capabilities request
951 * and stores the result so it can be retrieved with sysfs.
952 * @hostdata: ibmvscsi_host_data of host
953 */
954static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
955{
956 struct viosrp_capabilities *req;
957 struct srp_event_struct *evt_struct;
958 unsigned long flags;
959 struct device_node *of_node = hostdata->dev->archdata.of_node;
960 const char *location;
961
962 evt_struct = get_event_struct(&hostdata->pool);
963 BUG_ON(!evt_struct);
964
965 init_event_struct(evt_struct, capabilities_rsp,
966 VIOSRP_MAD_FORMAT, info_timeout);
967
968 req = &evt_struct->iu.mad.capabilities;
969 memset(req, 0, sizeof(*req));
970
971 hostdata->caps.flags = CAP_LIST_SUPPORTED;
972 if (hostdata->client_migrated)
973 hostdata->caps.flags |= CLIENT_MIGRATED;
974
975 strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
976 sizeof(hostdata->caps.name));
977 hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';
978
979 location = of_get_property(of_node, "ibm,loc-code", NULL);
980 location = location ? location : dev_name(hostdata->dev);
981 strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
982 hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
983
984 req->common.type = VIOSRP_CAPABILITIES_TYPE;
985 req->buffer = hostdata->caps_addr;
986
987 hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
988 hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
989 hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
990 hostdata->caps.migration.ecl = 1;
991
992 if (client_reserve) {
993 hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
994 hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
995 hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
996 hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
997 req->common.length = sizeof(hostdata->caps);
998 } else
999 req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
1000
1001 spin_lock_irqsave(hostdata->host->host_lock, flags);
1002 if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
1003 dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
1004 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1005};
1006
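The capabilities code above pairs strncpy() with an explicit terminating NUL because strncpy() does not terminate the destination when the source fills it. A self-contained demonstration; the location code is made up:

/* strncpy() truncation leaves no NUL, so one must be forced. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char loc[8];
	const char *location = "U78A0.001.DNWGPL0-P1-C1";	/* too long */

	strncpy(loc, location, sizeof(loc));
	loc[sizeof(loc) - 1] = '\0';	/* force termination */
	printf("%s\n", loc);
	return 0;
}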
1007/**
1008 * fast_fail_rsp: - Handle response to MAD enable fast fail
1009 * @evt_struct: srp_event_struct with the response
1010 *
1011 * Used as a "done" callback when sending enable fast fail. Gets called
1012 * by ibmvscsi_handle_crq()
1013 */
1014static void fast_fail_rsp(struct srp_event_struct *evt_struct)
1015{
1016 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
1017 u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;
1018
1019 if (status == VIOSRP_MAD_NOT_SUPPORTED)
1020 dev_err(hostdata->dev, "fast_fail not supported in server\n");
1021 else if (status == VIOSRP_MAD_FAILED)
1022 dev_err(hostdata->dev, "fast_fail request failed\n");
1023 else if (status != VIOSRP_MAD_SUCCESS)
1024 dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);
1025
1026 send_mad_capabilities(hostdata);
1027}
1028
1029/**
1030 * enable_fast_fail - Enable fast fail for the client adapter
1031 * @hostdata: ibmvscsi_host_data of host
1032 *
1033 * Returns zero if successful.
1034 */
1035static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
1036{
1037 int rc;
1038 unsigned long flags;
1039 struct viosrp_fast_fail *fast_fail_mad;
1040 struct srp_event_struct *evt_struct;
1041
1042 if (!fast_fail) {
1043 send_mad_capabilities(hostdata);
1044 return 0;
1045 }
1046
1047 evt_struct = get_event_struct(&hostdata->pool);
1048 BUG_ON(!evt_struct);
1049
1050 init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);
1051
1052 fast_fail_mad = &evt_struct->iu.mad.fast_fail;
1053 memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
1054 fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
1055 fast_fail_mad->common.length = sizeof(*fast_fail_mad);
1056
1057 spin_lock_irqsave(hostdata->host->host_lock, flags);
1058 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
1059 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1060 return rc;
1061}
1062
1063/**
1064 * adapter_info_rsp: - Handle response to MAD adapter info request
1065 * @evt_struct: srp_event_struct with the response
1066 *
1067 * Used as a "done" callback when sending adapter_info. Gets called
1068 * by ibmvscsi_handle_crq()
1069*/
1070static void adapter_info_rsp(struct srp_event_struct *evt_struct)
1071{
1072 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
1073
1074 if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
1075 dev_err(hostdata->dev, "error %d getting adapter info\n",
1076 evt_struct->xfer_iu->mad.adapter_info.common.status);
1077 } else {
1078 dev_info(hostdata->dev, "host srp version: %s, "
1079 "host partition %s (%d), OS %d, max io %u\n",
1080 hostdata->madapter_info.srp_version,
1081 hostdata->madapter_info.partition_name,
1082 hostdata->madapter_info.partition_number,
1083 hostdata->madapter_info.os_type,
1084 hostdata->madapter_info.port_max_txu[0]);
1085
1086 if (hostdata->madapter_info.port_max_txu[0])
1087 hostdata->host->max_sectors =
1088 hostdata->madapter_info.port_max_txu[0] >> 9;
1089
1090 if (hostdata->madapter_info.os_type == 3 &&
1091 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
1092 dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
1093 hostdata->madapter_info.srp_version);
1094 dev_err(hostdata->dev, "limiting scatterlists to %d\n",
1095 MAX_INDIRECT_BUFS);
1096 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
1097 }
1098 }
1099
1100 enable_fast_fail(hostdata);
1101}
1102
1103/**
1104 * send_mad_adapter_info: - Sends the mad adapter info request
1105 * and stores the result so it can be retrieved with
1106 * sysfs. We COULD consider causing a failure if the
1107 * returned SRP version doesn't match ours.
1108 * @hostdata: ibmvscsi_host_data of host
1109 *
1110 * Returns zero if successful.
1111*/
1112static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
1113{
1114 struct viosrp_adapter_info *req;
1115 struct srp_event_struct *evt_struct;
1116 unsigned long flags;
1117
1118 evt_struct = get_event_struct(&hostdata->pool);
1119 BUG_ON(!evt_struct);
1120
1121 init_event_struct(evt_struct,
1122 adapter_info_rsp,
1123 VIOSRP_MAD_FORMAT,
1124 info_timeout);
1125
1126 req = &evt_struct->iu.mad.adapter_info;
1127 memset(req, 0x00, sizeof(*req));
1128
1129 req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
1130 req->common.length = sizeof(hostdata->madapter_info);
1131 req->buffer = hostdata->adapter_info_addr;
1132
1133 spin_lock_irqsave(hostdata->host->host_lock, flags);
1134 if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
1135 dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
1136 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1137};
1138
1139/**
1140 * init_adapter: Start virtual adapter initialization sequence
1141 *
1142 */
1143static void init_adapter(struct ibmvscsi_host_data *hostdata)
1144{
1145 send_mad_adapter_info(hostdata);
1146}
1147
1148/**
972 * sync_completion: Signal that a synchronous command has completed 1149 * sync_completion: Signal that a synchronous command has completed
973 * Note that after returning from this call, the evt_struct is freed. 1150 * Note that after returning from this call, the evt_struct is freed.
974 * the caller waiting on this completion shouldn't touch the evt_struct 1151 * the caller waiting on this completion shouldn't touch the evt_struct
@@ -1029,7 +1206,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
1029 init_event_struct(evt, 1206 init_event_struct(evt,
1030 sync_completion, 1207 sync_completion,
1031 VIOSRP_SRP_FORMAT, 1208 VIOSRP_SRP_FORMAT,
1032 init_timeout); 1209 abort_timeout);
1033 1210
1034 tsk_mgmt = &evt->iu.srp.tsk_mgmt; 1211 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1035 1212
@@ -1043,7 +1220,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
1043 evt->sync_srp = &srp_rsp; 1220 evt->sync_srp = &srp_rsp;
1044 1221
1045 init_completion(&evt->comp); 1222 init_completion(&evt->comp);
1046 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); 1223 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);
1047 1224
1048 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) 1225 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
1049 break; 1226 break;
@@ -1152,7 +1329,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1152 init_event_struct(evt, 1329 init_event_struct(evt,
1153 sync_completion, 1330 sync_completion,
1154 VIOSRP_SRP_FORMAT, 1331 VIOSRP_SRP_FORMAT,
1155 init_timeout); 1332 reset_timeout);
1156 1333
1157 tsk_mgmt = &evt->iu.srp.tsk_mgmt; 1334 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1158 1335
@@ -1165,7 +1342,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1165 evt->sync_srp = &srp_rsp; 1342 evt->sync_srp = &srp_rsp;
1166 1343
1167 init_completion(&evt->comp); 1344 init_completion(&evt->comp);
1168 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); 1345 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);
1169 1346
1170 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) 1347 if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
1171 break; 1348 break;
@@ -1281,7 +1458,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1281 if ((rc = ibmvscsi_ops->send_crq(hostdata, 1458 if ((rc = ibmvscsi_ops->send_crq(hostdata,
1282 0xC002000000000000LL, 0)) == 0) { 1459 0xC002000000000000LL, 0)) == 0) {
1283 /* Now login */ 1460 /* Now login */
1284 send_srp_login(hostdata); 1461 init_adapter(hostdata);
1285 } else { 1462 } else {
1286 dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc); 1463 dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
1287 } 1464 }
@@ -1291,7 +1468,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1291 dev_info(hostdata->dev, "partner initialization complete\n"); 1468 dev_info(hostdata->dev, "partner initialization complete\n");
1292 1469
1293 /* Now login */ 1470 /* Now login */
1294 send_srp_login(hostdata); 1471 init_adapter(hostdata);
1295 break; 1472 break;
1296 default: 1473 default:
1297 dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format); 1474 dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
@@ -1303,6 +1480,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1303 if (crq->format == 0x06) { 1480 if (crq->format == 0x06) {
1304 /* We need to re-setup the interpartition connection */ 1481 /* We need to re-setup the interpartition connection */
1305 dev_info(hostdata->dev, "Re-enabling adapter!\n"); 1482 dev_info(hostdata->dev, "Re-enabling adapter!\n");
1483 hostdata->client_migrated = 1;
1306 purge_requests(hostdata, DID_REQUEUE); 1484 purge_requests(hostdata, DID_REQUEUE);
1307 if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, 1485 if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue,
1308 hostdata)) || 1486 hostdata)) ||
@@ -1397,7 +1575,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1397 init_event_struct(evt_struct, 1575 init_event_struct(evt_struct,
1398 sync_completion, 1576 sync_completion,
1399 VIOSRP_MAD_FORMAT, 1577 VIOSRP_MAD_FORMAT,
1400 init_timeout); 1578 info_timeout);
1401 1579
1402 host_config = &evt_struct->iu.mad.host_config; 1580 host_config = &evt_struct->iu.mad.host_config;
1403 1581
@@ -1419,7 +1597,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1419 1597
1420 init_completion(&evt_struct->comp); 1598 init_completion(&evt_struct->comp);
1421 spin_lock_irqsave(hostdata->host->host_lock, flags); 1599 spin_lock_irqsave(hostdata->host->host_lock, flags);
1422 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); 1600 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
1423 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 1601 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1424 if (rc == 0) 1602 if (rc == 0)
1425 wait_for_completion(&evt_struct->comp); 1603 wait_for_completion(&evt_struct->comp);
@@ -1444,7 +1622,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1444 spin_lock_irqsave(shost->host_lock, lock_flags); 1622 spin_lock_irqsave(shost->host_lock, lock_flags);
1445 if (sdev->type == TYPE_DISK) { 1623 if (sdev->type == TYPE_DISK) {
1446 sdev->allow_restart = 1; 1624 sdev->allow_restart = 1;
1447 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); 1625 blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
1448 } 1626 }
1449 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); 1627 scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
1450 spin_unlock_irqrestore(shost->host_lock, lock_flags); 1628 spin_unlock_irqrestore(shost->host_lock, lock_flags);
@@ -1471,6 +1649,46 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
1471/* ------------------------------------------------------------ 1649/* ------------------------------------------------------------
1472 * sysfs attributes 1650 * sysfs attributes
1473 */ 1651 */
1652static ssize_t show_host_vhost_loc(struct device *dev,
1653 struct device_attribute *attr, char *buf)
1654{
1655 struct Scsi_Host *shost = class_to_shost(dev);
1656 struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1657 int len;
1658
1659 len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
1660 hostdata->caps.loc);
1661 return len;
1662}
1663
1664static struct device_attribute ibmvscsi_host_vhost_loc = {
1665 .attr = {
1666 .name = "vhost_loc",
1667 .mode = S_IRUGO,
1668 },
1669 .show = show_host_vhost_loc,
1670};
1671
1672static ssize_t show_host_vhost_name(struct device *dev,
1673 struct device_attribute *attr, char *buf)
1674{
1675 struct Scsi_Host *shost = class_to_shost(dev);
1676 struct ibmvscsi_host_data *hostdata = shost_priv(shost);
1677 int len;
1678
1679 len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
1680 hostdata->caps.name);
1681 return len;
1682}
1683
1684static struct device_attribute ibmvscsi_host_vhost_name = {
1685 .attr = {
1686 .name = "vhost_name",
1687 .mode = S_IRUGO,
1688 },
1689 .show = show_host_vhost_name,
1690};
1691
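The two new read-only attributes expose the server's name and location strings from struct capabilities. A minimal userspace sketch for reading them, assuming the usual /sys/class/scsi_host layout for Scsi_Host attributes (host0 is a hypothetical host name):

    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            FILE *f = fopen("/sys/class/scsi_host/host0/vhost_loc", "r");

            if (!f)
                    return 1;
            if (fgets(buf, sizeof(buf), f))
                    printf("vhost location: %s", buf); /* value ends in '\n' */
            fclose(f);
            return 0;
    }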
1474static ssize_t show_host_srp_version(struct device *dev, 1692static ssize_t show_host_srp_version(struct device *dev,
1475 struct device_attribute *attr, char *buf) 1693 struct device_attribute *attr, char *buf)
1476{ 1694{
@@ -1594,6 +1812,8 @@ static struct device_attribute ibmvscsi_host_config = {
1594}; 1812};
1595 1813
1596static struct device_attribute *ibmvscsi_attrs[] = { 1814static struct device_attribute *ibmvscsi_attrs[] = {
1815 &ibmvscsi_host_vhost_loc,
1816 &ibmvscsi_host_vhost_name,
1597 &ibmvscsi_host_srp_version, 1817 &ibmvscsi_host_srp_version,
1598 &ibmvscsi_host_partition_name, 1818 &ibmvscsi_host_partition_name,
1599 &ibmvscsi_host_partition_number, 1819 &ibmvscsi_host_partition_number,
@@ -1674,6 +1894,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1674 atomic_set(&hostdata->request_limit, -1); 1894 atomic_set(&hostdata->request_limit, -1);
1675 hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT; 1895 hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
1676 1896
1897 if (map_persist_bufs(hostdata)) {
1898 dev_err(&vdev->dev, "couldn't map persistent buffers\n");
1899 goto persist_bufs_failed;
1900 }
1901
1677 rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events); 1902 rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
1678 if (rc != 0 && rc != H_RESOURCE) { 1903 if (rc != 0 && rc != H_RESOURCE) {
1679 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc); 1904 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
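map_persist_bufs() and unmap_persist_bufs() themselves are not part of this excerpt. A sketch under the assumption that they only establish the DMA mappings behind hostdata->caps_addr and hostdata->adapter_info_addr used by the MADs above (the body below is this note's guess, not the patch):

    static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
    {
            /* map the capabilities buffer referenced by the capabilities MAD */
            hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
                                                 sizeof(hostdata->caps),
                                                 DMA_BIDIRECTIONAL);
            if (dma_mapping_error(hostdata->dev, hostdata->caps_addr))
                    return 1;

            /* map madapter_info for the ADAPTER_INFO MAD */
            hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
                                                         &hostdata->madapter_info,
                                                         sizeof(hostdata->madapter_info),
                                                         DMA_BIDIRECTIONAL);
            if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
                    dma_unmap_single(hostdata->dev, hostdata->caps_addr,
                                     sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
                    return 1;
            }
            return 0;
    }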
@@ -1687,6 +1912,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1687 host->max_lun = 8; 1912 host->max_lun = 8;
1688 host->max_id = max_id; 1913 host->max_id = max_id;
1689 host->max_channel = max_channel; 1914 host->max_channel = max_channel;
1915 host->max_cmd_len = 16;
1690 1916
1691 if (scsi_add_host(hostdata->host, hostdata->dev)) 1917 if (scsi_add_host(hostdata->host, hostdata->dev))
1692 goto add_host_failed; 1918 goto add_host_failed;
@@ -1733,6 +1959,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1733 init_pool_failed: 1959 init_pool_failed:
1734 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); 1960 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
1735 init_crq_failed: 1961 init_crq_failed:
1962 unmap_persist_bufs(hostdata);
1963 persist_bufs_failed:
1736 scsi_host_put(host); 1964 scsi_host_put(host);
1737 scsi_host_alloc_failed: 1965 scsi_host_alloc_failed:
1738 return -1; 1966 return -1;
@@ -1741,6 +1969,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1741static int ibmvscsi_remove(struct vio_dev *vdev) 1969static int ibmvscsi_remove(struct vio_dev *vdev)
1742{ 1970{
1743 struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data; 1971 struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
1972 unmap_persist_bufs(hostdata);
1744 release_event_pool(&hostdata->pool, hostdata); 1973 release_event_pool(&hostdata->pool, hostdata);
1745 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, 1974 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
1746 max_events); 1975 max_events);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 2d4339d5e16..76425303def 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -90,6 +90,7 @@ struct event_pool {
90/* all driver data associated with a host adapter */ 90/* all driver data associated with a host adapter */
91struct ibmvscsi_host_data { 91struct ibmvscsi_host_data {
92 atomic_t request_limit; 92 atomic_t request_limit;
93 int client_migrated;
93 struct device *dev; 94 struct device *dev;
94 struct event_pool pool; 95 struct event_pool pool;
95 struct crq_queue queue; 96 struct crq_queue queue;
@@ -97,6 +98,9 @@ struct ibmvscsi_host_data {
97 struct list_head sent; 98 struct list_head sent;
98 struct Scsi_Host *host; 99 struct Scsi_Host *host;
99 struct mad_adapter_info_data madapter_info; 100 struct mad_adapter_info_data madapter_info;
101 struct capabilities caps;
102 dma_addr_t caps_addr;
103 dma_addr_t adapter_info_addr;
100}; 104};
101 105
102/* routines for managing a command/response queue */ 106/* routines for managing a command/response queue */
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 204604501ad..2cd735d1d19 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -37,6 +37,7 @@
37 37
38#define SRP_VERSION "16.a" 38#define SRP_VERSION "16.a"
39#define SRP_MAX_IU_LEN 256 39#define SRP_MAX_IU_LEN 256
40#define SRP_MAX_LOC_LEN 32
40 41
41union srp_iu { 42union srp_iu {
42 struct srp_login_req login_req; 43 struct srp_login_req login_req;
@@ -86,7 +87,37 @@ enum viosrp_mad_types {
86 VIOSRP_EMPTY_IU_TYPE = 0x01, 87 VIOSRP_EMPTY_IU_TYPE = 0x01,
87 VIOSRP_ERROR_LOG_TYPE = 0x02, 88 VIOSRP_ERROR_LOG_TYPE = 0x02,
88 VIOSRP_ADAPTER_INFO_TYPE = 0x03, 89 VIOSRP_ADAPTER_INFO_TYPE = 0x03,
89 VIOSRP_HOST_CONFIG_TYPE = 0x04 90 VIOSRP_HOST_CONFIG_TYPE = 0x04,
91 VIOSRP_CAPABILITIES_TYPE = 0x05,
92 VIOSRP_ENABLE_FAST_FAIL = 0x08,
93};
94
95enum viosrp_mad_status {
96 VIOSRP_MAD_SUCCESS = 0x00,
97 VIOSRP_MAD_NOT_SUPPORTED = 0xF1,
98 VIOSRP_MAD_FAILED = 0xF7,
99};
100
101enum viosrp_capability_type {
102 MIGRATION_CAPABILITIES = 0x01,
103 RESERVATION_CAPABILITIES = 0x02,
104};
105
106enum viosrp_capability_support {
107 SERVER_DOES_NOT_SUPPORTS_CAP = 0x0,
108 SERVER_SUPPORTS_CAP = 0x01,
109 SERVER_CAP_DATA = 0x02,
110};
111
112enum viosrp_reserve_type {
113 CLIENT_RESERVE_SCSI_2 = 0x01,
114};
115
116enum viosrp_capability_flag {
117 CLIENT_MIGRATED = 0x01,
118 CLIENT_RECONNECT = 0x02,
119 CAP_LIST_SUPPORTED = 0x04,
120 CAP_LIST_DATA = 0x08,
90}; 121};
91 122
92/* 123/*
@@ -127,11 +158,46 @@ struct viosrp_host_config {
127 u64 buffer; 158 u64 buffer;
128}; 159};
129 160
161struct viosrp_fast_fail {
162 struct mad_common common;
163};
164
165struct viosrp_capabilities {
166 struct mad_common common;
167 u64 buffer;
168};
169
170struct mad_capability_common {
171 u32 cap_type;
172 u16 length;
173 u16 server_support;
174};
175
176struct mad_reserve_cap {
177 struct mad_capability_common common;
178 u32 type;
179};
180
181struct mad_migration_cap {
182 struct mad_capability_common common;
183 u32 ecl;
184};
185
186struct capabilities{
187 u32 flags;
188 char name[SRP_MAX_LOC_LEN];
189 char loc[SRP_MAX_LOC_LEN];
190 struct mad_migration_cap migration;
191 struct mad_reserve_cap reserve;
192};
193
130union mad_iu { 194union mad_iu {
131 struct viosrp_empty_iu empty_iu; 195 struct viosrp_empty_iu empty_iu;
132 struct viosrp_error_log error_log; 196 struct viosrp_error_log error_log;
133 struct viosrp_adapter_info adapter_info; 197 struct viosrp_adapter_info adapter_info;
134 struct viosrp_host_config host_config; 198 struct viosrp_host_config host_config;
199 struct viosrp_fast_fail fast_fail;
200 struct viosrp_capabilities capabilities;
135}; 201};
136 202
137union viosrp_iu { 203union viosrp_iu {
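Taken together, the new enums and structures describe a client-advertised capability list. A hedged sketch of how a client might fill one in before handing it to the server with a VIOSRP_CAPABILITIES_TYPE MAD (field values are illustrative; the driver's real initialization is outside this excerpt):

    static void example_init_caps(struct capabilities *caps,
                                  const char *part_name, const char *loc)
    {
            memset(caps, 0, sizeof(*caps));
            caps->flags = CAP_LIST_SUPPORTED;
            strncpy(caps->name, part_name, sizeof(caps->name) - 1);
            strncpy(caps->loc, loc, sizeof(caps->loc) - 1);

            caps->migration.common.cap_type = MIGRATION_CAPABILITIES;
            caps->migration.common.length = sizeof(caps->migration);
            caps->migration.ecl = 1;                /* illustrative value */

            caps->reserve.common.cap_type = RESERVATION_CAPABILITIES;
            caps->reserve.common.length = sizeof(caps->reserve);
            caps->reserve.type = CLIENT_RESERVE_SCSI_2;
    }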
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index dd689ded860..0f8bc772b11 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -7003,6 +7003,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
7003 ioa_cfg->sdt_state = ABORT_DUMP; 7003 ioa_cfg->sdt_state = ABORT_DUMP;
7004 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES; 7004 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7005 ioa_cfg->in_ioa_bringdown = 1; 7005 ioa_cfg->in_ioa_bringdown = 1;
7006 ioa_cfg->allow_cmds = 0;
7006 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); 7007 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7007 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 7008 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7008} 7009}
@@ -7688,7 +7689,7 @@ static void __ipr_remove(struct pci_dev *pdev)
7688 * Return value: 7689 * Return value:
7689 * none 7690 * none
7690 **/ 7691 **/
7691static void ipr_remove(struct pci_dev *pdev) 7692static void __devexit ipr_remove(struct pci_dev *pdev)
7692{ 7693{
7693 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); 7694 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7694 7695
@@ -7864,7 +7865,7 @@ static struct pci_driver ipr_driver = {
7864 .name = IPR_NAME, 7865 .name = IPR_NAME,
7865 .id_table = ipr_pci_table, 7866 .id_table = ipr_pci_table,
7866 .probe = ipr_probe, 7867 .probe = ipr_probe,
7867 .remove = ipr_remove, 7868 .remove = __devexit_p(ipr_remove),
7868 .shutdown = ipr_shutdown, 7869 .shutdown = ipr_shutdown,
7869 .err_handler = &ipr_err_handler, 7870 .err_handler = &ipr_err_handler,
7870}; 7871};
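The ipr hunks annotate the remove path with __devexit and register it through __devexit_p(); when hotplug support is compiled out, the function body is discarded and the pointer becomes NULL. The general pattern, with a hypothetical driver name:

    static void __devexit example_remove(struct pci_dev *pdev)
    {
            /* teardown; placed in the devexit section, which the kernel
             * can discard when CONFIG_HOTPLUG is not set */
    }

    static struct pci_driver example_driver = {
            .name   = "example",
            .remove = __devexit_p(example_remove), /* NULL if discarded */
    };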
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 992af05aacf..7af9bceb8aa 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1159,6 +1159,10 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1159 atomic_inc(&mp->stats.xid_not_found); 1159 atomic_inc(&mp->stats.xid_not_found);
1160 goto out; 1160 goto out;
1161 } 1161 }
1162 if (ep->esb_stat & ESB_ST_COMPLETE) {
1163 atomic_inc(&mp->stats.xid_not_found);
1164 goto out;
1165 }
1162 if (ep->rxid == FC_XID_UNKNOWN) 1166 if (ep->rxid == FC_XID_UNKNOWN)
1163 ep->rxid = ntohs(fh->fh_rx_id); 1167 ep->rxid = ntohs(fh->fh_rx_id);
1164 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { 1168 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 521f996f9b1..ad8b747837b 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1896,7 +1896,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1896 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; 1896 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
1897 break; 1897 break;
1898 case FC_CMD_ABORTED: 1898 case FC_CMD_ABORTED:
1899 sc_cmd->result = (DID_ABORT << 16) | fsp->io_status; 1899 sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
1900 break; 1900 break;
1901 case FC_CMD_TIME_OUT: 1901 case FC_CMD_TIME_OUT:
1902 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status; 1902 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 747d73c5c8a..7bfbff7e0ef 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -478,7 +478,7 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp)
478 if (PTR_ERR(fp) == -FC_EX_CLOSED) 478 if (PTR_ERR(fp) == -FC_EX_CLOSED)
479 return fc_rport_error(rport, fp); 479 return fc_rport_error(rport, fp);
480 480
481 if (rdata->retries < rdata->local_port->max_retry_count) { 481 if (rdata->retries < rdata->local_port->max_rport_retry_count) {
482 FC_DEBUG_RPORT("Error %ld in state %s, retrying\n", 482 FC_DEBUG_RPORT("Error %ld in state %s, retrying\n",
483 PTR_ERR(fp), fc_rport_state(rport)); 483 PTR_ERR(fp), fc_rport_state(rport));
484 rdata->retries++; 484 rdata->retries++;
@@ -1330,7 +1330,7 @@ int fc_rport_init(struct fc_lport *lport)
1330} 1330}
1331EXPORT_SYMBOL(fc_rport_init); 1331EXPORT_SYMBOL(fc_rport_init);
1332 1332
1333int fc_setup_rport() 1333int fc_setup_rport(void)
1334{ 1334{
1335 rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); 1335 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1336 if (!rport_event_queue) 1336 if (!rport_event_queue)
@@ -1339,7 +1339,7 @@ int fc_setup_rport()
1339} 1339}
1340EXPORT_SYMBOL(fc_setup_rport); 1340EXPORT_SYMBOL(fc_setup_rport);
1341 1341
1342void fc_destroy_rport() 1342void fc_destroy_rport(void)
1343{ 1343{
1344 destroy_workqueue(rport_event_queue); 1344 destroy_workqueue(rport_event_queue);
1345} 1345}
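The fc_setup_rport()/fc_destroy_rport() changes are not cosmetic: in C, an empty parameter list declares a function taking an unspecified number of arguments, while (void) declares a true zero-argument prototype. A minimal illustration:

    int f();      /* old style: f(1, 2, 3) still compiles silently */
    int g(void);  /* prototype: g(1) is a compile-time error */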
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index e72b4ad47d3..59908aead53 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -81,7 +81,8 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
81 struct Scsi_Host *shost = conn->session->host; 81 struct Scsi_Host *shost = conn->session->host;
82 struct iscsi_host *ihost = shost_priv(shost); 82 struct iscsi_host *ihost = shost_priv(shost);
83 83
84 queue_work(ihost->workq, &conn->xmitwork); 84 if (ihost->workq)
85 queue_work(ihost->workq, &conn->xmitwork);
85} 86}
86EXPORT_SYMBOL_GPL(iscsi_conn_queue_work); 87EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
87 88
@@ -109,11 +110,9 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
109 * if the window closed with IO queued, then kick the 110 * if the window closed with IO queued, then kick the
110 * xmit thread 111 * xmit thread
111 */ 112 */
112 if (!list_empty(&session->leadconn->xmitqueue) || 113 if (!list_empty(&session->leadconn->cmdqueue) ||
113 !list_empty(&session->leadconn->mgmtqueue)) { 114 !list_empty(&session->leadconn->mgmtqueue))
114 if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 115 iscsi_conn_queue_work(session->leadconn);
115 iscsi_conn_queue_work(session->leadconn);
116 }
117 } 116 }
118} 117}
119EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); 118EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
@@ -257,9 +256,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
257 itt_t itt; 256 itt_t itt;
258 int rc; 257 int rc;
259 258
260 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); 259 if (conn->session->tt->alloc_pdu) {
261 if (rc) 260 rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
262 return rc; 261 if (rc)
262 return rc;
263 }
263 hdr = (struct iscsi_cmd *) task->hdr; 264 hdr = (struct iscsi_cmd *) task->hdr;
264 itt = hdr->itt; 265 itt = hdr->itt;
265 memset(hdr, 0, sizeof(*hdr)); 266 memset(hdr, 0, sizeof(*hdr));
@@ -364,7 +365,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
364 return -EIO; 365 return -EIO;
365 366
366 task->state = ISCSI_TASK_RUNNING; 367 task->state = ISCSI_TASK_RUNNING;
367 list_move_tail(&task->running, &conn->run_list);
368 368
369 conn->scsicmd_pdus_cnt++; 369 conn->scsicmd_pdus_cnt++;
370 ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x " 370 ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
@@ -380,26 +380,25 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
380} 380}
381 381
382/** 382/**
383 * iscsi_complete_command - finish a task 383 * iscsi_free_task - free a task
384 * @task: iscsi cmd task 384 * @task: iscsi cmd task
385 * 385 *
386 * Must be called with session lock. 386 * Must be called with session lock.
387 * This function returns the scsi command to scsi-ml or cleans 387 * This function returns the scsi command to scsi-ml or cleans
388 * up mgmt tasks then returns the task to the pool. 388 * up mgmt tasks then returns the task to the pool.
389 */ 389 */
390static void iscsi_complete_command(struct iscsi_task *task) 390static void iscsi_free_task(struct iscsi_task *task)
391{ 391{
392 struct iscsi_conn *conn = task->conn; 392 struct iscsi_conn *conn = task->conn;
393 struct iscsi_session *session = conn->session; 393 struct iscsi_session *session = conn->session;
394 struct scsi_cmnd *sc = task->sc; 394 struct scsi_cmnd *sc = task->sc;
395 395
396 ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
397 task->itt, task->state, task->sc);
398
396 session->tt->cleanup_task(task); 399 session->tt->cleanup_task(task);
397 list_del_init(&task->running); 400 task->state = ISCSI_TASK_FREE;
398 task->state = ISCSI_TASK_COMPLETED;
399 task->sc = NULL; 401 task->sc = NULL;
400
401 if (conn->task == task)
402 conn->task = NULL;
403 /* 402 /*
404 * login task is preallocated so do not free 403 * login task is preallocated so do not free
405 */ 404 */
@@ -408,9 +407,6 @@ static void iscsi_complete_command(struct iscsi_task *task)
408 407
409 __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*)); 408 __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
410 409
411 if (conn->ping_task == task)
412 conn->ping_task = NULL;
413
414 if (sc) { 410 if (sc) {
415 task->sc = NULL; 411 task->sc = NULL;
416 /* SCSI eh reuses commands to verify us */ 412 /* SCSI eh reuses commands to verify us */
@@ -433,7 +429,7 @@ EXPORT_SYMBOL_GPL(__iscsi_get_task);
433static void __iscsi_put_task(struct iscsi_task *task) 429static void __iscsi_put_task(struct iscsi_task *task)
434{ 430{
435 if (atomic_dec_and_test(&task->refcount)) 431 if (atomic_dec_and_test(&task->refcount))
436 iscsi_complete_command(task); 432 iscsi_free_task(task);
437} 433}
438 434
439void iscsi_put_task(struct iscsi_task *task) 435void iscsi_put_task(struct iscsi_task *task)
@@ -446,26 +442,74 @@ void iscsi_put_task(struct iscsi_task *task)
446} 442}
447EXPORT_SYMBOL_GPL(iscsi_put_task); 443EXPORT_SYMBOL_GPL(iscsi_put_task);
448 444
445/**
446 * iscsi_complete_task - finish a task
447 * @task: iscsi cmd task
448 * @state: state to complete task with
449 *
450 * Must be called with session lock.
451 */
452static void iscsi_complete_task(struct iscsi_task *task, int state)
453{
454 struct iscsi_conn *conn = task->conn;
455
456 ISCSI_DBG_SESSION(conn->session,
457 "complete task itt 0x%x state %d sc %p\n",
458 task->itt, task->state, task->sc);
459 if (task->state == ISCSI_TASK_COMPLETED ||
460 task->state == ISCSI_TASK_ABRT_TMF ||
461 task->state == ISCSI_TASK_ABRT_SESS_RECOV)
462 return;
463 WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
464 task->state = state;
465
466 if (!list_empty(&task->running))
467 list_del_init(&task->running);
468
469 if (conn->task == task)
470 conn->task = NULL;
471
472 if (conn->ping_task == task)
473 conn->ping_task = NULL;
474
475 /* release get from queueing */
476 __iscsi_put_task(task);
477}
478
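iscsi_complete_task() separates completing a task from freeing it: completion records why the task ended and drops the queueing reference, and the memory only returns to the pool when the last reference is put in iscsi_free_task(). The states this patch references, roughly (a reading aid; the comments are this sketch's interpretation, not the header's definitions):

    /* ISCSI_TASK_FREE            back in session->cmdpool
     * ISCSI_TASK_PENDING         queued on cmdqueue/mgmtqueue, not yet sent
     * ISCSI_TASK_RUNNING         prepped and handed to the transport
     * ISCSI_TASK_COMPLETED       finished normally
     * ISCSI_TASK_ABRT_TMF        failed via a task-management abort
     * ISCSI_TASK_ABRT_SESS_RECOV failed because session recovery started
     */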
449/* 479/*
450 * session lock must be held 480 * session lock must be held; unless this is called for a still-pending
481 * task or from the xmit thread itself, the xmit thread must be
482 * suspended first.
451 */ 483 */
452static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task, 484static void fail_scsi_task(struct iscsi_task *task, int err)
453 int err)
454{ 485{
486 struct iscsi_conn *conn = task->conn;
455 struct scsi_cmnd *sc; 487 struct scsi_cmnd *sc;
488 int state;
456 489
490 /*
491 * if a command completes while a successful tmf response is being
492 * handled, task->sc is already NULL here, because the scsi eh abort
493 * code does not take a ref to the task.
494 */
457 sc = task->sc; 495 sc = task->sc;
458 if (!sc) 496 if (!sc)
459 return; 497 return;
460 498
461 if (task->state == ISCSI_TASK_PENDING) 499 if (task->state == ISCSI_TASK_PENDING) {
462 /* 500 /*
463 * cmd never made it to the xmit thread, so we should not count 501 * cmd never made it to the xmit thread, so we should not count
464 * the cmd in the sequencing 502 * the cmd in the sequencing
465 */ 503 */
466 conn->session->queued_cmdsn--; 504 conn->session->queued_cmdsn--;
505 /* it was never sent so just complete like normal */
506 state = ISCSI_TASK_COMPLETED;
507 } else if (err == DID_TRANSPORT_DISRUPTED)
508 state = ISCSI_TASK_ABRT_SESS_RECOV;
509 else
510 state = ISCSI_TASK_ABRT_TMF;
467 511
468 sc->result = err; 512 sc->result = err << 16;
469 if (!scsi_bidi_cmnd(sc)) 513 if (!scsi_bidi_cmnd(sc))
470 scsi_set_resid(sc, scsi_bufflen(sc)); 514 scsi_set_resid(sc, scsi_bufflen(sc));
471 else { 515 else {
@@ -473,10 +517,7 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
473 scsi_in(sc)->resid = scsi_in(sc)->length; 517 scsi_in(sc)->resid = scsi_in(sc)->length;
474 } 518 }
475 519
476 if (conn->task == task) 520 iscsi_complete_task(task, state);
477 conn->task = NULL;
478 /* release ref from queuecommand */
479 __iscsi_put_task(task);
480} 521}
481 522
482static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, 523static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -516,7 +557,6 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
516 session->state = ISCSI_STATE_LOGGING_OUT; 557 session->state = ISCSI_STATE_LOGGING_OUT;
517 558
518 task->state = ISCSI_TASK_RUNNING; 559 task->state = ISCSI_TASK_RUNNING;
519 list_move_tail(&task->running, &conn->mgmt_run_list);
520 ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x " 560 ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
521 "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK, 561 "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
522 hdr->itt, task->data_count); 562 hdr->itt, task->data_count);
@@ -528,6 +568,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
528 char *data, uint32_t data_size) 568 char *data, uint32_t data_size)
529{ 569{
530 struct iscsi_session *session = conn->session; 570 struct iscsi_session *session = conn->session;
571 struct iscsi_host *ihost = shost_priv(session->host);
531 struct iscsi_task *task; 572 struct iscsi_task *task;
532 itt_t itt; 573 itt_t itt;
533 574
@@ -544,6 +585,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
544 */ 585 */
545 task = conn->login_task; 586 task = conn->login_task;
546 else { 587 else {
588 if (session->state != ISCSI_STATE_LOGGED_IN)
589 return NULL;
590
547 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); 591 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
548 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); 592 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
549 593
@@ -559,6 +603,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
559 atomic_set(&task->refcount, 1); 603 atomic_set(&task->refcount, 1);
560 task->conn = conn; 604 task->conn = conn;
561 task->sc = NULL; 605 task->sc = NULL;
606 INIT_LIST_HEAD(&task->running);
607 task->state = ISCSI_TASK_PENDING;
562 608
563 if (data_size) { 609 if (data_size) {
564 memcpy(task->data, data, data_size); 610 memcpy(task->data, data, data_size);
@@ -566,11 +612,14 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
566 } else 612 } else
567 task->data_count = 0; 613 task->data_count = 0;
568 614
569 if (conn->session->tt->alloc_pdu(task, hdr->opcode)) { 615 if (conn->session->tt->alloc_pdu) {
570 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate " 616 if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
571 "pdu for mgmt task.\n"); 617 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
572 goto requeue_task; 618 "pdu for mgmt task.\n");
619 goto free_task;
620 }
573 } 621 }
622
574 itt = task->hdr->itt; 623 itt = task->hdr->itt;
575 task->hdr_len = sizeof(struct iscsi_hdr); 624 task->hdr_len = sizeof(struct iscsi_hdr);
576 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr)); 625 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
@@ -583,30 +632,22 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
583 task->conn->session->age); 632 task->conn->session->age);
584 } 633 }
585 634
586 INIT_LIST_HEAD(&task->running); 635 if (!ihost->workq) {
587 list_add_tail(&task->running, &conn->mgmtqueue);
588
589 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
590 if (iscsi_prep_mgmt_task(conn, task)) 636 if (iscsi_prep_mgmt_task(conn, task))
591 goto free_task; 637 goto free_task;
592 638
593 if (session->tt->xmit_task(task)) 639 if (session->tt->xmit_task(task))
594 goto free_task; 640 goto free_task;
595 641 } else {
596 } else 642 list_add_tail(&task->running, &conn->mgmtqueue);
597 iscsi_conn_queue_work(conn); 643 iscsi_conn_queue_work(conn);
644 }
598 645
599 return task; 646 return task;
600 647
601free_task: 648free_task:
602 __iscsi_put_task(task); 649 __iscsi_put_task(task);
603 return NULL; 650 return NULL;
604
605requeue_task:
606 if (task != conn->login_task)
607 __kfifo_put(session->cmdpool.queue, (void*)&task,
608 sizeof(void*));
609 return NULL;
610} 651}
611 652
612int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, 653int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -701,11 +742,10 @@ invalid_datalen:
701 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 742 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
702 } 743 }
703out: 744out:
704 ISCSI_DBG_SESSION(session, "done [sc %p res %d itt 0x%x]\n", 745 ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n",
705 sc, sc->result, task->itt); 746 sc, sc->result, task->itt);
706 conn->scsirsp_pdus_cnt++; 747 conn->scsirsp_pdus_cnt++;
707 748 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
708 __iscsi_put_task(task);
709} 749}
710 750
711/** 751/**
@@ -724,6 +764,7 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
724 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS)) 764 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
725 return; 765 return;
726 766
767 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr);
727 sc->result = (DID_OK << 16) | rhdr->cmd_status; 768 sc->result = (DID_OK << 16) | rhdr->cmd_status;
728 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; 769 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
729 if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW | 770 if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
@@ -738,8 +779,11 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
738 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 779 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
739 } 780 }
740 781
782 ISCSI_DBG_SESSION(conn->session, "data in with status done "
783 "[sc %p res %d itt 0x%x]\n",
784 sc, sc->result, task->itt);
741 conn->scsirsp_pdus_cnt++; 785 conn->scsirsp_pdus_cnt++;
742 __iscsi_put_task(task); 786 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
743} 787}
744 788
745static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) 789static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -823,7 +867,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
823 * 867 *
824 * The session lock must be held. 868 * The session lock must be held.
825 */ 869 */
826static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) 870struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
827{ 871{
828 struct iscsi_session *session = conn->session; 872 struct iscsi_session *session = conn->session;
829 int i; 873 int i;
@@ -840,6 +884,7 @@ static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
840 884
841 return session->cmds[i]; 885 return session->cmds[i];
842} 886}
887EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
843 888
844/** 889/**
845 * __iscsi_complete_pdu - complete pdu 890 * __iscsi_complete_pdu - complete pdu
@@ -959,7 +1004,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
959 } 1004 }
960 1005
961 iscsi_tmf_rsp(conn, hdr); 1006 iscsi_tmf_rsp(conn, hdr);
962 __iscsi_put_task(task); 1007 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
963 break; 1008 break;
964 case ISCSI_OP_NOOP_IN: 1009 case ISCSI_OP_NOOP_IN:
965 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); 1010 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
@@ -977,7 +1022,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
977 goto recv_pdu; 1022 goto recv_pdu;
978 1023
979 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout); 1024 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
980 __iscsi_put_task(task); 1025 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
981 break; 1026 break;
982 default: 1027 default:
983 rc = ISCSI_ERR_BAD_OPCODE; 1028 rc = ISCSI_ERR_BAD_OPCODE;
@@ -989,7 +1034,7 @@ out:
989recv_pdu: 1034recv_pdu:
990 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) 1035 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
991 rc = ISCSI_ERR_CONN_FAILED; 1036 rc = ISCSI_ERR_CONN_FAILED;
992 __iscsi_put_task(task); 1037 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
993 return rc; 1038 return rc;
994} 1039}
995EXPORT_SYMBOL_GPL(__iscsi_complete_pdu); 1040EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
@@ -1166,7 +1211,12 @@ void iscsi_requeue_task(struct iscsi_task *task)
1166{ 1211{
1167 struct iscsi_conn *conn = task->conn; 1212 struct iscsi_conn *conn = task->conn;
1168 1213
1169 list_move_tail(&task->running, &conn->requeue); 1214 /*
1215 * this may be on the requeue list already if the xmit_task callout
1216 * is handling the r2ts while we are adding new ones
1217 */
1218 if (list_empty(&task->running))
1219 list_add_tail(&task->running, &conn->requeue);
1170 iscsi_conn_queue_work(conn); 1220 iscsi_conn_queue_work(conn);
1171} 1221}
1172EXPORT_SYMBOL_GPL(iscsi_requeue_task); 1222EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1206,6 +1256,7 @@ check_mgmt:
1206 while (!list_empty(&conn->mgmtqueue)) { 1256 while (!list_empty(&conn->mgmtqueue)) {
1207 conn->task = list_entry(conn->mgmtqueue.next, 1257 conn->task = list_entry(conn->mgmtqueue.next,
1208 struct iscsi_task, running); 1258 struct iscsi_task, running);
1259 list_del_init(&conn->task->running);
1209 if (iscsi_prep_mgmt_task(conn, conn->task)) { 1260 if (iscsi_prep_mgmt_task(conn, conn->task)) {
1210 __iscsi_put_task(conn->task); 1261 __iscsi_put_task(conn->task);
1211 conn->task = NULL; 1262 conn->task = NULL;
@@ -1217,23 +1268,26 @@ check_mgmt:
1217 } 1268 }
1218 1269
1219 /* process pending command queue */ 1270 /* process pending command queue */
1220 while (!list_empty(&conn->xmitqueue)) { 1271 while (!list_empty(&conn->cmdqueue)) {
1221 if (conn->tmf_state == TMF_QUEUED) 1272 if (conn->tmf_state == TMF_QUEUED)
1222 break; 1273 break;
1223 1274
1224 conn->task = list_entry(conn->xmitqueue.next, 1275 conn->task = list_entry(conn->cmdqueue.next,
1225 struct iscsi_task, running); 1276 struct iscsi_task, running);
1277 list_del_init(&conn->task->running);
1226 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { 1278 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
1227 fail_command(conn, conn->task, DID_IMM_RETRY << 16); 1279 fail_scsi_task(conn->task, DID_IMM_RETRY);
1228 continue; 1280 continue;
1229 } 1281 }
1230 rc = iscsi_prep_scsi_cmd_pdu(conn->task); 1282 rc = iscsi_prep_scsi_cmd_pdu(conn->task);
1231 if (rc) { 1283 if (rc) {
1232 if (rc == -ENOMEM) { 1284 if (rc == -ENOMEM) {
1285 list_add_tail(&conn->task->running,
1286 &conn->cmdqueue);
1233 conn->task = NULL; 1287 conn->task = NULL;
1234 goto again; 1288 goto again;
1235 } else 1289 } else
1236 fail_command(conn, conn->task, DID_ABORT << 16); 1290 fail_scsi_task(conn->task, DID_ABORT);
1237 continue; 1291 continue;
1238 } 1292 }
1239 rc = iscsi_xmit_task(conn); 1293 rc = iscsi_xmit_task(conn);
@@ -1260,8 +1314,8 @@ check_mgmt:
1260 1314
1261 conn->task = list_entry(conn->requeue.next, 1315 conn->task = list_entry(conn->requeue.next,
1262 struct iscsi_task, running); 1316 struct iscsi_task, running);
1317 list_del_init(&conn->task->running);
1263 conn->task->state = ISCSI_TASK_RUNNING; 1318 conn->task->state = ISCSI_TASK_RUNNING;
1264 list_move_tail(conn->requeue.next, &conn->run_list);
1265 rc = iscsi_xmit_task(conn); 1319 rc = iscsi_xmit_task(conn);
1266 if (rc) 1320 if (rc)
1267 goto again; 1321 goto again;
@@ -1328,6 +1382,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1328{ 1382{
1329 struct iscsi_cls_session *cls_session; 1383 struct iscsi_cls_session *cls_session;
1330 struct Scsi_Host *host; 1384 struct Scsi_Host *host;
1385 struct iscsi_host *ihost;
1331 int reason = 0; 1386 int reason = 0;
1332 struct iscsi_session *session; 1387 struct iscsi_session *session;
1333 struct iscsi_conn *conn; 1388 struct iscsi_conn *conn;
@@ -1338,6 +1393,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1338 sc->SCp.ptr = NULL; 1393 sc->SCp.ptr = NULL;
1339 1394
1340 host = sc->device->host; 1395 host = sc->device->host;
1396 ihost = shost_priv(host);
1341 spin_unlock(host->host_lock); 1397 spin_unlock(host->host_lock);
1342 1398
1343 cls_session = starget_to_session(scsi_target(sc->device)); 1399 cls_session = starget_to_session(scsi_target(sc->device));
@@ -1350,13 +1406,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1350 goto fault; 1406 goto fault;
1351 } 1407 }
1352 1408
1353 /* 1409 if (session->state != ISCSI_STATE_LOGGED_IN) {
1354 * ISCSI_STATE_FAILED is a temp. state. The recovery
1355 * code will decide what is best to do with command queued
1356 * during this time
1357 */
1358 if (session->state != ISCSI_STATE_LOGGED_IN &&
1359 session->state != ISCSI_STATE_FAILED) {
1360 /* 1410 /*
1361 * to handle the race between when we set the recovery state 1411 * to handle the race between when we set the recovery state
1362 * and block the session we requeue here (commands could 1412 * and block the session we requeue here (commands could
@@ -1364,12 +1414,15 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1364 * up because the block code is not locked) 1414 * up because the block code is not locked)
1365 */ 1415 */
1366 switch (session->state) { 1416 switch (session->state) {
1417 case ISCSI_STATE_FAILED:
1367 case ISCSI_STATE_IN_RECOVERY: 1418 case ISCSI_STATE_IN_RECOVERY:
1368 reason = FAILURE_SESSION_IN_RECOVERY; 1419 reason = FAILURE_SESSION_IN_RECOVERY;
1369 goto reject; 1420 sc->result = DID_IMM_RETRY << 16;
1421 break;
1370 case ISCSI_STATE_LOGGING_OUT: 1422 case ISCSI_STATE_LOGGING_OUT:
1371 reason = FAILURE_SESSION_LOGGING_OUT; 1423 reason = FAILURE_SESSION_LOGGING_OUT;
1372 goto reject; 1424 sc->result = DID_IMM_RETRY << 16;
1425 break;
1373 case ISCSI_STATE_RECOVERY_FAILED: 1426 case ISCSI_STATE_RECOVERY_FAILED:
1374 reason = FAILURE_SESSION_RECOVERY_TIMEOUT; 1427 reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
1375 sc->result = DID_TRANSPORT_FAILFAST << 16; 1428 sc->result = DID_TRANSPORT_FAILFAST << 16;
@@ -1402,9 +1455,8 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1402 reason = FAILURE_OOM; 1455 reason = FAILURE_OOM;
1403 goto reject; 1456 goto reject;
1404 } 1457 }
1405 list_add_tail(&task->running, &conn->xmitqueue);
1406 1458
1407 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { 1459 if (!ihost->workq) {
1408 reason = iscsi_prep_scsi_cmd_pdu(task); 1460 reason = iscsi_prep_scsi_cmd_pdu(task);
1409 if (reason) { 1461 if (reason) {
1410 if (reason == -ENOMEM) { 1462 if (reason == -ENOMEM) {
@@ -1419,8 +1471,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1419 reason = FAILURE_SESSION_NOT_READY; 1471 reason = FAILURE_SESSION_NOT_READY;
1420 goto prepd_reject; 1472 goto prepd_reject;
1421 } 1473 }
1422 } else 1474 } else {
1475 list_add_tail(&task->running, &conn->cmdqueue);
1423 iscsi_conn_queue_work(conn); 1476 iscsi_conn_queue_work(conn);
1477 }
1424 1478
1425 session->queued_cmdsn++; 1479 session->queued_cmdsn++;
1426 spin_unlock(&session->lock); 1480 spin_unlock(&session->lock);
@@ -1429,7 +1483,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1429 1483
1430prepd_reject: 1484prepd_reject:
1431 sc->scsi_done = NULL; 1485 sc->scsi_done = NULL;
1432 iscsi_complete_command(task); 1486 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1433reject: 1487reject:
1434 spin_unlock(&session->lock); 1488 spin_unlock(&session->lock);
1435 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", 1489 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
@@ -1439,7 +1493,7 @@ reject:
1439 1493
1440prepd_fault: 1494prepd_fault:
1441 sc->scsi_done = NULL; 1495 sc->scsi_done = NULL;
1442 iscsi_complete_command(task); 1496 iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
1443fault: 1497fault:
1444 spin_unlock(&session->lock); 1498 spin_unlock(&session->lock);
1445 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", 1499 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
@@ -1608,44 +1662,24 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1608 * Fail commands. session lock held and recv side suspended and xmit 1662 * Fail commands. session lock held and recv side suspended and xmit
1609 * thread flushed 1663 * thread flushed
1610 */ 1664 */
1611static void fail_all_commands(struct iscsi_conn *conn, unsigned lun, 1665static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
1612 int error) 1666 int error)
1613{ 1667{
1614 struct iscsi_task *task, *tmp; 1668 struct iscsi_task *task;
1615 1669 int i;
1616 if (conn->task) {
1617 if (lun == -1 ||
1618 (conn->task->sc && conn->task->sc->device->lun == lun))
1619 conn->task = NULL;
1620 }
1621 1670
1622 /* flush pending */ 1671 for (i = 0; i < conn->session->cmds_max; i++) {
1623 list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) { 1672 task = conn->session->cmds[i];
1624 if (lun == task->sc->device->lun || lun == -1) { 1673 if (!task->sc || task->state == ISCSI_TASK_FREE)
1625 ISCSI_DBG_SESSION(conn->session, 1674 continue;
1626 "failing pending sc %p itt 0x%x\n",
1627 task->sc, task->itt);
1628 fail_command(conn, task, error << 16);
1629 }
1630 }
1631 1675
1632 list_for_each_entry_safe(task, tmp, &conn->requeue, running) { 1676 if (lun != -1 && lun != task->sc->device->lun)
1633 if (lun == task->sc->device->lun || lun == -1) { 1677 continue;
1634 ISCSI_DBG_SESSION(conn->session,
1635 "failing requeued sc %p itt 0x%x\n",
1636 task->sc, task->itt);
1637 fail_command(conn, task, error << 16);
1638 }
1639 }
1640 1678
1641 /* fail all other running */ 1679 ISCSI_DBG_SESSION(conn->session,
1642 list_for_each_entry_safe(task, tmp, &conn->run_list, running) { 1680 "failing sc %p itt 0x%x state %d\n",
1643 if (lun == task->sc->device->lun || lun == -1) { 1681 task->sc, task->itt, task->state);
1644 ISCSI_DBG_SESSION(conn->session, 1682 fail_scsi_task(task, error);
1645 "failing in progress sc %p itt 0x%x\n",
1646 task->sc, task->itt);
1647 fail_command(conn, task, error << 16);
1648 }
1649 } 1683 }
1650} 1684}
1651 1685
@@ -1655,7 +1689,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
1655 struct iscsi_host *ihost = shost_priv(shost); 1689 struct iscsi_host *ihost = shost_priv(shost);
1656 1690
1657 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1691 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1658 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 1692 if (ihost->workq)
1659 flush_workqueue(ihost->workq); 1693 flush_workqueue(ihost->workq);
1660} 1694}
1661EXPORT_SYMBOL_GPL(iscsi_suspend_tx); 1695EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
@@ -1663,8 +1697,23 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
1663static void iscsi_start_tx(struct iscsi_conn *conn) 1697static void iscsi_start_tx(struct iscsi_conn *conn)
1664{ 1698{
1665 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1699 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1666 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 1700 iscsi_conn_queue_work(conn);
1667 iscsi_conn_queue_work(conn); 1701}
1702
1703/*
1704 * Return 1 only when a ping is actually in flight and has timed out,
1705 * i.e. nothing has been received for recv_timeout + ping_timeout. This
1706 * avoids declaring a timeout while we are still making progress on a
1707 * pdu that started before the ping and merely queued ahead of it.
1708 */
1709static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
1710{
1711 if (conn->ping_task &&
1712 time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
1713 (conn->ping_timeout * HZ), jiffies))
1714 return 1;
1715 else
1716 return 0;
1668} 1717}
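So a ping only counts as timed out after recv_timeout + ping_timeout of total silence. A worked timeline, assuming both knobs are set to 5 seconds (hypothetical values):

    /*  T          last byte received (conn->last_recv)
     *  T + 5 s    transport timer sends a nop-out; conn->ping_task is set
     *  T + 10 s   iscsi_has_ping_timed_out() returns 1, and both the eh
     *             path and iscsi_check_transport_timeouts() below fail
     *             the connection
     */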
1669 1718
1670static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) 1719static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
@@ -1702,16 +1751,20 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1702 * if the ping timed out then we are in the middle of cleaning up 1751 * if the ping timed out then we are in the middle of cleaning up
1703 * and can let the iscsi eh handle it 1752 * and can let the iscsi eh handle it
1704 */ 1753 */
1705 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + 1754 if (iscsi_has_ping_timed_out(conn)) {
1706 (conn->ping_timeout * HZ), jiffies))
1707 rc = BLK_EH_RESET_TIMER; 1755 rc = BLK_EH_RESET_TIMER;
1756 goto done;
1757 }
1708 /* 1758 /*
1709 * if we are about to check the transport then give the command 1759 * if we are about to check the transport then give the command
1710 * more time 1760 * more time
1711 */ 1761 */
1712 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ), 1762 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
1713 jiffies)) 1763 jiffies)) {
1714 rc = BLK_EH_RESET_TIMER; 1764 rc = BLK_EH_RESET_TIMER;
1765 goto done;
1766 }
1767
1715 /* if in the middle of checking the transport then give us more time */ 1768 /* if in the middle of checking the transport then give us more time */
1716 if (conn->ping_task) 1769 if (conn->ping_task)
1717 rc = BLK_EH_RESET_TIMER; 1770 rc = BLK_EH_RESET_TIMER;
@@ -1738,13 +1791,13 @@ static void iscsi_check_transport_timeouts(unsigned long data)
1738 1791
1739 recv_timeout *= HZ; 1792 recv_timeout *= HZ;
1740 last_recv = conn->last_recv; 1793 last_recv = conn->last_recv;
1741 if (conn->ping_task && 1794
1742 time_before_eq(conn->last_ping + (conn->ping_timeout * HZ), 1795 if (iscsi_has_ping_timed_out(conn)) {
1743 jiffies)) {
1744 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs " 1796 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
1745 "expired, last rx %lu, last ping %lu, " 1797 "expired, recv timeout %d, last rx %lu, "
1746 "now %lu\n", conn->ping_timeout, last_recv, 1798 "last ping %lu, now %lu\n",
1747 conn->last_ping, jiffies); 1799 conn->ping_timeout, conn->recv_timeout,
1800 last_recv, conn->last_ping, jiffies);
1748 spin_unlock(&session->lock); 1801 spin_unlock(&session->lock);
1749 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1802 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1750 return; 1803 return;
@@ -1788,6 +1841,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1788 cls_session = starget_to_session(scsi_target(sc->device)); 1841 cls_session = starget_to_session(scsi_target(sc->device));
1789 session = cls_session->dd_data; 1842 session = cls_session->dd_data;
1790 1843
1844 ISCSI_DBG_SESSION(session, "aborting sc %p\n", sc);
1845
1791 mutex_lock(&session->eh_mutex); 1846 mutex_lock(&session->eh_mutex);
1792 spin_lock_bh(&session->lock); 1847 spin_lock_bh(&session->lock);
1793 /* 1848 /*
@@ -1810,6 +1865,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1810 sc->SCp.phase != session->age) { 1865 sc->SCp.phase != session->age) {
1811 spin_unlock_bh(&session->lock); 1866 spin_unlock_bh(&session->lock);
1812 mutex_unlock(&session->eh_mutex); 1867 mutex_unlock(&session->eh_mutex);
1868 ISCSI_DBG_SESSION(session, "failing abort due to dropped "
1869 "session.\n");
1813 return FAILED; 1870 return FAILED;
1814 } 1871 }
1815 1872
@@ -1829,7 +1886,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1829 } 1886 }
1830 1887
1831 if (task->state == ISCSI_TASK_PENDING) { 1888 if (task->state == ISCSI_TASK_PENDING) {
1832 fail_command(conn, task, DID_ABORT << 16); 1889 fail_scsi_task(task, DID_ABORT);
1833 goto success; 1890 goto success;
1834 } 1891 }
1835 1892
@@ -1860,7 +1917,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1860 * then sent more data for the cmd. 1917 * then sent more data for the cmd.
1861 */ 1918 */
1862 spin_lock(&session->lock); 1919 spin_lock(&session->lock);
1863 fail_command(conn, task, DID_ABORT << 16); 1920 fail_scsi_task(task, DID_ABORT);
1864 conn->tmf_state = TMF_INITIAL; 1921 conn->tmf_state = TMF_INITIAL;
1865 spin_unlock(&session->lock); 1922 spin_unlock(&session->lock);
1866 iscsi_start_tx(conn); 1923 iscsi_start_tx(conn);
@@ -1967,7 +2024,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1967 iscsi_suspend_tx(conn); 2024 iscsi_suspend_tx(conn);
1968 2025
1969 spin_lock_bh(&session->lock); 2026 spin_lock_bh(&session->lock);
1970 fail_all_commands(conn, sc->device->lun, DID_ERROR); 2027 fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
1971 conn->tmf_state = TMF_INITIAL; 2028 conn->tmf_state = TMF_INITIAL;
1972 spin_unlock_bh(&session->lock); 2029 spin_unlock_bh(&session->lock);
1973 2030
@@ -2274,6 +2331,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
2274 if (cmd_task_size) 2331 if (cmd_task_size)
2275 task->dd_data = &task[1]; 2332 task->dd_data = &task[1];
2276 task->itt = cmd_i; 2333 task->itt = cmd_i;
2334 task->state = ISCSI_TASK_FREE;
2277 INIT_LIST_HEAD(&task->running); 2335 INIT_LIST_HEAD(&task->running);
2278 } 2336 }
2279 2337
@@ -2360,10 +2418,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
2360 conn->transport_timer.data = (unsigned long)conn; 2418 conn->transport_timer.data = (unsigned long)conn;
2361 conn->transport_timer.function = iscsi_check_transport_timeouts; 2419 conn->transport_timer.function = iscsi_check_transport_timeouts;
2362 2420
2363 INIT_LIST_HEAD(&conn->run_list);
2364 INIT_LIST_HEAD(&conn->mgmt_run_list);
2365 INIT_LIST_HEAD(&conn->mgmtqueue); 2421 INIT_LIST_HEAD(&conn->mgmtqueue);
2366 INIT_LIST_HEAD(&conn->xmitqueue); 2422 INIT_LIST_HEAD(&conn->cmdqueue);
2367 INIT_LIST_HEAD(&conn->requeue); 2423 INIT_LIST_HEAD(&conn->requeue);
2368 INIT_WORK(&conn->xmitwork, iscsi_xmitworker); 2424 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
2369 2425
@@ -2531,27 +2587,28 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2531EXPORT_SYMBOL_GPL(iscsi_conn_start); 2587EXPORT_SYMBOL_GPL(iscsi_conn_start);
2532 2588
2533static void 2589static void
2534flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn) 2590fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
2535{ 2591{
2536 struct iscsi_task *task, *tmp; 2592 struct iscsi_task *task;
2593 int i, state;
2537 2594
2538 /* handle pending */ 2595 for (i = 0; i < conn->session->cmds_max; i++) {
2539 list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) { 2596 task = conn->session->cmds[i];
2540 ISCSI_DBG_SESSION(session, "flushing pending mgmt task " 2597 if (task->sc)
2541 "itt 0x%x\n", task->itt); 2598 continue;
2542 /* release ref from prep task */
2543 __iscsi_put_task(task);
2544 }
2545 2599
2546 /* handle running */ 2600 if (task->state == ISCSI_TASK_FREE)
2547 list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) { 2601 continue;
2548 ISCSI_DBG_SESSION(session, "flushing running mgmt task " 2602
2549 "itt 0x%x\n", task->itt); 2603 ISCSI_DBG_SESSION(conn->session,
2550 /* release ref from prep task */ 2604 "failing mgmt itt 0x%x state %d\n",
2551 __iscsi_put_task(task); 2605 task->itt, task->state);
2552 } 2606 state = ISCSI_TASK_ABRT_SESS_RECOV;
2607 if (task->state == ISCSI_TASK_PENDING)
2608 state = ISCSI_TASK_COMPLETED;
2609 iscsi_complete_task(task, state);
2553 2610
2554 conn->task = NULL; 2611 }
2555} 2612}
2556 2613
2557static void iscsi_start_session_recovery(struct iscsi_session *session, 2614static void iscsi_start_session_recovery(struct iscsi_session *session,
@@ -2559,8 +2616,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2559{ 2616{
2560 int old_stop_stage; 2617 int old_stop_stage;
2561 2618
2562 del_timer_sync(&conn->transport_timer);
2563
2564 mutex_lock(&session->eh_mutex); 2619 mutex_lock(&session->eh_mutex);
2565 spin_lock_bh(&session->lock); 2620 spin_lock_bh(&session->lock);
2566 if (conn->stop_stage == STOP_CONN_TERM) { 2621 if (conn->stop_stage == STOP_CONN_TERM) {
@@ -2578,13 +2633,17 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2578 session->state = ISCSI_STATE_TERMINATE; 2633 session->state = ISCSI_STATE_TERMINATE;
2579 else if (conn->stop_stage != STOP_CONN_RECOVER) 2634 else if (conn->stop_stage != STOP_CONN_RECOVER)
2580 session->state = ISCSI_STATE_IN_RECOVERY; 2635 session->state = ISCSI_STATE_IN_RECOVERY;
2636 spin_unlock_bh(&session->lock);
2637
2638 del_timer_sync(&conn->transport_timer);
2639 iscsi_suspend_tx(conn);
2581 2640
2641 spin_lock_bh(&session->lock);
2582 old_stop_stage = conn->stop_stage; 2642 old_stop_stage = conn->stop_stage;
2583 conn->stop_stage = flag; 2643 conn->stop_stage = flag;
2584 conn->c_stage = ISCSI_CONN_STOPPED; 2644 conn->c_stage = ISCSI_CONN_STOPPED;
2585 spin_unlock_bh(&session->lock); 2645 spin_unlock_bh(&session->lock);
2586 2646
2587 iscsi_suspend_tx(conn);
2588 /* 2647 /*
2589 * for connection level recovery we should not calculate 2648 * for connection level recovery we should not calculate
2590 * header digest. conn->hdr_size used for optimization 2649 * header digest. conn->hdr_size used for optimization
@@ -2605,11 +2664,8 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2605 * flush queues. 2664 * flush queues.
2606 */ 2665 */
2607 spin_lock_bh(&session->lock); 2666 spin_lock_bh(&session->lock);
2608 if (flag == STOP_CONN_RECOVER) 2667 fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
2609 fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED); 2668 fail_mgmt_tasks(session, conn);
2610 else
2611 fail_all_commands(conn, -1, DID_ERROR);
2612 flush_control_queues(session, conn);
2613 spin_unlock_bh(&session->lock); 2669 spin_unlock_bh(&session->lock);
2614 mutex_unlock(&session->eh_mutex); 2670 mutex_unlock(&session->eh_mutex);
2615} 2671}
@@ -2651,6 +2707,23 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
2651} 2707}
2652EXPORT_SYMBOL_GPL(iscsi_conn_bind); 2708EXPORT_SYMBOL_GPL(iscsi_conn_bind);
2653 2709
2710static int iscsi_switch_str_param(char **param, char *new_val_buf)
2711{
2712 char *new_val;
2713
2714 if (*param) {
2715 if (!strcmp(*param, new_val_buf))
2716 return 0;
2717 }
2718
2719 new_val = kstrdup(new_val_buf, GFP_NOIO);
2720 if (!new_val)
2721 return -ENOMEM;
2722
2723 kfree(*param);
2724 *param = new_val;
2725 return 0;
2726}
2654 2727
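iscsi_switch_str_param() collapses the repeated kstrdup-and-replace pattern: it leaves the stored string untouched when the new value matches, and it allocates with GFP_NOIO, presumably because parameter updates can arrive while I/O is blocked during recovery. Call-site shape, mirroring the cases rewritten below:

    int rc = iscsi_switch_str_param(&session->username, buf);

    if (rc)
            return rc;      /* -ENOMEM from kstrdup() */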
2655int iscsi_set_param(struct iscsi_cls_conn *cls_conn, 2728int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2656 enum iscsi_param param, char *buf, int buflen) 2729 enum iscsi_param param, char *buf, int buflen)
@@ -2723,38 +2796,15 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2723 sscanf(buf, "%u", &conn->exp_statsn); 2796 sscanf(buf, "%u", &conn->exp_statsn);
2724 break; 2797 break;
2725 case ISCSI_PARAM_USERNAME: 2798 case ISCSI_PARAM_USERNAME:
2726 kfree(session->username); 2799 return iscsi_switch_str_param(&session->username, buf);
2727 session->username = kstrdup(buf, GFP_KERNEL);
2728 if (!session->username)
2729 return -ENOMEM;
2730 break;
2731 case ISCSI_PARAM_USERNAME_IN: 2800 case ISCSI_PARAM_USERNAME_IN:
2732 kfree(session->username_in); 2801 return iscsi_switch_str_param(&session->username_in, buf);
2733 session->username_in = kstrdup(buf, GFP_KERNEL);
2734 if (!session->username_in)
2735 return -ENOMEM;
2736 break;
2737 case ISCSI_PARAM_PASSWORD: 2802 case ISCSI_PARAM_PASSWORD:
2738 kfree(session->password); 2803 return iscsi_switch_str_param(&session->password, buf);
2739 session->password = kstrdup(buf, GFP_KERNEL);
2740 if (!session->password)
2741 return -ENOMEM;
2742 break;
2743 case ISCSI_PARAM_PASSWORD_IN: 2804 case ISCSI_PARAM_PASSWORD_IN:
2744 kfree(session->password_in); 2805 return iscsi_switch_str_param(&session->password_in, buf);
2745 session->password_in = kstrdup(buf, GFP_KERNEL);
2746 if (!session->password_in)
2747 return -ENOMEM;
2748 break;
2749 case ISCSI_PARAM_TARGET_NAME: 2806 case ISCSI_PARAM_TARGET_NAME:
2750 /* this should not change between logins */ 2807 return iscsi_switch_str_param(&session->targetname, buf);
2751 if (session->targetname)
2752 break;
2753
2754 session->targetname = kstrdup(buf, GFP_KERNEL);
2755 if (!session->targetname)
2756 return -ENOMEM;
2757 break;
2758 case ISCSI_PARAM_TPGT: 2808 case ISCSI_PARAM_TPGT:
2759 sscanf(buf, "%d", &session->tpgt); 2809 sscanf(buf, "%d", &session->tpgt);
2760 break; 2810 break;
@@ -2762,25 +2812,11 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2762 sscanf(buf, "%d", &conn->persistent_port); 2812 sscanf(buf, "%d", &conn->persistent_port);
2763 break; 2813 break;
2764 case ISCSI_PARAM_PERSISTENT_ADDRESS: 2814 case ISCSI_PARAM_PERSISTENT_ADDRESS:
2765 /* 2815 return iscsi_switch_str_param(&conn->persistent_address, buf);
2766 * this is the address returned in discovery so it should
2767 * not change between logins.
2768 */
2769 if (conn->persistent_address)
2770 break;
2771
2772 conn->persistent_address = kstrdup(buf, GFP_KERNEL);
2773 if (!conn->persistent_address)
2774 return -ENOMEM;
2775 break;
2776 case ISCSI_PARAM_IFACE_NAME: 2816 case ISCSI_PARAM_IFACE_NAME:
2777 if (!session->ifacename) 2817 return iscsi_switch_str_param(&session->ifacename, buf);
2778 session->ifacename = kstrdup(buf, GFP_KERNEL);
2779 break;
2780 case ISCSI_PARAM_INITIATOR_NAME: 2818 case ISCSI_PARAM_INITIATOR_NAME:
2781 if (!session->initiatorname) 2819 return iscsi_switch_str_param(&session->initiatorname, buf);
2782 session->initiatorname = kstrdup(buf, GFP_KERNEL);
2783 break;
2784 default: 2820 default:
2785 return -ENOSYS; 2821 return -ENOSYS;
2786 } 2822 }
@@ -2851,10 +2887,7 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
2851 len = sprintf(buf, "%s\n", session->ifacename); 2887 len = sprintf(buf, "%s\n", session->ifacename);
2852 break; 2888 break;
2853 case ISCSI_PARAM_INITIATOR_NAME: 2889 case ISCSI_PARAM_INITIATOR_NAME:
2854 if (!session->initiatorname) 2890 len = sprintf(buf, "%s\n", session->initiatorname);
2855 len = sprintf(buf, "%s\n", "unknown");
2856 else
2857 len = sprintf(buf, "%s\n", session->initiatorname);
2858 break; 2891 break;
2859 default: 2892 default:
2860 return -ENOSYS; 2893 return -ENOSYS;
@@ -2920,29 +2953,16 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2920 2953
2921 switch (param) { 2954 switch (param) {
2922 case ISCSI_HOST_PARAM_NETDEV_NAME: 2955 case ISCSI_HOST_PARAM_NETDEV_NAME:
2923 if (!ihost->netdev) 2956 len = sprintf(buf, "%s\n", ihost->netdev);
2924 len = sprintf(buf, "%s\n", "default");
2925 else
2926 len = sprintf(buf, "%s\n", ihost->netdev);
2927 break; 2957 break;
2928 case ISCSI_HOST_PARAM_HWADDRESS: 2958 case ISCSI_HOST_PARAM_HWADDRESS:
2929 if (!ihost->hwaddress) 2959 len = sprintf(buf, "%s\n", ihost->hwaddress);
2930 len = sprintf(buf, "%s\n", "default");
2931 else
2932 len = sprintf(buf, "%s\n", ihost->hwaddress);
2933 break; 2960 break;
2934 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2961 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2935 if (!ihost->initiatorname) 2962 len = sprintf(buf, "%s\n", ihost->initiatorname);
2936 len = sprintf(buf, "%s\n", "unknown");
2937 else
2938 len = sprintf(buf, "%s\n", ihost->initiatorname);
2939 break; 2963 break;
2940 case ISCSI_HOST_PARAM_IPADDRESS: 2964 case ISCSI_HOST_PARAM_IPADDRESS:
2941 if (!strlen(ihost->local_address)) 2965 len = sprintf(buf, "%s\n", ihost->local_address);
2942 len = sprintf(buf, "%s\n", "unknown");
2943 else
2944 len = sprintf(buf, "%s\n",
2945 ihost->local_address);
2946 break; 2966 break;
2947 default: 2967 default:
2948 return -ENOSYS; 2968 return -ENOSYS;
@@ -2959,17 +2979,11 @@ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2959 2979
2960 switch (param) { 2980 switch (param) {
2961 case ISCSI_HOST_PARAM_NETDEV_NAME: 2981 case ISCSI_HOST_PARAM_NETDEV_NAME:
2962 if (!ihost->netdev) 2982 return iscsi_switch_str_param(&ihost->netdev, buf);
2963 ihost->netdev = kstrdup(buf, GFP_KERNEL);
2964 break;
2965 case ISCSI_HOST_PARAM_HWADDRESS: 2983 case ISCSI_HOST_PARAM_HWADDRESS:
2966 if (!ihost->hwaddress) 2984 return iscsi_switch_str_param(&ihost->hwaddress, buf);
2967 ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
2968 break;
2969 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2985 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2970 if (!ihost->initiatorname) 2986 return iscsi_switch_str_param(&ihost->initiatorname, buf);
2971 ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
2972 break;
2973 default: 2987 default:
2974 return -ENOSYS; 2988 return -ENOSYS;
2975 } 2989 }
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index b579ca9f483..2bc07090321 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -440,8 +440,8 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
440 struct iscsi_tcp_task *tcp_task = task->dd_data; 440 struct iscsi_tcp_task *tcp_task = task->dd_data;
441 struct iscsi_r2t_info *r2t; 441 struct iscsi_r2t_info *r2t;
442 442
443 /* nothing to do for mgmt or pending tasks */ 443 /* nothing to do for mgmt */
444 if (!task->sc || task->state == ISCSI_TASK_PENDING) 444 if (!task->sc)
445 return; 445 return;
446 446
447 /* flush task's r2t queues */ 447 /* flush task's r2t queues */
@@ -473,7 +473,13 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
473 int datasn = be32_to_cpu(rhdr->datasn); 473 int datasn = be32_to_cpu(rhdr->datasn);
474 unsigned total_in_length = scsi_in(task->sc)->length; 474 unsigned total_in_length = scsi_in(task->sc)->length;
475 475
476 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr); 476 /*
477 * lib iscsi will update this in the completion handling if there
478 * is status.
479 */
480 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
481 iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
482
477 if (tcp_conn->in.datalen == 0) 483 if (tcp_conn->in.datalen == 0)
478 return 0; 484 return 0;
479 485
@@ -857,6 +863,12 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
857 int rc = 0; 863 int rc = 0;
858 864
859 ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset); 865 ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset);
866 /*
867 * Update for each skb instead of pdu, because over slow networks a
868 * data_in's data could take a while to read in. We also want to
869 * account for r2ts.
870 */
871 conn->last_recv = jiffies;
860 872
861 if (unlikely(conn->suspend_rx)) { 873 if (unlikely(conn->suspend_rx)) {
862 ISCSI_DBG_TCP(conn, "Rx suspended!\n"); 874 ISCSI_DBG_TCP(conn, "Rx suspended!\n");
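With conn->last_recv now refreshed in iscsi_tcp_recv_skb() for every skb rather than once per PDU, a slow Data-In sequence or an R2T exchange keeps the timestamp current. A timeout check built on such a timestamp would use the jiffies wrap-safe comparison helpers; a sketch under that assumption (recv_timed_out_demo and its timeout argument are illustrative, not the libiscsi API):

#include <linux/jiffies.h>

/* Sketch: has the connection received nothing for @timeout jiffies? */
static bool recv_timed_out_demo(struct iscsi_conn *conn,
				unsigned long timeout)
{
	/* time_after() handles jiffies wrap-around correctly */
	return time_after(jiffies, conn->last_recv + timeout);
}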
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 3da02e43678..54fa1e42dc4 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1927,21 +1927,21 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1927 /* do we need to support multiple segments? */ 1927 /* do we need to support multiple segments? */
1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1929 printk("%s: multiple segments req %u %u, rsp %u %u\n", 1929 printk("%s: multiple segments req %u %u, rsp %u %u\n",
1930 __func__, req->bio->bi_vcnt, req->data_len, 1930 __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
1931 rsp->bio->bi_vcnt, rsp->data_len); 1931 rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1932 return -EINVAL; 1932 return -EINVAL;
1933 } 1933 }
1934 1934
1935 ret = smp_execute_task(dev, bio_data(req->bio), req->data_len, 1935 ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req),
1936 bio_data(rsp->bio), rsp->data_len); 1936 bio_data(rsp->bio), blk_rq_bytes(rsp));
1937 if (ret > 0) { 1937 if (ret > 0) {
1938 /* positive number is the untransferred residual */ 1938 /* positive number is the untransferred residual */
1939 rsp->data_len = ret; 1939 rsp->resid_len = ret;
1940 req->data_len = 0; 1940 req->resid_len = 0;
1941 ret = 0; 1941 ret = 0;
1942 } else if (ret == 0) { 1942 } else if (ret == 0) {
1943 rsp->data_len = 0; 1943 rsp->resid_len = 0;
1944 req->data_len = 0; 1944 req->resid_len = 0;
1945 } 1945 }
1946 1946
1947 return ret; 1947 return ret;
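Both hunks above move from the removed request ->data_len field to the blk_rq_bytes() accessor for total size, and report the untransferred remainder through ->resid_len. Under that model the bytes actually transferred are the total minus the residual; a minimal sketch (transferred_demo is illustrative):

#include <linux/blkdev.h>

/* Sketch: bytes moved once ->resid_len carries the residual count. */
static unsigned int transferred_demo(struct request *rq)
{
	return blk_rq_bytes(rq) - rq->resid_len;
}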
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index d110a366c48..1bc3b756799 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -134,24 +134,24 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
134{ 134{
135 u8 *req_data = NULL, *resp_data = NULL, *buf; 135 u8 *req_data = NULL, *resp_data = NULL, *buf;
136 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); 136 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
137 int error = -EINVAL, resp_data_len = rsp->data_len; 137 int error = -EINVAL;
138 138
139 /* eight is the minimum size for request and response frames */ 139 /* eight is the minimum size for request and response frames */
140 if (req->data_len < 8 || rsp->data_len < 8) 140 if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8)
141 goto out; 141 goto out;
142 142
143 if (bio_offset(req->bio) + req->data_len > PAGE_SIZE || 143 if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE ||
144 bio_offset(rsp->bio) + rsp->data_len > PAGE_SIZE) { 144 bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) {
145 shost_printk(KERN_ERR, shost, 145 shost_printk(KERN_ERR, shost,
146 "SMP request/response frame crosses page boundary"); 146 "SMP request/response frame crosses page boundary");
147 goto out; 147 goto out;
148 } 148 }
149 149
150 req_data = kzalloc(req->data_len, GFP_KERNEL); 150 req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL);
151 151
152 /* make sure frame can always be built ... we copy 152 /* make sure frame can always be built ... we copy
153 * back only the requested length */ 153 * back only the requested length */
154 resp_data = kzalloc(max(rsp->data_len, 128U), GFP_KERNEL); 154 resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL);
155 155
156 if (!req_data || !resp_data) { 156 if (!req_data || !resp_data) {
157 error = -ENOMEM; 157 error = -ENOMEM;
@@ -160,7 +160,7 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
160 160
161 local_irq_disable(); 161 local_irq_disable();
162 buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio); 162 buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio);
163 memcpy(req_data, buf, req->data_len); 163 memcpy(req_data, buf, blk_rq_bytes(req));
164 kunmap_atomic(buf - bio_offset(req->bio), KM_USER0); 164 kunmap_atomic(buf - bio_offset(req->bio), KM_USER0);
165 local_irq_enable(); 165 local_irq_enable();
166 166
@@ -178,15 +178,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
178 178
179 switch (req_data[1]) { 179 switch (req_data[1]) {
180 case SMP_REPORT_GENERAL: 180 case SMP_REPORT_GENERAL:
181 req->data_len -= 8; 181 req->resid_len -= 8;
182 resp_data_len -= 32; 182 rsp->resid_len -= 32;
183 resp_data[2] = SMP_RESP_FUNC_ACC; 183 resp_data[2] = SMP_RESP_FUNC_ACC;
184 resp_data[9] = sas_ha->num_phys; 184 resp_data[9] = sas_ha->num_phys;
185 break; 185 break;
186 186
187 case SMP_REPORT_MANUF_INFO: 187 case SMP_REPORT_MANUF_INFO:
188 req->data_len -= 8; 188 req->resid_len -= 8;
189 resp_data_len -= 64; 189 rsp->resid_len -= 64;
190 resp_data[2] = SMP_RESP_FUNC_ACC; 190 resp_data[2] = SMP_RESP_FUNC_ACC;
191 memcpy(resp_data + 12, shost->hostt->name, 191 memcpy(resp_data + 12, shost->hostt->name,
192 SAS_EXPANDER_VENDOR_ID_LEN); 192 SAS_EXPANDER_VENDOR_ID_LEN);
@@ -199,13 +199,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
199 break; 199 break;
200 200
201 case SMP_DISCOVER: 201 case SMP_DISCOVER:
202 req->data_len -= 16; 202 req->resid_len -= 16;
203 if ((int)req->data_len < 0) { 203 if ((int)req->resid_len < 0) {
204 req->data_len = 0; 204 req->resid_len = 0;
205 error = -EINVAL; 205 error = -EINVAL;
206 goto out; 206 goto out;
207 } 207 }
208 resp_data_len -= 56; 208 rsp->resid_len -= 56;
209 sas_host_smp_discover(sas_ha, resp_data, req_data[9]); 209 sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
210 break; 210 break;
211 211
@@ -215,13 +215,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
215 break; 215 break;
216 216
217 case SMP_REPORT_PHY_SATA: 217 case SMP_REPORT_PHY_SATA:
218 req->data_len -= 16; 218 req->resid_len -= 16;
219 if ((int)req->data_len < 0) { 219 if ((int)req->resid_len < 0) {
220 req->data_len = 0; 220 req->resid_len = 0;
221 error = -EINVAL; 221 error = -EINVAL;
222 goto out; 222 goto out;
223 } 223 }
224 resp_data_len -= 60; 224 rsp->resid_len -= 60;
225 sas_report_phy_sata(sas_ha, resp_data, req_data[9]); 225 sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
226 break; 226 break;
227 227
@@ -238,13 +238,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
238 break; 238 break;
239 239
240 case SMP_PHY_CONTROL: 240 case SMP_PHY_CONTROL:
241 req->data_len -= 44; 241 req->resid_len -= 44;
242 if ((int)req->data_len < 0) { 242 if ((int)req->resid_len < 0) {
243 req->data_len = 0; 243 req->resid_len = 0;
244 error = -EINVAL; 244 error = -EINVAL;
245 goto out; 245 goto out;
246 } 246 }
247 resp_data_len -= 8; 247 rsp->resid_len -= 8;
248 sas_phy_control(sas_ha, req_data[9], req_data[10], 248 sas_phy_control(sas_ha, req_data[9], req_data[10],
249 req_data[32] >> 4, req_data[33] >> 4, 249 req_data[32] >> 4, req_data[33] >> 4,
250 resp_data); 250 resp_data);
@@ -261,11 +261,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
261 261
262 local_irq_disable(); 262 local_irq_disable();
263 buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio); 263 buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio);
264 memcpy(buf, resp_data, rsp->data_len); 264 memcpy(buf, resp_data, blk_rq_bytes(rsp));
265 flush_kernel_dcache_page(bio_page(rsp->bio)); 265 flush_kernel_dcache_page(bio_page(rsp->bio));
266 kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0); 266 kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0);
267 local_irq_enable(); 267 local_irq_enable();
268 rsp->data_len = resp_data_len;
269 268
270 out: 269 out:
271 kfree(req_data); 270 kfree(req_data);
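The SMP handler now books consumed request bytes and produced response bytes directly against req->resid_len and rsp->resid_len, with a signed underflow check rejecting short request frames. That recurring decrement-and-check could be factored into a helper; a sketch assuming the same semantics (smp_consume_demo is hypothetical, not part of libsas):

/* Sketch: consume @bytes of request residual, -EINVAL if frame is short. */
static int smp_consume_demo(struct request *req, unsigned int bytes)
{
	req->resid_len -= bytes;
	if ((int)req->resid_len < 0) {
		req->resid_len = 0;
		return -EINVAL;
	}
	return 0;
}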
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1105f9a111b..54056984909 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -23,6 +23,13 @@
23 23
24struct lpfc_sli2_slim; 24struct lpfc_sli2_slim;
25 25
26#define LPFC_PCI_DEV_LP 0x1
27#define LPFC_PCI_DEV_OC 0x2
28
29#define LPFC_SLI_REV2 2
30#define LPFC_SLI_REV3 3
31#define LPFC_SLI_REV4 4
32
26#define LPFC_MAX_TARGET 4096 /* max number of targets supported */ 33#define LPFC_MAX_TARGET 4096 /* max number of targets supported */
27#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els 34#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
28 requests */ 35 requests */
@@ -98,9 +105,11 @@ struct lpfc_dma_pool {
98}; 105};
99 106
100struct hbq_dmabuf { 107struct hbq_dmabuf {
108 struct lpfc_dmabuf hbuf;
101 struct lpfc_dmabuf dbuf; 109 struct lpfc_dmabuf dbuf;
102 uint32_t size; 110 uint32_t size;
103 uint32_t tag; 111 uint32_t tag;
112 struct lpfc_rcqe rcqe;
104}; 113};
105 114
106/* Priority bit. Set value to exceed low water mark in lpfc_mem. */ 115/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
@@ -134,7 +143,10 @@ typedef struct lpfc_vpd {
134 } rev; 143 } rev;
135 struct { 144 struct {
136#ifdef __BIG_ENDIAN_BITFIELD 145#ifdef __BIG_ENDIAN_BITFIELD
137 uint32_t rsvd2 :24; /* Reserved */ 146 uint32_t rsvd3 :19; /* Reserved */
147 uint32_t cdss : 1; /* Configure Data Security SLI */
148 uint32_t rsvd2 : 3; /* Reserved */
149 uint32_t cbg : 1; /* Configure BlockGuard */
138 uint32_t cmv : 1; /* Configure Max VPIs */ 150 uint32_t cmv : 1; /* Configure Max VPIs */
139 uint32_t ccrp : 1; /* Config Command Ring Polling */ 151 uint32_t ccrp : 1; /* Config Command Ring Polling */
140 uint32_t csah : 1; /* Configure Synchronous Abort Handling */ 152 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
@@ -152,7 +164,10 @@ typedef struct lpfc_vpd {
152 uint32_t csah : 1; /* Configure Synchronous Abort Handling */ 164 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
153 uint32_t ccrp : 1; /* Config Command Ring Polling */ 165 uint32_t ccrp : 1; /* Config Command Ring Polling */
154 uint32_t cmv : 1; /* Configure Max VPIs */ 166 uint32_t cmv : 1; /* Configure Max VPIs */
155 uint32_t rsvd2 :24; /* Reserved */ 167 uint32_t cbg : 1; /* Configure BlockGuard */
168 uint32_t rsvd2 : 3; /* Reserved */
169 uint32_t cdss : 1; /* Configure Data Security SLI */
170 uint32_t rsvd3 :19; /* Reserved */
156#endif 171#endif
157 } sli3Feat; 172 } sli3Feat;
158} lpfc_vpd_t; 173} lpfc_vpd_t;
@@ -264,8 +279,8 @@ enum hba_state {
264}; 279};
265 280
266struct lpfc_vport { 281struct lpfc_vport {
267 struct list_head listentry;
268 struct lpfc_hba *phba; 282 struct lpfc_hba *phba;
283 struct list_head listentry;
269 uint8_t port_type; 284 uint8_t port_type;
270#define LPFC_PHYSICAL_PORT 1 285#define LPFC_PHYSICAL_PORT 1
271#define LPFC_NPIV_PORT 2 286#define LPFC_NPIV_PORT 2
@@ -273,6 +288,9 @@ struct lpfc_vport {
273 enum discovery_state port_state; 288 enum discovery_state port_state;
274 289
275 uint16_t vpi; 290 uint16_t vpi;
291 uint16_t vfi;
292 uint8_t vfi_state;
293#define LPFC_VFI_REGISTERED 0x1
276 294
277 uint32_t fc_flag; /* FC flags */ 295 uint32_t fc_flag; /* FC flags */
278/* Several of these flags are HBA centric and should be moved to 296/* Several of these flags are HBA centric and should be moved to
@@ -385,6 +403,9 @@ struct lpfc_vport {
385#endif 403#endif
386 uint8_t stat_data_enabled; 404 uint8_t stat_data_enabled;
387 uint8_t stat_data_blocked; 405 uint8_t stat_data_blocked;
406 struct list_head rcv_buffer_list;
407 uint32_t vport_flag;
408#define STATIC_VPORT 1
388}; 409};
389 410
390struct hbq_s { 411struct hbq_s {
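The new vport_flag field and its STATIC_VPORT bit appear to mark vports created from the adapter's saved vport configuration rather than by the administrator; a sysfs attribute added later in this patch reports the bit read-only. A trivial test sketch (the _demo helper is illustrative):

/* Sketch: is this vport a firmware-defined (static) vport? */
static bool lpfc_vport_is_static_demo(struct lpfc_vport *vport)
{
	return (vport->vport_flag & STATIC_VPORT) != 0;
}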
@@ -420,8 +441,66 @@ enum intr_type_t {
420}; 441};
421 442
422struct lpfc_hba { 443struct lpfc_hba {
444 /* SCSI interface function jump table entries */
445 int (*lpfc_new_scsi_buf)
446 (struct lpfc_vport *, int);
447 struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
448 (struct lpfc_hba *);
449 int (*lpfc_scsi_prep_dma_buf)
450 (struct lpfc_hba *, struct lpfc_scsi_buf *);
451 void (*lpfc_scsi_unprep_dma_buf)
452 (struct lpfc_hba *, struct lpfc_scsi_buf *);
453 void (*lpfc_release_scsi_buf)
454 (struct lpfc_hba *, struct lpfc_scsi_buf *);
455 void (*lpfc_rampdown_queue_depth)
456 (struct lpfc_hba *);
457 void (*lpfc_scsi_prep_cmnd)
458 (struct lpfc_vport *, struct lpfc_scsi_buf *,
459 struct lpfc_nodelist *);
460 int (*lpfc_scsi_prep_task_mgmt_cmd)
461 (struct lpfc_vport *, struct lpfc_scsi_buf *,
462 unsigned int, uint8_t);
463
464 /* IOCB interface function jump table entries */
465 int (*__lpfc_sli_issue_iocb)
466 (struct lpfc_hba *, uint32_t,
467 struct lpfc_iocbq *, uint32_t);
468 void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
469 struct lpfc_iocbq *);
470 int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
471
472
473 IOCB_t * (*lpfc_get_iocb_from_iocbq)
474 (struct lpfc_iocbq *);
475 void (*lpfc_scsi_cmd_iocb_cmpl)
476 (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
477
478 /* MBOX interface function jump table entries */
479 int (*lpfc_sli_issue_mbox)
480 (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
481 /* Slow-path IOCB process function jump table entries */
482 void (*lpfc_sli_handle_slow_ring_event)
483 (struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
484 uint32_t mask);
485 /* INIT device interface function jump table entries */
486 int (*lpfc_sli_hbq_to_firmware)
487 (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
488 int (*lpfc_sli_brdrestart)
489 (struct lpfc_hba *);
490 int (*lpfc_sli_brdready)
491 (struct lpfc_hba *, uint32_t);
492 void (*lpfc_handle_eratt)
493 (struct lpfc_hba *);
494 void (*lpfc_stop_port)
495 (struct lpfc_hba *);
496
497
498 /* SLI4 specific HBA data structure */
499 struct lpfc_sli4_hba sli4_hba;
500
423 struct lpfc_sli sli; 501 struct lpfc_sli sli;
424 uint32_t sli_rev; /* SLI2 or SLI3 */ 502 uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
503 uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */
425 uint32_t sli3_options; /* Mask of enabled SLI3 options */ 504 uint32_t sli3_options; /* Mask of enabled SLI3 options */
426#define LPFC_SLI3_HBQ_ENABLED 0x01 505#define LPFC_SLI3_HBQ_ENABLED 0x01
427#define LPFC_SLI3_NPIV_ENABLED 0x02 506#define LPFC_SLI3_NPIV_ENABLED 0x02
@@ -429,6 +508,7 @@ struct lpfc_hba {
429#define LPFC_SLI3_CRP_ENABLED 0x08 508#define LPFC_SLI3_CRP_ENABLED 0x08
430#define LPFC_SLI3_INB_ENABLED 0x10 509#define LPFC_SLI3_INB_ENABLED 0x10
431#define LPFC_SLI3_BG_ENABLED 0x20 510#define LPFC_SLI3_BG_ENABLED 0x20
511#define LPFC_SLI3_DSS_ENABLED 0x40
432 uint32_t iocb_cmd_size; 512 uint32_t iocb_cmd_size;
433 uint32_t iocb_rsp_size; 513 uint32_t iocb_rsp_size;
434 514
@@ -442,8 +522,13 @@ struct lpfc_hba {
442 522
443 uint32_t hba_flag; /* hba generic flags */ 523 uint32_t hba_flag; /* hba generic flags */
444#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ 524#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
445 525#define DEFER_ERATT 0x2 /* Deferred error attention in progress */
446#define DEFER_ERATT 0x4 /* Deferred error attention in progress */ 526#define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */
527#define HBA_RECEIVE_BUFFER 0x8 /* Rcv buffer posted to worker thread */
528#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
529#define FCP_XRI_ABORT_EVENT 0x20
530#define ELS_XRI_ABORT_EVENT 0x40
531#define ASYNC_EVENT 0x80
447 struct lpfc_dmabuf slim2p; 532 struct lpfc_dmabuf slim2p;
448 533
449 MAILBOX_t *mbox; 534 MAILBOX_t *mbox;
@@ -502,6 +587,9 @@ struct lpfc_hba {
502 uint32_t cfg_poll; 587 uint32_t cfg_poll;
503 uint32_t cfg_poll_tmo; 588 uint32_t cfg_poll_tmo;
504 uint32_t cfg_use_msi; 589 uint32_t cfg_use_msi;
590 uint32_t cfg_fcp_imax;
591 uint32_t cfg_fcp_wq_count;
592 uint32_t cfg_fcp_eq_count;
505 uint32_t cfg_sg_seg_cnt; 593 uint32_t cfg_sg_seg_cnt;
506 uint32_t cfg_prot_sg_seg_cnt; 594 uint32_t cfg_prot_sg_seg_cnt;
507 uint32_t cfg_sg_dma_buf_size; 595 uint32_t cfg_sg_dma_buf_size;
@@ -511,6 +599,8 @@ struct lpfc_hba {
511 uint32_t cfg_enable_hba_reset; 599 uint32_t cfg_enable_hba_reset;
512 uint32_t cfg_enable_hba_heartbeat; 600 uint32_t cfg_enable_hba_heartbeat;
513 uint32_t cfg_enable_bg; 601 uint32_t cfg_enable_bg;
602 uint32_t cfg_enable_fip;
603 uint32_t cfg_log_verbose;
514 604
515 lpfc_vpd_t vpd; /* vital product data */ 605 lpfc_vpd_t vpd; /* vital product data */
516 606
@@ -526,11 +616,12 @@ struct lpfc_hba {
526 unsigned long data_flags; 616 unsigned long data_flags;
527 617
528 uint32_t hbq_in_use; /* HBQs in use flag */ 618 uint32_t hbq_in_use; /* HBQs in use flag */
529 struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */ 619 struct list_head rb_pend_list; /* Received buffers to be processed */
530 uint32_t hbq_count; /* Count of configured HBQs */ 620 uint32_t hbq_count; /* Count of configured HBQs */
531 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indices */ 621 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indices */
532 622
533 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ 623 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
624 unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */
534 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */ 625 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
535 void __iomem *slim_memmap_p; /* Kernel memory mapped address for 626 void __iomem *slim_memmap_p; /* Kernel memory mapped address for
536 PCI BAR0 */ 627 PCI BAR0 */
@@ -593,7 +684,8 @@ struct lpfc_hba {
593 /* pci_mem_pools */ 684 /* pci_mem_pools */
594 struct pci_pool *lpfc_scsi_dma_buf_pool; 685 struct pci_pool *lpfc_scsi_dma_buf_pool;
595 struct pci_pool *lpfc_mbuf_pool; 686 struct pci_pool *lpfc_mbuf_pool;
596 struct pci_pool *lpfc_hbq_pool; 687 struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
688 struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
597 struct lpfc_dma_pool lpfc_mbuf_safety_pool; 689 struct lpfc_dma_pool lpfc_mbuf_safety_pool;
598 690
599 mempool_t *mbox_mem_pool; 691 mempool_t *mbox_mem_pool;
@@ -609,6 +701,14 @@ struct lpfc_hba {
609 struct lpfc_vport *pport; /* physical lpfc_vport pointer */ 701 struct lpfc_vport *pport; /* physical lpfc_vport pointer */
610 uint16_t max_vpi; /* Maximum virtual nports */ 702 uint16_t max_vpi; /* Maximum virtual nports */
611#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */ 703#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
704 uint16_t max_vports; /*
705 * For IOV HBAs max_vpi can change
706 * after a reset. max_vports is max
707 * number of vports present. This can
708 * be greater than max_vpi.
709 */
710 uint16_t vpi_base;
711 uint16_t vfi_base;
612 unsigned long *vpi_bmask; /* vpi allocation table */ 712 unsigned long *vpi_bmask; /* vpi allocation table */
613 713
614 /* Data structure used by fabric iocb scheduler */ 714 /* Data structure used by fabric iocb scheduler */
@@ -667,6 +767,11 @@ struct lpfc_hba {
667/* Maximum number of events that can be outstanding at any time*/ 767/* Maximum number of events that can be outstanding at any time*/
668#define LPFC_MAX_EVT_COUNT 512 768#define LPFC_MAX_EVT_COUNT 512
669 atomic_t fast_event_count; 769 atomic_t fast_event_count;
770 struct lpfc_fcf fcf;
771 uint8_t fc_map[3];
772 uint8_t valid_vlan;
773 uint16_t vlan_id;
774 struct list_head fcf_conn_rec_list;
670}; 775};
671 776
672static inline struct Scsi_Host * 777static inline struct Scsi_Host *
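The new function-pointer members turn struct lpfc_hba into a per-revision dispatch table: the probe path installs SLI-3 or SLI-4 implementations once, keyed off sli_rev/pci_dev_grp, and the rest of the driver calls through the pointers without revision checks. A minimal wiring sketch, assuming per-revision variants exist (the _s3/_s4 names are illustrative here):

/* Sketch: select one jump-table slot at probe time by SLI revision. */
static void lpfc_setup_dispatch_demo(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
	else	/* SLI-2 / SLI-3 */
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
}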
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c14f0cbdb12..d73e677201f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -30,8 +30,10 @@
30#include <scsi/scsi_tcq.h> 30#include <scsi/scsi_tcq.h>
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32 32
33#include "lpfc_hw4.h"
33#include "lpfc_hw.h" 34#include "lpfc_hw.h"
34#include "lpfc_sli.h" 35#include "lpfc_sli.h"
36#include "lpfc_sli4.h"
35#include "lpfc_nl.h" 37#include "lpfc_nl.h"
36#include "lpfc_disc.h" 38#include "lpfc_disc.h"
37#include "lpfc_scsi.h" 39#include "lpfc_scsi.h"
@@ -505,12 +507,14 @@ lpfc_issue_lip(struct Scsi_Host *shost)
505 return -ENOMEM; 507 return -ENOMEM;
506 508
507 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 509 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
508 pmboxq->mb.mbxCommand = MBX_DOWN_LINK; 510 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
509 pmboxq->mb.mbxOwner = OWN_HOST; 511 pmboxq->u.mb.mbxOwner = OWN_HOST;
510 512
511 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); 513 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
512 514
513 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { 515 if ((mbxstatus == MBX_SUCCESS) &&
516 (pmboxq->u.mb.mbxStatus == 0 ||
517 pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
514 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 518 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
515 lpfc_init_link(phba, pmboxq, phba->cfg_topology, 519 lpfc_init_link(phba, pmboxq, phba->cfg_topology,
516 phba->cfg_link_speed); 520 phba->cfg_link_speed);
@@ -789,7 +793,8 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
789 uint32_t *mrpi, uint32_t *arpi, 793 uint32_t *mrpi, uint32_t *arpi,
790 uint32_t *mvpi, uint32_t *avpi) 794 uint32_t *mvpi, uint32_t *avpi)
791{ 795{
792 struct lpfc_sli *psli = &phba->sli; 796 struct lpfc_sli *psli = &phba->sli;
797 struct lpfc_mbx_read_config *rd_config;
793 LPFC_MBOXQ_t *pmboxq; 798 LPFC_MBOXQ_t *pmboxq;
794 MAILBOX_t *pmb; 799 MAILBOX_t *pmb;
795 int rc = 0; 800 int rc = 0;
@@ -800,7 +805,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
800 */ 805 */
801 if (phba->link_state < LPFC_LINK_DOWN || 806 if (phba->link_state < LPFC_LINK_DOWN ||
802 !phba->mbox_mem_pool || 807 !phba->mbox_mem_pool ||
803 (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) 808 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
804 return 0; 809 return 0;
805 810
806 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 811 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -811,13 +816,13 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
811 return 0; 816 return 0;
812 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 817 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
813 818
814 pmb = &pmboxq->mb; 819 pmb = &pmboxq->u.mb;
815 pmb->mbxCommand = MBX_READ_CONFIG; 820 pmb->mbxCommand = MBX_READ_CONFIG;
816 pmb->mbxOwner = OWN_HOST; 821 pmb->mbxOwner = OWN_HOST;
817 pmboxq->context1 = NULL; 822 pmboxq->context1 = NULL;
818 823
819 if ((phba->pport->fc_flag & FC_OFFLINE_MODE) || 824 if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
820 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 825 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
821 rc = MBX_NOT_FINISHED; 826 rc = MBX_NOT_FINISHED;
822 else 827 else
823 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 828 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -828,18 +833,37 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
828 return 0; 833 return 0;
829 } 834 }
830 835
831 if (mrpi) 836 if (phba->sli_rev == LPFC_SLI_REV4) {
832 *mrpi = pmb->un.varRdConfig.max_rpi; 837 rd_config = &pmboxq->u.mqe.un.rd_config;
833 if (arpi) 838 if (mrpi)
834 *arpi = pmb->un.varRdConfig.avail_rpi; 839 *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
835 if (mxri) 840 if (arpi)
836 *mxri = pmb->un.varRdConfig.max_xri; 841 *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
837 if (axri) 842 phba->sli4_hba.max_cfg_param.rpi_used;
838 *axri = pmb->un.varRdConfig.avail_xri; 843 if (mxri)
839 if (mvpi) 844 *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
840 *mvpi = pmb->un.varRdConfig.max_vpi; 845 if (axri)
841 if (avpi) 846 *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
842 *avpi = pmb->un.varRdConfig.avail_vpi; 847 phba->sli4_hba.max_cfg_param.xri_used;
848 if (mvpi)
849 *mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
850 if (avpi)
851 *avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) -
852 phba->sli4_hba.max_cfg_param.vpi_used;
853 } else {
854 if (mrpi)
855 *mrpi = pmb->un.varRdConfig.max_rpi;
856 if (arpi)
857 *arpi = pmb->un.varRdConfig.avail_rpi;
858 if (mxri)
859 *mxri = pmb->un.varRdConfig.max_xri;
860 if (axri)
861 *axri = pmb->un.varRdConfig.avail_xri;
862 if (mvpi)
863 *mvpi = pmb->un.varRdConfig.max_vpi;
864 if (avpi)
865 *avpi = pmb->un.varRdConfig.avail_vpi;
866 }
843 867
844 mempool_free(pmboxq, phba->mbox_mem_pool); 868 mempool_free(pmboxq, phba->mbox_mem_pool);
845 return 1; 869 return 1;
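For SLI-4 the READ_CONFIG reply carries totals and in-use counts, so the hunk above derives each available figure by subtraction, whereas SLI-3 firmware reported max and avail fields directly. The VPI leg of that arithmetic, isolated as a sketch (sli4_vpi_demo is illustrative; the accessors mirror the code above):

/* Sketch: SLI-4 reports count and used; "available" is derived. */
static void sli4_vpi_demo(struct lpfc_hba *phba,
			  struct lpfc_mbx_read_config *rd_config,
			  uint32_t *mvpi, uint32_t *avpi)
{
	*mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
	*avpi = *mvpi - phba->sli4_hba.max_cfg_param.vpi_used;
}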
@@ -2021,22 +2045,9 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
2021# lpfc_log_verbose: Only turn this flag on if you are willing to risk being 2045# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
2022# deluged with LOTS of information. 2046# deluged with LOTS of information.
2023# You can set a bit mask to record specific types of verbose messages: 2047# You can set a bit mask to record specific types of verbose messages:
2024# 2048# See lpfc_logmsg.h for definitions.
2025# LOG_ELS 0x1 ELS events
2026# LOG_DISCOVERY 0x2 Link discovery events
2027# LOG_MBOX 0x4 Mailbox events
2028# LOG_INIT 0x8 Initialization events
2029# LOG_LINK_EVENT 0x10 Link events
2030# LOG_FCP 0x40 FCP traffic history
2031# LOG_NODE 0x80 Node table events
2032# LOG_BG 0x200 BlockBuard events
2033# LOG_MISC 0x400 Miscellaneous events
2034# LOG_SLI 0x800 SLI events
2035# LOG_FCP_ERROR 0x1000 Only log FCP errors
2036# LOG_LIBDFC 0x2000 LIBDFC events
2037# LOG_ALL_MSG 0xffff LOG all messages
2038*/ 2049*/
2039LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff, 2050LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
2040 "Verbose logging bit-mask"); 2051 "Verbose logging bit-mask");
2041 2052
2042/* 2053/*
@@ -2266,6 +2277,36 @@ lpfc_param_init(topology, 0, 0, 6)
2266static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR, 2277static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
2267 lpfc_topology_show, lpfc_topology_store); 2278 lpfc_topology_show, lpfc_topology_store);
2268 2279
2280/**
2281 * lpfc_static_vport_show - Read callback function for
2282 * lpfc_static_vport sysfs file.
2283 * @dev: Pointer to class device object.
2284 * @attr: device attribute structure.
2285 * @buf: Data buffer.
2286 *
2287 * This function is the read callback function for the
2288 * lpfc_static_vport sysfs file. The lpfc_static_vport
2289 * sysfs file reports the manageability of the vport.
2290 **/
2291static ssize_t
2292lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
2293 char *buf)
2294{
2295 struct Scsi_Host *shost = class_to_shost(dev);
2296 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2297 if (vport->vport_flag & STATIC_VPORT)
2298 sprintf(buf, "1\n");
2299 else
2300 sprintf(buf, "0\n");
2301
2302 return strlen(buf);
2303}
2304
2305/*
2306 * Sysfs attribute to control the statistical data collection.
2307 */
2308static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
2309 lpfc_static_vport_show, NULL);
2269 2310
2270/** 2311/**
2271 * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file 2312 * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
@@ -2341,7 +2382,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
2341 if (vports == NULL) 2382 if (vports == NULL)
2342 return -ENOMEM; 2383 return -ENOMEM;
2343 2384
2344 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2385 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2345 v_shost = lpfc_shost_from_vport(vports[i]); 2386 v_shost = lpfc_shost_from_vport(vports[i]);
2346 spin_lock_irq(v_shost->host_lock); 2387 spin_lock_irq(v_shost->host_lock);
2347 /* Block and reset data collection */ 2388 /* Block and reset data collection */
@@ -2356,7 +2397,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
2356 phba->bucket_base = base; 2397 phba->bucket_base = base;
2357 phba->bucket_step = step; 2398 phba->bucket_step = step;
2358 2399
2359 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2400 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2360 v_shost = lpfc_shost_from_vport(vports[i]); 2401 v_shost = lpfc_shost_from_vport(vports[i]);
2361 2402
2362 /* Unblock data collection */ 2403 /* Unblock data collection */
@@ -2373,7 +2414,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
2373 if (vports == NULL) 2414 if (vports == NULL)
2374 return -ENOMEM; 2415 return -ENOMEM;
2375 2416
2376 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2417 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2377 v_shost = lpfc_shost_from_vport(vports[i]); 2418 v_shost = lpfc_shost_from_vport(vports[i]);
2378 spin_lock_irq(shost->host_lock); 2419 spin_lock_irq(shost->host_lock);
2379 vports[i]->stat_data_blocked = 1; 2420 vports[i]->stat_data_blocked = 1;
@@ -2844,15 +2885,39 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
2844/* 2885/*
2845# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that 2886# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
2846# support this feature 2887# support this feature
2847# 0 = MSI disabled 2888# 0 = MSI disabled (default)
2848# 1 = MSI enabled 2889# 1 = MSI enabled
2849# 2 = MSI-X enabled (default) 2890# 2 = MSI-X enabled
2850# Value range is [0,2]. Default value is 2. 2891# Value range is [0,2]. Default value is 0.
2851*/ 2892*/
2852LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " 2893LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
2853 "MSI-X (2), if possible"); 2894 "MSI-X (2), if possible");
2854 2895
2855/* 2896/*
2897# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second
2898#
2899# Value range is [636,651042]. Default value is 10000.
2900*/
2901LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST,
2902 "Set the maximum number of fast-path FCP interrupts per second");
2903
2904/*
2905# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
2906#
2907# Value range is [1,31]. Default value is 4.
2908*/
2909LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX,
2910 "Set the number of fast-path FCP work queues, if possible");
2911
2912/*
2913# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues
2914#
2915# Value range is [1,7]. Default value is 1.
2916*/
2917LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
2918 "Set the number of fast-path FCP event queues, if possible");
2919
2920/*
2856# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. 2921# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
2857# 0 = HBA resets disabled 2922# 0 = HBA resets disabled
2858# 1 = HBA resets enabled (default) 2923# 1 = HBA resets enabled (default)
@@ -2876,6 +2941,14 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
2876*/ 2941*/
2877LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); 2942LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
2878 2943
2944/*
2945# lpfc_enable_fip: When set, FIP is required to start discovery. If not
2946# set, the driver manually adds an FCF record and starts discovery if
2947# the port has no FCF records available.
2948# Value range is [0,1]. Default value is 0 (disabled)
2949*/
2950LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
2951
2879 2952
2880/* 2953/*
2881# lpfc_prot_mask: i 2954# lpfc_prot_mask: i
@@ -2942,6 +3015,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
2942 &dev_attr_lpfc_peer_port_login, 3015 &dev_attr_lpfc_peer_port_login,
2943 &dev_attr_lpfc_nodev_tmo, 3016 &dev_attr_lpfc_nodev_tmo,
2944 &dev_attr_lpfc_devloss_tmo, 3017 &dev_attr_lpfc_devloss_tmo,
3018 &dev_attr_lpfc_enable_fip,
2945 &dev_attr_lpfc_fcp_class, 3019 &dev_attr_lpfc_fcp_class,
2946 &dev_attr_lpfc_use_adisc, 3020 &dev_attr_lpfc_use_adisc,
2947 &dev_attr_lpfc_ack0, 3021 &dev_attr_lpfc_ack0,
@@ -2969,6 +3043,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
2969 &dev_attr_lpfc_poll, 3043 &dev_attr_lpfc_poll,
2970 &dev_attr_lpfc_poll_tmo, 3044 &dev_attr_lpfc_poll_tmo,
2971 &dev_attr_lpfc_use_msi, 3045 &dev_attr_lpfc_use_msi,
3046 &dev_attr_lpfc_fcp_imax,
3047 &dev_attr_lpfc_fcp_wq_count,
3048 &dev_attr_lpfc_fcp_eq_count,
2972 &dev_attr_lpfc_enable_bg, 3049 &dev_attr_lpfc_enable_bg,
2973 &dev_attr_lpfc_soft_wwnn, 3050 &dev_attr_lpfc_soft_wwnn,
2974 &dev_attr_lpfc_soft_wwpn, 3051 &dev_attr_lpfc_soft_wwpn,
@@ -2991,6 +3068,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
2991 &dev_attr_lpfc_lun_queue_depth, 3068 &dev_attr_lpfc_lun_queue_depth,
2992 &dev_attr_lpfc_nodev_tmo, 3069 &dev_attr_lpfc_nodev_tmo,
2993 &dev_attr_lpfc_devloss_tmo, 3070 &dev_attr_lpfc_devloss_tmo,
3071 &dev_attr_lpfc_enable_fip,
2994 &dev_attr_lpfc_hba_queue_depth, 3072 &dev_attr_lpfc_hba_queue_depth,
2995 &dev_attr_lpfc_peer_port_login, 3073 &dev_attr_lpfc_peer_port_login,
2996 &dev_attr_lpfc_restrict_login, 3074 &dev_attr_lpfc_restrict_login,
@@ -3003,6 +3081,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
3003 &dev_attr_lpfc_enable_da_id, 3081 &dev_attr_lpfc_enable_da_id,
3004 &dev_attr_lpfc_max_scsicmpl_time, 3082 &dev_attr_lpfc_max_scsicmpl_time,
3005 &dev_attr_lpfc_stat_data_ctrl, 3083 &dev_attr_lpfc_stat_data_ctrl,
3084 &dev_attr_lpfc_static_vport,
3006 NULL, 3085 NULL,
3007}; 3086};
3008 3087
@@ -3199,7 +3278,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
3199 } 3278 }
3200 } 3279 }
3201 3280
3202 memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off, 3281 memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
3203 buf, count); 3282 buf, count);
3204 3283
3205 phba->sysfs_mbox.offset = off + count; 3284 phba->sysfs_mbox.offset = off + count;
@@ -3241,6 +3320,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3241 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3320 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3242 struct lpfc_hba *phba = vport->phba; 3321 struct lpfc_hba *phba = vport->phba;
3243 int rc; 3322 int rc;
3323 MAILBOX_t *pmb;
3244 3324
3245 if (off > MAILBOX_CMD_SIZE) 3325 if (off > MAILBOX_CMD_SIZE)
3246 return -ERANGE; 3326 return -ERANGE;
@@ -3265,8 +3345,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3265 if (off == 0 && 3345 if (off == 0 &&
3266 phba->sysfs_mbox.state == SMBOX_WRITING && 3346 phba->sysfs_mbox.state == SMBOX_WRITING &&
3267 phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) { 3347 phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
3268 3348 pmb = &phba->sysfs_mbox.mbox->u.mb;
3269 switch (phba->sysfs_mbox.mbox->mb.mbxCommand) { 3349 switch (pmb->mbxCommand) {
3270 /* Offline only */ 3350 /* Offline only */
3271 case MBX_INIT_LINK: 3351 case MBX_INIT_LINK:
3272 case MBX_DOWN_LINK: 3352 case MBX_DOWN_LINK:
@@ -3283,7 +3363,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3283 if (!(vport->fc_flag & FC_OFFLINE_MODE)) { 3363 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3284 printk(KERN_WARNING "mbox_read:Command 0x%x " 3364 printk(KERN_WARNING "mbox_read:Command 0x%x "
3285 "is illegal in on-line state\n", 3365 "is illegal in on-line state\n",
3286 phba->sysfs_mbox.mbox->mb.mbxCommand); 3366 pmb->mbxCommand);
3287 sysfs_mbox_idle(phba); 3367 sysfs_mbox_idle(phba);
3288 spin_unlock_irq(&phba->hbalock); 3368 spin_unlock_irq(&phba->hbalock);
3289 return -EPERM; 3369 return -EPERM;
@@ -3319,13 +3399,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3319 case MBX_CONFIG_PORT: 3399 case MBX_CONFIG_PORT:
3320 case MBX_RUN_BIU_DIAG: 3400 case MBX_RUN_BIU_DIAG:
3321 printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n", 3401 printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
3322 phba->sysfs_mbox.mbox->mb.mbxCommand); 3402 pmb->mbxCommand);
3323 sysfs_mbox_idle(phba); 3403 sysfs_mbox_idle(phba);
3324 spin_unlock_irq(&phba->hbalock); 3404 spin_unlock_irq(&phba->hbalock);
3325 return -EPERM; 3405 return -EPERM;
3326 default: 3406 default:
3327 printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n", 3407 printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
3328 phba->sysfs_mbox.mbox->mb.mbxCommand); 3408 pmb->mbxCommand);
3329 sysfs_mbox_idle(phba); 3409 sysfs_mbox_idle(phba);
3330 spin_unlock_irq(&phba->hbalock); 3410 spin_unlock_irq(&phba->hbalock);
3331 return -EPERM; 3411 return -EPERM;
@@ -3335,14 +3415,14 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3335 * or RESTART mailbox commands until the HBA is restarted. 3415 * or RESTART mailbox commands until the HBA is restarted.
3336 */ 3416 */
3337 if (phba->pport->stopped && 3417 if (phba->pport->stopped &&
3338 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY && 3418 pmb->mbxCommand != MBX_DUMP_MEMORY &&
3339 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART && 3419 pmb->mbxCommand != MBX_RESTART &&
3340 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS && 3420 pmb->mbxCommand != MBX_WRITE_VPARMS &&
3341 phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN) 3421 pmb->mbxCommand != MBX_WRITE_WWN)
3342 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 3422 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
3343 "1259 mbox: Issued mailbox cmd " 3423 "1259 mbox: Issued mailbox cmd "
3344 "0x%x while in stopped state.\n", 3424 "0x%x while in stopped state.\n",
3345 phba->sysfs_mbox.mbox->mb.mbxCommand); 3425 pmb->mbxCommand);
3346 3426
3347 phba->sysfs_mbox.mbox->vport = vport; 3427 phba->sysfs_mbox.mbox->vport = vport;
3348 3428
@@ -3356,7 +3436,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3356 } 3436 }
3357 3437
3358 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3438 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3359 (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){ 3439 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
3360 3440
3361 spin_unlock_irq(&phba->hbalock); 3441 spin_unlock_irq(&phba->hbalock);
3362 rc = lpfc_sli_issue_mbox (phba, 3442 rc = lpfc_sli_issue_mbox (phba,
@@ -3368,8 +3448,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3368 spin_unlock_irq(&phba->hbalock); 3448 spin_unlock_irq(&phba->hbalock);
3369 rc = lpfc_sli_issue_mbox_wait (phba, 3449 rc = lpfc_sli_issue_mbox_wait (phba,
3370 phba->sysfs_mbox.mbox, 3450 phba->sysfs_mbox.mbox,
3371 lpfc_mbox_tmo_val(phba, 3451 lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ);
3372 phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
3373 spin_lock_irq(&phba->hbalock); 3452 spin_lock_irq(&phba->hbalock);
3374 } 3453 }
3375 3454
@@ -3391,7 +3470,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3391 return -EAGAIN; 3470 return -EAGAIN;
3392 } 3471 }
3393 3472
3394 memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); 3473 memcpy(buf, (uint8_t *) pmb + off, count);
3395 3474
3396 phba->sysfs_mbox.offset = off + count; 3475 phba->sysfs_mbox.offset = off + count;
3397 3476
@@ -3585,6 +3664,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
3585 case LA_8GHZ_LINK: 3664 case LA_8GHZ_LINK:
3586 fc_host_speed(shost) = FC_PORTSPEED_8GBIT; 3665 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
3587 break; 3666 break;
3667 case LA_10GHZ_LINK:
3668 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
3669 break;
3588 default: 3670 default:
3589 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 3671 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
3590 break; 3672 break;
@@ -3652,7 +3734,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
3652 */ 3734 */
3653 if (phba->link_state < LPFC_LINK_DOWN || 3735 if (phba->link_state < LPFC_LINK_DOWN ||
3654 !phba->mbox_mem_pool || 3736 !phba->mbox_mem_pool ||
3655 (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) 3737 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
3656 return NULL; 3738 return NULL;
3657 3739
3658 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 3740 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -3663,14 +3745,14 @@ lpfc_get_stats(struct Scsi_Host *shost)
3663 return NULL; 3745 return NULL;
3664 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 3746 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
3665 3747
3666 pmb = &pmboxq->mb; 3748 pmb = &pmboxq->u.mb;
3667 pmb->mbxCommand = MBX_READ_STATUS; 3749 pmb->mbxCommand = MBX_READ_STATUS;
3668 pmb->mbxOwner = OWN_HOST; 3750 pmb->mbxOwner = OWN_HOST;
3669 pmboxq->context1 = NULL; 3751 pmboxq->context1 = NULL;
3670 pmboxq->vport = vport; 3752 pmboxq->vport = vport;
3671 3753
3672 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3754 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3673 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3755 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3674 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3756 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3675 else 3757 else
3676 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3758 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3695,7 +3777,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
3695 pmboxq->vport = vport; 3777 pmboxq->vport = vport;
3696 3778
3697 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3779 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3698 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3780 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3699 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3781 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3700 else 3782 else
3701 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3783 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3769,7 +3851,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
3769 return; 3851 return;
3770 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 3852 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3771 3853
3772 pmb = &pmboxq->mb; 3854 pmb = &pmboxq->u.mb;
3773 pmb->mbxCommand = MBX_READ_STATUS; 3855 pmb->mbxCommand = MBX_READ_STATUS;
3774 pmb->mbxOwner = OWN_HOST; 3856 pmb->mbxOwner = OWN_HOST;
3775 pmb->un.varWords[0] = 0x1; /* reset request */ 3857 pmb->un.varWords[0] = 0x1; /* reset request */
@@ -3777,7 +3859,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
3777 pmboxq->vport = vport; 3859 pmboxq->vport = vport;
3778 3860
3779 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3861 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3780 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3862 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3781 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3863 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3782 else 3864 else
3783 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3865 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3795,7 +3877,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
3795 pmboxq->vport = vport; 3877 pmboxq->vport = vport;
3796 3878
3797 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3879 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
3798 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 3880 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
3799 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3881 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
3800 else 3882 else
3801 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 3883 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3962,6 +4044,21 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
3962 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); 4044 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
3963} 4045}
3964 4046
4047/**
4048 * lpfc_hba_log_verbose_init - Set hba's log verbose level
4049 * @phba: Pointer to lpfc_hba struct.
4050 *
4051 * This function is called by the lpfc_get_cfgparam() routine to copy the
4052 * module parameter lpfc_log_verbose into the @phba cfg_log_verbose field,
4053 * so that log messages honor the module's verbosity setting before any
4054 * hba port or vport is created.
4055 **/
4056static void
4057lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
4058{
4059 phba->cfg_log_verbose = verbose;
4060}
4061
3965struct fc_function_template lpfc_transport_functions = { 4062struct fc_function_template lpfc_transport_functions = {
3966 /* fixed attributes the driver supports */ 4063 /* fixed attributes the driver supports */
3967 .show_host_node_name = 1, 4064 .show_host_node_name = 1,
@@ -4105,6 +4202,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4105 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 4202 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
4106 lpfc_enable_npiv_init(phba, lpfc_enable_npiv); 4203 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
4107 lpfc_use_msi_init(phba, lpfc_use_msi); 4204 lpfc_use_msi_init(phba, lpfc_use_msi);
4205 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
4206 lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
4207 lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
4108 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); 4208 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
4109 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); 4209 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
4110 lpfc_enable_bg_init(phba, lpfc_enable_bg); 4210 lpfc_enable_bg_init(phba, lpfc_enable_bg);
@@ -4113,26 +4213,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4113 phba->cfg_soft_wwpn = 0L; 4213 phba->cfg_soft_wwpn = 0L;
4114 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); 4214 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
4115 lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt); 4215 lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
4116 /*
4117 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
4118 * used to create the sg_dma_buf_pool must be dynamically calculated.
4119 * 2 segments are added since the IOCB needs a command and response bde.
4120 */
4121 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4122 sizeof(struct fcp_rsp) +
4123 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4124
4125 if (phba->cfg_enable_bg) {
4126 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4127 phba->cfg_sg_dma_buf_size +=
4128 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4129 }
4130
4131 /* Also reinitialize the host templates with new values. */
4132 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4133 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4134
4135 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); 4216 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
4217 lpfc_enable_fip_init(phba, lpfc_enable_fip);
4218 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
4219
4136 return; 4220 return;
4137} 4221}
4138 4222
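lpfc_get_cfgparam() no longer sizes the scatter-gather DMA buffer itself; with the per-revision jump tables that computation moves into SLI-rev-specific init code. For reference, the formula the deleted block implemented can be sketched as follows (the _demo helper is illustrative; the real sizing now lives elsewhere in the driver):

/* Sketch: SLI-3 buffer = FCP cmd + rsp + (seg_cnt + 2) BDEs, the two
 * extra entries covering the command and response BDEs of the IOCB.
 */
static uint32_t sli3_sg_dma_buf_size_demo(uint32_t sg_seg_cnt)
{
	return sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
	       ((sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
}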
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f88ce3f2619..d2a922997c0 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
23struct fc_rport; 23struct fc_rport;
24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
25void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *); 25void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
26void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
27int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
 void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 
@@ -35,17 +37,19 @@ int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
 void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
+int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
 		   LPFC_MBOXQ_t *, uint32_t);
 void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
 void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
-void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
 void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
 void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
 
 struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
 void lpfc_cleanup_rpis(struct lpfc_vport *, int);
 int lpfc_linkdown(struct lpfc_hba *);
+void lpfc_linkdown_port(struct lpfc_vport *);
 void lpfc_port_link_failure(struct lpfc_vport *);
 void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
 
@@ -54,6 +58,7 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@@ -105,6 +110,7 @@ int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
 int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
 int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
 int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
 int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@@ -149,15 +155,19 @@ int lpfc_online(struct lpfc_hba *);
 void lpfc_unblock_mgmt_io(struct lpfc_hba *);
 void lpfc_offline_prep(struct lpfc_hba *);
 void lpfc_offline(struct lpfc_hba *);
+void lpfc_reset_hba(struct lpfc_hba *);
 
 int lpfc_sli_setup(struct lpfc_hba *);
 int lpfc_sli_queue_setup(struct lpfc_hba *);
 
 void lpfc_handle_eratt(struct lpfc_hba *);
 void lpfc_handle_latt(struct lpfc_hba *);
-irqreturn_t lpfc_intr_handler(int, void *);
-irqreturn_t lpfc_sp_intr_handler(int, void *);
-irqreturn_t lpfc_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli_intr_handler(int, void *);
+irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
 
 void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -165,16 +175,32 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
 LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_dev_check(struct lpfc_hba *);
 int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
+void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
+void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
+void lpfc_init_vpi(struct lpfcMboxq *, uint16_t);
+void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
+void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
+void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
 
 void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
 	uint32_t , LPFC_MBOXQ_t *);
 struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
 void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
+struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
+void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
+void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
+			uint16_t);
+void lpfc_unregister_unused_fcf(struct lpfc_hba *);
 
-int lpfc_mem_alloc(struct lpfc_hba *);
+int lpfc_mem_alloc(struct lpfc_hba *, int align);
 void lpfc_mem_free(struct lpfc_hba *);
+void lpfc_mem_free_all(struct lpfc_hba *);
 void lpfc_stop_vport_timers(struct lpfc_vport *);
 
 void lpfc_poll_timeout(unsigned long ptr);
@@ -186,6 +212,7 @@ void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
 uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
 void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
 			   uint32_t);
+void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
 
 void lpfc_reset_barrier(struct lpfc_hba * phba);
 int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
@@ -198,12 +225,13 @@ int lpfc_sli_host_down(struct lpfc_vport *);
 int lpfc_sli_hba_down(struct lpfc_hba *);
 int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 int lpfc_sli_handle_mb_event(struct lpfc_hba *);
-int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
+void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
 int lpfc_sli_check_eratt(struct lpfc_hba *);
-int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
+void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
 				    struct lpfc_sli_ring *, uint32_t);
+int lpfc_sli4_handle_received_buffer(struct lpfc_hba *);
 void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
+int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
 			struct lpfc_iocbq *, uint32_t);
 void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
@@ -237,7 +265,7 @@ struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
 
 int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 
-int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *,
+int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
			     struct lpfc_iocbq *, struct lpfc_iocbq *,
			     uint32_t);
 void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
@@ -254,6 +282,12 @@ void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
 const char* lpfc_info(struct Scsi_Host *);
 int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
 
+int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_api_table_setup(struct lpfc_hba *, uint8_t);
+
 void lpfc_get_cfgparam(struct lpfc_hba *);
 void lpfc_get_vport_cfgparam(struct lpfc_vport *);
 int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
@@ -314,8 +348,15 @@ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
 			    struct lpfc_iocbq *);
 struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
 void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
+void lpfc_create_static_vport(struct lpfc_hba *);
+void lpfc_stop_hba_timers(struct lpfc_hba *);
+void lpfc_stop_port(struct lpfc_hba *);
+void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
+int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
+void lpfc_start_fdiscs(struct lpfc_hba *phba);
 
 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
 #define HBA_EVENT_RSCN                   5
 #define HBA_EVENT_LINK_UP                2
 #define HBA_EVENT_LINK_DOWN              3
+
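
The header churn above tracks the driver's split into SLI-3 and SLI-4 paths: the interrupt handlers gain lpfc_sli_/lpfc_sli4_ variants, lpfc_sli_issue_iocb() now takes a ring number instead of a ring pointer, and the new *_api_table_setup() prototypes suggest per-revision function tables chosen once at probe time. The following is a standalone C sketch of that dispatch shape; every name in it is invented for illustration and only the idea of a per-revision ops table is taken from the prototypes above.

#include <stdio.h>

enum sli_rev { SLI_REV3 = 3, SLI_REV4 = 4 };

struct hba;

struct hba_ops {
	int (*issue_iocb)(struct hba *h, unsigned int ring_no);
};

struct hba {
	enum sli_rev sli_rev;
	struct hba_ops ops;
};

static int issue_iocb_s3(struct hba *h, unsigned int ring_no)
{
	(void)h;
	printf("SLI-3: post to ring %u\n", ring_no);
	return 0;
}

static int issue_iocb_s4(struct hba *h, unsigned int ring_no)
{
	(void)h;
	printf("SLI-4: post to work queue for ring %u\n", ring_no);
	return 0;
}

/* Chosen once at init, so hot paths never re-test the revision. */
static int api_table_setup(struct hba *h)
{
	switch (h->sli_rev) {
	case SLI_REV3:
		h->ops.issue_iocb = issue_iocb_s3;
		return 0;
	case SLI_REV4:
		h->ops.issue_iocb = issue_iocb_s4;
		return 0;
	default:
		return -1;		/* unknown revision */
	}
}

int main(void)
{
	struct hba h = { .sli_rev = SLI_REV4 };

	if (api_table_setup(&h))
		return 1;
	return h.ops.issue_iocb(&h, 0);
}

Selecting the table once up front keeps per-I/O paths free of revision checks, which appears to be the intent of the setup helpers declared above.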
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 896c7b0351e..1dbccfd3d02 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -32,8 +32,10 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -267,8 +269,6 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
	     uint32_t tmo, uint8_t retry)
 {
 	struct lpfc_hba *phba = vport->phba;
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	IOCB_t *icmd;
 	struct lpfc_iocbq *geniocb;
 	int rc;
@@ -331,7 +331,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 	geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
 	geniocb->vport = vport;
 	geniocb->retry = retry;
-	rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
 
 	if (rc == IOCB_ERROR) {
 		lpfc_sli_release_iocbq(phba, geniocb);
@@ -1578,6 +1578,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
			case LA_8GHZ_LINK:
				ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
			break;
+			case LA_10GHZ_LINK:
+				ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
+			break;
			default:
				ae->un.PortSpeed =
					HBA_PORTSPEED_UNKNOWN;
@@ -1730,7 +1733,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
 	uint8_t *fwname;
 
 	if (vp->rev.rBit) {
-		if (psli->sli_flag & LPFC_SLI2_ACTIVE)
+		if (psli->sli_flag & LPFC_SLI_ACTIVE)
			rev = vp->rev.sli2FwRev;
		else
			rev = vp->rev.sli1FwRev;
@@ -1756,7 +1759,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
 	}
 	b4 = (rev & 0x0000000f);
 
-	if (psli->sli_flag & LPFC_SLI2_ACTIVE)
+	if (psli->sli_flag & LPFC_SLI_ACTIVE)
		fwname = vp->rev.sli2FwName;
	else
		fwname = vp->rev.sli1FwName;
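
Both lpfc_gen_req() hunks above show the same mechanical conversion repeated throughout this patch: callers stop computing &psli->ring[LPFC_ELS_RING] themselves and pass the ring number, leaving the ring lookup (or, presumably, the SLI-4 work-queue mapping) to lpfc_sli_issue_iocb(). A minimal sketch of the call-site change, with illustrative types standing in for the driver's own:

/* Illustrative types only; the real driver resolves an SLI-3 ring or an
 * SLI-4 work queue inside lpfc_sli_issue_iocb() itself. */
#define ELS_RING 0

struct ring { int id; };
struct sli  { struct ring ring[4]; };

static int issue_iocb(struct sli *psli, unsigned int ring_no)
{
	struct ring *pring = &psli->ring[ring_no];	/* lookup moved here */

	return pring->id;
}

int send_els(struct sli *psli)
{
	/* old shape: issue_iocb(psli, &psli->ring[ELS_RING], ...) */
	return issue_iocb(psli, ELS_RING);
}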
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 52be5644e07..2b02b1fb39a 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2007-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2007-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -33,8 +33,10 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -280,6 +282,8 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
 	struct lpfc_dmabuf *d_buf;
 	struct hbq_dmabuf *hbq_buf;
 
+	if (phba->sli_rev != 3)
+		return 0;
 	cnt = LPFC_HBQINFO_SIZE;
 	spin_lock_irq(&phba->hbalock);
 
@@ -489,12 +493,15 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
			pring->next_cmdidx, pring->local_getidx,
			pring->flag, pgpp->rspPutInx, pring->numRiocb);
 	}
-	word0 = readl(phba->HAregaddr);
-	word1 = readl(phba->CAregaddr);
-	word2 = readl(phba->HSregaddr);
-	word3 = readl(phba->HCregaddr);
-	len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x HC:%08x\n",
-		word0, word1, word2, word3);
+
+	if (phba->sli_rev <= LPFC_SLI_REV3) {
+		word0 = readl(phba->HAregaddr);
+		word1 = readl(phba->CAregaddr);
+		word2 = readl(phba->HSregaddr);
+		word3 = readl(phba->HCregaddr);
+		len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
+				"HC:%08x\n", word0, word1, word2, word3);
+	}
 	spin_unlock_irq(&phba->hbalock);
 	return len;
 }
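
The dumpHostSlim hunk wraps the HA/CA/HS/HC reads in a revision check, since those attention and control registers are only meaningful before SLI-4. The surrounding len/size arithmetic is the usual bounded-accumulation idiom for debugfs text buffers. A minimal sketch of both, assuming a caller-supplied buffer; the names are illustrative, not the driver's:

#include <stdio.h>

static int dump_regs(char *buf, int size, int is_sli3)
{
	int len = 0;

	if (size <= 0)
		return 0;
	len += snprintf(buf + len, size - len, "ring state ...\n");
	/* HA/CA/HS/HC registers exist only on pre-SLI-4 ports. */
	if (is_sli3 && len < size)
		len += snprintf(buf + len, size - len,
				"HA:%08x CA:%08x HS:%08x HC:%08x\n",
				0u, 0u, 0u, 0u);
	return len;	/* bytes written, never past the buffer end */
}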
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index ffd10897207..1142070e948 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -135,6 +135,7 @@ struct lpfc_nodelist {
 #define NLP_NODEV_REMOVE   0x08000000	/* Defer removal till discovery ends */
 #define NLP_TARGET_REMOVE  0x10000000	/* Target remove in process */
 #define NLP_SC_REQ         0x20000000	/* Target requires authentication */
+#define NLP_RPI_VALID      0x80000000	/* nlp_rpi is valid */
 
 /* ndlp usage management macros */
 #define NLP_CHK_NODE_ACT(ndlp)		(((ndlp)->nlp_usg_map \
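
A validity flag like NLP_RPI_VALID is typically tested before the field it guards is consumed. A tiny sketch under that assumption; the flag value mirrors the diff, everything else here is hypothetical:

#define NLP_RPI_VALID 0x80000000

struct nodelist {
	unsigned int   nlp_flag;
	unsigned short nlp_rpi;
};

static int node_rpi_or_error(const struct nodelist *ndlp)
{
	if (!(ndlp->nlp_flag & NLP_RPI_VALID))
		return -1;	/* no RPI has been assigned yet */
	return ndlp->nlp_rpi;
}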
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b8b34cf5c3d..6bdeb14878a 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -28,8 +28,10 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -84,7 +86,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
 	uint32_t ha_copy;
 
 	if (vport->port_state >= LPFC_VPORT_READY ||
-	    phba->link_state == LPFC_LINK_DOWN)
+	    phba->link_state == LPFC_LINK_DOWN ||
+	    phba->sli_rev > LPFC_SLI_REV3)
 		return 0;
 
 	/* Read the HBA Host Attention Register */
@@ -219,7 +222,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 	icmd->un.elsreq64.myID = vport->fc_myDID;
 
 	/* For ELS_REQUEST64_CR, use the VPI by default */
-	icmd->ulpContext = vport->vpi;
+	icmd->ulpContext = vport->vpi + phba->vpi_base;
 	icmd->ulpCt_h = 0;
 	/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
 	if (elscmd == ELS_CMD_ECHO)
@@ -305,7 +308,7 @@ els_iocb_free_pcmb_exit:
  *   0 - successfully issued fabric registration login for @vport
  *   -ENXIO -- failed to issue fabric registration login for @vport
  **/
-static int
+int
 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
 {
 	struct lpfc_hba *phba = vport->phba;
@@ -345,8 +348,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
		err = 4;
		goto fail;
 	}
-	rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
-			    0);
+	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
 	if (rc) {
		err = 5;
		goto fail_free_mbox;
@@ -386,6 +388,75 @@ fail:
 }
 
 /**
+ * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
+ * the @vport. This mailbox command is necessary for FCoE only.
+ *
+ * Return code
+ *   0 - successfully issued REG_VFI for @vport
+ *   A failure code otherwise.
+ **/
+static int
+lpfc_issue_reg_vfi(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_nodelist *ndlp;
+	struct serv_parm *sp;
+	struct lpfc_dmabuf *dmabuf;
+	int rc = 0;
+
+	sp = &phba->fc_fabparam;
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		rc = -ENODEV;
+		goto fail;
+	}
+
+	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!dmabuf) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+	dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
+	if (!dmabuf->virt) {
+		rc = -ENOMEM;
+		goto fail_free_dmabuf;
+	}
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		rc = -ENOMEM;
+		goto fail_free_coherent;
+	}
+	vport->port_state = LPFC_FABRIC_CFG_LINK;
+	memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
+	lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
+	mboxq->vport = vport;
+	mboxq->context1 = dmabuf;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		rc = -ENXIO;
+		goto fail_free_mbox;
+	}
+	return 0;
+
+fail_free_mbox:
+	mempool_free(mboxq, phba->mbox_mem_pool);
+fail_free_coherent:
+	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+fail_free_dmabuf:
+	kfree(dmabuf);
+fail:
+	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+		"0289 Issue Register VFI failed: Err %d\n", rc);
+	return rc;
+}
+
+/**
@@ -497,17 +568,24 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		}
 	}
 
-	lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
-
-	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
-	    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) {
-		lpfc_register_new_vport(phba, vport, ndlp);
-		return 0;
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
+		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
+		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+			lpfc_register_new_vport(phba, vport, ndlp);
+		else
+			lpfc_issue_fabric_reglogin(vport);
+	} else {
+		ndlp->nlp_type |= NLP_FABRIC;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+		if (vport->vfi_state & LPFC_VFI_REGISTERED) {
+			lpfc_start_fdiscs(phba);
+			lpfc_do_scr_ns_plogi(phba, vport);
+		} else
+			lpfc_issue_reg_vfi(vport);
 	}
-	lpfc_issue_fabric_reglogin(vport);
 	return 0;
 }
-
 /**
  * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
  * @vport: pointer to a host virtual N_Port data structure.
@@ -815,9 +893,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;
 
-	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
+		elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
+		/* FLOGI needs to be 3 for WQE FCFI */
+		/* Set the fcfi to the fcfi we registered with */
+		elsiocb->iocb.ulpContext = phba->fcf.fcfi;
+	} else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		sp->cmn.request_multiple_Nport = 1;
-
		/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
		icmd->ulpCt_h = 1;
		icmd->ulpCt_l = 0;
@@ -930,6 +1013,8 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
		if (!ndlp)
			return 0;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
+		/* Set the node type */
+		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
@@ -1350,14 +1435,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 	IOCB_t *icmd;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	int ret;
 
 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
 
 	ndlp = lpfc_findnode_did(vport, did);
 	if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
@@ -1391,7 +1474,7 @@
 
 	phba->fc_stat.elsXmitPLOGI++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
-	ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 
 	if (ret == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
@@ -1501,14 +1584,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	PRLI *npr;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
-	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 
-	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
-
 	cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_PRLI);
@@ -1550,7 +1628,8 @@
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_PRLI_SND;
 	spin_unlock_irq(shost->host_lock);
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_PRLI_SND;
		spin_unlock_irq(shost->host_lock);
@@ -1608,7 +1687,8 @@ lpfc_adisc_done(struct lpfc_vport *vport)
	 * and continue discovery.
	 */
 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
-	    !(vport->fc_flag & FC_RSCN_MODE)) {
+	    !(vport->fc_flag & FC_RSCN_MODE) &&
+	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
 	}
@@ -1788,8 +1868,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	ADISC *ap;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 
@@ -1822,7 +1900,8 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_ADISC_SND;
 	spin_unlock_irq(shost->host_lock);
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_ADISC_SND;
		spin_unlock_irq(shost->host_lock);
@@ -1937,15 +2016,10 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct lpfc_hba  *phba = vport->phba;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
-	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	int rc;
 
-	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];
-
 	spin_lock_irq(shost->host_lock);
 	if (ndlp->nlp_flag & NLP_LOGO_SND) {
		spin_unlock_irq(shost->host_lock);
@@ -1978,7 +2052,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_LOGO_SND;
 	spin_unlock_irq(shost->host_lock);
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 
 	if (rc == IOCB_ERROR) {
		spin_lock_irq(shost->host_lock);
@@ -2058,14 +2132,12 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 	struct lpfc_hba  *phba = vport->phba;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	struct lpfc_nodelist *ndlp;
 
 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
 	cmdsize = (sizeof(uint32_t) + sizeof(SCR));
 
 	ndlp = lpfc_findnode_did(vport, nportid);
@@ -2108,7 +2180,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 
 	phba->fc_stat.elsXmitSCR++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
		/* The additional lpfc_nlp_put will cause the following
		 * lpfc_els_free_iocb routine to trigger the rlease of
		 * the node.
@@ -2152,7 +2225,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 	struct lpfc_hba  *phba = vport->phba;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	FARP *fp;
 	uint8_t *pcmd;
@@ -2162,7 +2234,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 	struct lpfc_nodelist *ndlp;
 
 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
 	cmdsize = (sizeof(uint32_t) + sizeof(FARP));
 
 	ndlp = lpfc_findnode_did(vport, nportid);
@@ -2219,7 +2290,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 
 	phba->fc_stat.elsXmitFARPR++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
		/* The additional lpfc_nlp_put will cause the following
		 * lpfc_els_free_iocb routine to trigger the release of
		 * the node.
@@ -2949,6 +3021,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
 
+	/*
+	 * This routine is used to register and unregister in previous SLI
+	 * modes.
+	 */
+	if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
+	    (phba->sli_rev == LPFC_SLI_REV4))
+		lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
+
 	pmb->context1 = NULL;
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
@@ -2961,6 +3041,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
		 */
		lpfc_nlp_not_used(ndlp);
 	}
+
 	return;
 }
 
@@ -3170,7 +3251,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
 	IOCB_t *icmd;
 	IOCB_t *oldcmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
@@ -3178,7 +3258,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
 	ELS_PKT *els_pkt_ptr;
 
 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
 	oldcmd = &oldiocb->iocb;
 
 	switch (flag) {
@@ -3266,7 +3345,7 @@
 	}
 
 	phba->fc_stat.elsXmitACC++;
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
@@ -3305,15 +3384,12 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
 	IOCB_t *icmd;
 	IOCB_t *oldcmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	int rc;
 
 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
-
 	cmdsize = 2 * sizeof(uint32_t);
 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
@@ -3346,7 +3422,7 @@
 
 	phba->fc_stat.elsXmitLSRJT++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 
 	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
@@ -3379,8 +3455,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
			struct lpfc_nodelist *ndlp)
 {
 	struct lpfc_hba  *phba = vport->phba;
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	ADISC *ap;
 	IOCB_t *icmd, *oldcmd;
 	struct lpfc_iocbq *elsiocb;
@@ -3422,7 +3496,7 @@
 
 	phba->fc_stat.elsXmitACC++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
@@ -3459,14 +3533,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 	IOCB_t *icmd;
 	IOCB_t *oldcmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	int rc;
 
 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
 
 	cmdsize = sizeof(uint32_t) + sizeof(PRLI);
 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -3520,7 +3592,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 	phba->fc_stat.elsXmitACC++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
@@ -3562,15 +3634,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
 	RNID *rn;
 	IOCB_t *icmd, *oldcmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	int rc;
 
 	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];
-
 	cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
					+ (2 * sizeof(struct lpfc_name));
 	if (format)
@@ -3626,7 +3695,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
 	elsiocb->context1 = NULL;	/* Don't need ndlp for cmpl,
					 * it could be freed */
 
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
@@ -3839,7 +3908,9 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
		payload_len -= sizeof(uint32_t);
		switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
		case RSCN_ADDRESS_FORMAT_PORT:
-			if (ns_did.un.word == rscn_did.un.word)
+			if ((ns_did.un.b.domain == rscn_did.un.b.domain)
+			    && (ns_did.un.b.area == rscn_did.un.b.area)
+			    && (ns_did.un.b.id == rscn_did.un.b.id))
				goto return_did_out;
			break;
		case RSCN_ADDRESS_FORMAT_AREA:
@@ -4300,7 +4371,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
			lpfc_init_link(phba, mbox,
				       phba->cfg_topology,
				       phba->cfg_link_speed);
-			mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
+			mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mbox->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4440,8 +4511,6 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 static void
 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	MAILBOX_t *mb;
 	IOCB_t *icmd;
 	RPS_RSP *rps_rsp;
@@ -4451,7 +4520,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	uint16_t xri, status;
 	uint32_t cmdsize;
 
-	mb = &pmb->mb;
+	mb = &pmb->u.mb;
 
 	ndlp = (struct lpfc_nodelist *) pmb->context2;
 	xri = (uint16_t) ((unsigned long)(pmb->context1));
@@ -4507,7 +4576,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
			 ndlp->nlp_rpi);
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 	phba->fc_stat.elsXmitACC++;
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR)
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
		lpfc_els_free_iocb(phba, elsiocb);
 	return;
 }
@@ -4616,8 +4685,6 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
 	IOCB_t *icmd, *oldcmd;
 	RPL_RSP rpl_rsp;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	uint8_t *pcmd;
 
 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -4654,7 +4721,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
			 ndlp->nlp_rpi);
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 	phba->fc_stat.elsXmitACC++;
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
 	}
@@ -4883,7 +4951,10 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		} else {
			/* FAN verified - skip FLOGI */
			vport->fc_myDID = vport->fc_prevDID;
-			lpfc_issue_fabric_reglogin(vport);
+			if (phba->sli_rev < LPFC_SLI_REV4)
+				lpfc_issue_fabric_reglogin(vport);
+			else
+				lpfc_issue_reg_vfi(vport);
		}
 	}
 	return 0;
@@ -5566,11 +5637,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
 dropit:
 	if (vport && !(vport->load_flag & FC_UNLOADING))
-		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
-			"(%d):0111 Dropping received ELS cmd "
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			"0111 Dropping received ELS cmd "
			"Data: x%x x%x x%x\n",
-			vport->vpi, icmd->ulpStatus,
-			icmd->un.ulpWord[4], icmd->ulpTimeout);
+			icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
 	phba->fc_stat.elsRcvDrop++;
 }
 
@@ -5646,10 +5716,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
	     icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
		if (icmd->unsli3.rcvsli3.vpi == 0xffff)
			vport = phba->pport;
-		else {
-			uint16_t vpi = icmd->unsli3.rcvsli3.vpi;
-			vport = lpfc_find_vport_by_vpid(phba, vpi);
-		}
+		else
+			vport = lpfc_find_vport_by_vpid(phba,
+				icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
 	}
	/* If there are no BDEs associated
	 * with this IOCB, there is nothing to do.
@@ -5781,7 +5850,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	struct lpfc_vport *vport = pmb->vport;
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
-	MAILBOX_t *mb = &pmb->mb;
+	MAILBOX_t *mb = &pmb->u.mb;
 
 	spin_lock_irq(shost->host_lock);
 	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
@@ -5818,7 +5887,10 @@
 
 	} else {
		if (vport == phba->pport)
-			lpfc_issue_fabric_reglogin(vport);
+			if (phba->sli_rev < LPFC_SLI_REV4)
+				lpfc_issue_fabric_reglogin(vport);
+			else
+				lpfc_issue_reg_vfi(vport);
		else
			lpfc_do_scr_ns_plogi(phba, vport);
 	}
@@ -5850,7 +5922,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (mbox) {
-		lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
+		lpfc_reg_vpi(vport, mbox);
		mbox->vport = vport;
		mbox->context2 = lpfc_nlp_get(ndlp);
		mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
@@ -6139,7 +6211,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba  *phba = vport->phba;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
 	uint8_t *pcmd;
@@ -6169,7 +6240,8 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_LOGO_SND;
 	spin_unlock_irq(shost->host_lock);
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_LOGO_SND;
		spin_unlock_irq(shost->host_lock);
@@ -6224,7 +6296,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
 	struct lpfc_iocbq *iocb;
 	unsigned long iflags;
 	int ret;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	IOCB_t *cmd;
 
 repeat:
@@ -6248,7 +6319,7 @@ repeat:
			"Fabric sched1:   ste:x%x",
			iocb->vport->port_state, 0, 0);
 
-		ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
+		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
 
		if (ret == IOCB_ERROR) {
			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6394,7 +6465,6 @@ static int
 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
 {
 	unsigned long iflags;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	int ready;
 	int ret;
 
@@ -6418,7 +6488,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
			"Fabric sched2:   ste:x%x",
			iocb->vport->port_state, 0, 0);
 
-		ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
+		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
 
		if (ret == IOCB_ERROR) {
			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
6424 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 6494 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6524,3 +6594,38 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
6524 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6594 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6525 IOERR_SLI_ABORTED); 6595 IOERR_SLI_ABORTED);
6526} 6596}
6597
6598/**
6599 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
6600 * @phba: pointer to lpfc hba data structure.
6601 * @axri: pointer to the els xri abort wcqe structure.
6602 *
6603 * This routine is invoked by the worker thread to process a SLI4 slow-path
6604 * ELS aborted xri.
6605 **/
6606void
6607lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6608 struct sli4_wcqe_xri_aborted *axri)
6609{
6610 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
6611 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6612 unsigned long iflag = 0;
6613
6614 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
6615 list_for_each_entry_safe(sglq_entry, sglq_next,
6616 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
6617 if (sglq_entry->sli4_xritag == xri) {
6618 list_del(&sglq_entry->list);
6619 spin_unlock_irqrestore(
6620 &phba->sli4_hba.abts_sgl_list_lock,
6621 iflag);
6622 spin_lock_irqsave(&phba->hbalock, iflag);
6623
6624 list_add_tail(&sglq_entry->list,
6625 &phba->sli4_hba.lpfc_sgl_list);
6626 spin_unlock_irqrestore(&phba->hbalock, iflag);
6627 return;
6628 }
6629 }
6630 spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
6631}
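
lpfc_sli4_els_xri_aborted() above walks the aborted-sgl list under one lock, then drops that lock before taking hbalock to re-link the entry onto the free list, so the two locks are never held at once. A user-space pthread sketch of that hand-off pattern follows; the names are illustrative, and the kernel code uses spinlocks and list_head rather than these types:

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; int xri; };

struct locked_list {
	pthread_mutex_t lock;
	struct node *head;
};

static void move_aborted(struct locked_list *abts,
			 struct locked_list *freelist, int xri)
{
	struct node **pp, *found = NULL;

	pthread_mutex_lock(&abts->lock);
	for (pp = &abts->head; *pp; pp = &(*pp)->next) {
		if ((*pp)->xri == xri) {
			found = *pp;
			*pp = found->next;	/* unlink under source lock */
			break;
		}
	}
	pthread_mutex_unlock(&abts->lock);	/* dropped before next lock */

	if (!found)
		return;
	pthread_mutex_lock(&freelist->lock);
	found->next = freelist->head;		/* relink under dest lock */
	freelist->head = found;
	pthread_mutex_unlock(&freelist->lock);
}

Never nesting the two locks sidesteps any lock-ordering question between the abort list and the general free list.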
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e764ce0bf70..35c41ae75be 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -29,10 +29,12 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_scsi.h"
 #include "lpfc.h"
 #include "lpfc_logmsg.h"
@@ -273,6 +275,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+
+	lpfc_unregister_unused_fcf(phba);
 }
 
 /**
@@ -295,10 +299,11 @@ lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
 
 	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
-	if (ret)
+	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+	}
 	return ret;
 }
 
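
The lpfc_alloc_fast_evt() hunk just above is a brace fix: previously only the counter increment was guarded by the if, and the two initializations ran unconditionally, dereferencing a NULL pointer whenever kzalloc() failed. The bug class, distilled into a standalone example with invented names:

#include <stdlib.h>

struct evt { int count; int initialized; };

static struct evt *alloc_evt_buggy(void)
{
	struct evt *ret = calloc(1, sizeof(*ret));

	if (ret)
		ret->count = 1;		/* guarded */
	ret->initialized = 1;		/* NOT guarded: NULL deref on failure */
	return ret;
}

static struct evt *alloc_evt_fixed(void)
{
	struct evt *ret = calloc(1, sizeof(*ret));

	if (ret) {
		ret->count = 1;
		ret->initialized = 1;	/* now inside the guard */
	}
	return ret;
}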
@@ -491,6 +496,10 @@ lpfc_work_done(struct lpfc_hba *phba)
 	phba->work_ha = 0;
 	spin_unlock_irq(&phba->hbalock);
 
+	/* First, try to post the next mailbox command to SLI4 device */
+	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+		lpfc_sli4_post_async_mbox(phba);
+
 	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);
@@ -501,9 +510,27 @@ lpfc_work_done(struct lpfc_hba *phba)
501 if (ha_copy & HA_LATT) 510 if (ha_copy & HA_LATT)
502 lpfc_handle_latt(phba); 511 lpfc_handle_latt(phba);
503 512
513 /* Process SLI4 events */
514 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
515 if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
516 lpfc_sli4_fcp_xri_abort_event_proc(phba);
517 if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
518 lpfc_sli4_els_xri_abort_event_proc(phba);
519 if (phba->hba_flag & ASYNC_EVENT)
520 lpfc_sli4_async_event_proc(phba);
521 if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
522 spin_lock_irq(&phba->hbalock);
523 phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
524 spin_unlock_irq(&phba->hbalock);
525 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
526 }
527 if (phba->hba_flag & HBA_RECEIVE_BUFFER)
528 lpfc_sli4_handle_received_buffer(phba);
529 }
530
504 vports = lpfc_create_vport_work_array(phba); 531 vports = lpfc_create_vport_work_array(phba);
505 if (vports != NULL) 532 if (vports != NULL)
506 for(i = 0; i <= phba->max_vpi; i++) { 533 for (i = 0; i <= phba->max_vports; i++) {
507 /* 534 /*
508 * We could have no vports in array if unloading, so if 535 * We could have no vports in array if unloading, so if
509 * this happens then just use the pport 536 * this happens then just use the pport
@@ -555,23 +582,24 @@ lpfc_work_done(struct lpfc_hba *phba)
555 /* 582 /*
556 * Turn on Ring interrupts 583 * Turn on Ring interrupts
557 */ 584 */
558 spin_lock_irq(&phba->hbalock); 585 if (phba->sli_rev <= LPFC_SLI_REV3) {
559 control = readl(phba->HCregaddr); 586 spin_lock_irq(&phba->hbalock);
560 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { 587 control = readl(phba->HCregaddr);
561 lpfc_debugfs_slow_ring_trc(phba, 588 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
562 "WRK Enable ring: cntl:x%x hacopy:x%x", 589 lpfc_debugfs_slow_ring_trc(phba,
563 control, ha_copy, 0); 590 "WRK Enable ring: cntl:x%x hacopy:x%x",
564 591 control, ha_copy, 0);
565 control |= (HC_R0INT_ENA << LPFC_ELS_RING); 592
566 writel(control, phba->HCregaddr); 593 control |= (HC_R0INT_ENA << LPFC_ELS_RING);
567 readl(phba->HCregaddr); /* flush */ 594 writel(control, phba->HCregaddr);
568 } 595 readl(phba->HCregaddr); /* flush */
569 else { 596 } else {
570 lpfc_debugfs_slow_ring_trc(phba, 597 lpfc_debugfs_slow_ring_trc(phba,
571 "WRK Ring ok: cntl:x%x hacopy:x%x", 598 "WRK Ring ok: cntl:x%x hacopy:x%x",
572 control, ha_copy, 0); 599 control, ha_copy, 0);
600 }
601 spin_unlock_irq(&phba->hbalock);
573 } 602 }
574 spin_unlock_irq(&phba->hbalock);
575 } 603 }
576 lpfc_work_list_done(phba); 604 lpfc_work_list_done(phba);
577} 605}
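
The writel()/readl() pair in the ring-enable path above is the standard posted-write flush idiom: the dummy read forces the MMIO write out to the adapter before the lock is dropped. The idiom in isolation, as a minimal sketch against a hypothetical ioremap()ed control register (enable_ring_irq is illustrative, not an lpfc function):

#include <linux/io.h>

/* Set an interrupt-enable bit in a memory-mapped control register and
 * flush the posted write before returning. */
static void enable_ring_irq(void __iomem *regaddr, u32 bit)
{
	u32 control = readl(regaddr);

	control |= bit;
	writel(control, regaddr);
	readl(regaddr);		/* flush the posted write */
}
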
@@ -689,7 +717,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
689 lpfc_can_disctmo(vport); 717 lpfc_can_disctmo(vport);
690} 718}
691 719
692static void 720void
693lpfc_linkdown_port(struct lpfc_vport *vport) 721lpfc_linkdown_port(struct lpfc_vport *vport)
694{ 722{
695 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 723 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -716,6 +744,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
716 if (phba->link_state == LPFC_LINK_DOWN) 744 if (phba->link_state == LPFC_LINK_DOWN)
717 return 0; 745 return 0;
718 spin_lock_irq(&phba->hbalock); 746 spin_lock_irq(&phba->hbalock);
747 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
719 if (phba->link_state > LPFC_LINK_DOWN) { 748 if (phba->link_state > LPFC_LINK_DOWN) {
720 phba->link_state = LPFC_LINK_DOWN; 749 phba->link_state = LPFC_LINK_DOWN;
721 phba->pport->fc_flag &= ~FC_LBIT; 750 phba->pport->fc_flag &= ~FC_LBIT;
@@ -723,7 +752,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
723 spin_unlock_irq(&phba->hbalock); 752 spin_unlock_irq(&phba->hbalock);
724 vports = lpfc_create_vport_work_array(phba); 753 vports = lpfc_create_vport_work_array(phba);
725 if (vports != NULL) 754 if (vports != NULL)
726 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 755 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
727 /* Issue a LINK DOWN event to all nodes */ 756 /* Issue a LINK DOWN event to all nodes */
728 lpfc_linkdown_port(vports[i]); 757 lpfc_linkdown_port(vports[i]);
729 } 758 }
@@ -833,10 +862,11 @@ lpfc_linkup(struct lpfc_hba *phba)
833 862
834 vports = lpfc_create_vport_work_array(phba); 863 vports = lpfc_create_vport_work_array(phba);
835 if (vports != NULL) 864 if (vports != NULL)
836 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 865 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
837 lpfc_linkup_port(vports[i]); 866 lpfc_linkup_port(vports[i]);
838 lpfc_destroy_vport_work_array(phba, vports); 867 lpfc_destroy_vport_work_array(phba, vports);
839 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 868 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
869 (phba->sli_rev < LPFC_SLI_REV4))
840 lpfc_issue_clear_la(phba, phba->pport); 870 lpfc_issue_clear_la(phba, phba->pport);
841 871
842 return 0; 872 return 0;
@@ -854,7 +884,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
854 struct lpfc_vport *vport = pmb->vport; 884 struct lpfc_vport *vport = pmb->vport;
855 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 885 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
856 struct lpfc_sli *psli = &phba->sli; 886 struct lpfc_sli *psli = &phba->sli;
857 MAILBOX_t *mb = &pmb->mb; 887 MAILBOX_t *mb = &pmb->u.mb;
858 uint32_t control; 888 uint32_t control;
859 889
860 /* Since we don't do discovery right now, turn these off here */ 890 /* Since we don't do discovery right now, turn these off here */
@@ -917,7 +947,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
917{ 947{
918 struct lpfc_vport *vport = pmb->vport; 948 struct lpfc_vport *vport = pmb->vport;
919 949
920 if (pmb->mb.mbxStatus) 950 if (pmb->u.mb.mbxStatus)
921 goto out; 951 goto out;
922 952
923 mempool_free(pmb, phba->mbox_mem_pool); 953 mempool_free(pmb, phba->mbox_mem_pool);
@@ -945,7 +975,7 @@ out:
945 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 975 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
946 "0306 CONFIG_LINK mbxStatus error x%x " 976 "0306 CONFIG_LINK mbxStatus error x%x "
947 "HBA state x%x\n", 977 "HBA state x%x\n",
948 pmb->mb.mbxStatus, vport->port_state); 978 pmb->u.mb.mbxStatus, vport->port_state);
949 mempool_free(pmb, phba->mbox_mem_pool); 979 mempool_free(pmb, phba->mbox_mem_pool);
950 980
951 lpfc_linkdown(phba); 981 lpfc_linkdown(phba);
@@ -959,9 +989,592 @@ out:
959} 989}
960 990
961static void 991static void
992lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
993{
994 struct lpfc_vport *vport = mboxq->vport;
995 unsigned long flags;
996
997 if (mboxq->u.mb.mbxStatus) {
998 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
999 "2017 REG_FCFI mbxStatus error x%x "
1000 "HBA state x%x\n",
1001 mboxq->u.mb.mbxStatus, vport->port_state);
1002 mempool_free(mboxq, phba->mbox_mem_pool);
1003 return;
1004 }
1005
1006 /* Start FCoE discovery by sending a FLOGI. */
1007 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
1008 /* Set the FCFI registered flag */
1009 spin_lock_irqsave(&phba->hbalock, flags);
1010 phba->fcf.fcf_flag |= FCF_REGISTERED;
1011 spin_unlock_irqrestore(&phba->hbalock, flags);
1012 if (vport->port_state != LPFC_FLOGI) {
1013 spin_lock_irqsave(&phba->hbalock, flags);
1014 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
1015 spin_unlock_irqrestore(&phba->hbalock, flags);
1016 lpfc_initial_flogi(vport);
1017 }
1018
1019 mempool_free(mboxq, phba->mbox_mem_pool);
1020 return;
1021}
1022
1023/**
1024 * lpfc_fab_name_match - Check if the fcf fabric name matches.
1025 * @fab_name: pointer to fabric name.
1026 * @new_fcf_record: pointer to fcf record.
1027 *
1028 * This routine compares the fcf record's fabric name with the provided
1029 * fabric name. If the fabric names are identical this function
1030 * returns 1, else it returns 0.
1031 **/
1032static uint32_t
1033lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1034{
1035 if ((fab_name[0] ==
1036 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) &&
1037 (fab_name[1] ==
1038 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) &&
1039 (fab_name[2] ==
1040 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) &&
1041 (fab_name[3] ==
1042 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) &&
1043 (fab_name[4] ==
1044 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) &&
1045 (fab_name[5] ==
1046 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) &&
1047 (fab_name[6] ==
1048 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) &&
1049 (fab_name[7] ==
1050 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)))
1051 return 1;
1052 else
1053 return 0;
1054}
1055
1056/**
1057 * lpfc_mac_addr_match - Check if the fcf mac address matches.
1058 * @phba: pointer to lpfc hba data structure.
1059 * @new_fcf_record: pointer to fcf record.
1060 *
1061 * This routine compares the fcf record's mac address with the HBA's
1062 * FCF mac address. If the mac addresses are identical this function
1063 * returns 1, else it returns 0.
1064 **/
1065static uint32_t
1066lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
1067{
1068 if ((phba->fcf.mac_addr[0] ==
1069 bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) &&
1070 (phba->fcf.mac_addr[1] ==
1071 bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) &&
1072 (phba->fcf.mac_addr[2] ==
1073 bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) &&
1074 (phba->fcf.mac_addr[3] ==
1075 bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) &&
1076 (phba->fcf.mac_addr[4] ==
1077 bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) &&
1078 (phba->fcf.mac_addr[5] ==
1079 bf_get(lpfc_fcf_record_mac_5, new_fcf_record)))
1080 return 1;
1081 else
1082 return 0;
1083}
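
Both matchers above compare one byte at a time because each byte must first be unpacked from the hardware-defined FCF record layout through bf_get(). Once the bytes sit in ordinary arrays, the same test collapses to a single memcmp(); a standalone sketch with hypothetical helper names, not part of the driver:

#include <stdint.h>
#include <string.h>

/* Compare two unpacked 8-byte fabric names. */
static int fab_name_match(const uint8_t cur[8], const uint8_t cand[8])
{
	return memcmp(cur, cand, 8) == 0;	/* the eight chained tests */
}

/* Compare two unpacked 6-byte MAC addresses. */
static int mac_addr_match(const uint8_t cur[6], const uint8_t cand[6])
{
	return memcmp(cur, cand, 6) == 0;	/* the six chained tests */
}
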
1084
1085/**
1086 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
1087 * @phba: pointer to lpfc hba data structure.
1088 * @new_fcf_record: pointer to fcf record.
1089 *
1090 * This routine copies the FCF information from the FCF
1091 * record to lpfc_hba data structure.
1092 **/
1093static void
1094lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
1095{
1096 phba->fcf.fabric_name[0] =
1097 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
1098 phba->fcf.fabric_name[1] =
1099 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
1100 phba->fcf.fabric_name[2] =
1101 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
1102 phba->fcf.fabric_name[3] =
1103 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
1104 phba->fcf.fabric_name[4] =
1105 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
1106 phba->fcf.fabric_name[5] =
1107 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
1108 phba->fcf.fabric_name[6] =
1109 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
1110 phba->fcf.fabric_name[7] =
1111 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
1112 phba->fcf.mac_addr[0] =
1113 bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
1114 phba->fcf.mac_addr[1] =
1115 bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
1116 phba->fcf.mac_addr[2] =
1117 bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
1118 phba->fcf.mac_addr[3] =
1119 bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
1120 phba->fcf.mac_addr[4] =
1121 bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
1122 phba->fcf.mac_addr[5] =
1123 bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1124 phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1125 phba->fcf.priority = new_fcf_record->fip_priority;
1126}
1127
1128/**
1129 * lpfc_register_fcf - Register the FCF with hba.
1130 * @phba: pointer to lpfc hba data structure.
1131 *
1132 * This routine issues a register fcfi mailbox command to register
1133 * the fcf with HBA.
1134 **/
1135static void
1136lpfc_register_fcf(struct lpfc_hba *phba)
1137{
1138 LPFC_MBOXQ_t *fcf_mbxq;
1139 int rc;
1140 unsigned long flags;
1141
1142 spin_lock_irqsave(&phba->hbalock, flags);
1143
1144	/* If the FCF is not available, do nothing. */
1145 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1146 spin_unlock_irqrestore(&phba->hbalock, flags);
1147 return;
1148 }
1149
1150 /* The FCF is already registered, start discovery */
1151 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1152 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
1153 spin_unlock_irqrestore(&phba->hbalock, flags);
1154 if (phba->pport->port_state != LPFC_FLOGI)
1155 lpfc_initial_flogi(phba->pport);
1156 return;
1157 }
1158 spin_unlock_irqrestore(&phba->hbalock, flags);
1159
1160 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
1161 GFP_KERNEL);
1162 if (!fcf_mbxq)
1163 return;
1164
1165 lpfc_reg_fcfi(phba, fcf_mbxq);
1166 fcf_mbxq->vport = phba->pport;
1167 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
1168 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1169 if (rc == MBX_NOT_FINISHED)
1170 mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1171
1172 return;
1173}
1174
1175/**
1176 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
1177 * @phba: pointer to lpfc hba data structure.
1178 * @new_fcf_record: pointer to fcf record.
1179 * @boot_flag: Indicates if this record is used by the boot bios.
1180 * @addr_mode: The address mode to be used by this FCF.
1181 *
1182 * This routine compares the fcf record with the connect list obtained from
1183 * the config region to decide if this FCF can be used for SAN discovery. It
1184 * returns 1 if this record can be used for SAN discovery, else it returns
1185 * zero. If this FCF record can be used for SAN discovery, the boot_flag will
1186 * indicate if this FCF is used by the boot bios and addr_mode will indicate
1187 * the addressing mode to be used for this FCF when the function returns.
1188 * If the FCF record needs to be used with a particular vlan id, the vlan is
1189 * set in vlan_id on return of the function. If no VLAN tagging needs to
1190 * be used with the FCF, vlan_id will be set to 0xFFFF.
1191 **/
1192static int
1193lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1194 struct fcf_record *new_fcf_record,
1195 uint32_t *boot_flag, uint32_t *addr_mode,
1196 uint16_t *vlan_id)
1197{
1198 struct lpfc_fcf_conn_entry *conn_entry;
1199
1200 if (!phba->cfg_enable_fip) {
1201 *boot_flag = 0;
1202 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1203 new_fcf_record);
1204 if (phba->valid_vlan)
1205 *vlan_id = phba->vlan_id;
1206 else
1207 *vlan_id = 0xFFFF;
1208 return 1;
1209 }
1210
1211 /*
1212	 * If there are no FCF connection table entries, the driver connects
1213	 * to all FCFs.
1214 */
1215 if (list_empty(&phba->fcf_conn_rec_list)) {
1216 *boot_flag = 0;
1217 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1218 new_fcf_record);
1219 *vlan_id = 0xFFFF;
1220 return 1;
1221 }
1222
1223 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) {
1224 if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
1225 continue;
1226
1227 if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
1228 !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
1229 new_fcf_record))
1230 continue;
1231
1232 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
1233 /*
1234 * If the vlan bit map does not have the bit set for the
1235 * vlan id to be used, then it is not a match.
1236 */
1237 if (!(new_fcf_record->vlan_bitmap
1238 [conn_entry->conn_rec.vlan_tag / 8] &
1239 (1 << (conn_entry->conn_rec.vlan_tag % 8))))
1240 continue;
1241 }
1242
1243 /*
1244 * Check if the connection record specifies a required
1245 * addressing mode.
1246 */
1247 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1248 !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
1249
1250 /*
1251		 * If SPMA is required but the FCF does not support it, continue.
1252 */
1253 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1254 !(bf_get(lpfc_fcf_record_mac_addr_prov,
1255 new_fcf_record) & LPFC_FCF_SPMA))
1256 continue;
1257
1258 /*
1259		 * If FPMA is required but the FCF does not support it, continue.
1260 */
1261 if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1262 !(bf_get(lpfc_fcf_record_mac_addr_prov,
1263 new_fcf_record) & LPFC_FCF_FPMA))
1264 continue;
1265 }
1266
1267 /*
1268	 * This fcf record matches the filtering criteria.
1269 */
1270 if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
1271 *boot_flag = 1;
1272 else
1273 *boot_flag = 0;
1274
1275 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1276 new_fcf_record);
1277 /*
1278 * If the user specified a required address mode, assign that
1279 * address mode
1280 */
1281 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1282 (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
1283 *addr_mode = (conn_entry->conn_rec.flags &
1284 FCFCNCT_AM_SPMA) ?
1285 LPFC_FCF_SPMA : LPFC_FCF_FPMA;
1286 /*
1287	 * If the user specified a preferred address mode, use that
1288	 * addr mode only if the FCF supports it.
1289 */
1290 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1291 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1292 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1293 (*addr_mode & LPFC_FCF_SPMA))
1294 *addr_mode = LPFC_FCF_SPMA;
1295 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1296 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1297 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1298 (*addr_mode & LPFC_FCF_FPMA))
1299 *addr_mode = LPFC_FCF_FPMA;
1300 /*
1301	 * If the user did not specify any addressing mode, use FPMA if
1302	 * possible, else use SPMA.
1303 */
1304 else if (*addr_mode & LPFC_FCF_FPMA)
1305 *addr_mode = LPFC_FCF_FPMA;
1306
1307 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
1308 *vlan_id = conn_entry->conn_rec.vlan_tag;
1309 else
1310 *vlan_id = 0xFFFF;
1311
1312 return 1;
1313 }
1314
1315 return 0;
1316}
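
The VLAN test inside the connect-list walk indexes a packed bitmap: the bit for a tag lives in byte tag / 8 at bit position tag % 8. The lookup in a self-contained sketch (vlan_id_allowed is a hypothetical name; the record packs 4096 VLAN bits into 512 bytes):

#include <stdint.h>
#include <stdio.h>

/* Test whether a VLAN id is marked usable in a packed bitmap. */
static int vlan_id_allowed(const uint8_t *bitmap, uint16_t tag)
{
	return bitmap[tag / 8] & (1 << (tag % 8));
}

int main(void)
{
	uint8_t bitmap[512] = { 0 };		/* 4096 VLANs, one bit each */

	bitmap[100 / 8] |= 1 << (100 % 8);	/* mark VLAN 100 usable */
	printf("vlan 100: %d\n", !!vlan_id_allowed(bitmap, 100));
	printf("vlan 101: %d\n", !!vlan_id_allowed(bitmap, 101));
	return 0;
}
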
1317
1318/**
1319 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
1320 * @phba: pointer to lpfc hba data structure.
1321 * @mboxq: pointer to mailbox object.
1322 *
1323 * This function iterates through all the fcf records available in the
1324 * HBA and chooses the optimal FCF record for discovery. After finding
1325 * the FCF for discovery it registers the FCF record and kick starts
1326 * discovery.
1327 * If the FCF_IN_USE flag is set in the currently used FCF, the routine tries
1328 * to use an FCF record which matches the fabric name and mac address of the
1329 * currently used FCF record.
1330 * If the driver supports only one FCF, it will try to use the FCF record
1331 * used by the BOOT_BIOS.
1332 */
1333void
1334lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1335{
1336 void *virt_addr;
1337 dma_addr_t phys_addr;
1338 uint8_t *bytep;
1339 struct lpfc_mbx_sge sge;
1340 struct lpfc_mbx_read_fcf_tbl *read_fcf;
1341 uint32_t shdr_status, shdr_add_status;
1342 union lpfc_sli4_cfg_shdr *shdr;
1343 struct fcf_record *new_fcf_record;
1344 int rc;
1345 uint32_t boot_flag, addr_mode;
1346 uint32_t next_fcf_index;
1347 unsigned long flags;
1348 uint16_t vlan_id;
1349
1350 /* Get the first SGE entry from the non-embedded DMA memory. This
1351 * routine only uses a single SGE.
1352 */
1353 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
1354 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
1355 if (unlikely(!mboxq->sge_array)) {
1356 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1357 "2524 Failed to get the non-embedded SGE "
1358 "virtual address\n");
1359 goto out;
1360 }
1361 virt_addr = mboxq->sge_array->addr[0];
1362
1363 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
1364 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1365 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
1366 &shdr->response);
1367 /*
1368 * The FCF Record was read and there is no reason for the driver
1369	 * to maintain the FCF record data or memory. Instead, it just needs
1370	 * to keep track of which FCFIs can be used.
1371 */
1372 if (shdr_status || shdr_add_status) {
1373 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1374 "2521 READ_FCF_RECORD mailbox failed "
1375 "with status x%x add_status x%x, mbx\n",
1376 shdr_status, shdr_add_status);
1377 goto out;
1378 }
1379 /* Interpreting the returned information of FCF records */
1380 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
1381 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
1382 sizeof(struct lpfc_mbx_read_fcf_tbl));
1383 next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
1384
1385 new_fcf_record = (struct fcf_record *)(virt_addr +
1386 sizeof(struct lpfc_mbx_read_fcf_tbl));
1387 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
1388 sizeof(struct fcf_record));
1389 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
1390
1391 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record,
1392 &boot_flag, &addr_mode,
1393 &vlan_id);
1394 /*
1395	 * If the fcf record does not match the connect list entries,
1396	 * read the next entry.
1397 */
1398 if (!rc)
1399 goto read_next_fcf;
1400 /*
1401	 * If this is not the first FCF discovery of the HBA, use the last
1402	 * FCF record for the discovery.
1403 */
1404 spin_lock_irqsave(&phba->hbalock, flags);
1405 if (phba->fcf.fcf_flag & FCF_IN_USE) {
1406 if (lpfc_fab_name_match(phba->fcf.fabric_name,
1407 new_fcf_record) &&
1408 lpfc_mac_addr_match(phba, new_fcf_record)) {
1409 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1410 spin_unlock_irqrestore(&phba->hbalock, flags);
1411 goto out;
1412 }
1413 spin_unlock_irqrestore(&phba->hbalock, flags);
1414 goto read_next_fcf;
1415 }
1416 if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
1417 /*
1418 * If the current FCF record does not have boot flag
1419 * set and new fcf record has boot flag set, use the
1420 * new fcf record.
1421 */
1422 if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
1423 /* Use this FCF record */
1424 lpfc_copy_fcf_record(phba, new_fcf_record);
1425 phba->fcf.addr_mode = addr_mode;
1426 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
1427 if (vlan_id != 0xFFFF) {
1428 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1429 phba->fcf.vlan_id = vlan_id;
1430 }
1431 spin_unlock_irqrestore(&phba->hbalock, flags);
1432 goto read_next_fcf;
1433 }
1434 /*
1435 * If the current FCF record has boot flag set and the
1436 * new FCF record does not have boot flag, read the next
1437 * FCF record.
1438 */
1439 if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
1440 spin_unlock_irqrestore(&phba->hbalock, flags);
1441 goto read_next_fcf;
1442 }
1443 /*
1444 * If there is a record with lower priority value for
1445 * the current FCF, use that record.
1446 */
1447 if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record)
1448 && (new_fcf_record->fip_priority <
1449 phba->fcf.priority)) {
1450 /* Use this FCF record */
1451 lpfc_copy_fcf_record(phba, new_fcf_record);
1452 phba->fcf.addr_mode = addr_mode;
1453 if (vlan_id != 0xFFFF) {
1454 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1455 phba->fcf.vlan_id = vlan_id;
1456 }
1457 spin_unlock_irqrestore(&phba->hbalock, flags);
1458 goto read_next_fcf;
1459 }
1460 spin_unlock_irqrestore(&phba->hbalock, flags);
1461 goto read_next_fcf;
1462 }
1463 /*
1464 * This is the first available FCF record, use this
1465 * record.
1466 */
1467 lpfc_copy_fcf_record(phba, new_fcf_record);
1468 phba->fcf.addr_mode = addr_mode;
1469 if (boot_flag)
1470 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
1471 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1472 if (vlan_id != 0xFFFF) {
1473 phba->fcf.fcf_flag |= FCF_VALID_VLAN;
1474 phba->fcf.vlan_id = vlan_id;
1475 }
1476 spin_unlock_irqrestore(&phba->hbalock, flags);
1477 goto read_next_fcf;
1478
1479read_next_fcf:
1480 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1481 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0)
1482 lpfc_register_fcf(phba);
1483 else
1484 lpfc_sli4_read_fcf_record(phba, next_fcf_index);
1485 return;
1486
1487out:
1488 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1489 lpfc_register_fcf(phba);
1490
1491 return;
1492}
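
The record selection above boils down to three ordered rules: a candidate carrying the boot flag beats a current record without it, a current record with the boot flag is never displaced by one without it, and otherwise a candidate on the same fabric wins only with a strictly lower fip_priority. A condensed standalone sketch of that ordering (struct and function names are hypothetical, assuming lower priority values are preferred as above):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Unpacked view of the fields the completion handler compares. */
struct fcf_candidate {
	uint8_t fabric_name[8];
	uint8_t priority;	/* fip_priority: lower is preferred */
	bool	boot;		/* record carries the boot-bios flag */
};

/* Return true if 'cand' should replace 'cur', mirroring the ordering
 * used by lpfc_mbx_cmpl_read_fcf_record() above. */
static bool fcf_prefer_new(const struct fcf_candidate *cur,
			   const struct fcf_candidate *cand)
{
	if (cand->boot && !cur->boot)
		return true;		/* boot flag wins outright */
	if (!cand->boot && cur->boot)
		return false;		/* never drop a boot record */
	return memcmp(cur->fabric_name, cand->fabric_name, 8) == 0 &&
	       cand->priority < cur->priority;	/* same fabric, lower prio */
}
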
1493
1494/**
1495 * lpfc_start_fdiscs - send fdiscs for each vports on this port.
1496 * @phba: pointer to lpfc hba data structure.
1497 *
1498 * This function loops through the list of vports on the @phba and issues an
1499 * FDISC if possible.
1500 */
1501void
1502lpfc_start_fdiscs(struct lpfc_hba *phba)
1503{
1504 struct lpfc_vport **vports;
1505 int i;
1506
1507 vports = lpfc_create_vport_work_array(phba);
1508 if (vports != NULL) {
1509 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1510 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1511 continue;
1512 /* There are no vpi for this vport */
1513 if (vports[i]->vpi > phba->max_vpi) {
1514 lpfc_vport_set_state(vports[i],
1515 FC_VPORT_FAILED);
1516 continue;
1517 }
1518 if (phba->fc_topology == TOPOLOGY_LOOP) {
1519 lpfc_vport_set_state(vports[i],
1520 FC_VPORT_LINKDOWN);
1521 continue;
1522 }
1523 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1524 lpfc_initial_fdisc(vports[i]);
1525 else {
1526 lpfc_vport_set_state(vports[i],
1527 FC_VPORT_NO_FABRIC_SUPP);
1528 lpfc_printf_vlog(vports[i], KERN_ERR,
1529 LOG_ELS,
1530 "0259 No NPIV "
1531 "Fabric support\n");
1532 }
1533 }
1534 }
1535 lpfc_destroy_vport_work_array(phba, vports);
1536}
1537
1538void
1539lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1540{
1541 struct lpfc_dmabuf *dmabuf = mboxq->context1;
1542 struct lpfc_vport *vport = mboxq->vport;
1543
1544 if (mboxq->u.mb.mbxStatus) {
1545 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1546 "2018 REG_VFI mbxStatus error x%x "
1547 "HBA state x%x\n",
1548 mboxq->u.mb.mbxStatus, vport->port_state);
1549 if (phba->fc_topology == TOPOLOGY_LOOP) {
1550 /* FLOGI failed, use loop map to make discovery list */
1551 lpfc_disc_list_loopmap(vport);
1552 /* Start discovery */
1553 lpfc_disc_start(vport);
1554 goto fail_free_mem;
1555 }
1556 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1557 goto fail_free_mem;
1558 }
1559 /* Mark the vport has registered with its VFI */
1560 vport->vfi_state |= LPFC_VFI_REGISTERED;
1561
1562 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1563 lpfc_start_fdiscs(phba);
1564 lpfc_do_scr_ns_plogi(phba, vport);
1565 }
1566
1567fail_free_mem:
1568 mempool_free(mboxq, phba->mbox_mem_pool);
1569 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
1570 kfree(dmabuf);
1571 return;
1572}
1573
1574static void
962lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1575lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
963{ 1576{
964 MAILBOX_t *mb = &pmb->mb; 1577 MAILBOX_t *mb = &pmb->u.mb;
965 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; 1578 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
966 struct lpfc_vport *vport = pmb->vport; 1579 struct lpfc_vport *vport = pmb->vport;
967 1580
@@ -1012,13 +1625,13 @@ static void
1012lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) 1625lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1013{ 1626{
1014 struct lpfc_vport *vport = phba->pport; 1627 struct lpfc_vport *vport = phba->pport;
1015 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; 1628 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
1016 int i; 1629 int i;
1017 struct lpfc_dmabuf *mp; 1630 struct lpfc_dmabuf *mp;
1018 int rc; 1631 int rc;
1632 struct fcf_record *fcf_record;
1019 1633
1020 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1634 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1021 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1022 1635
1023 spin_lock_irq(&phba->hbalock); 1636 spin_lock_irq(&phba->hbalock);
1024 switch (la->UlnkSpeed) { 1637 switch (la->UlnkSpeed) {
@@ -1034,6 +1647,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1034 case LA_8GHZ_LINK: 1647 case LA_8GHZ_LINK:
1035 phba->fc_linkspeed = LA_8GHZ_LINK; 1648 phba->fc_linkspeed = LA_8GHZ_LINK;
1036 break; 1649 break;
1650 case LA_10GHZ_LINK:
1651 phba->fc_linkspeed = LA_10GHZ_LINK;
1652 break;
1037 default: 1653 default:
1038 phba->fc_linkspeed = LA_UNKNW_LINK; 1654 phba->fc_linkspeed = LA_UNKNW_LINK;
1039 break; 1655 break;
@@ -1115,22 +1731,66 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1115 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1731 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1116 kfree(mp); 1732 kfree(mp);
1117 mempool_free(sparam_mbox, phba->mbox_mem_pool); 1733 mempool_free(sparam_mbox, phba->mbox_mem_pool);
1118 if (cfglink_mbox)
1119 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1120 goto out; 1734 goto out;
1121 } 1735 }
1122 } 1736 }
1123 1737
1124 if (cfglink_mbox) { 1738 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
1739 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1740 if (!cfglink_mbox)
1741 goto out;
1125 vport->port_state = LPFC_LOCAL_CFG_LINK; 1742 vport->port_state = LPFC_LOCAL_CFG_LINK;
1126 lpfc_config_link(phba, cfglink_mbox); 1743 lpfc_config_link(phba, cfglink_mbox);
1127 cfglink_mbox->vport = vport; 1744 cfglink_mbox->vport = vport;
1128 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 1745 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
1129 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); 1746 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
1130 if (rc != MBX_NOT_FINISHED) 1747 if (rc == MBX_NOT_FINISHED) {
1131 return; 1748 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1132 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 1749 goto out;
1750 }
1751 } else {
1752 /*
1753 * Add the driver's default FCF record at FCF index 0 now. This
1754 * is phase 1 implementation that support FCF index 0 and driver
1755 * defaults.
1756 */
1757 if (phba->cfg_enable_fip == 0) {
1758 fcf_record = kzalloc(sizeof(struct fcf_record),
1759 GFP_KERNEL);
1760 if (unlikely(!fcf_record)) {
1761 lpfc_printf_log(phba, KERN_ERR,
1762 					LOG_MBOX | LOG_SLI,
1763 					"2554 Could not allocate memory for "
1764 "fcf record\n");
1765 rc = -ENODEV;
1766 goto out;
1767 }
1768
1769 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
1770 LPFC_FCOE_FCF_DEF_INDEX);
1771 rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
1772 if (unlikely(rc)) {
1773 lpfc_printf_log(phba, KERN_ERR,
1774 LOG_MBOX | LOG_SLI,
1775 "2013 Could not manually add FCF "
1776 "record 0, status %d\n", rc);
1777 rc = -ENODEV;
1778 kfree(fcf_record);
1779 goto out;
1780 }
1781 kfree(fcf_record);
1782 }
1783 /*
1784 * The driver is expected to do FIP/FCF. Call the port
1785 * and get the FCF Table.
1786 */
1787 rc = lpfc_sli4_read_fcf_record(phba,
1788 LPFC_FCOE_FCF_GET_FIRST);
1789 if (rc)
1790 goto out;
1133 } 1791 }
1792
1793 return;
1134out: 1794out:
1135 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1795 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1136 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1796 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -1147,10 +1807,12 @@ lpfc_enable_la(struct lpfc_hba *phba)
1147 struct lpfc_sli *psli = &phba->sli; 1807 struct lpfc_sli *psli = &phba->sli;
1148 spin_lock_irq(&phba->hbalock); 1808 spin_lock_irq(&phba->hbalock);
1149 psli->sli_flag |= LPFC_PROCESS_LA; 1809 psli->sli_flag |= LPFC_PROCESS_LA;
1150 control = readl(phba->HCregaddr); 1810 if (phba->sli_rev <= LPFC_SLI_REV3) {
1151 control |= HC_LAINT_ENA; 1811 control = readl(phba->HCregaddr);
1152 writel(control, phba->HCregaddr); 1812 control |= HC_LAINT_ENA;
1153 readl(phba->HCregaddr); /* flush */ 1813 writel(control, phba->HCregaddr);
1814 readl(phba->HCregaddr); /* flush */
1815 }
1154 spin_unlock_irq(&phba->hbalock); 1816 spin_unlock_irq(&phba->hbalock);
1155} 1817}
1156 1818
@@ -1159,6 +1821,7 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
1159{ 1821{
1160 lpfc_linkdown(phba); 1822 lpfc_linkdown(phba);
1161 lpfc_enable_la(phba); 1823 lpfc_enable_la(phba);
1824 lpfc_unregister_unused_fcf(phba);
1162 /* turn on Link Attention interrupts - no CLEAR_LA needed */ 1825 /* turn on Link Attention interrupts - no CLEAR_LA needed */
1163} 1826}
1164 1827
@@ -1175,7 +1838,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1175 struct lpfc_vport *vport = pmb->vport; 1838 struct lpfc_vport *vport = pmb->vport;
1176 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1839 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1177 READ_LA_VAR *la; 1840 READ_LA_VAR *la;
1178 MAILBOX_t *mb = &pmb->mb; 1841 MAILBOX_t *mb = &pmb->u.mb;
1179 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 1842 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1180 1843
1181 /* Unblock ELS traffic */ 1844 /* Unblock ELS traffic */
@@ -1190,7 +1853,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1190 goto lpfc_mbx_cmpl_read_la_free_mbuf; 1853 goto lpfc_mbx_cmpl_read_la_free_mbuf;
1191 } 1854 }
1192 1855
1193 la = (READ_LA_VAR *) & pmb->mb.un.varReadLA; 1856 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
1194 1857
1195 memcpy(&phba->alpa_map[0], mp->virt, 128); 1858 memcpy(&phba->alpa_map[0], mp->virt, 128);
1196 1859
@@ -1328,7 +1991,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1328static void 1991static void
1329lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1992lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1330{ 1993{
1331 MAILBOX_t *mb = &pmb->mb; 1994 MAILBOX_t *mb = &pmb->u.mb;
1332 struct lpfc_vport *vport = pmb->vport; 1995 struct lpfc_vport *vport = pmb->vport;
1333 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1996 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1334 1997
@@ -1381,7 +2044,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1381{ 2044{
1382 struct lpfc_vport *vport = pmb->vport; 2045 struct lpfc_vport *vport = pmb->vport;
1383 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2046 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1384 MAILBOX_t *mb = &pmb->mb; 2047 MAILBOX_t *mb = &pmb->u.mb;
1385 2048
1386 switch (mb->mbxStatus) { 2049 switch (mb->mbxStatus) {
1387 case 0x0011: 2050 case 0x0011:
@@ -1416,6 +2079,128 @@ out:
1416 return; 2079 return;
1417} 2080}
1418 2081
2082/**
2083 * lpfc_create_static_vport - Read HBA config region to create static vports.
2084 * @phba: pointer to lpfc hba data structure.
2085 *
2086 * This routine issues a DUMP mailbox command for config region 22 to get
2087 * the list of static vports to be created. The function creates vports
2088 * based on the information returned from the HBA.
2089 **/
2090void
2091lpfc_create_static_vport(struct lpfc_hba *phba)
2092{
2093 LPFC_MBOXQ_t *pmb = NULL;
2094 MAILBOX_t *mb;
2095 struct static_vport_info *vport_info;
2096 int rc, i;
2097 struct fc_vport_identifiers vport_id;
2098 struct fc_vport *new_fc_vport;
2099 struct Scsi_Host *shost;
2100 struct lpfc_vport *vport;
2101 uint16_t offset = 0;
2102 uint8_t *vport_buff;
2103
2104 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2105 if (!pmb) {
2106 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2107 "0542 lpfc_create_static_vport failed to"
2108 " allocate mailbox memory\n");
2109 return;
2110 }
2111
2112 mb = &pmb->u.mb;
2113
2114 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
2115 if (!vport_info) {
2116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2117 "0543 lpfc_create_static_vport failed to"
2118 " allocate vport_info\n");
2119 mempool_free(pmb, phba->mbox_mem_pool);
2120 return;
2121 }
2122
2123 vport_buff = (uint8_t *) vport_info;
2124 do {
2125 lpfc_dump_static_vport(phba, pmb, offset);
2126 pmb->vport = phba->pport;
2127 rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
2128
2129 if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
2130 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2131 "0544 lpfc_create_static_vport failed to"
2132 " issue dump mailbox command ret 0x%x "
2133 "status 0x%x\n",
2134 rc, mb->mbxStatus);
2135 goto out;
2136 }
2137
2138 if (mb->un.varDmp.word_cnt >
2139 sizeof(struct static_vport_info) - offset)
2140 mb->un.varDmp.word_cnt =
2141 sizeof(struct static_vport_info) - offset;
2142
2143 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
2144 vport_buff + offset,
2145 mb->un.varDmp.word_cnt);
2146 offset += mb->un.varDmp.word_cnt;
2147
2148 } while (mb->un.varDmp.word_cnt &&
2149 offset < sizeof(struct static_vport_info));
2150
2151
2152 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
2153 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
2154 != VPORT_INFO_REV)) {
2155 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2156 "0545 lpfc_create_static_vport bad"
2157 " information header 0x%x 0x%x\n",
2158 le32_to_cpu(vport_info->signature),
2159 le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
2160
2161 goto out;
2162 }
2163
2164 shost = lpfc_shost_from_vport(phba->pport);
2165
2166 for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
2167 memset(&vport_id, 0, sizeof(vport_id));
2168 vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
2169 vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
2170 if (!vport_id.port_name || !vport_id.node_name)
2171 continue;
2172
2173 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
2174 vport_id.vport_type = FC_PORTTYPE_NPIV;
2175 vport_id.disable = false;
2176 new_fc_vport = fc_vport_create(shost, 0, &vport_id);
2177
2178 if (!new_fc_vport) {
2179 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2180 				"0546 lpfc_create_static_vport failed to"
2181 				" create vport\n");
2182 continue;
2183 }
2184
2185 vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
2186 vport->vport_flag |= STATIC_VPORT;
2187 }
2188
2189out:
2190 /*
2191	 * If this is a timed-out command, setting context2 to NULL tells the
2192	 * SLI layer not to use this buffer.
2193 */
2194 spin_lock_irq(&phba->hbalock);
2195 pmb->context2 = NULL;
2196 spin_unlock_irq(&phba->hbalock);
2197 kfree(vport_info);
2198 if (rc != MBX_TIMEOUT)
2199 mempool_free(pmb, phba->mbox_mem_pool);
2200
2201 return;
2202}
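
The DUMP loop in lpfc_create_static_vport() accumulates the config region chunk by chunk: each mailbox reports how much it returned, the count is clamped to the space left in the destination, and the running offset advances until the device returns nothing or the buffer is full. The same accumulation pattern in a self-contained sketch, with read_chunk() standing in for the mailbox round trip (region and chunk sizes are hypothetical):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Stand-in for one DUMP mailbox round trip: yields up to 'max' bytes of
 * a pretend 100-byte region starting at 'offset'; 0 means exhausted. */
static size_t read_chunk(uint8_t *dst, size_t offset, size_t max)
{
	const size_t region_len = 100;
	size_t n = 24;				/* bytes per round trip */

	if (offset >= region_len)
		return 0;
	if (n > region_len - offset)
		n = region_len - offset;
	if (n > max)
		n = max;			/* clamp to space left */
	memset(dst, 0xab, n);			/* pretend payload */
	return n;
}

/* Accumulate a fixed-size buffer chunk by chunk, following the loop in
 * lpfc_create_static_vport() above. */
static void read_region(uint8_t *buf, size_t buf_len)
{
	size_t offset = 0, got;

	do {
		got = read_chunk(buf + offset, offset, buf_len - offset);
		offset += got;
	} while (got && offset < buf_len);
}

int main(void)
{
	uint8_t info[64];

	read_region(info, sizeof(info));	/* fills all 64 bytes */
	return 0;
}
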
2203
1419/* 2204/*
1420 * This routine handles processing a Fabric REG_LOGIN mailbox 2205 * This routine handles processing a Fabric REG_LOGIN mailbox
1421 * command upon completion. It is setup in the LPFC_MBOXQ 2206 * command upon completion. It is setup in the LPFC_MBOXQ
@@ -1426,16 +2211,17 @@ void
1426lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2211lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1427{ 2212{
1428 struct lpfc_vport *vport = pmb->vport; 2213 struct lpfc_vport *vport = pmb->vport;
1429 MAILBOX_t *mb = &pmb->mb; 2214 MAILBOX_t *mb = &pmb->u.mb;
1430 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2215 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1431 struct lpfc_nodelist *ndlp; 2216 struct lpfc_nodelist *ndlp;
1432 struct lpfc_vport **vports;
1433 int i;
1434 2217
1435 ndlp = (struct lpfc_nodelist *) pmb->context2; 2218 ndlp = (struct lpfc_nodelist *) pmb->context2;
1436 pmb->context1 = NULL; 2219 pmb->context1 = NULL;
1437 pmb->context2 = NULL; 2220 pmb->context2 = NULL;
1438 if (mb->mbxStatus) { 2221 if (mb->mbxStatus) {
2222 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2223 "0258 Register Fabric login error: 0x%x\n",
2224 mb->mbxStatus);
1439 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2225 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1440 kfree(mp); 2226 kfree(mp);
1441 mempool_free(pmb, phba->mbox_mem_pool); 2227 mempool_free(pmb, phba->mbox_mem_pool);
@@ -1454,9 +2240,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1454 } 2240 }
1455 2241
1456 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2242 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1457 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1458 "0258 Register Fabric login error: 0x%x\n",
1459 mb->mbxStatus);
1460 /* Decrement the reference count to ndlp after the reference 2243 /* Decrement the reference count to ndlp after the reference
1461 * to the ndlp are done. 2244 * to the ndlp are done.
1462 */ 2245 */
@@ -1465,34 +2248,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1465 } 2248 }
1466 2249
1467 ndlp->nlp_rpi = mb->un.varWords[0]; 2250 ndlp->nlp_rpi = mb->un.varWords[0];
2251 ndlp->nlp_flag |= NLP_RPI_VALID;
1468 ndlp->nlp_type |= NLP_FABRIC; 2252 ndlp->nlp_type |= NLP_FABRIC;
1469 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2253 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1470 2254
1471 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2255 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1472 vports = lpfc_create_vport_work_array(phba); 2256 lpfc_start_fdiscs(phba);
1473 if (vports != NULL)
1474 for(i = 0;
1475 i <= phba->max_vpi && vports[i] != NULL;
1476 i++) {
1477 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1478 continue;
1479 if (phba->fc_topology == TOPOLOGY_LOOP) {
1480 lpfc_vport_set_state(vports[i],
1481 FC_VPORT_LINKDOWN);
1482 continue;
1483 }
1484 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1485 lpfc_initial_fdisc(vports[i]);
1486 else {
1487 lpfc_vport_set_state(vports[i],
1488 FC_VPORT_NO_FABRIC_SUPP);
1489 lpfc_printf_vlog(vport, KERN_ERR,
1490 LOG_ELS,
1491 "0259 No NPIV "
1492 "Fabric support\n");
1493 }
1494 }
1495 lpfc_destroy_vport_work_array(phba, vports);
1496 lpfc_do_scr_ns_plogi(phba, vport); 2257 lpfc_do_scr_ns_plogi(phba, vport);
1497 } 2258 }
1498 2259
@@ -1516,13 +2277,16 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1516void 2277void
1517lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2278lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1518{ 2279{
1519 MAILBOX_t *mb = &pmb->mb; 2280 MAILBOX_t *mb = &pmb->u.mb;
1520 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2281 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1521 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 2282 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1522 struct lpfc_vport *vport = pmb->vport; 2283 struct lpfc_vport *vport = pmb->vport;
1523 2284
1524 if (mb->mbxStatus) { 2285 if (mb->mbxStatus) {
1525out: 2286out:
2287 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2288 "0260 Register NameServer error: 0x%x\n",
2289 mb->mbxStatus);
1526 /* decrement the node reference count held for this 2290 /* decrement the node reference count held for this
1527 * callback function. 2291 * callback function.
1528 */ 2292 */
@@ -1546,15 +2310,13 @@ out:
1546 return; 2310 return;
1547 } 2311 }
1548 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2312 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1549 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1550 "0260 Register NameServer error: 0x%x\n",
1551 mb->mbxStatus);
1552 return; 2313 return;
1553 } 2314 }
1554 2315
1555 pmb->context1 = NULL; 2316 pmb->context1 = NULL;
1556 2317
1557 ndlp->nlp_rpi = mb->un.varWords[0]; 2318 ndlp->nlp_rpi = mb->un.varWords[0];
2319 ndlp->nlp_flag |= NLP_RPI_VALID;
1558 ndlp->nlp_type |= NLP_FABRIC; 2320 ndlp->nlp_type |= NLP_FABRIC;
1559 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2321 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1560 2322
@@ -2055,7 +2817,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
2055 if (pring->ringno == LPFC_ELS_RING) { 2817 if (pring->ringno == LPFC_ELS_RING) {
2056 switch (icmd->ulpCommand) { 2818 switch (icmd->ulpCommand) {
2057 case CMD_GEN_REQUEST64_CR: 2819 case CMD_GEN_REQUEST64_CR:
2058 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) 2820 if (iocb->context_un.ndlp == ndlp)
2059 return 1; 2821 return 1;
2060 case CMD_ELS_REQUEST64_CR: 2822 case CMD_ELS_REQUEST64_CR:
2061 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID) 2823 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
@@ -2102,7 +2864,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2102 */ 2864 */
2103 psli = &phba->sli; 2865 psli = &phba->sli;
2104 rpi = ndlp->nlp_rpi; 2866 rpi = ndlp->nlp_rpi;
2105 if (rpi) { 2867 if (ndlp->nlp_flag & NLP_RPI_VALID) {
2106 /* Now process each ring */ 2868 /* Now process each ring */
2107 for (i = 0; i < psli->num_rings; i++) { 2869 for (i = 0; i < psli->num_rings; i++) {
2108 pring = &psli->ring[i]; 2870 pring = &psli->ring[i];
@@ -2150,7 +2912,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2150 LPFC_MBOXQ_t *mbox; 2912 LPFC_MBOXQ_t *mbox;
2151 int rc; 2913 int rc;
2152 2914
2153 if (ndlp->nlp_rpi) { 2915 if (ndlp->nlp_flag & NLP_RPI_VALID) {
2154 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2916 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2155 if (mbox) { 2917 if (mbox) {
2156 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); 2918 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
@@ -2162,6 +2924,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2162 } 2924 }
2163 lpfc_no_rpi(phba, ndlp); 2925 lpfc_no_rpi(phba, ndlp);
2164 ndlp->nlp_rpi = 0; 2926 ndlp->nlp_rpi = 0;
2927 ndlp->nlp_flag &= ~NLP_RPI_VALID;
2165 return 1; 2928 return 1;
2166 } 2929 }
2167 return 0; 2930 return 0;
@@ -2252,7 +3015,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2252 3015
2253 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 3016 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
2254 if ((mb = phba->sli.mbox_active)) { 3017 if ((mb = phba->sli.mbox_active)) {
2255 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 3018 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
2256 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 3019 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
2257 mb->context2 = NULL; 3020 mb->context2 = NULL;
2258 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3021 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -2261,7 +3024,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2261 3024
2262 spin_lock_irq(&phba->hbalock); 3025 spin_lock_irq(&phba->hbalock);
2263 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 3026 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
2264 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 3027 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
2265 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 3028 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
2266 mp = (struct lpfc_dmabuf *) (mb->context1); 3029 mp = (struct lpfc_dmabuf *) (mb->context1);
2267 if (mp) { 3030 if (mp) {
@@ -2309,13 +3072,14 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2309 int rc; 3072 int rc;
2310 3073
2311 lpfc_cancel_retry_delay_tmo(vport, ndlp); 3074 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2312 if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) { 3075 if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
3076 !(ndlp->nlp_flag & NLP_RPI_VALID)) {
2313 /* For this case we need to cleanup the default rpi 3077 /* For this case we need to cleanup the default rpi
2314 * allocated by the firmware. 3078 * allocated by the firmware.
2315 */ 3079 */
2316 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) 3080 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
2317 != NULL) { 3081 != NULL) {
2318 rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID, 3082 rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
2319 (uint8_t *) &vport->fc_sparam, mbox, 0); 3083 (uint8_t *) &vport->fc_sparam, mbox, 0);
2320 if (rc) { 3084 if (rc) {
2321 mempool_free(mbox, phba->mbox_mem_pool); 3085 mempool_free(mbox, phba->mbox_mem_pool);
@@ -2553,7 +3317,8 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
2553 * clear_la then don't send it. 3317 * clear_la then don't send it.
2554 */ 3318 */
2555 if ((phba->link_state >= LPFC_CLEAR_LA) || 3319 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2556 (vport->port_type != LPFC_PHYSICAL_PORT)) 3320 (vport->port_type != LPFC_PHYSICAL_PORT) ||
3321 (phba->sli_rev == LPFC_SLI_REV4))
2557 return; 3322 return;
2558 3323
2559 /* Link up discovery */ 3324 /* Link up discovery */
@@ -2582,7 +3347,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2582 3347
2583 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3348 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2584 if (regvpimbox) { 3349 if (regvpimbox) {
2585 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox); 3350 lpfc_reg_vpi(vport, regvpimbox);
2586 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; 3351 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2587 regvpimbox->vport = vport; 3352 regvpimbox->vport = vport;
2588 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) 3353 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
@@ -2642,7 +3407,8 @@ lpfc_disc_start(struct lpfc_vport *vport)
2642 */ 3407 */
2643 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3408 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2644 !(vport->fc_flag & FC_PT2PT) && 3409 !(vport->fc_flag & FC_PT2PT) &&
2645 !(vport->fc_flag & FC_RSCN_MODE)) { 3410 !(vport->fc_flag & FC_RSCN_MODE) &&
3411 (phba->sli_rev < LPFC_SLI_REV4)) {
2646 lpfc_issue_reg_vpi(phba, vport); 3412 lpfc_issue_reg_vpi(phba, vport);
2647 return; 3413 return;
2648 } 3414 }
@@ -2919,11 +3685,13 @@ restart_disc:
2919 * set port_state to PORT_READY if SLI2. 3685 * set port_state to PORT_READY if SLI2.
2920 * cmpl_reg_vpi will set port_state to READY for SLI3. 3686 * cmpl_reg_vpi will set port_state to READY for SLI3.
2921 */ 3687 */
2922 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3688 if (phba->sli_rev < LPFC_SLI_REV4) {
2923 lpfc_issue_reg_vpi(phba, vport); 3689 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2924 else { /* NPIV Not enabled */ 3690 lpfc_issue_reg_vpi(phba, vport);
2925 lpfc_issue_clear_la(phba, vport); 3691 else { /* NPIV Not enabled */
2926 vport->port_state = LPFC_VPORT_READY; 3692 lpfc_issue_clear_la(phba, vport);
3693 vport->port_state = LPFC_VPORT_READY;
3694 }
2927 } 3695 }
2928 3696
2929 /* Setup and issue mailbox INITIALIZE LINK command */ 3697 /* Setup and issue mailbox INITIALIZE LINK command */
@@ -2939,7 +3707,7 @@ restart_disc:
2939 lpfc_linkdown(phba); 3707 lpfc_linkdown(phba);
2940 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, 3708 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2941 phba->cfg_link_speed); 3709 phba->cfg_link_speed);
2942 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 3710 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
2943 initlinkmbox->vport = vport; 3711 initlinkmbox->vport = vport;
2944 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3712 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2945 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); 3713 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
@@ -2959,11 +3727,13 @@ restart_disc:
2959 * set port_state to PORT_READY if SLI2. 3727 * set port_state to PORT_READY if SLI2.
2960 * cmpl_reg_vpi will set port_state to READY for SLI3. 3728 * cmpl_reg_vpi will set port_state to READY for SLI3.
2961 */ 3729 */
2962 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3730 if (phba->sli_rev < LPFC_SLI_REV4) {
2963 lpfc_issue_reg_vpi(phba, vport); 3731 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2964 else { /* NPIV Not enabled */ 3732 lpfc_issue_reg_vpi(phba, vport);
2965 lpfc_issue_clear_la(phba, vport); 3733 else { /* NPIV Not enabled */
2966 vport->port_state = LPFC_VPORT_READY; 3734 lpfc_issue_clear_la(phba, vport);
3735 vport->port_state = LPFC_VPORT_READY;
3736 }
2967 } 3737 }
2968 break; 3738 break;
2969 3739
@@ -3036,7 +3806,7 @@ restart_disc:
3036void 3806void
3037lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3807lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3038{ 3808{
3039 MAILBOX_t *mb = &pmb->mb; 3809 MAILBOX_t *mb = &pmb->u.mb;
3040 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3810 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3041 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3811 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3042 struct lpfc_vport *vport = pmb->vport; 3812 struct lpfc_vport *vport = pmb->vport;
@@ -3044,6 +3814,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3044 pmb->context1 = NULL; 3814 pmb->context1 = NULL;
3045 3815
3046 ndlp->nlp_rpi = mb->un.varWords[0]; 3816 ndlp->nlp_rpi = mb->un.varWords[0];
3817 ndlp->nlp_flag |= NLP_RPI_VALID;
3047 ndlp->nlp_type |= NLP_FABRIC; 3818 ndlp->nlp_type |= NLP_FABRIC;
3048 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3819 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3049 3820
@@ -3297,3 +4068,395 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
3297 return 1; 4068 return 1;
3298 return 0; 4069 return 0;
3299} 4070}
4071
4072/**
4073 * lpfc_fcf_inuse - Check if FCF can be unregistered.
4074 * @phba: Pointer to hba context object.
4075 *
4076 * This function iterate through all FC nodes associated
4077 * will all vports to check if there is any node with
4078 * fc_rports associated with it. If there is an fc_rport
4079 * associated with the node, then the node is either in
4080 * discovered state or its devloss_timer is pending.
4081 */
4082static int
4083lpfc_fcf_inuse(struct lpfc_hba *phba)
4084{
4085 struct lpfc_vport **vports;
4086 int i, ret = 0;
4087 struct lpfc_nodelist *ndlp;
4088 struct Scsi_Host *shost;
4089
4090 vports = lpfc_create_vport_work_array(phba);
4091
4092 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4093 shost = lpfc_shost_from_vport(vports[i]);
4094 spin_lock_irq(shost->host_lock);
4095 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
4096 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
4097 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
4098 ret = 1;
4099 spin_unlock_irq(shost->host_lock);
4100 goto out;
4101 }
4102 }
4103 spin_unlock_irq(shost->host_lock);
4104 }
4105out:
4106 lpfc_destroy_vport_work_array(phba, vports);
4107 return ret;
4108}
4109
4110/**
4111 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
4112 * @phba: Pointer to hba context object.
4113 * @mboxq: Pointer to mailbox object.
4114 *
4115 * This function frees memory associated with the mailbox command.
4116 */
4117static void
4118lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4119{
4120 struct lpfc_vport *vport = mboxq->vport;
4121
4122 if (mboxq->u.mb.mbxStatus) {
4123 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4124 "2555 UNREG_VFI mbxStatus error x%x "
4125 "HBA state x%x\n",
4126 mboxq->u.mb.mbxStatus, vport->port_state);
4127 }
4128 mempool_free(mboxq, phba->mbox_mem_pool);
4129 return;
4130}
4131
4132/**
4133 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
4134 * @phba: Pointer to hba context object.
4135 * @mboxq: Pointer to mailbox object.
4136 *
4137 * This function frees memory associated with the mailbox command.
4138 */
4139static void
4140lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4141{
4142 struct lpfc_vport *vport = mboxq->vport;
4143
4144 if (mboxq->u.mb.mbxStatus) {
4145 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4146 "2550 UNREG_FCFI mbxStatus error x%x "
4147 "HBA state x%x\n",
4148 mboxq->u.mb.mbxStatus, vport->port_state);
4149 }
4150 mempool_free(mboxq, phba->mbox_mem_pool);
4151 return;
4152}
4153
4154/**
4155 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
4156 * @phba: Pointer to hba context object.
4157 *
4158 * This function checks if there are any connected remote ports for the FCF
4159 * and, if all the devices are disconnected, unregisters the FCFI.
4160 * This function also tries to use another FCF for discovery.
4161 */
4162void
4163lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
4164{
4165 LPFC_MBOXQ_t *mbox;
4166 int rc;
4167 struct lpfc_vport **vports;
4168 int i;
4169
4170 spin_lock_irq(&phba->hbalock);
4171 /*
4172	 * If the HBA is not running in FIP mode, or
4173	 * the HBA does not support FCoE, or
4174	 * the FCF is not registered,
4175	 * do nothing.
4176 */
4177 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
4178 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
4179 (phba->cfg_enable_fip == 0)) {
4180 spin_unlock_irq(&phba->hbalock);
4181 return;
4182 }
4183 spin_unlock_irq(&phba->hbalock);
4184
4185 if (lpfc_fcf_inuse(phba))
4186 return;
4187
4188
4189 /* Unregister VPIs */
4190 vports = lpfc_create_vport_work_array(phba);
4191 if (vports &&
4192 (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
4193 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4194 lpfc_mbx_unreg_vpi(vports[i]);
4195 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4196 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
4197 }
4198 lpfc_destroy_vport_work_array(phba, vports);
4199
4200 /* Unregister VFI */
4201 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4202 if (!mbox) {
4203 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4204 			"2556 UNREG_VFI mbox allocation failed "
4205 "HBA state x%x\n",
4206 phba->pport->port_state);
4207 return;
4208 }
4209
4210 lpfc_unreg_vfi(mbox, phba->pport->vfi);
4211 mbox->vport = phba->pport;
4212 mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
4213
4214 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4215 if (rc == MBX_NOT_FINISHED) {
4216 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4217 "2557 UNREG_VFI issue mbox failed rc x%x "
4218 "HBA state x%x\n",
4219 rc, phba->pport->port_state);
4220 mempool_free(mbox, phba->mbox_mem_pool);
4221 return;
4222 }
4223
4224 /* Unregister FCF */
4225 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4226 if (!mbox) {
4227 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4228			"2551 UNREG_FCFI mbox allocation failed "
4229 "HBA state x%x\n",
4230 phba->pport->port_state);
4231 return;
4232 }
4233
4234 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
4235 mbox->vport = phba->pport;
4236 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
4237 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4238
4239 if (rc == MBX_NOT_FINISHED) {
4240 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4241 "2552 UNREG_FCFI issue mbox failed rc x%x "
4242 "HBA state x%x\n",
4243 rc, phba->pport->port_state);
4244 mempool_free(mbox, phba->mbox_mem_pool);
4245 return;
4246 }
4247
4248 spin_lock_irq(&phba->hbalock);
4249 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED |
4250 FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE |
4251 FCF_VALID_VLAN);
4252 spin_unlock_irq(&phba->hbalock);
4253
4254 /*
4255 * If driver is not unloading, check if there is any other
4256 * FCF record that can be used for discovery.
4257 */
4258 if ((phba->pport->load_flag & FC_UNLOADING) ||
4259 (phba->link_state < LPFC_LINK_UP))
4260 return;
4261
4262 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
4263
4264 if (rc)
4265 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4266 "2553 lpfc_unregister_unused_fcf failed to read FCF"
4267 " record HBA state x%x\n",
4268 phba->pport->port_state);
4269}
4270
4271/**
4272 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
4273 * @phba: Pointer to hba context object.
4274 * @buff: Buffer containing the FCF connection table as in the config
4275 * region.
4276 * This function creates the driver data structures for the FCF connection
4277 * record table read from config region 23.
4278 */
4279static void
4280lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
4281 uint8_t *buff)
4282{
4283 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4284 struct lpfc_fcf_conn_hdr *conn_hdr;
4285 struct lpfc_fcf_conn_rec *conn_rec;
4286 uint32_t record_count;
4287 int i;
4288
4289 /* Free the current connect table */
4290	list_for_each_entry_safe(conn_entry, next_conn_entry,
4291		&phba->fcf_conn_rec_list, list) {
4292		list_del_init(&conn_entry->list);
4293		kfree(conn_entry);
4294	}
4294 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
4295	record_count = conn_hdr->length * sizeof(uint32_t) /
4296 sizeof(struct lpfc_fcf_conn_rec);
4297
4298 conn_rec = (struct lpfc_fcf_conn_rec *)
4299 (buff + sizeof(struct lpfc_fcf_conn_hdr));
4300
4301 for (i = 0; i < record_count; i++) {
4302 if (!(conn_rec[i].flags & FCFCNCT_VALID))
4303 continue;
4304 conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
4305 GFP_KERNEL);
4306 if (!conn_entry) {
4307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4308 "2566 Failed to allocate connection"
4309 " table entry\n");
4310 return;
4311 }
4312
4313 memcpy(&conn_entry->conn_rec, &conn_rec[i],
4314 sizeof(struct lpfc_fcf_conn_rec));
4315 conn_entry->conn_rec.vlan_tag =
4316 le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
4317 conn_entry->conn_rec.flags =
4318 le16_to_cpu(conn_entry->conn_rec.flags);
4319 list_add_tail(&conn_entry->list,
4320 &phba->fcf_conn_rec_list);
4321 }
4322}
4323
4324/**
4325 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
4326 * @phba: Pointer to hba context object.
4327 * @buff: Buffer containing the FCoE parameter data structure.
4328 *
4329 * This function updates the driver data structure with config
4330 * parameters read from config region 23.
4331 */
4332static void
4333lpfc_read_fcoe_param(struct lpfc_hba *phba,
4334 uint8_t *buff)
4335{
4336 struct lpfc_fip_param_hdr *fcoe_param_hdr;
4337 struct lpfc_fcoe_params *fcoe_param;
4338
4339 fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
4340 buff;
4341	fcoe_param = (struct lpfc_fcoe_params *)
4342		(buff + sizeof(struct lpfc_fip_param_hdr));
4343
4344 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
4345 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
4346 return;
4347
4348 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4349 FIPP_MODE_ON)
4350 phba->cfg_enable_fip = 1;
4351
4352 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4353 FIPP_MODE_OFF)
4354 phba->cfg_enable_fip = 0;
4355
4356 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
4357 phba->valid_vlan = 1;
4358 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
4359 0xFFF;
4360 }
4361
4362 phba->fc_map[0] = fcoe_param->fc_map[0];
4363 phba->fc_map[1] = fcoe_param->fc_map[1];
4364 phba->fc_map[2] = fcoe_param->fc_map[2];
4365 return;
4366}
4367
4368/**
4369 * lpfc_get_rec_conf23 - Get a record type in config region data.
4370 * @buff: Buffer containing config region 23 data.
4371 * @size: Size of the data buffer.
4372 * @rec_type: Record type to be searched.
4373 *
4374 * This function searches the config region data to find the beginning
4375 * of the record specified by rec_type. If the record is found, this
4376 * function returns a pointer to the record; otherwise it returns NULL.
4377 */
4378static uint8_t *
4379lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
4380{
4381 uint32_t offset = 0, rec_length;
4382
4383 if ((buff[0] == LPFC_REGION23_LAST_REC) ||
4384 (size < sizeof(uint32_t)))
4385 return NULL;
4386
4387 rec_length = buff[offset + 1];
4388
4389 /*
4390 * One TLV record has one word header and number of data words
4391 * specified in the rec_length field of the record header.
4392 */
4393 while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
4394 <= size) {
4395 if (buff[offset] == rec_type)
4396 return &buff[offset];
4397
4398 if (buff[offset] == LPFC_REGION23_LAST_REC)
4399 return NULL;
4400
4401 offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
4402 rec_length = buff[offset + 1];
4403 }
4404 return NULL;
4405}
4406
4407/**
4408 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
4409 * @phba: Pointer to lpfc_hba data structure.
4410 * @buff: Buffer containing config region 23 data.
4411 * @size: Size of the data buffer.
4412 *
4413 * This function parses the FCoE config parameters in config region 23 and
4414 * populates the driver data structure with the parameters.
4415 */
4416void
4417lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
4418 uint8_t *buff,
4419 uint32_t size)
4420{
4421 uint32_t offset = 0, rec_length;
4422 uint8_t *rec_ptr;
4423
4424 /*
4425 * If data size is less than 2 words signature and version cannot be
4426 * verified.
4427 */
4428 if (size < 2*sizeof(uint32_t))
4429 return;
4430
4431 /* Check the region signature first */
4432 if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
4433 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4434 "2567 Config region 23 has bad signature\n");
4435 return;
4436 }
4437
4438 offset += 4;
4439
4440 /* Check the data structure version */
4441 if (buff[offset] != LPFC_REGION23_VERSION) {
4442 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4443 "2568 Config region 23 has bad version\n");
4444 return;
4445 }
4446 offset += 4;
4447
4448 rec_length = buff[offset + 1];
4449
4450 /* Read FCoE param record */
4451 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
4452 size - offset, FCOE_PARAM_TYPE);
4453 if (rec_ptr)
4454 lpfc_read_fcoe_param(phba, rec_ptr);
4455
4456 /* Read FCF connection table */
4457 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
4458 size - offset, FCOE_CONN_TBL_TYPE);
4459 if (rec_ptr)
4460 lpfc_read_fcf_conn_tbl(phba, rec_ptr);
4461
4462}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 4168c7b498b..02aa016b93e 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -471,6 +471,35 @@ struct serv_parm { /* Structure is in Big Endian format */
471}; 471};
472 472
473/* 473/*
474 * Virtual Fabric Tagging Header
475 */
476struct fc_vft_header {
477 uint32_t word0;
478#define fc_vft_hdr_r_ctl_SHIFT 24
479#define fc_vft_hdr_r_ctl_MASK 0xFF
480#define fc_vft_hdr_r_ctl_WORD word0
481#define fc_vft_hdr_ver_SHIFT 22
482#define fc_vft_hdr_ver_MASK 0x3
483#define fc_vft_hdr_ver_WORD word0
484#define fc_vft_hdr_type_SHIFT 18
485#define fc_vft_hdr_type_MASK 0xF
486#define fc_vft_hdr_type_WORD word0
487#define fc_vft_hdr_e_SHIFT 16
488#define fc_vft_hdr_e_MASK 0x1
489#define fc_vft_hdr_e_WORD word0
490#define fc_vft_hdr_priority_SHIFT 13
491#define fc_vft_hdr_priority_MASK 0x7
492#define fc_vft_hdr_priority_WORD word0
493#define fc_vft_hdr_vf_id_SHIFT 1
494#define fc_vft_hdr_vf_id_MASK 0xFFF
495#define fc_vft_hdr_vf_id_WORD word0
496 uint32_t word1;
497#define fc_vft_hdr_hopct_SHIFT 24
498#define fc_vft_hdr_hopct_MASK 0xFF
499#define fc_vft_hdr_hopct_WORD word1
500};
501
502/*
474 * Extended Link Service LS_COMMAND codes (Payload Word 0) 503 * Extended Link Service LS_COMMAND codes (Payload Word 0)
475 */ 504 */
476#ifdef __BIG_ENDIAN_BITFIELD 505#ifdef __BIG_ENDIAN_BITFIELD
@@ -1152,6 +1181,9 @@ typedef struct {
1152#define PCI_DEVICE_ID_HORNET 0xfe05 1181#define PCI_DEVICE_ID_HORNET 0xfe05
1153#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 1182#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
1154#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 1183#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
1184#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
1185#define PCI_DEVICE_ID_TIGERSHARK 0x0704
1186#define PCI_DEVICE_ID_TIGERSHARK_S 0x0705
1155 1187
1156#define JEDEC_ID_ADDRESS 0x0080001c 1188#define JEDEC_ID_ADDRESS 0x0080001c
1157#define FIREFLY_JEDEC_ID 0x1ACC 1189#define FIREFLY_JEDEC_ID 0x1ACC
@@ -1342,15 +1374,21 @@ typedef struct { /* FireFly BIU registers */
1342#define MBX_READ_LA64 0x95 1374#define MBX_READ_LA64 0x95
1343#define MBX_REG_VPI 0x96 1375#define MBX_REG_VPI 0x96
1344#define MBX_UNREG_VPI 0x97 1376#define MBX_UNREG_VPI 0x97
1345#define MBX_REG_VNPID 0x96
1346#define MBX_UNREG_VNPID 0x97
1347 1377
1348#define MBX_WRITE_WWN 0x98 1378#define MBX_WRITE_WWN 0x98
1349#define MBX_SET_DEBUG 0x99 1379#define MBX_SET_DEBUG 0x99
1350#define MBX_LOAD_EXP_ROM 0x9C 1380#define MBX_LOAD_EXP_ROM 0x9C
1351 1381#define MBX_SLI4_CONFIG 0x9B
1352#define MBX_MAX_CMDS 0x9D 1382#define MBX_SLI4_REQ_FTRS 0x9D
1383#define MBX_MAX_CMDS 0x9E
1384#define MBX_RESUME_RPI 0x9E
1353#define MBX_SLI2_CMD_MASK 0x80 1385#define MBX_SLI2_CMD_MASK 0x80
1386#define MBX_REG_VFI 0x9F
1387#define MBX_REG_FCFI 0xA0
1388#define MBX_UNREG_VFI 0xA1
1389#define MBX_UNREG_FCFI 0xA2
1390#define MBX_INIT_VFI 0xA3
1391#define MBX_INIT_VPI 0xA4
1354 1392
1355/* IOCB Commands */ 1393/* IOCB Commands */
1356 1394
@@ -1440,6 +1478,16 @@ typedef struct { /* FireFly BIU registers */
1440#define CMD_IOCB_LOGENTRY_CN 0x94 1478#define CMD_IOCB_LOGENTRY_CN 0x94
1441#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 1479#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
1442 1480
1481/* Unhandled Data Security SLI Commands */
1482#define DSSCMD_IWRITE64_CR 0xD8
1483#define DSSCMD_IWRITE64_CX 0xD9
1484#define DSSCMD_IREAD64_CR 0xDA
1485#define DSSCMD_IREAD64_CX 0xDB
1486#define DSSCMD_INVALIDATE_DEK 0xDC
1487#define DSSCMD_SET_KEK 0xDD
1488#define DSSCMD_GET_KEK_ID 0xDE
1489#define DSSCMD_GEN_XFER 0xDF
1490
1443#define CMD_MAX_IOCB_CMD 0xE6 1491#define CMD_MAX_IOCB_CMD 0xE6
1444#define CMD_IOCB_MASK 0xff 1492#define CMD_IOCB_MASK 0xff
1445 1493
@@ -1466,6 +1514,7 @@ typedef struct { /* FireFly BIU registers */
1466#define MBXERR_BAD_RCV_LENGTH 14 1514#define MBXERR_BAD_RCV_LENGTH 14
1467#define MBXERR_DMA_ERROR 15 1515#define MBXERR_DMA_ERROR 15
1468#define MBXERR_ERROR 16 1516#define MBXERR_ERROR 16
1517#define MBXERR_LINK_DOWN 0x33
1469#define MBX_NOT_FINISHED 255 1518#define MBX_NOT_FINISHED 255
1470 1519
1471#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ 1520#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
@@ -1504,32 +1553,6 @@ struct ulp_bde {
1504#endif 1553#endif
1505}; 1554};
1506 1555
1507struct ulp_bde64 { /* SLI-2 */
1508 union ULP_BDE_TUS {
1509 uint32_t w;
1510 struct {
1511#ifdef __BIG_ENDIAN_BITFIELD
1512 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
1513 VALUE !! */
1514 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
1515#else /* __LITTLE_ENDIAN_BITFIELD */
1516 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
1517 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
1518 VALUE !! */
1519#endif
1520#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
1521#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
1522#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
1523#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
1524#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
1525#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
1526#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
1527 } f;
1528 } tus;
1529 uint32_t addrLow;
1530 uint32_t addrHigh;
1531};
1532
1533typedef struct ULP_BDL { /* SLI-2 */ 1556typedef struct ULP_BDL { /* SLI-2 */
1534#ifdef __BIG_ENDIAN_BITFIELD 1557#ifdef __BIG_ENDIAN_BITFIELD
1535 uint32_t bdeFlags:8; /* BDL Flags */ 1558 uint32_t bdeFlags:8; /* BDL Flags */
@@ -2287,7 +2310,7 @@ typedef struct {
2287 uint32_t rsvd3; 2310 uint32_t rsvd3;
2288 uint32_t rsvd4; 2311 uint32_t rsvd4;
2289 uint32_t rsvd5; 2312 uint32_t rsvd5;
2290 uint16_t rsvd6; 2313 uint16_t vfi;
2291 uint16_t vpi; 2314 uint16_t vpi;
2292#else /* __LITTLE_ENDIAN */ 2315#else /* __LITTLE_ENDIAN */
2293 uint32_t rsvd1; 2316 uint32_t rsvd1;
@@ -2297,7 +2320,7 @@ typedef struct {
2297 uint32_t rsvd4; 2320 uint32_t rsvd4;
2298 uint32_t rsvd5; 2321 uint32_t rsvd5;
2299 uint16_t vpi; 2322 uint16_t vpi;
2300 uint16_t rsvd6; 2323 uint16_t vfi;
2301#endif 2324#endif
2302} REG_VPI_VAR; 2325} REG_VPI_VAR;
2303 2326
@@ -2457,7 +2480,7 @@ typedef struct {
2457 uint32_t entry_index:16; 2480 uint32_t entry_index:16;
2458#endif 2481#endif
2459 2482
2460 uint32_t rsvd1; 2483 uint32_t sli4_length;
2461 uint32_t word_cnt; 2484 uint32_t word_cnt;
2462 uint32_t resp_offset; 2485 uint32_t resp_offset;
2463} DUMP_VAR; 2486} DUMP_VAR;
@@ -2470,9 +2493,32 @@ typedef struct {
2470#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ 2493#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
2471#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ 2494#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
2472 2495
2496#define DMP_REGION_VPORT 0x16 /* VPort info region */
2497#define DMP_VPORT_REGION_SIZE 0x200
2498#define DMP_MBOX_OFFSET_WORD 0x5
2499
2500#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */
2501#define DMP_FCOEPARAM_RGN_SIZE 0x400
2502
2473#define WAKE_UP_PARMS_REGION_ID 4 2503#define WAKE_UP_PARMS_REGION_ID 4
2474#define WAKE_UP_PARMS_WORD_SIZE 15 2504#define WAKE_UP_PARMS_WORD_SIZE 15
2475 2505
2506struct vport_rec {
2507 uint8_t wwpn[8];
2508 uint8_t wwnn[8];
2509};
2510
2511#define VPORT_INFO_SIG 0x32324752
2512#define VPORT_INFO_REV_MASK 0xff
2513#define VPORT_INFO_REV 0x1
2514#define MAX_STATIC_VPORT_COUNT 16
2515struct static_vport_info {
2516 uint32_t signature;
2517 uint32_t rev;
2518 struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT];
2519 uint32_t resvd[66];
2520};
2521
2476/* Option rom version structure */ 2522/* Option rom version structure */
2477struct prog_id { 2523struct prog_id {
2478#ifdef __BIG_ENDIAN_BITFIELD 2524#ifdef __BIG_ENDIAN_BITFIELD
@@ -2697,7 +2743,9 @@ typedef struct {
2697#endif 2743#endif
2698 2744
2699#ifdef __BIG_ENDIAN_BITFIELD 2745#ifdef __BIG_ENDIAN_BITFIELD
2700 uint32_t rsvd1 : 23; /* Reserved */ 2746 uint32_t rsvd1 : 19; /* Reserved */
2747 uint32_t cdss : 1; /* Configure Data Security SLI */
2748 uint32_t rsvd2 : 3; /* Reserved */
2701 uint32_t cbg : 1; /* Configure BlockGuard */ 2749 uint32_t cbg : 1; /* Configure BlockGuard */
2702 uint32_t cmv : 1; /* Configure Max VPIs */ 2750 uint32_t cmv : 1; /* Configure Max VPIs */
2703 uint32_t ccrp : 1; /* Config Command Ring Polling */ 2751 uint32_t ccrp : 1; /* Config Command Ring Polling */
@@ -2717,10 +2765,14 @@ typedef struct {
2717 uint32_t ccrp : 1; /* Config Command Ring Polling */ 2765 uint32_t ccrp : 1; /* Config Command Ring Polling */
2718 uint32_t cmv : 1; /* Configure Max VPIs */ 2766 uint32_t cmv : 1; /* Configure Max VPIs */
2719 uint32_t cbg : 1; /* Configure BlockGuard */ 2767 uint32_t cbg : 1; /* Configure BlockGuard */
2720 uint32_t rsvd1 : 23; /* Reserved */ 2768 uint32_t rsvd2 : 3; /* Reserved */
2769 uint32_t cdss : 1; /* Configure Data Security SLI */
2770 uint32_t rsvd1 : 19; /* Reserved */
2721#endif 2771#endif
2722#ifdef __BIG_ENDIAN_BITFIELD 2772#ifdef __BIG_ENDIAN_BITFIELD
2723 uint32_t rsvd2 : 23; /* Reserved */ 2773 uint32_t rsvd3 : 19; /* Reserved */
2774 uint32_t gdss : 1; /* Grant Data Security SLI */
2775 uint32_t rsvd4 : 3; /* Reserved */
2724 uint32_t gbg : 1; /* Grant BlockGuard */ 2776 uint32_t gbg : 1; /* Grant BlockGuard */
2725 uint32_t gmv : 1; /* Grant Max VPIs */ 2777 uint32_t gmv : 1; /* Grant Max VPIs */
2726 uint32_t gcrp : 1; /* Grant Command Ring Polling */ 2778 uint32_t gcrp : 1; /* Grant Command Ring Polling */
@@ -2740,7 +2792,9 @@ typedef struct {
2740 uint32_t gcrp : 1; /* Grant Command Ring Polling */ 2792 uint32_t gcrp : 1; /* Grant Command Ring Polling */
2741 uint32_t gmv : 1; /* Grant Max VPIs */ 2793 uint32_t gmv : 1; /* Grant Max VPIs */
2742 uint32_t gbg : 1; /* Grant BlockGuard */ 2794 uint32_t gbg : 1; /* Grant BlockGuard */
2743 uint32_t rsvd2 : 23; /* Reserved */ 2795 uint32_t rsvd4 : 3; /* Reserved */
2796 uint32_t gdss : 1; /* Grant Data Security SLI */
2797 uint32_t rsvd3 : 19; /* Reserved */
2744#endif 2798#endif
2745 2799
2746#ifdef __BIG_ENDIAN_BITFIELD 2800#ifdef __BIG_ENDIAN_BITFIELD
@@ -2753,20 +2807,20 @@ typedef struct {
2753 2807
2754#ifdef __BIG_ENDIAN_BITFIELD 2808#ifdef __BIG_ENDIAN_BITFIELD
2755 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ 2809 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
2756 uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ 2810 uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */
2757#else /* __LITTLE_ENDIAN */ 2811#else /* __LITTLE_ENDIAN */
2758 uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ 2812 uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */
2759 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ 2813 uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */
2760#endif 2814#endif
2761 2815
2762 uint32_t rsvd4; /* Reserved */ 2816 uint32_t rsvd6; /* Reserved */
2763 2817
2764#ifdef __BIG_ENDIAN_BITFIELD 2818#ifdef __BIG_ENDIAN_BITFIELD
2765 uint32_t rsvd5 : 16; /* Reserved */ 2819 uint32_t rsvd7 : 16; /* Reserved */
2766 uint32_t max_vpi : 16; /* Max number of virt N-Ports */ 2820 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
2767#else /* __LITTLE_ENDIAN */ 2821#else /* __LITTLE_ENDIAN */
2768 uint32_t max_vpi : 16; /* Max number of virt N-Ports */ 2822 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
2769 uint32_t rsvd5 : 16; /* Reserved */ 2823 uint32_t rsvd7 : 16; /* Reserved */
2770#endif 2824#endif
2771 2825
2772} CONFIG_PORT_VAR; 2826} CONFIG_PORT_VAR;
@@ -3666,3 +3720,5 @@ lpfc_error_lost_link(IOCB_t *iocbp)
3666#define MENLO_TIMEOUT 30 3720#define MENLO_TIMEOUT 30
3667#define SETVAR_MLOMNT 0x103107 3721#define SETVAR_MLOMNT 0x103107
3668#define SETVAR_MLORST 0x103007 3722#define SETVAR_MLORST 0x103007
3723
3724#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
new file mode 100644
index 00000000000..39c34b3ad29
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -0,0 +1,2141 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21/* Macros to deal with bit fields. Each bit field must have 3 #defines
22 * associated with it (_SHIFT, _MASK, and _WORD).
23 * E.g., for a bit field that starts at bit 7 of the "field4" field of a
24 * structure and is 2 bits in size, the following #defines must exist:
25 * struct temp {
26 * uint32_t field1;
27 * uint32_t field2;
28 * uint32_t field3;
29 * uint32_t field4;
30 * #define example_bit_field_SHIFT 7
31 * #define example_bit_field_MASK 0x03
32 * #define example_bit_field_WORD field4
33 * uint32_t field5;
34 * };
35 * Then the macros below may be used to get or set the value of that field.
36 * E.g., to get the value of the bit field from the above example:
37 * struct temp t1;
38 * value = bf_get(example_bit_field, &t1);
39 * And then to set that bit field:
40 * bf_set(example_bit_field, &t1, 2);
41 * Or clear that bit field:
42 * bf_set(example_bit_field, &t1, 0);
43 */
44#define bf_get(name, ptr) \
45 (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
46#define bf_set(name, ptr, value) \
47 ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
48 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
49
50struct dma_address {
51 uint32_t addr_lo;
52 uint32_t addr_hi;
53};
54
55#define LPFC_SLI4_BAR0 1
56#define LPFC_SLI4_BAR1 2
57#define LPFC_SLI4_BAR2 4
58
59#define LPFC_SLI4_MBX_EMBED true
60#define LPFC_SLI4_MBX_NEMBED false
61
62#define LPFC_SLI4_MB_WORD_COUNT 64
63#define LPFC_MAX_MQ_PAGE 8
64#define LPFC_MAX_WQ_PAGE 8
65#define LPFC_MAX_CQ_PAGE 4
66#define LPFC_MAX_EQ_PAGE 8
67
68#define LPFC_VIR_FUNC_MAX 32 /* Maximum number of virtual functions */
69#define LPFC_PCI_FUNC_MAX 5 /* Maximum number of PCI functions */
70#define LPFC_VFR_PAGE_SIZE 0x1000 /* 4KB BAR2 per-VF register page size */
71
72/* Define SLI4 Alignment requirements. */
73#define LPFC_ALIGN_16_BYTE 16
74#define LPFC_ALIGN_64_BYTE 64
75
76/* Define SLI4 specific definitions. */
77#define LPFC_MQ_CQE_BYTE_OFFSET 256
78#define LPFC_MBX_CMD_HDR_LENGTH 16
79#define LPFC_MBX_ERROR_RANGE 0x4000
80#define LPFC_BMBX_BIT1_ADDR_HI 0x2
81#define LPFC_BMBX_BIT1_ADDR_LO 0
82#define LPFC_RPI_HDR_COUNT 64
83#define LPFC_HDR_TEMPLATE_SIZE 4096
84#define LPFC_RPI_ALLOC_ERROR 0xFFFF
85#define LPFC_FCF_RECORD_WD_CNT 132
86#define LPFC_ENTIRE_FCF_DATABASE 0
87#define LPFC_DFLT_FCF_INDEX 0
88
89/* Virtual function numbers */
90#define LPFC_VF0 0
91#define LPFC_VF1 1
92#define LPFC_VF2 2
93#define LPFC_VF3 3
94#define LPFC_VF4 4
95#define LPFC_VF5 5
96#define LPFC_VF6 6
97#define LPFC_VF7 7
98#define LPFC_VF8 8
99#define LPFC_VF9 9
100#define LPFC_VF10 10
101#define LPFC_VF11 11
102#define LPFC_VF12 12
103#define LPFC_VF13 13
104#define LPFC_VF14 14
105#define LPFC_VF15 15
106#define LPFC_VF16 16
107#define LPFC_VF17 17
108#define LPFC_VF18 18
109#define LPFC_VF19 19
110#define LPFC_VF20 20
111#define LPFC_VF21 21
112#define LPFC_VF22 22
113#define LPFC_VF23 23
114#define LPFC_VF24 24
115#define LPFC_VF25 25
116#define LPFC_VF26 26
117#define LPFC_VF27 27
118#define LPFC_VF28 28
119#define LPFC_VF29 29
120#define LPFC_VF30 30
121#define LPFC_VF31 31
122
123/* PCI function numbers */
124#define LPFC_PCI_FUNC0 0
125#define LPFC_PCI_FUNC1 1
126#define LPFC_PCI_FUNC2 2
127#define LPFC_PCI_FUNC3 3
128#define LPFC_PCI_FUNC4 4
129
130/* Active interrupt test count */
131#define LPFC_ACT_INTR_CNT 4
132
133/* Delay Multiplier constant */
134#define LPFC_DMULT_CONST 651042
135#define LPFC_MIM_IMAX 636
136#define LPFC_FP_DEF_IMAX 10000
137#define LPFC_SP_DEF_IMAX 10000
138
139struct ulp_bde64 {
140 union ULP_BDE_TUS {
141 uint32_t w;
142 struct {
143#ifdef __BIG_ENDIAN_BITFIELD
144 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
145 VALUE !! */
146 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
147#else /* __LITTLE_ENDIAN_BITFIELD */
148 uint32_t bdeSize:24; /* Size of buffer (in bytes) */
149 uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
150 VALUE !! */
151#endif
152#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
153#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
154#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
155#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
156#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
157#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
158#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
159 } f;
160 } tus;
161 uint32_t addrLow;
162 uint32_t addrHigh;
163};
164
165struct lpfc_sli4_flags {
166 uint32_t word0;
167#define lpfc_fip_flag_SHIFT 0
168#define lpfc_fip_flag_MASK 0x00000001
169#define lpfc_fip_flag_WORD word0
170};
171
172/* event queue entry structure */
173struct lpfc_eqe {
174 uint32_t word0;
175#define lpfc_eqe_resource_id_SHIFT 16
176#define lpfc_eqe_resource_id_MASK 0x000000FF
177#define lpfc_eqe_resource_id_WORD word0
178#define lpfc_eqe_minor_code_SHIFT 4
179#define lpfc_eqe_minor_code_MASK 0x00000FFF
180#define lpfc_eqe_minor_code_WORD word0
181#define lpfc_eqe_major_code_SHIFT 1
182#define lpfc_eqe_major_code_MASK 0x00000007
183#define lpfc_eqe_major_code_WORD word0
184#define lpfc_eqe_valid_SHIFT 0
185#define lpfc_eqe_valid_MASK 0x00000001
186#define lpfc_eqe_valid_WORD word0
187};
188
189/* completion queue entry structure (common fields for all cqe types) */
190struct lpfc_cqe {
191 uint32_t reserved0;
192 uint32_t reserved1;
193 uint32_t reserved2;
194 uint32_t word3;
195#define lpfc_cqe_valid_SHIFT 31
196#define lpfc_cqe_valid_MASK 0x00000001
197#define lpfc_cqe_valid_WORD word3
198#define lpfc_cqe_code_SHIFT 16
199#define lpfc_cqe_code_MASK 0x000000FF
200#define lpfc_cqe_code_WORD word3
201};
202
203/* Completion Queue Entry Status Codes */
204#define CQE_STATUS_SUCCESS 0x0
205#define CQE_STATUS_FCP_RSP_FAILURE 0x1
206#define CQE_STATUS_REMOTE_STOP 0x2
207#define CQE_STATUS_LOCAL_REJECT 0x3
208#define CQE_STATUS_NPORT_RJT 0x4
209#define CQE_STATUS_FABRIC_RJT 0x5
210#define CQE_STATUS_NPORT_BSY 0x6
211#define CQE_STATUS_FABRIC_BSY 0x7
212#define CQE_STATUS_INTERMED_RSP 0x8
213#define CQE_STATUS_LS_RJT 0x9
214#define CQE_STATUS_CMD_REJECT 0xb
215#define CQE_STATUS_FCP_TGT_LENCHECK 0xc
216#define CQE_STATUS_NEED_BUFF_ENTRY 0xf
217
218/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
219#define CQE_HW_STATUS_NO_ERR 0x0
220#define CQE_HW_STATUS_UNDERRUN 0x1
221#define CQE_HW_STATUS_OVERRUN 0x2
222
223/* Completion Queue Entry Codes */
224#define CQE_CODE_COMPL_WQE 0x1
225#define CQE_CODE_RELEASE_WQE 0x2
226#define CQE_CODE_RECEIVE 0x4
227#define CQE_CODE_XRI_ABORTED 0x5
228
229/* completion queue entry for wqe completions */
230struct lpfc_wcqe_complete {
231 uint32_t word0;
232#define lpfc_wcqe_c_request_tag_SHIFT 16
233#define lpfc_wcqe_c_request_tag_MASK 0x0000FFFF
234#define lpfc_wcqe_c_request_tag_WORD word0
235#define lpfc_wcqe_c_status_SHIFT 8
236#define lpfc_wcqe_c_status_MASK 0x000000FF
237#define lpfc_wcqe_c_status_WORD word0
238#define lpfc_wcqe_c_hw_status_SHIFT 0
239#define lpfc_wcqe_c_hw_status_MASK 0x000000FF
240#define lpfc_wcqe_c_hw_status_WORD word0
241 uint32_t total_data_placed;
242 uint32_t parameter;
243 uint32_t word3;
244#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT
245#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK
246#define lpfc_wcqe_c_valid_WORD lpfc_cqe_valid_WORD
247#define lpfc_wcqe_c_xb_SHIFT 28
248#define lpfc_wcqe_c_xb_MASK 0x00000001
249#define lpfc_wcqe_c_xb_WORD word3
250#define lpfc_wcqe_c_pv_SHIFT 27
251#define lpfc_wcqe_c_pv_MASK 0x00000001
252#define lpfc_wcqe_c_pv_WORD word3
253#define lpfc_wcqe_c_priority_SHIFT 24
254#define lpfc_wcqe_c_priority_MASK 0x00000007
255#define lpfc_wcqe_c_priority_WORD word3
256#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT
257#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK
258#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD
259};
260
261/* completion queue entry for wqe release */
262struct lpfc_wcqe_release {
263 uint32_t reserved0;
264 uint32_t reserved1;
265 uint32_t word2;
266#define lpfc_wcqe_r_wq_id_SHIFT 16
267#define lpfc_wcqe_r_wq_id_MASK 0x0000FFFF
268#define lpfc_wcqe_r_wq_id_WORD word2
269#define lpfc_wcqe_r_wqe_index_SHIFT 0
270#define lpfc_wcqe_r_wqe_index_MASK 0x0000FFFF
271#define lpfc_wcqe_r_wqe_index_WORD word2
272 uint32_t word3;
273#define lpfc_wcqe_r_valid_SHIFT lpfc_cqe_valid_SHIFT
274#define lpfc_wcqe_r_valid_MASK lpfc_cqe_valid_MASK
275#define lpfc_wcqe_r_valid_WORD lpfc_cqe_valid_WORD
276#define lpfc_wcqe_r_code_SHIFT lpfc_cqe_code_SHIFT
277#define lpfc_wcqe_r_code_MASK lpfc_cqe_code_MASK
278#define lpfc_wcqe_r_code_WORD lpfc_cqe_code_WORD
279};
280
281struct sli4_wcqe_xri_aborted {
282 uint32_t word0;
283#define lpfc_wcqe_xa_status_SHIFT 8
284#define lpfc_wcqe_xa_status_MASK 0x000000FF
285#define lpfc_wcqe_xa_status_WORD word0
286 uint32_t parameter;
287 uint32_t word2;
288#define lpfc_wcqe_xa_remote_xid_SHIFT 16
289#define lpfc_wcqe_xa_remote_xid_MASK 0x0000FFFF
290#define lpfc_wcqe_xa_remote_xid_WORD word2
291#define lpfc_wcqe_xa_xri_SHIFT 0
292#define lpfc_wcqe_xa_xri_MASK 0x0000FFFF
293#define lpfc_wcqe_xa_xri_WORD word2
294 uint32_t word3;
295#define lpfc_wcqe_xa_valid_SHIFT lpfc_cqe_valid_SHIFT
296#define lpfc_wcqe_xa_valid_MASK lpfc_cqe_valid_MASK
297#define lpfc_wcqe_xa_valid_WORD lpfc_cqe_valid_WORD
298#define lpfc_wcqe_xa_ia_SHIFT 30
299#define lpfc_wcqe_xa_ia_MASK 0x00000001
300#define lpfc_wcqe_xa_ia_WORD word3
301#define CQE_XRI_ABORTED_IA_REMOTE 0
302#define CQE_XRI_ABORTED_IA_LOCAL 1
303#define lpfc_wcqe_xa_br_SHIFT 29
304#define lpfc_wcqe_xa_br_MASK 0x00000001
305#define lpfc_wcqe_xa_br_WORD word3
306#define CQE_XRI_ABORTED_BR_BA_ACC 0
307#define CQE_XRI_ABORTED_BR_BA_RJT 1
308#define lpfc_wcqe_xa_eo_SHIFT 28
309#define lpfc_wcqe_xa_eo_MASK 0x00000001
310#define lpfc_wcqe_xa_eo_WORD word3
311#define CQE_XRI_ABORTED_EO_REMOTE 0
312#define CQE_XRI_ABORTED_EO_LOCAL 1
313#define lpfc_wcqe_xa_code_SHIFT lpfc_cqe_code_SHIFT
314#define lpfc_wcqe_xa_code_MASK lpfc_cqe_code_MASK
315#define lpfc_wcqe_xa_code_WORD lpfc_cqe_code_WORD
316};
317
318/* completion queue entry structure for rqe completion */
319struct lpfc_rcqe {
320 uint32_t word0;
321#define lpfc_rcqe_bindex_SHIFT 16
322#define lpfc_rcqe_bindex_MASK 0x0000FFF
323#define lpfc_rcqe_bindex_WORD word0
324#define lpfc_rcqe_status_SHIFT 8
325#define lpfc_rcqe_status_MASK 0x000000FF
326#define lpfc_rcqe_status_WORD word0
327#define FC_STATUS_RQ_SUCCESS 0x10 /* Async receive successful */
328#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
329#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
330#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
331 uint32_t reserved1;
332 uint32_t word2;
333#define lpfc_rcqe_length_SHIFT 16
334#define lpfc_rcqe_length_MASK 0x0000FFFF
335#define lpfc_rcqe_length_WORD word2
336#define lpfc_rcqe_rq_id_SHIFT 6
337#define lpfc_rcqe_rq_id_MASK 0x000003FF
338#define lpfc_rcqe_rq_id_WORD word2
339#define lpfc_rcqe_fcf_id_SHIFT 0
340#define lpfc_rcqe_fcf_id_MASK 0x0000003F
341#define lpfc_rcqe_fcf_id_WORD word2
342 uint32_t word3;
343#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT
344#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK
345#define lpfc_rcqe_valid_WORD lpfc_cqe_valid_WORD
346#define lpfc_rcqe_port_SHIFT 30
347#define lpfc_rcqe_port_MASK 0x00000001
348#define lpfc_rcqe_port_WORD word3
349#define lpfc_rcqe_hdr_length_SHIFT 24
350#define lpfc_rcqe_hdr_length_MASK 0x0000001F
351#define lpfc_rcqe_hdr_length_WORD word3
352#define lpfc_rcqe_code_SHIFT lpfc_cqe_code_SHIFT
353#define lpfc_rcqe_code_MASK lpfc_cqe_code_MASK
354#define lpfc_rcqe_code_WORD lpfc_cqe_code_WORD
355#define lpfc_rcqe_eof_SHIFT 8
356#define lpfc_rcqe_eof_MASK 0x000000FF
357#define lpfc_rcqe_eof_WORD word3
358#define FCOE_EOFn 0x41
359#define FCOE_EOFt 0x42
360#define FCOE_EOFni 0x49
361#define FCOE_EOFa 0x50
362#define lpfc_rcqe_sof_SHIFT 0
363#define lpfc_rcqe_sof_MASK 0x000000FF
364#define lpfc_rcqe_sof_WORD word3
365#define FCOE_SOFi2 0x2d
366#define FCOE_SOFi3 0x2e
367#define FCOE_SOFn2 0x35
368#define FCOE_SOFn3 0x36
369};
370
371struct lpfc_wqe_generic {
372 struct ulp_bde64 bde;
373 uint32_t word3;
374 uint32_t word4;
375 uint32_t word5;
376 uint32_t word6;
377#define lpfc_wqe_gen_context_SHIFT 16
378#define lpfc_wqe_gen_context_MASK 0x0000FFFF
379#define lpfc_wqe_gen_context_WORD word6
380#define lpfc_wqe_gen_xri_SHIFT 0
381#define lpfc_wqe_gen_xri_MASK 0x0000FFFF
382#define lpfc_wqe_gen_xri_WORD word6
383 uint32_t word7;
384#define lpfc_wqe_gen_lnk_SHIFT 23
385#define lpfc_wqe_gen_lnk_MASK 0x00000001
386#define lpfc_wqe_gen_lnk_WORD word7
387#define lpfc_wqe_gen_erp_SHIFT 22
388#define lpfc_wqe_gen_erp_MASK 0x00000001
389#define lpfc_wqe_gen_erp_WORD word7
390#define lpfc_wqe_gen_pu_SHIFT 20
391#define lpfc_wqe_gen_pu_MASK 0x00000003
392#define lpfc_wqe_gen_pu_WORD word7
393#define lpfc_wqe_gen_class_SHIFT 16
394#define lpfc_wqe_gen_class_MASK 0x00000007
395#define lpfc_wqe_gen_class_WORD word7
396#define lpfc_wqe_gen_command_SHIFT 8
397#define lpfc_wqe_gen_command_MASK 0x000000FF
398#define lpfc_wqe_gen_command_WORD word7
399#define lpfc_wqe_gen_status_SHIFT 4
400#define lpfc_wqe_gen_status_MASK 0x0000000F
401#define lpfc_wqe_gen_status_WORD word7
402#define lpfc_wqe_gen_ct_SHIFT 2
403#define lpfc_wqe_gen_ct_MASK 0x00000007
404#define lpfc_wqe_gen_ct_WORD word7
405 uint32_t abort_tag;
406 uint32_t word9;
407#define lpfc_wqe_gen_request_tag_SHIFT 0
408#define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF
409#define lpfc_wqe_gen_request_tag_WORD word9
410 uint32_t word10;
411#define lpfc_wqe_gen_ccp_SHIFT 24
412#define lpfc_wqe_gen_ccp_MASK 0x000000FF
413#define lpfc_wqe_gen_ccp_WORD word10
414#define lpfc_wqe_gen_ccpe_SHIFT 23
415#define lpfc_wqe_gen_ccpe_MASK 0x00000001
416#define lpfc_wqe_gen_ccpe_WORD word10
417#define lpfc_wqe_gen_pv_SHIFT 19
418#define lpfc_wqe_gen_pv_MASK 0x00000001
419#define lpfc_wqe_gen_pv_WORD word10
420#define lpfc_wqe_gen_pri_SHIFT 16
421#define lpfc_wqe_gen_pri_MASK 0x00000007
422#define lpfc_wqe_gen_pri_WORD word10
423 uint32_t word11;
424#define lpfc_wqe_gen_cq_id_SHIFT 16
425#define lpfc_wqe_gen_cq_id_MASK 0x000003FF
426#define lpfc_wqe_gen_cq_id_WORD word11
427#define LPFC_WQE_CQ_ID_DEFAULT 0x3ff
428#define lpfc_wqe_gen_wqec_SHIFT 7
429#define lpfc_wqe_gen_wqec_MASK 0x00000001
430#define lpfc_wqe_gen_wqec_WORD word11
431#define lpfc_wqe_gen_cmd_type_SHIFT 0
432#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F
433#define lpfc_wqe_gen_cmd_type_WORD word11
434 uint32_t payload[4];
435};
436
437struct lpfc_rqe {
438 uint32_t address_hi;
439 uint32_t address_lo;
440};
441
442/* buffer descriptors */
443struct lpfc_bde4 {
444 uint32_t addr_hi;
445 uint32_t addr_lo;
446 uint32_t word2;
447#define lpfc_bde4_last_SHIFT 31
448#define lpfc_bde4_last_MASK 0x00000001
449#define lpfc_bde4_last_WORD word2
450#define lpfc_bde4_sge_offset_SHIFT 0
451#define lpfc_bde4_sge_offset_MASK 0x000003FF
452#define lpfc_bde4_sge_offset_WORD word2
453 uint32_t word3;
454#define lpfc_bde4_length_SHIFT 0
455#define lpfc_bde4_length_MASK 0x000000FF
456#define lpfc_bde4_length_WORD word3
457};
458
459struct lpfc_register {
460 uint32_t word0;
461};
462
463#define LPFC_UERR_STATUS_HI 0x00A4
464#define LPFC_UERR_STATUS_LO 0x00A0
465#define LPFC_ONLINE0 0x00B0
466#define LPFC_ONLINE1 0x00B4
467#define LPFC_SCRATCHPAD 0x0058
468
469/* BAR0 Registers */
470#define LPFC_HST_STATE 0x00AC
471#define lpfc_hst_state_perr_SHIFT 31
472#define lpfc_hst_state_perr_MASK 0x1
473#define lpfc_hst_state_perr_WORD word0
474#define lpfc_hst_state_sfi_SHIFT 30
475#define lpfc_hst_state_sfi_MASK 0x1
476#define lpfc_hst_state_sfi_WORD word0
477#define lpfc_hst_state_nip_SHIFT 29
478#define lpfc_hst_state_nip_MASK 0x1
479#define lpfc_hst_state_nip_WORD word0
480#define lpfc_hst_state_ipc_SHIFT 28
481#define lpfc_hst_state_ipc_MASK 0x1
482#define lpfc_hst_state_ipc_WORD word0
483#define lpfc_hst_state_xrom_SHIFT 27
484#define lpfc_hst_state_xrom_MASK 0x1
485#define lpfc_hst_state_xrom_WORD word0
486#define lpfc_hst_state_dl_SHIFT 26
487#define lpfc_hst_state_dl_MASK 0x1
488#define lpfc_hst_state_dl_WORD word0
489#define lpfc_hst_state_port_status_SHIFT 0
490#define lpfc_hst_state_port_status_MASK 0xFFFF
491#define lpfc_hst_state_port_status_WORD word0
492
493#define LPFC_POST_STAGE_POWER_ON_RESET 0x0000
494#define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001
495#define LPFC_POST_STAGE_HOST_RDY 0x0002
496#define LPFC_POST_STAGE_BE_RESET 0x0003
497#define LPFC_POST_STAGE_SEEPROM_CS_START 0x0100
498#define LPFC_POST_STAGE_SEEPROM_CS_DONE 0x0101
499#define LPFC_POST_STAGE_DDR_CONFIG_START 0x0200
500#define LPFC_POST_STAGE_DDR_CONFIG_DONE 0x0201
501#define LPFC_POST_STAGE_DDR_CALIBRATE_START 0x0300
502#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE 0x0301
503#define LPFC_POST_STAGE_DDR_TEST_START 0x0400
504#define LPFC_POST_STAGE_DDR_TEST_DONE 0x0401
505#define LPFC_POST_STAGE_REDBOOT_INIT_START 0x0600
506#define LPFC_POST_STAGE_REDBOOT_INIT_DONE 0x0601
507#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START 0x0700
508#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE 0x0701
509#define LPFC_POST_STAGE_ARMFW_START 0x0800
510#define LPFC_POST_STAGE_DHCP_QUERY_START 0x0900
511#define LPFC_POST_STAGE_DHCP_QUERY_DONE 0x0901
512#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START 0x0A00
513#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE 0x0A01
514#define LPFC_POST_STAGE_RC_OPTION_SET 0x0B00
515#define LPFC_POST_STAGE_SWITCH_LINK 0x0B01
516#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE 0x0B02
517#define LPFC_POST_STAGE_PERFROM_TFTP 0x0B03
518#define LPFC_POST_STAGE_PARSE_XML 0x0B04
519#define LPFC_POST_STAGE_DOWNLOAD_IMAGE 0x0B05
520#define LPFC_POST_STAGE_FLASH_IMAGE 0x0B06
521#define LPFC_POST_STAGE_RC_DONE 0x0B07
522#define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08
523#define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00
524#define LPFC_POST_STAGE_ARMFW_READY 0xC000
525#define LPFC_POST_STAGE_ARMFW_UE 0xF000
526
527#define lpfc_scratchpad_slirev_SHIFT 4
528#define lpfc_scratchpad_slirev_MASK 0xF
529#define lpfc_scratchpad_slirev_WORD word0
530#define lpfc_scratchpad_chiptype_SHIFT 8
531#define lpfc_scratchpad_chiptype_MASK 0xFF
532#define lpfc_scratchpad_chiptype_WORD word0
533#define lpfc_scratchpad_featurelevel1_SHIFT 16
534#define lpfc_scratchpad_featurelevel1_MASK 0xFF
535#define lpfc_scratchpad_featurelevel1_WORD word0
536#define lpfc_scratchpad_featurelevel2_SHIFT 24
537#define lpfc_scratchpad_featurelevel2_MASK 0xFF
538#define lpfc_scratchpad_featurelevel2_WORD word0
539
540/* BAR1 Registers */
541#define LPFC_IMR_MASK_ALL 0xFFFFFFFF
542#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF
543
544#define LPFC_HST_ISR0 0x0C18
545#define LPFC_HST_ISR1 0x0C1C
546#define LPFC_HST_ISR2 0x0C20
547#define LPFC_HST_ISR3 0x0C24
548#define LPFC_HST_ISR4 0x0C28
549
550#define LPFC_HST_IMR0 0x0C48
551#define LPFC_HST_IMR1 0x0C4C
552#define LPFC_HST_IMR2 0x0C50
553#define LPFC_HST_IMR3 0x0C54
554#define LPFC_HST_IMR4 0x0C58
555
556#define LPFC_HST_ISCR0 0x0C78
557#define LPFC_HST_ISCR1 0x0C7C
558#define LPFC_HST_ISCR2 0x0C80
559#define LPFC_HST_ISCR3 0x0C84
560#define LPFC_HST_ISCR4 0x0C88
561
562#define LPFC_SLI4_INTR0 BIT0
563#define LPFC_SLI4_INTR1 BIT1
564#define LPFC_SLI4_INTR2 BIT2
565#define LPFC_SLI4_INTR3 BIT3
566#define LPFC_SLI4_INTR4 BIT4
567#define LPFC_SLI4_INTR5 BIT5
568#define LPFC_SLI4_INTR6 BIT6
569#define LPFC_SLI4_INTR7 BIT7
570#define LPFC_SLI4_INTR8 BIT8
571#define LPFC_SLI4_INTR9 BIT9
572#define LPFC_SLI4_INTR10 BIT10
573#define LPFC_SLI4_INTR11 BIT11
574#define LPFC_SLI4_INTR12 BIT12
575#define LPFC_SLI4_INTR13 BIT13
576#define LPFC_SLI4_INTR14 BIT14
577#define LPFC_SLI4_INTR15 BIT15
578#define LPFC_SLI4_INTR16 BIT16
579#define LPFC_SLI4_INTR17 BIT17
580#define LPFC_SLI4_INTR18 BIT18
581#define LPFC_SLI4_INTR19 BIT19
582#define LPFC_SLI4_INTR20 BIT20
583#define LPFC_SLI4_INTR21 BIT21
584#define LPFC_SLI4_INTR22 BIT22
585#define LPFC_SLI4_INTR23 BIT23
586#define LPFC_SLI4_INTR24 BIT24
587#define LPFC_SLI4_INTR25 BIT25
588#define LPFC_SLI4_INTR26 BIT26
589#define LPFC_SLI4_INTR27 BIT27
590#define LPFC_SLI4_INTR28 BIT28
591#define LPFC_SLI4_INTR29 BIT29
592#define LPFC_SLI4_INTR30 BIT30
593#define LPFC_SLI4_INTR31 BIT31
594
595/* BAR2 Registers */
596#define LPFC_RQ_DOORBELL 0x00A0
597#define lpfc_rq_doorbell_num_posted_SHIFT 16
598#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF
599#define lpfc_rq_doorbell_num_posted_WORD word0
600#define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */
601#define lpfc_rq_doorbell_id_SHIFT 0
602#define lpfc_rq_doorbell_id_MASK 0x03FF
603#define lpfc_rq_doorbell_id_WORD word0
604
605#define LPFC_WQ_DOORBELL 0x0040
606#define lpfc_wq_doorbell_num_posted_SHIFT 24
607#define lpfc_wq_doorbell_num_posted_MASK 0x00FF
608#define lpfc_wq_doorbell_num_posted_WORD word0
609#define lpfc_wq_doorbell_index_SHIFT 16
610#define lpfc_wq_doorbell_index_MASK 0x00FF
611#define lpfc_wq_doorbell_index_WORD word0
612#define lpfc_wq_doorbell_id_SHIFT 0
613#define lpfc_wq_doorbell_id_MASK 0xFFFF
614#define lpfc_wq_doorbell_id_WORD word0
615
616#define LPFC_EQCQ_DOORBELL 0x0120
617#define lpfc_eqcq_doorbell_arm_SHIFT 29
618#define lpfc_eqcq_doorbell_arm_MASK 0x0001
619#define lpfc_eqcq_doorbell_arm_WORD word0
620#define lpfc_eqcq_doorbell_num_released_SHIFT 16
621#define lpfc_eqcq_doorbell_num_released_MASK 0x1FFF
622#define lpfc_eqcq_doorbell_num_released_WORD word0
623#define lpfc_eqcq_doorbell_qt_SHIFT 10
624#define lpfc_eqcq_doorbell_qt_MASK 0x0001
625#define lpfc_eqcq_doorbell_qt_WORD word0
626#define LPFC_QUEUE_TYPE_COMPLETION 0
627#define LPFC_QUEUE_TYPE_EVENT 1
628#define lpfc_eqcq_doorbell_eqci_SHIFT 9
629#define lpfc_eqcq_doorbell_eqci_MASK 0x0001
630#define lpfc_eqcq_doorbell_eqci_WORD word0
631#define lpfc_eqcq_doorbell_cqid_SHIFT 0
632#define lpfc_eqcq_doorbell_cqid_MASK 0x03FF
633#define lpfc_eqcq_doorbell_cqid_WORD word0
634#define lpfc_eqcq_doorbell_eqid_SHIFT 0
635#define lpfc_eqcq_doorbell_eqid_MASK 0x01FF
636#define lpfc_eqcq_doorbell_eqid_WORD word0
637
638#define LPFC_BMBX 0x0160
639#define lpfc_bmbx_addr_SHIFT 2
640#define lpfc_bmbx_addr_MASK 0x3FFFFFFF
641#define lpfc_bmbx_addr_WORD word0
642#define lpfc_bmbx_hi_SHIFT 1
643#define lpfc_bmbx_hi_MASK 0x0001
644#define lpfc_bmbx_hi_WORD word0
645#define lpfc_bmbx_rdy_SHIFT 0
646#define lpfc_bmbx_rdy_MASK 0x0001
647#define lpfc_bmbx_rdy_WORD word0
648
649#define LPFC_MQ_DOORBELL 0x0140
650#define lpfc_mq_doorbell_num_posted_SHIFT 16
651#define lpfc_mq_doorbell_num_posted_MASK 0x3FFF
652#define lpfc_mq_doorbell_num_posted_WORD word0
653#define lpfc_mq_doorbell_id_SHIFT 0
654#define lpfc_mq_doorbell_id_MASK 0x03FF
655#define lpfc_mq_doorbell_id_WORD word0
656
657struct lpfc_sli4_cfg_mhdr {
658 uint32_t word1;
659#define lpfc_mbox_hdr_emb_SHIFT 0
660#define lpfc_mbox_hdr_emb_MASK 0x00000001
661#define lpfc_mbox_hdr_emb_WORD word1
662#define lpfc_mbox_hdr_sge_cnt_SHIFT 3
663#define lpfc_mbox_hdr_sge_cnt_MASK 0x0000001F
664#define lpfc_mbox_hdr_sge_cnt_WORD word1
665 uint32_t payload_length;
666 uint32_t tag_lo;
667 uint32_t tag_hi;
668 uint32_t reserved5;
669};
670
671union lpfc_sli4_cfg_shdr {
672 struct {
673 uint32_t word6;
674#define lpfc_mbox_hdr_opcode_SHIFT 0
675#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
676#define lpfc_mbox_hdr_opcode_WORD word6
677#define lpfc_mbox_hdr_subsystem_SHIFT 8
678#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
679#define lpfc_mbox_hdr_subsystem_WORD word6
680#define lpfc_mbox_hdr_port_number_SHIFT 16
681#define lpfc_mbox_hdr_port_number_MASK 0x000000FF
682#define lpfc_mbox_hdr_port_number_WORD word6
683#define lpfc_mbox_hdr_domain_SHIFT 24
684#define lpfc_mbox_hdr_domain_MASK 0x000000FF
685#define lpfc_mbox_hdr_domain_WORD word6
686 uint32_t timeout;
687 uint32_t request_length;
688 uint32_t reserved9;
689 } request;
690 struct {
691 uint32_t word6;
692#define lpfc_mbox_hdr_opcode_SHIFT 0
693#define lpfc_mbox_hdr_opcode_MASK 0x000000FF
694#define lpfc_mbox_hdr_opcode_WORD word6
695#define lpfc_mbox_hdr_subsystem_SHIFT 8
696#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF
697#define lpfc_mbox_hdr_subsystem_WORD word6
698#define lpfc_mbox_hdr_domain_SHIFT 24
699#define lpfc_mbox_hdr_domain_MASK 0x000000FF
700#define lpfc_mbox_hdr_domain_WORD word6
701 uint32_t word7;
702#define lpfc_mbox_hdr_status_SHIFT 0
703#define lpfc_mbox_hdr_status_MASK 0x000000FF
704#define lpfc_mbox_hdr_status_WORD word7
705#define lpfc_mbox_hdr_add_status_SHIFT 8
706#define lpfc_mbox_hdr_add_status_MASK 0x000000FF
707#define lpfc_mbox_hdr_add_status_WORD word7
708 uint32_t response_length;
709 uint32_t actual_response_length;
710 } response;
711};
712
713/* Mailbox structures */
714struct mbox_header {
715 struct lpfc_sli4_cfg_mhdr cfg_mhdr;
716 union lpfc_sli4_cfg_shdr cfg_shdr;
717};
718
719/* Subsystem Definitions */
720#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
721#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
722
723/* Device Specific Definitions */
724
725/* The HOST ENDIAN defines are in Big Endian format. */
726#define HOST_ENDIAN_LOW_WORD0 0xFF3412FF
727#define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF
728
729/* Common Opcodes */
730#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C
731#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D
732#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15
733#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20
734#define LPFC_MBOX_OPCODE_NOP 0x21
735#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35
736#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36
737#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
738#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
739
740/* FCoE Opcodes */
741#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
742#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY 0x02
743#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES 0x03
744#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES 0x04
745#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE 0x05
746#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY 0x06
747#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE 0x08
748#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09
749#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
750#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
751
752/* Mailbox command structures */
753struct eq_context {
754 uint32_t word0;
755#define lpfc_eq_context_size_SHIFT 31
756#define lpfc_eq_context_size_MASK 0x00000001
757#define lpfc_eq_context_size_WORD word0
758#define LPFC_EQE_SIZE_4 0x0
759#define LPFC_EQE_SIZE_16 0x1
760#define lpfc_eq_context_valid_SHIFT 29
761#define lpfc_eq_context_valid_MASK 0x00000001
762#define lpfc_eq_context_valid_WORD word0
763 uint32_t word1;
764#define lpfc_eq_context_count_SHIFT 26
765#define lpfc_eq_context_count_MASK 0x00000003
766#define lpfc_eq_context_count_WORD word1
767#define LPFC_EQ_CNT_256 0x0
768#define LPFC_EQ_CNT_512 0x1
769#define LPFC_EQ_CNT_1024 0x2
770#define LPFC_EQ_CNT_2048 0x3
771#define LPFC_EQ_CNT_4096 0x4
772 uint32_t word2;
773#define lpfc_eq_context_delay_multi_SHIFT 13
774#define lpfc_eq_context_delay_multi_MASK 0x000003FF
775#define lpfc_eq_context_delay_multi_WORD word2
776 uint32_t reserved3;
777};
778
779struct sgl_page_pairs {
780 uint32_t sgl_pg0_addr_lo;
781 uint32_t sgl_pg0_addr_hi;
782 uint32_t sgl_pg1_addr_lo;
783 uint32_t sgl_pg1_addr_hi;
784};
785
786struct lpfc_mbx_post_sgl_pages {
787 struct mbox_header header;
788 uint32_t word0;
789#define lpfc_post_sgl_pages_xri_SHIFT 0
790#define lpfc_post_sgl_pages_xri_MASK 0x0000FFFF
791#define lpfc_post_sgl_pages_xri_WORD word0
792#define lpfc_post_sgl_pages_xricnt_SHIFT 16
793#define lpfc_post_sgl_pages_xricnt_MASK 0x0000FFFF
794#define lpfc_post_sgl_pages_xricnt_WORD word0
795 struct sgl_page_pairs sgl_pg_pairs[1];
796};
797
798/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */
799struct lpfc_mbx_post_uembed_sgl_page1 {
800 union lpfc_sli4_cfg_shdr cfg_shdr;
801 uint32_t word0;
802 struct sgl_page_pairs sgl_pg_pairs;
803};
804
805struct lpfc_mbx_sge {
806 uint32_t pa_lo;
807 uint32_t pa_hi;
808 uint32_t length;
809};
810
811struct lpfc_mbx_nembed_cmd {
812 struct lpfc_sli4_cfg_mhdr cfg_mhdr;
813#define LPFC_SLI4_MBX_SGE_MAX_PAGES 19
814 struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES];
815};
816
817struct lpfc_mbx_nembed_sge_virt {
818 void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
819};
820
821struct lpfc_mbx_eq_create {
822 struct mbox_header header;
823 union {
824 struct {
825 uint32_t word0;
826#define lpfc_mbx_eq_create_num_pages_SHIFT 0
827#define lpfc_mbx_eq_create_num_pages_MASK 0x0000FFFF
828#define lpfc_mbx_eq_create_num_pages_WORD word0
829 struct eq_context context;
830 struct dma_address page[LPFC_MAX_EQ_PAGE];
831 } request;
832 struct {
833 uint32_t word0;
834#define lpfc_mbx_eq_create_q_id_SHIFT 0
835#define lpfc_mbx_eq_create_q_id_MASK 0x0000FFFF
836#define lpfc_mbx_eq_create_q_id_WORD word0
837 } response;
838 } u;
839};
840
841struct lpfc_mbx_eq_destroy {
842 struct mbox_header header;
843 union {
844 struct {
845 uint32_t word0;
846#define lpfc_mbx_eq_destroy_q_id_SHIFT 0
847#define lpfc_mbx_eq_destroy_q_id_MASK 0x0000FFFF
848#define lpfc_mbx_eq_destroy_q_id_WORD word0
849 } request;
850 struct {
851 uint32_t word0;
852 } response;
853 } u;
854};
855
856struct lpfc_mbx_nop {
857 struct mbox_header header;
858 uint32_t context[2];
859};
860
861struct cq_context {
862 uint32_t word0;
863#define lpfc_cq_context_event_SHIFT 31
864#define lpfc_cq_context_event_MASK 0x00000001
865#define lpfc_cq_context_event_WORD word0
866#define lpfc_cq_context_valid_SHIFT 29
867#define lpfc_cq_context_valid_MASK 0x00000001
868#define lpfc_cq_context_valid_WORD word0
869#define lpfc_cq_context_count_SHIFT 27
870#define lpfc_cq_context_count_MASK 0x00000003
871#define lpfc_cq_context_count_WORD word0
872#define LPFC_CQ_CNT_256 0x0
873#define LPFC_CQ_CNT_512 0x1
874#define LPFC_CQ_CNT_1024 0x2
875 uint32_t word1;
876#define lpfc_cq_eq_id_SHIFT 22
877#define lpfc_cq_eq_id_MASK 0x000000FF
878#define lpfc_cq_eq_id_WORD word1
879 uint32_t reserved0;
880 uint32_t reserved1;
881};
882
883struct lpfc_mbx_cq_create {
884 struct mbox_header header;
885 union {
886 struct {
887 uint32_t word0;
888#define lpfc_mbx_cq_create_num_pages_SHIFT 0
889#define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF
890#define lpfc_mbx_cq_create_num_pages_WORD word0
891 struct cq_context context;
892 struct dma_address page[LPFC_MAX_CQ_PAGE];
893 } request;
894 struct {
895 uint32_t word0;
896#define lpfc_mbx_cq_create_q_id_SHIFT 0
897#define lpfc_mbx_cq_create_q_id_MASK 0x0000FFFF
898#define lpfc_mbx_cq_create_q_id_WORD word0
899 } response;
900 } u;
901};
902
903struct lpfc_mbx_cq_destroy {
904 struct mbox_header header;
905 union {
906 struct {
907 uint32_t word0;
908#define lpfc_mbx_cq_destroy_q_id_SHIFT 0
909#define lpfc_mbx_cq_destroy_q_id_MASK 0x0000FFFF
910#define lpfc_mbx_cq_destroy_q_id_WORD word0
911 } request;
912 struct {
913 uint32_t word0;
914 } response;
915 } u;
916};
917
918struct wq_context {
919 uint32_t reserved0;
920 uint32_t reserved1;
921 uint32_t reserved2;
922 uint32_t reserved3;
923};
924
925struct lpfc_mbx_wq_create {
926 struct mbox_header header;
927 union {
928 struct {
929 uint32_t word0;
930#define lpfc_mbx_wq_create_num_pages_SHIFT 0
931#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF
932#define lpfc_mbx_wq_create_num_pages_WORD word0
933#define lpfc_mbx_wq_create_cq_id_SHIFT 16
934#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF
935#define lpfc_mbx_wq_create_cq_id_WORD word0
936 struct dma_address page[LPFC_MAX_WQ_PAGE];
937 } request;
938 struct {
939 uint32_t word0;
940#define lpfc_mbx_wq_create_q_id_SHIFT 0
941#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF
942#define lpfc_mbx_wq_create_q_id_WORD word0
943 } response;
944 } u;
945};
946
947struct lpfc_mbx_wq_destroy {
948 struct mbox_header header;
949 union {
950 struct {
951 uint32_t word0;
952#define lpfc_mbx_wq_destroy_q_id_SHIFT 0
953#define lpfc_mbx_wq_destroy_q_id_MASK 0x0000FFFF
954#define lpfc_mbx_wq_destroy_q_id_WORD word0
955 } request;
956 struct {
957 uint32_t word0;
958 } response;
959 } u;
960};
961
962#define LPFC_HDR_BUF_SIZE 128
963#define LPFC_DATA_BUF_SIZE 4096
964struct rq_context {
965 uint32_t word0;
966#define lpfc_rq_context_rq_size_SHIFT 16
967#define lpfc_rq_context_rq_size_MASK 0x0000000F
968#define lpfc_rq_context_rq_size_WORD word0
969#define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */
970#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */
971#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */
972#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */
973 uint32_t reserved1;
974 uint32_t word2;
975#define lpfc_rq_context_cq_id_SHIFT 16
976#define lpfc_rq_context_cq_id_MASK 0x000003FF
977#define lpfc_rq_context_cq_id_WORD word2
978#define lpfc_rq_context_buf_size_SHIFT 0
979#define lpfc_rq_context_buf_size_MASK 0x0000FFFF
980#define lpfc_rq_context_buf_size_WORD word2
981 uint32_t reserved3;
982};
983
984struct lpfc_mbx_rq_create {
985 struct mbox_header header;
986 union {
987 struct {
988 uint32_t word0;
989#define lpfc_mbx_rq_create_num_pages_SHIFT 0
990#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF
991#define lpfc_mbx_rq_create_num_pages_WORD word0
992 struct rq_context context;
993 struct dma_address page[LPFC_MAX_WQ_PAGE];
994 } request;
995 struct {
996 uint32_t word0;
997#define lpfc_mbx_rq_create_q_id_SHIFT 0
998#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
999#define lpfc_mbx_rq_create_q_id_WORD word0
1000 } response;
1001 } u;
1002};
1003
1004struct lpfc_mbx_rq_destroy {
1005 struct mbox_header header;
1006 union {
1007 struct {
1008 uint32_t word0;
1009#define lpfc_mbx_rq_destroy_q_id_SHIFT 0
1010#define lpfc_mbx_rq_destroy_q_id_MASK 0x0000FFFF
1011#define lpfc_mbx_rq_destroy_q_id_WORD word0
1012 } request;
1013 struct {
1014 uint32_t word0;
1015 } response;
1016 } u;
1017};
1018
1019struct mq_context {
1020 uint32_t word0;
1021#define lpfc_mq_context_cq_id_SHIFT 22
1022#define lpfc_mq_context_cq_id_MASK 0x000003FF
1023#define lpfc_mq_context_cq_id_WORD word0
1024#define lpfc_mq_context_count_SHIFT 16
1025#define lpfc_mq_context_count_MASK 0x0000000F
1026#define lpfc_mq_context_count_WORD word0
1027#define LPFC_MQ_CNT_16 0x5
1028#define LPFC_MQ_CNT_32 0x6
1029#define LPFC_MQ_CNT_64 0x7
1030#define LPFC_MQ_CNT_128 0x8
1031 uint32_t word1;
1032#define lpfc_mq_context_valid_SHIFT 31
1033#define lpfc_mq_context_valid_MASK 0x00000001
1034#define lpfc_mq_context_valid_WORD word1
1035 uint32_t reserved2;
1036 uint32_t reserved3;
1037};
1038
1039struct lpfc_mbx_mq_create {
1040 struct mbox_header header;
1041 union {
1042 struct {
1043 uint32_t word0;
1044#define lpfc_mbx_mq_create_num_pages_SHIFT 0
1045#define lpfc_mbx_mq_create_num_pages_MASK 0x0000FFFF
1046#define lpfc_mbx_mq_create_num_pages_WORD word0
1047 struct mq_context context;
1048 struct dma_address page[LPFC_MAX_MQ_PAGE];
1049 } request;
1050 struct {
1051 uint32_t word0;
1052#define lpfc_mbx_mq_create_q_id_SHIFT 0
1053#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF
1054#define lpfc_mbx_mq_create_q_id_WORD word0
1055 } response;
1056 } u;
1057};
1058
1059struct lpfc_mbx_mq_destroy {
1060 struct mbox_header header;
1061 union {
1062 struct {
1063 uint32_t word0;
1064#define lpfc_mbx_mq_destroy_q_id_SHIFT 0
1065#define lpfc_mbx_mq_destroy_q_id_MASK 0x0000FFFF
1066#define lpfc_mbx_mq_destroy_q_id_WORD word0
1067 } request;
1068 struct {
1069 uint32_t word0;
1070 } response;
1071 } u;
1072};
1073
1074struct lpfc_mbx_post_hdr_tmpl {
1075 struct mbox_header header;
1076 uint32_t word10;
1077#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT 0
1078#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK 0x0000FFFF
1079#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD word10
1080#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT 16
1081#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK 0x0000FFFF
1082#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD word10
1083 uint32_t rpi_paddr_lo;
1084 uint32_t rpi_paddr_hi;
1085};
1086
1087struct sli4_sge { /* SLI-4 */
1088 uint32_t addr_hi;
1089 uint32_t addr_lo;
1090
1091 uint32_t word2;
1092#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used */
1093#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF
1094#define lpfc_sli4_sge_offset_WORD word2
1095#define lpfc_sli4_sge_last_SHIFT 31 /* Last SGE in the SGL sets
1096 this flag */
1097#define lpfc_sli4_sge_last_MASK 0x00000001
1098#define lpfc_sli4_sge_last_WORD word2
1099 uint32_t word3;
1100#define lpfc_sli4_sge_len_SHIFT 0
1101#define lpfc_sli4_sge_len_MASK 0x0001FFFF
1102#define lpfc_sli4_sge_len_WORD word3
1103};
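/*
 * A sketch of how one SGE is typically filled in when building an SGL.
 * putPaddrHigh()/putPaddrLow() are the driver's helpers for splitting a
 * 64-bit DMA address; dma_addr, len and last are assumed caller-provided,
 * and the final cpu_to_le32() conversion of word2/word3 is elided:
 *
 *	sge->addr_hi = cpu_to_le32(putPaddrHigh(dma_addr));
 *	sge->addr_lo = cpu_to_le32(putPaddrLow(dma_addr));
 *	sge->word2 = 0;
 *	bf_set(lpfc_sli4_sge_last, sge, last ? 1 : 0);
 *	sge->word3 = 0;
 *	bf_set(lpfc_sli4_sge_len, sge, len);
 */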
1104
1105struct fcf_record {
1106 uint32_t max_rcv_size;
1107 uint32_t fka_adv_period;
1108 uint32_t fip_priority;
1109 uint32_t word3;
1110#define lpfc_fcf_record_mac_0_SHIFT 0
1111#define lpfc_fcf_record_mac_0_MASK 0x000000FF
1112#define lpfc_fcf_record_mac_0_WORD word3
1113#define lpfc_fcf_record_mac_1_SHIFT 8
1114#define lpfc_fcf_record_mac_1_MASK 0x000000FF
1115#define lpfc_fcf_record_mac_1_WORD word3
1116#define lpfc_fcf_record_mac_2_SHIFT 16
1117#define lpfc_fcf_record_mac_2_MASK 0x000000FF
1118#define lpfc_fcf_record_mac_2_WORD word3
1119#define lpfc_fcf_record_mac_3_SHIFT 24
1120#define lpfc_fcf_record_mac_3_MASK 0x000000FF
1121#define lpfc_fcf_record_mac_3_WORD word3
1122 uint32_t word4;
1123#define lpfc_fcf_record_mac_4_SHIFT 0
1124#define lpfc_fcf_record_mac_4_MASK 0x000000FF
1125#define lpfc_fcf_record_mac_4_WORD word4
1126#define lpfc_fcf_record_mac_5_SHIFT 8
1127#define lpfc_fcf_record_mac_5_MASK 0x000000FF
1128#define lpfc_fcf_record_mac_5_WORD word4
1129#define lpfc_fcf_record_fcf_avail_SHIFT 16
1130#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF
1131#define lpfc_fcf_record_fcf_avail_WORD word4
1132#define lpfc_fcf_record_mac_addr_prov_SHIFT 24
1133#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF
1134#define lpfc_fcf_record_mac_addr_prov_WORD word4
1135#define LPFC_FCF_FPMA 1 /* Fabric Provided MAC Address */
1136#define LPFC_FCF_SPMA 2 /* Server Provided MAC Address */
1137 uint32_t word5;
1138#define lpfc_fcf_record_fab_name_0_SHIFT 0
1139#define lpfc_fcf_record_fab_name_0_MASK 0x000000FF
1140#define lpfc_fcf_record_fab_name_0_WORD word5
1141#define lpfc_fcf_record_fab_name_1_SHIFT 8
1142#define lpfc_fcf_record_fab_name_1_MASK 0x000000FF
1143#define lpfc_fcf_record_fab_name_1_WORD word5
1144#define lpfc_fcf_record_fab_name_2_SHIFT 16
1145#define lpfc_fcf_record_fab_name_2_MASK 0x000000FF
1146#define lpfc_fcf_record_fab_name_2_WORD word5
1147#define lpfc_fcf_record_fab_name_3_SHIFT 24
1148#define lpfc_fcf_record_fab_name_3_MASK 0x000000FF
1149#define lpfc_fcf_record_fab_name_3_WORD word5
1150 uint32_t word6;
1151#define lpfc_fcf_record_fab_name_4_SHIFT 0
1152#define lpfc_fcf_record_fab_name_4_MASK 0x000000FF
1153#define lpfc_fcf_record_fab_name_4_WORD word6
1154#define lpfc_fcf_record_fab_name_5_SHIFT 8
1155#define lpfc_fcf_record_fab_name_5_MASK 0x000000FF
1156#define lpfc_fcf_record_fab_name_5_WORD word6
1157#define lpfc_fcf_record_fab_name_6_SHIFT 16
1158#define lpfc_fcf_record_fab_name_6_MASK 0x000000FF
1159#define lpfc_fcf_record_fab_name_6_WORD word6
1160#define lpfc_fcf_record_fab_name_7_SHIFT 24
1161#define lpfc_fcf_record_fab_name_7_MASK 0x000000FF
1162#define lpfc_fcf_record_fab_name_7_WORD word6
1163 uint32_t word7;
1164#define lpfc_fcf_record_fc_map_0_SHIFT 0
1165#define lpfc_fcf_record_fc_map_0_MASK 0x000000FF
1166#define lpfc_fcf_record_fc_map_0_WORD word7
1167#define lpfc_fcf_record_fc_map_1_SHIFT 8
1168#define lpfc_fcf_record_fc_map_1_MASK 0x000000FF
1169#define lpfc_fcf_record_fc_map_1_WORD word7
1170#define lpfc_fcf_record_fc_map_2_SHIFT 16
1171#define lpfc_fcf_record_fc_map_2_MASK 0x000000FF
1172#define lpfc_fcf_record_fc_map_2_WORD word7
1173#define lpfc_fcf_record_fcf_valid_SHIFT 24
1174#define lpfc_fcf_record_fcf_valid_MASK 0x000000FF
1175#define lpfc_fcf_record_fcf_valid_WORD word7
1176 uint32_t word8;
1177#define lpfc_fcf_record_fcf_index_SHIFT 0
1178#define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF
1179#define lpfc_fcf_record_fcf_index_WORD word8
1180#define lpfc_fcf_record_fcf_state_SHIFT 16
1181#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF
1182#define lpfc_fcf_record_fcf_state_WORD word8
1183 uint8_t vlan_bitmap[512];
1184};
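/*
 * The FCF MAC address is scattered across word3/word4 one byte per field;
 * a sketch of reassembling it into a caller-provided mac[6] array:
 *
 *	mac[0] = bf_get(lpfc_fcf_record_mac_0, fcf_rec);
 *	mac[1] = bf_get(lpfc_fcf_record_mac_1, fcf_rec);
 *	mac[2] = bf_get(lpfc_fcf_record_mac_2, fcf_rec);
 *	mac[3] = bf_get(lpfc_fcf_record_mac_3, fcf_rec);
 *	mac[4] = bf_get(lpfc_fcf_record_mac_4, fcf_rec);
 *	mac[5] = bf_get(lpfc_fcf_record_mac_5, fcf_rec);
 */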
1185
1186struct lpfc_mbx_read_fcf_tbl {
1187 union lpfc_sli4_cfg_shdr cfg_shdr;
1188 union {
1189 struct {
1190 uint32_t word10;
1191#define lpfc_mbx_read_fcf_tbl_indx_SHIFT 0
1192#define lpfc_mbx_read_fcf_tbl_indx_MASK 0x0000FFFF
1193#define lpfc_mbx_read_fcf_tbl_indx_WORD word10
1194 } request;
1195 struct {
1196 uint32_t eventag;
1197 } response;
1198 } u;
1199 uint32_t word11;
1200#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT 0
1201#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK 0x0000FFFF
1202#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD word11
1203};
1204
1205struct lpfc_mbx_add_fcf_tbl_entry {
1206 union lpfc_sli4_cfg_shdr cfg_shdr;
1207 uint32_t word10;
1208#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT 0
1209#define lpfc_mbx_add_fcf_tbl_fcfi_MASK 0x0000FFFF
1210#define lpfc_mbx_add_fcf_tbl_fcfi_WORD word10
1211 struct lpfc_mbx_sge fcf_sge;
1212};
1213
1214struct lpfc_mbx_del_fcf_tbl_entry {
1215 struct mbox_header header;
1216 uint32_t word10;
1217#define lpfc_mbx_del_fcf_tbl_count_SHIFT 0
1218#define lpfc_mbx_del_fcf_tbl_count_MASK 0x0000FFFF
1219#define lpfc_mbx_del_fcf_tbl_count_WORD word10
1220#define lpfc_mbx_del_fcf_tbl_index_SHIFT 16
1221#define lpfc_mbx_del_fcf_tbl_index_MASK 0x0000FFFF
1222#define lpfc_mbx_del_fcf_tbl_index_WORD word10
1223};
1224
1225/* Status field for embedded SLI_CONFIG mailbox command */
1226#define STATUS_SUCCESS 0x0
1227#define STATUS_FAILED 0x1
1228#define STATUS_ILLEGAL_REQUEST 0x2
1229#define STATUS_ILLEGAL_FIELD 0x3
1230#define STATUS_INSUFFICIENT_BUFFER 0x4
1231#define STATUS_UNAUTHORIZED_REQUEST 0x5
1232#define STATUS_FLASHROM_SAVE_FAILED 0x17
1233#define STATUS_FLASHROM_RESTORE_FAILED 0x18
1234#define STATUS_ICCBINDEX_ALLOC_FAILED 0x1a
1235#define STATUS_IOCTLHANDLE_ALLOC_FAILED 0x1b
1236#define STATUS_INVALID_PHY_ADDR_FROM_OSM 0x1c
1237#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM 0x1d
1238#define STATUS_ASSERT_FAILED 0x1e
1239#define STATUS_INVALID_SESSION 0x1f
1240#define STATUS_INVALID_CONNECTION 0x20
1241#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT 0x21
1242#define STATUS_BTL_NO_FREE_SLOT_PATH 0x24
1243#define STATUS_BTL_NO_FREE_SLOT_TGTID 0x25
1244#define STATUS_OSM_DEVSLOT_NOT_FOUND 0x26
1245#define STATUS_FLASHROM_READ_FAILED 0x27
1246#define STATUS_POLL_IOCTL_TIMEOUT 0x28
1247#define STATUS_ERROR_ACITMAIN 0x2a
1248#define STATUS_REBOOT_REQUIRED 0x2c
1249#define STATUS_FCF_IN_USE 0x3a
1250
1251struct lpfc_mbx_sli4_config {
1252 struct mbox_header header;
1253};
1254
1255struct lpfc_mbx_init_vfi {
1256 uint32_t word1;
1257#define lpfc_init_vfi_vr_SHIFT 31
1258#define lpfc_init_vfi_vr_MASK 0x00000001
1259#define lpfc_init_vfi_vr_WORD word1
1260#define lpfc_init_vfi_vt_SHIFT 30
1261#define lpfc_init_vfi_vt_MASK 0x00000001
1262#define lpfc_init_vfi_vt_WORD word1
1263#define lpfc_init_vfi_vf_SHIFT 29
1264#define lpfc_init_vfi_vf_MASK 0x00000001
1265#define lpfc_init_vfi_vf_WORD word1
1266#define lpfc_init_vfi_vfi_SHIFT 0
1267#define lpfc_init_vfi_vfi_MASK 0x0000FFFF
1268#define lpfc_init_vfi_vfi_WORD word1
1269 uint32_t word2;
1270#define lpfc_init_vfi_fcfi_SHIFT 0
1271#define lpfc_init_vfi_fcfi_MASK 0x0000FFFF
1272#define lpfc_init_vfi_fcfi_WORD word2
1273 uint32_t word3;
1274#define lpfc_init_vfi_pri_SHIFT 13
1275#define lpfc_init_vfi_pri_MASK 0x00000007
1276#define lpfc_init_vfi_pri_WORD word3
1277#define lpfc_init_vfi_vf_id_SHIFT 1
1278#define lpfc_init_vfi_vf_id_MASK 0x00000FFF
1279#define lpfc_init_vfi_vf_id_WORD word3
1280 uint32_t word4;
1281#define lpfc_init_vfi_hop_count_SHIFT 24
1282#define lpfc_init_vfi_hop_count_MASK 0x000000FF
1283#define lpfc_init_vfi_hop_count_WORD word4
1284};
1285
1286struct lpfc_mbx_reg_vfi {
1287 uint32_t word1;
1288#define lpfc_reg_vfi_vp_SHIFT 28
1289#define lpfc_reg_vfi_vp_MASK 0x00000001
1290#define lpfc_reg_vfi_vp_WORD word1
1291#define lpfc_reg_vfi_vfi_SHIFT 0
1292#define lpfc_reg_vfi_vfi_MASK 0x0000FFFF
1293#define lpfc_reg_vfi_vfi_WORD word1
1294 uint32_t word2;
1295#define lpfc_reg_vfi_vpi_SHIFT 16
1296#define lpfc_reg_vfi_vpi_MASK 0x0000FFFF
1297#define lpfc_reg_vfi_vpi_WORD word2
1298#define lpfc_reg_vfi_fcfi_SHIFT 0
1299#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF
1300#define lpfc_reg_vfi_fcfi_WORD word2
1301 uint32_t word3_rsvd;
1302 uint32_t word4_rsvd;
1303 struct ulp_bde64 bde;
1304 uint32_t word8_rsvd;
1305 uint32_t word9_rsvd;
1306 uint32_t word10;
1307#define lpfc_reg_vfi_nport_id_SHIFT 0
1308#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF
1309#define lpfc_reg_vfi_nport_id_WORD word10
1310};
1311
1312struct lpfc_mbx_init_vpi {
1313 uint32_t word1;
1314#define lpfc_init_vpi_vfi_SHIFT 16
1315#define lpfc_init_vpi_vfi_MASK 0x0000FFFF
1316#define lpfc_init_vpi_vfi_WORD word1
1317#define lpfc_init_vpi_vpi_SHIFT 0
1318#define lpfc_init_vpi_vpi_MASK 0x0000FFFF
1319#define lpfc_init_vpi_vpi_WORD word1
1320};
1321
1322struct lpfc_mbx_read_vpi {
1323 uint32_t word1_rsvd;
1324 uint32_t word2;
1325#define lpfc_mbx_read_vpi_vnportid_SHIFT 0
1326#define lpfc_mbx_read_vpi_vnportid_MASK 0x00FFFFFF
1327#define lpfc_mbx_read_vpi_vnportid_WORD word2
1328 uint32_t word3_rsvd;
1329 uint32_t word4;
1330#define lpfc_mbx_read_vpi_acq_alpa_SHIFT 0
1331#define lpfc_mbx_read_vpi_acq_alpa_MASK 0x000000FF
1332#define lpfc_mbx_read_vpi_acq_alpa_WORD word4
1333#define lpfc_mbx_read_vpi_pb_SHIFT 15
1334#define lpfc_mbx_read_vpi_pb_MASK 0x00000001
1335#define lpfc_mbx_read_vpi_pb_WORD word4
1336#define lpfc_mbx_read_vpi_spec_alpa_SHIFT 16
1337#define lpfc_mbx_read_vpi_spec_alpa_MASK 0x000000FF
1338#define lpfc_mbx_read_vpi_spec_alpa_WORD word4
1339#define lpfc_mbx_read_vpi_ns_SHIFT 30
1340#define lpfc_mbx_read_vpi_ns_MASK 0x00000001
1341#define lpfc_mbx_read_vpi_ns_WORD word4
1342#define lpfc_mbx_read_vpi_hl_SHIFT 31
1343#define lpfc_mbx_read_vpi_hl_MASK 0x00000001
1344#define lpfc_mbx_read_vpi_hl_WORD word4
1345 uint32_t word5_rsvd;
1346 uint32_t word6;
1347#define lpfc_mbx_read_vpi_vpi_SHIFT 0
1348#define lpfc_mbx_read_vpi_vpi_MASK 0x0000FFFF
1349#define lpfc_mbx_read_vpi_vpi_WORD word6
1350 uint32_t word7;
1351#define lpfc_mbx_read_vpi_mac_0_SHIFT 0
1352#define lpfc_mbx_read_vpi_mac_0_MASK 0x000000FF
1353#define lpfc_mbx_read_vpi_mac_0_WORD word7
1354#define lpfc_mbx_read_vpi_mac_1_SHIFT 8
1355#define lpfc_mbx_read_vpi_mac_1_MASK 0x000000FF
1356#define lpfc_mbx_read_vpi_mac_1_WORD word7
1357#define lpfc_mbx_read_vpi_mac_2_SHIFT 16
1358#define lpfc_mbx_read_vpi_mac_2_MASK 0x000000FF
1359#define lpfc_mbx_read_vpi_mac_2_WORD word7
1360#define lpfc_mbx_read_vpi_mac_3_SHIFT 24
1361#define lpfc_mbx_read_vpi_mac_3_MASK 0x000000FF
1362#define lpfc_mbx_read_vpi_mac_3_WORD word7
1363 uint32_t word8;
1364#define lpfc_mbx_read_vpi_mac_4_SHIFT 0
1365#define lpfc_mbx_read_vpi_mac_4_MASK 0x000000FF
1366#define lpfc_mbx_read_vpi_mac_4_WORD word8
1367#define lpfc_mbx_read_vpi_mac_5_SHIFT 8
1368#define lpfc_mbx_read_vpi_mac_5_MASK 0x000000FF
1369#define lpfc_mbx_read_vpi_mac_5_WORD word8
1370#define lpfc_mbx_read_vpi_vlan_tag_SHIFT 16
1371#define lpfc_mbx_read_vpi_vlan_tag_MASK 0x00000FFF
1372#define lpfc_mbx_read_vpi_vlan_tag_WORD word8
1373#define lpfc_mbx_read_vpi_vv_SHIFT 28
1374#define lpfc_mbx_read_vpi_vv_MASK 0x00000001
1375#define lpfc_mbx_read_vpi_vv_WORD word8
1376};
1377
1378struct lpfc_mbx_unreg_vfi {
1379 uint32_t word1_rsvd;
1380 uint32_t word2;
1381#define lpfc_unreg_vfi_vfi_SHIFT 0
1382#define lpfc_unreg_vfi_vfi_MASK 0x0000FFFF
1383#define lpfc_unreg_vfi_vfi_WORD word2
1384};
1385
1386struct lpfc_mbx_resume_rpi {
1387 uint32_t word1;
1388#define lpfc_resume_rpi_rpi_SHIFT 0
1389#define lpfc_resume_rpi_rpi_MASK 0x0000FFFF
1390#define lpfc_resume_rpi_rpi_WORD word1
1391 uint32_t event_tag;
1392 uint32_t word3_rsvd;
1393 uint32_t word4_rsvd;
1394 uint32_t word5_rsvd;
1395 uint32_t word6;
1396#define lpfc_resume_rpi_vpi_SHIFT 0
1397#define lpfc_resume_rpi_vpi_MASK 0x0000FFFF
1398#define lpfc_resume_rpi_vpi_WORD word6
1399#define lpfc_resume_rpi_vfi_SHIFT 16
1400#define lpfc_resume_rpi_vfi_MASK 0x0000FFFF
1401#define lpfc_resume_rpi_vfi_WORD word6
1402};
1403
1404#define REG_FCF_INVALID_QID 0xFFFF
1405struct lpfc_mbx_reg_fcfi {
1406 uint32_t word1;
1407#define lpfc_reg_fcfi_info_index_SHIFT 0
1408#define lpfc_reg_fcfi_info_index_MASK 0x0000FFFF
1409#define lpfc_reg_fcfi_info_index_WORD word1
1410#define lpfc_reg_fcfi_fcfi_SHIFT 16
1411#define lpfc_reg_fcfi_fcfi_MASK 0x0000FFFF
1412#define lpfc_reg_fcfi_fcfi_WORD word1
1413 uint32_t word2;
1414#define lpfc_reg_fcfi_rq_id1_SHIFT 0
1415#define lpfc_reg_fcfi_rq_id1_MASK 0x0000FFFF
1416#define lpfc_reg_fcfi_rq_id1_WORD word2
1417#define lpfc_reg_fcfi_rq_id0_SHIFT 16
1418#define lpfc_reg_fcfi_rq_id0_MASK 0x0000FFFF
1419#define lpfc_reg_fcfi_rq_id0_WORD word2
1420 uint32_t word3;
1421#define lpfc_reg_fcfi_rq_id3_SHIFT 0
1422#define lpfc_reg_fcfi_rq_id3_MASK 0x0000FFFF
1423#define lpfc_reg_fcfi_rq_id3_WORD word3
1424#define lpfc_reg_fcfi_rq_id2_SHIFT 16
1425#define lpfc_reg_fcfi_rq_id2_MASK 0x0000FFFF
1426#define lpfc_reg_fcfi_rq_id2_WORD word3
1427 uint32_t word4;
1428#define lpfc_reg_fcfi_type_match0_SHIFT 24
1429#define lpfc_reg_fcfi_type_match0_MASK 0x000000FF
1430#define lpfc_reg_fcfi_type_match0_WORD word4
1431#define lpfc_reg_fcfi_type_mask0_SHIFT 16
1432#define lpfc_reg_fcfi_type_mask0_MASK 0x000000FF
1433#define lpfc_reg_fcfi_type_mask0_WORD word4
1434#define lpfc_reg_fcfi_rctl_match0_SHIFT 8
1435#define lpfc_reg_fcfi_rctl_match0_MASK 0x000000FF
1436#define lpfc_reg_fcfi_rctl_match0_WORD word4
1437#define lpfc_reg_fcfi_rctl_mask0_SHIFT 0
1438#define lpfc_reg_fcfi_rctl_mask0_MASK 0x000000FF
1439#define lpfc_reg_fcfi_rctl_mask0_WORD word4
1440 uint32_t word5;
1441#define lpfc_reg_fcfi_type_match1_SHIFT 24
1442#define lpfc_reg_fcfi_type_match1_MASK 0x000000FF
1443#define lpfc_reg_fcfi_type_match1_WORD word5
1444#define lpfc_reg_fcfi_type_mask1_SHIFT 16
1445#define lpfc_reg_fcfi_type_mask1_MASK 0x000000FF
1446#define lpfc_reg_fcfi_type_mask1_WORD word5
1447#define lpfc_reg_fcfi_rctl_match1_SHIFT 8
1448#define lpfc_reg_fcfi_rctl_match1_MASK 0x000000FF
1449#define lpfc_reg_fcfi_rctl_match1_WORD word5
1450#define lpfc_reg_fcfi_rctl_mask1_SHIFT 0
1451#define lpfc_reg_fcfi_rctl_mask1_MASK 0x000000FF
1452#define lpfc_reg_fcfi_rctl_mask1_WORD word5
1453 uint32_t word6;
1454#define lpfc_reg_fcfi_type_match2_SHIFT 24
1455#define lpfc_reg_fcfi_type_match2_MASK 0x000000FF
1456#define lpfc_reg_fcfi_type_match2_WORD word6
1457#define lpfc_reg_fcfi_type_mask2_SHIFT 16
1458#define lpfc_reg_fcfi_type_mask2_MASK 0x000000FF
1459#define lpfc_reg_fcfi_type_mask2_WORD word6
1460#define lpfc_reg_fcfi_rctl_match2_SHIFT 8
1461#define lpfc_reg_fcfi_rctl_match2_MASK 0x000000FF
1462#define lpfc_reg_fcfi_rctl_match2_WORD word6
1463#define lpfc_reg_fcfi_rctl_mask2_SHIFT 0
1464#define lpfc_reg_fcfi_rctl_mask2_MASK 0x000000FF
1465#define lpfc_reg_fcfi_rctl_mask2_WORD word6
1466 uint32_t word7;
1467#define lpfc_reg_fcfi_type_match3_SHIFT 24
1468#define lpfc_reg_fcfi_type_match3_MASK 0x000000FF
1469#define lpfc_reg_fcfi_type_match3_WORD word7
1470#define lpfc_reg_fcfi_type_mask3_SHIFT 16
1471#define lpfc_reg_fcfi_type_mask3_MASK 0x000000FF
1472#define lpfc_reg_fcfi_type_mask3_WORD word7
1473#define lpfc_reg_fcfi_rctl_match3_SHIFT 8
1474#define lpfc_reg_fcfi_rctl_match3_MASK 0x000000FF
1475#define lpfc_reg_fcfi_rctl_match3_WORD word7
1476#define lpfc_reg_fcfi_rctl_mask3_SHIFT 0
1477#define lpfc_reg_fcfi_rctl_mask3_MASK 0x000000FF
1478#define lpfc_reg_fcfi_rctl_mask3_WORD word7
1479 uint32_t word8;
1480#define lpfc_reg_fcfi_mam_SHIFT 13
1481#define lpfc_reg_fcfi_mam_MASK 0x00000003
1482#define lpfc_reg_fcfi_mam_WORD word8
1483#define LPFC_MAM_BOTH 0 /* Both SPMA and FPMA */
1484#define LPFC_MAM_SPMA 1 /* Server Provided MAC Address */
1485#define LPFC_MAM_FPMA 2 /* Fabric Provided MAC Address */
1486#define lpfc_reg_fcfi_vv_SHIFT 12
1487#define lpfc_reg_fcfi_vv_MASK 0x00000001
1488#define lpfc_reg_fcfi_vv_WORD word8
1489#define lpfc_reg_fcfi_vlan_tag_SHIFT 0
1490#define lpfc_reg_fcfi_vlan_tag_MASK 0x00000FFF
1491#define lpfc_reg_fcfi_vlan_tag_WORD word8
1492};
1493
1494struct lpfc_mbx_unreg_fcfi {
1495 uint32_t word1_rsv;
1496 uint32_t word2;
1497#define lpfc_unreg_fcfi_SHIFT 0
1498#define lpfc_unreg_fcfi_MASK 0x0000FFFF
1499#define lpfc_unreg_fcfi_WORD word2
1500};
1501
1502struct lpfc_mbx_read_rev {
1503 uint32_t word1;
1504#define lpfc_mbx_rd_rev_sli_lvl_SHIFT 16
1505#define lpfc_mbx_rd_rev_sli_lvl_MASK 0x0000000F
1506#define lpfc_mbx_rd_rev_sli_lvl_WORD word1
1507#define lpfc_mbx_rd_rev_fcoe_SHIFT 20
1508#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001
1509#define lpfc_mbx_rd_rev_fcoe_WORD word1
1510#define lpfc_mbx_rd_rev_vpd_SHIFT 29
1511#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001
1512#define lpfc_mbx_rd_rev_vpd_WORD word1
1513 uint32_t first_hw_rev;
1514 uint32_t second_hw_rev;
1515 uint32_t word4_rsvd;
1516 uint32_t third_hw_rev;
1517 uint32_t word6;
1518#define lpfc_mbx_rd_rev_fcph_low_SHIFT 0
1519#define lpfc_mbx_rd_rev_fcph_low_MASK 0x000000FF
1520#define lpfc_mbx_rd_rev_fcph_low_WORD word6
1521#define lpfc_mbx_rd_rev_fcph_high_SHIFT 8
1522#define lpfc_mbx_rd_rev_fcph_high_MASK 0x000000FF
1523#define lpfc_mbx_rd_rev_fcph_high_WORD word6
1524#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT 16
1525#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK 0x000000FF
1526#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD word6
1527#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT 24
1528#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK 0x000000FF
1529#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD word6
1530 uint32_t word7_rsvd;
1531 uint32_t fw_id_rev;
1532 uint8_t fw_name[16];
1533 uint32_t ulp_fw_id_rev;
1534 uint8_t ulp_fw_name[16];
1535 uint32_t word18_47_rsvd[30];
1536 uint32_t word48;
1537#define lpfc_mbx_rd_rev_avail_len_SHIFT 0
1538#define lpfc_mbx_rd_rev_avail_len_MASK 0x00FFFFFF
1539#define lpfc_mbx_rd_rev_avail_len_WORD word48
1540 uint32_t vpd_paddr_low;
1541 uint32_t vpd_paddr_high;
1542 uint32_t avail_vpd_len;
1543 uint32_t rsvd_52_63[12];
1544};
1545
1546struct lpfc_mbx_read_config {
1547 uint32_t word1;
1548#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0
1549#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF
1550#define lpfc_mbx_rd_conf_max_bbc_WORD word1
1551#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8
1552#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF
1553#define lpfc_mbx_rd_conf_init_bbc_WORD word1
1554 uint32_t word2;
1555#define lpfc_mbx_rd_conf_nport_did_SHIFT 0
1556#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF
1557#define lpfc_mbx_rd_conf_nport_did_WORD word2
1558#define lpfc_mbx_rd_conf_topology_SHIFT 24
1559#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
1560#define lpfc_mbx_rd_conf_topology_WORD word2
1561 uint32_t word3;
1562#define lpfc_mbx_rd_conf_ao_SHIFT 0
1563#define lpfc_mbx_rd_conf_ao_MASK 0x00000001
1564#define lpfc_mbx_rd_conf_ao_WORD word3
1565#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8
1566#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F
1567#define lpfc_mbx_rd_conf_bb_scn_WORD word3
1568#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12
1569#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F
1570#define lpfc_mbx_rd_conf_cbb_scn_WORD word3
1571#define lpfc_mbx_rd_conf_mc_SHIFT 29
1572#define lpfc_mbx_rd_conf_mc_MASK 0x00000001
1573#define lpfc_mbx_rd_conf_mc_WORD word3
1574 uint32_t word4;
1575#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0
1576#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF
1577#define lpfc_mbx_rd_conf_e_d_tov_WORD word4
1578 uint32_t word5;
1579#define lpfc_mbx_rd_conf_lp_tov_SHIFT 0
1580#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF
1581#define lpfc_mbx_rd_conf_lp_tov_WORD word5
1582 uint32_t word6;
1583#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0
1584#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF
1585#define lpfc_mbx_rd_conf_r_a_tov_WORD word6
1586 uint32_t word7;
1587#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0
1588#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF
1589#define lpfc_mbx_rd_conf_r_t_tov_WORD word7
1590 uint32_t word8;
1591#define lpfc_mbx_rd_conf_al_tov_SHIFT 0
1592#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F
1593#define lpfc_mbx_rd_conf_al_tov_WORD word8
1594 uint32_t word9;
1595#define lpfc_mbx_rd_conf_lmt_SHIFT 0
1596#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF
1597#define lpfc_mbx_rd_conf_lmt_WORD word9
1598 uint32_t word10;
1599#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0
1600#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF
1601#define lpfc_mbx_rd_conf_max_alpa_WORD word10
1602 uint32_t word11_rsvd;
1603 uint32_t word12;
1604#define lpfc_mbx_rd_conf_xri_base_SHIFT 0
1605#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF
1606#define lpfc_mbx_rd_conf_xri_base_WORD word12
1607#define lpfc_mbx_rd_conf_xri_count_SHIFT 16
1608#define lpfc_mbx_rd_conf_xri_count_MASK 0x0000FFFF
1609#define lpfc_mbx_rd_conf_xri_count_WORD word12
1610 uint32_t word13;
1611#define lpfc_mbx_rd_conf_rpi_base_SHIFT 0
1612#define lpfc_mbx_rd_conf_rpi_base_MASK 0x0000FFFF
1613#define lpfc_mbx_rd_conf_rpi_base_WORD word13
1614#define lpfc_mbx_rd_conf_rpi_count_SHIFT 16
1615#define lpfc_mbx_rd_conf_rpi_count_MASK 0x0000FFFF
1616#define lpfc_mbx_rd_conf_rpi_count_WORD word13
1617 uint32_t word14;
1618#define lpfc_mbx_rd_conf_vpi_base_SHIFT 0
1619#define lpfc_mbx_rd_conf_vpi_base_MASK 0x0000FFFF
1620#define lpfc_mbx_rd_conf_vpi_base_WORD word14
1621#define lpfc_mbx_rd_conf_vpi_count_SHIFT 16
1622#define lpfc_mbx_rd_conf_vpi_count_MASK 0x0000FFFF
1623#define lpfc_mbx_rd_conf_vpi_count_WORD word14
1624 uint32_t word15;
1625#define lpfc_mbx_rd_conf_vfi_base_SHIFT 0
1626#define lpfc_mbx_rd_conf_vfi_base_MASK 0x0000FFFF
1627#define lpfc_mbx_rd_conf_vfi_base_WORD word15
1628#define lpfc_mbx_rd_conf_vfi_count_SHIFT 16
1629#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF
1630#define lpfc_mbx_rd_conf_vfi_count_WORD word15
1631 uint32_t word16;
1632#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0
1633#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF
1634#define lpfc_mbx_rd_conf_fcfi_base_WORD word16
1635#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16
1636#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF
1637#define lpfc_mbx_rd_conf_fcfi_count_WORD word16
1638 uint32_t word17;
1639#define lpfc_mbx_rd_conf_rq_count_SHIFT 0
1640#define lpfc_mbx_rd_conf_rq_count_MASK 0x0000FFFF
1641#define lpfc_mbx_rd_conf_rq_count_WORD word17
1642#define lpfc_mbx_rd_conf_eq_count_SHIFT 16
1643#define lpfc_mbx_rd_conf_eq_count_MASK 0x0000FFFF
1644#define lpfc_mbx_rd_conf_eq_count_WORD word17
1645 uint32_t word18;
1646#define lpfc_mbx_rd_conf_wq_count_SHIFT 0
1647#define lpfc_mbx_rd_conf_wq_count_MASK 0x0000FFFF
1648#define lpfc_mbx_rd_conf_wq_count_WORD word18
1649#define lpfc_mbx_rd_conf_cq_count_SHIFT 16
1650#define lpfc_mbx_rd_conf_cq_count_MASK 0x0000FFFF
1651#define lpfc_mbx_rd_conf_cq_count_WORD word18
1652};
1653
1654struct lpfc_mbx_request_features {
1655 uint32_t word1;
1656#define lpfc_mbx_rq_ftr_qry_SHIFT 0
1657#define lpfc_mbx_rq_ftr_qry_MASK 0x00000001
1658#define lpfc_mbx_rq_ftr_qry_WORD word1
1659 uint32_t word2;
1660#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT 0
1661#define lpfc_mbx_rq_ftr_rq_iaab_MASK 0x00000001
1662#define lpfc_mbx_rq_ftr_rq_iaab_WORD word2
1663#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT 1
1664#define lpfc_mbx_rq_ftr_rq_npiv_MASK 0x00000001
1665#define lpfc_mbx_rq_ftr_rq_npiv_WORD word2
1666#define lpfc_mbx_rq_ftr_rq_dif_SHIFT 2
1667#define lpfc_mbx_rq_ftr_rq_dif_MASK 0x00000001
1668#define lpfc_mbx_rq_ftr_rq_dif_WORD word2
1669#define lpfc_mbx_rq_ftr_rq_vf_SHIFT 3
1670#define lpfc_mbx_rq_ftr_rq_vf_MASK 0x00000001
1671#define lpfc_mbx_rq_ftr_rq_vf_WORD word2
1672#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT 4
1673#define lpfc_mbx_rq_ftr_rq_fcpi_MASK 0x00000001
1674#define lpfc_mbx_rq_ftr_rq_fcpi_WORD word2
1675#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT 5
1676#define lpfc_mbx_rq_ftr_rq_fcpt_MASK 0x00000001
1677#define lpfc_mbx_rq_ftr_rq_fcpt_WORD word2
1678#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT 6
1679#define lpfc_mbx_rq_ftr_rq_fcpc_MASK 0x00000001
1680#define lpfc_mbx_rq_ftr_rq_fcpc_WORD word2
1681#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7
1682#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001
1683#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2
1684 uint32_t word3;
1685#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
1686#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
1687#define lpfc_mbx_rq_ftr_rsp_iaab_WORD word3
1688#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT 1
1689#define lpfc_mbx_rq_ftr_rsp_npiv_MASK 0x00000001
1690#define lpfc_mbx_rq_ftr_rsp_npiv_WORD word3
1691#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT 2
1692#define lpfc_mbx_rq_ftr_rsp_dif_MASK 0x00000001
1693#define lpfc_mbx_rq_ftr_rsp_dif_WORD word3
1694#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT 3
1695#define lpfc_mbx_rq_ftr_rsp_vf_MASK 0x00000001
1696#define lpfc_mbx_rq_ftr_rsp_vf_WORD word3
1697#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT 4
1698#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK 0x00000001
1699#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD word3
1700#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT 5
1701#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK 0x00000001
1702#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD word3
1703#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT 6
1704#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK 0x00000001
1705#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD word3
1706#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7
1707#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001
1708#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
1709};
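/*
 * The rq_* bits in word2 request features and the port reports what it
 * actually enabled in the corresponding rsp_* bits of word3; the qry bit
 * selects a pure query instead. A hedged sketch of requesting FCP
 * initiator mode, with req_ftrs pointing at this structure inside an
 * issued mailbox:
 *
 *	bf_set(lpfc_mbx_rq_ftr_qry, req_ftrs, 0);
 *	bf_set(lpfc_mbx_rq_ftr_rq_fcpi, req_ftrs, 1);
 *	... issue the mailbox command and wait for completion ...
 *	if (!bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, req_ftrs))
 *		return -EIO;
 */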
1710
1711/* Mailbox Completion Queue Error Messages */
1712#define MB_CQE_STATUS_SUCCESS 0x0
1713#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
1714#define MB_CQE_STATUS_INVALID_PARAMETER 0x2
1715#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3
1716#define MB_CQE_STATUS_QUEUE_FLUSHING 0x4
1717#define MB_CQE_STATUS_DMA_FAILED 0x5
1718
1719/* mailbox queue entry structure */
1720struct lpfc_mqe {
1721 uint32_t word0;
1722#define lpfc_mqe_status_SHIFT 16
1723#define lpfc_mqe_status_MASK 0x0000FFFF
1724#define lpfc_mqe_status_WORD word0
1725#define lpfc_mqe_command_SHIFT 8
1726#define lpfc_mqe_command_MASK 0x000000FF
1727#define lpfc_mqe_command_WORD word0
1728 union {
1729 uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1];
1730 /* sli4 mailbox commands */
1731 struct lpfc_mbx_sli4_config sli4_config;
1732 struct lpfc_mbx_init_vfi init_vfi;
1733 struct lpfc_mbx_reg_vfi reg_vfi;
1734 struct lpfc_mbx_unreg_vfi unreg_vfi;
1735 struct lpfc_mbx_init_vpi init_vpi;
1736 struct lpfc_mbx_resume_rpi resume_rpi;
1737 struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
1738 struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
1739 struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
1740 struct lpfc_mbx_reg_fcfi reg_fcfi;
1741 struct lpfc_mbx_unreg_fcfi unreg_fcfi;
1742 struct lpfc_mbx_mq_create mq_create;
1743 struct lpfc_mbx_eq_create eq_create;
1744 struct lpfc_mbx_cq_create cq_create;
1745 struct lpfc_mbx_wq_create wq_create;
1746 struct lpfc_mbx_rq_create rq_create;
1747 struct lpfc_mbx_mq_destroy mq_destroy;
1748 struct lpfc_mbx_eq_destroy eq_destroy;
1749 struct lpfc_mbx_cq_destroy cq_destroy;
1750 struct lpfc_mbx_wq_destroy wq_destroy;
1751 struct lpfc_mbx_rq_destroy rq_destroy;
1752 struct lpfc_mbx_post_sgl_pages post_sgl_pages;
1753 struct lpfc_mbx_nembed_cmd nembed_cmd;
1754 struct lpfc_mbx_read_rev read_rev;
1755 struct lpfc_mbx_read_vpi read_vpi;
1756 struct lpfc_mbx_read_config rd_config;
1757 struct lpfc_mbx_request_features req_ftrs;
1758 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
1759 struct lpfc_mbx_nop nop;
1760 } un;
1761};
1762
1763struct lpfc_mcqe {
1764 uint32_t word0;
1765#define lpfc_mcqe_status_SHIFT 0
1766#define lpfc_mcqe_status_MASK 0x0000FFFF
1767#define lpfc_mcqe_status_WORD word0
1768#define lpfc_mcqe_ext_status_SHIFT 16
1769#define lpfc_mcqe_ext_status_MASK 0x0000FFFF
1770#define lpfc_mcqe_ext_status_WORD word0
1771 uint32_t mcqe_tag0;
1772 uint32_t mcqe_tag1;
1773 uint32_t trailer;
1774#define lpfc_trailer_valid_SHIFT 31
1775#define lpfc_trailer_valid_MASK 0x00000001
1776#define lpfc_trailer_valid_WORD trailer
1777#define lpfc_trailer_async_SHIFT 30
1778#define lpfc_trailer_async_MASK 0x00000001
1779#define lpfc_trailer_async_WORD trailer
1780#define lpfc_trailer_hpi_SHIFT 29
1781#define lpfc_trailer_hpi_MASK 0x00000001
1782#define lpfc_trailer_hpi_WORD trailer
1783#define lpfc_trailer_completed_SHIFT 28
1784#define lpfc_trailer_completed_MASK 0x00000001
1785#define lpfc_trailer_completed_WORD trailer
1786#define lpfc_trailer_consumed_SHIFT 27
1787#define lpfc_trailer_consumed_MASK 0x00000001
1788#define lpfc_trailer_consumed_WORD trailer
1789#define lpfc_trailer_type_SHIFT 16
1790#define lpfc_trailer_type_MASK 0x000000FF
1791#define lpfc_trailer_type_WORD trailer
1792#define lpfc_trailer_code_SHIFT 8
1793#define lpfc_trailer_code_MASK 0x000000FF
1794#define lpfc_trailer_code_WORD trailer
1795#define LPFC_TRAILER_CODE_LINK 0x1
1796#define LPFC_TRAILER_CODE_FCOE 0x2
1797#define LPFC_TRAILER_CODE_DCBX 0x3
1798};
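/*
 * A consumer normally checks the trailer bits before acting on an MCQE;
 * a sketch of the dispatch pattern this layout suggests:
 *
 *	if (!bf_get(lpfc_trailer_valid, mcqe))
 *		return;
 *	if (bf_get(lpfc_trailer_async, mcqe)) {
 *		switch (bf_get(lpfc_trailer_code, mcqe)) {
 *		case LPFC_TRAILER_CODE_LINK:
 *		case LPFC_TRAILER_CODE_FCOE:
 *		case LPFC_TRAILER_CODE_DCBX:
 *			... route to the matching async event handler ...
 *		}
 *	}
 */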
1799
1800struct lpfc_acqe_link {
1801 uint32_t word0;
1802#define lpfc_acqe_link_speed_SHIFT 24
1803#define lpfc_acqe_link_speed_MASK 0x000000FF
1804#define lpfc_acqe_link_speed_WORD word0
1805#define LPFC_ASYNC_LINK_SPEED_ZERO 0x0
1806#define LPFC_ASYNC_LINK_SPEED_10MBPS 0x1
1807#define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2
1808#define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3
1809#define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4
1810#define lpfc_acqe_link_duplex_SHIFT 16
1811#define lpfc_acqe_link_duplex_MASK 0x000000FF
1812#define lpfc_acqe_link_duplex_WORD word0
1813#define LPFC_ASYNC_LINK_DUPLEX_NONE 0x0
1814#define LPFC_ASYNC_LINK_DUPLEX_HALF 0x1
1815#define LPFC_ASYNC_LINK_DUPLEX_FULL 0x2
1816#define lpfc_acqe_link_status_SHIFT 8
1817#define lpfc_acqe_link_status_MASK 0x000000FF
1818#define lpfc_acqe_link_status_WORD word0
1819#define LPFC_ASYNC_LINK_STATUS_DOWN 0x0
1820#define LPFC_ASYNC_LINK_STATUS_UP 0x1
1821#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2
1822#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3
1823#define lpfc_acqe_link_physical_SHIFT 0
1824#define lpfc_acqe_link_physical_MASK 0x000000FF
1825#define lpfc_acqe_link_physical_WORD word0
1826#define LPFC_ASYNC_LINK_PORT_A 0x0
1827#define LPFC_ASYNC_LINK_PORT_B 0x1
1828 uint32_t word1;
1829#define lpfc_acqe_link_fault_SHIFT 0
1830#define lpfc_acqe_link_fault_MASK 0x000000FF
1831#define lpfc_acqe_link_fault_WORD word1
1832#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
1833#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
1834#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
1835 uint32_t event_tag;
1836 uint32_t trailer;
1837};
1838
1839struct lpfc_acqe_fcoe {
1840 uint32_t fcf_index;
1841 uint32_t word1;
1842#define lpfc_acqe_fcoe_fcf_count_SHIFT 0
1843#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF
1844#define lpfc_acqe_fcoe_fcf_count_WORD word1
1845#define lpfc_acqe_fcoe_event_type_SHIFT 16
1846#define lpfc_acqe_fcoe_event_type_MASK 0x0000FFFF
1847#define lpfc_acqe_fcoe_event_type_WORD word1
1848#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1
1849#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2
1850#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3
1851 uint32_t event_tag;
1852 uint32_t trailer;
1853};
1854
1855struct lpfc_acqe_dcbx {
1856 uint32_t tlv_ttl;
1857 uint32_t reserved;
1858 uint32_t event_tag;
1859 uint32_t trailer;
1860};
1861
1862/*
1863 * Define the bootstrap mailbox (bmbx) region used to communicate
1864 * mailbox commands between the host and the port. The mailbox
1865 * consists of a 256-byte payload area followed by a 16-byte
1866 * completion queue entry.
1867 */
1868struct lpfc_bmbx_create {
1869 struct lpfc_mqe mqe;
1870 struct lpfc_mcqe mcqe;
1871};
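/*
 * Given the layout described above (256-byte mailbox payload followed by
 * a 16-byte completion entry), a compile-time guard one might add,
 * assuming LPFC_SLI4_MB_WORD_COUNT works out to 64 32-bit words:
 *
 *	BUILD_BUG_ON(sizeof(struct lpfc_mqe) != 256);
 *	BUILD_BUG_ON(sizeof(struct lpfc_mcqe) != 16);
 */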
1872
1873#define SGL_ALIGN_SZ 64
1874#define SGL_PAGE_SIZE 4096
1875/* align SGL addr on a size boundary - adjust address up */
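/*
 * "Adjust address up" refers to the usual power-of-two round-up idiom,
 * e.g. for an address held in an integer type:
 *
 *	aligned = (addr + SGL_ALIGN_SZ - 1) & ~((uintptr_t)SGL_ALIGN_SZ - 1);
 */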
1876#define NO_XRI ((uint16_t)-1)
1877struct wqe_common {
1878 uint32_t word6;
1879#define wqe_xri_SHIFT 0
1880#define wqe_xri_MASK 0x0000FFFF
1881#define wqe_xri_WORD word6
1882#define wqe_ctxt_tag_SHIFT 16
1883#define wqe_ctxt_tag_MASK 0x0000FFFF
1884#define wqe_ctxt_tag_WORD word6
1885 uint32_t word7;
1886#define wqe_ct_SHIFT 2
1887#define wqe_ct_MASK 0x00000003
1888#define wqe_ct_WORD word7
1889#define wqe_status_SHIFT 4
1890#define wqe_status_MASK 0x0000000f
1891#define wqe_status_WORD word7
1892#define wqe_cmnd_SHIFT 8
1893#define wqe_cmnd_MASK 0x000000ff
1894#define wqe_cmnd_WORD word7
1895#define wqe_class_SHIFT 16
1896#define wqe_class_MASK 0x00000007
1897#define wqe_class_WORD word7
1898#define wqe_pu_SHIFT 20
1899#define wqe_pu_MASK 0x00000003
1900#define wqe_pu_WORD word7
1901#define wqe_erp_SHIFT 22
1902#define wqe_erp_MASK 0x00000001
1903#define wqe_erp_WORD word7
1904#define wqe_lnk_SHIFT 23
1905#define wqe_lnk_MASK 0x00000001
1906#define wqe_lnk_WORD word7
1907#define wqe_tmo_SHIFT 24
1908#define wqe_tmo_MASK 0x000000ff
1909#define wqe_tmo_WORD word7
1910 uint32_t abort_tag; /* word 8 in WQE */
1911 uint32_t word9;
1912#define wqe_reqtag_SHIFT 0
1913#define wqe_reqtag_MASK 0x0000FFFF
1914#define wqe_reqtag_WORD word9
1915#define wqe_rcvoxid_SHIFT 16
1916#define wqe_rcvoxid_MASK 0x0000FFFF
1917#define wqe_rcvoxid_WORD word9
1918 uint32_t word10;
1919#define wqe_pri_SHIFT 16
1920#define wqe_pri_MASK 0x00000007
1921#define wqe_pri_WORD word10
1922#define wqe_pv_SHIFT 19
1923#define wqe_pv_MASK 0x00000001
1924#define wqe_pv_WORD word10
1925#define wqe_xc_SHIFT 21
1926#define wqe_xc_MASK 0x00000001
1927#define wqe_xc_WORD word10
1928#define wqe_ccpe_SHIFT 23
1929#define wqe_ccpe_MASK 0x00000001
1930#define wqe_ccpe_WORD word10
1931#define wqe_ccp_SHIFT 24
1932#define wqe_ccp_MASK 0x000000ff
1933#define wqe_ccp_WORD word10
1934 uint32_t word11;
1935#define wqe_cmd_type_SHIFT 0
1936#define wqe_cmd_type_MASK 0x0000000f
1937#define wqe_cmd_type_WORD word11
1938#define wqe_wqec_SHIFT 7
1939#define wqe_wqec_MASK 0x00000001
1940#define wqe_wqec_WORD word11
1941#define wqe_cqid_SHIFT 16
1942#define wqe_cqid_MASK 0x000003ff
1943#define wqe_cqid_WORD word11
1944};
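/*
 * Words 6-11 are common to every WQE type; a sketch of the fields a
 * submission path typically fills in, with com pointing at the wqe_com
 * member of a specific WQE and the values assumed caller-provided:
 *
 *	bf_set(wqe_xri, com, xri);
 *	bf_set(wqe_ctxt_tag, com, rpi);
 *	bf_set(wqe_cmnd, com, cmnd);
 *	bf_set(wqe_tmo, com, timeout);
 *	bf_set(wqe_reqtag, com, iotag);
 *	bf_set(wqe_cqid, com, cq_id);
 */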
1945
1946struct wqe_did {
1947 uint32_t word5;
1948#define wqe_els_did_SHIFT 0
1949#define wqe_els_did_MASK 0x00FFFFFF
1950#define wqe_els_did_WORD word5
1951#define wqe_xmit_bls_ar_SHIFT 30
1952#define wqe_xmit_bls_ar_MASK 0x00000001
1953#define wqe_xmit_bls_ar_WORD word5
1954#define wqe_xmit_bls_xo_SHIFT 31
1955#define wqe_xmit_bls_xo_MASK 0x00000001
1956#define wqe_xmit_bls_xo_WORD word5
1957};
1958
1959struct els_request64_wqe {
1960 struct ulp_bde64 bde;
1961 uint32_t payload_len;
1962 uint32_t word4;
1963#define els_req64_sid_SHIFT 0
1964#define els_req64_sid_MASK 0x00FFFFFF
1965#define els_req64_sid_WORD word4
1966#define els_req64_sp_SHIFT 24
1967#define els_req64_sp_MASK 0x00000001
1968#define els_req64_sp_WORD word4
1969#define els_req64_vf_SHIFT 25
1970#define els_req64_vf_MASK 0x00000001
1971#define els_req64_vf_WORD word4
1972 struct wqe_did wqe_dest;
1973 struct wqe_common wqe_com; /* words 6-11 */
1974 uint32_t word12;
1975#define els_req64_vfid_SHIFT 1
1976#define els_req64_vfid_MASK 0x00000FFF
1977#define els_req64_vfid_WORD word12
1978#define els_req64_pri_SHIFT 13
1979#define els_req64_pri_MASK 0x00000007
1980#define els_req64_pri_WORD word12
1981 uint32_t word13;
1982#define els_req64_hopcnt_SHIFT 24
1983#define els_req64_hopcnt_MASK 0x000000ff
1984#define els_req64_hopcnt_WORD word13
1985 uint32_t reserved[2];
1986};
1987
1988struct xmit_els_rsp64_wqe {
1989 struct ulp_bde64 bde;
1990 uint32_t rsvd3;
1991 uint32_t rsvd4;
1992 struct wqe_did wqe_dest;
1993 struct wqe_common wqe_com; /* words 6-11 */
1994 uint32_t rsvd_12_15[4];
1995};
1996
1997struct xmit_bls_rsp64_wqe {
1998 uint32_t payload0;
1999 uint32_t word1;
2000#define xmit_bls_rsp64_rxid_SHIFT 0
2001#define xmit_bls_rsp64_rxid_MASK 0x0000ffff
2002#define xmit_bls_rsp64_rxid_WORD word1
2003#define xmit_bls_rsp64_oxid_SHIFT 16
2004#define xmit_bls_rsp64_oxid_MASK 0x0000ffff
2005#define xmit_bls_rsp64_oxid_WORD word1
2006 uint32_t word2;
2007#define xmit_bls_rsp64_seqcntlo_SHIFT 0
2008#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff
2009#define xmit_bls_rsp64_seqcntlo_WORD word2
2010#define xmit_bls_rsp64_seqcnthi_SHIFT 16
2011#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff
2012#define xmit_bls_rsp64_seqcnthi_WORD word2
2013 uint32_t rsrvd3;
2014 uint32_t rsrvd4;
2015 struct wqe_did wqe_dest;
2016 struct wqe_common wqe_com; /* words 6-11 */
2017 uint32_t rsvd_12_15[4];
2018};
2019struct wqe_rctl_dfctl {
2020 uint32_t word5;
2021#define wqe_si_SHIFT 2
2022#define wqe_si_MASK 0x00000001
2023#define wqe_si_WORD word5
2024#define wqe_la_SHIFT 3
2025#define wqe_la_MASK 0x00000001
2026#define wqe_la_WORD word5
2027#define wqe_ls_SHIFT 7
2028#define wqe_ls_MASK 0x00000001
2029#define wqe_ls_WORD word5
2030#define wqe_dfctl_SHIFT 8
2031#define wqe_dfctl_MASK 0x000000ff
2032#define wqe_dfctl_WORD word5
2033#define wqe_type_SHIFT 16
2034#define wqe_type_MASK 0x000000ff
2035#define wqe_type_WORD word5
2036#define wqe_rctl_SHIFT 24
2037#define wqe_rctl_MASK 0x000000ff
2038#define wqe_rctl_WORD word5
2039};
2040
2041struct xmit_seq64_wqe {
2042 struct ulp_bde64 bde;
2043 uint32_t payload_offset;
2044 uint32_t relative_offset;
2045 struct wqe_rctl_dfctl wqe_ctl;
2046 struct wqe_common wqe_com; /* words 6-11 */
2047 /* Note: word10 layout differs for this WQE type - REVISIT */
2048 uint32_t xmit_len;
2049 uint32_t rsvd_12_15[3];
2050};
2051struct xmit_bcast64_wqe {
2052 struct ulp_bde64 bde;
2053 uint32_t payload_len;
2054 uint32_t rsvd4;
2055 struct wqe_rctl_dfctl wqe_ctl; /* word 5 */
2056 struct wqe_common wqe_com; /* words 6-11 */
2057 uint32_t rsvd_12_15[4];
2058};
2059
2060struct gen_req64_wqe {
2061 struct ulp_bde64 bde;
2062 uint32_t command_len;
2063 uint32_t payload_len;
2064 struct wqe_rctl_dfctl wqe_ctl; /* word 5 */
2065 struct wqe_common wqe_com; /* words 6-11 */
2066 uint32_t rsvd_12_15[4];
2067};
2068
2069struct create_xri_wqe {
2070 uint32_t rsrvd[5]; /* words 0-4 */
2071 struct wqe_did wqe_dest; /* word 5 */
2072 struct wqe_common wqe_com; /* words 6-11 */
2073 uint32_t rsvd_12_15[4]; /* word 12-15 */
2074};
2075
2076#define T_REQUEST_TAG 3
2077#define T_XRI_TAG 1
2078
2079struct abort_cmd_wqe {
2080 uint32_t rsrvd[3];
2081 uint32_t word3;
2082#define abort_cmd_ia_SHIFT 0
2083#define abort_cmd_ia_MASK 0x00000001
2084#define abort_cmd_ia_WORD word3
2085#define abort_cmd_criteria_SHIFT 8
2086#define abort_cmd_criteria_MASK 0x000000ff
2087#define abort_cmd_criteria_WORD word3
2088 uint32_t rsrvd4;
2089 uint32_t rsrvd5;
2090 struct wqe_common wqe_com; /* words 6-11 */
2091 uint32_t rsvd_12_15[4]; /* word 12-15 */
2092};
2093
2094struct fcp_iwrite64_wqe {
2095 struct ulp_bde64 bde;
2096 uint32_t payload_len;
2097 uint32_t total_xfer_len;
2098 uint32_t initial_xfer_len;
2099 struct wqe_common wqe_com; /* words 6-11 */
2100 uint32_t rsvd_12_15[4]; /* word 12-15 */
2101};
2102
2103struct fcp_iread64_wqe {
2104 struct ulp_bde64 bde;
2105 uint32_t payload_len; /* word 3 */
2106 uint32_t total_xfer_len; /* word 4 */
2107 uint32_t rsrvd5; /* word 5 */
2108 struct wqe_common wqe_com; /* words 6-11 */
2109 uint32_t rsvd_12_15[4]; /* word 12-15 */
2110};
2111
2112struct fcp_icmnd64_wqe {
2113 struct ulp_bde64 bde; /* words 0-2 */
2114 uint32_t rsrvd[3]; /* words 3-5 */
2115 struct wqe_common wqe_com; /* words 6-11 */
2116 uint32_t rsvd_12_15[4]; /* word 12-15 */
2117};
2118
2119
2120union lpfc_wqe {
2121 uint32_t words[16];
2122 struct lpfc_wqe_generic generic;
2123 struct fcp_icmnd64_wqe fcp_icmd;
2124 struct fcp_iread64_wqe fcp_iread;
2125 struct fcp_iwrite64_wqe fcp_iwrite;
2126 struct abort_cmd_wqe abort_cmd;
2127 struct create_xri_wqe create_xri;
2128 struct xmit_bcast64_wqe xmit_bcast64;
2129 struct xmit_seq64_wqe xmit_sequence;
2130 struct xmit_bls_rsp64_wqe xmit_bls_rsp;
2131 struct xmit_els_rsp64_wqe xmit_els_rsp;
2132 struct els_request64_wqe els_req;
2133 struct gen_req64_wqe gen_req;
2134};
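/*
 * Every WQE variant above occupies exactly 16 32-bit words; words[16]
 * makes that explicit, and a compile-time guard could assert it:
 *
 *	BUILD_BUG_ON(sizeof(union lpfc_wqe) != 16 * sizeof(uint32_t));
 */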
2135
2136#define FCP_COMMAND 0x0
2137#define FCP_COMMAND_DATA_OUT 0x1
2138#define ELS_COMMAND_NON_FIP 0xC
2139#define ELS_COMMAND_FIP 0xD
2140#define OTHER_COMMAND 0x8
2141
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 86d1bdcbf2d..2f5907f92ee 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -34,8 +34,10 @@
34#include <scsi/scsi_host.h> 34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h> 35#include <scsi/scsi_transport_fc.h>
36 36
37#include "lpfc_hw4.h"
37#include "lpfc_hw.h" 38#include "lpfc_hw.h"
38#include "lpfc_sli.h" 39#include "lpfc_sli.h"
40#include "lpfc_sli4.h"
39#include "lpfc_nl.h" 41#include "lpfc_nl.h"
40#include "lpfc_disc.h" 42#include "lpfc_disc.h"
41#include "lpfc_scsi.h" 43#include "lpfc_scsi.h"
@@ -51,9 +53,23 @@ char *_dump_buf_dif;
51unsigned long _dump_buf_dif_order; 53unsigned long _dump_buf_dif_order;
52spinlock_t _dump_buf_lock; 54spinlock_t _dump_buf_lock;
53 55
54static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
55static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 56static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
56static int lpfc_post_rcv_buf(struct lpfc_hba *); 57static int lpfc_post_rcv_buf(struct lpfc_hba *);
58static int lpfc_sli4_queue_create(struct lpfc_hba *);
59static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
60static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
61static int lpfc_setup_endian_order(struct lpfc_hba *);
62static int lpfc_sli4_read_config(struct lpfc_hba *);
63static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
64static void lpfc_free_sgl_list(struct lpfc_hba *);
65static int lpfc_init_sgl_list(struct lpfc_hba *);
66static int lpfc_init_active_sgl_array(struct lpfc_hba *);
67static void lpfc_free_active_sgl(struct lpfc_hba *);
68static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
69static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
70static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
71static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
72static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
57 73
58static struct scsi_transport_template *lpfc_transport_template = NULL; 74static struct scsi_transport_template *lpfc_transport_template = NULL;
59static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 75static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -92,7 +108,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
92 return -ENOMEM; 108 return -ENOMEM;
93 } 109 }
94 110
95 mb = &pmb->mb; 111 mb = &pmb->u.mb;
96 phba->link_state = LPFC_INIT_MBX_CMDS; 112 phba->link_state = LPFC_INIT_MBX_CMDS;
97 113
98 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 114 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
@@ -205,6 +221,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
205 mb->mbxCommand, mb->mbxStatus); 221 mb->mbxCommand, mb->mbxStatus);
206 mb->un.varDmp.word_cnt = 0; 222 mb->un.varDmp.word_cnt = 0;
207 } 223 }
224 /* dump mem may return a zero when finished or we got a
 225 * mailbox error; either way we are done.
226 */
227 if (mb->un.varDmp.word_cnt == 0)
228 break;
208 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 229 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
209 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 230 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
210 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 231 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
@@ -233,7 +254,7 @@ out_free_mbox:
233static void 254static void
234lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 255lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
235{ 256{
236 if (pmboxq->mb.mbxStatus == MBX_SUCCESS) 257 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
237 phba->temp_sensor_support = 1; 258 phba->temp_sensor_support = 1;
238 else 259 else
239 phba->temp_sensor_support = 0; 260 phba->temp_sensor_support = 0;
@@ -260,7 +281,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
260 /* character array used for decoding dist type. */ 281 /* character array used for decoding dist type. */
261 char dist_char[] = "nabx"; 282 char dist_char[] = "nabx";
262 283
263 if (pmboxq->mb.mbxStatus != MBX_SUCCESS) { 284 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
264 mempool_free(pmboxq, phba->mbox_mem_pool); 285 mempool_free(pmboxq, phba->mbox_mem_pool);
265 return; 286 return;
266 } 287 }
@@ -268,7 +289,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
268 prg = (struct prog_id *) &prog_id_word; 289 prg = (struct prog_id *) &prog_id_word;
269 290
270 /* word 7 contain option rom version */ 291 /* word 7 contain option rom version */
271 prog_id_word = pmboxq->mb.un.varWords[7]; 292 prog_id_word = pmboxq->u.mb.un.varWords[7];
272 293
273 /* Decode the Option rom version word to a readable string */ 294 /* Decode the Option rom version word to a readable string */
274 if (prg->dist < 4) 295 if (prg->dist < 4)
@@ -325,7 +346,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
325 phba->link_state = LPFC_HBA_ERROR; 346 phba->link_state = LPFC_HBA_ERROR;
326 return -ENOMEM; 347 return -ENOMEM;
327 } 348 }
328 mb = &pmb->mb; 349 mb = &pmb->u.mb;
329 350
330 /* Get login parameters for NID. */ 351 /* Get login parameters for NID. */
331 lpfc_read_sparam(phba, pmb, 0); 352 lpfc_read_sparam(phba, pmb, 0);
@@ -364,6 +385,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
364 /* Update the fc_host data structures with new wwn. */ 385 /* Update the fc_host data structures with new wwn. */
365 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 386 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
366 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 387 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
388 fc_host_max_npiv_vports(shost) = phba->max_vpi;
367 389
368 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 390 /* If no serial number in VPD data, use low 6 bytes of WWNN */
369 /* This should be consolidated into parse_vpd ? - mr */ 391 /* This should be consolidated into parse_vpd ? - mr */
@@ -460,17 +482,18 @@ lpfc_config_port_post(struct lpfc_hba *phba)
460 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 482 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
461 "0352 Config MSI mailbox command " 483 "0352 Config MSI mailbox command "
462 "failed, mbxCmd x%x, mbxStatus x%x\n", 484 "failed, mbxCmd x%x, mbxStatus x%x\n",
463 pmb->mb.mbxCommand, pmb->mb.mbxStatus); 485 pmb->u.mb.mbxCommand,
486 pmb->u.mb.mbxStatus);
464 mempool_free(pmb, phba->mbox_mem_pool); 487 mempool_free(pmb, phba->mbox_mem_pool);
465 return -EIO; 488 return -EIO;
466 } 489 }
467 } 490 }
468 491
492 spin_lock_irq(&phba->hbalock);
469 /* Initialize ERATT handling flag */ 493 /* Initialize ERATT handling flag */
470 phba->hba_flag &= ~HBA_ERATT_HANDLED; 494 phba->hba_flag &= ~HBA_ERATT_HANDLED;
471 495
472 /* Enable appropriate host interrupts */ 496 /* Enable appropriate host interrupts */
473 spin_lock_irq(&phba->hbalock);
474 status = readl(phba->HCregaddr); 497 status = readl(phba->HCregaddr);
475 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 498 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
476 if (psli->num_rings > 0) 499 if (psli->num_rings > 0)
@@ -571,16 +594,20 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
571{ 594{
572 struct lpfc_vport **vports; 595 struct lpfc_vport **vports;
573 int i; 596 int i;
574 /* Disable interrupts */ 597
575 writel(0, phba->HCregaddr); 598 if (phba->sli_rev <= LPFC_SLI_REV3) {
576 readl(phba->HCregaddr); /* flush */ 599 /* Disable interrupts */
600 writel(0, phba->HCregaddr);
601 readl(phba->HCregaddr); /* flush */
602 }
577 603
578 if (phba->pport->load_flag & FC_UNLOADING) 604 if (phba->pport->load_flag & FC_UNLOADING)
579 lpfc_cleanup_discovery_resources(phba->pport); 605 lpfc_cleanup_discovery_resources(phba->pport);
580 else { 606 else {
581 vports = lpfc_create_vport_work_array(phba); 607 vports = lpfc_create_vport_work_array(phba);
582 if (vports != NULL) 608 if (vports != NULL)
583 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 609 for (i = 0; i <= phba->max_vports &&
610 vports[i] != NULL; i++)
584 lpfc_cleanup_discovery_resources(vports[i]); 611 lpfc_cleanup_discovery_resources(vports[i]);
585 lpfc_destroy_vport_work_array(phba, vports); 612 lpfc_destroy_vport_work_array(phba, vports);
586 } 613 }
@@ -588,7 +615,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
588} 615}
589 616
590/** 617/**
591 * lpfc_hba_down_post - Perform lpfc uninitialization after HBA reset 618 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
592 * @phba: pointer to lpfc HBA data structure. 619 * @phba: pointer to lpfc HBA data structure.
593 * 620 *
594 * This routine will do uninitialization after the HBA is reset when bringing 621 * This routine will do uninitialization after the HBA is reset when bringing
@@ -598,8 +625,8 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
598 * 0 - success. 625 * 0 - success.
599 * Any other value - error. 626 * Any other value - error.
600 **/ 627 **/
601int 628static int
602lpfc_hba_down_post(struct lpfc_hba *phba) 629lpfc_hba_down_post_s3(struct lpfc_hba *phba)
603{ 630{
604 struct lpfc_sli *psli = &phba->sli; 631 struct lpfc_sli *psli = &phba->sli;
605 struct lpfc_sli_ring *pring; 632 struct lpfc_sli_ring *pring;
@@ -642,6 +669,77 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
642 669
643 return 0; 670 return 0;
644} 671}
672/**
673 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
674 * @phba: pointer to lpfc HBA data structure.
675 *
 676 * This routine will do uninitialization after the HBA is reset when bringing
677 * down the SLI Layer.
678 *
679 * Return codes
 680 * 0 - success.
681 * Any other value - error.
682 **/
683static int
684lpfc_hba_down_post_s4(struct lpfc_hba *phba)
685{
686 struct lpfc_scsi_buf *psb, *psb_next;
687 LIST_HEAD(aborts);
688 int ret;
689 unsigned long iflag = 0;
690 ret = lpfc_hba_down_post_s3(phba);
691 if (ret)
692 return ret;
693 /* At this point in time the HBA is either reset or DOA. Either
694 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
695 * on the lpfc_sgl_list so that it can either be freed if the
696 * driver is unloading or reposted if the driver is restarting
697 * the port.
698 */
699 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
 700 /* scsi_buf_list */
701 /* abts_sgl_list_lock required because worker thread uses this
702 * list.
703 */
704 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
705 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
706 &phba->sli4_hba.lpfc_sgl_list);
707 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
708 /* abts_scsi_buf_list_lock required because worker thread uses this
709 * list.
710 */
711 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
712 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
713 &aborts);
714 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
715 spin_unlock_irq(&phba->hbalock);
716
717 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
718 psb->pCmd = NULL;
719 psb->status = IOSTAT_SUCCESS;
720 }
721 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
722 list_splice(&aborts, &phba->lpfc_scsi_buf_list);
723 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
724 return 0;
725}
726
727/**
728 * lpfc_hba_down_post - Wrapper func for hba down post routine
729 * @phba: pointer to lpfc HBA data structure.
730 *
731 * This routine wraps the actual SLI3 or SLI4 routine for performing
 732 * uninitialization after the HBA is reset when bringing down the SLI Layer.
733 *
734 * Return codes
 735 * 0 - success.
736 * Any other value - error.
737 **/
738int
739lpfc_hba_down_post(struct lpfc_hba *phba)
740{
741 return (*phba->lpfc_hba_down_post)(phba);
742}
645 743
646/** 744/**
647 * lpfc_hb_timeout - The HBA-timer timeout handler 745 * lpfc_hb_timeout - The HBA-timer timeout handler
@@ -809,7 +907,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
809 "taking this port offline.\n"); 907 "taking this port offline.\n");
810 908
811 spin_lock_irq(&phba->hbalock); 909 spin_lock_irq(&phba->hbalock);
812 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 910 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
813 spin_unlock_irq(&phba->hbalock); 911 spin_unlock_irq(&phba->hbalock);
814 912
815 lpfc_offline_prep(phba); 913 lpfc_offline_prep(phba);
@@ -834,13 +932,15 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
834 struct lpfc_sli *psli = &phba->sli; 932 struct lpfc_sli *psli = &phba->sli;
835 933
836 spin_lock_irq(&phba->hbalock); 934 spin_lock_irq(&phba->hbalock);
837 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 935 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
838 spin_unlock_irq(&phba->hbalock); 936 spin_unlock_irq(&phba->hbalock);
839 lpfc_offline_prep(phba); 937 lpfc_offline_prep(phba);
840 938
841 lpfc_offline(phba); 939 lpfc_offline(phba);
842 lpfc_reset_barrier(phba); 940 lpfc_reset_barrier(phba);
941 spin_lock_irq(&phba->hbalock);
843 lpfc_sli_brdreset(phba); 942 lpfc_sli_brdreset(phba);
943 spin_unlock_irq(&phba->hbalock);
844 lpfc_hba_down_post(phba); 944 lpfc_hba_down_post(phba);
845 lpfc_sli_brdready(phba, HS_MBRDY); 945 lpfc_sli_brdready(phba, HS_MBRDY);
846 lpfc_unblock_mgmt_io(phba); 946 lpfc_unblock_mgmt_io(phba);
@@ -849,6 +949,25 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
849} 949}
850 950
851/** 951/**
952 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
953 * @phba: pointer to lpfc hba data structure.
954 *
955 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
956 * other than Port Error 6 has been detected.
957 **/
958static void
959lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
960{
961 lpfc_offline_prep(phba);
962 lpfc_offline(phba);
963 lpfc_sli4_brdreset(phba);
964 lpfc_hba_down_post(phba);
965 lpfc_sli4_post_status_check(phba);
966 lpfc_unblock_mgmt_io(phba);
967 phba->link_state = LPFC_HBA_ERROR;
968}
969
970/**
852 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler 971 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
853 * @phba: pointer to lpfc hba data structure. 972 * @phba: pointer to lpfc hba data structure.
854 * 973 *
@@ -864,6 +983,16 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
864 struct lpfc_sli_ring *pring; 983 struct lpfc_sli_ring *pring;
865 struct lpfc_sli *psli = &phba->sli; 984 struct lpfc_sli *psli = &phba->sli;
866 985
986 /* If the pci channel is offline, ignore possible errors,
987 * since we cannot communicate with the pci card anyway.
988 */
989 if (pci_channel_offline(phba->pcidev)) {
990 spin_lock_irq(&phba->hbalock);
991 phba->hba_flag &= ~DEFER_ERATT;
992 spin_unlock_irq(&phba->hbalock);
993 return;
994 }
995
867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
868 "0479 Deferred Adapter Hardware Error " 997 "0479 Deferred Adapter Hardware Error "
869 "Data: x%x x%x x%x\n", 998 "Data: x%x x%x x%x\n",
@@ -871,7 +1000,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
871 phba->work_status[0], phba->work_status[1]); 1000 phba->work_status[0], phba->work_status[1]);
872 1001
873 spin_lock_irq(&phba->hbalock); 1002 spin_lock_irq(&phba->hbalock);
874 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1003 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
875 spin_unlock_irq(&phba->hbalock); 1004 spin_unlock_irq(&phba->hbalock);
876 1005
877 1006
@@ -909,13 +1038,30 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
909 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) 1038 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
910 phba->work_hs = old_host_status & ~HS_FFER1; 1039 phba->work_hs = old_host_status & ~HS_FFER1;
911 1040
1041 spin_lock_irq(&phba->hbalock);
912 phba->hba_flag &= ~DEFER_ERATT; 1042 phba->hba_flag &= ~DEFER_ERATT;
1043 spin_unlock_irq(&phba->hbalock);
913 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 1044 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
914 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 1045 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
915} 1046}
916 1047
1048static void
1049lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1050{
1051 struct lpfc_board_event_header board_event;
1052 struct Scsi_Host *shost;
1053
1054 board_event.event_type = FC_REG_BOARD_EVENT;
1055 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1056 shost = lpfc_shost_from_vport(phba->pport);
1057 fc_host_post_vendor_event(shost, fc_get_event_number(),
1058 sizeof(board_event),
1059 (char *) &board_event,
1060 LPFC_NL_VENDOR_ID);
1061}
1062
917/** 1063/**
918 * lpfc_handle_eratt - The HBA hardware error handler 1064 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
919 * @phba: pointer to lpfc hba data structure. 1065 * @phba: pointer to lpfc hba data structure.
920 * 1066 *
921 * This routine is invoked to handle the following HBA hardware error 1067 * This routine is invoked to handle the following HBA hardware error
@@ -924,8 +1070,8 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
924 * 2 - DMA ring index out of range 1070 * 2 - DMA ring index out of range
925 * 3 - Mailbox command came back as unknown 1071 * 3 - Mailbox command came back as unknown
926 **/ 1072 **/
927void 1073static void
928lpfc_handle_eratt(struct lpfc_hba *phba) 1074lpfc_handle_eratt_s3(struct lpfc_hba *phba)
929{ 1075{
930 struct lpfc_vport *vport = phba->pport; 1076 struct lpfc_vport *vport = phba->pport;
931 struct lpfc_sli *psli = &phba->sli; 1077 struct lpfc_sli *psli = &phba->sli;
@@ -934,24 +1080,23 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
934 unsigned long temperature; 1080 unsigned long temperature;
935 struct temp_event temp_event_data; 1081 struct temp_event temp_event_data;
936 struct Scsi_Host *shost; 1082 struct Scsi_Host *shost;
937 struct lpfc_board_event_header board_event;
938 1083
939 /* If the pci channel is offline, ignore possible errors, 1084 /* If the pci channel is offline, ignore possible errors,
940 * since we cannot communicate with the pci card anyway. */ 1085 * since we cannot communicate with the pci card anyway.
941 if (pci_channel_offline(phba->pcidev)) 1086 */
1087 if (pci_channel_offline(phba->pcidev)) {
1088 spin_lock_irq(&phba->hbalock);
1089 phba->hba_flag &= ~DEFER_ERATT;
1090 spin_unlock_irq(&phba->hbalock);
942 return; 1091 return;
1092 }
1093
943 /* If resets are disabled then leave the HBA alone and return */ 1094 /* If resets are disabled then leave the HBA alone and return */
944 if (!phba->cfg_enable_hba_reset) 1095 if (!phba->cfg_enable_hba_reset)
945 return; 1096 return;
946 1097
947 /* Send an internal error event to mgmt application */ 1098 /* Send an internal error event to mgmt application */
948 board_event.event_type = FC_REG_BOARD_EVENT; 1099 lpfc_board_errevt_to_mgmt(phba);
949 board_event.subcategory = LPFC_EVENT_PORTINTERR;
950 shost = lpfc_shost_from_vport(phba->pport);
951 fc_host_post_vendor_event(shost, fc_get_event_number(),
952 sizeof(board_event),
953 (char *) &board_event,
954 LPFC_NL_VENDOR_ID);
955 1100
956 if (phba->hba_flag & DEFER_ERATT) 1101 if (phba->hba_flag & DEFER_ERATT)
957 lpfc_handle_deferred_eratt(phba); 1102 lpfc_handle_deferred_eratt(phba);
@@ -965,7 +1110,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
965 phba->work_status[0], phba->work_status[1]); 1110 phba->work_status[0], phba->work_status[1]);
966 1111
967 spin_lock_irq(&phba->hbalock); 1112 spin_lock_irq(&phba->hbalock);
968 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1113 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
969 spin_unlock_irq(&phba->hbalock); 1114 spin_unlock_irq(&phba->hbalock);
970 1115
971 /* 1116 /*
@@ -1037,6 +1182,65 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
1037} 1182}
1038 1183
1039/** 1184/**
1185 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1186 * @phba: pointer to lpfc hba data structure.
1187 *
1188 * This routine is invoked to handle the SLI4 HBA hardware error attention
1189 * conditions.
1190 **/
1191static void
1192lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1193{
1194 struct lpfc_vport *vport = phba->pport;
1195 uint32_t event_data;
1196 struct Scsi_Host *shost;
1197
1198 /* If the pci channel is offline, ignore possible errors, since
1199 * we cannot communicate with the pci card anyway.
1200 */
1201 if (pci_channel_offline(phba->pcidev))
1202 return;
1203 /* If resets are disabled then leave the HBA alone and return */
1204 if (!phba->cfg_enable_hba_reset)
1205 return;
1206
1207 /* Send an internal error event to mgmt application */
1208 lpfc_board_errevt_to_mgmt(phba);
1209
1210 /* For now, the actual action for SLI4 device handling is not
1211	 * specified yet; just treat it as an adapter hardware failure
1212 */
1213 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1214 "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
1215 phba->work_status[0], phba->work_status[1]);
1216
1217 event_data = FC_REG_DUMP_EVENT;
1218 shost = lpfc_shost_from_vport(vport);
1219 fc_host_post_vendor_event(shost, fc_get_event_number(),
1220 sizeof(event_data), (char *) &event_data,
1221 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1222
1223 lpfc_sli4_offline_eratt(phba);
1224}
1225
1226/**
1227 * lpfc_handle_eratt - Wrapper function for handling hba error attention
1228 * @phba: pointer to lpfc HBA data structure.
1229 *
1230 * This routine wraps the actual SLI3 or SLI4 HBA error attention handling
1231 * routine via the API jump table function pointer in the lpfc_hba struct.
1236 **/
1237void
1238lpfc_handle_eratt(struct lpfc_hba *phba)
1239{
1240 (*phba->lpfc_handle_eratt)(phba);
1241}
1242
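	/*
	 * Editorial sketch (not part of the patch): the indirect call above
	 * dispatches through a per-PCI-device-group function pointer. The
	 * pointers are installed by lpfc_init_api_table_setup(), shown later
	 * in this patch:
	 *
	 *	case LPFC_PCI_DEV_LP:
	 *		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
	 *		break;
	 *	case LPFC_PCI_DEV_OC:
	 *		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
	 *		break;
	 */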
1243/**
1040 * lpfc_handle_latt - The HBA link event handler 1244 * lpfc_handle_latt - The HBA link event handler
1041 * @phba: pointer to lpfc hba data structure. 1245 * @phba: pointer to lpfc hba data structure.
1042 * 1246 *
@@ -1137,7 +1341,7 @@ lpfc_handle_latt_err_exit:
1137 * 0 - pointer to the VPD passed in is NULL 1341 * 0 - pointer to the VPD passed in is NULL
1138 * 1 - success 1342 * 1 - success
1139 **/ 1343 **/
1140static int 1344int
1141lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1345lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1142{ 1346{
1143 uint8_t lenlo, lenhi; 1347 uint8_t lenlo, lenhi;
@@ -1292,6 +1496,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1292 uint16_t dev_id = phba->pcidev->device; 1496 uint16_t dev_id = phba->pcidev->device;
1293 int max_speed; 1497 int max_speed;
1294 int GE = 0; 1498 int GE = 0;
1499 int oneConnect = 0; /* default is not a oneConnect */
1295 struct { 1500 struct {
1296 char * name; 1501 char * name;
1297 int max_speed; 1502 int max_speed;
@@ -1437,6 +1642,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1437 case PCI_DEVICE_ID_PROTEUS_S: 1642 case PCI_DEVICE_ID_PROTEUS_S:
1438 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; 1643 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
1439 break; 1644 break;
1645 case PCI_DEVICE_ID_TIGERSHARK:
1646 oneConnect = 1;
1647 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
1648 break;
1649 case PCI_DEVICE_ID_TIGERSHARK_S:
1650 oneConnect = 1;
1651 m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"};
1652 break;
1440 default: 1653 default:
1441 m = (typeof(m)){ NULL }; 1654 m = (typeof(m)){ NULL };
1442 break; 1655 break;
@@ -1444,13 +1657,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1444 1657
1445 if (mdp && mdp[0] == '\0') 1658 if (mdp && mdp[0] == '\0')
1446 snprintf(mdp, 79,"%s", m.name); 1659 snprintf(mdp, 79,"%s", m.name);
1447 if (descp && descp[0] == '\0') 1660	/* OneConnect HBAs require special processing; they are all initiators
1448 snprintf(descp, 255, 1661	 * and we put the port number on the end.
1449 "Emulex %s %d%s %s %s", 1662	 */
1450 m.name, m.max_speed, 1663 if (descp && descp[0] == '\0') {
1451 (GE) ? "GE" : "Gb", 1664 if (oneConnect)
1452 m.bus, 1665 snprintf(descp, 255,
1453 (GE) ? "FCoE Adapter" : "Fibre Channel Adapter"); 1666 "Emulex OneConnect %s, FCoE Initiator, Port %s",
1667 m.name,
1668 phba->Port);
1669 else
1670 snprintf(descp, 255,
1671 "Emulex %s %d%s %s %s",
1672 m.name, m.max_speed,
1673 (GE) ? "GE" : "Gb",
1674 m.bus,
1675 (GE) ? "FCoE Adapter" :
1676 "Fibre Channel Adapter");
1677 }
1454} 1678}
1455 1679
1456/** 1680/**
@@ -1533,7 +1757,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1533 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1757 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1534 icmd->ulpLe = 1; 1758 icmd->ulpLe = 1;
1535 1759
1536 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { 1760 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
1761 IOCB_ERROR) {
1537 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1762 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1538 kfree(mp1); 1763 kfree(mp1);
1539 cnt++; 1764 cnt++;
@@ -1761,7 +1986,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
1761 * Lets wait for this to happen, if needed. 1986 * Lets wait for this to happen, if needed.
1762 */ 1987 */
1763 while (!list_empty(&vport->fc_nodes)) { 1988 while (!list_empty(&vport->fc_nodes)) {
1764
1765 if (i++ > 3000) { 1989 if (i++ > 3000) {
1766 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1990 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1767 "0233 Nodelist not empty\n"); 1991 "0233 Nodelist not empty\n");
@@ -1782,7 +2006,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
1782 /* Wait for any activity on ndlps to settle */ 2006 /* Wait for any activity on ndlps to settle */
1783 msleep(10); 2007 msleep(10);
1784 } 2008 }
1785 return;
1786} 2009}
1787 2010
1788/** 2011/**
@@ -1803,22 +2026,36 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
1803} 2026}
1804 2027
1805/** 2028/**
1806 * lpfc_stop_phba_timers - Stop all the timers associated with an HBA 2029 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
1807 * @phba: pointer to lpfc hba data structure. 2030 * @phba: pointer to lpfc hba data structure.
1808 * 2031 *
1809 * This routine stops all the timers associated with a HBA. This function is 2032 * This routine stops all the timers associated with a HBA. This function is
1810 * invoked before either putting a HBA offline or unloading the driver. 2033 * invoked before either putting a HBA offline or unloading the driver.
1811 **/ 2034 **/
1812static void 2035void
1813lpfc_stop_phba_timers(struct lpfc_hba *phba) 2036lpfc_stop_hba_timers(struct lpfc_hba *phba)
1814{ 2037{
1815 del_timer_sync(&phba->fcp_poll_timer);
1816 lpfc_stop_vport_timers(phba->pport); 2038 lpfc_stop_vport_timers(phba->pport);
1817 del_timer_sync(&phba->sli.mbox_tmo); 2039 del_timer_sync(&phba->sli.mbox_tmo);
1818 del_timer_sync(&phba->fabric_block_timer); 2040 del_timer_sync(&phba->fabric_block_timer);
1819 phba->hb_outstanding = 0;
1820 del_timer_sync(&phba->hb_tmofunc);
1821 del_timer_sync(&phba->eratt_poll); 2041 del_timer_sync(&phba->eratt_poll);
2042 del_timer_sync(&phba->hb_tmofunc);
2043 phba->hb_outstanding = 0;
2044
2045 switch (phba->pci_dev_grp) {
2046 case LPFC_PCI_DEV_LP:
2047 /* Stop any LightPulse device specific driver timers */
2048 del_timer_sync(&phba->fcp_poll_timer);
2049 break;
2050 case LPFC_PCI_DEV_OC:
2051		/* Stop any OneConnect device specific driver timers */
2052 break;
2053 default:
2054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2055 "0297 Invalid device group (x%x)\n",
2056 phba->pci_dev_grp);
2057 break;
2058 }
1822 return; 2059 return;
1823} 2060}
1824 2061
@@ -1878,14 +2115,21 @@ lpfc_online(struct lpfc_hba *phba)
1878 return 1; 2115 return 1;
1879 } 2116 }
1880 2117
1881 if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */ 2118 if (phba->sli_rev == LPFC_SLI_REV4) {
1882 lpfc_unblock_mgmt_io(phba); 2119 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
1883 return 1; 2120 lpfc_unblock_mgmt_io(phba);
2121 return 1;
2122 }
2123 } else {
2124 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2125 lpfc_unblock_mgmt_io(phba);
2126 return 1;
2127 }
1884 } 2128 }
1885 2129
1886 vports = lpfc_create_vport_work_array(phba); 2130 vports = lpfc_create_vport_work_array(phba);
1887 if (vports != NULL) 2131 if (vports != NULL)
1888 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2132 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1889 struct Scsi_Host *shost; 2133 struct Scsi_Host *shost;
1890 shost = lpfc_shost_from_vport(vports[i]); 2134 shost = lpfc_shost_from_vport(vports[i]);
1891 spin_lock_irq(shost->host_lock); 2135 spin_lock_irq(shost->host_lock);
@@ -1947,11 +2191,12 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1947 /* Issue an unreg_login to all nodes on all vports */ 2191 /* Issue an unreg_login to all nodes on all vports */
1948 vports = lpfc_create_vport_work_array(phba); 2192 vports = lpfc_create_vport_work_array(phba);
1949 if (vports != NULL) { 2193 if (vports != NULL) {
1950 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2194 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1951 struct Scsi_Host *shost; 2195 struct Scsi_Host *shost;
1952 2196
1953 if (vports[i]->load_flag & FC_UNLOADING) 2197 if (vports[i]->load_flag & FC_UNLOADING)
1954 continue; 2198 continue;
2199 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
1955 shost = lpfc_shost_from_vport(vports[i]); 2200 shost = lpfc_shost_from_vport(vports[i]);
1956 list_for_each_entry_safe(ndlp, next_ndlp, 2201 list_for_each_entry_safe(ndlp, next_ndlp,
1957 &vports[i]->fc_nodes, 2202 &vports[i]->fc_nodes,
@@ -1975,7 +2220,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1975 } 2220 }
1976 lpfc_destroy_vport_work_array(phba, vports); 2221 lpfc_destroy_vport_work_array(phba, vports);
1977 2222
1978 lpfc_sli_flush_mbox_queue(phba); 2223 lpfc_sli_mbox_sys_shutdown(phba);
1979} 2224}
1980 2225
1981/** 2226/**
@@ -1996,11 +2241,11 @@ lpfc_offline(struct lpfc_hba *phba)
1996 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2241 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
1997 return; 2242 return;
1998 2243
1999 /* stop all timers associated with this hba */ 2244 /* stop port and all timers associated with this hba */
2000 lpfc_stop_phba_timers(phba); 2245 lpfc_stop_port(phba);
2001 vports = lpfc_create_vport_work_array(phba); 2246 vports = lpfc_create_vport_work_array(phba);
2002 if (vports != NULL) 2247 if (vports != NULL)
2003 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 2248 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2004 lpfc_stop_vport_timers(vports[i]); 2249 lpfc_stop_vport_timers(vports[i]);
2005 lpfc_destroy_vport_work_array(phba, vports); 2250 lpfc_destroy_vport_work_array(phba, vports);
2006 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2251 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2013,7 +2258,7 @@ lpfc_offline(struct lpfc_hba *phba)
2013 spin_unlock_irq(&phba->hbalock); 2258 spin_unlock_irq(&phba->hbalock);
2014 vports = lpfc_create_vport_work_array(phba); 2259 vports = lpfc_create_vport_work_array(phba);
2015 if (vports != NULL) 2260 if (vports != NULL)
2016 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 2261 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2017 shost = lpfc_shost_from_vport(vports[i]); 2262 shost = lpfc_shost_from_vport(vports[i]);
2018 spin_lock_irq(shost->host_lock); 2263 spin_lock_irq(shost->host_lock);
2019 vports[i]->work_port_events = 0; 2264 vports[i]->work_port_events = 0;
@@ -2106,6 +2351,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2106 shost->max_lun = vport->cfg_max_luns; 2351 shost->max_lun = vport->cfg_max_luns;
2107 shost->this_id = -1; 2352 shost->this_id = -1;
2108 shost->max_cmd_len = 16; 2353 shost->max_cmd_len = 16;
2354 if (phba->sli_rev == LPFC_SLI_REV4) {
2355 shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
2356 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2357 }
2109 2358
2110 /* 2359 /*
2111 * Set initial can_queue value since 0 is no longer supported and 2360 * Set initial can_queue value since 0 is no longer supported and
@@ -2123,6 +2372,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2123 2372
2124 /* Initialize all internally managed lists. */ 2373 /* Initialize all internally managed lists. */
2125 INIT_LIST_HEAD(&vport->fc_nodes); 2374 INIT_LIST_HEAD(&vport->fc_nodes);
2375 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2126 spin_lock_init(&vport->work_port_lock); 2376 spin_lock_init(&vport->work_port_lock);
2127 2377
2128 init_timer(&vport->fc_disctmo); 2378 init_timer(&vport->fc_disctmo);
@@ -2314,15 +2564,3461 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
2314} 2564}
2315 2565
2316/** 2566/**
2317 * lpfc_enable_msix - Enable MSI-X interrupt mode 2567 * lpfc_stop_port_s3 - Stop SLI3 device port
2568 * @phba: pointer to lpfc hba data structure.
2569 *
2570 * This routine is invoked to stop an SLI3 device port, it stops the device
2571 * from generating interrupts and stops the device driver's timers for the
2572 * device.
2573 **/
2574static void
2575lpfc_stop_port_s3(struct lpfc_hba *phba)
2576{
2577 /* Clear all interrupt enable conditions */
2578 writel(0, phba->HCregaddr);
2579 readl(phba->HCregaddr); /* flush */
2580 /* Clear all pending interrupts */
2581 writel(0xffffffff, phba->HAregaddr);
2582 readl(phba->HAregaddr); /* flush */
2583
2584 /* Reset some HBA SLI setup states */
2585 lpfc_stop_hba_timers(phba);
2586 phba->pport->work_port_events = 0;
2587}
2588
2589/**
2590 * lpfc_stop_port_s4 - Stop SLI4 device port
2591 * @phba: pointer to lpfc hba data structure.
2592 *
2593 * This routine is invoked to stop an SLI4 device port, it stops the device
2594 * from generating interrupts and stops the device driver's timers for the
2595 * device.
2596 **/
2597static void
2598lpfc_stop_port_s4(struct lpfc_hba *phba)
2599{
2600 /* Reset some HBA SLI4 setup states */
2601 lpfc_stop_hba_timers(phba);
2602 phba->pport->work_port_events = 0;
2603 phba->sli4_hba.intr_enable = 0;
2604 /* Hard clear it for now, shall have more graceful way to wait later */
2605 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2606}
2607
2608/**
2609 * lpfc_stop_port - Wrapper function for stopping hba port
2610 * @phba: Pointer to HBA context object.
2611 *
2612 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2613 * the API jump table function pointer from the lpfc_hba struct.
2614 **/
2615void
2616lpfc_stop_port(struct lpfc_hba *phba)
2617{
2618 phba->lpfc_stop_port(phba);
2619}
2620
2621/**
2622 * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
2623 * @phba: pointer to lpfc hba data structure.
2624 *
2625 * This routine is invoked to remove the driver default fcf record from
2626 * the port. This routine currently acts on FCF Index 0.
2627 *
2628 **/
2629void
2630lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2631{
2632 int rc = 0;
2633 LPFC_MBOXQ_t *mboxq;
2634 struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
2635 uint32_t mbox_tmo, req_len;
2636 uint32_t shdr_status, shdr_add_status;
2637
2638 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2639 if (!mboxq) {
2640 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2641 "2020 Failed to allocate mbox for ADD_FCF cmd\n");
2642 return;
2643 }
2644
2645 req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
2646 sizeof(struct lpfc_sli4_cfg_mhdr);
2647 rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2648 LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
2649 req_len, LPFC_SLI4_MBX_EMBED);
2650 /*
2651	 * In phase 1, there is a single FCF index, 0. In phase 2, the driver
2652 * supports multiple FCF indices.
2653 */
2654 del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
2655 bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
2656 bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
2657 phba->fcf.fcf_indx);
2658
2659 if (!phba->sli4_hba.intr_enable)
2660 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2661 else {
2662 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
2663 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
2664 }
2665 /* The IOCTL status is embedded in the mailbox subheader. */
2666 shdr_status = bf_get(lpfc_mbox_hdr_status,
2667 &del_fcf_record->header.cfg_shdr.response);
2668 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2669 &del_fcf_record->header.cfg_shdr.response);
2670 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2671 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2672 "2516 DEL FCF of default FCF Index failed "
2673 "mbx status x%x, status x%x add_status x%x\n",
2674 rc, shdr_status, shdr_add_status);
2675 }
2676 if (rc != MBX_TIMEOUT)
2677 mempool_free(mboxq, phba->mbox_mem_pool);
2678}
2679
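/*
 * Editorial note on the mailbox pattern above (an observation, not new
 * driver logic): SLI4 mailbox commands are issued with MBX_POLL until
 * interrupts are enabled, and via the blocking lpfc_sli_issue_mbox_wait()
 * with a per-command timeout afterwards:
 *
 *	if (!phba->sli4_hba.intr_enable)
 *		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	else
 *		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
 *
 * The mailbox is deliberately not freed on MBX_TIMEOUT, presumably because
 * the firmware may still own that memory.
 */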
2680/**
2681 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2682 * @phba: pointer to lpfc hba data structure.
2683 * @acqe_link: pointer to the async link completion queue entry.
2684 *
2685 * This routine is to parse the SLI4 link-attention link fault code and
2686 * translate it into the base driver's read link attention mailbox command
2687 * status.
2688 *
2689 * Return: Link-attention status in terms of base driver's coding.
2690 **/
2691static uint16_t
2692lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
2693 struct lpfc_acqe_link *acqe_link)
2694{
2695 uint16_t latt_fault;
2696
2697 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
2698 case LPFC_ASYNC_LINK_FAULT_NONE:
2699 case LPFC_ASYNC_LINK_FAULT_LOCAL:
2700 case LPFC_ASYNC_LINK_FAULT_REMOTE:
2701 latt_fault = 0;
2702 break;
2703 default:
2704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2705 "0398 Invalid link fault code: x%x\n",
2706 bf_get(lpfc_acqe_link_fault, acqe_link));
2707 latt_fault = MBXERR_ERROR;
2708 break;
2709 }
2710 return latt_fault;
2711}
2712
2713/**
2714 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
2715 * @phba: pointer to lpfc hba data structure.
2716 * @acqe_link: pointer to the async link completion queue entry.
2717 *
2718 * This routine is to parse the SLI4 link attention type and translate it
2719 * into the base driver's link attention type coding.
2720 *
2721 * Return: Link attention type in terms of base driver's coding.
2722 **/
2723static uint8_t
2724lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
2725 struct lpfc_acqe_link *acqe_link)
2726{
2727 uint8_t att_type;
2728
2729 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
2730 case LPFC_ASYNC_LINK_STATUS_DOWN:
2731 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
2732 att_type = AT_LINK_DOWN;
2733 break;
2734 case LPFC_ASYNC_LINK_STATUS_UP:
2735 /* Ignore physical link up events - wait for logical link up */
2736 att_type = AT_RESERVED;
2737 break;
2738 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
2739 att_type = AT_LINK_UP;
2740 break;
2741 default:
2742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2743 "0399 Invalid link attention type: x%x\n",
2744 bf_get(lpfc_acqe_link_status, acqe_link));
2745 att_type = AT_RESERVED;
2746 break;
2747 }
2748 return att_type;
2749}
2750
2751/**
2752 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
2753 * @phba: pointer to lpfc hba data structure.
2754 * @acqe_link: pointer to the async link completion queue entry.
2755 *
2756 * This routine is to parse the SLI4 link-attention link speed and translate
2757 * it into the base driver's link-attention link speed coding.
2758 *
2759 * Return: Link-attention link speed in terms of base driver's coding.
2760 **/
2761static uint8_t
2762lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
2763 struct lpfc_acqe_link *acqe_link)
2764{
2765 uint8_t link_speed;
2766
2767 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
2768 case LPFC_ASYNC_LINK_SPEED_ZERO:
2769 link_speed = LA_UNKNW_LINK;
2770 break;
2771 case LPFC_ASYNC_LINK_SPEED_10MBPS:
2772 link_speed = LA_UNKNW_LINK;
2773 break;
2774 case LPFC_ASYNC_LINK_SPEED_100MBPS:
2775 link_speed = LA_UNKNW_LINK;
2776 break;
2777 case LPFC_ASYNC_LINK_SPEED_1GBPS:
2778 link_speed = LA_1GHZ_LINK;
2779 break;
2780 case LPFC_ASYNC_LINK_SPEED_10GBPS:
2781 link_speed = LA_10GHZ_LINK;
2782 break;
2783 default:
2784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2785 "0483 Invalid link-attention link speed: x%x\n",
2786 bf_get(lpfc_acqe_link_speed, acqe_link));
2787 link_speed = LA_UNKNW_LINK;
2788 break;
2789 }
2790 return link_speed;
2791}
2792
2793/**
2794 * lpfc_sli4_async_link_evt - Process the asynchronous link event
2795 * @phba: pointer to lpfc hba data structure.
2796 * @acqe_link: pointer to the async link completion queue entry.
2797 *
2798 * This routine is to handle the SLI4 asynchronous link event.
2799 **/
2800static void
2801lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2802 struct lpfc_acqe_link *acqe_link)
2803{
2804 struct lpfc_dmabuf *mp;
2805 LPFC_MBOXQ_t *pmb;
2806 MAILBOX_t *mb;
2807 READ_LA_VAR *la;
2808 uint8_t att_type;
2809
2810 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2811 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2812 return;
2813 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2814 if (!pmb) {
2815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2816 "0395 The mboxq allocation failed\n");
2817 return;
2818 }
2819 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2820 if (!mp) {
2821 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2822 "0396 The lpfc_dmabuf allocation failed\n");
2823 goto out_free_pmb;
2824 }
2825 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2826 if (!mp->virt) {
2827 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2828 "0397 The mbuf allocation failed\n");
2829 goto out_free_dmabuf;
2830 }
2831
2832 /* Cleanup any outstanding ELS commands */
2833 lpfc_els_flush_all_cmd(phba);
2834
2835	/* Block ELS IOCBs until we are done processing the link event */
2836 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2837
2838 /* Update link event statistics */
2839 phba->sli.slistat.link_event++;
2840
2841 /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
2842 lpfc_read_la(phba, pmb, mp);
2843 pmb->vport = phba->pport;
2844
2845 /* Parse and translate status field */
2846 mb = &pmb->u.mb;
2847 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
2848
2849 /* Parse and translate link attention fields */
2850 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
2851 la->eventTag = acqe_link->event_tag;
2852 la->attType = att_type;
2853 la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
2854
2855	/* Fake the following irrelevant fields */
2856 la->topology = TOPOLOGY_PT_PT;
2857 la->granted_AL_PA = 0;
2858 la->il = 0;
2859 la->pb = 0;
2860 la->fa = 0;
2861 la->mm = 0;
2862
2863 /* Keep the link status for extra SLI4 state machine reference */
2864 phba->sli4_hba.link_state.speed =
2865 bf_get(lpfc_acqe_link_speed, acqe_link);
2866 phba->sli4_hba.link_state.duplex =
2867 bf_get(lpfc_acqe_link_duplex, acqe_link);
2868 phba->sli4_hba.link_state.status =
2869 bf_get(lpfc_acqe_link_status, acqe_link);
2870 phba->sli4_hba.link_state.physical =
2871 bf_get(lpfc_acqe_link_physical, acqe_link);
2872 phba->sli4_hba.link_state.fault =
2873 bf_get(lpfc_acqe_link_fault, acqe_link);
2874
2875 /* Invoke the lpfc_handle_latt mailbox command callback function */
2876 lpfc_mbx_cmpl_read_la(phba, pmb);
2877
2878 return;
2879
2880out_free_dmabuf:
2881 kfree(mp);
2882out_free_pmb:
2883 mempool_free(pmb, phba->mbox_mem_pool);
2884}
2885
2886/**
2887 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
2888 * @phba: pointer to lpfc hba data structure.
2889 * @acqe_fcoe: pointer to the async fcoe completion queue entry.
2890 *
2891 * This routine is to handle the SLI4 asynchronous fcoe event.
2892 **/
2893static void
2894lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2895 struct lpfc_acqe_fcoe *acqe_fcoe)
2896{
2897 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2898 int rc;
2899
2900 switch (event_type) {
2901 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2902 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2903 "2546 New FCF found index 0x%x tag 0x%x \n",
2904 acqe_fcoe->fcf_index,
2905 acqe_fcoe->event_tag);
2906 /*
2907 * If the current FCF is in discovered state,
2908 * do nothing.
2909 */
2910 spin_lock_irq(&phba->hbalock);
2911 if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
2912 spin_unlock_irq(&phba->hbalock);
2913 break;
2914 }
2915 spin_unlock_irq(&phba->hbalock);
2916
2917 /* Read the FCF table and re-discover SAN. */
2918 rc = lpfc_sli4_read_fcf_record(phba,
2919 LPFC_FCOE_FCF_GET_FIRST);
2920 if (rc)
2921 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2922 "2547 Read FCF record failed 0x%x\n",
2923 rc);
2924 break;
2925
2926 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
2927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2928 "2548 FCF Table full count 0x%x tag 0x%x \n",
2929 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
2930 acqe_fcoe->event_tag);
2931 break;
2932
2933 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2934 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2935 "2549 FCF disconnected fron network index 0x%x"
2936 " tag 0x%x \n", acqe_fcoe->fcf_index,
2937 acqe_fcoe->event_tag);
2938 /* If the event is not for currently used fcf do nothing */
2939 if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
2940 break;
2941 /*
2942		 * Currently, the driver supports only one FCF, so treat this as
2943 * a link down.
2944 */
2945 lpfc_linkdown(phba);
2946 /* Unregister FCF if no devices connected to it */
2947 lpfc_unregister_unused_fcf(phba);
2948 break;
2949
2950 default:
2951 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2952 "0288 Unknown FCoE event type 0x%x event tag "
2953 "0x%x\n", event_type, acqe_fcoe->event_tag);
2954 break;
2955 }
2956}
2957
2958/**
2959 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
2960 * @phba: pointer to lpfc hba data structure.
2961 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
2962 *
2963 * This routine is to handle the SLI4 asynchronous dcbx event.
2964 **/
2965static void
2966lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
2967 struct lpfc_acqe_dcbx *acqe_dcbx)
2968{
2969 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2970 "0290 The SLI4 DCBX asynchronous event is not "
2971 "handled yet\n");
2972}
2973
2974/**
2975 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
2976 * @phba: pointer to lpfc hba data structure.
2977 *
2978 * This routine is invoked by the worker thread to process all the pending
2979 * SLI4 asynchronous events.
2980 **/
2981void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
2982{
2983 struct lpfc_cq_event *cq_event;
2984
2985 /* First, declare the async event has been handled */
2986 spin_lock_irq(&phba->hbalock);
2987 phba->hba_flag &= ~ASYNC_EVENT;
2988 spin_unlock_irq(&phba->hbalock);
2989 /* Now, handle all the async events */
2990 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
2991 /* Get the first event from the head of the event queue */
2992 spin_lock_irq(&phba->hbalock);
2993 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
2994 cq_event, struct lpfc_cq_event, list);
2995 spin_unlock_irq(&phba->hbalock);
2996 /* Process the asynchronous event */
2997 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
2998 case LPFC_TRAILER_CODE_LINK:
2999 lpfc_sli4_async_link_evt(phba,
3000 &cq_event->cqe.acqe_link);
3001 break;
3002 case LPFC_TRAILER_CODE_FCOE:
3003 lpfc_sli4_async_fcoe_evt(phba,
3004 &cq_event->cqe.acqe_fcoe);
3005 break;
3006 case LPFC_TRAILER_CODE_DCBX:
3007 lpfc_sli4_async_dcbx_evt(phba,
3008 &cq_event->cqe.acqe_dcbx);
3009 break;
3010 default:
3011 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3012 "1804 Invalid asynchrous event code: "
3013 "x%x\n", bf_get(lpfc_trailer_code,
3014 &cq_event->cqe.mcqe_cmpl));
3015 break;
3016 }
3017 /* Free the completion event processed to the free pool */
3018 lpfc_sli4_cq_event_release(phba, cq_event);
3019 }
3020}
3021
3022/**
3023 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3024 * @phba: pointer to lpfc hba data structure.
3025 * @dev_grp: The HBA PCI-Device group number.
3026 *
3027 * This routine is invoked to set up the per HBA PCI-Device group function
3028 * API jump table entries.
3029 *
3030 * Return: 0 if success, otherwise -ENODEV
3031 **/
3032int
3033lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3034{
3035 int rc;
3036
3037 /* Set up lpfc PCI-device group */
3038 phba->pci_dev_grp = dev_grp;
3039
3040 /* The LPFC_PCI_DEV_OC uses SLI4 */
3041 if (dev_grp == LPFC_PCI_DEV_OC)
3042 phba->sli_rev = LPFC_SLI_REV4;
3043
3044 /* Set up device INIT API function jump table */
3045 rc = lpfc_init_api_table_setup(phba, dev_grp);
3046 if (rc)
3047 return -ENODEV;
3048 /* Set up SCSI API function jump table */
3049 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3050 if (rc)
3051 return -ENODEV;
3052 /* Set up SLI API function jump table */
3053 rc = lpfc_sli_api_table_setup(phba, dev_grp);
3054 if (rc)
3055 return -ENODEV;
3056 /* Set up MBOX API function jump table */
3057 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3058 if (rc)
3059 return -ENODEV;
3060
3061 return 0;
3062}
3063
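/*
 * Hedged usage sketch (the caller code and cleanup label below are
 * hypothetical, not from this patch): probe-time callers are expected to
 * treat a non-zero return as fatal for the device:
 *
 *	if (lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC)) {
 *		error = -ENODEV;
 *		goto out_free_phba;	// hypothetical cleanup label
 *	}
 */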
3064/**
3065 * lpfc_log_intr_mode - Log the active interrupt mode
3066 * @phba: pointer to lpfc hba data structure.
3067 * @intr_mode: active interrupt mode adopted.
3068 *
3069 * This routine is invoked to log the currently used active interrupt mode
3070 * to the device.
3071 **/
3072static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3073{
3074 switch (intr_mode) {
3075 case 0:
3076 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3077 "0470 Enable INTx interrupt mode.\n");
3078 break;
3079 case 1:
3080 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3081 "0481 Enabled MSI interrupt mode.\n");
3082 break;
3083 case 2:
3084 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3085 "0480 Enabled MSI-X interrupt mode.\n");
3086 break;
3087 default:
3088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3089 "0482 Illegal interrupt mode.\n");
3090 break;
3091 }
3092 return;
3093}
3094
3095/**
3096 * lpfc_enable_pci_dev - Enable a generic PCI device.
3097 * @phba: pointer to lpfc hba data structure.
3098 *
3099 * This routine is invoked to enable the PCI device that is common to all
3100 * PCI devices.
3101 *
3102 * Return codes
3103 * 0 - successful
3104 * other values - error
3105 **/
3106static int
3107lpfc_enable_pci_dev(struct lpfc_hba *phba)
3108{
3109 struct pci_dev *pdev;
3110 int bars;
3111
3112 /* Obtain PCI device reference */
3113 if (!phba->pcidev)
3114 goto out_error;
3115 else
3116 pdev = phba->pcidev;
3117 /* Select PCI BARs */
3118 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3119 /* Enable PCI device */
3120 if (pci_enable_device_mem(pdev))
3121 goto out_error;
3122 /* Request PCI resource for the device */
3123 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3124 goto out_disable_device;
3125 /* Set up device as PCI master and save state for EEH */
3126 pci_set_master(pdev);
3127 pci_try_set_mwi(pdev);
3128 pci_save_state(pdev);
3129
3130 return 0;
3131
3132out_disable_device:
3133 pci_disable_device(pdev);
3134out_error:
3135 return -ENODEV;
3136}
3137
3138/**
3139 * lpfc_disable_pci_dev - Disable a generic PCI device.
3140 * @phba: pointer to lpfc hba data structure.
3141 *
3142 * This routine is invoked to disable the PCI device that is common to all
3143 * PCI devices.
3144 **/
3145static void
3146lpfc_disable_pci_dev(struct lpfc_hba *phba)
3147{
3148 struct pci_dev *pdev;
3149 int bars;
3150
3151 /* Obtain PCI device reference */
3152 if (!phba->pcidev)
3153 return;
3154 else
3155 pdev = phba->pcidev;
3156 /* Select PCI BARs */
3157 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3158 /* Release PCI resource and disable PCI device */
3159 pci_release_selected_regions(pdev, bars);
3160 pci_disable_device(pdev);
3161 /* Null out PCI private reference to driver */
3162 pci_set_drvdata(pdev, NULL);
3163
3164 return;
3165}
3166
3167/**
3168 * lpfc_reset_hba - Reset a hba
3169 * @phba: pointer to lpfc hba data structure.
3170 *
3171 * This routine is invoked to reset a hba device. It brings the HBA
3172 * offline, performs a board restart, and then brings the board back
3173 * online. lpfc_offline() calls lpfc_sli_hba_down(), which cleans up
3174 * outstanding mailbox commands.
3175 **/
3176void
3177lpfc_reset_hba(struct lpfc_hba *phba)
3178{
3179 /* If resets are disabled then set error state and return. */
3180 if (!phba->cfg_enable_hba_reset) {
3181 phba->link_state = LPFC_HBA_ERROR;
3182 return;
3183 }
3184 lpfc_offline_prep(phba);
3185 lpfc_offline(phba);
3186 lpfc_sli_brdrestart(phba);
3187 lpfc_online(phba);
3188 lpfc_unblock_mgmt_io(phba);
3189}
3190
3191/**
3192 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3193 * @phba: pointer to lpfc hba data structure.
3194 *
3195 * This routine is invoked to set up the driver internal resources specific to
3196 * support the SLI-3 HBA device it attached to.
3197 *
3198 * Return codes
3199 * 0 - successful
3200 * other values - error
3201 **/
3202static int
3203lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3204{
3205 struct lpfc_sli *psli;
3206
3207 /*
3208 * Initialize timers used by driver
3209 */
3210
3211 /* Heartbeat timer */
3212 init_timer(&phba->hb_tmofunc);
3213 phba->hb_tmofunc.function = lpfc_hb_timeout;
3214 phba->hb_tmofunc.data = (unsigned long)phba;
3215
3216 psli = &phba->sli;
3217 /* MBOX heartbeat timer */
3218 init_timer(&psli->mbox_tmo);
3219 psli->mbox_tmo.function = lpfc_mbox_timeout;
3220 psli->mbox_tmo.data = (unsigned long) phba;
3221 /* FCP polling mode timer */
3222 init_timer(&phba->fcp_poll_timer);
3223 phba->fcp_poll_timer.function = lpfc_poll_timeout;
3224 phba->fcp_poll_timer.data = (unsigned long) phba;
3225 /* Fabric block timer */
3226 init_timer(&phba->fabric_block_timer);
3227 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3228 phba->fabric_block_timer.data = (unsigned long) phba;
3229 /* EA polling mode timer */
3230 init_timer(&phba->eratt_poll);
3231 phba->eratt_poll.function = lpfc_poll_eratt;
3232 phba->eratt_poll.data = (unsigned long) phba;
3233
3234 /* Host attention work mask setup */
3235 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
3236 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
3237
3238 /* Get all the module params for configuring this host */
3239 lpfc_get_cfgparam(phba);
3240 /*
3241 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3242 * used to create the sg_dma_buf_pool must be dynamically calculated.
3243 * 2 segments are added since the IOCB needs a command and response bde.
3244 */
3245 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
3246 sizeof(struct fcp_rsp) +
3247 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
3248
3249 if (phba->cfg_enable_bg) {
3250 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
3251 phba->cfg_sg_dma_buf_size +=
3252 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
3253 }
3254
3255 /* Also reinitialize the host templates with new values. */
3256 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3257 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
3258
3259 phba->max_vpi = LPFC_MAX_VPI;
3260 /* This will be set to correct value after config_port mbox */
3261 phba->max_vports = 0;
3262
3263 /*
3264 * Initialize the SLI Layer to run with lpfc HBAs.
3265 */
3266 lpfc_sli_setup(phba);
3267 lpfc_sli_queue_setup(phba);
3268
3269 /* Allocate device driver memory */
3270 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
3271 return -ENOMEM;
3272
3273 return 0;
3274}
3275
3276/**
3277 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
3278 * @phba: pointer to lpfc hba data structure.
3279 *
3280 * This routine is invoked to unset the driver internal resources set up
3281 * specific for supporting the SLI-3 HBA device it attached to.
3282 **/
3283static void
3284lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
3285{
3286 /* Free device driver memory allocated */
3287 lpfc_mem_free_all(phba);
3288
3289 return;
3290}
3291
3292/**
3293 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
3294 * @phba: pointer to lpfc hba data structure.
3295 *
3296 * This routine is invoked to set up the driver internal resources specific to
3297 * support the SLI-4 HBA device it attached to.
3298 *
3299 * Return codes
3300 * 0 - successful
3301 * other values - error
3302 **/
3303static int
3304lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3305{
3306 struct lpfc_sli *psli;
3307 int rc;
3308 int i, hbq_count;
3309
3310 /* Before proceed, wait for POST done and device ready */
3311 rc = lpfc_sli4_post_status_check(phba);
3312 if (rc)
3313 return -ENODEV;
3314
3315 /*
3316 * Initialize timers used by driver
3317 */
3318
3319 /* Heartbeat timer */
3320 init_timer(&phba->hb_tmofunc);
3321 phba->hb_tmofunc.function = lpfc_hb_timeout;
3322 phba->hb_tmofunc.data = (unsigned long)phba;
3323
3324 psli = &phba->sli;
3325 /* MBOX heartbeat timer */
3326 init_timer(&psli->mbox_tmo);
3327 psli->mbox_tmo.function = lpfc_mbox_timeout;
3328 psli->mbox_tmo.data = (unsigned long) phba;
3329 /* Fabric block timer */
3330 init_timer(&phba->fabric_block_timer);
3331 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
3332 phba->fabric_block_timer.data = (unsigned long) phba;
3333 /* EA polling mode timer */
3334 init_timer(&phba->eratt_poll);
3335 phba->eratt_poll.function = lpfc_poll_eratt;
3336 phba->eratt_poll.data = (unsigned long) phba;
3337 /*
3338 * We need to do a READ_CONFIG mailbox command here before
3339 * calling lpfc_get_cfgparam. For VFs this will report the
3340 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
3341	 * All of the resources allocated for this Port are tied to
3342	 * these values.
3343 */
3344 /* Get all the module params for configuring this host */
3345 lpfc_get_cfgparam(phba);
3346 phba->max_vpi = LPFC_MAX_VPI;
3347 /* This will be set to correct value after the read_config mbox */
3348 phba->max_vports = 0;
3349
3350 /* Program the default value of vlan_id and fc_map */
3351 phba->valid_vlan = 0;
3352 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3353 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3354 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3355
3356 /*
3357 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3358 * used to create the sg_dma_buf_pool must be dynamically calculated.
3359 * 2 segments are added since the IOCB needs a command and response bde.
3360	 * To ensure that the scsi sgl does not cross a 4k page boundary, only
3361 * sgl sizes of 1k, 2k, 4k, and 8k are supported.
3362 * Table of sgl sizes and seg_cnt:
3363 * sgl size, sg_seg_cnt total seg
3364 * 1k 50 52
3365 * 2k 114 116
3366 * 4k 242 244
3367 * 8k 498 500
3368 * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
3369 * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
3370 * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
3371 * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
3372 */
3373 if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
3374 phba->cfg_sg_seg_cnt = 50;
3375 else if (phba->cfg_sg_seg_cnt <= 114)
3376 phba->cfg_sg_seg_cnt = 114;
3377 else if (phba->cfg_sg_seg_cnt <= 242)
3378 phba->cfg_sg_seg_cnt = 242;
3379 else
3380 phba->cfg_sg_seg_cnt = 498;
3381
3382 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
3383 + sizeof(struct fcp_rsp);
3384 phba->cfg_sg_dma_buf_size +=
3385 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
3386
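	/*
	 * Worked check of the sizing table above (assuming, per that
	 * comment, sizeof(struct fcp_cmnd) == 32, sizeof(struct fcp_rsp)
	 * == 160 and sizeof(struct sli4_sge) == 16):
	 *	( 50 + 2) * 16 + 192 = 1024
	 *	(114 + 2) * 16 + 192 = 2048
	 *	(242 + 2) * 16 + 192 = 4096
	 *	(498 + 2) * 16 + 192 = 8192
	 * so each supported seg_cnt rounds the DMA buffer up to one of the
	 * four supported power-of-two sgl sizes.
	 */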
3387 /* Initialize buffer queue management fields */
3388 hbq_count = lpfc_sli_hbq_count();
3389 for (i = 0; i < hbq_count; ++i)
3390 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
3391 INIT_LIST_HEAD(&phba->rb_pend_list);
3392 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
3393 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
3394
3395 /*
3396 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
3397 */
3398 /* Initialize the Abort scsi buffer list used by driver */
3399 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
3400 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
3401 /* This abort list used by worker thread */
3402 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
3403
3404 /*
3405	 * Initialize driver internal slow-path work queues
3406 */
3407
3408	/* Driver internal slow-path CQ Event pool */
3409 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3410 /* Response IOCB work queue list */
3411 INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
3412 /* Asynchronous event CQ Event work queue list */
3413 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3414 /* Fast-path XRI aborted CQ Event work queue list */
3415 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
3416 /* Slow-path XRI aborted CQ Event work queue list */
3417 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
3418 /* Receive queue CQ Event work queue list */
3419 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
3420
3421 /* Initialize the driver internal SLI layer lists. */
3422 lpfc_sli_setup(phba);
3423 lpfc_sli_queue_setup(phba);
3424
3425 /* Allocate device driver memory */
3426 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
3427 if (rc)
3428 return -ENOMEM;
3429
3430 /* Create the bootstrap mailbox command */
3431 rc = lpfc_create_bootstrap_mbox(phba);
3432 if (unlikely(rc))
3433 goto out_free_mem;
3434
3435 /* Set up the host's endian order with the device. */
3436 rc = lpfc_setup_endian_order(phba);
3437 if (unlikely(rc))
3438 goto out_free_bsmbx;
3439
3440 /* Set up the hba's configuration parameters. */
3441 rc = lpfc_sli4_read_config(phba);
3442 if (unlikely(rc))
3443 goto out_free_bsmbx;
3444
3445 /* Perform a function reset */
3446 rc = lpfc_pci_function_reset(phba);
3447 if (unlikely(rc))
3448 goto out_free_bsmbx;
3449
3450 /* Create all the SLI4 queues */
3451 rc = lpfc_sli4_queue_create(phba);
3452 if (rc)
3453 goto out_free_bsmbx;
3454
3455 /* Create driver internal CQE event pool */
3456 rc = lpfc_sli4_cq_event_pool_create(phba);
3457 if (rc)
3458 goto out_destroy_queue;
3459
3460 /* Initialize and populate the iocb list per host */
3461 rc = lpfc_init_sgl_list(phba);
3462 if (rc) {
3463 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3464 "1400 Failed to initialize sgl list.\n");
3465 goto out_destroy_cq_event_pool;
3466 }
3467 rc = lpfc_init_active_sgl_array(phba);
3468 if (rc) {
3469 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3470 "1430 Failed to initialize sgl list.\n");
3471 goto out_free_sgl_list;
3472 }
3473
3474 rc = lpfc_sli4_init_rpi_hdrs(phba);
3475 if (rc) {
3476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3477 "1432 Failed to initialize rpi headers.\n");
3478 goto out_free_active_sgl;
3479 }
3480
3481 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3482 phba->cfg_fcp_eq_count), GFP_KERNEL);
3483 if (!phba->sli4_hba.fcp_eq_hdl) {
3484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3485 "2572 Failed allocate memory for fast-path "
3486 "per-EQ handle array\n");
3487 goto out_remove_rpi_hdrs;
3488 }
3489
3490 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
3491 phba->sli4_hba.cfg_eqn), GFP_KERNEL);
3492 if (!phba->sli4_hba.msix_entries) {
3493 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3494 "2573 Failed allocate memory for msi-x "
3495 "interrupt vector entries\n");
3496 goto out_free_fcp_eq_hdl;
3497 }
3498
3499 return rc;
3500
3501out_free_fcp_eq_hdl:
3502 kfree(phba->sli4_hba.fcp_eq_hdl);
3503out_remove_rpi_hdrs:
3504 lpfc_sli4_remove_rpi_hdrs(phba);
3505out_free_active_sgl:
3506 lpfc_free_active_sgl(phba);
3507out_free_sgl_list:
3508 lpfc_free_sgl_list(phba);
3509out_destroy_cq_event_pool:
3510 lpfc_sli4_cq_event_pool_destroy(phba);
3511out_destroy_queue:
3512 lpfc_sli4_queue_destroy(phba);
3513out_free_bsmbx:
3514 lpfc_destroy_bootstrap_mbox(phba);
3515out_free_mem:
3516 lpfc_mem_free(phba);
3517 return rc;
3518}
3519
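/*
 * Editorial note: the error labels above unwind in exact reverse order of
 * the setup steps, so each goto releases only what was successfully
 * allocated before the failure point; this is the standard kernel
 * goto-cleanup idiom.
 */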
3520/**
3521 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
3522 * @phba: pointer to lpfc hba data structure.
3523 *
3524 * This routine is invoked to unset the driver internal resources set up
3525 * specific for supporting the SLI-4 HBA device it attached to.
3526 **/
3527static void
3528lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3529{
3530 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
3531
3532 /* unregister default FCFI from the HBA */
3533 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
3534
3535 /* Free the default FCR table */
3536 lpfc_sli_remove_dflt_fcf(phba);
3537
3538 /* Free memory allocated for msi-x interrupt vector entries */
3539 kfree(phba->sli4_hba.msix_entries);
3540
3541 /* Free memory allocated for fast-path work queue handles */
3542 kfree(phba->sli4_hba.fcp_eq_hdl);
3543
3544 /* Free the allocated rpi headers. */
3545 lpfc_sli4_remove_rpi_hdrs(phba);
3546
3547 /* Free the ELS sgl list */
3548 lpfc_free_active_sgl(phba);
3549 lpfc_free_sgl_list(phba);
3550
3551 /* Free the SCSI sgl management array */
3552 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3553
3554 /* Free the SLI4 queues */
3555 lpfc_sli4_queue_destroy(phba);
3556
3557 /* Free the completion queue EQ event pool */
3558 lpfc_sli4_cq_event_release_all(phba);
3559 lpfc_sli4_cq_event_pool_destroy(phba);
3560
3561 /* Reset SLI4 HBA FCoE function */
3562 lpfc_pci_function_reset(phba);
3563
3564 /* Free the bsmbx region. */
3565 lpfc_destroy_bootstrap_mbox(phba);
3566
3567 /* Free the SLI Layer memory with SLI4 HBAs */
3568 lpfc_mem_free_all(phba);
3569
3570 /* Free the current connect table */
3571 list_for_each_entry_safe(conn_entry, next_conn_entry,
3572 &phba->fcf_conn_rec_list, list)
3573 kfree(conn_entry);
3574
3575 return;
3576}
3577
3578/**
3579 * lpfc_init_api_table_setup - Set up init api function jump table
3580 * @phba: The hba struct for which this call is being executed.
3581 * @dev_grp: The HBA PCI-Device group number.
3582 *
3583 * This routine sets up the device INIT interface API function jump table
3584 * in @phba struct.
3585 *
3586 * Returns: 0 - success, -ENODEV - failure.
3587 **/
3588int
3589lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3590{
3591 switch (dev_grp) {
3592 case LPFC_PCI_DEV_LP:
3593 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
3594 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
3595 phba->lpfc_stop_port = lpfc_stop_port_s3;
3596 break;
3597 case LPFC_PCI_DEV_OC:
3598 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
3599 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
3600 phba->lpfc_stop_port = lpfc_stop_port_s4;
3601 break;
3602 default:
3603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3604 "1431 Invalid HBA PCI-device group: 0x%x\n",
3605 dev_grp);
3606 return -ENODEV;
3607 break;
3608 }
3609 return 0;
3610}
3611
3612/**
3613 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
3614 * @phba: pointer to lpfc hba data structure.
3615 *
3616 * This routine is invoked to set up the driver internal resources before the
3617 * device specific resource setup to support the HBA device it attached to.
3618 *
3619 * Return codes
3620 * 0 - successful
3621 * other values - error
3622 **/
3623static int
3624lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
3625{
3626 /*
3627 * Driver resources common to all SLI revisions
3628 */
3629 atomic_set(&phba->fast_event_count, 0);
3630 spin_lock_init(&phba->hbalock);
3631
3632 /* Initialize ndlp management spinlock */
3633 spin_lock_init(&phba->ndlp_lock);
3634
3635 INIT_LIST_HEAD(&phba->port_list);
3636 INIT_LIST_HEAD(&phba->work_list);
3637 init_waitqueue_head(&phba->wait_4_mlo_m_q);
3638
3639 /* Initialize the wait queue head for the kernel thread */
3640 init_waitqueue_head(&phba->work_waitq);
3641
3642 /* Initialize the scsi buffer list used by driver for scsi IO */
3643 spin_lock_init(&phba->scsi_buf_list_lock);
3644 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
3645
3646 /* Initialize the fabric iocb list */
3647 INIT_LIST_HEAD(&phba->fabric_iocb_list);
3648
3649 /* Initialize list to save ELS buffers */
3650 INIT_LIST_HEAD(&phba->elsbuf);
3651
3652 /* Initialize FCF connection rec list */
3653 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
3654
3655 return 0;
3656}
3657
3658/**
3659 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
3660 * @phba: pointer to lpfc hba data structure.
3661 *
3662 * This routine is invoked to set up the driver internal resources after the
3663 * device specific resource setup to support the HBA device it attached to.
3664 *
3665 * Return codes
3666 * 0 - successful
3667 * other values - error
3668 **/
3669static int
3670lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
3671{
3672 int error;
3673
3674 /* Startup the kernel thread for this host adapter. */
3675 phba->worker_thread = kthread_run(lpfc_do_work, phba,
3676 "lpfc_worker_%d", phba->brd_no);
3677 if (IS_ERR(phba->worker_thread)) {
3678 error = PTR_ERR(phba->worker_thread);
3679 return error;
3680 }
3681
3682 return 0;
3683}
3684
3685/**
3686 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
3687 * @phba: pointer to lpfc hba data structure.
3688 *
3689 * This routine is invoked to unset the driver internal resources set up after
3690 * the device specific resource setup for supporting the HBA device it
3691 * attached to.
3692 **/
3693static void
3694lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
3695{
3696 /* Stop kernel worker thread */
3697 kthread_stop(phba->worker_thread);
3698}
3699
3700/**
3701 * lpfc_free_iocb_list - Free iocb list.
3702 * @phba: pointer to lpfc hba data structure.
3703 *
3704 * This routine is invoked to free the driver's IOCB list and memory.
3705 **/
3706static void
3707lpfc_free_iocb_list(struct lpfc_hba *phba)
3708{
3709 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
3710
3711 spin_lock_irq(&phba->hbalock);
3712 list_for_each_entry_safe(iocbq_entry, iocbq_next,
3713 &phba->lpfc_iocb_list, list) {
3714 list_del(&iocbq_entry->list);
3715 kfree(iocbq_entry);
3716 phba->total_iocbq_bufs--;
3717 }
3718 spin_unlock_irq(&phba->hbalock);
3719
3720 return;
3721}
3722
3723/**
3724 * lpfc_init_iocb_list - Allocate and initialize iocb list.
3725 * @phba: pointer to lpfc hba data structure.
3726 *
3727 * This routine is invoked to allocate and initialize the driver's IOCB
3728 * list and set up the IOCB tag array accordingly.
3729 *
3730 * Return codes
3731 * 0 - successful
3732 * other values - error
3733 **/
3734static int
3735lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
3736{
3737 struct lpfc_iocbq *iocbq_entry = NULL;
3738 uint16_t iotag;
3739 int i;
3740
3741 /* Initialize and populate the iocb list per host. */
3742 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
3743 for (i = 0; i < iocb_count; i++) {
3744 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
3745 if (iocbq_entry == NULL) {
3746 printk(KERN_ERR "%s: only allocated %d iocbs of "
3747 "expected %d count. Unloading driver.\n",
3748			       __func__, i, iocb_count);
3749 goto out_free_iocbq;
3750 }
3751
3752 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
3753 if (iotag == 0) {
3754 kfree(iocbq_entry);
3755 printk(KERN_ERR "%s: failed to allocate IOTAG. "
3756 "Unloading driver.\n", __func__);
3757 goto out_free_iocbq;
3758 }
3759 iocbq_entry->sli4_xritag = NO_XRI;
3760
3761 spin_lock_irq(&phba->hbalock);
3762 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
3763 phba->total_iocbq_bufs++;
3764 spin_unlock_irq(&phba->hbalock);
3765 }
3766
3767 return 0;
3768
3769out_free_iocbq:
3770 lpfc_free_iocb_list(phba);
3771
3772 return -ENOMEM;
3773}
3774
3775/**
3776 * lpfc_free_sgl_list - Free sgl list.
3777 * @phba: pointer to lpfc hba data structure.
3778 *
3779 * This routine is invoked to free the driver's sgl list and memory.
3780 **/
3781static void
3782lpfc_free_sgl_list(struct lpfc_hba *phba)
3783{
3784 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
3785 LIST_HEAD(sglq_list);
3786 int rc = 0;
3787
3788 spin_lock_irq(&phba->hbalock);
3789 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
3790 spin_unlock_irq(&phba->hbalock);
3791
3792 list_for_each_entry_safe(sglq_entry, sglq_next,
3793 &sglq_list, list) {
3794 list_del(&sglq_entry->list);
3795 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
3796 kfree(sglq_entry);
3797 phba->sli4_hba.total_sglq_bufs--;
3798 }
3799 rc = lpfc_sli4_remove_all_sgl_pages(phba);
3800 if (rc) {
3801 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3802 "2005 Unable to deregister pages from HBA: %x", rc);
3803 }
3804 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3805}
3806
3807/**
3808 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
3809 * @phba: pointer to lpfc hba data structure.
3810 *
3811 * This routine is invoked to allocate the driver's active sgl memory.
3812 * This array will hold the sglq_entry's for active IOs.
3813 **/
3814static int
3815lpfc_init_active_sgl_array(struct lpfc_hba *phba)
3816{
3817 int size;
3818 size = sizeof(struct lpfc_sglq *);
3819 size *= phba->sli4_hba.max_cfg_param.max_xri;
3820
3821 phba->sli4_hba.lpfc_sglq_active_list =
3822 kzalloc(size, GFP_KERNEL);
3823 if (!phba->sli4_hba.lpfc_sglq_active_list)
3824 return -ENOMEM;
3825 return 0;
3826}
3827
3828/**
3829 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
3830 * @phba: pointer to lpfc hba data structure.
3831 *
3832 * This routine is invoked to walk through the array of active sglq entries
3833 * and free all of the resources.
3834 * This is just a placeholder for now.
3835 **/
3836static void
3837lpfc_free_active_sgl(struct lpfc_hba *phba)
3838{
3839 kfree(phba->sli4_hba.lpfc_sglq_active_list);
3840}
3841
3842/**
3843 * lpfc_init_sgl_list - Allocate and initialize sgl list.
3844 * @phba: pointer to lpfc hba data structure.
3845 *
3846 * This routine is invoked to allocate and initialize the driver's sgl
3847 * list and set up the sgl xritag tag array accordingly.
3848 *
3849 * Return codes
3850 * 0 - successful
3851 * other values - error
3852 **/
3853static int
3854lpfc_init_sgl_list(struct lpfc_hba *phba)
3855{
3856 struct lpfc_sglq *sglq_entry = NULL;
3857 int i;
3858 int els_xri_cnt;
3859
3860 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3861 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3862 "2400 lpfc_init_sgl_list els %d.\n",
3863 els_xri_cnt);
3864 /* Initialize and populate the sglq list per host/VF. */
3865 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
3866 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
3867
3868 /* Sanity check on XRI management */
3869 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
3870 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3871 "2562 No room left for SCSI XRI allocation: "
3872 "max_xri=%d, els_xri=%d\n",
3873 phba->sli4_hba.max_cfg_param.max_xri,
3874 els_xri_cnt);
3875 return -ENOMEM;
3876 }
3877
3878 /* Allocate memory for the ELS XRI management array */
3879 phba->sli4_hba.lpfc_els_sgl_array =
3880 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
3881 GFP_KERNEL);
3882
3883 if (!phba->sli4_hba.lpfc_els_sgl_array) {
3884 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3885 "2401 Failed to allocate memory for ELS "
3886 "XRI management array of size %d.\n",
3887 els_xri_cnt);
3888 return -ENOMEM;
3889 }
3890
3891 /* Reserve the remaining XRIs for the SCSI XRI management array */
3892 phba->sli4_hba.scsi_xri_max =
3893 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3894 phba->sli4_hba.scsi_xri_cnt = 0;
3895
3896 phba->sli4_hba.lpfc_scsi_psb_array =
3897 kzalloc((sizeof(struct lpfc_scsi_buf *) *
3898 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
3899
3900 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
3901 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3902 "2563 Failed to allocate memory for SCSI "
3903 "XRI management array of size %d.\n",
3904 phba->sli4_hba.scsi_xri_max);
3905 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3906 return -ENOMEM;
3907 }
3908
3909 for (i = 0; i < els_xri_cnt; i++) {
3910 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
3911 if (sglq_entry == NULL) {
3912 printk(KERN_ERR "%s: only allocated %d sgls of "
3913 "expected %d count. Unloading driver.\n",
3914 __func__, i, els_xri_cnt);
3915 goto out_free_mem;
3916 }
3917
3918 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
3919 if (sglq_entry->sli4_xritag == NO_XRI) {
3920 kfree(sglq_entry);
3921 printk(KERN_ERR "%s: failed to allocate XRI.\n"
3922 "Unloading driver.\n", __func__);
3923 goto out_free_mem;
3924 }
3925 sglq_entry->buff_type = GEN_BUFF_TYPE;
3926 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
3927 if (sglq_entry->virt == NULL) {
3928 kfree(sglq_entry);
3929 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
3930 "Unloading driver.\n", __func__);
3931 goto out_free_mem;
3932 }
3933 sglq_entry->sgl = sglq_entry->virt;
3934 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3935
3936 /* The list order is used by later block SGL registration */
3937 spin_lock_irq(&phba->hbalock);
3938 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
3939 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
3940 phba->sli4_hba.total_sglq_bufs++;
3941 spin_unlock_irq(&phba->hbalock);
3942 }
3943 return 0;
3944
3945out_free_mem:
3946 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
3947 lpfc_free_sgl_list(phba);
3948 return -ENOMEM;
3949}
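/*
 * Editor's note: a worked example of the XRI partitioning above, under
 * assumed configuration values. With max_xri = 1024 and els_xri_cnt = 64,
 * scsi_xri_max = 1024 - 64 = 960: the ELS sgl array gets 64 pointer slots
 * and the SCSI psb array gets the remaining 960.
 */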
3950
3951/**
3952 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
3953 * @phba: pointer to lpfc hba data structure.
3954 *
3955 * This routine is invoked to post rpi header templates to the
3956 * HBA consistent with the SLI-4 interface spec. This routine
3957 * posts a PAGE_SIZE memory region to the port to hold up to
3958 * PAGE_SIZE / 64 rpi context headers.
3959 * No locks are held here because this is an initialization routine
3960 * called only from probe or lpfc_online when interrupts are not
3961 * enabled and the driver is reinitializing the device.
3962 *
3963 * Return codes
3964 * 0 - successful
3965 * -ENOMEM - No available memory
3966 * -ENODEV - The rpi header post operation failed.
3967 **/
3968int
3969lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
3970{
3971 int rc = 0;
3972 int longs;
3973 uint16_t rpi_count;
3974 struct lpfc_rpi_hdr *rpi_hdr;
3975
3976 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
3977
3978 /*
3979 * Provision an rpi bitmask range for discovery. The total count
3980 * is the difference between max and base + 1.
3981 */
3982 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
3983 phba->sli4_hba.max_cfg_param.max_rpi - 1;
3984
3985 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
3986 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
3987 GFP_KERNEL);
3988 if (!phba->sli4_hba.rpi_bmask)
3989 return -ENOMEM;
3990
3991 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
3992 if (!rpi_hdr) {
3993 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3994 "0391 Error during rpi post operation\n");
3995 lpfc_sli4_remove_rpis(phba);
3996 rc = -ENODEV;
3997 }
3998
3999 return rc;
4000}
4001
4002/**
4003 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4004 * @phba: pointer to lpfc hba data structure.
4005 *
4006 * This routine is invoked to allocate a single 4KB memory region to
4007 * support rpis and stores them in the phba. This single region
4008 * provides support for up to 64 rpis. The region is used globally
4009 * by the device.
4010 *
4011 * Returns:
4012 * A valid rpi hdr on success.
4013 * A NULL pointer on any failure.
4014 **/
4015struct lpfc_rpi_hdr *
4016lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4017{
4018 uint16_t rpi_limit, curr_rpi_range;
4019 struct lpfc_dmabuf *dmabuf;
4020 struct lpfc_rpi_hdr *rpi_hdr;
4021
4022 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4023 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4024
4025 spin_lock_irq(&phba->hbalock);
4026 curr_rpi_range = phba->sli4_hba.next_rpi;
4027 spin_unlock_irq(&phba->hbalock);
4028
4029 /*
4030 * The port has a limited number of rpis. The increment here
4031 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4032 * and to allow the full max_rpi range per port.
4033 */
4034 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4035 return NULL;
4036
4037 /*
4038 * First allocate the protocol header region for the port. The
4039 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4040 */
4041 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4042 if (!dmabuf)
4043 return NULL;
4044
4045 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4046 LPFC_HDR_TEMPLATE_SIZE,
4047 &dmabuf->phys,
4048 GFP_KERNEL);
4049 if (!dmabuf->virt) {
4050 rpi_hdr = NULL;
4051 goto err_free_dmabuf;
4052 }
4053
4054 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4055 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4056 rpi_hdr = NULL;
4057 goto err_free_coherent;
4058 }
4059
4060 /* Save the rpi header data for cleanup later. */
4061 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4062 if (!rpi_hdr)
4063 goto err_free_coherent;
4064
4065 rpi_hdr->dmabuf = dmabuf;
4066 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4067 rpi_hdr->page_count = 1;
4068 spin_lock_irq(&phba->hbalock);
4069 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4070 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4071
4072 /*
4073 * The next_rpi stores the next modulo-64 rpi value to post
4074 * in any subsequent rpi memory region postings.
4075 */
4076 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
4077 spin_unlock_irq(&phba->hbalock);
4078 return rpi_hdr;
4079
4080 err_free_coherent:
4081 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4082 dmabuf->virt, dmabuf->phys);
4083 err_free_dmabuf:
4084 kfree(dmabuf);
4085 return NULL;
4086}
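/*
 * Editor's note: the IS_ALIGNED() check above assumes
 * LPFC_HDR_TEMPLATE_SIZE is a power of two; for a 4KB template it
 * reduces to
 *
 *	(dmabuf->phys & 0xfff) == 0
 *
 * i.e. dma_alloc_coherent() must have handed back a 4K-aligned bus
 * address, which it normally does for page-sized allocations.
 */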
4087
4088/**
4089 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4090 * @phba: pointer to lpfc hba data structure.
4091 *
4092 * This routine is invoked to remove all memory resources allocated
4093 * to support rpis. This routine presumes the caller has released all
4094 * rpis consumed by fabric or port logins and is prepared to have
4095 * the header pages removed.
4096 **/
4097void
4098lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4099{
4100 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4101
4102 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4103 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4104 list_del(&rpi_hdr->list);
4105 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4106 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4107 kfree(rpi_hdr->dmabuf);
4108 kfree(rpi_hdr);
4109 }
4110
4111 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4112 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
4113}
4114
4115/**
4116 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
4117 * @pdev: pointer to pci device data structure.
4118 *
4119 * This routine is invoked to allocate the driver hba data structure for an
4120 * HBA device. If the allocation is successful, the phba reference to the
4121 * PCI device data structure is set.
4122 *
4123 * Return codes
4124 * pointer to @phba - successful
4125 * NULL - error
4126 **/
4127static struct lpfc_hba *
4128lpfc_hba_alloc(struct pci_dev *pdev)
4129{
4130 struct lpfc_hba *phba;
4131
4132 /* Allocate memory for HBA structure */
4133 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4134 if (!phba) {
4135 dev_err(&pdev->dev, /* phba is still NULL here */
4136 "1417 Failed to allocate hba struct.\n");
4137 return NULL;
4138 }
4139
4140 /* Set reference to PCI device in HBA structure */
4141 phba->pcidev = pdev;
4142
4143 /* Assign an unused board number */
4144 phba->brd_no = lpfc_get_instance();
4145 if (phba->brd_no < 0) {
4146 kfree(phba);
4147 return NULL;
4148 }
4149
4150 return phba;
4151}
4152
4153/**
4154 * lpfc_hba_free - Free driver hba data structure with a device.
4155 * @phba: pointer to lpfc hba data structure.
4156 *
4157 * This routine is invoked to free the driver hba data structure with an
4158 * HBA device.
4159 **/
4160static void
4161lpfc_hba_free(struct lpfc_hba *phba)
4162{
4163 /* Release the driver assigned board number */
4164 idr_remove(&lpfc_hba_index, phba->brd_no);
4165
4166 kfree(phba);
4167 return;
4168}
4169
4170/**
4171 * lpfc_create_shost - Create hba physical port with associated scsi host.
4172 * @phba: pointer to lpfc hba data structure.
4173 *
4174 * This routine is invoked to create HBA physical port and associate a SCSI
4175 * host with it.
4176 *
4177 * Return codes
4178 * 0 - successful
4179 * other values - error
4180 **/
4181static int
4182lpfc_create_shost(struct lpfc_hba *phba)
4183{
4184 struct lpfc_vport *vport;
4185 struct Scsi_Host *shost;
4186
4187 /* Initialize HBA FC structure */
4188 phba->fc_edtov = FF_DEF_EDTOV;
4189 phba->fc_ratov = FF_DEF_RATOV;
4190 phba->fc_altov = FF_DEF_ALTOV;
4191 phba->fc_arbtov = FF_DEF_ARBTOV;
4192
4193 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4194 if (!vport)
4195 return -ENODEV;
4196
4197 shost = lpfc_shost_from_vport(vport);
4198 phba->pport = vport;
4199 lpfc_debugfs_initialize(vport);
4200 /* Put reference to SCSI host to driver's device private data */
4201 pci_set_drvdata(phba->pcidev, shost);
4202
4203 return 0;
4204}
4205
4206/**
4207 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
4208 * @phba: pointer to lpfc hba data structure.
4209 *
4210 * This routine is invoked to destroy HBA physical port and the associated
4211 * SCSI host.
4212 **/
4213static void
4214lpfc_destroy_shost(struct lpfc_hba *phba)
4215{
4216 struct lpfc_vport *vport = phba->pport;
4217
4218 /* Destroy physical port that associated with the SCSI host */
4219 destroy_port(vport);
4220
4221 return;
4222}
4223
4224/**
4225 * lpfc_setup_bg - Setup Block guard structures and debug areas.
4226 * @phba: pointer to lpfc hba data structure.
4227 * @shost: the shost to be used to detect Block guard settings.
4228 *
4229 * This routine sets up the local Block guard protocol settings for @shost.
4230 * This routine also allocates memory for debugging bg buffers.
4231 **/
4232static void
4233lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4234{
4235 int pagecnt = 10;
4236 if (lpfc_prot_mask && lpfc_prot_guard) {
4237 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4238 "1478 Registering BlockGuard with the "
4239 "SCSI layer\n");
4240 scsi_host_set_prot(shost, lpfc_prot_mask);
4241 scsi_host_set_guard(shost, lpfc_prot_guard);
4242 }
4243 if (!_dump_buf_data) {
4244 while (pagecnt) {
4245 spin_lock_init(&_dump_buf_lock);
4246 _dump_buf_data =
4247 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4248 if (_dump_buf_data) {
4249 printk(KERN_ERR "BLKGRD allocated %d pages for "
4250 "_dump_buf_data at 0x%p\n",
4251 (1 << pagecnt), _dump_buf_data);
4252 _dump_buf_data_order = pagecnt;
4253 memset(_dump_buf_data, 0,
4254 ((1 << PAGE_SHIFT) << pagecnt));
4255 break;
4256 } else
4257 --pagecnt;
4258 }
4259 if (!_dump_buf_data_order)
4260 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4261 "memory for hexdump\n");
4262 } else
4263 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
4264 "\n", _dump_buf_data);
4265 if (!_dump_buf_dif) {
4266 while (pagecnt) {
4267 _dump_buf_dif =
4268 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4269 if (_dump_buf_dif) {
4270 printk(KERN_ERR "BLKGRD allocated %d pages for "
4271 "_dump_buf_dif at 0x%p\n",
4272 (1 << pagecnt), _dump_buf_dif);
4273 _dump_buf_dif_order = pagecnt;
4274 memset(_dump_buf_dif, 0,
4275 ((1 << PAGE_SHIFT) << pagecnt));
4276 break;
4277 } else
4278 --pagecnt;
4279 }
4280 if (!_dump_buf_dif_order)
4281 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
4282 "memory for hexdump\n");
4283 } else
4284 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
4285 _dump_buf_dif);
4286}
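/*
 * Editor's note: __get_free_pages() takes an allocation *order*, so the
 * fallback loops above request 2^pagecnt contiguous pages, starting at
 * order 10 (4MB with 4K pages) and halving the order on each failure;
 * that is why the success message prints (1 << pagecnt) pages.
 */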
4287
4288/**
4289 * lpfc_post_init_setup - Perform necessary device post initialization setup.
4290 * @phba: pointer to lpfc hba data structure.
4291 *
4292 * This routine is invoked to perform all the necessary post initialization
4293 * setup for the device.
4294 **/
4295static void
4296lpfc_post_init_setup(struct lpfc_hba *phba)
4297{
4298 struct Scsi_Host *shost;
4299 struct lpfc_adapter_event_header adapter_event;
4300
4301 /* Get the default values for Model Name and Description */
4302 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
4303
4304 /*
4305 * hba setup may have changed the hba_queue_depth so we need to
4306 * adjust the value of can_queue.
4307 */
4308 shost = pci_get_drvdata(phba->pcidev);
4309 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4310 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4311 lpfc_setup_bg(phba, shost);
4312
4313 lpfc_host_attrib_init(shost);
4314
4315 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
4316 spin_lock_irq(shost->host_lock);
4317 lpfc_poll_start_timer(phba);
4318 spin_unlock_irq(shost->host_lock);
4319 }
4320
4321 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4322 "0428 Perform SCSI scan\n");
4323 /* Send board arrival event to upper layer */
4324 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
4325 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
4326 fc_host_post_vendor_event(shost, fc_get_event_number(),
4327 sizeof(adapter_event),
4328 (char *) &adapter_event,
4329 LPFC_NL_VENDOR_ID);
4330 return;
4331}
4332
4333/**
4334 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
4335 * @phba: pointer to lpfc hba data structure.
4336 *
4337 * This routine is invoked to set up the PCI device memory space for device
4338 * with SLI-3 interface spec.
4339 *
4340 * Return codes
4341 * 0 - successful
4342 * other values - error
4343 **/
4344static int
4345lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4346{
4347 struct pci_dev *pdev;
4348 unsigned long bar0map_len, bar2map_len;
4349 int i, hbq_count;
4350 void *ptr;
4351 int error = -ENODEV;
4352
4353 /* Obtain PCI device reference */
4354 if (!phba->pcidev)
4355 return error;
4356 else
4357 pdev = phba->pcidev;
4358
4359 /* Set the device DMA mask size */
4360 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
4361 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
4362 return error;
4363
4364 /* Get the bus address of Bar0 and Bar2 and the number of bytes
4365 * required by each mapping.
4366 */
4367 phba->pci_bar0_map = pci_resource_start(pdev, 0);
4368 bar0map_len = pci_resource_len(pdev, 0);
4369
4370 phba->pci_bar2_map = pci_resource_start(pdev, 2);
4371 bar2map_len = pci_resource_len(pdev, 2);
4372
4373 /* Map HBA SLIM to a kernel virtual address. */
4374 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
4375 if (!phba->slim_memmap_p) {
4376 dev_printk(KERN_ERR, &pdev->dev,
4377 "ioremap failed for SLIM memory.\n");
4378 goto out;
4379 }
4380
4381 /* Map HBA Control Registers to a kernel virtual address. */
4382 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
4383 if (!phba->ctrl_regs_memmap_p) {
4384 dev_printk(KERN_ERR, &pdev->dev,
4385 "ioremap failed for HBA control registers.\n");
4386 goto out_iounmap_slim;
4387 }
4388
4389 /* Allocate memory for SLI-2 structures */
4390 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
4391 SLI2_SLIM_SIZE,
4392 &phba->slim2p.phys,
4393 GFP_KERNEL);
4394 if (!phba->slim2p.virt)
4395 goto out_iounmap;
4396
4397 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
4398 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
4399 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
4400 phba->IOCBs = (phba->slim2p.virt +
4401 offsetof(struct lpfc_sli2_slim, IOCBs));
4402
4403 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
4404 lpfc_sli_hbq_size(),
4405 &phba->hbqslimp.phys,
4406 GFP_KERNEL);
4407 if (!phba->hbqslimp.virt)
4408 goto out_free_slim;
4409
4410 hbq_count = lpfc_sli_hbq_count();
4411 ptr = phba->hbqslimp.virt;
4412 for (i = 0; i < hbq_count; ++i) {
4413 phba->hbqs[i].hbq_virt = ptr;
4414 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4415 ptr += (lpfc_hbq_defs[i]->entry_count *
4416 sizeof(struct lpfc_hbq_entry));
4417 }
4418 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
4419 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
4420
4421 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
4422
4423 INIT_LIST_HEAD(&phba->rb_pend_list);
4424
4425 phba->MBslimaddr = phba->slim_memmap_p;
4426 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
4427 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
4428 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
4429 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
4430
4431 return 0;
4432
4433out_free_slim:
4434 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4435 phba->slim2p.virt, phba->slim2p.phys);
4436out_iounmap:
4437 iounmap(phba->ctrl_regs_memmap_p);
4438out_iounmap_slim:
4439 iounmap(phba->slim_memmap_p);
4440out:
4441 return error;
4442}
4443
4444/**
4445 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
4446 * @phba: pointer to lpfc hba data structure.
4447 *
4448 * This routine is invoked to unset the PCI device memory space for device
4449 * with SLI-3 interface spec.
4450 **/
4451static void
4452lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
4453{
4454 struct pci_dev *pdev;
4455
4456 /* Obtain PCI device reference */
4457 if (!phba->pcidev)
4458 return;
4459 else
4460 pdev = phba->pcidev;
4461
4462 /* Free coherent DMA memory allocated */
4463 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
4464 phba->hbqslimp.virt, phba->hbqslimp.phys);
4465 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
4466 phba->slim2p.virt, phba->slim2p.phys);
4467
4468 /* I/O memory unmap */
4469 iounmap(phba->ctrl_regs_memmap_p);
4470 iounmap(phba->slim_memmap_p);
4471
4472 return;
4473}
4474
4475/**
4476 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
4477 * @phba: pointer to lpfc hba data structure.
4478 *
4479 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
4480 * done and check status.
4481 *
4482 * Return 0 if successful, otherwise -ENODEV.
4483 **/
4484int
4485lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4486{
4487 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
4488 uint32_t onlnreg0, onlnreg1;
4489 int i, port_error = -ENODEV;
4490
4491 if (!phba->sli4_hba.STAregaddr)
4492 return -ENODEV;
4493
4494 /* On unrecoverable error, log the error message and return error */
4495 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
4496 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
4497 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
4498 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4499 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4500 if (uerrlo_reg.word0 || uerrhi_reg.word0) {
4501 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4502 "1422 HBA Unrecoverable error: "
4503 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4504 "online0_reg=0x%x, online1_reg=0x%x\n",
4505 uerrlo_reg.word0, uerrhi_reg.word0,
4506 onlnreg0, onlnreg1);
4507 }
4508 return -ENODEV;
4509 }
4510
4511 /* Wait up to 30 seconds for the SLI Port POST done and ready */
4512 for (i = 0; i < 3000; i++) {
4513 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
4514 /* Encounter fatal POST error, break out */
4515 if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
4516 port_error = -ENODEV;
4517 break;
4518 }
4519 if (LPFC_POST_STAGE_ARMFW_READY ==
4520 bf_get(lpfc_hst_state_port_status, &sta_reg)) {
4521 port_error = 0;
4522 break;
4523 }
4524 msleep(10);
4525 }
4526
4527 if (port_error)
4528 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4529 "1408 Failure HBA POST Status: sta_reg=0x%x, "
4530 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
4531 "dl=x%x, pstatus=x%x\n", sta_reg.word0,
4532 bf_get(lpfc_hst_state_perr, &sta_reg),
4533 bf_get(lpfc_hst_state_sfi, &sta_reg),
4534 bf_get(lpfc_hst_state_nip, &sta_reg),
4535 bf_get(lpfc_hst_state_ipc, &sta_reg),
4536 bf_get(lpfc_hst_state_xrom, &sta_reg),
4537 bf_get(lpfc_hst_state_dl, &sta_reg),
4538 bf_get(lpfc_hst_state_port_status, &sta_reg));
4539
4540 /* Log device information */
4541 scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
4542 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4543 "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
4544 "FeatureL1=0x%x, FeatureL2=0x%x\n",
4545 bf_get(lpfc_scratchpad_chiptype, &scratchpad),
4546 bf_get(lpfc_scratchpad_slirev, &scratchpad),
4547 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
4548 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
4549
4550 return port_error;
4551}
4552
4553/**
4554 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
4555 * @phba: pointer to lpfc hba data structure.
4556 *
4557 * This routine is invoked to set up SLI4 BAR0 PCI config space register
4558 * memory map.
4559 **/
4560static void
4561lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4562{
4563 phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4564 LPFC_UERR_STATUS_LO;
4565 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4566 LPFC_UERR_STATUS_HI;
4567 phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
4568 LPFC_ONLINE0;
4569 phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
4570 LPFC_ONLINE1;
4571 phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
4572 LPFC_SCRATCHPAD;
4573}
4574
4575/**
4576 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
4577 * @phba: pointer to lpfc hba data structure.
4578 *
4579 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
4580 * memory map.
4581 **/
4582static void
4583lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
4584{
4585
4586 phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4587 LPFC_HST_STATE;
4588 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4589 LPFC_HST_ISR0;
4590 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4591 LPFC_HST_IMR0;
4592 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
4593 LPFC_HST_ISCR0;
4594 return;
4595}
4596
4597/**
4598 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
4599 * @phba: pointer to lpfc hba data structure.
4600 * @vf: virtual function number
4601 *
4602 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
4603 * based on the given virtual function number, @vf.
4604 *
4605 * Return 0 if successful, otherwise -ENODEV.
4606 **/
4607static int
4608lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
4609{
4610 if (vf > LPFC_VIR_FUNC_MAX)
4611 return -ENODEV;
4612
4613 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4614 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
4615 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4616 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
4617 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4618 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
4619 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4620 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
4621 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
4622 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
4623 return 0;
4624}
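/*
 * Editor's note: a worked example of the BAR2 layout assumed above. Each
 * virtual function owns one LPFC_VFR_PAGE_SIZE window, so for vf == 2 the
 * receive-queue doorbell resolves to
 *
 *	drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL
 *
 * with the WQ, EQ/CQ, MQ and bootstrap-mailbox registers at their own
 * fixed offsets inside that same per-VF page.
 */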
4625
4626/**
4627 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
4628 * @phba: pointer to lpfc hba data structure.
4629 *
4630 * This routine is invoked to create the bootstrap mailbox
4631 * region consistent with the SLI-4 interface spec. This
4632 * routine allocates all memory necessary to communicate
4633 * mailbox commands to the port and sets up all alignment
4634 * needs. No locks are expected to be held when calling
4635 * this routine.
4636 *
4637 * Return codes
4638 * 0 - successful
4639 * ENOMEM - could not allocate memory.
4640 **/
4641static int
4642lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
4643{
4644 uint32_t bmbx_size;
4645 struct lpfc_dmabuf *dmabuf;
4646 struct dma_address *dma_address;
4647 uint32_t pa_addr;
4648 uint64_t phys_addr;
4649
4650 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4651 if (!dmabuf)
4652 return -ENOMEM;
4653
4654 /*
4655 * The bootstrap mailbox region is comprised of 2 parts
4656 * plus an alignment restriction of 16 bytes.
4657 */
4658 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
4659 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4660 bmbx_size,
4661 &dmabuf->phys,
4662 GFP_KERNEL);
4663 if (!dmabuf->virt) {
4664 kfree(dmabuf);
4665 return -ENOMEM;
4666 }
4667 memset(dmabuf->virt, 0, bmbx_size);
4668
4669 /*
4670 * Initialize the bootstrap mailbox pointers now so that the register
4671 * operations are simple later. The mailbox dma address is required
4672 * to be 16-byte aligned. Also align the virtual memory as each
4673 * mailbox is copied into the bmbx mailbox region before issuing the
4674 * command to the port.
4675 */
4676 phba->sli4_hba.bmbx.dmabuf = dmabuf;
4677 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
4678
4679 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
4680 LPFC_ALIGN_16_BYTE);
4681 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
4682 LPFC_ALIGN_16_BYTE);
4683
4684 /*
4685 * Set the high and low physical addresses now. The SLI4 alignment
4686 * requirement is 16 bytes and the mailbox is posted to the port
4687 * as two 30-bit addresses. The other data is a bit marking whether
4688 * the 30-bit address is the high or low address.
4689 * Upcast bmbx aphys to 64 bits so the shift instruction compiles
4690 * cleanly on 32-bit machines.
4691 */
4692 dma_address = &phba->sli4_hba.bmbx.dma_address;
4693 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
4694 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
4695 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
4696 LPFC_BMBX_BIT1_ADDR_HI);
4697
4698 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
4699 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
4700 LPFC_BMBX_BIT1_ADDR_LO);
4701 return 0;
4702}
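/*
 * Editor's note: a worked example of the 30-bit address split above,
 * assuming a 16-byte aligned aphys. The two registers jointly carry
 * address bits 63:4 (the low 4 bits are zero by alignment):
 *
 *	addr_lo = (((aphys >> 4)  & 0x3fffffff) << 2) | LPFC_BMBX_BIT1_ADDR_LO
 *	addr_hi = (((aphys >> 34) & 0x3fffffff) << 2) | LPFC_BMBX_BIT1_ADDR_HI
 *
 * so each register holds 30 address bits in bits 31:2, with the BIT1
 * flag distinguishing the high half from the low half.
 */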
4703
4704/**
4705 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
4706 * @phba: pointer to lpfc hba data structure.
4707 *
4708 * This routine is invoked to teardown the bootstrap mailbox
4709 * region and release all host resources. This routine requires
4710 * the caller to ensure all mailbox commands have been recovered, no
4711 * additional mailbox commands are sent, and interrupts are disabled
4712 * before calling this routine.
4713 *
4714 **/
4715static void
4716lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
4717{
4718 dma_free_coherent(&phba->pcidev->dev,
4719 phba->sli4_hba.bmbx.bmbx_size,
4720 phba->sli4_hba.bmbx.dmabuf->virt,
4721 phba->sli4_hba.bmbx.dmabuf->phys);
4722
4723 kfree(phba->sli4_hba.bmbx.dmabuf);
4724 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
4725}
4726
4727/**
4728 * lpfc_sli4_read_config - Get the config parameters.
4729 * @phba: pointer to lpfc hba data structure.
4730 *
4731 * This routine is invoked to read the configuration parameters from the HBA.
4732 * The configuration parameters are used to set the base and maximum values
4733 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
4734 * allocation for the port.
4735 *
4736 * Return codes
4737 * 0 - successful
4738 * ENOMEM - No available memory
4739 * EIO - The mailbox failed to complete successfully.
4740 **/
4741static int
4742lpfc_sli4_read_config(struct lpfc_hba *phba)
4743{
4744 LPFC_MBOXQ_t *pmb;
4745 struct lpfc_mbx_read_config *rd_config;
4746 uint32_t rc = 0;
4747
4748 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4749 if (!pmb) {
4750 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4751 "2011 Unable to allocate memory for issuing "
4752 "SLI_CONFIG_SPECIAL mailbox command\n");
4753 return -ENOMEM;
4754 }
4755
4756 lpfc_read_config(phba, pmb);
4757
4758 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4759 if (rc != MBX_SUCCESS) {
4760 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4761 "2012 Mailbox failed , mbxCmd x%x "
4762 "READ_CONFIG, mbxStatus x%x\n",
4763 bf_get(lpfc_mqe_command, &pmb->u.mqe),
4764 bf_get(lpfc_mqe_status, &pmb->u.mqe));
4765 rc = -EIO;
4766 } else {
4767 rd_config = &pmb->u.mqe.un.rd_config;
4768 phba->sli4_hba.max_cfg_param.max_xri =
4769 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
4770 phba->sli4_hba.max_cfg_param.xri_base =
4771 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
4772 phba->sli4_hba.max_cfg_param.max_vpi =
4773 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
4774 phba->sli4_hba.max_cfg_param.vpi_base =
4775 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
4776 phba->sli4_hba.max_cfg_param.max_rpi =
4777 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
4778 phba->sli4_hba.max_cfg_param.rpi_base =
4779 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
4780 phba->sli4_hba.max_cfg_param.max_vfi =
4781 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
4782 phba->sli4_hba.max_cfg_param.vfi_base =
4783 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
4784 phba->sli4_hba.max_cfg_param.max_fcfi =
4785 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
4786 phba->sli4_hba.max_cfg_param.fcfi_base =
4787 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
4788 phba->sli4_hba.max_cfg_param.max_eq =
4789 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
4790 phba->sli4_hba.max_cfg_param.max_rq =
4791 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
4792 phba->sli4_hba.max_cfg_param.max_wq =
4793 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
4794 phba->sli4_hba.max_cfg_param.max_cq =
4795 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
4796 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
4797 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
4798 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4799 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4800 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4801 phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
4802 phba->max_vports = phba->max_vpi;
4803 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4804 "2003 cfg params XRI(B:%d M:%d), "
4805 "VPI(B:%d M:%d) "
4806 "VFI(B:%d M:%d) "
4807 "RPI(B:%d M:%d) "
4808 "FCFI(B:%d M:%d)\n",
4809 phba->sli4_hba.max_cfg_param.xri_base,
4810 phba->sli4_hba.max_cfg_param.max_xri,
4811 phba->sli4_hba.max_cfg_param.vpi_base,
4812 phba->sli4_hba.max_cfg_param.max_vpi,
4813 phba->sli4_hba.max_cfg_param.vfi_base,
4814 phba->sli4_hba.max_cfg_param.max_vfi,
4815 phba->sli4_hba.max_cfg_param.rpi_base,
4816 phba->sli4_hba.max_cfg_param.max_rpi,
4817 phba->sli4_hba.max_cfg_param.fcfi_base,
4818 phba->sli4_hba.max_cfg_param.max_fcfi);
4819 }
4820 mempool_free(pmb, phba->mbox_mem_pool);
4821
4822 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
4823 if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
4824 phba->cfg_hba_queue_depth =
4825 phba->sli4_hba.max_cfg_param.max_xri;
4826 return rc;
4827}
4828
4829/**
4830 * lpfc_setup_endian_order - Notify the port of the host's endian order.
4831 * @phba: pointer to lpfc hba data structure.
4832 *
4833 * This routine is invoked to setup the host-side endian order to the
4834 * HBA consistent with the SLI-4 interface spec.
4835 *
4836 * Return codes
4837 * 0 - successful
4838 * ENOMEM - No available memory
4839 * EIO - The mailbox failed to complete successfully.
4840 **/
4841static int
4842lpfc_setup_endian_order(struct lpfc_hba *phba)
4843{
4844 LPFC_MBOXQ_t *mboxq;
4845 uint32_t rc = 0;
4846 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
4847 HOST_ENDIAN_HIGH_WORD1};
4848
4849 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4850 if (!mboxq) {
4851 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4852 "0492 Unable to allocate memory for issuing "
4853 "SLI_CONFIG_SPECIAL mailbox command\n");
4854 return -ENOMEM;
4855 }
4856
4857 /*
4858 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
4859 * words to contain special data values and no other data.
4860 */
4861 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
4862 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
4863 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4864 if (rc != MBX_SUCCESS) {
4865 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4866 "0493 SLI_CONFIG_SPECIAL mailbox failed with "
4867 "status x%x\n",
4868 rc);
4869 rc = -EIO;
4870 }
4871
4872 mempool_free(mboxq, phba->mbox_mem_pool);
4873 return rc;
4874}
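/*
 * Editor's note (interpretation of the handshake above): the two magic
 * words form a byte pattern the port can recognize in either byte order.
 * By observing the order in which those bytes arrive, the port deduces
 * the host's endianness and applies it to all subsequent mailbox traffic.
 */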
4875
4876/**
4877 * lpfc_sli4_queue_create - Create all the SLI4 queues
4878 * @phba: pointer to lpfc hba data structure.
4879 *
4880 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
4881 * operation. For each SLI4 queue type, the parameters such as queue entry
4882 * count (queue depth) shall be taken from the module parameter. For now,
4883 * we just use some constant number as a placeholder.
4884 *
4885 * Return codes
4886 * 0 - successful
4887 * -ENOMEM - No available memory
4889 **/
4890static int
4891lpfc_sli4_queue_create(struct lpfc_hba *phba)
4892{
4893 struct lpfc_queue *qdesc;
4894 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
4895 int cfg_fcp_wq_count;
4896 int cfg_fcp_eq_count;
4897
4898 /*
4899 * Sanity check for configured queue parameters against the run-time
4900 * device parameters
4901 */
4902
4903 /* Sanity check on FCP fast-path WQ parameters */
4904 cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
4905 if (cfg_fcp_wq_count >
4906 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
4907 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
4908 LPFC_SP_WQN_DEF;
4909 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
4910 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4911 "2581 Not enough WQs (%d) from "
4912 "the pci function for supporting "
4913 "FCP WQs (%d)\n",
4914 phba->sli4_hba.max_cfg_param.max_wq,
4915 phba->cfg_fcp_wq_count);
4916 goto out_error;
4917 }
4918 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4919 "2582 Not enough WQs (%d) from the pci "
4920 "function for supporting the requested "
4921 "FCP WQs (%d), the actual FCP WQs can "
4922 "be supported: %d\n",
4923 phba->sli4_hba.max_cfg_param.max_wq,
4924 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
4925 }
4926 /* The actual number of FCP work queues adopted */
4927 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
4928
4929 /* Sanity check on FCP fast-path EQ parameters */
4930 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
4931 if (cfg_fcp_eq_count >
4932 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
4933 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
4934 LPFC_SP_EQN_DEF;
4935 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
4936 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4937 "2574 Not enough EQs (%d) from the "
4938 "pci function for supporting FCP "
4939 "EQs (%d)\n",
4940 phba->sli4_hba.max_cfg_param.max_eq,
4941 phba->cfg_fcp_eq_count);
4942 goto out_error;
4943 }
4944 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4945 "2575 Not enough EQs (%d) from the pci "
4946 "function for supporting the requested "
4947 "FCP EQs (%d), the actual FCP EQs can "
4948 "be supported: %d\n",
4949 phba->sli4_hba.max_cfg_param.max_eq,
4950 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
4951 }
4952 /* It does not make sense to have more EQs than WQs */
4953 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
4954 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4955 "2593 The number of FCP EQs (%d) is more "
4956 "than the number of FCP WQs (%d), take "
4957 "the number of FCP EQs same as than of "
4958 "WQs (%d)\n", cfg_fcp_eq_count,
4959 phba->cfg_fcp_wq_count,
4960 phba->cfg_fcp_wq_count);
4961 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
4962 }
4963 /* The actual number of FCP event queues adopted */
4964 phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
4965 /* The overall number of event queues used */
4966 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
4967
4968 /*
4969 * Create Event Queues (EQs)
4970 */
4971
4972 /* Get EQ depth from module parameter, fake the default for now */
4973 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
4974 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
4975
4976 /* Create slow path event queue */
4977 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4978 phba->sli4_hba.eq_ecount);
4979 if (!qdesc) {
4980 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4981 "0496 Failed allocate slow-path EQ\n");
4982 goto out_error;
4983 }
4984 phba->sli4_hba.sp_eq = qdesc;
4985
4986 /* Create fast-path FCP Event Queue(s) */
4987 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
4988 phba->cfg_fcp_eq_count), GFP_KERNEL);
4989 if (!phba->sli4_hba.fp_eq) {
4990 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4991 "2576 Failed allocate memory for fast-path "
4992 "EQ record array\n");
4993 goto out_free_sp_eq;
4994 }
4995 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
4996 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
4997 phba->sli4_hba.eq_ecount);
4998 if (!qdesc) {
4999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5000 "0497 Failed allocate fast-path EQ\n");
5001 goto out_free_fp_eq;
5002 }
5003 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
5004 }
5005
5006 /*
5007 * Create Complete Queues (CQs)
5008 */
5009
5010 /* Get CQ depth from module parameter, fake the default for now */
5011 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
5012 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
5013
5014 /* Create slow-path Mailbox Command Complete Queue */
5015 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5016 phba->sli4_hba.cq_ecount);
5017 if (!qdesc) {
5018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5019 "0500 Failed allocate slow-path mailbox CQ\n");
5020 goto out_free_fp_eq;
5021 }
5022 phba->sli4_hba.mbx_cq = qdesc;
5023
5024 /* Create slow-path ELS Complete Queue */
5025 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5026 phba->sli4_hba.cq_ecount);
5027 if (!qdesc) {
5028 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5029 "0501 Failed allocate slow-path ELS CQ\n");
5030 goto out_free_mbx_cq;
5031 }
5032 phba->sli4_hba.els_cq = qdesc;
5033
5034 /* Create slow-path Unsolicited Receive Complete Queue */
5035 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5036 phba->sli4_hba.cq_ecount);
5037 if (!qdesc) {
5038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5039 "0502 Failed allocate slow-path USOL RX CQ\n");
5040 goto out_free_els_cq;
5041 }
5042 phba->sli4_hba.rxq_cq = qdesc;
5043
5044 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5045 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
5046 phba->cfg_fcp_eq_count), GFP_KERNEL);
5047 if (!phba->sli4_hba.fcp_cq) {
5048 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5049 "2577 Failed allocate memory for fast-path "
5050 "CQ record array\n");
5051 goto out_free_rxq_cq;
5052 }
5053 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5054 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5055 phba->sli4_hba.cq_ecount);
5056 if (!qdesc) {
5057 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5058 "0499 Failed allocate fast-path FCP "
5059 "CQ (%d)\n", fcp_cqidx);
5060 goto out_free_fcp_cq;
5061 }
5062 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
5063 }
5064
5065 /* Create Mailbox Command Queue */
5066 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
5067 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
5068
5069 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
5070 phba->sli4_hba.mq_ecount);
5071 if (!qdesc) {
5072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5073 "0505 Failed allocate slow-path MQ\n");
5074 goto out_free_fcp_cq;
5075 }
5076 phba->sli4_hba.mbx_wq = qdesc;
5077
5078 /*
5079 * Create all the Work Queues (WQs)
5080 */
5081 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
5082 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
5083
5084 /* Create slow-path ELS Work Queue */
5085 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5086 phba->sli4_hba.wq_ecount);
5087 if (!qdesc) {
5088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5089 "0504 Failed allocate slow-path ELS WQ\n");
5090 goto out_free_mbx_wq;
5091 }
5092 phba->sli4_hba.els_wq = qdesc;
5093
5094 /* Create fast-path FCP Work Queue(s) */
5095 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
5096 phba->cfg_fcp_wq_count), GFP_KERNEL);
5097 if (!phba->sli4_hba.fcp_wq) {
5098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5099 "2578 Failed allocate memory for fast-path "
5100 "WQ record array\n");
5101 goto out_free_els_wq;
5102 }
5103 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5104 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
5105 phba->sli4_hba.wq_ecount);
5106 if (!qdesc) {
5107 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5108 "0503 Failed allocate fast-path FCP "
5109 "WQ (%d)\n", fcp_wqidx);
5110 goto out_free_fcp_wq;
5111 }
5112 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
5113 }
5114
5115 /*
5116 * Create Receive Queue (RQ)
5117 */
5118 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
5119 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
5120
5121 /* Create Receive Queue for header */
5122 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5123 phba->sli4_hba.rq_ecount);
5124 if (!qdesc) {
5125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5126 "0506 Failed allocate receive HRQ\n");
5127 goto out_free_fcp_wq;
5128 }
5129 phba->sli4_hba.hdr_rq = qdesc;
5130
5131 /* Create Receive Queue for data */
5132 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
5133 phba->sli4_hba.rq_ecount);
5134 if (!qdesc) {
5135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5136 "0507 Failed allocate receive DRQ\n");
5137 goto out_free_hdr_rq;
5138 }
5139 phba->sli4_hba.dat_rq = qdesc;
5140
5141 return 0;
5142
5143out_free_hdr_rq:
5144 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5145 phba->sli4_hba.hdr_rq = NULL;
5146out_free_fcp_wq:
5147 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
5148 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
5149 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
5150 }
5151 kfree(phba->sli4_hba.fcp_wq);
5152out_free_els_wq:
5153 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5154 phba->sli4_hba.els_wq = NULL;
5155out_free_mbx_wq:
5156 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5157 phba->sli4_hba.mbx_wq = NULL;
5158out_free_fcp_cq:
5159 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
5160 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
5161 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5162 }
5163 kfree(phba->sli4_hba.fcp_cq);
5164out_free_rxq_cq:
5165 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5166 phba->sli4_hba.rxq_cq = NULL;
5167out_free_els_cq:
5168 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5169 phba->sli4_hba.els_cq = NULL;
5170out_free_mbx_cq:
5171 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5172 phba->sli4_hba.mbx_cq = NULL;
5173out_free_fp_eq:
5174 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
5175 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
5176 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
5177 }
5178 kfree(phba->sli4_hba.fp_eq);
5179out_free_sp_eq:
5180 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5181 phba->sli4_hba.sp_eq = NULL;
5182out_error:
5183 return -ENOMEM;
5184}
5185
5186/**
5187 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
5188 * @phba: pointer to lpfc hba data structure.
5189 *
5190 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
5191 * operation.
5192 *
5197 **/
5198static void
5199lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5200{
5201 int fcp_qidx;
5202
5203 /* Release mailbox command work queue */
5204 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
5205 phba->sli4_hba.mbx_wq = NULL;
5206
5207 /* Release ELS work queue */
5208 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
5209 phba->sli4_hba.els_wq = NULL;
5210
5211 /* Release FCP work queue */
5212 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5213 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
5214 kfree(phba->sli4_hba.fcp_wq);
5215 phba->sli4_hba.fcp_wq = NULL;
5216
5217 /* Release unsolicited receive queue */
5218 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
5219 phba->sli4_hba.hdr_rq = NULL;
5220 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5221 phba->sli4_hba.dat_rq = NULL;
5222
5223 /* Release unsolicited receive complete queue */
5224 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5225 phba->sli4_hba.rxq_cq = NULL;
5226
5227 /* Release ELS complete queue */
5228 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5229 phba->sli4_hba.els_cq = NULL;
5230
5231 /* Release mailbox command complete queue */
5232 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
5233 phba->sli4_hba.mbx_cq = NULL;
5234
5235 /* Release FCP response complete queue */
5236 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5237 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
5238 kfree(phba->sli4_hba.fcp_cq);
5239 phba->sli4_hba.fcp_cq = NULL;
5240
5241 /* Release fast-path event queue */
5242 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5243 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
5244 kfree(phba->sli4_hba.fp_eq);
5245 phba->sli4_hba.fp_eq = NULL;
5246
5247 /* Release slow-path event queue */
5248 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
5249 phba->sli4_hba.sp_eq = NULL;
5250
5251 return;
5252}
5253
5254/**
5255 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
5256 * @phba: pointer to lpfc hba data structure.
5257 *
5258 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
5259 * operation.
5260 *
5261 * Return codes
5262 * 0 - successful
5263 * ENOMEM - No available memory
5264 * EIO - The mailbox failed to complete successfully.
5265 **/
5266int
5267lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5268{
5269 int rc = -ENOMEM;
5270 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5271 int fcp_cq_index = 0;
5272
5273 /*
5274 * Set up Event Queues (EQs)
5275 */
5276
5277 /* Set up slow-path event queue */
5278 if (!phba->sli4_hba.sp_eq) {
5279 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5280 "0520 Slow-path EQ not allocated\n");
5281 goto out_error;
5282 }
5283 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
5284 LPFC_SP_DEF_IMAX);
5285 if (rc) {
5286 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5287 "0521 Failed setup of slow-path EQ: "
5288 "rc = 0x%x\n", rc);
5289 goto out_error;
5290 }
5291 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5292 "2583 Slow-path EQ setup: queue-id=%d\n",
5293 phba->sli4_hba.sp_eq->queue_id);
5294
5295 /* Set up fast-path event queue */
5296 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
5297 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
5298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5299 "0522 Fast-path EQ (%d) not "
5300 "allocated\n", fcp_eqidx);
5301 goto out_destroy_fp_eq;
5302 }
5303 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
5304 phba->cfg_fcp_imax);
5305 if (rc) {
5306 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5307 "0523 Failed setup of fast-path EQ "
5308 "(%d), rc = 0x%x\n", fcp_eqidx, rc);
5309 goto out_destroy_fp_eq;
5310 }
5311 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5312 "2584 Fast-path EQ setup: "
5313 "queue[%d]-id=%d\n", fcp_eqidx,
5314 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
5315 }
5316
5317 /*
5318 * Set up Complete Queues (CQs)
5319 */
5320
5321 /* Set up slow-path MBOX Complete Queue as the first CQ */
5322 if (!phba->sli4_hba.mbx_cq) {
5323 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5324 "0528 Mailbox CQ not allocated\n");
5325 goto out_destroy_fp_eq;
5326 }
5327 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
5328 LPFC_MCQ, LPFC_MBOX);
5329 if (rc) {
5330 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5331 "0529 Failed setup of slow-path mailbox CQ: "
5332 "rc = 0x%x\n", rc);
5333 goto out_destroy_fp_eq;
5334 }
5335 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5336 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
5337 phba->sli4_hba.mbx_cq->queue_id,
5338 phba->sli4_hba.sp_eq->queue_id);
5339
5340 /* Set up slow-path ELS Complete Queue */
5341 if (!phba->sli4_hba.els_cq) {
5342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5343 "0530 ELS CQ not allocated\n");
5344 goto out_destroy_mbx_cq;
5345 }
5346 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
5347 LPFC_WCQ, LPFC_ELS);
5348 if (rc) {
5349 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5350 "0531 Failed setup of slow-path ELS CQ: "
5351 "rc = 0x%x\n", rc);
5352 goto out_destroy_mbx_cq;
5353 }
5354 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5355 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
5356 phba->sli4_hba.els_cq->queue_id,
5357 phba->sli4_hba.sp_eq->queue_id);
5358
5359 /* Set up slow-path Unsolicited Receive Complete Queue */
5360 if (!phba->sli4_hba.rxq_cq) {
5361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5362 "0532 USOL RX CQ not allocated\n");
5363 goto out_destroy_els_cq;
5364 }
5365 rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
5366 LPFC_RCQ, LPFC_USOL);
5367 if (rc) {
5368 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5369 "0533 Failed setup of slow-path USOL RX CQ: "
5370 "rc = 0x%x\n", rc);
5371 goto out_destroy_els_cq;
5372 }
5373 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5374 "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
5375 phba->sli4_hba.rxq_cq->queue_id,
5376 phba->sli4_hba.sp_eq->queue_id);
5377
5378 /* Set up fast-path FCP Response Complete Queue */
5379 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5380 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
5381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5382 "0526 Fast-path FCP CQ (%d) not "
5383 "allocated\n", fcp_cqidx);
5384 goto out_destroy_fcp_cq;
5385 }
5386 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
5387 phba->sli4_hba.fp_eq[fcp_cqidx],
5388 LPFC_WCQ, LPFC_FCP);
5389 if (rc) {
5390 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5391 "0527 Failed setup of fast-path FCP "
5392 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
5393 goto out_destroy_fcp_cq;
5394 }
5395 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5396 "2588 FCP CQ setup: cq[%d]-id=%d, "
5397 "parent eq[%d]-id=%d\n",
5398 fcp_cqidx,
5399 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
5400 fcp_cqidx,
5401 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
5402 }
5403
5404 /*
5405 * Set up all the Work Queues (WQs)
5406 */
5407
5408 /* Set up Mailbox Command Queue */
5409 if (!phba->sli4_hba.mbx_wq) {
5410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5411 "0538 Slow-path MQ not allocated\n");
5412 goto out_destroy_fcp_cq;
5413 }
5414 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
5415 phba->sli4_hba.mbx_cq, LPFC_MBOX);
5416 if (rc) {
5417 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5418 "0539 Failed setup of slow-path MQ: "
5419 "rc = 0x%x\n", rc);
5420 goto out_destroy_fcp_cq;
5421 }
5422 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5423 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
5424 phba->sli4_hba.mbx_wq->queue_id,
5425 phba->sli4_hba.mbx_cq->queue_id);
5426
5427 /* Set up slow-path ELS Work Queue */
5428 if (!phba->sli4_hba.els_wq) {
5429 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5430 "0536 Slow-path ELS WQ not allocated\n");
5431 goto out_destroy_mbx_wq;
5432 }
5433 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
5434 phba->sli4_hba.els_cq, LPFC_ELS);
5435 if (rc) {
5436 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5437 "0537 Failed setup of slow-path ELS WQ: "
5438 "rc = 0x%x\n", rc);
5439 goto out_destroy_mbx_wq;
5440 }
5441 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5442 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
5443 phba->sli4_hba.els_wq->queue_id,
5444 phba->sli4_hba.els_cq->queue_id);
5445
5446 /* Set up fast-path FCP Work Queue */
5447 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
5448 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
5449 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5450 "0534 Fast-path FCP WQ (%d) not "
5451 "allocated\n", fcp_wqidx);
5452 goto out_destroy_fcp_wq;
5453 }
5454 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
5455 phba->sli4_hba.fcp_cq[fcp_cq_index],
5456 LPFC_FCP);
5457 if (rc) {
5458 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5459 "0535 Failed setup of fast-path FCP "
5460 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
5461 goto out_destroy_fcp_wq;
5462 }
5463 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5464 "2591 FCP WQ setup: wq[%d]-id=%d, "
5465 "parent cq[%d]-id=%d\n",
5466 fcp_wqidx,
5467 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
5468 fcp_cq_index,
5469 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
5470 /* Round robin FCP Work Queue's Completion Queue assignment */
5471 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
5472 }
5473
5474 /*
5475 * Create Receive Queue (RQ)
5476 */
5477 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
5478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5479 "0540 Receive Queue not allocated\n");
5480 goto out_destroy_fcp_wq;
5481 }
5482 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5483 phba->sli4_hba.rxq_cq, LPFC_USOL);
5484 if (rc) {
5485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5486 "0541 Failed setup of Receive Queue: "
5487 "rc = 0x%x\n", rc);
5488 goto out_destroy_fcp_wq;
5489 }
5490 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5491 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
5492 "parent cq-id=%d\n",
5493 phba->sli4_hba.hdr_rq->queue_id,
5494 phba->sli4_hba.dat_rq->queue_id,
5495 phba->sli4_hba.rxq_cq->queue_id);
5496 return 0;
5497
5498out_destroy_fcp_wq:
5499 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
5500 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
5501 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5502out_destroy_mbx_wq:
5503 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5504out_destroy_fcp_cq:
5505 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5506 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5507 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5508out_destroy_els_cq:
5509 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5510out_destroy_mbx_cq:
5511 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5512out_destroy_fp_eq:
5513 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
5514 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
5515 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5516out_error:
5517 return rc;
5518}
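
The FCP work-queue loop above spreads the WQs over the available completion queues with a simple modular walk. A minimal, self-contained C sketch of that round-robin pairing (names and counts here are illustrative, not the driver's):

#include <stdio.h>

/* Round-robin pairing sketch: num_wq work queues are spread over num_cq
 * completion queues, wrapping with a modulo, as in the loop above. */
static void pair_wq_to_cq(int num_wq, int num_cq)
{
	int wq, cq = 0;

	for (wq = 0; wq < num_wq; wq++) {
		printf("wq[%d] -> cq[%d]\n", wq, cq);
		cq = (cq + 1) % num_cq;	/* wrap back to the first CQ */
	}
}

int main(void)
{
	pair_wq_to_cq(4, 2);	/* wq0->cq0, wq1->cq1, wq2->cq0, wq3->cq1 */
	return 0;
}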
5519
5520/**
5521 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
5522 * @phba: pointer to lpfc hba data structure.
5523 *
5404  * This routine is invoked to unset all the SLI4 queues set up for the FCoE
5405  * HBA operation.
5526 *
5527 * Return codes
5408  *      0 - successful
5409  *      ENOMEM - No available memory
5530 * EIO - The mailbox failed to complete successfully.
5531 **/
5532void
5533lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5534{
5535 int fcp_qidx;
5536
5537 /* Unset mailbox command work queue */
5538 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
5539 /* Unset ELS work queue */
5540 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
5541 /* Unset unsolicited receive queue */
5542 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
5543 /* Unset FCP work queue */
5544 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
5545 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
5546 /* Unset mailbox command complete queue */
5547 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5548 /* Unset ELS complete queue */
5549 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5550 /* Unset unsolicited receive complete queue */
5551 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5552 /* Unset FCP response complete queue */
5553 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5554 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
5555 /* Unset fast-path event queue */
5556 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5557 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
5558 /* Unset slow-path event queue */
5559 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
5560}
5561
5562/**
5563 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
5564 * @phba: pointer to lpfc hba data structure.
5565 *
5566 * This routine is invoked to allocate and set up a pool of completion queue
5567 * events. The body of the completion queue event is a completion queue entry
5568  * (CQE). For now, this pool is used for the interrupt service routine to queue
5569 * the following HBA completion queue events for the worker thread to process:
5570 * - Mailbox asynchronous events
5571 * - Receive queue completion unsolicited events
5572 * Later, this can be used for all the slow-path events.
5573 *
5574 * Return codes
5575  *      0 - successful
5576  *      -ENOMEM - No available memory
5577 **/
5578static int
5579lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
5580{
5581 struct lpfc_cq_event *cq_event;
5582 int i;
5583
5584 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
5585 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
5586 if (!cq_event)
5587 goto out_pool_create_fail;
5588 list_add_tail(&cq_event->list,
5589 &phba->sli4_hba.sp_cqe_event_pool);
5590 }
5591 return 0;
5592
5593out_pool_create_fail:
5594 lpfc_sli4_cq_event_pool_destroy(phba);
5595 return -ENOMEM;
5596}
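
The pool-create routine above follows a common pattern: pre-allocate every node up front so the interrupt path never has to call the allocator, and unwind completely on partial failure. A minimal userspace sketch of the same idea, with illustrative names:

#include <stdlib.h>

struct event {
	struct event *next;
};

static struct event *pool;	/* head of the free list */

static void pool_destroy(void)
{
	struct event *e;

	while ((e = pool) != NULL) {
		pool = e->next;
		free(e);
	}
}

static int pool_create(unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct event *e = malloc(sizeof(*e));

		if (!e) {
			pool_destroy();	/* undo the partial allocation */
			return -1;	/* -ENOMEM in the kernel idiom */
		}
		e->next = pool;
		pool = e;
	}
	return 0;
}

int main(void)
{
	if (pool_create(8) != 0)
		return 1;
	pool_destroy();
	return 0;
}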
5597
5598/**
5599 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
5600 * @phba: pointer to lpfc hba data structure.
5601 *
5602 * This routine is invoked to free the pool of completion queue events at
5603  * driver unload time. Note that it is the responsibility of the driver
5604 * cleanup routine to free all the outstanding completion-queue events
5605 * allocated from this pool back into the pool before invoking this routine
5606 * to destroy the pool.
5607 **/
5608static void
5609lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
5610{
5611 struct lpfc_cq_event *cq_event, *next_cq_event;
5612
5613 list_for_each_entry_safe(cq_event, next_cq_event,
5614 &phba->sli4_hba.sp_cqe_event_pool, list) {
5615 list_del(&cq_event->list);
5616 kfree(cq_event);
5617 }
5618}
5619
5620/**
5621 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5622 * @phba: pointer to lpfc hba data structure.
5623 *
5624  * This routine is the lock-free version of the API invoked to allocate a
5625 * completion-queue event from the free pool.
5626 *
5627 * Return: Pointer to the newly allocated completion-queue event if successful
5628 * NULL otherwise.
5629 **/
5630struct lpfc_cq_event *
5631__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5632{
5633 struct lpfc_cq_event *cq_event = NULL;
5634
5635 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
5636 struct lpfc_cq_event, list);
5637 return cq_event;
5638}
5639
5640/**
5641 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
5642 * @phba: pointer to lpfc hba data structure.
5643 *
5644  * This routine is the locked version of the API invoked to allocate a
5645 * completion-queue event from the free pool.
5646 *
5647 * Return: Pointer to the newly allocated completion-queue event if successful
5648 * NULL otherwise.
5649 **/
5650struct lpfc_cq_event *
5651lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
5652{
5653 struct lpfc_cq_event *cq_event;
5654 unsigned long iflags;
5655
5656 spin_lock_irqsave(&phba->hbalock, iflags);
5657 cq_event = __lpfc_sli4_cq_event_alloc(phba);
5658 spin_unlock_irqrestore(&phba->hbalock, iflags);
5659 return cq_event;
5660}
5661
5662/**
5663 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5664 * @phba: pointer to lpfc hba data structure.
5665 * @cq_event: pointer to the completion queue event to be freed.
5666 *
5667  * This routine is the lock-free version of the API invoked to release a
5668 * completion-queue event back into the free pool.
5669 **/
5670void
5671__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5672 struct lpfc_cq_event *cq_event)
5673{
5674 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
5675}
5676
5677/**
5678 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
5679 * @phba: pointer to lpfc hba data structure.
5680 * @cq_event: pointer to the completion queue event to be freed.
5681 *
5682  * This routine is the locked version of the API invoked to release a
5683 * completion-queue event back into the free pool.
5684 **/
5685void
5686lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
5687 struct lpfc_cq_event *cq_event)
5688{
5689 unsigned long iflags;
5690 spin_lock_irqsave(&phba->hbalock, iflags);
5691 __lpfc_sli4_cq_event_release(phba, cq_event);
5692 spin_unlock_irqrestore(&phba->hbalock, iflags);
5693}
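
The alloc/release pairs above follow the kernel's double-underscore convention: the __-prefixed variant assumes the caller already holds hbalock, while the plain variant takes and drops the lock around it. A small userspace sketch of that pairing, using a pthread mutex in place of the spinlock (names are illustrative):

#include <pthread.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static int pool_count;

/* Lock-free variant: caller must already hold pool_lock */
static void __pool_put(void)
{
	pool_count++;
}

/* Locked variant: wraps the lock-free one with lock/unlock */
static void pool_put(void)
{
	pthread_mutex_lock(&pool_lock);
	__pool_put();
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	pool_put();
	return pool_count == 1 ? 0 : 1;
}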
5694
5695/**
5696 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
5697 * @phba: pointer to lpfc hba data structure.
5698 *
5699  * This routine is invoked to free all the pending completion-queue events
5700  * back into the free pool for device reset.
5701 **/
5702static void
5703lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
5704{
5705 LIST_HEAD(cqelist);
5706 struct lpfc_cq_event *cqe;
5707 unsigned long iflags;
5708
5709 /* Retrieve all the pending WCQEs from pending WCQE lists */
5710 spin_lock_irqsave(&phba->hbalock, iflags);
5711 /* Pending FCP XRI abort events */
5712 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
5713 &cqelist);
5714 /* Pending ELS XRI abort events */
5715 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
5716 &cqelist);
5717 	/* Pending async events */
5718 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
5719 &cqelist);
5720 spin_unlock_irqrestore(&phba->hbalock, iflags);
5721
5722 while (!list_empty(&cqelist)) {
5723 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
5724 lpfc_sli4_cq_event_release(phba, cqe);
5725 }
5726}
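
The release-all routine above uses the splice-then-drain idiom: steal every pending list under the lock in O(1), then release the entries with the lock dropped. A compact userspace sketch of the idiom, with a singly linked list standing in for the kernel list heads (illustrative only):

#include <pthread.h>

struct node { struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;	/* shared, lock-protected */

static void release(struct node *n) { (void)n; /* return to a pool */ }

static void drain_all(void)
{
	struct node *local;

	pthread_mutex_lock(&lock);
	local = pending;	/* splice: steal the whole list at once */
	pending = NULL;
	pthread_mutex_unlock(&lock);

	while (local) {		/* process without holding the lock */
		struct node *n = local;
		local = n->next;
		release(n);
	}
}

int main(void)
{
	drain_all();	/* empty list: nothing to release */
	return 0;
}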
5727
5728/**
5729 * lpfc_pci_function_reset - Reset pci function.
5730 * @phba: pointer to lpfc hba data structure.
5731 *
5732  * This routine is invoked to request a PCI function reset. It destroys
5733 * all resources assigned to the PCI function which originates this request.
5734 *
5735 * Return codes
5736  *      0 - successful
5737  *      ENOMEM - No available memory
5738 * EIO - The mailbox failed to complete successfully.
5739 **/
5740int
5741lpfc_pci_function_reset(struct lpfc_hba *phba)
5742{
5743 LPFC_MBOXQ_t *mboxq;
5744 uint32_t rc = 0;
5745 uint32_t shdr_status, shdr_add_status;
5746 union lpfc_sli4_cfg_shdr *shdr;
5747
5748 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5749 if (!mboxq) {
5750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5751 "0494 Unable to allocate memory for issuing "
5752 "SLI_FUNCTION_RESET mailbox command\n");
5753 return -ENOMEM;
5754 }
5755
5756 /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
5757 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5758 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
5759 LPFC_SLI4_MBX_EMBED);
5760 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5761 shdr = (union lpfc_sli4_cfg_shdr *)
5762 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5763 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5764 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5765 if (rc != MBX_TIMEOUT)
5766 mempool_free(mboxq, phba->mbox_mem_pool);
5767 if (shdr_status || shdr_add_status || rc) {
5768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5769 "0495 SLI_FUNCTION_RESET mailbox failed with "
5770 "status x%x add_status x%x, mbx status x%x\n",
5771 shdr_status, shdr_add_status, rc);
5772 rc = -ENXIO;
5773 }
5774 return rc;
5775}
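
Note the double check above: the mailbox can fail at the transport level (rc) or report failure in the SLI4_CONFIG response header (shdr_status/shdr_add_status), and either path must be treated as an error. A tiny C sketch of that dual status check, with made-up types standing in for the response header:

struct cfg_shdr_response {
	unsigned int status;		/* stand-in for shdr_status */
	unsigned int add_status;	/* stand-in for shdr_add_status */
};

static int check_mbox_result(int rc, const struct cfg_shdr_response *resp)
{
	/* Any of the three being nonzero means the command failed */
	if (rc != 0 || resp->status != 0 || resp->add_status != 0)
		return -1;	/* -ENXIO in the driver's idiom */
	return 0;
}

int main(void)
{
	struct cfg_shdr_response ok = { 0, 0 };
	return check_mbox_result(0, &ok);	/* 0: success path */
}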
5776
5777/**
5778 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
5779 * @phba: pointer to lpfc hba data structure.
5780 * @cnt: number of nop mailbox commands to send.
5781 *
5782  * This routine is invoked to send a number @cnt of NOP mailbox commands and
5783 * wait for each command to complete.
5784 *
5785  * Return: the number of NOP mailbox commands completed.
5786 **/
5787static int
5788lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
5789{
5790 LPFC_MBOXQ_t *mboxq;
5791 int length, cmdsent;
5792 uint32_t mbox_tmo;
5793 uint32_t rc = 0;
5794 uint32_t shdr_status, shdr_add_status;
5795 union lpfc_sli4_cfg_shdr *shdr;
5796
5797 if (cnt == 0) {
5798 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5799 "2518 Requested to send 0 NOP mailbox cmd\n");
5800 return cnt;
5801 }
5802
5803 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5804 if (!mboxq) {
5805 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5806 "2519 Unable to allocate memory for issuing "
5807 "NOP mailbox command\n");
5808 return 0;
5809 }
5810
5811 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
5812 length = (sizeof(struct lpfc_mbx_nop) -
5813 sizeof(struct lpfc_sli4_cfg_mhdr));
5814 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5815 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
5816
5817 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5818 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
5819 if (!phba->sli4_hba.intr_enable)
5820 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5821 else
5822 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
5823 if (rc == MBX_TIMEOUT)
5824 break;
5825 /* Check return status */
5826 shdr = (union lpfc_sli4_cfg_shdr *)
5827 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
5828 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5829 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
5830 &shdr->response);
5831 if (shdr_status || shdr_add_status || rc) {
5832 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5833 "2520 NOP mailbox command failed "
5834 "status x%x add_status x%x mbx "
5835 "status x%x\n", shdr_status,
5836 shdr_add_status, rc);
5837 break;
5838 }
5839 }
5840
5841 if (rc != MBX_TIMEOUT)
5842 mempool_free(mboxq, phba->mbox_mem_pool);
5843
5844 return cmdsent;
5845}
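
The NOP sender above deliberately returns the number of commands that completed rather than an error code, stopping at the first timeout or bad completion status. A minimal sketch of that contract (the issue_one callback is a hypothetical stand-in for the poll/wait mailbox issue paths):

static int send_nops(int cnt, int (*issue_one)(void))
{
	int sent;

	for (sent = 0; sent < cnt; sent++) {
		if (issue_one() != 0)
			break;	/* timeout or bad completion status */
	}
	return sent;	/* may be less than cnt on early failure */
}

static int always_ok(void) { return 0; }

int main(void)
{
	return send_nops(3, always_ok) == 3 ? 0 : 1;
}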
5846
5847/**
5848  * lpfc_sli4_fcfi_unreg - Unregister fcfi from device
5849 * @phba: pointer to lpfc hba data structure.
5850 * @fcfi: fcf index.
5851 *
5852  * This routine is invoked to unregister a FCFI from the device.
5853 **/
5854void
5855lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
5856{
5857 LPFC_MBOXQ_t *mbox;
5858 uint32_t mbox_tmo;
5859 int rc;
5860 unsigned long flags;
5861
5862 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5863
5864 if (!mbox)
5865 return;
5866
5867 lpfc_unreg_fcfi(mbox, fcfi);
5868
5869 if (!phba->sli4_hba.intr_enable)
5870 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5871 else {
5872 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5873 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5874 }
5875 if (rc != MBX_TIMEOUT)
5876 mempool_free(mbox, phba->mbox_mem_pool);
5877 if (rc != MBX_SUCCESS)
5878 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5879 "2517 Unregister FCFI command failed "
5880 "status %d, mbxStatus x%x\n", rc,
5881 bf_get(lpfc_mqe_status, &mbox->u.mqe));
5882 else {
5883 spin_lock_irqsave(&phba->hbalock, flags);
5884 		/* Mark the FCFI as no longer registered */
5885 phba->fcf.fcf_flag &=
5886 ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
5887 spin_unlock_irqrestore(&phba->hbalock, flags);
5888 }
5889}
5890
5891/**
5892 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
5893 * @phba: pointer to lpfc hba data structure.
5894 *
5895 * This routine is invoked to set up the PCI device memory space for device
5896 * with SLI-4 interface spec.
5897 *
5898 * Return codes
5899  *      0 - successful
5900 * other values - error
5901 **/
5902static int
5903lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
5904{
5905 struct pci_dev *pdev;
5906 unsigned long bar0map_len, bar1map_len, bar2map_len;
5907 int error = -ENODEV;
5908
5909 /* Obtain PCI device reference */
5910 if (!phba->pcidev)
5911 return error;
5912 else
5913 pdev = phba->pcidev;
5914
5915 /* Set the device DMA mask size */
5916 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
5917 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5918 return error;
5919
5920 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
5921 	 * number of bytes required by each mapping. They actually map to
5922 	 * PCI BAR regions 1, 2, and 4 of the SLI4 device.
5923 */
5924 phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
5925 bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
5926
5927 phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
5928 bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
5929
5930 phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
5931 bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
5932
5933 /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
5934 phba->sli4_hba.conf_regs_memmap_p =
5935 ioremap(phba->pci_bar0_map, bar0map_len);
5936 if (!phba->sli4_hba.conf_regs_memmap_p) {
5937 dev_printk(KERN_ERR, &pdev->dev,
5938 "ioremap failed for SLI4 PCI config registers.\n");
5939 goto out;
5940 }
5941
5942 /* Map SLI4 HBA Control Register base to a kernel virtual address. */
5943 phba->sli4_hba.ctrl_regs_memmap_p =
5944 ioremap(phba->pci_bar1_map, bar1map_len);
5945 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
5946 dev_printk(KERN_ERR, &pdev->dev,
5947 "ioremap failed for SLI4 HBA control registers.\n");
5948 goto out_iounmap_conf;
5949 }
5950
5951 /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
5952 phba->sli4_hba.drbl_regs_memmap_p =
5953 ioremap(phba->pci_bar2_map, bar2map_len);
5954 if (!phba->sli4_hba.drbl_regs_memmap_p) {
5955 dev_printk(KERN_ERR, &pdev->dev,
5956 "ioremap failed for SLI4 HBA doorbell registers.\n");
5957 goto out_iounmap_ctrl;
5958 }
5959
5960 /* Set up BAR0 PCI config space register memory map */
5961 lpfc_sli4_bar0_register_memmap(phba);
5962
5963 /* Set up BAR1 register memory map */
5964 lpfc_sli4_bar1_register_memmap(phba);
5965
5966 /* Set up BAR2 register memory map */
5967 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
5968 if (error)
5969 goto out_iounmap_all;
5970
5971 return 0;
5972
5973out_iounmap_all:
5974 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
5975out_iounmap_ctrl:
5976 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
5977out_iounmap_conf:
5978 iounmap(phba->sli4_hba.conf_regs_memmap_p);
5979out:
5980 return error;
5981}
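
The BAR mapping code above is a textbook goto-unwind ladder: each failure jumps to a label that releases exactly what was acquired so far, in reverse order. A self-contained C sketch of the ladder, with heap allocations standing in for the three ioremap() mappings:

#include <stdlib.h>

static int setup_three(void)
{
	void *a, *b, *c;

	a = malloc(16);		/* stand-in for the first mapping */
	if (!a)
		goto out;
	b = malloc(16);		/* second mapping */
	if (!b)
		goto out_free_a;
	c = malloc(16);		/* third mapping */
	if (!c)
		goto out_free_b;

	free(c); free(b); free(a);	/* success path for the demo */
	return 0;

out_free_b:
	free(b);	/* undo in reverse order of acquisition */
out_free_a:
	free(a);
out:
	return -1;
}

int main(void)
{
	return setup_three();
}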
5982
5983/**
5984 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
5985 * @phba: pointer to lpfc hba data structure.
5986 *
5987 * This routine is invoked to unset the PCI device memory space for device
5988 * with SLI-4 interface spec.
5989 **/
5990static void
5991lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
5992{
5993 struct pci_dev *pdev;
5994
5995 /* Obtain PCI device reference */
5996 if (!phba->pcidev)
5997 return;
5998 else
5999 pdev = phba->pcidev;
6000
6001 /* Free coherent DMA memory allocated */
6002
6003 /* Unmap I/O memory space */
6004 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
6005 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
6006 iounmap(phba->sli4_hba.conf_regs_memmap_p);
6007
6008 return;
6009}
6010
6011/**
6012 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
6013 * @phba: pointer to lpfc hba data structure.
6014 *
6015 * This routine is invoked to enable the MSI-X interrupt vectors to device
6016  * with SLI-3 interface spec. The kernel function pci_enable_msix() is
6017 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
6018 * invoked, enables either all or nothing, depending on the current
6019 * availability of PCI vector resources. The device driver is responsible
6020 * for calling the individual request_irq() to register each MSI-X vector
6021  * with an interrupt handler, which is done in this function. Note that
  * later when device is unloading, the driver should always call free_irq()
  * on all MSI-X vectors it has done request_irq() on before calling
  * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
@@ -2333,7 +6029,7 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
  *	other values - error
  **/
 static int
-lpfc_enable_msix(struct lpfc_hba *phba)
+lpfc_sli_enable_msix(struct lpfc_hba *phba)
 {
 	int rc, i;
 	LPFC_MBOXQ_t *pmb;
@@ -2349,20 +6045,21 @@ lpfc_enable_msix(struct lpfc_hba *phba)
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"0420 PCI enable MSI-X failed (%d)\n", rc);
 		goto msi_fail_out;
-	} else
+	}
 	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"0477 MSI-X entry[%d]: vector=x%x "
 				"message=%d\n", i,
 				phba->msix_entries[i].vector,
 				phba->msix_entries[i].entry);
 	/*
 	 * Assign MSI-X vectors to interrupt handlers
 	 */
 
 	/* vector-0 is associated to slow-path handler */
-	rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler,
-			 IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
+	rc = request_irq(phba->msix_entries[0].vector,
+			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
+			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
 	if (rc) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 				"0421 MSI-X slow-path request_irq failed "
@@ -2371,8 +6068,9 @@ lpfc_enable_msix(struct lpfc_hba *phba)
 	}
 
 	/* vector-1 is associated to fast-path handler */
-	rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler,
-			 IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);
+	rc = request_irq(phba->msix_entries[1].vector,
+			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
+			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
 
 	if (rc) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2401,7 +6099,7 @@ lpfc_enable_msix(struct lpfc_hba *phba)
 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
 				"0351 Config MSI mailbox command failed, "
 				"mbxCmd x%x, mbxStatus x%x\n",
-				pmb->mb.mbxCommand, pmb->mb.mbxStatus);
+				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
 		goto mbx_fail_out;
 	}
 
@@ -2428,14 +6126,14 @@ msi_fail_out:
 }
 
 /**
- * lpfc_disable_msix - Disable MSI-X interrupt mode
+ * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine is invoked to release the MSI-X vectors and then disable the
- * MSI-X interrupt mode.
+ * MSI-X interrupt mode to device with SLI-3 interface spec.
  **/
 static void
-lpfc_disable_msix(struct lpfc_hba *phba)
+lpfc_sli_disable_msix(struct lpfc_hba *phba)
 {
 	int i;
 
@@ -2444,23 +6142,26 @@ lpfc_disable_msix(struct lpfc_hba *phba)
 		free_irq(phba->msix_entries[i].vector, phba);
 	/* Disable MSI-X */
 	pci_disable_msix(phba->pcidev);
+
+	return;
 }
 
 /**
- * lpfc_enable_msi - Enable MSI interrupt mode
+ * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to enable the MSI interrupt mode. The kernel
- * function pci_enable_msi() is called to enable the MSI vector. The
- * device driver is responsible for calling the request_irq() to register
- * MSI vector with a interrupt the handler, which is done in this function.
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
+ * enable the MSI vector. The device driver is responsible for calling the
+ * request_irq() to register MSI vector with a interrupt the handler, which
+ * is done in this function.
  *
  * Return codes
  *	0 - sucessful
  *	other values - error
  */
 static int
-lpfc_enable_msi(struct lpfc_hba *phba)
+lpfc_sli_enable_msi(struct lpfc_hba *phba)
 {
 	int rc;
 
@@ -2474,7 +6175,7 @@ lpfc_enable_msi(struct lpfc_hba *phba)
 		return rc;
 	}
 
-	rc = request_irq(phba->pcidev->irq, lpfc_intr_handler,
+	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
 			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 	if (rc) {
 		pci_disable_msi(phba->pcidev);
@@ -2485,17 +6186,17 @@ lpfc_enable_msi(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_disable_msi - Disable MSI interrupt mode
+ * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to disable the MSI interrupt mode. The driver
- * calls free_irq() on MSI vector it has done request_irq() on before
- * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and
- * a device will be left with MSI enabled and leaks its vector.
+ * This routine is invoked to disable the MSI interrupt mode to device with
+ * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
+ * done request_irq() on before calling pci_disable_msi(). Failure to do so
+ * results in a BUG_ON() and a device will be left with MSI enabled and leaks
+ * its vector.
  */
-
 static void
-lpfc_disable_msi(struct lpfc_hba *phba)
+lpfc_sli_disable_msi(struct lpfc_hba *phba)
 {
 	free_irq(phba->pcidev->irq, phba);
 	pci_disable_msi(phba->pcidev);
@@ -2503,80 +6204,298 @@ lpfc_disable_msi(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_log_intr_mode - Log the active interrupt mode
+ * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
  * @phba: pointer to lpfc hba data structure.
- * @intr_mode: active interrupt mode adopted.
  *
- * This routine it invoked to log the currently used active interrupt mode
- * to the device.
- */
+ * This routine is invoked to enable device interrupt and associate driver's
+ * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
+ * spec. Depends on the interrupt mode configured to the driver, the driver
+ * will try to fallback from the configured interrupt mode to an interrupt
+ * mode which is supported by the platform, kernel, and device in the order
+ * of:
+ * MSI-X -> MSI -> IRQ.
+ *
+ * Return codes
+ *	0 - sucessful
+ *	other values - error
+ **/
+static uint32_t
+lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+	uint32_t intr_mode = LPFC_INTR_ERROR;
+	int retval;
+
+	if (cfg_mode == 2) {
+		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
+		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
+		if (!retval) {
+			/* Now, try to enable MSI-X interrupt mode */
+			retval = lpfc_sli_enable_msix(phba);
+			if (!retval) {
+				/* Indicate initialization to MSI-X mode */
+				phba->intr_type = MSIX;
+				intr_mode = 2;
+			}
+		}
+	}
+
+	/* Fallback to MSI if MSI-X initialization failed */
+	if (cfg_mode >= 1 && phba->intr_type == NONE) {
+		retval = lpfc_sli_enable_msi(phba);
+		if (!retval) {
+			/* Indicate initialization to MSI mode */
+			phba->intr_type = MSI;
+			intr_mode = 1;
+		}
+	}
+
+	/* Fallback to INTx if both MSI-X/MSI initalization failed */
+	if (phba->intr_type == NONE) {
+		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
+				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+		if (!retval) {
+			/* Indicate initialization to INTx mode */
+			phba->intr_type = INTx;
+			intr_mode = 0;
+		}
+	}
+	return intr_mode;
+}
+
+/**
+ * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupt and disassociate the
+ * driver's interrupt handler(s) from interrupt vector(s) to device with
+ * SLI-3 interface spec. Depending on the interrupt mode, the driver will
+ * release the interrupt vector(s) for the message signaled interrupt.
+ **/
 static void
-lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
+lpfc_sli_disable_intr(struct lpfc_hba *phba)
 {
-	switch (intr_mode) {
-	case 0:
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"0470 Enable INTx interrupt mode.\n");
-		break;
-	case 1:
+	/* Disable the currently initialized interrupt mode */
+	if (phba->intr_type == MSIX)
+		lpfc_sli_disable_msix(phba);
+	else if (phba->intr_type == MSI)
+		lpfc_sli_disable_msi(phba);
+	else if (phba->intr_type == INTx)
+		free_irq(phba->pcidev->irq, phba);
+
+	/* Reset interrupt management states */
+	phba->intr_type = NONE;
+	phba->sli.slistat.sli_intr = 0;
+
+	return;
+}
+
+/**
+ * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors to device
+ * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
+ * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
+ * enables either all or nothing, depending on the current availability of
+ * PCI vector resources. The device driver is responsible for calling the
+ * individual request_irq() to register each MSI-X vector with a interrupt
+ * handler, which is done in this function. Note that later when device is
+ * unloading, the driver should always call free_irq() on all MSI-X vectors
+ * it has done request_irq() on before calling pci_disable_msix(). Failure
+ * to do so results in a BUG_ON() and a device will be left with MSI-X
+ * enabled and leaks its vectors.
+ *
+ * Return codes
+ *	0 - sucessful
+ *	other values - error
+ **/
+static int
+lpfc_sli4_enable_msix(struct lpfc_hba *phba)
+{
+	int rc, index;
+
+	/* Set up MSI-X multi-message vectors */
+	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
+		phba->sli4_hba.msix_entries[index].entry = index;
+
+	/* Configure MSI-X capability structure */
+	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
+			     phba->sli4_hba.cfg_eqn);
+	if (rc) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"0481 Enabled MSI interrupt mode.\n");
-		break;
-	case 2:
+				"0484 PCI enable MSI-X failed (%d)\n", rc);
+		goto msi_fail_out;
+	}
+	/* Log MSI-X vector assignment */
+	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"0480 Enabled MSI-X interrupt mode.\n");
-		break;
-	default:
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0482 Illegal interrupt mode.\n");
-		break;
+				"0489 MSI-X entry[%d]: vector=x%x "
+				"message=%d\n", index,
+				phba->sli4_hba.msix_entries[index].vector,
+				phba->sli4_hba.msix_entries[index].entry);
+	/*
+	 * Assign MSI-X vectors to interrupt handlers
+	 */
+
+	/* The first vector must associated to slow-path handler for MQ */
+	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
+			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0485 MSI-X slow-path request_irq failed "
+				"(%d)\n", rc);
+		goto msi_fail_out;
 	}
-	return;
+
+	/* The rest of the vector(s) are associated to fast-path handler(s) */
+	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
+		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
+		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
+		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
+				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
+				 LPFC_FP_DRIVER_HANDLER_NAME,
+				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"0486 MSI-X fast-path (%d) "
+					"request_irq failed (%d)\n", index, rc);
+			goto cfg_fail_out;
+		}
+	}
+
+	return rc;
+
+cfg_fail_out:
+	/* free the irq already requested */
+	for (--index; index >= 1; index--)
+		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
+			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+
+	/* free the irq already requested */
+	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+
+msi_fail_out:
+	/* Unconfigure MSI-X capability structure */
+	pci_disable_msix(phba->pcidev);
+	return rc;
 }
 
+/**
+ * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release the MSI-X vectors and then disable the
+ * MSI-X interrupt mode to device with SLI-4 interface spec.
+ **/
 static void
-lpfc_stop_port(struct lpfc_hba *phba)
+lpfc_sli4_disable_msix(struct lpfc_hba *phba)
 {
-	/* Clear all interrupt enable conditions */
-	writel(0, phba->HCregaddr);
-	readl(phba->HCregaddr); /* flush */
-	/* Clear all pending interrupts */
-	writel(0xffffffff, phba->HAregaddr);
-	readl(phba->HAregaddr); /* flush */
+	int index;
 
-	/* Reset some HBA SLI setup states */
-	lpfc_stop_phba_timers(phba);
-	phba->pport->work_port_events = 0;
+	/* Free up MSI-X multi-message vectors */
+	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+
+	for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
+		free_irq(phba->sli4_hba.msix_entries[index].vector,
+			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+	/* Disable MSI-X */
+	pci_disable_msix(phba->pcidev);
+
+	return;
+}
+
+/**
+ * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-4 interface spec. The kernel function pci_enable_msi() is called
+ * to enable the MSI vector. The device driver is responsible for calling
+ * the request_irq() to register MSI vector with a interrupt the handler,
+ * which is done in this function.
+ *
+ * Return codes
+ *	0 - sucessful
+ *	other values - error
+ **/
+static int
+lpfc_sli4_enable_msi(struct lpfc_hba *phba)
+{
+	int rc, index;
+
+	rc = pci_enable_msi(phba->pcidev);
+	if (!rc)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0487 PCI enable MSI mode success.\n");
+	else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0488 PCI enable MSI mode failed (%d)\n", rc);
+		return rc;
+	}
+
+	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+	if (rc) {
+		pci_disable_msi(phba->pcidev);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0490 MSI request_irq failed (%d)\n", rc);
+	}
+
+	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
+		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+	}
+
+	return rc;
+}
 
+/**
+ * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the MSI interrupt mode to device with
+ * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
+ * done request_irq() on before calling pci_disable_msi(). Failure to do so
+ * results in a BUG_ON() and a device will be left with MSI enabled and leaks
+ * its vector.
+ **/
+static void
+lpfc_sli4_disable_msi(struct lpfc_hba *phba)
+{
+	free_irq(phba->pcidev->irq, phba);
+	pci_disable_msi(phba->pcidev);
 	return;
 }
 
 /**
- * lpfc_enable_intr - Enable device interrupt
+ * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine is invoked to enable device interrupt and associate driver's
- * interrupt handler(s) to interrupt vector(s). Depends on the interrupt
- * mode configured to the driver, the driver will try to fallback from the
- * configured interrupt mode to an interrupt mode which is supported by the
- * platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ.
+ * interrupt handler(s) to interrupt vector(s) to device with SLI-4
+ * interface spec. Depends on the interrupt mode configured to the driver,
+ * the driver will try to fallback from the configured interrupt mode to an
+ * interrupt mode which is supported by the platform, kernel, and device in
+ * the order of:
+ * MSI-X -> MSI -> IRQ.
  *
  * Return codes
  *	0 - sucessful
 *	other values - error
  **/
 static uint32_t
-lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
 {
 	uint32_t intr_mode = LPFC_INTR_ERROR;
-	int retval;
+	int retval, index;
 
 	if (cfg_mode == 2) {
-		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
-		retval = lpfc_sli_config_port(phba, 3);
+		/* Preparation before conf_msi mbox cmd */
+		retval = 0;
 		if (!retval) {
 			/* Now, try to enable MSI-X interrupt mode */
-			retval = lpfc_enable_msix(phba);
+			retval = lpfc_sli4_enable_msix(phba);
 			if (!retval) {
 				/* Indicate initialization to MSI-X mode */
 				phba->intr_type = MSIX;
@@ -2587,7 +6506,7 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
 
 	/* Fallback to MSI if MSI-X initialization failed */
 	if (cfg_mode >= 1 && phba->intr_type == NONE) {
-		retval = lpfc_enable_msi(phba);
+		retval = lpfc_sli4_enable_msi(phba);
 		if (!retval) {
 			/* Indicate initialization to MSI mode */
 			phba->intr_type = MSI;
@@ -2597,34 +6516,39 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
 
 	/* Fallback to INTx if both MSI-X/MSI initalization failed */
 	if (phba->intr_type == NONE) {
-		retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
+		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
 				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 		if (!retval) {
 			/* Indicate initialization to INTx mode */
 			phba->intr_type = INTx;
 			intr_mode = 0;
+			for (index = 0; index < phba->cfg_fcp_eq_count;
+			     index++) {
+				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+			}
 		}
 	}
 	return intr_mode;
 }
 
 /**
- * lpfc_disable_intr - Disable device interrupt
+ * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to disable device interrupt and disassociate the
- * driver's interrupt handler(s) from interrupt vector(s). Depending on the
- * interrupt mode, the driver will release the interrupt vector(s) for the
- * message signaled interrupt.
+ * This routine is invoked to disable device interrupt and disassociate
+ * the driver's interrupt handler(s) from interrupt vector(s) to device
+ * with SLI-4 interface spec. Depending on the interrupt mode, the driver
+ * will release the interrupt vector(s) for the message signaled interrupt.
 **/
 static void
-lpfc_disable_intr(struct lpfc_hba *phba)
+lpfc_sli4_disable_intr(struct lpfc_hba *phba)
 {
 	/* Disable the currently initialized interrupt mode */
 	if (phba->intr_type == MSIX)
-		lpfc_disable_msix(phba);
+		lpfc_sli4_disable_msix(phba);
 	else if (phba->intr_type == MSI)
-		lpfc_disable_msi(phba);
+		lpfc_sli4_disable_msi(phba);
 	else if (phba->intr_type == INTx)
 		free_irq(phba->pcidev->irq, phba);
 
@@ -2636,263 +6560,233 @@ lpfc_disable_intr(struct lpfc_hba *phba)
2636} 6560}
2637 6561
2638/** 6562/**
2639 * lpfc_pci_probe_one - lpfc PCI probe func to register device to PCI subsystem 6563 * lpfc_unset_hba - Unset SLI3 hba device initialization
2640 * @pdev: pointer to PCI device 6564 * @phba: pointer to lpfc hba data structure.
2641 * @pid: pointer to PCI device identifier
2642 *
2643 * This routine is to be registered to the kernel's PCI subsystem. When an
2644 * Emulex HBA is presented in PCI bus, the kernel PCI subsystem looks at
2645 * PCI device-specific information of the device and driver to see if the
2646 * driver state that it can support this kind of device. If the match is
2647 * successful, the driver core invokes this routine. If this routine
2648 * determines it can claim the HBA, it does all the initialization that it
2649 * needs to do to handle the HBA properly.
2650 * 6565 *
2651 * Return code 6566 * This routine is invoked to unset the HBA device initialization steps to
2652 * 0 - driver can claim the device 6567 * a device with SLI-3 interface spec.
2653 * negative value - driver can not claim the device
2654 **/ 6568 **/
2655static int __devinit 6569static void
2656lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 6570lpfc_unset_hba(struct lpfc_hba *phba)
2657{ 6571{
2658 struct lpfc_vport *vport = NULL; 6572 struct lpfc_vport *vport = phba->pport;
2659 struct lpfc_hba *phba; 6573 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2660 struct lpfc_sli *psli;
2661 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
2662 struct Scsi_Host *shost = NULL;
2663 void *ptr;
2664 unsigned long bar0map_len, bar2map_len;
2665 int error = -ENODEV, retval;
2666 int i, hbq_count;
2667 uint16_t iotag;
2668 uint32_t cfg_mode, intr_mode;
2669 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
2670 struct lpfc_adapter_event_header adapter_event;
2671
2672 if (pci_enable_device_mem(pdev))
2673 goto out;
2674 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
2675 goto out_disable_device;
2676 6574
2677 phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); 6575 spin_lock_irq(shost->host_lock);
2678 if (!phba) 6576 vport->load_flag |= FC_UNLOADING;
2679 goto out_release_regions; 6577 spin_unlock_irq(shost->host_lock);
2680 6578
2681 atomic_set(&phba->fast_event_count, 0); 6579 lpfc_stop_hba_timers(phba);
2682 spin_lock_init(&phba->hbalock);
2683 6580
2684 /* Initialize ndlp management spinlock */ 6581 phba->pport->work_port_events = 0;
2685 spin_lock_init(&phba->ndlp_lock);
2686 6582
2687 phba->pcidev = pdev; 6583 lpfc_sli_hba_down(phba);
2688 6584
2689 /* Assign an unused board number */ 6585 lpfc_sli_brdrestart(phba);
2690 if ((phba->brd_no = lpfc_get_instance()) < 0)
2691 goto out_free_phba;
2692 6586
2693 INIT_LIST_HEAD(&phba->port_list); 6587 lpfc_sli_disable_intr(phba);
2694 init_waitqueue_head(&phba->wait_4_mlo_m_q);
2695 /*
2696 * Get all the module params for configuring this host and then
2697 * establish the host.
2698 */
2699 lpfc_get_cfgparam(phba);
2700 phba->max_vpi = LPFC_MAX_VPI;
2701 6588
2702 /* Initialize timers used by driver */ 6589 return;
2703 init_timer(&phba->hb_tmofunc); 6590}
2704 phba->hb_tmofunc.function = lpfc_hb_timeout;
2705 phba->hb_tmofunc.data = (unsigned long)phba;
2706 6591
2707 psli = &phba->sli; 6592/**
2708 init_timer(&psli->mbox_tmo); 6593 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
2709 psli->mbox_tmo.function = lpfc_mbox_timeout; 6594 * @phba: pointer to lpfc hba data structure.
2710 psli->mbox_tmo.data = (unsigned long) phba; 6595 *
2711 init_timer(&phba->fcp_poll_timer); 6596 * This routine is invoked to unset the HBA device initialization steps to
2712 phba->fcp_poll_timer.function = lpfc_poll_timeout; 6597 * a device with SLI-4 interface spec.
2713 phba->fcp_poll_timer.data = (unsigned long) phba; 6598 **/
2714 init_timer(&phba->fabric_block_timer); 6599static void
2715 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 6600lpfc_sli4_unset_hba(struct lpfc_hba *phba)
2716 phba->fabric_block_timer.data = (unsigned long) phba; 6601{
2717 init_timer(&phba->eratt_poll); 6602 struct lpfc_vport *vport = phba->pport;
2718 phba->eratt_poll.function = lpfc_poll_eratt; 6603 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2719 phba->eratt_poll.data = (unsigned long) phba;
2720 6604
2721 pci_set_master(pdev); 6605 spin_lock_irq(shost->host_lock);
2722 pci_save_state(pdev); 6606 vport->load_flag |= FC_UNLOADING;
2723 pci_try_set_mwi(pdev); 6607 spin_unlock_irq(shost->host_lock);
2724 6608
2725 if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0) 6609 phba->pport->work_port_events = 0;
2726 if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(32)) != 0)
2727 goto out_idr_remove;
2728 6610
2729 /* 6611 lpfc_sli4_hba_down(phba);
2730 * Get the bus address of Bar0 and Bar2 and the number of bytes
2731 * required by each mapping.
2732 */
2733 phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
2734 bar0map_len = pci_resource_len(phba->pcidev, 0);
2735 6612
2736 phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); 6613 lpfc_sli4_disable_intr(phba);
2737 bar2map_len = pci_resource_len(phba->pcidev, 2);
2738 6614
2739 /* Map HBA SLIM to a kernel virtual address. */ 6615 return;
2740 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 6616}
2741 if (!phba->slim_memmap_p) {
2742 error = -ENODEV;
2743 dev_printk(KERN_ERR, &pdev->dev,
2744 "ioremap failed for SLIM memory.\n");
2745 goto out_idr_remove;
2746 }
2747
2748 /* Map HBA Control Registers to a kernel virtual address. */
2749 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
2750 if (!phba->ctrl_regs_memmap_p) {
2751 error = -ENODEV;
2752 dev_printk(KERN_ERR, &pdev->dev,
2753 "ioremap failed for HBA control registers.\n");
2754 goto out_iounmap_slim;
2755 }
2756 6617
2757 /* Allocate memory for SLI-2 structures */ 6618/**
2758 phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev, 6619 * lpfc_sli4_hba_unset - Unset the fcoe hba
2759 SLI2_SLIM_SIZE, 6620 * @phba: Pointer to HBA context object.
2760 &phba->slim2p.phys, 6621 *
2761 GFP_KERNEL); 6622 * This function is called in the SLI4 code path to reset the HBA's FCoE
2762 if (!phba->slim2p.virt) 6623 * function. The caller is not required to hold any lock. This routine
2763 goto out_iounmap; 6624 * issues PCI function reset mailbox command to reset the FCoE function.
6625 * At the end of the function, it calls lpfc_hba_down_post function to
6626 * free any pending commands.
6627 **/
6628static void
6629lpfc_sli4_hba_unset(struct lpfc_hba *phba)
6630{
6631 int wait_cnt = 0;
6632 LPFC_MBOXQ_t *mboxq;
2764 6633
2765 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 6634 lpfc_stop_hba_timers(phba);
2766 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 6635 phba->sli4_hba.intr_enable = 0;
2767 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
2768 phba->IOCBs = (phba->slim2p.virt +
2769 offsetof(struct lpfc_sli2_slim, IOCBs));
2770 6636
2771 phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, 6637 /*
2772 lpfc_sli_hbq_size(), 6638 * Gracefully wait out the potential current outstanding asynchronous
2773 &phba->hbqslimp.phys, 6639 * mailbox command.
2774 GFP_KERNEL); 6640 */
2775 if (!phba->hbqslimp.virt)
2776 goto out_free_slim;
2777 6641
2778 hbq_count = lpfc_sli_hbq_count(); 6642 /* First, block any pending async mailbox command from posted */
2779 ptr = phba->hbqslimp.virt; 6643 spin_lock_irq(&phba->hbalock);
2780 for (i = 0; i < hbq_count; ++i) { 6644 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
2781 phba->hbqs[i].hbq_virt = ptr; 6645 spin_unlock_irq(&phba->hbalock);
2782 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 6646 /* Now, trying to wait it out if we can */
2783 ptr += (lpfc_hbq_defs[i]->entry_count * 6647 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2784 sizeof(struct lpfc_hbq_entry)); 6648 msleep(10);
6649 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
6650 break;
2785 } 6651 }
2786 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 6652 /* Forcefully release the outstanding mailbox command if timed out */
2787 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 6653 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2788 6654 spin_lock_irq(&phba->hbalock);
2789 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 6655 mboxq = phba->sli.mbox_active;
2790 6656 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
2791 INIT_LIST_HEAD(&phba->hbqbuf_in_list); 6657 __lpfc_mbox_cmpl_put(phba, mboxq);
2792 6658 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2793 /* Initialize the SLI Layer to run with lpfc HBAs. */ 6659 phba->sli.mbox_active = NULL;
2794 lpfc_sli_setup(phba); 6660 spin_unlock_irq(&phba->hbalock);
2795 lpfc_sli_queue_setup(phba);
2796
2797 retval = lpfc_mem_alloc(phba);
2798 if (retval) {
2799 error = retval;
2800 goto out_free_hbqslimp;
2801 } 6661 }
2802 6662
2803 /* Initialize and populate the iocb list per host. */ 6663 /* Tear down the queues in the HBA */
2804 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 6664 lpfc_sli4_queue_unset(phba);
2805 for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
2806 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
2807 if (iocbq_entry == NULL) {
2808 printk(KERN_ERR "%s: only allocated %d iocbs of "
2809 "expected %d count. Unloading driver.\n",
2810 __func__, i, LPFC_IOCB_LIST_CNT);
2811 error = -ENOMEM;
2812 goto out_free_iocbq;
2813 }
2814 6665
2815 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 6666 /* Disable PCI subsystem interrupt */
2816 if (iotag == 0) { 6667 lpfc_sli4_disable_intr(phba);
2817 kfree (iocbq_entry);
2818 printk(KERN_ERR "%s: failed to allocate IOTAG. "
2819 "Unloading driver.\n",
2820 __func__);
2821 error = -ENOMEM;
2822 goto out_free_iocbq;
2823 }
2824 6668
2825 spin_lock_irq(&phba->hbalock); 6669 /* Stop kthread signal shall trigger work_done one more time */
2826 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 6670 kthread_stop(phba->worker_thread);
2827 phba->total_iocbq_bufs++;
2828 spin_unlock_irq(&phba->hbalock);
2829 }
2830 6671
2831 /* Initialize HBA structure */ 6672 /* Stop the SLI4 device port */
2832 phba->fc_edtov = FF_DEF_EDTOV; 6673 phba->pport->work_port_events = 0;
2833 phba->fc_ratov = FF_DEF_RATOV; 6674}
2834 phba->fc_altov = FF_DEF_ALTOV;
2835 phba->fc_arbtov = FF_DEF_ARBTOV;
2836 6675
2837 INIT_LIST_HEAD(&phba->work_list); 6676/**
2838 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 6677 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
2839 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 6678 * @pdev: pointer to PCI device
6679 * @pid: pointer to PCI device identifier
6680 *
6681 * This routine is to be called to attach a device with SLI-3 interface spec
6682 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
6683 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
6684 * information of the device and driver to see if the driver state that it can
6685 * support this kind of device. If the match is successful, the driver core
6686 * invokes this routine. If this routine determines it can claim the HBA, it
6687 * does all the initialization that it needs to do to handle the HBA properly.
6688 *
6689 * Return code
6690 * 0 - driver can claim the device
6691 * negative value - driver can not claim the device
6692 **/
6693static int __devinit
6694lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6695{
6696 struct lpfc_hba *phba;
6697 struct lpfc_vport *vport = NULL;
6698 int error;
6699 uint32_t cfg_mode, intr_mode;
2840 6700
2841 /* Initialize the wait queue head for the kernel thread */ 6701 /* Allocate memory for HBA structure */
2842 init_waitqueue_head(&phba->work_waitq); 6702 phba = lpfc_hba_alloc(pdev);
6703 if (!phba)
6704 return -ENOMEM;
2843 6705
2844 /* Startup the kernel thread for this host adapter. */ 6706 /* Perform generic PCI device enabling operation */
2845 phba->worker_thread = kthread_run(lpfc_do_work, phba, 6707 error = lpfc_enable_pci_dev(phba);
2846 "lpfc_worker_%d", phba->brd_no); 6708 if (error) {
2847 if (IS_ERR(phba->worker_thread)) { 6709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2848 error = PTR_ERR(phba->worker_thread); 6710 "1401 Failed to enable pci device.\n");
2849 goto out_free_iocbq; 6711 goto out_free_phba;
2850 } 6712 }
2851 6713
2852 /* Initialize the list of scsi buffers used by driver for scsi IO. */ 6714 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
2853 spin_lock_init(&phba->scsi_buf_list_lock); 6715 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
2854 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 6716 if (error)
6717 goto out_disable_pci_dev;
2855 6718
2856 /* Initialize list of fabric iocbs */ 6719 /* Set up SLI-3 specific device PCI memory space */
2857 INIT_LIST_HEAD(&phba->fabric_iocb_list); 6720 error = lpfc_sli_pci_mem_setup(phba);
6721 if (error) {
6722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6723 "1402 Failed to set up pci memory space.\n");
6724 goto out_disable_pci_dev;
6725 }
2858 6726
2859 /* Initialize list to save ELS buffers */ 6727 /* Set up phase-1 common device driver resources */
2860 INIT_LIST_HEAD(&phba->elsbuf); 6728 error = lpfc_setup_driver_resource_phase1(phba);
6729 if (error) {
6730 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6731 "1403 Failed to set up driver resource.\n");
6732 goto out_unset_pci_mem_s3;
6733 }
2861 6734
2862 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 6735 /* Set up SLI-3 specific device driver resources */
2863 if (!vport) 6736 error = lpfc_sli_driver_resource_setup(phba);
2864 goto out_kthread_stop; 6737 if (error) {
6738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6739 "1404 Failed to set up driver resource.\n");
6740 goto out_unset_pci_mem_s3;
6741 }
2865 6742
2866 shost = lpfc_shost_from_vport(vport); 6743 /* Initialize and populate the iocb list per host */
2867 phba->pport = vport; 6744 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
2868 lpfc_debugfs_initialize(vport); 6745 if (error) {
6746 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6747 "1405 Failed to initialize iocb list.\n");
6748 goto out_unset_driver_resource_s3;
6749 }
2869 6750
2870 pci_set_drvdata(pdev, shost); 6751 /* Set up common device driver resources */
6752 error = lpfc_setup_driver_resource_phase2(phba);
6753 if (error) {
6754 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6755 "1406 Failed to set up driver resource.\n");
6756 goto out_free_iocb_list;
6757 }
2871 6758
2872 phba->MBslimaddr = phba->slim_memmap_p; 6759 /* Create SCSI host to the physical port */
2873 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 6760 error = lpfc_create_shost(phba);
2874 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 6761 if (error) {
2875 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 6762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2876 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 6763 "1407 Failed to create scsi host.\n");
6764 goto out_unset_driver_resource;
6765 }
2877 6766
2878 /* Configure sysfs attributes */ 6767 /* Configure sysfs attributes */
2879 if (lpfc_alloc_sysfs_attr(vport)) { 6768 vport = phba->pport;
6769 error = lpfc_alloc_sysfs_attr(vport);
6770 if (error) {
2880 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6771 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2881 "1476 Failed to allocate sysfs attr\n"); 6772 "1476 Failed to allocate sysfs attr\n");
2882 error = -ENOMEM; 6773 goto out_destroy_shost;
2883 goto out_destroy_port;
2884 } 6774 }
2885 6775
 6776 /* Now, try to enable interrupt and bring up the device */
2886 cfg_mode = phba->cfg_use_msi; 6777 cfg_mode = phba->cfg_use_msi;
2887 while (true) { 6778 while (true) {
6779 /* Put device to a known state before enabling interrupt */
6780 lpfc_stop_port(phba);
2888 /* Configure and enable interrupt */ 6781 /* Configure and enable interrupt */
2889 intr_mode = lpfc_enable_intr(phba, cfg_mode); 6782 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
2890 if (intr_mode == LPFC_INTR_ERROR) { 6783 if (intr_mode == LPFC_INTR_ERROR) {
2891 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2892 "0426 Failed to enable interrupt.\n"); 6785 "0431 Failed to enable interrupt.\n");
6786 error = -ENODEV;
2893 goto out_free_sysfs_attr; 6787 goto out_free_sysfs_attr;
2894 } 6788 }
2895 /* HBA SLI setup */ 6789 /* SLI-3 HBA setup */
2896 if (lpfc_sli_hba_setup(phba)) { 6790 if (lpfc_sli_hba_setup(phba)) {
2897 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6791 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2898 "1477 Failed to set up hba\n"); 6792 "1477 Failed to set up hba\n");
@@ -2902,185 +6796,65 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2902 6796
2903 /* Wait 50ms for the interrupts of previous mailbox commands */ 6797 /* Wait 50ms for the interrupts of previous mailbox commands */
2904 msleep(50); 6798 msleep(50);
2905 /* Check active interrupts received */ 6799 /* Check active interrupts on message signaled interrupts */
2906 if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 6800 if (intr_mode == 0 ||
6801 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
2907 /* Log the current active interrupt mode */ 6802 /* Log the current active interrupt mode */
2908 phba->intr_mode = intr_mode; 6803 phba->intr_mode = intr_mode;
2909 lpfc_log_intr_mode(phba, intr_mode); 6804 lpfc_log_intr_mode(phba, intr_mode);
2910 break; 6805 break;
2911 } else { 6806 } else {
2912 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6807 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2913 "0451 Configure interrupt mode (%d) " 6808 "0447 Configure interrupt mode (%d) "
2914 "failed active interrupt test.\n", 6809 "failed active interrupt test.\n",
2915 intr_mode); 6810 intr_mode);
2916 if (intr_mode == 0) {
2917 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2918 "0479 Failed to enable "
2919 "interrupt.\n");
2920 error = -ENODEV;
2921 goto out_remove_device;
2922 }
2923 /* Stop HBA SLI setups */
2924 lpfc_stop_port(phba);
2925 /* Disable the current interrupt mode */ 6811 /* Disable the current interrupt mode */
2926 lpfc_disable_intr(phba); 6812 lpfc_sli_disable_intr(phba);
2927 /* Try next level of interrupt mode */ 6813 /* Try next level of interrupt mode */
2928 cfg_mode = --intr_mode; 6814 cfg_mode = --intr_mode;
2929 } 6815 }
2930 } 6816 }
2931 6817
2932 /* 6818 /* Perform post initialization setup */
2933 * hba setup may have changed the hba_queue_depth so we need to adjust 6819 lpfc_post_init_setup(phba);
2934 * the value of can_queue.
2935 */
2936 shost->can_queue = phba->cfg_hba_queue_depth - 10;
2937 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
2938
2939 if (lpfc_prot_mask && lpfc_prot_guard) {
2940 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2941 "1478 Registering BlockGuard with the "
2942 "SCSI layer\n");
2943 6820
2944 scsi_host_set_prot(shost, lpfc_prot_mask); 6821 /* Check if there are static vports to be created. */
2945 scsi_host_set_guard(shost, lpfc_prot_guard); 6822 lpfc_create_static_vport(phba);
2946 }
2947 }
2948
2949 if (!_dump_buf_data) {
2950 int pagecnt = 10;
2951 while (pagecnt) {
2952 spin_lock_init(&_dump_buf_lock);
2953 _dump_buf_data =
2954 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2955 if (_dump_buf_data) {
2956 printk(KERN_ERR "BLKGRD allocated %d pages for "
2957 "_dump_buf_data at 0x%p\n",
2958 (1 << pagecnt), _dump_buf_data);
2959 _dump_buf_data_order = pagecnt;
2960 memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT)
2961 << pagecnt));
2962 break;
2963 } else {
2964 --pagecnt;
2965 }
2966
2967 }
2968
2969 if (!_dump_buf_data_order)
2970 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
2971 "memory for hexdump\n");
2972
2973 } else {
2974 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
2975 "\n", _dump_buf_data);
2976 }
2977
2978
2979 if (!_dump_buf_dif) {
2980 int pagecnt = 10;
2981 while (pagecnt) {
2982 _dump_buf_dif =
2983 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2984 if (_dump_buf_dif) {
2985 printk(KERN_ERR "BLKGRD allocated %d pages for "
2986 "_dump_buf_dif at 0x%p\n",
2987 (1 << pagecnt), _dump_buf_dif);
2988 _dump_buf_dif_order = pagecnt;
2989 memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT)
2990 << pagecnt));
2991 break;
2992 } else {
2993 --pagecnt;
2994 }
2995
2996 }
2997
2998 if (!_dump_buf_dif_order)
2999 printk(KERN_ERR "BLKGRD ERROR unable to allocate "
3000 "memory for hexdump\n");
3001
3002 } else {
3003 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
3004 _dump_buf_dif);
3005 }
3006
3007 lpfc_host_attrib_init(shost);
3008
3009 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3010 spin_lock_irq(shost->host_lock);
3011 lpfc_poll_start_timer(phba);
3012 spin_unlock_irq(shost->host_lock);
3013 }
3014
3015 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3016 "0428 Perform SCSI scan\n");
3017 /* Send board arrival event to upper layer */
3018 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
3019 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
3020 fc_host_post_vendor_event(shost, fc_get_event_number(),
3021 sizeof(adapter_event),
3022 (char *) &adapter_event,
3023 LPFC_NL_VENDOR_ID);
3024 6823
3025 return 0; 6824 return 0;
3026 6825
3027out_remove_device: 6826out_remove_device:
3028 spin_lock_irq(shost->host_lock); 6827 lpfc_unset_hba(phba);
3029 vport->load_flag |= FC_UNLOADING;
3030 spin_unlock_irq(shost->host_lock);
3031 lpfc_stop_phba_timers(phba);
3032 phba->pport->work_port_events = 0;
3033 lpfc_disable_intr(phba);
3034 lpfc_sli_hba_down(phba);
3035 lpfc_sli_brdrestart(phba);
3036out_free_sysfs_attr: 6828out_free_sysfs_attr:
3037 lpfc_free_sysfs_attr(vport); 6829 lpfc_free_sysfs_attr(vport);
3038out_destroy_port: 6830out_destroy_shost:
3039 destroy_port(vport); 6831 lpfc_destroy_shost(phba);
3040out_kthread_stop: 6832out_unset_driver_resource:
3041 kthread_stop(phba->worker_thread); 6833 lpfc_unset_driver_resource_phase2(phba);
3042out_free_iocbq: 6834out_free_iocb_list:
3043 list_for_each_entry_safe(iocbq_entry, iocbq_next, 6835 lpfc_free_iocb_list(phba);
3044 &phba->lpfc_iocb_list, list) { 6836out_unset_driver_resource_s3:
3045 kfree(iocbq_entry); 6837 lpfc_sli_driver_resource_unset(phba);
3046 phba->total_iocbq_bufs--; 6838out_unset_pci_mem_s3:
3047 } 6839 lpfc_sli_pci_mem_unset(phba);
3048 lpfc_mem_free(phba); 6840out_disable_pci_dev:
3049out_free_hbqslimp: 6841 lpfc_disable_pci_dev(phba);
3050 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
3051 phba->hbqslimp.virt, phba->hbqslimp.phys);
3052out_free_slim:
3053 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
3054 phba->slim2p.virt, phba->slim2p.phys);
3055out_iounmap:
3056 iounmap(phba->ctrl_regs_memmap_p);
3057out_iounmap_slim:
3058 iounmap(phba->slim_memmap_p);
3059out_idr_remove:
3060 idr_remove(&lpfc_hba_index, phba->brd_no);
3061out_free_phba: 6842out_free_phba:
3062 kfree(phba); 6843 lpfc_hba_free(phba);
3063out_release_regions:
3064 pci_release_selected_regions(pdev, bars);
3065out_disable_device:
3066 pci_disable_device(pdev);
3067out:
3068 pci_set_drvdata(pdev, NULL);
3069 if (shost)
3070 scsi_host_put(shost);
3071 return error; 6844 return error;
3072} 6845}
3073 6846
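A minimal, self-contained sketch of the interrupt-mode fallback that the probe loop above implements: start at the richest configured mode (2 = MSI-X, 1 = MSI, 0 = INTx), verify that interrupts actually arrive, and step down a level when the test fails. The helper names and the stubbed test are illustrative stand-ins, not lpfc APIs.

#include <stdio.h>

enum { INTR_ERROR = -1, INTX = 0, MSI = 1, MSIX = 2 };

/* Stubs standing in for lpfc_sli_enable_intr() and the 50ms
 * active-interrupt count check; in the driver these touch hardware.
 * Here MSI-X is pretended to fail the test so the fallback runs. */
static int try_enable_mode(int mode) { return mode; }
static int active_intr_test(int mode) { return mode != MSIX; }
static void disable_mode(void) { }

static int enable_intr_with_fallback(int cfg_mode)
{
        int intr_mode;

        for (;;) {
                intr_mode = try_enable_mode(cfg_mode);
                if (intr_mode == INTR_ERROR)
                        return -1;              /* even INTx failed */
                if (intr_mode == INTX || active_intr_test(intr_mode))
                        return intr_mode;       /* INTx passes untested */
                disable_mode();                 /* undo, then degrade */
                cfg_mode = --intr_mode;
        }
}

int main(void)
{
        printf("settled on mode %d\n", enable_intr_with_fallback(MSIX));
        return 0;
}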
3074/** 6847/**
3075 * lpfc_pci_remove_one - lpfc PCI func to unregister device from PCI subsystem 6848 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
3076 * @pdev: pointer to PCI device 6849 * @pdev: pointer to PCI device
3077 * 6850 *
3078 * This routine is to be registered to the kernel's PCI subsystem. When an 6851 * This routine is to be called to detach a device with SLI-3 interface
3079 * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup 6852 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
3080 * for the HBA device to be removed from the PCI subsystem properly. 6853 * removed from PCI bus, it performs all the necessary cleanup for the HBA
6854 * device to be removed from the PCI subsystem properly.
3081 **/ 6855 **/
3082static void __devexit 6856static void __devexit
3083lpfc_pci_remove_one(struct pci_dev *pdev) 6857lpfc_pci_remove_one_s3(struct pci_dev *pdev)
3084{ 6858{
3085 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6859 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3086 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6860 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -3098,7 +6872,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3098 /* Release all the vports against this physical port */ 6872 /* Release all the vports against this physical port */
3099 vports = lpfc_create_vport_work_array(phba); 6873 vports = lpfc_create_vport_work_array(phba);
3100 if (vports != NULL) 6874 if (vports != NULL)
3101 for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++) 6875 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
3102 fc_vport_terminate(vports[i]->fc_vport); 6876 fc_vport_terminate(vports[i]->fc_vport);
3103 lpfc_destroy_vport_work_array(phba, vports); 6877 lpfc_destroy_vport_work_array(phba, vports);
3104 6878
@@ -3120,7 +6894,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3120 /* Final cleanup of txcmplq and reset the HBA */ 6894 /* Final cleanup of txcmplq and reset the HBA */
3121 lpfc_sli_brdrestart(phba); 6895 lpfc_sli_brdrestart(phba);
3122 6896
3123 lpfc_stop_phba_timers(phba); 6897 lpfc_stop_hba_timers(phba);
3124 spin_lock_irq(&phba->hbalock); 6898 spin_lock_irq(&phba->hbalock);
3125 list_del_init(&vport->listentry); 6899 list_del_init(&vport->listentry);
3126 spin_unlock_irq(&phba->hbalock); 6900 spin_unlock_irq(&phba->hbalock);
@@ -3128,7 +6902,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3128 lpfc_debugfs_terminate(vport); 6902 lpfc_debugfs_terminate(vport);
3129 6903
3130 /* Disable interrupt */ 6904 /* Disable interrupt */
3131 lpfc_disable_intr(phba); 6905 lpfc_sli_disable_intr(phba);
3132 6906
3133 pci_set_drvdata(pdev, NULL); 6907 pci_set_drvdata(pdev, NULL);
3134 scsi_host_put(shost); 6908 scsi_host_put(shost);
@@ -3138,7 +6912,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3138 * corresponding pools here. 6912 * corresponding pools here.
3139 */ 6913 */
3140 lpfc_scsi_free(phba); 6914 lpfc_scsi_free(phba);
3141 lpfc_mem_free(phba); 6915 lpfc_mem_free_all(phba);
3142 6916
3143 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 6917 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
3144 phba->hbqslimp.virt, phba->hbqslimp.phys); 6918 phba->hbqslimp.virt, phba->hbqslimp.phys);
@@ -3151,36 +6925,35 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
3151 iounmap(phba->ctrl_regs_memmap_p); 6925 iounmap(phba->ctrl_regs_memmap_p);
3152 iounmap(phba->slim_memmap_p); 6926 iounmap(phba->slim_memmap_p);
3153 6927
3154 idr_remove(&lpfc_hba_index, phba->brd_no); 6928 lpfc_hba_free(phba);
3155
3156 kfree(phba);
3157 6929
3158 pci_release_selected_regions(pdev, bars); 6930 pci_release_selected_regions(pdev, bars);
3159 pci_disable_device(pdev); 6931 pci_disable_device(pdev);
3160} 6932}
3161 6933
3162/** 6934/**
3163 * lpfc_pci_suspend_one - lpfc PCI func to suspend device for power management 6935 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
3164 * @pdev: pointer to PCI device 6936 * @pdev: pointer to PCI device
3165 * @msg: power management message 6937 * @msg: power management message
3166 * 6938 *
3167 * This routine is to be registered to the kernel's PCI subsystem to support 6939 * This routine is to be called from the kernel's PCI subsystem to support
3168 * system Power Management (PM). When PM invokes this method, it quiesces the 6940 * system Power Management (PM) for a device with the SLI-3 interface spec. When
3169 * device by stopping the driver's worker thread for the device, turning off 6941 * PM invokes this method, it quiesces the device by stopping the driver's
3170 * device's interrupt and DMA, and bring the device offline. Note that as the 6942 * worker thread for the device, turning off device's interrupt and DMA,
3171 * driver implements the minimum PM requirements to a power-aware driver's PM 6943 * and bring the device offline. Note that as the driver implements the
3172 * support for suspend/resume -- all the possible PM messages (SUSPEND, 6944 * minimum PM requirements to a power-aware driver's PM support for the
3173 * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND 6945 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
3174 * and the driver will fully reinitialize its device during resume() method 6946 * to the suspend() method call will be treated as SUSPEND and the driver will
3175 * call, the driver will set device to PCI_D3hot state in PCI config space 6947 * fully reinitialize its device during resume() method call, the driver will
3176 * instead of setting it according to the @msg provided by the PM. 6948 * set device to PCI_D3hot state in PCI config space instead of setting it
6949 * according to the @msg provided by the PM.
3177 * 6950 *
3178 * Return code 6951 * Return code
3179 * 0 - driver suspended the device 6952 * 0 - driver suspended the device
3180 * Error otherwise 6953 * Error otherwise
3181 **/ 6954 **/
3182static int 6955static int
3183lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 6956lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
3184{ 6957{
3185 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6958 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3186 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 6959 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3194,7 +6967,7 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3194 kthread_stop(phba->worker_thread); 6967 kthread_stop(phba->worker_thread);
3195 6968
3196 /* Disable interrupt from device */ 6969 /* Disable interrupt from device */
3197 lpfc_disable_intr(phba); 6970 lpfc_sli_disable_intr(phba);
3198 6971
3199 /* Save device state to PCI config space */ 6972 /* Save device state to PCI config space */
3200 pci_save_state(pdev); 6973 pci_save_state(pdev);
@@ -3204,25 +6977,26 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3204} 6977}
3205 6978
3206/** 6979/**
3207 * lpfc_pci_resume_one - lpfc PCI func to resume device for power management 6980 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
3208 * @pdev: pointer to PCI device 6981 * @pdev: pointer to PCI device
3209 * 6982 *
3210 * This routine is to be registered to the kernel's PCI subsystem to support 6983 * This routine is to be called from the kernel's PCI subsystem to support
3211 * system Power Management (PM). When PM invokes this method, it restores 6984 * system Power Management (PM) for a device with the SLI-3 interface spec. When PM
3212 * the device's PCI config space state and fully reinitializes the device 6985 * invokes this method, it restores the device's PCI config space state and
3213 * and brings it online. Note that as the driver implements the minimum PM 6986 * fully reinitializes the device and brings it online. Note that as the
3214 * requirements to a power-aware driver's PM for suspend/resume -- all 6987 * driver implements the minimum PM requirements to a power-aware driver's
3215 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 6988 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
3216 * method call will be treated as SUSPEND and the driver will fully 6989 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
3217 * reinitialize its device during resume() method call, the device will be 6990 * driver will fully reinitialize its device during resume() method call,
3218 * set to PCI_D0 directly in PCI config space before restoring the state. 6991 * the device will be set to PCI_D0 directly in PCI config space before
6992 * restoring the state.
3219 * 6993 *
3220 * Return code 6994 * Return code
3221 * 0 - driver suspended the device 6995 * 0 - driver resumed the device
3222 * Error otherwise 6996 * Error otherwise
3223 **/ 6997 **/
3224static int 6998static int
3225lpfc_pci_resume_one(struct pci_dev *pdev) 6999lpfc_pci_resume_one_s3(struct pci_dev *pdev)
3226{ 7000{
3227 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7001 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3228 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7002 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3250,7 +7024,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
3250 } 7024 }
3251 7025
3252 /* Configure and enable interrupt */ 7026 /* Configure and enable interrupt */
3253 intr_mode = lpfc_enable_intr(phba, phba->intr_mode); 7027 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
3254 if (intr_mode == LPFC_INTR_ERROR) { 7028 if (intr_mode == LPFC_INTR_ERROR) {
3255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7029 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3256 "0430 PM resume Failed to enable interrupt\n"); 7030 "0430 PM resume Failed to enable interrupt\n");
@@ -3269,23 +7043,24 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
3269} 7043}
3270 7044
3271/** 7045/**
3272 * lpfc_io_error_detected - Driver method for handling PCI I/O error detected 7046 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
3273 * @pdev: pointer to PCI device. 7047 * @pdev: pointer to PCI device.
3274 * @state: the current PCI connection state. 7048 * @state: the current PCI connection state.
3275 * 7049 *
3276 * This routine is registered to the PCI subsystem for error handling. This 7050 * This routine is called from the PCI subsystem for I/O error handling to a
3277 * function is called by the PCI subsystem after a PCI bus error affecting 7051 * device with the SLI-3 interface spec. This function is called by the PCI
3278 * this device has been detected. When this function is invoked, it will 7052 * subsystem after a PCI bus error affecting this device has been detected.
3279 * need to stop all the I/Os and interrupt(s) to the device. Once that is 7053 * When this function is invoked, it will need to stop all the I/Os and
3280 * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to 7054 * interrupt(s) to the device. Once that is done, it will return
3281 * perform proper recovery as desired. 7055 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
7056 * as desired.
3282 * 7057 *
3283 * Return codes 7058 * Return codes
3284 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7059 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
3285 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7060 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3286 **/ 7061 **/
3287static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, 7062static pci_ers_result_t
3288 pci_channel_state_t state) 7063lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
3289{ 7064{
3290 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7065 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3291 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7066 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3312,30 +7087,32 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
3312 lpfc_sli_abort_iocb_ring(phba, pring); 7087 lpfc_sli_abort_iocb_ring(phba, pring);
3313 7088
3314 /* Disable interrupt */ 7089 /* Disable interrupt */
3315 lpfc_disable_intr(phba); 7090 lpfc_sli_disable_intr(phba);
3316 7091
3317 /* Request a slot reset. */ 7092 /* Request a slot reset. */
3318 return PCI_ERS_RESULT_NEED_RESET; 7093 return PCI_ERS_RESULT_NEED_RESET;
3319} 7094}
3320 7095
3321/** 7096/**
3322 * lpfc_io_slot_reset - Restart a PCI device from scratch 7097 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
3323 * @pdev: pointer to PCI device. 7098 * @pdev: pointer to PCI device.
3324 * 7099 *
3325 * This routine is registered to the PCI subsystem for error handling. This is 7100 * This routine is called from the PCI subsystem for error handling to a
3326 * called after PCI bus has been reset to restart the PCI card from scratch, 7101 * device with the SLI-3 interface spec. This is called after PCI bus has been
3327 * as if from a cold-boot. During the PCI subsystem error recovery, after the 7102 * reset to restart the PCI card from scratch, as if from a cold-boot.
3328 * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform 7103 * During the PCI subsystem error recovery, after driver returns
3329 * proper error recovery and then call this routine before calling the .resume 7104 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
3330 * method to recover the device. This function will initialize the HBA device, 7105 * recovery and then call this routine before calling the .resume method
3331 * enable the interrupt, but it will just put the HBA to offline state without 7106 * to recover the device. This function will initialize the HBA device,
3332 * passing any I/O traffic. 7107 * enable the interrupt, but it will just put the HBA to offline state
7108 * without passing any I/O traffic.
3333 * 7109 *
3334 * Return codes 7110 * Return codes
3335 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 7111 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
3336 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7112 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3337 */ 7113 */
3338static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) 7114static pci_ers_result_t
7115lpfc_io_slot_reset_s3(struct pci_dev *pdev)
3339{ 7116{
3340 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7117 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3341 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7118 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3354,11 +7131,11 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
3354 pci_set_master(pdev); 7131 pci_set_master(pdev);
3355 7132
3356 spin_lock_irq(&phba->hbalock); 7133 spin_lock_irq(&phba->hbalock);
3357 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 7134 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3358 spin_unlock_irq(&phba->hbalock); 7135 spin_unlock_irq(&phba->hbalock);
3359 7136
3360 /* Configure and enable interrupt */ 7137 /* Configure and enable interrupt */
3361 intr_mode = lpfc_enable_intr(phba, phba->intr_mode); 7138 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
3362 if (intr_mode == LPFC_INTR_ERROR) { 7139 if (intr_mode == LPFC_INTR_ERROR) {
3363 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7140 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3364 "0427 Cannot re-enable interrupt after " 7141 "0427 Cannot re-enable interrupt after "
@@ -3378,20 +7155,713 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
3378} 7155}
3379 7156
3380/** 7157/**
3381 * lpfc_io_resume - Resume PCI I/O operation 7158 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
3382 * @pdev: pointer to PCI device 7159 * @pdev: pointer to PCI device
3383 * 7160 *
3384 * This routine is registered to the PCI subsystem for error handling. It is 7161 * This routine is called from the PCI subsystem for error handling to a device
3385 * called when kernel error recovery tells the lpfc driver that it is ok to 7162 * with the SLI-3 interface spec. It is called when kernel error recovery tells
3386 * resume normal PCI operation after PCI bus error recovery. After this call, 7163 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
3387 * traffic can start to flow from this device again. 7164 * error recovery. After this call, traffic can start to flow from this device
7165 * again.
3388 */ 7166 */
3389static void lpfc_io_resume(struct pci_dev *pdev) 7167static void
7168lpfc_io_resume_s3(struct pci_dev *pdev)
7169{
7170 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7171 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7172
7173 lpfc_online(phba);
7174}
7175
7176/**
7177 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
7178 * @phba: pointer to lpfc hba data structure.
7179 *
7180 * returns the number of ELS/CT IOCBs to reserve
7181 **/
7182int
7183lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7184{
7185 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7186
7187 if (max_xri <= 100)
7188 return 4;
7189 else if (max_xri <= 256)
7190 return 8;
7191 else if (max_xri <= 512)
7192 return 16;
7193 else if (max_xri <= 1024)
7194 return 32;
7195 else
7196 return 48;
7197}
7198
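lpfc_sli4_get_els_iocb_cnt() above is a simple threshold ladder: for example, max_xri = 300 falls in the 257-512 band and reserves 16 ELS/CT IOCBs. The table-driven restatement below is an editorial sketch of the same mapping, runnable in isolation; it is not how the driver codes it.

#include <stdio.h>

static int els_iocb_cnt(int max_xri)
{
        /* Same breakpoints as the ladder above, easier to audit. */
        static const struct { int xri_max, cnt; } tbl[] = {
                { 100, 4 }, { 256, 8 }, { 512, 16 }, { 1024, 32 },
        };
        for (unsigned i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
                if (max_xri <= tbl[i].xri_max)
                        return tbl[i].cnt;
        return 48;                      /* anything above 1024 XRIs */
}

int main(void)
{
        printf("%d\n", els_iocb_cnt(300));      /* prints 16 */
        return 0;
}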
7199/**
7200 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
7201 * @pdev: pointer to PCI device
7202 * @pid: pointer to PCI device identifier
7203 *
 7204 * This routine is called from the kernel's PCI subsystem for a device with
 7205 * the SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 7206 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 7207 * information of the device and driver to see if the driver can support
 7208 * this kind of device. If the match is successful, the driver
7209 * core invokes this routine. If this routine determines it can claim the HBA,
7210 * it does all the initialization that it needs to do to handle the HBA
7211 * properly.
7212 *
7213 * Return code
7214 * 0 - driver can claim the device
7215 * negative value - driver can not claim the device
7216 **/
7217static int __devinit
7218lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7219{
7220 struct lpfc_hba *phba;
7221 struct lpfc_vport *vport = NULL;
7222 int error;
7223 uint32_t cfg_mode, intr_mode;
7224 int mcnt;
7225
7226 /* Allocate memory for HBA structure */
7227 phba = lpfc_hba_alloc(pdev);
7228 if (!phba)
7229 return -ENOMEM;
7230
7231 /* Perform generic PCI device enabling operation */
7232 error = lpfc_enable_pci_dev(phba);
7233 if (error) {
7234 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7235 "1409 Failed to enable pci device.\n");
7236 goto out_free_phba;
7237 }
7238
7239 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
7240 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
7241 if (error)
7242 goto out_disable_pci_dev;
7243
7244 /* Set up SLI-4 specific device PCI memory space */
7245 error = lpfc_sli4_pci_mem_setup(phba);
7246 if (error) {
7247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7248 "1410 Failed to set up pci memory space.\n");
7249 goto out_disable_pci_dev;
7250 }
7251
7252 /* Set up phase-1 common device driver resources */
7253 error = lpfc_setup_driver_resource_phase1(phba);
7254 if (error) {
7255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7256 "1411 Failed to set up driver resource.\n");
7257 goto out_unset_pci_mem_s4;
7258 }
7259
7260 /* Set up SLI-4 Specific device driver resources */
7261 error = lpfc_sli4_driver_resource_setup(phba);
7262 if (error) {
7263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7264 "1412 Failed to set up driver resource.\n");
7265 goto out_unset_pci_mem_s4;
7266 }
7267
7268 /* Initialize and populate the iocb list per host */
7269 error = lpfc_init_iocb_list(phba,
7270 phba->sli4_hba.max_cfg_param.max_xri);
7271 if (error) {
7272 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7273 "1413 Failed to initialize iocb list.\n");
7274 goto out_unset_driver_resource_s4;
7275 }
7276
7277 /* Set up common device driver resources */
7278 error = lpfc_setup_driver_resource_phase2(phba);
7279 if (error) {
7280 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7281 "1414 Failed to set up driver resource.\n");
7282 goto out_free_iocb_list;
7283 }
7284
7285 /* Create SCSI host to the physical port */
7286 error = lpfc_create_shost(phba);
7287 if (error) {
7288 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7289 "1415 Failed to create scsi host.\n");
7290 goto out_unset_driver_resource;
7291 }
7292
7293 /* Configure sysfs attributes */
7294 vport = phba->pport;
7295 error = lpfc_alloc_sysfs_attr(vport);
7296 if (error) {
7297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7298 "1416 Failed to allocate sysfs attr\n");
7299 goto out_destroy_shost;
7300 }
7301
 7302 /* Now, try to enable interrupt and bring up the device */
7303 cfg_mode = phba->cfg_use_msi;
7304 while (true) {
7305 /* Put device to a known state before enabling interrupt */
7306 lpfc_stop_port(phba);
7307 /* Configure and enable interrupt */
7308 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
7309 if (intr_mode == LPFC_INTR_ERROR) {
7310 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7311 "0426 Failed to enable interrupt.\n");
7312 error = -ENODEV;
7313 goto out_free_sysfs_attr;
7314 }
7315 /* Set up SLI-4 HBA */
7316 if (lpfc_sli4_hba_setup(phba)) {
7317 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7318 "1421 Failed to set up hba\n");
7319 error = -ENODEV;
7320 goto out_disable_intr;
7321 }
7322
7323 /* Send NOP mbx cmds for non-INTx mode active interrupt test */
7324 if (intr_mode != 0)
7325 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
7326 LPFC_ACT_INTR_CNT);
7327
7328 /* Check active interrupts received only for MSI/MSI-X */
7329 if (intr_mode == 0 ||
7330 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
7331 /* Log the current active interrupt mode */
7332 phba->intr_mode = intr_mode;
7333 lpfc_log_intr_mode(phba, intr_mode);
7334 break;
7335 }
7336 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7337 "0451 Configure interrupt mode (%d) "
7338 "failed active interrupt test.\n",
7339 intr_mode);
 7340 /* Unset the previous SLI-4 HBA setup */
7341 lpfc_sli4_unset_hba(phba);
7342 /* Try next level of interrupt mode */
7343 cfg_mode = --intr_mode;
7344 }
7345
7346 /* Perform post initialization setup */
7347 lpfc_post_init_setup(phba);
7348
7349 return 0;
7350
7351out_disable_intr:
7352 lpfc_sli4_disable_intr(phba);
7353out_free_sysfs_attr:
7354 lpfc_free_sysfs_attr(vport);
7355out_destroy_shost:
7356 lpfc_destroy_shost(phba);
7357out_unset_driver_resource:
7358 lpfc_unset_driver_resource_phase2(phba);
7359out_free_iocb_list:
7360 lpfc_free_iocb_list(phba);
7361out_unset_driver_resource_s4:
7362 lpfc_sli4_driver_resource_unset(phba);
7363out_unset_pci_mem_s4:
7364 lpfc_sli4_pci_mem_unset(phba);
7365out_disable_pci_dev:
7366 lpfc_disable_pci_dev(phba);
7367out_free_phba:
7368 lpfc_hba_free(phba);
7369 return error;
7370}
7371
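Both probe paths (lpfc_pci_probe_one_s3 earlier and lpfc_pci_probe_one_s4 above) use the kernel's goto-unwind idiom: one label per setup step, laid out in reverse order, so a failure at step N tears down exactly the steps that preceded it. A compilable skeleton of that shape, with placeholder step names rather than lpfc functions:

#include <stdio.h>

static int step_a(void) { return 0; }   /* e.g. enable the PCI device */
static int step_b(void) { return 0; }   /* e.g. map PCI memory space  */
static int step_c(void) { return -1; }  /* e.g. create the SCSI host  */
static void undo_step_b(void) { puts("unmap memory"); }
static void undo_step_a(void) { puts("disable device"); }

static int probe_skeleton(void)
{
        int err;

        err = step_a();
        if (err)
                goto out;
        err = step_b();
        if (err)
                goto undo_a;
        err = step_c();
        if (err)
                goto undo_b;            /* failure here unwinds b, then a */
        return 0;

undo_b:
        undo_step_b();
undo_a:
        undo_step_a();
out:
        return err;
}

int main(void)
{
        return probe_skeleton() ? 1 : 0;
}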
7372/**
7373 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
7374 * @pdev: pointer to PCI device
7375 *
 7376 * This routine is called from the kernel's PCI subsystem for a device with
 7377 * the SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
7378 * removed from PCI bus, it performs all the necessary cleanup for the HBA
7379 * device to be removed from the PCI subsystem properly.
7380 **/
7381static void __devexit
7382lpfc_pci_remove_one_s4(struct pci_dev *pdev)
7383{
7384 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7385 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7386 struct lpfc_vport **vports;
7387 struct lpfc_hba *phba = vport->phba;
7388 int i;
7389
7390 /* Mark the device unloading flag */
7391 spin_lock_irq(&phba->hbalock);
7392 vport->load_flag |= FC_UNLOADING;
7393 spin_unlock_irq(&phba->hbalock);
7394
7395 /* Free the HBA sysfs attributes */
7396 lpfc_free_sysfs_attr(vport);
7397
7398 /* Release all the vports against this physical port */
7399 vports = lpfc_create_vport_work_array(phba);
7400 if (vports != NULL)
7401 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
7402 fc_vport_terminate(vports[i]->fc_vport);
7403 lpfc_destroy_vport_work_array(phba, vports);
7404
7405 /* Remove FC host and then SCSI host with the physical port */
7406 fc_remove_host(shost);
7407 scsi_remove_host(shost);
7408
7409 /* Perform cleanup on the physical port */
7410 lpfc_cleanup(vport);
7411
7412 /*
7413 * Bring down the SLI Layer. This step disables all interrupts,
7414 * clears the rings, discards all mailbox commands, and resets
7415 * the HBA FCoE function.
7416 */
7417 lpfc_debugfs_terminate(vport);
7418 lpfc_sli4_hba_unset(phba);
7419
7420 spin_lock_irq(&phba->hbalock);
7421 list_del_init(&vport->listentry);
7422 spin_unlock_irq(&phba->hbalock);
7423
7424 /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
7425 * buffers are released to their corresponding pools here.
7426 */
7427 lpfc_scsi_free(phba);
7428 lpfc_sli4_driver_resource_unset(phba);
7429
7430 /* Unmap adapter Control and Doorbell registers */
7431 lpfc_sli4_pci_mem_unset(phba);
7432
7433 /* Release PCI resources and disable device's PCI function */
7434 scsi_host_put(shost);
7435 lpfc_disable_pci_dev(phba);
7436
7437 /* Finally, free the driver's device data structure */
7438 lpfc_hba_free(phba);
7439
7440 return;
7441}
7442
7443/**
7444 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
7445 * @pdev: pointer to PCI device
7446 * @msg: power management message
7447 *
7448 * This routine is called from the kernel's PCI subsystem to support system
 7449 * Power Management (PM) for a device with the SLI-4 interface spec. When PM invokes
7450 * this method, it quiesces the device by stopping the driver's worker
7451 * thread for the device, turning off device's interrupt and DMA, and bring
7452 * the device offline. Note that as the driver implements the minimum PM
7453 * requirements to a power-aware driver's PM support for suspend/resume -- all
7454 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
7455 * method call will be treated as SUSPEND and the driver will fully
7456 * reinitialize its device during resume() method call, the driver will set
7457 * device to PCI_D3hot state in PCI config space instead of setting it
7458 * according to the @msg provided by the PM.
7459 *
7460 * Return code
7461 * 0 - driver suspended the device
7462 * Error otherwise
7463 **/
7464static int
7465lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
3390{ 7466{
3391 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7467 struct Scsi_Host *shost = pci_get_drvdata(pdev);
3392 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7468 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3393 7469
7470 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7471 "0298 PCI device Power Management suspend.\n");
7472
7473 /* Bring down the device */
7474 lpfc_offline_prep(phba);
7475 lpfc_offline(phba);
7476 kthread_stop(phba->worker_thread);
7477
7478 /* Disable interrupt from device */
7479 lpfc_sli4_disable_intr(phba);
7480
7481 /* Save device state to PCI config space */
7482 pci_save_state(pdev);
7483 pci_set_power_state(pdev, PCI_D3hot);
7484
7485 return 0;
7486}
7487
7488/**
7489 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
7490 * @pdev: pointer to PCI device
7491 *
7492 * This routine is called from the kernel's PCI subsystem to support system
 7493 * Power Management (PM) for a device with the SLI-4 interface spec. When PM invokes
7494 * this method, it restores the device's PCI config space state and fully
7495 * reinitializes the device and brings it online. Note that as the driver
7496 * implements the minimum PM requirements to a power-aware driver's PM for
7497 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
7498 * to the suspend() method call will be treated as SUSPEND and the driver
7499 * will fully reinitialize its device during resume() method call, the device
7500 * will be set to PCI_D0 directly in PCI config space before restoring the
7501 * state.
7502 *
7503 * Return code
 7504 * 0 - driver resumed the device
7505 * Error otherwise
7506 **/
7507static int
7508lpfc_pci_resume_one_s4(struct pci_dev *pdev)
7509{
7510 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7511 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7512 uint32_t intr_mode;
7513 int error;
7514
7515 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7516 "0292 PCI device Power Management resume.\n");
7517
7518 /* Restore device state from PCI config space */
7519 pci_set_power_state(pdev, PCI_D0);
7520 pci_restore_state(pdev);
7521 if (pdev->is_busmaster)
7522 pci_set_master(pdev);
7523
7524 /* Startup the kernel thread for this host adapter. */
7525 phba->worker_thread = kthread_run(lpfc_do_work, phba,
7526 "lpfc_worker_%d", phba->brd_no);
7527 if (IS_ERR(phba->worker_thread)) {
7528 error = PTR_ERR(phba->worker_thread);
7529 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7530 "0293 PM resume failed to start worker "
7531 "thread: error=x%x.\n", error);
7532 return error;
7533 }
7534
7535 /* Configure and enable interrupt */
7536 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
7537 if (intr_mode == LPFC_INTR_ERROR) {
7538 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7539 "0294 PM resume Failed to enable interrupt\n");
7540 return -EIO;
7541 } else
7542 phba->intr_mode = intr_mode;
7543
7544 /* Restart HBA and bring it online */
7545 lpfc_sli_brdrestart(phba);
3394 lpfc_online(phba); 7546 lpfc_online(phba);
7547
7548 /* Log the current active interrupt mode */
7549 lpfc_log_intr_mode(phba, phba->intr_mode);
7550
7551 return 0;
7552}
7553
7554/**
7555 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
7556 * @pdev: pointer to PCI device.
7557 * @state: the current PCI connection state.
7558 *
 7559 * This routine is called from the PCI subsystem for error handling to a device
 7560 * with the SLI-4 interface spec. This function is called by the PCI subsystem
7561 * after a PCI bus error affecting this device has been detected. When this
7562 * function is invoked, it will need to stop all the I/Os and interrupt(s)
7563 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
7564 * for the PCI subsystem to perform proper recovery as desired.
7565 *
7566 * Return codes
7567 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7568 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7569 **/
7570static pci_ers_result_t
7571lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
7572{
7573 return PCI_ERS_RESULT_NEED_RESET;
7574}
7575
7576/**
 7577 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
7578 * @pdev: pointer to PCI device.
7579 *
 7580 * This routine is called from the PCI subsystem for error handling to a device
 7581 * with the SLI-4 interface spec. It is called after PCI bus has been reset to
7582 * restart the PCI card from scratch, as if from a cold-boot. During the
7583 * PCI subsystem error recovery, after the driver returns
7584 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
7585 * recovery and then call this routine before calling the .resume method to
7586 * recover the device. This function will initialize the HBA device, enable
7587 * the interrupt, but it will just put the HBA to offline state without
7588 * passing any I/O traffic.
7589 *
7590 * Return codes
7591 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7592 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7593 */
7594static pci_ers_result_t
7595lpfc_io_slot_reset_s4(struct pci_dev *pdev)
7596{
7597 return PCI_ERS_RESULT_RECOVERED;
7598}
7599
7600/**
7601 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
7602 * @pdev: pointer to PCI device
7603 *
 7604 * This routine is called from the PCI subsystem for error handling to a device
 7605 * with the SLI-4 interface spec. It is called when kernel error recovery tells
7606 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
7607 * error recovery. After this call, traffic can start to flow from this device
7608 * again.
7609 **/
7610static void
7611lpfc_io_resume_s4(struct pci_dev *pdev)
7612{
7613 return;
7614}
7615
7616/**
7617 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
7618 * @pdev: pointer to PCI device
7619 * @pid: pointer to PCI device identifier
7620 *
7621 * This routine is to be registered to the kernel's PCI subsystem. When an
7622 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 7623 * at PCI device-specific information of the device and driver to see if
 7624 * the driver can support this kind of device. If the match is
7625 * successful, the driver core invokes this routine. This routine dispatches
7626 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
7627 * do all the initialization that it needs to do to handle the HBA device
7628 * properly.
7629 *
7630 * Return code
7631 * 0 - driver can claim the device
7632 * negative value - driver can not claim the device
7633 **/
7634static int __devinit
7635lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7636{
7637 int rc;
7638 uint16_t dev_id;
7639
7640 if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
7641 return -ENODEV;
7642
7643 switch (dev_id) {
7644 case PCI_DEVICE_ID_TIGERSHARK:
7645 case PCI_DEVICE_ID_TIGERSHARK_S:
7646 rc = lpfc_pci_probe_one_s4(pdev, pid);
7647 break;
7648 default:
7649 rc = lpfc_pci_probe_one_s3(pdev, pid);
7650 break;
7651 }
7652 return rc;
7653}
7654
7655/**
7656 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
7657 * @pdev: pointer to PCI device
7658 *
7659 * This routine is to be registered to the kernel's PCI subsystem. When an
7660 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
7661 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
7662 * remove routine, which will perform all the necessary cleanup for the
7663 * device to be removed from the PCI subsystem properly.
7664 **/
7665static void __devexit
7666lpfc_pci_remove_one(struct pci_dev *pdev)
7667{
7668 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7669 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7670
7671 switch (phba->pci_dev_grp) {
7672 case LPFC_PCI_DEV_LP:
7673 lpfc_pci_remove_one_s3(pdev);
7674 break;
7675 case LPFC_PCI_DEV_OC:
7676 lpfc_pci_remove_one_s4(pdev);
7677 break;
7678 default:
7679 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7680 "1424 Invalid PCI device group: 0x%x\n",
7681 phba->pci_dev_grp);
7682 break;
7683 }
7684 return;
7685}
7686
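lpfc_pci_remove_one() above, and the suspend/resume and error-handling entry points that follow, all repeat the same switch on phba->pci_dev_grp. The same fan-out can live in a per-group ops table, which is in spirit what lpfc_api_table_setup() builds for the SLI-internal routines; the struct and names below are a sketch for illustration, not the driver's actual jump table.

#include <stdio.h>

typedef struct pci_dev pci_dev_t;       /* stand-in for the kernel type */

struct dev_grp_ops {
        int (*suspend)(pci_dev_t *pdev);
        int (*resume)(pci_dev_t *pdev);
};

static int s3_suspend(pci_dev_t *p) { (void)p; puts("s3 suspend"); return 0; }
static int s3_resume (pci_dev_t *p) { (void)p; puts("s3 resume");  return 0; }
static int s4_suspend(pci_dev_t *p) { (void)p; puts("s4 suspend"); return 0; }
static int s4_resume (pci_dev_t *p) { (void)p; puts("s4 resume");  return 0; }

enum { DEV_LP, DEV_OC, DEV_GRP_MAX };   /* SLI-3 and SLI-4 groups */

static const struct dev_grp_ops grp_ops[DEV_GRP_MAX] = {
        [DEV_LP] = { s3_suspend, s3_resume },
        [DEV_OC] = { s4_suspend, s4_resume },
};

int main(void)
{
        int grp = DEV_OC;               /* would come from phba->pci_dev_grp */
        grp_ops[grp].suspend(NULL);     /* replaces the per-entry-point switch */
        grp_ops[grp].resume(NULL);
        return 0;
}

The switch form keeps the invalid-group log message close at hand in each entry point, which is presumably why the driver prefers it over a table here.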
7687/**
7688 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
7689 * @pdev: pointer to PCI device
7690 * @msg: power management message
7691 *
7692 * This routine is to be registered to the kernel's PCI subsystem to support
7693 * system Power Management (PM). When PM invokes this method, it dispatches
7694 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
7695 * suspend the device.
7696 *
7697 * Return code
7698 * 0 - driver suspended the device
7699 * Error otherwise
7700 **/
7701static int
7702lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
7703{
7704 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7705 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7706 int rc = -ENODEV;
7707
7708 switch (phba->pci_dev_grp) {
7709 case LPFC_PCI_DEV_LP:
7710 rc = lpfc_pci_suspend_one_s3(pdev, msg);
7711 break;
7712 case LPFC_PCI_DEV_OC:
7713 rc = lpfc_pci_suspend_one_s4(pdev, msg);
7714 break;
7715 default:
7716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7717 "1425 Invalid PCI device group: 0x%x\n",
7718 phba->pci_dev_grp);
7719 break;
7720 }
7721 return rc;
7722}
7723
7724/**
7725 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
7726 * @pdev: pointer to PCI device
7727 *
7728 * This routine is to be registered to the kernel's PCI subsystem to support
7729 * system Power Management (PM). When PM invokes this method, it dispatches
7730 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
7731 * resume the device.
7732 *
7733 * Return code
 7734 * 0 - driver resumed the device
7735 * Error otherwise
7736 **/
7737static int
7738lpfc_pci_resume_one(struct pci_dev *pdev)
7739{
7740 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7741 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7742 int rc = -ENODEV;
7743
7744 switch (phba->pci_dev_grp) {
7745 case LPFC_PCI_DEV_LP:
7746 rc = lpfc_pci_resume_one_s3(pdev);
7747 break;
7748 case LPFC_PCI_DEV_OC:
7749 rc = lpfc_pci_resume_one_s4(pdev);
7750 break;
7751 default:
7752 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7753 "1426 Invalid PCI device group: 0x%x\n",
7754 phba->pci_dev_grp);
7755 break;
7756 }
7757 return rc;
7758}
7759
7760/**
7761 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
7762 * @pdev: pointer to PCI device.
7763 * @state: the current PCI connection state.
7764 *
7765 * This routine is registered to the PCI subsystem for error handling. This
7766 * function is called by the PCI subsystem after a PCI bus error affecting
7767 * this device has been detected. When this routine is invoked, it dispatches
7768 * the action to the proper SLI-3 or SLI-4 device error detected handling
7769 * routine, which will perform the proper error detected operation.
7770 *
7771 * Return codes
7772 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7773 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7774 **/
7775static pci_ers_result_t
7776lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7777{
7778 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7779 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7780 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7781
7782 switch (phba->pci_dev_grp) {
7783 case LPFC_PCI_DEV_LP:
7784 rc = lpfc_io_error_detected_s3(pdev, state);
7785 break;
7786 case LPFC_PCI_DEV_OC:
7787 rc = lpfc_io_error_detected_s4(pdev, state);
7788 break;
7789 default:
7790 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7791 "1427 Invalid PCI device group: 0x%x\n",
7792 phba->pci_dev_grp);
7793 break;
7794 }
7795 return rc;
7796}
7797
7798/**
 7799 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
7800 * @pdev: pointer to PCI device.
7801 *
7802 * This routine is registered to the PCI subsystem for error handling. This
7803 * function is called after PCI bus has been reset to restart the PCI card
7804 * from scratch, as if from a cold-boot. When this routine is invoked, it
7805 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
7806 * routine, which will perform the proper device reset.
7807 *
7808 * Return codes
7809 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
7810 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7811 **/
7812static pci_ers_result_t
7813lpfc_io_slot_reset(struct pci_dev *pdev)
7814{
7815 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7816 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7817 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
7818
7819 switch (phba->pci_dev_grp) {
7820 case LPFC_PCI_DEV_LP:
7821 rc = lpfc_io_slot_reset_s3(pdev);
7822 break;
7823 case LPFC_PCI_DEV_OC:
7824 rc = lpfc_io_slot_reset_s4(pdev);
7825 break;
7826 default:
7827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7828 "1428 Invalid PCI device group: 0x%x\n",
7829 phba->pci_dev_grp);
7830 break;
7831 }
7832 return rc;
7833}
7834
7835/**
7836 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
7837 * @pdev: pointer to PCI device
7838 *
7839 * This routine is registered to the PCI subsystem for error handling. It
7840 * is called when kernel error recovery tells the lpfc driver that it is
7841 * OK to resume normal PCI operation after PCI bus error recovery. When
7842 * this routine is invoked, it dispatches the action to the proper SLI-3
7843 * or SLI-4 device io_resume routine, which will resume the device operation.
7844 **/
7845static void
7846lpfc_io_resume(struct pci_dev *pdev)
7847{
7848 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7849 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7850
7851 switch (phba->pci_dev_grp) {
7852 case LPFC_PCI_DEV_LP:
7853 lpfc_io_resume_s3(pdev);
7854 break;
7855 case LPFC_PCI_DEV_OC:
7856 lpfc_io_resume_s4(pdev);
7857 break;
7858 default:
7859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7860 "1429 Invalid PCI device group: 0x%x\n",
7861 phba->pci_dev_grp);
7862 break;
7863 }
7864 return;
3395} 7865}
3396 7866
3397static struct pci_device_id lpfc_id_table[] = { 7867static struct pci_device_id lpfc_id_table[] = {
@@ -3469,6 +7939,10 @@ static struct pci_device_id lpfc_id_table[] = {
3469 PCI_ANY_ID, PCI_ANY_ID, }, 7939 PCI_ANY_ID, PCI_ANY_ID, },
3470 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 7940 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
3471 PCI_ANY_ID, PCI_ANY_ID, }, 7941 PCI_ANY_ID, PCI_ANY_ID, },
7942 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7943 PCI_ANY_ID, PCI_ANY_ID, },
7944 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
7945 PCI_ANY_ID, PCI_ANY_ID, },
3472 { 0 } 7946 { 0 }
3473}; 7947};
3474 7948
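The two Tigershark entries just added are what route the ServerEngines FCoE functions into lpfc_pci_probe_one(), which then picks the SLI-4 path by device ID. The PCI core walks this table first-match until the { 0 } sentinel; below is a simplified sketch of the per-entry test, ignoring the subsystem and class fields (the vendor/device pair in main() is hypothetical):

#include <stdio.h>

#define ANY_ID 0xffffu                  /* stand-in for PCI_ANY_ID */

struct id_entry { unsigned vendor, device; };

static const struct id_entry *id_match(const struct id_entry *tbl,
                                       unsigned vendor, unsigned device)
{
        for (; tbl->vendor; tbl++)      /* { 0 } sentinel ends the scan */
                if ((tbl->vendor == ANY_ID || tbl->vendor == vendor) &&
                    (tbl->device == ANY_ID || tbl->device == device))
                        return tbl;
        return NULL;
}

int main(void)
{
        static const struct id_entry table[] = {
                { 0x10df, 0x0704 },     /* hypothetical vendor/device pair */
                { 0 }
        };
        printf("%s\n", id_match(table, 0x10df, 0x0704) ? "claim" : "ignore");
        return 0;
}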
@@ -3486,7 +7960,7 @@ static struct pci_driver lpfc_driver = {
3486 .probe = lpfc_pci_probe_one, 7960 .probe = lpfc_pci_probe_one,
3487 .remove = __devexit_p(lpfc_pci_remove_one), 7961 .remove = __devexit_p(lpfc_pci_remove_one),
3488 .suspend = lpfc_pci_suspend_one, 7962 .suspend = lpfc_pci_suspend_one,
3489 .resume = lpfc_pci_resume_one, 7963 .resume = lpfc_pci_resume_one,
3490 .err_handler = &lpfc_err_handler, 7964 .err_handler = &lpfc_err_handler,
3491}; 7965};
3492 7966
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 1aa85709b01..954ba57970a 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -18,33 +18,39 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LOG_ELS 0x1 /* ELS events */ 21#define LOG_ELS 0x00000001 /* ELS events */
22#define LOG_DISCOVERY 0x2 /* Link discovery events */ 22#define LOG_DISCOVERY 0x00000002 /* Link discovery events */
23#define LOG_MBOX 0x4 /* Mailbox events */ 23#define LOG_MBOX 0x00000004 /* Mailbox events */
24#define LOG_INIT 0x8 /* Initialization events */ 24#define LOG_INIT 0x00000008 /* Initialization events */
25#define LOG_LINK_EVENT 0x10 /* Link events */ 25#define LOG_LINK_EVENT 0x00000010 /* Link events */
26#define LOG_IP 0x20 /* IP traffic history */ 26#define LOG_IP 0x00000020 /* IP traffic history */
27#define LOG_FCP 0x40 /* FCP traffic history */ 27#define LOG_FCP 0x00000040 /* FCP traffic history */
28#define LOG_NODE 0x80 /* Node table events */ 28#define LOG_NODE 0x00000080 /* Node table events */
29#define LOG_TEMP 0x100 /* Temperature sensor events */ 29#define LOG_TEMP 0x00000100 /* Temperature sensor events */
30#define LOG_BG 0x200 /* BlockGuard events */ 30#define LOG_BG 0x00000200 /* BlockGuard events */
31#define LOG_MISC 0x400 /* Miscellaneous events */ 31#define LOG_MISC 0x00000400 /* Miscellaneous events */
32#define LOG_SLI 0x800 /* SLI events */ 32#define LOG_SLI 0x00000800 /* SLI events */
33#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ 33#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */
34#define LOG_LIBDFC 0x2000 /* Libdfc events */ 34#define LOG_LIBDFC 0x00002000 /* Libdfc events */
35#define LOG_VPORT 0x4000 /* NPIV events */ 35#define LOG_VPORT 0x00004000 /* NPIV events */
36#define LOG_ALL_MSG 0xffff /* LOG all messages */ 36#define LOG_SECURITY 0x00008000 /* Security events */
 37#define LOG_EVENT 0x00010000 /* CT, TEMP, DUMP logging */
38#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
37 39
38#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 40#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
39 do { \ 41do { \
40 { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ 42 { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
41 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ 43 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
42 fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \ 44 fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
43 } while (0) 45} while (0)
44 46
45#define lpfc_printf_log(phba, level, mask, fmt, arg...) \ 47#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
46 do { \ 48do { \
47 { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ 49 { uint32_t log_verbose = (phba)->pport ? \
50 (phba)->pport->cfg_log_verbose : \
51 (phba)->cfg_log_verbose; \
52 if (((mask) & log_verbose) || (level[1] <= '3')) \
48 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ 53 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
49 fmt, phba->brd_no, ##arg); } \ 54 fmt, phba->brd_no, ##arg); \
50 } while (0) 55 } \
56} while (0)
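Both macros above gate a message on a 32-bit class mask tested against the port's cfg_log_verbose, with severities at err or above (level digit <= '3') always passing; the reworked lpfc_printf_log additionally falls back to the HBA-level mask while phba->pport is still NULL, as during early probe before the SCSI host is created. The gating test, reduced to a runnable sketch; the "<6>"/"<3>" strings mimic the KERN_INFO/KERN_ERR prefixes of this kernel era:

#include <stdio.h>
#include <stdint.h>

#define LOG_INIT 0x00000008
#define LOG_FCP  0x00000040
#define LOG_SLI  0x00000800

/* Mirrors the verbose-mask test in lpfc_printf_log: emit when the
 * message's class intersects the configured mask, or unconditionally
 * at err severity and above (level[1] <= '3'). */
static void log_if(uint32_t cfg_log_verbose, uint32_t mask,
                   const char *level, const char *msg)
{
        if ((mask & cfg_log_verbose) || (level[1] <= '3'))
                printf("%s %s\n", level, msg);
}

int main(void)
{
        uint32_t verbose = LOG_INIT | LOG_SLI;
        log_if(verbose, LOG_INIT, "<6>", "init event");     /* printed    */
        log_if(verbose, LOG_FCP,  "<6>", "fcp traffic");    /* suppressed */
        log_if(0,       LOG_FCP,  "<3>", "error: always");  /* printed    */
        return 0;
}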
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 134fc7fc212..b9b451c0901 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28 28
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -39,6 +41,44 @@
39#include "lpfc_compat.h" 41#include "lpfc_compat.h"
40 42
41/** 43/**
44 * lpfc_dump_static_vport - Dump HBA's static vport information.
45 * @phba: pointer to lpfc hba data structure.
46 * @pmb: pointer to the driver internal queue element for mailbox command.
47 * @offset: offset for dumping vport info.
48 *
49 * The dump mailbox command provides a method for the device driver to obtain
50 * various types of information from the HBA device.
51 *
52 * This routine prepares the mailbox command for dumping list of static
53 * vports to be created.
54 **/
55void
56lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
57 uint16_t offset)
58{
59 MAILBOX_t *mb;
60 void *ctx;
61
62 mb = &pmb->u.mb;
63 ctx = pmb->context2;
64
65 /* Setup to dump vport info region */
66 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
67 mb->mbxCommand = MBX_DUMP_MEMORY;
68 mb->un.varDmp.cv = 1;
69 mb->un.varDmp.type = DMP_NV_PARAMS;
70 mb->un.varDmp.entry_index = offset;
71 mb->un.varDmp.region_id = DMP_REGION_VPORT;
72 mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
73 mb->un.varDmp.co = 0;
74 mb->un.varDmp.resp_offset = 0;
75 pmb->context2 = ctx;
76 mb->mbxOwner = OWN_HOST;
77
78 return;
79}
80
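lpfc_dump_static_vport() above uses a pattern that recurs throughout lpfc_mbox.c (see the lpfc_dump_mem and lpfc_dump_wakeup_param hunks below): stash the one field that must survive, memset the whole mailbox to a known-clean state, rebuild the command, then put the stashed field back. Its essence, with illustrative types:

#include <stdio.h>
#include <string.h>

struct mbox { int cmd; void *context2; };

/* Save context2, wipe the command block, rebuild, restore. */
static void prep_mbox(struct mbox *m, int cmd)
{
        void *ctx = m->context2;        /* the one field that must survive */
        memset(m, 0, sizeof(*m));       /* start from a known-clean block  */
        m->cmd = cmd;
        m->context2 = ctx;              /* put the caller's context back   */
}

int main(void)
{
        int token = 42;
        struct mbox m = { 0, &token };
        prep_mbox(&m, 7);               /* 7: arbitrary command code */
        printf("cmd=%d ctx=%d\n", m.cmd, *(int *)m.context2);
        return 0;
}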
81/**
42 * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory 82 * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory
43 * @phba: pointer to lpfc hba data structure. 83 * @phba: pointer to lpfc hba data structure.
44 * @pmb: pointer to the driver internal queue element for mailbox command. 84 * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -58,7 +98,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
58 MAILBOX_t *mb; 98 MAILBOX_t *mb;
59 void *ctx; 99 void *ctx;
60 100
61 mb = &pmb->mb; 101 mb = &pmb->u.mb;
62 ctx = pmb->context2; 102 ctx = pmb->context2;
63 103
64 /* Setup to dump VPD region */ 104 /* Setup to dump VPD region */
@@ -90,7 +130,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
90 MAILBOX_t *mb; 130 MAILBOX_t *mb;
91 void *ctx; 131 void *ctx;
92 132
93 mb = &pmb->mb; 133 mb = &pmb->u.mb;
94 /* Save context so that we can restore after memset */ 134 /* Save context so that we can restore after memset */
95 ctx = pmb->context2; 135 ctx = pmb->context2;
96 136
@@ -125,7 +165,7 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
125{ 165{
126 MAILBOX_t *mb; 166 MAILBOX_t *mb;
127 167
128 mb = &pmb->mb; 168 mb = &pmb->u.mb;
129 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 169 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
130 mb->mbxCommand = MBX_READ_NV; 170 mb->mbxCommand = MBX_READ_NV;
131 mb->mbxOwner = OWN_HOST; 171 mb->mbxOwner = OWN_HOST;
@@ -151,7 +191,7 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
151{ 191{
152 MAILBOX_t *mb; 192 MAILBOX_t *mb;
153 193
154 mb = &pmb->mb; 194 mb = &pmb->u.mb;
155 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 195 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
156 mb->mbxCommand = MBX_ASYNCEVT_ENABLE; 196 mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
157 mb->un.varCfgAsyncEvent.ring = ring; 197 mb->un.varCfgAsyncEvent.ring = ring;
@@ -177,7 +217,7 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
177{ 217{
178 MAILBOX_t *mb; 218 MAILBOX_t *mb;
179 219
180 mb = &pmb->mb; 220 mb = &pmb->u.mb;
181 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 221 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
182 mb->mbxCommand = MBX_HEARTBEAT; 222 mb->mbxCommand = MBX_HEARTBEAT;
183 mb->mbxOwner = OWN_HOST; 223 mb->mbxOwner = OWN_HOST;
@@ -211,7 +251,7 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
211 struct lpfc_sli *psli; 251 struct lpfc_sli *psli;
212 252
213 psli = &phba->sli; 253 psli = &phba->sli;
214 mb = &pmb->mb; 254 mb = &pmb->u.mb;
215 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 255 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
216 256
217 INIT_LIST_HEAD(&mp->list); 257 INIT_LIST_HEAD(&mp->list);
@@ -248,7 +288,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
248{ 288{
249 MAILBOX_t *mb; 289 MAILBOX_t *mb;
250 290
251 mb = &pmb->mb; 291 mb = &pmb->u.mb;
252 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 292 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
253 293
254 mb->un.varClearLA.eventTag = phba->fc_eventTag; 294 mb->un.varClearLA.eventTag = phba->fc_eventTag;
@@ -275,7 +315,7 @@ void
275lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 315lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
276{ 316{
277 struct lpfc_vport *vport = phba->pport; 317 struct lpfc_vport *vport = phba->pport;
278 MAILBOX_t *mb = &pmb->mb; 318 MAILBOX_t *mb = &pmb->u.mb;
279 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 319 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
280 320
281 /* NEW_FEATURE 321 /* NEW_FEATURE
@@ -321,7 +361,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
321int 361int
322lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 362lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
323{ 363{
324 MAILBOX_t *mb = &pmb->mb; 364 MAILBOX_t *mb = &pmb->u.mb;
325 uint32_t attentionConditions[2]; 365 uint32_t attentionConditions[2];
326 366
327 /* Sanity check */ 367 /* Sanity check */
@@ -405,7 +445,7 @@ lpfc_init_link(struct lpfc_hba * phba,
405 struct lpfc_sli *psli; 445 struct lpfc_sli *psli;
406 MAILBOX_t *mb; 446 MAILBOX_t *mb;
407 447
408 mb = &pmb->mb; 448 mb = &pmb->u.mb;
409 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 449 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
410 450
411 psli = &phba->sli; 451 psli = &phba->sli;
@@ -492,7 +532,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
492 struct lpfc_sli *psli; 532 struct lpfc_sli *psli;
493 533
494 psli = &phba->sli; 534 psli = &phba->sli;
495 mb = &pmb->mb; 535 mb = &pmb->u.mb;
496 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 536 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
497 537
498 mb->mbxOwner = OWN_HOST; 538 mb->mbxOwner = OWN_HOST;
@@ -515,7 +555,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
515 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); 555 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
516 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); 556 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
517 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); 557 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
518 mb->un.varRdSparm.vpi = vpi; 558 mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
519 559
520 /* save address for completion */ 560 /* save address for completion */
521 pmb->context1 = mp; 561 pmb->context1 = mp;
@@ -544,10 +584,12 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
544{ 584{
545 MAILBOX_t *mb; 585 MAILBOX_t *mb;
546 586
547 mb = &pmb->mb; 587 mb = &pmb->u.mb;
548 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 588 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
549 589
550 mb->un.varUnregDID.did = did; 590 mb->un.varUnregDID.did = did;
591 if (vpi != 0xffff)
592 vpi += phba->vpi_base;
551 mb->un.varUnregDID.vpi = vpi; 593 mb->un.varUnregDID.vpi = vpi;
552 594
553 mb->mbxCommand = MBX_UNREG_D_ID; 595 mb->mbxCommand = MBX_UNREG_D_ID;
@@ -573,7 +615,7 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
573{ 615{
574 MAILBOX_t *mb; 616 MAILBOX_t *mb;
575 617
576 mb = &pmb->mb; 618 mb = &pmb->u.mb;
577 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 619 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
578 620
579 mb->mbxCommand = MBX_READ_CONFIG; 621 mb->mbxCommand = MBX_READ_CONFIG;
@@ -598,7 +640,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
598{ 640{
599 MAILBOX_t *mb; 641 MAILBOX_t *mb;
600 642
601 mb = &pmb->mb; 643 mb = &pmb->u.mb;
602 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 644 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
603 645
604 mb->mbxCommand = MBX_READ_LNK_STAT; 646 mb->mbxCommand = MBX_READ_LNK_STAT;
@@ -607,7 +649,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
607} 649}
608 650
609/** 651/**
610 * lpfc_reg_login - Prepare a mailbox command for registering remote login 652 * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
611 * @phba: pointer to lpfc hba data structure. 653 * @phba: pointer to lpfc hba data structure.
612 * @vpi: virtual N_Port identifier. 654 * @vpi: virtual N_Port identifier.
613 * @did: remote port identifier. 655 * @did: remote port identifier.
@@ -631,17 +673,23 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
631 * 1 - DMA memory allocation failed 673 * 1 - DMA memory allocation failed
632 **/ 674 **/
633int 675int
634lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, 676lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
635 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag) 677 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
636{ 678{
637 MAILBOX_t *mb = &pmb->mb; 679 MAILBOX_t *mb = &pmb->u.mb;
638 uint8_t *sparam; 680 uint8_t *sparam;
639 struct lpfc_dmabuf *mp; 681 struct lpfc_dmabuf *mp;
640 682
641 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 683 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
642 684
643 mb->un.varRegLogin.rpi = 0; 685 mb->un.varRegLogin.rpi = 0;
644 mb->un.varRegLogin.vpi = vpi; 686 if (phba->sli_rev == LPFC_SLI_REV4) {
687 mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba);
688 if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
689 return 1;
690 }
691
692 mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
645 mb->un.varRegLogin.did = did; 693 mb->un.varRegLogin.did = did;
646 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */ 694 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
647 695
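With the SLI4 branch above, a return of 1 now also covers RPI-allocator exhaustion, not only DMA buffer failure. Callers are assumed to release the mailbox element on any non-zero return, roughly as in this sketch (the error label is hypothetical):

	if (lpfc_reg_rpi(phba, vport->vpi, did, (uint8_t *)sp, mbox, 0)) {
		/* RPI or DMA allocation failed; give the element back */
		mempool_free(mbox, phba->mbox_mem_pool);
		goto out;
	}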
@@ -697,15 +745,16 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
697{ 745{
698 MAILBOX_t *mb; 746 MAILBOX_t *mb;
699 747
700 mb = &pmb->mb; 748 mb = &pmb->u.mb;
701 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 749 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
702 750
703 mb->un.varUnregLogin.rpi = (uint16_t) rpi; 751 mb->un.varUnregLogin.rpi = (uint16_t) rpi;
704 mb->un.varUnregLogin.rsvd1 = 0; 752 mb->un.varUnregLogin.rsvd1 = 0;
705 mb->un.varUnregLogin.vpi = vpi; 753 mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
706 754
707 mb->mbxCommand = MBX_UNREG_LOGIN; 755 mb->mbxCommand = MBX_UNREG_LOGIN;
708 mb->mbxOwner = OWN_HOST; 756 mb->mbxOwner = OWN_HOST;
757
709 return; 758 return;
710} 759}
711 760
@@ -725,15 +774,15 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
725 * This routine prepares the mailbox command for registering a virtual N_Port. 774 * This routine prepares the mailbox command for registering a virtual N_Port.
726 **/ 775 **/
727void 776void
728lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid, 777lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
729 LPFC_MBOXQ_t *pmb)
730{ 778{
731 MAILBOX_t *mb = &pmb->mb; 779 MAILBOX_t *mb = &pmb->u.mb;
732 780
733 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 781 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
734 782
735 mb->un.varRegVpi.vpi = vpi; 783 mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
736 mb->un.varRegVpi.sid = sid; 784 mb->un.varRegVpi.sid = vport->fc_myDID;
785 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
737 786
738 mb->mbxCommand = MBX_REG_VPI; 787 mb->mbxCommand = MBX_REG_VPI;
739 mb->mbxOwner = OWN_HOST; 788 mb->mbxOwner = OWN_HOST;
@@ -760,10 +809,10 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
760void 809void
761lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) 810lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
762{ 811{
763 MAILBOX_t *mb = &pmb->mb; 812 MAILBOX_t *mb = &pmb->u.mb;
764 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 813 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
765 814
766 mb->un.varUnregVpi.vpi = vpi; 815 mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
767 816
768 mb->mbxCommand = MBX_UNREG_VPI; 817 mb->mbxCommand = MBX_UNREG_VPI;
769 mb->mbxOwner = OWN_HOST; 818 mb->mbxOwner = OWN_HOST;
@@ -852,7 +901,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
852void 901void
853lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 902lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
854{ 903{
855 MAILBOX_t *mb = &pmb->mb; 904 MAILBOX_t *mb = &pmb->u.mb;
856 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 905 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
857 mb->un.varRdRev.cv = 1; 906 mb->un.varRdRev.cv = 1;
858 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */ 907 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
@@ -945,7 +994,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
945 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb) 994 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
946{ 995{
947 int i; 996 int i;
948 MAILBOX_t *mb = &pmb->mb; 997 MAILBOX_t *mb = &pmb->u.mb;
949 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq; 998 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
950 999
951 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 1000 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -1020,7 +1069,7 @@ void
1020lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) 1069lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
1021{ 1070{
1022 int i; 1071 int i;
1023 MAILBOX_t *mb = &pmb->mb; 1072 MAILBOX_t *mb = &pmb->u.mb;
1024 struct lpfc_sli *psli; 1073 struct lpfc_sli *psli;
1025 struct lpfc_sli_ring *pring; 1074 struct lpfc_sli_ring *pring;
1026 1075
@@ -1075,7 +1124,7 @@ void
1075lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1124lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1076{ 1125{
1077 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr; 1126 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
1078 MAILBOX_t *mb = &pmb->mb; 1127 MAILBOX_t *mb = &pmb->u.mb;
1079 dma_addr_t pdma_addr; 1128 dma_addr_t pdma_addr;
1080 uint32_t bar_low, bar_high; 1129 uint32_t bar_low, bar_high;
1081 size_t offset; 1130 size_t offset;
@@ -1099,21 +1148,22 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1099 1148
1100 /* If HBA supports SLI=3 ask for it */ 1149 /* If HBA supports SLI=3 ask for it */
1101 1150
1102 if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { 1151 if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
1103 if (phba->cfg_enable_bg) 1152 if (phba->cfg_enable_bg)
1104 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ 1153 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
1154 mb->un.varCfgPort.cdss = 1; /* Configure Security */
1105 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ 1155 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
1106 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ 1156 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
1107 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */ 1157 mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
1108 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); 1158 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
1109 if (phba->max_vpi && phba->cfg_enable_npiv && 1159 if (phba->max_vpi && phba->cfg_enable_npiv &&
1110 phba->vpd.sli3Feat.cmv) { 1160 phba->vpd.sli3Feat.cmv) {
1111 mb->un.varCfgPort.max_vpi = phba->max_vpi; 1161 mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
1112 mb->un.varCfgPort.cmv = 1; 1162 mb->un.varCfgPort.cmv = 1;
1113 } else 1163 } else
1114 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0; 1164 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
1115 } else 1165 } else
1116 phba->sli_rev = 2; 1166 phba->sli_rev = LPFC_SLI_REV2;
1117 mb->un.varCfgPort.sli_mode = phba->sli_rev; 1167 mb->un.varCfgPort.sli_mode = phba->sli_rev;
1118 1168
1119 /* Now setup pcb */ 1169 /* Now setup pcb */
@@ -1245,7 +1295,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1245void 1295void
1246lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1296lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1247{ 1297{
1248 MAILBOX_t *mb = &pmb->mb; 1298 MAILBOX_t *mb = &pmb->u.mb;
1249 1299
1250 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 1300 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1251 mb->mbxCommand = MBX_KILL_BOARD; 1301 mb->mbxCommand = MBX_KILL_BOARD;
@@ -1305,29 +1355,98 @@ lpfc_mbox_get(struct lpfc_hba * phba)
1305} 1355}
1306 1356
1307/** 1357/**
1358 * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
1359 * @phba: pointer to lpfc hba data structure.
1360 * @mbq: pointer to the driver internal queue element for mailbox command.
1361 *
1362 * This routine puts the completed mailbox command into the mailbox command
1363 * complete list. This is the unlocked version of the routine. The mailbox
1364 * complete list is used by the driver worker thread to process mailbox
1365 * complete callback functions outside the driver interrupt handler.
1366 **/
1367void
1368__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1369{
1370 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
1371}
1372
1373/**
1308 * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list 1374 * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
1309 * @phba: pointer to lpfc hba data structure. 1375 * @phba: pointer to lpfc hba data structure.
1310 * @mbq: pointer to the driver internal queue element for mailbox command. 1376 * @mbq: pointer to the driver internal queue element for mailbox command.
1311 * 1377 *
1312 * This routine puts the completed mailbox command into the mailbox command 1378 * This routine puts the completed mailbox command into the mailbox command
1313 * complete list. This routine is called from driver interrupt handler 1379 * complete list. This is the locked version of the routine. The mailbox
1314 * context.The mailbox complete list is used by the driver worker thread 1380 * complete list is used by the driver worker thread to process mailbox
1315 * to process mailbox complete callback functions outside the driver interrupt 1381 * complete callback functions outside the driver interrupt handler.
1316 * handler.
1317 **/ 1382 **/
1318void 1383void
1319lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) 1384lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1320{ 1385{
1321 unsigned long iflag; 1386 unsigned long iflag;
1322 1387
1323 /* This function expects to be called from interrupt context */ 1388 /* This function expects to be called from interrupt context */
1324 spin_lock_irqsave(&phba->hbalock, iflag); 1389 spin_lock_irqsave(&phba->hbalock, iflag);
1325 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); 1390 __lpfc_mbox_cmpl_put(phba, mbq);
1326 spin_unlock_irqrestore(&phba->hbalock, iflag); 1391 spin_unlock_irqrestore(&phba->hbalock, iflag);
1327 return; 1392 return;
1328} 1393}
1329 1394
1330/** 1395/**
1396 * lpfc_mbox_cmd_check - Check the validity of a mailbox command
1397 * @phba: pointer to lpfc hba data structure.
1398 * @mboxq: pointer to the driver internal queue element for mailbox command.
1399 *
1400 * This routine checks whether a mailbox command is valid to be issued.
1401 * This check is performed by the mailbox issue APIs before a client's
1402 * mailbox command is posted to the mailbox transport.
1403 *
1404 * Return 0 - pass the check, -ENODEV - fail the check
1405 **/
1406int
1407lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1408{
1409	/* Mailbox commands that have a completion handler must also have a
1410 * vport specified.
1411 */
1412 if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
1413 mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
1414 if (!mboxq->vport) {
1415 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1416 "1814 Mbox x%x failed, no vport\n",
1417 mboxq->u.mb.mbxCommand);
1418 dump_stack();
1419 return -ENODEV;
1420 }
1421 }
1422 return 0;
1423}
1424
1425/**
1426 * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
1427 * @phba: pointer to lpfc hba data structure.
1428 *
1429 * This routine checks whether the HBA device is ready for posting a
1430 * mailbox command. It is used by the mailbox transport API when it is
1431 * about to post a mailbox command to the device.
1432 *
1433 * Return 0 - pass the check, -ENODEV - fail the check
1434 **/
1435int
1436lpfc_mbox_dev_check(struct lpfc_hba *phba)
1437{
1438 /* If the PCI channel is in offline state, do not issue mbox */
1439 if (unlikely(pci_channel_offline(phba->pcidev)))
1440 return -ENODEV;
1441
1442 /* If the HBA is in error state, do not issue mbox */
1443 if (phba->link_state == LPFC_HBA_ERROR)
1444 return -ENODEV;
1445
1446 return 0;
1447}
1448
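A hedged sketch of how an issue path might gate a post on both new helpers; the helper name is hypothetical, and the real mailbox issue APIs in lpfc_sli.c are assumed to perform an equivalent sequence:

	static int lpfc_mbox_may_post(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
	{
		int rc;

		rc = lpfc_mbox_dev_check(phba);	/* PCI channel and HBA state */
		if (rc)
			return rc;
		return lpfc_mbox_cmd_check(phba, mboxq); /* cmpl needs a vport */
	}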
1449/**
1331 * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value 1450 * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
1332 * @phba: pointer to lpfc hba data structure. 1451 * @phba: pointer to lpfc hba data structure.
1333 * @cmd: mailbox command code. 1452 * @cmd: mailbox command code.
@@ -1350,6 +1469,475 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
1350 case MBX_WRITE_WWN: /* 0x98 */ 1469 case MBX_WRITE_WWN: /* 0x98 */
1351 case MBX_LOAD_EXP_ROM: /* 0x9C */ 1470 case MBX_LOAD_EXP_ROM: /* 0x9C */
1352 return LPFC_MBOX_TMO_FLASH_CMD; 1471 return LPFC_MBOX_TMO_FLASH_CMD;
1472 case MBX_SLI4_CONFIG: /* 0x9b */
1473 return LPFC_MBOX_SLI4_CONFIG_TMO;
1353 } 1474 }
1354 return LPFC_MBOX_TMO; 1475 return LPFC_MBOX_TMO;
1355} 1476}
1477
1478/**
1479 * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command
1480 * @mbox: pointer to lpfc mbox command.
1481 * @sgentry: sge entry index.
1482 * @phyaddr: physical address for the sge
1483 * @length: Length of the sge.
1484 *
1485 * This routine sets up an entry in the non-embedded mailbox command at the sge
1486 * index location.
1487 **/
1488void
1489lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
1490 dma_addr_t phyaddr, uint32_t length)
1491{
1492 struct lpfc_mbx_nembed_cmd *nembed_sge;
1493
1494 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1495 &mbox->u.mqe.un.nembed_cmd;
1496 nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
1497 nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
1498 nembed_sge->sge[sgentry].length = length;
1499}
1500
1501/**
1502 * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
1503 * @mbox: pointer to lpfc mbox command.
1504 * @sgentry: sge entry index.
1505 *
1506 * This routine gets an entry from the non-embedded mailbox command at the sge
1507 * index location.
1508 **/
1509void
1510lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
1511 struct lpfc_mbx_sge *sge)
1512{
1513 struct lpfc_mbx_nembed_cmd *nembed_sge;
1514
1515 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1516 &mbox->u.mqe.un.nembed_cmd;
1517 sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
1518 sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
1519 sge->length = nembed_sge->sge[sgentry].length;
1520}
1521
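The two helpers are symmetric, as this round-trip sketch shows (mbox and phyaddr assumed in scope):

	struct lpfc_mbx_sge sge;

	lpfc_sli4_mbx_sge_set(mbox, 0, phyaddr, PAGE_SIZE);	/* store entry 0 */
	lpfc_sli4_mbx_sge_get(mbox, 0, &sge);			/* read it back */
	/* sge.pa_hi/pa_lo carry the split DMA address, sge.length the size */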
1522/**
1523 * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
1524 * @phba: pointer to lpfc hba data structure.
1525 * @mbox: pointer to lpfc mbox command.
1526 *
1527 * This routine frees an SLI4-specific mailbox command used to send an IOCTL command.
1528 **/
1529void
1530lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1531{
1532 struct lpfc_mbx_sli4_config *sli4_cfg;
1533 struct lpfc_mbx_sge sge;
1534 dma_addr_t phyaddr;
1535 uint32_t sgecount, sgentry;
1536
1537 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1538
1539 /* For embedded mbox command, just free the mbox command */
1540 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1541 mempool_free(mbox, phba->mbox_mem_pool);
1542 return;
1543 }
1544
1545 /* For non-embedded mbox command, we need to free the pages first */
1546 sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
1547 /* There is nothing we can do if there is no sge address array */
1548 if (unlikely(!mbox->sge_array)) {
1549 mempool_free(mbox, phba->mbox_mem_pool);
1550 return;
1551 }
1552 /* Each non-embedded DMA memory was allocated in the length of a page */
1553 for (sgentry = 0; sgentry < sgecount; sgentry++) {
1554 lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
1555 phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
1556 dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE,
1557 mbox->sge_array->addr[sgentry], phyaddr);
1558 }
1559 /* Free the sge address array memory */
1560 kfree(mbox->sge_array);
1561 /* Finally, free the mailbox command itself */
1562 mempool_free(mbox, phba->mbox_mem_pool);
1563}
1564
1565/**
1566 * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command
1567 * @phba: pointer to lpfc hba data structure.
1568 * @mbox: pointer to lpfc mbox command.
1569 * @subsystem: The sli4 config sub mailbox subsystem.
1570 * @opcode: The sli4 config sub mailbox command opcode.
1571 * @length: Length of the sli4 config mailbox command.
1572 *
1573 * This routine sets up the header fields of an SLI4-specific mailbox command
1574 * for sending an IOCTL command.
1575 *
1576 * Return: the actual length of the mbox command allocated (mostly useful
1577 * for non-embedded mailbox commands).
1578 **/
1579int
1580lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1581 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
1582{
1583 struct lpfc_mbx_sli4_config *sli4_config;
1584 union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
1585 uint32_t alloc_len;
1586 uint32_t resid_len;
1587 uint32_t pagen, pcount;
1588 void *viraddr;
1589 dma_addr_t phyaddr;
1590
1591 /* Set up SLI4 mailbox command header fields */
1592 memset(mbox, 0, sizeof(*mbox));
1593 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
1594
1595 /* Set up SLI4 ioctl command header fields */
1596 sli4_config = &mbox->u.mqe.un.sli4_config;
1597
1598 /* Setup for the embedded mbox command */
1599 if (emb) {
1600 /* Set up main header fields */
1601 bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
1602 sli4_config->header.cfg_mhdr.payload_length =
1603 LPFC_MBX_CMD_HDR_LENGTH + length;
1604 /* Set up sub-header fields following main header */
1605 bf_set(lpfc_mbox_hdr_opcode,
1606 &sli4_config->header.cfg_shdr.request, opcode);
1607 bf_set(lpfc_mbox_hdr_subsystem,
1608 &sli4_config->header.cfg_shdr.request, subsystem);
1609 sli4_config->header.cfg_shdr.request.request_length = length;
1610 return length;
1611 }
1612
1613	/* Setup for the non-embedded mbox command */
1614 pcount = (PAGE_ALIGN(length))/PAGE_SIZE;
1615 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
1616 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
1617 /* Allocate record for keeping SGE virtual addresses */
1618 mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1619 GFP_KERNEL);
1620 if (!mbox->sge_array)
1621 return 0;
1622
1623 for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
1624 /* The DMA memory is always allocated in the length of a
1625 * page even though the last SGE might not fill up to a
1626	 * page, so PAGE_SIZE is used as the a priori size for
1627 * the later DMA memory free.
1628 */
1629 viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE,
1630 &phyaddr, GFP_KERNEL);
1631	/* If the allocation fails, proceed with whatever we have */
1632 if (!viraddr)
1633 break;
1634 mbox->sge_array->addr[pagen] = viraddr;
1635 /* Keep the first page for later sub-header construction */
1636 if (pagen == 0)
1637 cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
1638 resid_len = length - alloc_len;
1639 if (resid_len > PAGE_SIZE) {
1640 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1641 PAGE_SIZE);
1642 alloc_len += PAGE_SIZE;
1643 } else {
1644 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1645 resid_len);
1646 alloc_len = length;
1647 }
1648 }
1649
1650 /* Set up main header fields in mailbox command */
1651 sli4_config->header.cfg_mhdr.payload_length = alloc_len;
1652 bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
1653
1654 /* Set up sub-header fields into the first page */
1655 if (pagen > 0) {
1656 bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
1657 bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
1658 cfg_shdr->request.request_length =
1659 alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
1660 }
1661	/* The sub-header is in DMA memory, which needs endian conversion */
1662 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
1663 sizeof(union lpfc_sli4_cfg_shdr));
1664
1665 return alloc_len;
1666}
1667
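A sketch of a non-embedded request; subsystem, opcode, and the payload length reqlen stand in for values defined in lpfc_hw4.h. A short return means the page allocation loop came up short, and the partially built command should be released with lpfc_sli4_mbox_cmd_free():

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	alloclen = lpfc_sli4_config(phba, mboxq, subsystem, opcode,
				    reqlen, 0);		/* 0 = non-embedded */
	if (alloclen < reqlen) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}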
1668/**
1669 * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
1670 * @phba: pointer to lpfc hba data structure.
1671 * @mbox: pointer to lpfc mbox command.
1672 *
1673 * This routine gets the opcode from an SLI4-specific mailbox command used
1674 * for sending an IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG
1675 * (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be
1676 * returned.
1677 **/
1678uint8_t
1679lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1680{
1681 struct lpfc_mbx_sli4_config *sli4_cfg;
1682 union lpfc_sli4_cfg_shdr *cfg_shdr;
1683
1684 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1685 return 0;
1686 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1687
1688 /* For embedded mbox command, get opcode from embedded sub-header*/
1689 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1690 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1691 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1692 }
1693
1694 /* For non-embedded mbox command, get opcode from first dma page */
1695 if (unlikely(!mbox->sge_array))
1696 return 0;
1697 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1698 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1699}
1700
1701/**
1702 * lpfc_request_features - Configure the SLI4 REQUEST_FEATURES mailbox
1703 * @mboxq: pointer to lpfc mbox command.
1704 *
1705 * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
1706 * mailbox command.
1707 **/
1708void
1709lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
1710{
1711 /* Set up SLI4 mailbox command header fields */
1712 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
1713 bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
1714
1715 /* Set up host requested features. */
1716 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
1717
1718 /* Virtual fabrics and FIPs are not supported yet. */
1719 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
1720
1721 /* Enable DIF (block guard) only if configured to do so. */
1722 if (phba->cfg_enable_bg)
1723 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
1724
1725 /* Enable NPIV only if configured to do so. */
1726 if (phba->max_vpi && phba->cfg_enable_npiv)
1727 bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
1728
1729 return;
1730}
1731
1732/**
1733 * lpfc_init_vfi - Initialize the INIT_VFI mailbox command
1734 * @mbox: pointer to lpfc mbox command to initialize.
1735 * @vport: Vport associated with the VF.
1736 *
1737 * This routine initializes @mbox to all zeros and then fills in the mailbox
1738 * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
1739 * in the context of an FCF. The driver issues this command to set up a VFI
1740 * before issuing a FLOGI to log in to the VSAN. The driver should also issue a
1741 * REG_VFI after a successful VSAN login.
1742 **/
1743void
1744lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
1745{
1746 struct lpfc_mbx_init_vfi *init_vfi;
1747
1748 memset(mbox, 0, sizeof(*mbox));
1749 init_vfi = &mbox->u.mqe.un.init_vfi;
1750 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
1751 bf_set(lpfc_init_vfi_vr, init_vfi, 1);
1752 bf_set(lpfc_init_vfi_vt, init_vfi, 1);
1753 bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
1754 bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
1755}
1756
1757/**
1758 * lpfc_reg_vfi - Initialize the REG_VFI mailbox command
1759 * @mbox: pointer to lpfc mbox command to initialize.
1760 * @vport: vport associated with the VF.
1761 * @phys: BDE DMA bus address used to send the service parameters to the HBA.
1762 *
1763 * This routine initializes @mbox to all zeros and then fills in the mailbox
1764 * fields from @vport, and uses @buf as a DMAable buffer to send the vport's
1765 * fc service parameters to the HBA for this VFI. REG_VFI configures virtual
1766 * fabrics identified by VFI in the context of an FCF.
1767 **/
1768void
1769lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1770{
1771 struct lpfc_mbx_reg_vfi *reg_vfi;
1772
1773 memset(mbox, 0, sizeof(*mbox));
1774 reg_vfi = &mbox->u.mqe.un.reg_vfi;
1775 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
1776 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
1777 bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
1778 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
1779 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
1780 reg_vfi->bde.addrHigh = putPaddrHigh(phys);
1781 reg_vfi->bde.addrLow = putPaddrLow(phys);
1782 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
1783 reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1784 bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
1785}
1786
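A sketch of the post-FLOGI step the comment above describes, assuming dmabuf already holds the vport's service parameters in DMA memory:

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
	if (lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT) == MBX_NOT_FINISHED)
		mempool_free(mboxq, phba->mbox_mem_pool);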
1787/**
1788 * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
1789 * @mbox: pointer to lpfc mbox command to initialize.
1790 * @vpi: VPI to be initialized.
1791 *
1792 * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
1793 * command to activate a virtual N_Port. The HBA assigns a MAC address to use
1794 * with the virtual N_Port. The SLI Host issues this command before issuing a
1795 * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
1796 * successful virtual NPort login.
1797 **/
1798void
1799lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi)
1800{
1801 memset(mbox, 0, sizeof(*mbox));
1802 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
1803 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi);
1804}
1805
1806/**
1807 * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
1808 * @mbox: pointer to lpfc mbox command to initialize.
1809 * @vfi: VFI to be unregistered.
1810 *
1811 * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
1812 * (logical NPort) into the inactive state. The SLI Host must have logged out
1813 * and unregistered all remote N_Ports to abort any activity on the virtual
1814 * fabric. The SLI Port posts the mailbox response after marking the virtual
1815 * fabric inactive.
1816 **/
1817void
1818lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi)
1819{
1820 memset(mbox, 0, sizeof(*mbox));
1821 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
1822 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi);
1823}
1824
1825/**
1826 * lpfc_dump_fcoe_param - Dump config region 23 to get FCoE parameters.
1827 * @phba: pointer to the lpfc hba data structure.
1828 * @mbox: pointer to lpfc mbox command to initialize.
1829 *
1830 * This function creates an SLI4 dump mailbox command to dump FCoE
1831 * parameters stored in region 23.
1832 **/
1833int
1834lpfc_dump_fcoe_param(struct lpfc_hba *phba,
1835 struct lpfcMboxq *mbox)
1836{
1837 struct lpfc_dmabuf *mp = NULL;
1838 MAILBOX_t *mb;
1839
1840 memset(mbox, 0, sizeof(*mbox));
1841 mb = &mbox->u.mb;
1842
1843 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1844 if (mp)
1845 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1846
1847 if (!mp || !mp->virt) {
1848 kfree(mp);
1849 /* dump_fcoe_param failed to allocate memory */
1850 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
1851 "2569 lpfc_dump_fcoe_param: memory"
1852 " allocation failed \n");
1853 return 1;
1854 }
1855
1856 memset(mp->virt, 0, LPFC_BPL_SIZE);
1857 INIT_LIST_HEAD(&mp->list);
1858
1859 /* save address for completion */
1860 mbox->context1 = (uint8_t *) mp;
1861
1862 mb->mbxCommand = MBX_DUMP_MEMORY;
1863 mb->un.varDmp.type = DMP_NV_PARAMS;
1864 mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM;
1865 mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE;
1866 mb->un.varWords[3] = putPaddrLow(mp->phys);
1867 mb->un.varWords[4] = putPaddrHigh(mp->phys);
1868 return 0;
1869}
1870
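Since the routine parks the DMA buffer in context1, a completion handler (name hypothetical) would be expected to unpack and release it roughly like this:

	static void lpfc_dump_fcoe_param_cmpl(struct lpfc_hba *phba,
					      LPFC_MBOXQ_t *mboxq)
	{
		struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mboxq->context1;

		if (!mboxq->u.mb.mbxStatus) {
			/* parse region-23 FCoE parameters from mp->virt here */
		}
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(mboxq, phba->mbox_mem_pool);
	}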
1871/**
1872 * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
1873 * @phba: pointer to the hba structure containing the FCF index and RQ ID.
1874 * @mbox: pointer to lpfc mbox command to initialize.
1875 *
1876 * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
1877 * SLI Host uses the command to activate an FCF after it has acquired FCF
1878 * information via a READ_FCF mailbox command. This mailbox command is also used
1879 * to indicate where received unsolicited frames from this FCF will be sent. By
1880 * default this routine will set up the FCF to forward all unsolicited frames
1881 * to the RQ ID passed in the @phba. This can be overridden by the caller for
1882 * more complicated setups.
1883 **/
1884void
1885lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1886{
1887 struct lpfc_mbx_reg_fcfi *reg_fcfi;
1888
1889 memset(mbox, 0, sizeof(*mbox));
1890 reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
1891 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
1892 bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
1893 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
1894 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
1895 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
1896 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx);
1897 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
1898 bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
1899 (~phba->fcf.addr_mode) & 0x3);
1900 if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
1901 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
1902 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id);
1903 }
1904}
1905
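The override the comment mentions amounts to re-setting one of the RQ-ID fields after the default setup; other_rq here is hypothetical:

	lpfc_reg_fcfi(phba, mboxq);
	/* steer a second class of unsolicited frames to another RQ */
	bf_set(lpfc_reg_fcfi_rq_id1, &mboxq->u.mqe.un.reg_fcfi,
	       other_rq->queue_id);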
1906/**
1907 * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
1908 * @mbox: pointer to lpfc mbox command to initialize.
1909 * @fcfi: FCFI to be unregistered.
1910 *
1911 * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
1912 * The SLI Host uses the command to inactivate an FCFI.
1913 **/
1914void
1915lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
1916{
1917 memset(mbox, 0, sizeof(*mbox));
1918 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
1919 bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
1920}
1921
1922/**
1923 * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
1924 * @mbox: pointer to lpfc mbox command to initialize.
1925 * @ndlp: The nodelist structure that describes the RPI to resume.
1926 *
1927 * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
1928 * link event.
1929 **/
1930void
1931lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
1932{
1933 struct lpfc_mbx_resume_rpi *resume_rpi;
1934
1935 memset(mbox, 0, sizeof(*mbox));
1936 resume_rpi = &mbox->u.mqe.un.resume_rpi;
1937 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
1938 bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi);
1939 bf_set(lpfc_resume_rpi_vpi, resume_rpi,
1940 ndlp->vport->vpi + ndlp->vport->phba->vpi_base);
1941 bf_set(lpfc_resume_rpi_vfi, resume_rpi,
1942 ndlp->vport->vfi + ndlp->vport->phba->vfi_base);
1943}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 35a97673339..e198c917c13 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28 28
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -45,7 +47,7 @@
45 * @phba: HBA to allocate pools for 47 * @phba: HBA to allocate pools for
46 * 48 *
47 * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool, 49 * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
48 * lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools 50 * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
49 * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. 51 * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
50 * 52 *
51 * Notes: Not interrupt-safe. Must be called with no locks held. If any 53 * Notes: Not interrupt-safe. Must be called with no locks held. If any
@@ -56,19 +58,30 @@
56 * -ENOMEM on failure (if any memory allocations fail) 58 * -ENOMEM on failure (if any memory allocations fail)
57 **/ 59 **/
58int 60int
59lpfc_mem_alloc(struct lpfc_hba * phba) 61lpfc_mem_alloc(struct lpfc_hba *phba, int align)
60{ 62{
61 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 63 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
62 int longs; 64 int longs;
63 int i; 65 int i;
64 66
65 phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", 67 if (phba->sli_rev == LPFC_SLI_REV4)
66 phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0); 68 phba->lpfc_scsi_dma_buf_pool =
69 pci_pool_create("lpfc_scsi_dma_buf_pool",
70 phba->pcidev,
71 phba->cfg_sg_dma_buf_size,
72 phba->cfg_sg_dma_buf_size,
73 0);
74 else
75 phba->lpfc_scsi_dma_buf_pool =
76 pci_pool_create("lpfc_scsi_dma_buf_pool",
77 phba->pcidev, phba->cfg_sg_dma_buf_size,
78 align, 0);
67 if (!phba->lpfc_scsi_dma_buf_pool) 79 if (!phba->lpfc_scsi_dma_buf_pool)
68 goto fail; 80 goto fail;
69 81
70 phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev, 82 phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
71 LPFC_BPL_SIZE, 8,0); 83 LPFC_BPL_SIZE,
84 align, 0);
72 if (!phba->lpfc_mbuf_pool) 85 if (!phba->lpfc_mbuf_pool)
73 goto fail_free_dma_buf_pool; 86 goto fail_free_dma_buf_pool;
74 87
@@ -97,23 +110,31 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
97 sizeof(struct lpfc_nodelist)); 110 sizeof(struct lpfc_nodelist));
98 if (!phba->nlp_mem_pool) 111 if (!phba->nlp_mem_pool)
99 goto fail_free_mbox_pool; 112 goto fail_free_mbox_pool;
100 113 phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
101 phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev, 114 phba->pcidev,
102 LPFC_BPL_SIZE, 8, 0); 115 LPFC_HDR_BUF_SIZE, align, 0);
103 if (!phba->lpfc_hbq_pool) 116 if (!phba->lpfc_hrb_pool)
104 goto fail_free_nlp_mem_pool; 117 goto fail_free_nlp_mem_pool;
118 phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
119 phba->pcidev,
120 LPFC_DATA_BUF_SIZE, align, 0);
121 if (!phba->lpfc_drb_pool)
122 goto fail_free_hbq_pool;
105 123
106 /* vpi zero is reserved for the physical port so add 1 to max */ 124 /* vpi zero is reserved for the physical port so add 1 to max */
107 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG; 125 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
108 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); 126 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
109 if (!phba->vpi_bmask) 127 if (!phba->vpi_bmask)
110 goto fail_free_hbq_pool; 128 goto fail_free_dbq_pool;
111 129
112 return 0; 130 return 0;
113 131
132 fail_free_dbq_pool:
133 pci_pool_destroy(phba->lpfc_drb_pool);
134 phba->lpfc_drb_pool = NULL;
114 fail_free_hbq_pool: 135 fail_free_hbq_pool:
115 lpfc_sli_hbqbuf_free_all(phba); 136 pci_pool_destroy(phba->lpfc_hrb_pool);
116 pci_pool_destroy(phba->lpfc_hbq_pool); 137 phba->lpfc_hrb_pool = NULL;
117 fail_free_nlp_mem_pool: 138 fail_free_nlp_mem_pool:
118 mempool_destroy(phba->nlp_mem_pool); 139 mempool_destroy(phba->nlp_mem_pool);
119 phba->nlp_mem_pool = NULL; 140 phba->nlp_mem_pool = NULL;
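The new align argument lets the two SLI revisions request different buffer alignment for the pools above; a sketch of the assumed call sites (the 8-byte value matches the legacy hard-coded alignment being removed, the SLI4 value is illustrative):

	if (phba->sli_rev == LPFC_SLI_REV4)
		rc = lpfc_mem_alloc(phba, PAGE_SIZE);	/* page-align SLI4 pools */
	else
		rc = lpfc_mem_alloc(phba, 8);		/* legacy 8-byte alignment */
	if (rc)
		return -ENOMEM;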
@@ -136,27 +157,73 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
136} 157}
137 158
138/** 159/**
139 * lpfc_mem_free - Frees all PCI and memory allocated by lpfc_mem_alloc 160 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
140 * @phba: HBA to free memory for 161 * @phba: HBA to free memory for
141 * 162 *
142 * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, 163 * Description: Free the memory allocated by lpfc_mem_alloc routine. This
143 * lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and 164 * routine is the counterpart of lpfc_mem_alloc.
144 * lpfc_nodelist. Also frees the VPI bitmask
145 * 165 *
146 * Returns: None 166 * Returns: None
147 **/ 167 **/
148void 168void
149lpfc_mem_free(struct lpfc_hba * phba) 169lpfc_mem_free(struct lpfc_hba *phba)
150{ 170{
151 struct lpfc_sli *psli = &phba->sli;
152 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
153 LPFC_MBOXQ_t *mbox, *next_mbox;
154 struct lpfc_dmabuf *mp;
155 int i; 171 int i;
172 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
156 173
174 /* Free VPI bitmask memory */
157 kfree(phba->vpi_bmask); 175 kfree(phba->vpi_bmask);
176
177 /* Free HBQ pools */
158 lpfc_sli_hbqbuf_free_all(phba); 178 lpfc_sli_hbqbuf_free_all(phba);
179 pci_pool_destroy(phba->lpfc_drb_pool);
180 phba->lpfc_drb_pool = NULL;
181 pci_pool_destroy(phba->lpfc_hrb_pool);
182 phba->lpfc_hrb_pool = NULL;
183
184 /* Free NLP memory pool */
185 mempool_destroy(phba->nlp_mem_pool);
186 phba->nlp_mem_pool = NULL;
187
188 /* Free mbox memory pool */
189 mempool_destroy(phba->mbox_mem_pool);
190 phba->mbox_mem_pool = NULL;
191
192 /* Free MBUF memory pool */
193 for (i = 0; i < pool->current_count; i++)
194 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
195 pool->elements[i].phys);
196 kfree(pool->elements);
197
198 pci_pool_destroy(phba->lpfc_mbuf_pool);
199 phba->lpfc_mbuf_pool = NULL;
159 200
201 /* Free DMA buffer memory pool */
202 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
203 phba->lpfc_scsi_dma_buf_pool = NULL;
204
205 return;
206}
207
208/**
209 * lpfc_mem_free_all - Frees all PCI and driver memory
210 * @phba: HBA to free memory for
211 *
212 * Description: Frees memory from the PCI and driver memory pools in use:
213 * lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees
214 * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
215 * the VPI bitmask.
216 *
217 * Returns: None
218 **/
219void
220lpfc_mem_free_all(struct lpfc_hba *phba)
221{
222 struct lpfc_sli *psli = &phba->sli;
223 LPFC_MBOXQ_t *mbox, *next_mbox;
224 struct lpfc_dmabuf *mp;
225
226 /* Free memory used in mailbox queue back to mailbox memory pool */
160 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { 227 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
161 mp = (struct lpfc_dmabuf *) (mbox->context1); 228 mp = (struct lpfc_dmabuf *) (mbox->context1);
162 if (mp) { 229 if (mp) {
@@ -166,6 +233,7 @@ lpfc_mem_free(struct lpfc_hba * phba)
166 list_del(&mbox->list); 233 list_del(&mbox->list);
167 mempool_free(mbox, phba->mbox_mem_pool); 234 mempool_free(mbox, phba->mbox_mem_pool);
168 } 235 }
236 /* Free memory used in mailbox cmpl list back to mailbox memory pool */
169 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { 237 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
170 mp = (struct lpfc_dmabuf *) (mbox->context1); 238 mp = (struct lpfc_dmabuf *) (mbox->context1);
171 if (mp) { 239 if (mp) {
@@ -175,8 +243,10 @@ lpfc_mem_free(struct lpfc_hba * phba)
175 list_del(&mbox->list); 243 list_del(&mbox->list);
176 mempool_free(mbox, phba->mbox_mem_pool); 244 mempool_free(mbox, phba->mbox_mem_pool);
177 } 245 }
178 246 /* Free the active mailbox command back to the mailbox memory pool */
247 spin_lock_irq(&phba->hbalock);
179 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 248 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
249 spin_unlock_irq(&phba->hbalock);
180 if (psli->mbox_active) { 250 if (psli->mbox_active) {
181 mbox = psli->mbox_active; 251 mbox = psli->mbox_active;
182 mp = (struct lpfc_dmabuf *) (mbox->context1); 252 mp = (struct lpfc_dmabuf *) (mbox->context1);
@@ -188,27 +258,14 @@ lpfc_mem_free(struct lpfc_hba * phba)
188 psli->mbox_active = NULL; 258 psli->mbox_active = NULL;
189 } 259 }
190 260
191 for (i = 0; i < pool->current_count; i++) 261 /* Free and destroy all the allocated memory pools */
192 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, 262 lpfc_mem_free(phba);
193 pool->elements[i].phys);
194 kfree(pool->elements);
195
196 pci_pool_destroy(phba->lpfc_hbq_pool);
197 mempool_destroy(phba->nlp_mem_pool);
198 mempool_destroy(phba->mbox_mem_pool);
199
200 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
201 pci_pool_destroy(phba->lpfc_mbuf_pool);
202
203 phba->lpfc_hbq_pool = NULL;
204 phba->nlp_mem_pool = NULL;
205 phba->mbox_mem_pool = NULL;
206 phba->lpfc_scsi_dma_buf_pool = NULL;
207 phba->lpfc_mbuf_pool = NULL;
208 263
209 /* Free the iocb lookup array */ 264 /* Free the iocb lookup array */
210 kfree(psli->iocbq_lookup); 265 kfree(psli->iocbq_lookup);
211 psli->iocbq_lookup = NULL; 266 psli->iocbq_lookup = NULL;
267
268 return;
212} 269}
213 270
214/** 271/**
@@ -305,7 +362,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
305 * lpfc_els_hbq_alloc - Allocate an HBQ buffer 362 * lpfc_els_hbq_alloc - Allocate an HBQ buffer
306 * @phba: HBA to allocate HBQ buffer for 363 * @phba: HBA to allocate HBQ buffer for
307 * 364 *
308 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI 365 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI
309 * pool along with a non-DMA-mapped container for it. 366 * pool along with a non-DMA-mapped container for it.
310 * 367 *
311 * Notes: Not interrupt-safe. Must be called with no locks held. 368 * Notes: Not interrupt-safe. Must be called with no locks held.
@@ -323,7 +380,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
323 if (!hbqbp) 380 if (!hbqbp)
324 return NULL; 381 return NULL;
325 382
326 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, 383 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
327 &hbqbp->dbuf.phys); 384 &hbqbp->dbuf.phys);
328 if (!hbqbp->dbuf.virt) { 385 if (!hbqbp->dbuf.virt) {
329 kfree(hbqbp); 386 kfree(hbqbp);
@@ -334,7 +391,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
334} 391}
335 392
336/** 393/**
337 * lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc 394 * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
338 * @phba: HBA buffer was allocated for 395 * @phba: HBA buffer was allocated for
339 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc 396 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
340 * 397 *
@@ -348,12 +405,73 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
348void 405void
349lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) 406lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
350{ 407{
351 pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); 408 pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
352 kfree(hbqbp); 409 kfree(hbqbp);
353 return; 410 return;
354} 411}
355 412
356/** 413/**
414 * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
415 * @phba: HBA to allocate a receive buffer for
416 *
417 * Description: Allocates DMA-mapped header and data receive buffers from the
418 * lpfc_hrb_pool and lpfc_drb_pool PCI pools, plus a non-DMA-mapped container.
419 *
420 * Notes: Not interrupt-safe. Must be called with no locks held.
421 *
422 * Returns:
423 * pointer to HBQ on success
424 * NULL on failure
425 **/
426struct hbq_dmabuf *
427lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
428{
429 struct hbq_dmabuf *dma_buf;
430
431 dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
432 if (!dma_buf)
433 return NULL;
434
435 dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
436 &dma_buf->hbuf.phys);
437 if (!dma_buf->hbuf.virt) {
438 kfree(dma_buf);
439 return NULL;
440 }
441 dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
442 &dma_buf->dbuf.phys);
443 if (!dma_buf->dbuf.virt) {
444 pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
445 dma_buf->hbuf.phys);
446 kfree(dma_buf);
447 return NULL;
448 }
449 dma_buf->size = LPFC_BPL_SIZE;
450 return dma_buf;
451}
452
453/**
454 * lpfc_sli4_rb_free - Frees a receive buffer
455 * @phba: HBA buffer was allocated for
456 * @dmab: DMA buffer container returned by lpfc_sli4_rb_alloc
457 *
458 * Description: Frees both the container and the DMA-mapped buffers returned by
459 * lpfc_sli4_rb_alloc.
460 *
461 * Notes: Can be called with or without locks held.
462 *
463 * Returns: None
464 **/
465void
466lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
467{
468 pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
469 pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
470 kfree(dmab);
471 return;
472}
473
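Each SLI4 receive buffer pairs a header buffer from lpfc_hrb_pool with a data buffer from lpfc_drb_pool; a minimal alloc/post/free sketch, with the posting step elided:

	struct hbq_dmabuf *rb = lpfc_sli4_rb_alloc(phba);

	if (!rb)
		return -ENOMEM;
	/* ... post rb->hbuf.phys and rb->dbuf.phys to the header/data RQs ... */
	lpfc_sli4_rb_free(phba, rb);	/* frees both halves and the container */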
474/**
357 * lpfc_in_buf_free - Free a DMA buffer 475 * lpfc_in_buf_free - Free a DMA buffer
358 * @phba: HBA buffer is associated with 476 * @phba: HBA buffer is associated with
359 * @mp: Buffer to free 477 * @mp: Buffer to free
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 08cdc77af41..09f659f77bb 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
1 /******************************************************************* 1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -28,8 +28,10 @@
28#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
29#include <scsi/scsi_transport_fc.h> 29#include <scsi/scsi_transport_fc.h>
30 30
31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
32#include "lpfc_sli.h" 33#include "lpfc_sli.h"
34#include "lpfc_sli4.h"
33#include "lpfc_nl.h" 35#include "lpfc_nl.h"
34#include "lpfc_disc.h" 36#include "lpfc_disc.h"
35#include "lpfc_scsi.h" 37#include "lpfc_scsi.h"
@@ -361,7 +363,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
361 if (!mbox) 363 if (!mbox)
362 goto out; 364 goto out;
363 365
364 rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID, 366 rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
365 (uint8_t *) sp, mbox, 0); 367 (uint8_t *) sp, mbox, 0);
366 if (rc) { 368 if (rc) {
367 mempool_free(mbox, phba->mbox_mem_pool); 369 mempool_free(mbox, phba->mbox_mem_pool);
@@ -495,11 +497,19 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
495 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 497 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
496 else 498 else
497 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 499 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
500 if ((ndlp->nlp_type & NLP_FABRIC) &&
501 vport->port_type == LPFC_NPIV_PORT) {
502 lpfc_linkdown_port(vport);
503 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
504 spin_lock_irq(shost->host_lock);
505 ndlp->nlp_flag |= NLP_DELAY_TMO;
506 spin_unlock_irq(shost->host_lock);
498 507
499 if ((!(ndlp->nlp_type & NLP_FABRIC) && 508 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
500 ((ndlp->nlp_type & NLP_FCP_TARGET) || 509 } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
501 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || 510 ((ndlp->nlp_type & NLP_FCP_TARGET) ||
502 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 511 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
512 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
503 /* Only try to re-login if this is NOT a Fabric Node */ 513 /* Only try to re-login if this is NOT a Fabric Node */
504 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 514 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
505 spin_lock_irq(shost->host_lock); 515 spin_lock_irq(shost->host_lock);
@@ -567,7 +577,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
567{ 577{
568 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 578 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
569 579
570 if (!ndlp->nlp_rpi) { 580 if (!(ndlp->nlp_flag & NLP_RPI_VALID)) {
571 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 581 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
572 return 0; 582 return 0;
573 } 583 }
@@ -857,7 +867,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
857 867
858 lpfc_unreg_rpi(vport, ndlp); 868 lpfc_unreg_rpi(vport, ndlp);
859 869
860 if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID, 870 if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
861 (uint8_t *) sp, mbox, 0) == 0) { 871 (uint8_t *) sp, mbox, 0) == 0) {
862 switch (ndlp->nlp_DID) { 872 switch (ndlp->nlp_DID) {
863 case NameServer_DID: 873 case NameServer_DID:
@@ -1068,6 +1078,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1068 struct lpfc_iocbq *cmdiocb, *rspiocb; 1078 struct lpfc_iocbq *cmdiocb, *rspiocb;
1069 IOCB_t *irsp; 1079 IOCB_t *irsp;
1070 ADISC *ap; 1080 ADISC *ap;
1081 int rc;
1071 1082
1072 cmdiocb = (struct lpfc_iocbq *) arg; 1083 cmdiocb = (struct lpfc_iocbq *) arg;
1073 rspiocb = cmdiocb->context_un.rsp_iocb; 1084 rspiocb = cmdiocb->context_un.rsp_iocb;
@@ -1093,6 +1104,15 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1093 return ndlp->nlp_state; 1104 return ndlp->nlp_state;
1094 } 1105 }
1095 1106
1107 if (phba->sli_rev == LPFC_SLI_REV4) {
1108 rc = lpfc_sli4_resume_rpi(ndlp);
1109 if (rc) {
1110 /* Stay in state and retry. */
1111 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1112 return ndlp->nlp_state;
1113 }
1114 }
1115
1096 if (ndlp->nlp_type & NLP_FCP_TARGET) { 1116 if (ndlp->nlp_type & NLP_FCP_TARGET) {
1097 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1117 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1098 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); 1118 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
@@ -1100,6 +1120,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1100 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1120 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1101 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 1121 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1102 } 1122 }
1123
1103 return ndlp->nlp_state; 1124 return ndlp->nlp_state;
1104} 1125}
1105 1126
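
For SLI4 the completion handler above first resumes the RPI and, on failure, returns the current state so the ADISC completion is re-driven rather than advancing the node. A minimal sketch of that stay-in-state pattern, with hypothetical states and a toggle simulating the resume result (none of this is driver code):

#include <stdio.h>

enum state { ST_ADISC_ISSUE, ST_MAPPED };

static int resume_rpi_ok;		/* toggle to simulate failure */

static enum state adisc_cmpl(enum state cur)
{
	if (!resume_rpi_ok)
		return cur;		/* stay in state and retry */
	return ST_MAPPED;		/* success: advance the node */
}

int main(void)
{
	enum state s = ST_ADISC_ISSUE;

	s = adisc_cmpl(s);		/* first attempt fails */
	printf("after failure: %d\n", s);
	resume_rpi_ok = 1;
	s = adisc_cmpl(s);
	printf("after success: %d\n", s);
	return 0;
}
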
@@ -1190,7 +1211,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1190 1211
1191 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 1212 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1192 if ((mb = phba->sli.mbox_active)) { 1213 if ((mb = phba->sli.mbox_active)) {
1193 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1214 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1194 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1215 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1195 lpfc_nlp_put(ndlp); 1216 lpfc_nlp_put(ndlp);
1196 mb->context2 = NULL; 1217 mb->context2 = NULL;
@@ -1200,7 +1221,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1200 1221
1201 spin_lock_irq(&phba->hbalock); 1222 spin_lock_irq(&phba->hbalock);
1202 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1223 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1203 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1224 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1204 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1225 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1205 mp = (struct lpfc_dmabuf *) (mb->context1); 1226 mp = (struct lpfc_dmabuf *) (mb->context1);
1206 if (mp) { 1227 if (mp) {
@@ -1251,7 +1272,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1251{ 1272{
1252 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1273 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1253 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1274 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1254 MAILBOX_t *mb = &pmb->mb; 1275 MAILBOX_t *mb = &pmb->u.mb;
1255 uint32_t did = mb->un.varWords[1]; 1276 uint32_t did = mb->un.varWords[1];
1256 1277
1257 if (mb->mbxStatus) { 1278 if (mb->mbxStatus) {
@@ -1283,6 +1304,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1283 } 1304 }
1284 1305
1285 ndlp->nlp_rpi = mb->un.varWords[0]; 1306 ndlp->nlp_rpi = mb->un.varWords[0];
1307 ndlp->nlp_flag |= NLP_RPI_VALID;
1286 1308
1287 /* Only if we are not a fabric nport do we issue PRLI */ 1309 /* Only if we are not a fabric nport do we issue PRLI */
1288 if (!(ndlp->nlp_type & NLP_FABRIC)) { 1310 if (!(ndlp->nlp_type & NLP_FABRIC)) {
@@ -1878,11 +1900,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
1878 void *arg, uint32_t evt) 1900 void *arg, uint32_t evt)
1879{ 1901{
1880 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; 1902 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1881 MAILBOX_t *mb = &pmb->mb; 1903 MAILBOX_t *mb = &pmb->u.mb;
1882 1904
1883 if (!mb->mbxStatus) 1905 if (!mb->mbxStatus) {
1884 ndlp->nlp_rpi = mb->un.varWords[0]; 1906 ndlp->nlp_rpi = mb->un.varWords[0];
1885 else { 1907 ndlp->nlp_flag |= NLP_RPI_VALID;
1908 } else {
1886 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { 1909 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
1887 lpfc_drop_node(vport, ndlp); 1910 lpfc_drop_node(vport, ndlp);
1888 return NLP_STE_FREED_NODE; 1911 return NLP_STE_FREED_NODE;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 167b66dd34c..7991ba1980a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -31,8 +31,10 @@
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32 32
33#include "lpfc_version.h" 33#include "lpfc_version.h"
34#include "lpfc_hw4.h"
34#include "lpfc_hw.h" 35#include "lpfc_hw.h"
35#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
36#include "lpfc_nl.h" 38#include "lpfc_nl.h"
37#include "lpfc_disc.h" 39#include "lpfc_disc.h"
38#include "lpfc_scsi.h" 40#include "lpfc_scsi.h"
@@ -57,6 +59,8 @@ static char *dif_op_str[] = {
57 "SCSI_PROT_READ_CONVERT", 59 "SCSI_PROT_READ_CONVERT",
58 "SCSI_PROT_WRITE_CONVERT" 60 "SCSI_PROT_WRITE_CONVERT"
59}; 61};
62static void
63lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
60 64
61static void 65static void
62lpfc_debug_save_data(struct scsi_cmnd *cmnd) 66lpfc_debug_save_data(struct scsi_cmnd *cmnd)
@@ -325,7 +329,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
325 329
326 vports = lpfc_create_vport_work_array(phba); 330 vports = lpfc_create_vport_work_array(phba);
327 if (vports != NULL) 331 if (vports != NULL)
328 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 332 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
329 shost = lpfc_shost_from_vport(vports[i]); 333 shost = lpfc_shost_from_vport(vports[i]);
330 shost_for_each_device(sdev, shost) { 334 shost_for_each_device(sdev, shost) {
331 new_queue_depth = 335 new_queue_depth =
@@ -379,7 +383,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
379 383
380 vports = lpfc_create_vport_work_array(phba); 384 vports = lpfc_create_vport_work_array(phba);
381 if (vports != NULL) 385 if (vports != NULL)
382 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 386 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
383 shost = lpfc_shost_from_vport(vports[i]); 387 shost = lpfc_shost_from_vport(vports[i]);
384 shost_for_each_device(sdev, shost) { 388 shost_for_each_device(sdev, shost) {
385 if (vports[i]->cfg_lun_queue_depth <= 389 if (vports[i]->cfg_lun_queue_depth <=
@@ -427,7 +431,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
427 431
428 vports = lpfc_create_vport_work_array(phba); 432 vports = lpfc_create_vport_work_array(phba);
429 if (vports != NULL) 433 if (vports != NULL)
430 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 434 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
431 shost = lpfc_shost_from_vport(vports[i]); 435 shost = lpfc_shost_from_vport(vports[i]);
432 shost_for_each_device(sdev, shost) { 436 shost_for_each_device(sdev, shost) {
433 rport = starget_to_rport(scsi_target(sdev)); 437 rport = starget_to_rport(scsi_target(sdev));
@@ -438,22 +442,23 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
438} 442}
439 443
440/** 444/**
441 * lpfc_new_scsi_buf - Scsi buffer allocator 445 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
442 * @vport: The virtual port for which this call is being executed. 446 * @vport: The virtual port for which this call is being executed.
447 * @num_to_alloc: The requested number of buffers to allocate.
443 * 448 *
444 * This routine allocates a scsi buffer, which contains all the necessary 449 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
445 * information needed to initiate a SCSI I/O. The non-DMAable buffer region 450 * the scsi buffer contains all the necessary information needed to initiate
446 * contains information to build the IOCB. The DMAable region contains 451 * a SCSI I/O. The non-DMAable buffer region contains information to build
447 * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to 452 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
448 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL 453 * and the initial BPL. In addition to allocating memory, the FCP CMND and
449 * and the BPL BDE is setup in the IOCB. 454 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
450 * 455 *
451 * Return codes: 456 * Return codes:
452 * NULL - Error 457 * int - number of scsi buffers that were allocated.
453 * Pointer to lpfc_scsi_buf data structure - Success 458 * 0 = failure, less than num_to_alloc is a partial failure.
454 **/ 459 **/
455static struct lpfc_scsi_buf * 460static int
456lpfc_new_scsi_buf(struct lpfc_vport *vport) 461lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
457{ 462{
458 struct lpfc_hba *phba = vport->phba; 463 struct lpfc_hba *phba = vport->phba;
459 struct lpfc_scsi_buf *psb; 464 struct lpfc_scsi_buf *psb;
@@ -463,107 +468,401 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
463 dma_addr_t pdma_phys_fcp_rsp; 468 dma_addr_t pdma_phys_fcp_rsp;
464 dma_addr_t pdma_phys_bpl; 469 dma_addr_t pdma_phys_bpl;
465 uint16_t iotag; 470 uint16_t iotag;
471 int bcnt;
466 472
467 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 473 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
468 if (!psb) 474 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
469 return NULL; 475 if (!psb)
476 break;
477
478 /*
479 * Get memory from the pci pool to map the virt space to pci
480 * bus space for an I/O. The DMA buffer includes space for the
481 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
482 * necessary to support the sg_tablesize.
483 */
484 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
485 GFP_KERNEL, &psb->dma_handle);
486 if (!psb->data) {
487 kfree(psb);
488 break;
489 }
490
491 /* Initialize virtual ptrs to dma_buf region. */
492 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
493
494 /* Allocate iotag for psb->cur_iocbq. */
495 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
496 if (iotag == 0) {
497 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
498 psb->data, psb->dma_handle);
499 kfree(psb);
500 break;
501 }
502 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
503
504 psb->fcp_cmnd = psb->data;
505 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
506 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
507 sizeof(struct fcp_rsp);
508
509 /* Initialize local short-hand pointers. */
510 bpl = psb->fcp_bpl;
511 pdma_phys_fcp_cmd = psb->dma_handle;
512 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
513 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
514 sizeof(struct fcp_rsp);
515
516 /*
517 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
518 * are sg list bdes. Initialize the first two and leave the
519 * rest for queuecommand.
520 */
521 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
522 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
523 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
524 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
525 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
526
527 /* Setup the physical region for the FCP RSP */
528 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
529 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
530 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
531 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
532 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
533
534 /*
535 * Since the IOCB for the FCP I/O is built into this
536 * lpfc_scsi_buf, initialize it with all known data now.
537 */
538 iocb = &psb->cur_iocbq.iocb;
539 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
540 if ((phba->sli_rev == 3) &&
541 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
542 /* fill in immediate fcp command BDE */
543 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
544 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
545 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
546 unsli3.fcp_ext.icd);
547 iocb->un.fcpi64.bdl.addrHigh = 0;
548 iocb->ulpBdeCount = 0;
549 iocb->ulpLe = 0;
550 /* fill in response BDE */
551 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
552 BUFF_TYPE_BDE_64;
553 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
554 sizeof(struct fcp_rsp);
555 iocb->unsli3.fcp_ext.rbde.addrLow =
556 putPaddrLow(pdma_phys_fcp_rsp);
557 iocb->unsli3.fcp_ext.rbde.addrHigh =
558 putPaddrHigh(pdma_phys_fcp_rsp);
559 } else {
560 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
561 iocb->un.fcpi64.bdl.bdeSize =
562 (2 * sizeof(struct ulp_bde64));
563 iocb->un.fcpi64.bdl.addrLow =
564 putPaddrLow(pdma_phys_bpl);
565 iocb->un.fcpi64.bdl.addrHigh =
566 putPaddrHigh(pdma_phys_bpl);
567 iocb->ulpBdeCount = 1;
568 iocb->ulpLe = 1;
569 }
570 iocb->ulpClass = CLASS3;
571 psb->status = IOSTAT_SUCCESS;
572 /* Put it back into the SCSI buffer list */
573 lpfc_release_scsi_buf_s4(phba, psb);
470 574
471 /*
472 * Get memory from the pci pool to map the virt space to pci bus space
473 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
474 * struct fcp_rsp and the number of bde's necessary to support the
475 * sg_tablesize.
476 */
477 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
478 &psb->dma_handle);
479 if (!psb->data) {
480 kfree(psb);
481 return NULL;
482 } 575 }
483 576
484 /* Initialize virtual ptrs to dma_buf region. */ 577 return bcnt;
485 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 578}
486 579
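
The SLI-3 allocator above carves one pci_pool allocation into three regions addressed by offset, so the virtual pointers and the bus addresses advance in lockstep: fcp_cmnd at the front, fcp_rsp after it, then the BPL. A userspace sketch of that offset arithmetic, with invented sizes standing in for the real structures:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CMND_SZ 32	/* stand-in for sizeof(struct fcp_cmnd) */
#define RSP_SZ  64	/* stand-in for sizeof(struct fcp_rsp)  */
#define BUF_SZ  512	/* stand-in for cfg_sg_dma_buf_size     */

int main(void)
{
	uint8_t *data = calloc(1, BUF_SZ);
	uintptr_t dma = 0x1000;		/* pretend bus address */

	if (!data)
		return 1;
	/* SLI-3 layout: cmd, rsp, then BPL, carved from the front */
	uint8_t *cmnd = data;
	uint8_t *rsp  = data + CMND_SZ;
	uint8_t *bpl  = data + CMND_SZ + RSP_SZ;
	uintptr_t dma_rsp = dma + CMND_SZ;
	uintptr_t dma_bpl = dma + CMND_SZ + RSP_SZ;

	printf("cmnd=%p rsp=%p bpl=%p\n", (void *)cmnd, (void *)rsp, (void *)bpl);
	printf("dma_rsp=%#lx dma_bpl=%#lx\n",
	       (unsigned long)dma_rsp, (unsigned long)dma_bpl);
	free(data);
	return 0;
}
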
487 /* Allocate iotag for psb->cur_iocbq. */ 580/**
488 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 581 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
489 if (iotag == 0) { 582 * @phba: pointer to lpfc hba data structure.
490 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 583 * @axri: pointer to the fcp xri abort wcqe structure.
491 psb->data, psb->dma_handle); 584 *
492 kfree (psb); 585 * This routine is invoked by the worker thread to process a SLI4 fast-path
493 return NULL; 586 * FCP aborted xri.
587 **/
588void
589lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
590 struct sli4_wcqe_xri_aborted *axri)
591{
592 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
593 struct lpfc_scsi_buf *psb, *next_psb;
594 unsigned long iflag = 0;
595
596 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
597 list_for_each_entry_safe(psb, next_psb,
598 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
599 if (psb->cur_iocbq.sli4_xritag == xri) {
600 list_del(&psb->list);
601 psb->status = IOSTAT_SUCCESS;
602 spin_unlock_irqrestore(
603 &phba->sli4_hba.abts_scsi_buf_list_lock,
604 iflag);
605 lpfc_release_scsi_buf_s4(phba, psb);
606 return;
607 }
608 }
609 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
610 iflag);
611}
612
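
The abort handler above walks the quarantine list under a lock looking for the XRI named in the event, unlinks the match, and releases it. A simplified model of the unlink-by-tag walk, where a plain singly linked list stands in for the kernel's list_head and all names are invented:

#include <stdio.h>

struct qbuf { unsigned tag; struct qbuf *next; };

static struct qbuf *unlink_by_tag(struct qbuf **head, unsigned tag)
{
	for (struct qbuf **pp = head; *pp; pp = &(*pp)->next) {
		if ((*pp)->tag == tag) {
			struct qbuf *hit = *pp;
			*pp = hit->next;	/* unlink, keep the rest */
			return hit;
		}
	}
	return NULL;				/* event for an unknown tag */
}

int main(void)
{
	struct qbuf c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct qbuf *head = &a;

	printf("%s\n", unlink_by_tag(&head, 2) ? "released" : "missing");
	return 0;
}
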
613/**
614 * lpfc_sli4_repost_scsi_sgl_list - Repost the Scsi buffer sgl pages as a block
615 * @phba: pointer to lpfc hba data structure.
616 *
617 * This routine walks the list of scsi buffers that have been allocated and
618 * repost them to the HBA by using SGL block post. This is needed after a
619 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
620 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
621 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
622 *
623 * Returns: 0 = success, non-zero failure.
624 **/
625int
626lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
627{
628 struct lpfc_scsi_buf *psb;
629 int index, status, bcnt = 0, rcnt = 0, rc = 0;
630 LIST_HEAD(sblist);
631
632 for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
633 psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
634 if (psb) {
635 /* Remove from SCSI buffer list */
636 list_del(&psb->list);
637 /* Add it to a local SCSI buffer list */
638 list_add_tail(&psb->list, &sblist);
639 if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
640 bcnt = rcnt;
641 rcnt = 0;
642 }
643 } else
644 /* A hole present in the XRI array, need to skip */
645 bcnt = rcnt;
646
647 if (index == phba->sli4_hba.scsi_xri_cnt - 1)
648 /* End of XRI array for SCSI buffer, complete */
649 bcnt = rcnt;
650
651 /* Continue until we collect a nembed page worth of sgls */
652 if (bcnt == 0)
653 continue;
654 /* Now, post the SCSI buffer list sgls as a block */
655 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
656 /* Reset SCSI buffer count for next round of posting */
657 bcnt = 0;
658 while (!list_empty(&sblist)) {
659 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
660 list);
661 if (status) {
662 /* Put this back on the abort scsi list */
663 psb->status = IOSTAT_LOCAL_REJECT;
664 psb->result = IOERR_ABORT_REQUESTED;
665 rc++;
666 } else
667 psb->status = IOSTAT_SUCCESS;
668 /* Put it back into the SCSI buffer list */
669 lpfc_release_scsi_buf_s4(phba, psb);
670 }
494 } 671 }
495 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 672 return rc;
673}
496 674
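
The repost loop above batches SGLs into blocks no larger than the non-embedded mailbox limit, flushing early when it hits a hole in the XRI array or the end of the array, then drains each block entry by entry. A compact model of that flush logic, with an invented BLOCK limit and a sentinel value marking holes:

#include <stdio.h>

#define BLOCK 4		/* stand-in for the nembed SGL limit */

static int post_block(int *blk, int n)
{
	printf("posting %d entries starting at %d\n", n, blk[0]);
	return 0;	/* 0 = success, nonzero = reject all */
}

int main(void)
{
	int xris[] = { 10, 11, -1, 13, 14, 15, 16, 17 };	/* -1 marks a hole */
	int total = sizeof(xris) / sizeof(xris[0]);
	int blk[BLOCK], n = 0;

	for (int i = 0; i < total; i++) {
		if (xris[i] >= 0)
			blk[n++] = xris[i];
		/* flush on a full block, a hole, or the end of the array */
		if (n && (n == BLOCK || xris[i] < 0 || i == total - 1)) {
			post_block(blk, n);
			n = 0;
		}
	}
	return 0;
}
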
497 psb->fcp_cmnd = psb->data; 675/**
498 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); 676 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
499 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + 677 * @vport: The virtual port for which this call is being executed.
500 sizeof(struct fcp_rsp); 678 * @num_to_alloc: The requested number of buffers to allocate.
679 *
680 * This routine allocates a scsi buffer for device with SLI-4 interface spec,
681 * the scsi buffer contains all the necessary information needed to initiate
682 * a SCSI I/O.
683 *
684 * Return codes:
685 * int - number of scsi buffers that were allocated.
686 * 0 = failure, less than num_to_alloc is a partial failure.
687 **/
688static int
689lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
690{
691 struct lpfc_hba *phba = vport->phba;
692 struct lpfc_scsi_buf *psb;
693 struct sli4_sge *sgl;
694 IOCB_t *iocb;
695 dma_addr_t pdma_phys_fcp_cmd;
696 dma_addr_t pdma_phys_fcp_rsp;
697 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
698 uint16_t iotag, last_xritag = NO_XRI;
699 int status = 0, index;
700 int bcnt;
701 int non_sequential_xri = 0;
702 int rc = 0;
703 LIST_HEAD(sblist);
704
705 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
706 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
707 if (!psb)
708 break;
501 709
502 /* Initialize local short-hand pointers. */ 710 /*
503 bpl = psb->fcp_bpl; 711 * Get memory from the pci pool to map the virt space to pci bus
504 pdma_phys_fcp_cmd = psb->dma_handle; 712 * space for an I/O. The DMA buffer includes space for the
505 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); 713 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
506 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + 714 * necessary to support the sg_tablesize.
507 sizeof(struct fcp_rsp); 715 */
716 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
717 GFP_KERNEL, &psb->dma_handle);
718 if (!psb->data) {
719 kfree(psb);
720 break;
721 }
508 722
509 /* 723 /* Initialize virtual ptrs to dma_buf region. */
510 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg 724 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
511 * list bdes. Initialize the first two and leave the rest for
512 * queuecommand.
513 */
514 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
515 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
516 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
517 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
518 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
519
520 /* Setup the physical region for the FCP RSP */
521 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
522 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
523 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
524 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
525 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
526 725
527 /* 726 /* Allocate iotag for psb->cur_iocbq. */
528 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, 727 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
529 * initialize it with all known data now. 728 if (iotag == 0) {
530 */ 729 kfree(psb);
531 iocb = &psb->cur_iocbq.iocb; 730 break;
532 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 731 }
533 if ((phba->sli_rev == 3) && 732
534 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 733 psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
535 /* fill in immediate fcp command BDE */ 734 if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
536 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; 735 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
736 psb->data, psb->dma_handle);
737 kfree(psb);
738 break;
739 }
740 if (last_xritag != NO_XRI
741 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
742 non_sequential_xri = 1;
743 } else
744 list_add_tail(&psb->list, &sblist);
745 last_xritag = psb->cur_iocbq.sli4_xritag;
746
747 index = phba->sli4_hba.scsi_xri_cnt++;
748 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
749
750 psb->fcp_bpl = psb->data;
751 psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
752 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
753 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
754 sizeof(struct fcp_cmnd));
755
756 /* Initialize local short-hand pointers. */
757 sgl = (struct sli4_sge *)psb->fcp_bpl;
758 pdma_phys_bpl = psb->dma_handle;
759 pdma_phys_fcp_cmd =
760 (psb->dma_handle + phba->cfg_sg_dma_buf_size)
761 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
762 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
763
764 /*
765 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
766 * are sg list bdes. Initialize the first two and leave the
767 * rest for queuecommand.
768 */
769 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
770 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
771 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
772 bf_set(lpfc_sli4_sge_last, sgl, 0);
773 sgl->word2 = cpu_to_le32(sgl->word2);
774 sgl->word3 = cpu_to_le32(sgl->word3);
775 sgl++;
776
777 /* Setup the physical region for the FCP RSP */
778 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
779 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
780 bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
781 bf_set(lpfc_sli4_sge_last, sgl, 1);
782 sgl->word2 = cpu_to_le32(sgl->word2);
783 sgl->word3 = cpu_to_le32(sgl->word3);
784
785 /*
786 * Since the IOCB for the FCP I/O is built into this
787 * lpfc_scsi_buf, initialize it with all known data now.
788 */
789 iocb = &psb->cur_iocbq.iocb;
790 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
791 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
792 /* setting the BLP size to 2 * sizeof BDE may not be correct.
793 * We are setting the bpl to point to our sgl. An sgl's
794 * entries are 16 bytes; a bpl's entries are 12 bytes.
795 */
537 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); 796 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
538 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, 797 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
539 unsli3.fcp_ext.icd); 798 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
540 iocb->un.fcpi64.bdl.addrHigh = 0;
541 iocb->ulpBdeCount = 0;
542 iocb->ulpLe = 0;
543 /* fill in response BDE */
544 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
545 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
546 sizeof(struct fcp_rsp);
547 iocb->unsli3.fcp_ext.rbde.addrLow =
548 putPaddrLow(pdma_phys_fcp_rsp);
549 iocb->unsli3.fcp_ext.rbde.addrHigh =
550 putPaddrHigh(pdma_phys_fcp_rsp);
551 } else {
552 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
553 iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
554 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
555 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
556 iocb->ulpBdeCount = 1; 799 iocb->ulpBdeCount = 1;
557 iocb->ulpLe = 1; 800 iocb->ulpLe = 1;
801 iocb->ulpClass = CLASS3;
802 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
803 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
804 else
805 pdma_phys_bpl1 = 0;
806 psb->dma_phys_bpl = pdma_phys_bpl;
807 phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
808 if (non_sequential_xri) {
809 status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
810 pdma_phys_bpl1,
811 psb->cur_iocbq.sli4_xritag);
812 if (status) {
813 /* Put this back on the abort scsi list */
814 psb->status = IOSTAT_LOCAL_REJECT;
815 psb->result = IOERR_ABORT_REQUESTED;
816 rc++;
817 } else
818 psb->status = IOSTAT_SUCCESS;
819 /* Put it back into the SCSI buffer list */
820 lpfc_release_scsi_buf_s4(phba, psb);
821 break;
822 }
823 }
824 if (bcnt) {
825 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
826 /* Reset SCSI buffer count for next round of posting */
827 while (!list_empty(&sblist)) {
828 list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
829 list);
830 if (status) {
831 /* Put this back on the abort scsi list */
832 psb->status = IOSTAT_LOCAL_REJECT;
833 psb->result = IOERR_ABORT_REQUESTED;
834 rc++;
835 } else
836 psb->status = IOSTAT_SUCCESS;
837 /* Put it back into the SCSI buffer list */
838 lpfc_release_scsi_buf_s4(phba, psb);
839 }
558 } 840 }
559 iocb->ulpClass = CLASS3;
560 841
561 return psb; 842 return bcnt + non_sequential_xri - rc;
562} 843}
563 844
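
A buffer whose XRI is not contiguous with the previous one cannot join the pending block post, so the allocator above posts its SGL on its own. A sketch of the contiguity test, assuming made-up tag values:

#include <stdio.h>

int main(void)
{
	int tags[] = { 100, 101, 102, 200, 201 };
	int last = -1;

	for (unsigned i = 0; i < sizeof(tags) / sizeof(tags[0]); i++) {
		if (last >= 0 && tags[i] != last + 1)
			printf("%d: gap, post this sgl alone\n", tags[i]);
		else
			printf("%d: contiguous, add to block\n", tags[i]);
		last = tags[i];
	}
	return 0;
}
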
564/** 845/**
565 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba 846 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
566 * @phba: The Hba for which this call is being executed. 847 * @vport: The virtual port for which this call being executed.
848 * @num_to_alloc: The requested number of buffers to allocate.
849 *
850 * This routine wraps the actual SCSI buffer allocator function pointer from
851 * the lpfc_hba struct.
852 *
853 * Return codes:
854 * int - number of scsi buffers that were allocated.
855 * 0 = failure, less than num_to_alloc is a partial failure.
856 **/
857static inline int
858lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
859{
860 return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
861}
862
863/**
864 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
865 * @phba: The HBA for which this call is being executed.
567 * 866 *
568 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 867 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
569 * and returns to caller. 868 * and returns to caller.
@@ -591,7 +890,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
591} 890}
592 891
593/** 892/**
594 * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list 893 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
595 * @phba: The Hba for which this call is being executed. 894 * @phba: The Hba for which this call is being executed.
596 * @psb: The scsi buffer which is being released. 895 * @psb: The scsi buffer which is being released.
597 * 896 *
@@ -599,7 +898,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
599 * lpfc_scsi_buf_list list. 898 * lpfc_scsi_buf_list list.
600 **/ 899 **/
601static void 900static void
602lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 901lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
603{ 902{
604 unsigned long iflag = 0; 903 unsigned long iflag = 0;
605 904
@@ -610,21 +909,69 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
610} 909}
611 910
612/** 911/**
613 * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer 912 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
913 * @phba: The Hba for which this call is being executed.
914 * @psb: The scsi buffer which is being released.
915 *
916 * This routine releases @psb scsi buffer by adding it to tail of @phba
917 * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
918 * and cannot be reused for at least RA_TOV amount of time if it was
919 * aborted.
920 **/
921static void
922lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
923{
924 unsigned long iflag = 0;
925
926 if (psb->status == IOSTAT_LOCAL_REJECT
927 && psb->result == IOERR_ABORT_REQUESTED) {
928 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
929 iflag);
930 psb->pCmd = NULL;
931 list_add_tail(&psb->list,
932 &phba->sli4_hba.lpfc_abts_scsi_buf_list);
933 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
934 iflag);
935 } else {
936
937 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
938 psb->pCmd = NULL;
939 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
940 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
941 }
942}
943
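
The new _s4 release routine forks on completion status: aborted buffers park on a separate quarantine list, since their XRIs stay unusable for RA_TOV, while clean ones return to the free list, each under its own lock. A self-contained model of that split, with pthread mutexes in place of the kernel spinlocks and invented types:

#include <pthread.h>
#include <stddef.h>

struct buf { int aborted; struct buf *next; };

static struct buf *free_list, *abts_list;
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t abts_lock = PTHREAD_MUTEX_INITIALIZER;

static void release_buf(struct buf *b)
{
	if (b->aborted) {
		/* XRI still owned by hardware: quarantine the buffer */
		pthread_mutex_lock(&abts_lock);
		b->next = abts_list;
		abts_list = b;
		pthread_mutex_unlock(&abts_lock);
	} else {
		/* normal completion: straight back to the free list */
		pthread_mutex_lock(&free_lock);
		b->next = free_list;
		free_list = b;
		pthread_mutex_unlock(&free_lock);
	}
}

int main(void)
{
	struct buf b = { .aborted = 1, .next = NULL };

	release_buf(&b);	/* lands on abts_list, not free_list */
	return 0;
}
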
944/**
945 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
946 * @phba: The Hba for which this call is being executed.
947 * @psb: The scsi buffer which is being released.
948 *
949 * This routine releases @psb scsi buffer by adding it to tail of @phba
950 * lpfc_scsi_buf_list list.
951 **/
952static void
953lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
954{
955
956 phba->lpfc_release_scsi_buf(phba, psb);
957}
958
959/**
960 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
614 * @phba: The Hba for which this call is being executed. 961 * @phba: The Hba for which this call is being executed.
615 * @lpfc_cmd: The scsi buffer which is going to be mapped. 962 * @lpfc_cmd: The scsi buffer which is going to be mapped.
616 * 963 *
617 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 964 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
618 * field of @lpfc_cmd. This routine scans through sg elements and format the 965 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
619 * bdea. This routine also initializes all IOCB fields which are dependent on 966 * through sg elements and format the bdea. This routine also initializes all
620 * scsi command request buffer. 967 * IOCB fields which are dependent on scsi command request buffer.
621 * 968 *
622 * Return codes: 969 * Return codes:
623 * 1 - Error 970 * 1 - Error
624 * 0 - Success 971 * 0 - Success
625 **/ 972 **/
626static int 973static int
627lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 974lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
628{ 975{
629 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 976 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
630 struct scatterlist *sgel = NULL; 977 struct scatterlist *sgel = NULL;
@@ -827,8 +1174,8 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
827 * @reftag: out: ref tag (reference tag) 1174 * @reftag: out: ref tag (reference tag)
828 * 1175 *
829 * Description: 1176 * Description:
830 * Extract DIF paramters from the command if possible. Otherwise, 1177 * Extract DIF parameters from the command if possible. Otherwise,
831 * use default paratmers. 1178 * use default parameters.
832 * 1179 *
833 **/ 1180 **/
834static inline void 1181static inline void
@@ -1312,10 +1659,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1312 uint32_t bgstat = bgf->bgstat; 1659 uint32_t bgstat = bgf->bgstat;
1313 uint64_t failing_sector = 0; 1660 uint64_t failing_sector = 0;
1314 1661
1315 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx " 1662 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x "
1316 "bgstat=0x%x bghm=0x%x\n", 1663 "bgstat=0x%x bghm=0x%x\n",
1317 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), 1664 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1318 cmd->request->nr_sectors, bgstat, bghm); 1665 blk_rq_sectors(cmd->request), bgstat, bghm);
1319 1666
1320 spin_lock(&_dump_buf_lock); 1667 spin_lock(&_dump_buf_lock);
1321 if (!_dump_buf_done) { 1668 if (!_dump_buf_done) {
@@ -1412,6 +1759,133 @@ out:
1412} 1759}
1413 1760
1414/** 1761/**
1762 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
1763 * @phba: The Hba for which this call is being executed.
1764 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1765 *
1766 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
1767 * field of @lpfc_cmd for device with SLI-4 interface spec.
1768 *
1769 * Return codes:
1770 * 1 - Error
1771 * 0 - Success
1772 **/
1773static int
1774lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1775{
1776 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1777 struct scatterlist *sgel = NULL;
1778 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1779 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
1780 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1781 dma_addr_t physaddr;
1782 uint32_t num_bde = 0;
1783 uint32_t dma_len;
1784 uint32_t dma_offset = 0;
1785 int nseg;
1786
1787 /*
1788 * There are three possibilities here - use scatter-gather segment, use
1789 * the single mapping, or neither. Start the lpfc command prep by
1790 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1791 * data bde entry.
1792 */
1793 if (scsi_sg_count(scsi_cmnd)) {
1794 /*
1795 * The driver stores the segment count returned from pci_map_sg
1796 * because this is a count of dma-mappings used to map the use_sg
1797 * pages. They are not guaranteed to be the same for those
1798 * architectures that implement an IOMMU.
1799 */
1800
1801 nseg = scsi_dma_map(scsi_cmnd);
1802 if (unlikely(!nseg))
1803 return 1;
1804 sgl += 1;
1805 /* clear the last flag in the fcp_rsp map entry */
1806 sgl->word2 = le32_to_cpu(sgl->word2);
1807 bf_set(lpfc_sli4_sge_last, sgl, 0);
1808 sgl->word2 = cpu_to_le32(sgl->word2);
1809 sgl += 1;
1810
1811 lpfc_cmd->seg_cnt = nseg;
1812 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1813 printk(KERN_ERR "%s: Too many sg segments from "
1814 "dma_map_sg. Config %d, seg_cnt %d\n",
1815 __func__, phba->cfg_sg_seg_cnt,
1816 lpfc_cmd->seg_cnt);
1817 scsi_dma_unmap(scsi_cmnd);
1818 return 1;
1819 }
1820
1821 /*
1822 * The driver established a maximum scatter-gather segment count
1823 * during probe that limits the number of sg elements in any
1824 * single scsi command. Just run through the seg_cnt and format
1825 * the sge's.
1826 * When using SLI-3 the driver will try to fit all the BDEs into
1827 * the IOCB. If it can't then the BDEs get added to a BPL as it
1828 * does for SLI-2 mode.
1829 */
1830 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1831 physaddr = sg_dma_address(sgel);
1832 dma_len = sg_dma_len(sgel);
1833 bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
1834 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
1835 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
1836 if ((num_bde + 1) == nseg)
1837 bf_set(lpfc_sli4_sge_last, sgl, 1);
1838 else
1839 bf_set(lpfc_sli4_sge_last, sgl, 0);
1840 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1841 sgl->word2 = cpu_to_le32(sgl->word2);
1842 sgl->word3 = cpu_to_le32(sgl->word3);
1843 dma_offset += dma_len;
1844 sgl++;
1845 }
1846 } else {
1847 sgl += 1;
1848 /* clear the last flag in the fcp_rsp map entry */
1849 sgl->word2 = le32_to_cpu(sgl->word2);
1850 bf_set(lpfc_sli4_sge_last, sgl, 1);
1851 sgl->word2 = cpu_to_le32(sgl->word2);
1852 }
1853
1854 /*
1855 * Finish initializing those IOCB fields that are dependent on the
1856 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
1857 * explicitly reinitialized since all iocb memory
1858 * resources are reused.
1859 */
1860 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1861
1862 /*
1863 * Due to difference in data length between DIF/non-DIF paths,
1864 * we need to set word 4 of IOCB here
1865 */
1866 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1867 return 0;
1868}
1869
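
The SGE fill loop above writes one address/length entry per DMA segment and sets the "last" bit only on the final entry, which is how the hardware finds the end of the list. A minimal stand-alone version of that loop over an invented segment array (not the real sli4_sge layout):

#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t addr; uint32_t len; };
struct sge { uint64_t addr; uint32_t len; int last; };

int main(void)
{
	struct seg segs[] = { {0x1000, 512}, {0x2000, 512}, {0x3000, 256} };
	int nseg = sizeof(segs) / sizeof(segs[0]);
	struct sge sgl[8];

	for (int i = 0; i < nseg; i++) {
		sgl[i].addr = segs[i].addr;
		sgl[i].len  = segs[i].len;
		sgl[i].last = (i + 1 == nseg);	/* only the tail is "last" */
	}
	printf("tail last=%d\n", sgl[nseg - 1].last);
	return 0;
}
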
1870/**
1871 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
1872 * @phba: The Hba for which this call is being executed.
1873 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1874 *
1875 * This routine wraps the actual DMA mapping function pointer from the
1876 * lpfc_hba struct.
1877 *
1878 * Return codes:
1879 * 1 - Error
1880 * 0 - Success
1881 **/
1882static inline int
1883lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1884{
1885 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
1886}
1887
1888/**
1415 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error 1889 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
1416 * @phba: Pointer to hba context object. 1890 * @phba: Pointer to hba context object.
1417 * @vport: Pointer to vport object. 1891 * @vport: Pointer to vport object.
@@ -1504,15 +1978,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
1504} 1978}
1505 1979
1506/** 1980/**
1507 * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather 1981 * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev
1508 * @phba: The Hba for which this call is being executed. 1982 * @phba: The HBA for which this call is being executed.
1509 * @psb: The scsi buffer which is going to be un-mapped. 1983 * @psb: The scsi buffer which is going to be un-mapped.
1510 * 1984 *
1511 * This routine does DMA un-mapping of scatter gather list of scsi command 1985 * This routine does DMA un-mapping of scatter gather list of scsi command
1512 * field of @lpfc_cmd. 1986 * field of @lpfc_cmd for device with SLI-3 interface spec.
1513 **/ 1987 **/
1514static void 1988static void
1515lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 1989lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1516{ 1990{
1517 /* 1991 /*
1518 * There are only two special cases to consider. (1) the scsi command 1992 * There are only two special cases to consider. (1) the scsi command
@@ -1529,6 +2003,36 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
1529} 2003}
1530 2004
1531/** 2005/**
2006 * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev
2007 * @phba: The Hba for which this call is being executed.
2008 * @psb: The scsi buffer which is going to be un-mapped.
2009 *
2010 * This routine does DMA un-mapping of scatter gather list of scsi command
2011 * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to
2012 * remove the sgl for this scsi buffer then we will do it here. For now
2013 * we should be able to just call the sli3 unprep routine.
2014 **/
2015static void
2016lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2017{
2018 lpfc_scsi_unprep_dma_buf_s3(phba, psb);
2019}
2020
2021/**
2022 * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list
2023 * @phba: The Hba for which this call is being executed.
2024 * @psb: The scsi buffer which is going to be un-mapped.
2025 *
2026 * This routine wraps the actual DMA un-mapping function pointer from
2027 * the lpfc_hba struct.
2028 **/
2029static void
2030lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2031{
2032 phba->lpfc_scsi_unprep_dma_buf(phba, psb);
2033}
2034
2035/**
1532 * lpfc_handle_fcp_err - FCP response handler 2036 * lpfc_handle_fcp_err - FCP response handler
1533 * @vport: The virtual port for which this call is being executed. 2037 * @vport: The virtual port for which this call is being executed.
1534 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2038 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
@@ -1676,7 +2180,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1676 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine 2180 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
1677 * @phba: The Hba for which this call is being executed. 2181 * @phba: The Hba for which this call is being executed.
1678 * @pIocbIn: The command IOCBQ for the scsi cmnd. 2182 * @pIocbIn: The command IOCBQ for the scsi cmnd.
1679 * @pIocbOut: The response IOCBQ for the scsi cmnd . 2183 * @pIocbOut: The response IOCBQ for the scsi cmnd.
1680 * 2184 *
1681 * This routine assigns scsi command result by looking into response IOCB 2185 * This routine assigns scsi command result by looking into response IOCB
1682 * status field appropriately. This routine handles QUEUE FULL condition as 2186 * status field appropriately. This routine handles QUEUE FULL condition as
@@ -1957,16 +2461,16 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
1957} 2461}
1958 2462
1959/** 2463/**
1960 * lpfc_scsi_prep_cmnd - Routine to convert scsi cmnd to FCP information unit 2464 * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP info unit for SLI3 dev
1961 * @vport: The virtual port for which this call is being executed. 2465 * @vport: The virtual port for which this call is being executed.
1962 * @lpfc_cmd: The scsi command which needs to be sent. 2466 * @lpfc_cmd: The scsi command which needs to be sent.
1963 * @pnode: Pointer to lpfc_nodelist. 2467 * @pnode: Pointer to lpfc_nodelist.
1964 * 2468 *
1965 * This routine initializes fcp_cmnd and iocb data structure from scsi command 2469 * This routine initializes fcp_cmnd and iocb data structure from scsi command
1966 * to transfer. 2470 * to transfer for device with SLI3 interface spec.
1967 **/ 2471 **/
1968static void 2472static void
1969lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 2473lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1970 struct lpfc_nodelist *pnode) 2474 struct lpfc_nodelist *pnode)
1971{ 2475{
1972 struct lpfc_hba *phba = vport->phba; 2476 struct lpfc_hba *phba = vport->phba;
@@ -2013,8 +2517,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2013 if (scsi_sg_count(scsi_cmnd)) { 2517 if (scsi_sg_count(scsi_cmnd)) {
2014 if (datadir == DMA_TO_DEVICE) { 2518 if (datadir == DMA_TO_DEVICE) {
2015 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 2519 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
2016 iocb_cmd->un.fcpi.fcpi_parm = 0; 2520 if (phba->sli_rev < LPFC_SLI_REV4) {
2017 iocb_cmd->ulpPU = 0; 2521 iocb_cmd->un.fcpi.fcpi_parm = 0;
2522 iocb_cmd->ulpPU = 0;
2523 } else
2524 iocb_cmd->ulpPU = PARM_READ_CHECK;
2018 fcp_cmnd->fcpCntl3 = WRITE_DATA; 2525 fcp_cmnd->fcpCntl3 = WRITE_DATA;
2019 phba->fc4OutputRequests++; 2526 phba->fc4OutputRequests++;
2020 } else { 2527 } else {
@@ -2051,20 +2558,60 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2051} 2558}
2052 2559
2053/** 2560/**
2054 * lpfc_scsi_prep_task_mgmt_cmd - Convert scsi TM cmnd to FCP information unit 2561 * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP info unit for SLI4 dev
2562 * @vport: The virtual port for which this call is being executed.
2563 * @lpfc_cmd: The scsi command which needs to be sent.
2564 * @pnode: Pointer to lpfc_nodelist.
2565 *
2566 * This routine initializes fcp_cmnd and iocb data structure from scsi command
2567 * to transfer for device with SLI4 interface spec.
2568 **/
2569static void
2570lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2571 struct lpfc_nodelist *pnode)
2572{
2573 /*
2574 * The prep cmnd routines do not touch the sgl or its
2575 * entries. We may not have to do anything different.
2576 * I will leave this function in place until we can
2577 * run some IO through the driver and determine if changes
2578 * are needed.
2579 */
2580 return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode);
2581}
2582
2583/**
2584 * lpfc_scsi_prep_cmnd - Wrapper func to convert scsi cmnd to FCP info unit
2585 * @vport: The virtual port for which this call is being executed.
2586 * @lpfc_cmd: The scsi command which needs to be sent.
2587 * @pnode: Pointer to lpfc_nodelist.
2588 *
2589 * This routine wraps the actual SCSI cmnd conversion function pointer from
2590 * the lpfc_hba struct.
2591 **/
2592static inline void
2593lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2594 struct lpfc_nodelist *pnode)
2595{
2596 vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode);
2597}
2598
2599/**
2600 * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
2055 * @vport: The virtual port for which this call is being executed. 2601 * @vport: The virtual port for which this call is being executed.
2056 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2602 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2057 * @lun: Logical unit number. 2603 * @lun: Logical unit number.
2058 * @task_mgmt_cmd: SCSI task management command. 2604 * @task_mgmt_cmd: SCSI task management command.
2059 * 2605 *
2060 * This routine creates FCP information unit corresponding to @task_mgmt_cmd. 2606 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2607 * for device with SLI-3 interface spec.
2061 * 2608 *
2062 * Return codes: 2609 * Return codes:
2063 * 0 - Error 2610 * 0 - Error
2064 * 1 - Success 2611 * 1 - Success
2065 **/ 2612 **/
2066static int 2613static int
2067lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, 2614lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
2068 struct lpfc_scsi_buf *lpfc_cmd, 2615 struct lpfc_scsi_buf *lpfc_cmd,
2069 unsigned int lun, 2616 unsigned int lun,
2070 uint8_t task_mgmt_cmd) 2617 uint8_t task_mgmt_cmd)
@@ -2114,6 +2661,107 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2114} 2661}
2115 2662
2116/** 2663/**
2664 * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
2665 * @vport: The virtual port for which this call is being executed.
2666 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2667 * @lun: Logical unit number.
2668 * @task_mgmt_cmd: SCSI task management command.
2669 *
2670 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2671 * for device with SLI-4 interface spec.
2672 *
2673 * Return codes:
2674 * 0 - Error
2675 * 1 - Success
2676 **/
2677static int
2678lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
2679 struct lpfc_scsi_buf *lpfc_cmd,
2680 unsigned int lun,
2681 uint8_t task_mgmt_cmd)
2682{
2683 /*
2684 * The prep cmnd routines do not touch the sgl or its
2685 * entries. We may not have to do anything different.
2686 * I will leave this function in place until we can
2687 * run some IO through the driver and determine if changes
2688 * are needed.
2689 */
2690 return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun,
2691 task_mgmt_cmd);
2692}
2693
2694/**
2695 * lpfc_scsi_prep_task_mgmt_cmd - Wrapper func to convert scsi TM cmd to FCP info
2696 * @vport: The virtual port for which this call is being executed.
2697 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2698 * @lun: Logical unit number.
2699 * @task_mgmt_cmd: SCSI task management command.
2700 *
2701 * This routine wraps the actual SCSI TM to FCP information unit conversion
2702 * function pointer from the lpfc_hba struct.
2703 *
2704 * Return codes:
2705 * 0 - Error
2706 * 1 - Success
2707 **/
2708static inline int
2709lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2710 struct lpfc_scsi_buf *lpfc_cmd,
2711 unsigned int lun,
2712 uint8_t task_mgmt_cmd)
2713{
2714 struct lpfc_hba *phba = vport->phba;
2715
2716 return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
2717 task_mgmt_cmd);
2718}
2719
2720/**
2721 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
2722 * @phba: The hba struct for which this call is being executed.
2723 * @dev_grp: The HBA PCI-Device group number.
2724 *
2725 * This routine sets up the SCSI interface API function jump table in @phba
2726 * struct.
2727 * Returns: 0 - success, -ENODEV - failure.
2728 **/
2729int
2730lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2731{
2732
2733 switch (dev_grp) {
2734 case LPFC_PCI_DEV_LP:
2735 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
2736 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
2737 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3;
2738 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3;
2739 phba->lpfc_scsi_prep_task_mgmt_cmd =
2740 lpfc_scsi_prep_task_mgmt_cmd_s3;
2741 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
2742 break;
2743 case LPFC_PCI_DEV_OC:
2744 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
2745 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
2746 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4;
2747 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4;
2748 phba->lpfc_scsi_prep_task_mgmt_cmd =
2749 lpfc_scsi_prep_task_mgmt_cmd_s4;
2750 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
2751 break;
2752 default:
2753 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2754 "1418 Invalid HBA PCI-device group: 0x%x\n",
2755 dev_grp);
2756 return -ENODEV;
2757 break;
2758 }
2759 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2760 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
2761 return 0;
2762}
2763
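
lpfc_scsi_api_table_setup is the pivot of this patch: every SLI-rev-specific routine above is reached through a function pointer chosen once per device group, so the hot paths never test the revision. A toy illustration of the dispatch pattern follows; it is not the lpfc API, and every identifier is a stand-in:

#include <stdio.h>

struct hba;
typedef int (*new_buf_fn)(struct hba *, int);
struct hba { new_buf_fn new_buf; };

static int new_buf_s3(struct hba *h, int n) { (void)h; return printf("s3 allocates %d\n", n); }
static int new_buf_s4(struct hba *h, int n) { (void)h; return printf("s4 allocates %d\n", n); }

enum { DEV_LP, DEV_OC };	/* stand-ins for the PCI device groups */

static int api_table_setup(struct hba *h, int dev_grp)
{
	switch (dev_grp) {
	case DEV_LP: h->new_buf = new_buf_s3; break;
	case DEV_OC: h->new_buf = new_buf_s4; break;
	default:     return -1;	/* unknown device group */
	}
	return 0;
}

int main(void)
{
	struct hba h;

	if (api_table_setup(&h, DEV_OC))
		return 1;
	h.new_buf(&h, 16);	/* callers never test the SLI rev */
	return 0;
}
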
2764/**
2117 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command 2765 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
2118 * @phba: The Hba for which this call is being executed. 2766 * @phba: The Hba for which this call is being executed.
2119 * @cmdiocbq: Pointer to lpfc_iocbq data structure. 2767 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
@@ -2178,9 +2826,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
2178 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 2826 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2179 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n", 2827 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
2180 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); 2828 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
2181 status = lpfc_sli_issue_iocb_wait(phba, 2829 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
2182 &phba->sli.ring[phba->sli.fcp_ring], 2830 iocbq, iocbqrsp, lpfc_cmd->timeout);
2183 iocbq, iocbqrsp, lpfc_cmd->timeout);
2184 if (status != IOCB_SUCCESS) { 2831 if (status != IOCB_SUCCESS) {
2185 if (status == IOCB_TIMEDOUT) { 2832 if (status == IOCB_TIMEDOUT) {
2186 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 2833 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
@@ -2305,7 +2952,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2305 struct Scsi_Host *shost = cmnd->device->host; 2952 struct Scsi_Host *shost = cmnd->device->host;
2306 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2953 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2307 struct lpfc_hba *phba = vport->phba; 2954 struct lpfc_hba *phba = vport->phba;
2308 struct lpfc_sli *psli = &phba->sli;
2309 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 2955 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2310 struct lpfc_nodelist *ndlp = rdata->pnode; 2956 struct lpfc_nodelist *ndlp = rdata->pnode;
2311 struct lpfc_scsi_buf *lpfc_cmd; 2957 struct lpfc_scsi_buf *lpfc_cmd;
@@ -2378,15 +3024,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2378 if (cmnd->cmnd[0] == READ_10) 3024 if (cmnd->cmnd[0] == READ_10)
2379 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3025 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2380 "9035 BLKGRD: READ @ sector %llu, " 3026 "9035 BLKGRD: READ @ sector %llu, "
2381 "count %lu\n", 3027 "count %u\n",
2382 (unsigned long long)scsi_get_lba(cmnd), 3028 (unsigned long long)scsi_get_lba(cmnd),
2383 cmnd->request->nr_sectors); 3029 blk_rq_sectors(cmnd->request));
2384 else if (cmnd->cmnd[0] == WRITE_10) 3030 else if (cmnd->cmnd[0] == WRITE_10)
2385 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3031 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2386 "9036 BLKGRD: WRITE @ sector %llu, " 3032 "9036 BLKGRD: WRITE @ sector %llu, "
2387 "count %lu cmd=%p\n", 3033 "count %u cmd=%p\n",
2388 (unsigned long long)scsi_get_lba(cmnd), 3034 (unsigned long long)scsi_get_lba(cmnd),
2389 cmnd->request->nr_sectors, 3035 blk_rq_sectors(cmnd->request),
2390 cmnd); 3036 cmnd);
2391 3037
2392 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 3038 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -2406,15 +3052,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2406 if (cmnd->cmnd[0] == READ_10) 3052 if (cmnd->cmnd[0] == READ_10)
2407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3053 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2408 "9040 dbg: READ @ sector %llu, " 3054 "9040 dbg: READ @ sector %llu, "
2409 "count %lu\n", 3055 "count %u\n",
2410 (unsigned long long)scsi_get_lba(cmnd), 3056 (unsigned long long)scsi_get_lba(cmnd),
2411 cmnd->request->nr_sectors); 3057 blk_rq_sectors(cmnd->request));
2412 else if (cmnd->cmnd[0] == WRITE_10) 3058 else if (cmnd->cmnd[0] == WRITE_10)
2413 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3059 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2414 "9041 dbg: WRITE @ sector %llu, " 3060 "9041 dbg: WRITE @ sector %llu, "
2415 "count %lu cmd=%p\n", 3061 "count %u cmd=%p\n",
2416 (unsigned long long)scsi_get_lba(cmnd), 3062 (unsigned long long)scsi_get_lba(cmnd),
2417 cmnd->request->nr_sectors, cmnd); 3063 blk_rq_sectors(cmnd->request), cmnd);
2418 else 3064 else
2419 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3065 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2420 "9042 dbg: parser not implemented\n"); 3066 "9042 dbg: parser not implemented\n");
@@ -2427,7 +3073,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2427 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 3073 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
2428 3074
2429 atomic_inc(&ndlp->cmd_pending); 3075 atomic_inc(&ndlp->cmd_pending);
2430 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], 3076 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
2431 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 3077 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
2432 if (err) { 3078 if (err) {
2433 atomic_dec(&ndlp->cmd_pending); 3079 atomic_dec(&ndlp->cmd_pending);
@@ -2490,7 +3136,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2490 struct Scsi_Host *shost = cmnd->device->host; 3136 struct Scsi_Host *shost = cmnd->device->host;
2491 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3137 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2492 struct lpfc_hba *phba = vport->phba; 3138 struct lpfc_hba *phba = vport->phba;
2493 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
2494 struct lpfc_iocbq *iocb; 3139 struct lpfc_iocbq *iocb;
2495 struct lpfc_iocbq *abtsiocb; 3140 struct lpfc_iocbq *abtsiocb;
2496 struct lpfc_scsi_buf *lpfc_cmd; 3141 struct lpfc_scsi_buf *lpfc_cmd;
@@ -2531,7 +3176,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2531 icmd = &abtsiocb->iocb; 3176 icmd = &abtsiocb->iocb;
2532 icmd->un.acxri.abortType = ABORT_TYPE_ABTS; 3177 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
2533 icmd->un.acxri.abortContextTag = cmd->ulpContext; 3178 icmd->un.acxri.abortContextTag = cmd->ulpContext;
2534 icmd->un.acxri.abortIoTag = cmd->ulpIoTag; 3179 if (phba->sli_rev == LPFC_SLI_REV4)
3180 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
3181 else
3182 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
2535 3183
2536 icmd->ulpLe = 1; 3184 icmd->ulpLe = 1;
2537 icmd->ulpClass = cmd->ulpClass; 3185 icmd->ulpClass = cmd->ulpClass;
@@ -2542,7 +3190,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2542 3190
2543 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 3191 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2544 abtsiocb->vport = vport; 3192 abtsiocb->vport = vport;
2545 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) { 3193 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
3194 IOCB_ERROR) {
2546 lpfc_sli_release_iocbq(phba, abtsiocb); 3195 lpfc_sli_release_iocbq(phba, abtsiocb);
2547 ret = FAILED; 3196 ret = FAILED;
2548 goto out; 3197 goto out;
@@ -2668,8 +3317,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
2668 "0703 Issue target reset to TGT %d LUN %d " 3317 "0703 Issue target reset to TGT %d LUN %d "
2669 "rpi x%x nlp_flag x%x\n", cmnd->device->id, 3318 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
2670 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); 3319 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
2671 status = lpfc_sli_issue_iocb_wait(phba, 3320 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
2672 &phba->sli.ring[phba->sli.fcp_ring],
2673 iocbq, iocbqrsp, lpfc_cmd->timeout); 3321 iocbq, iocbqrsp, lpfc_cmd->timeout);
2674 if (status == IOCB_TIMEDOUT) { 3322 if (status == IOCB_TIMEDOUT) {
2675 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 3323 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
@@ -2825,11 +3473,10 @@ lpfc_slave_alloc(struct scsi_device *sdev)
2825{ 3473{
2826 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 3474 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
2827 struct lpfc_hba *phba = vport->phba; 3475 struct lpfc_hba *phba = vport->phba;
2828 struct lpfc_scsi_buf *scsi_buf = NULL;
2829 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 3476 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2830 uint32_t total = 0, i; 3477 uint32_t total = 0;
2831 uint32_t num_to_alloc = 0; 3478 uint32_t num_to_alloc = 0;
2832 unsigned long flags; 3479 int num_allocated = 0;
2833 3480
2834 if (!rport || fc_remote_port_chkready(rport)) 3481 if (!rport || fc_remote_port_chkready(rport))
2835 return -ENXIO; 3482 return -ENXIO;
@@ -2863,20 +3510,13 @@ lpfc_slave_alloc(struct scsi_device *sdev)
2863 (phba->cfg_hba_queue_depth - total)); 3510 (phba->cfg_hba_queue_depth - total));
2864 num_to_alloc = phba->cfg_hba_queue_depth - total; 3511 num_to_alloc = phba->cfg_hba_queue_depth - total;
2865 } 3512 }
2866 3513 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
2867 for (i = 0; i < num_to_alloc; i++) { 3514 if (num_to_alloc != num_allocated) {
2868 scsi_buf = lpfc_new_scsi_buf(vport); 3515 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2869 if (!scsi_buf) { 3516 "0708 Allocation request of %d "
2870 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3517 "command buffers did not succeed. "
2871 "0706 Failed to allocate " 3518 "Allocated %d buffers.\n",
2872 "command buffer\n"); 3519 num_to_alloc, num_allocated);
2873 break;
2874 }
2875
2876 spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
2877 phba->total_scsi_bufs++;
2878 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
2879 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
2880 } 3520 }
2881 return 0; 3521 return 0;
2882} 3522}
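The slave_alloc change above moves from a one-buffer-at-a-time loop to a single batched request: lpfc_new_scsi_buf() now takes a count and returns how many buffers it actually created, and the caller only logs a shortfall. A minimal userspace sketch of that contract (the allocator here is a stand-in, not the driver's):

#include <stdio.h>

/* Stand-in for lpfc_new_scsi_buf(): pretend the pool caps out at 4. */
static int new_bufs(int want)
{
	return want > 4 ? 4 : want;
}

static void alloc_cmd_bufs(int num_to_alloc)
{
	int num_allocated = new_bufs(num_to_alloc);

	if (num_allocated != num_to_alloc)
		fprintf(stderr, "wanted %d buffers, got %d\n",
			num_to_alloc, num_allocated);
}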
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index c7c440d5fa2..65dfc8bd5b4 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -140,6 +140,8 @@ struct lpfc_scsi_buf {
140 struct fcp_rsp *fcp_rsp; 140 struct fcp_rsp *fcp_rsp;
141 struct ulp_bde64 *fcp_bpl; 141 struct ulp_bde64 *fcp_bpl;
142 142
143 dma_addr_t dma_phys_bpl;
144
143 /* cur_iocbq has phys of the dma-able buffer. 145 /* cur_iocbq has phys of the dma-able buffer.
144 * Iotag is in here 146 * Iotag is in here
145 */ 147 */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index eb5c75c45ba..ff04daf18f4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -29,9 +29,12 @@
29#include <scsi/scsi_device.h> 29#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32#include <scsi/fc/fc_fs.h>
32 33
34#include "lpfc_hw4.h"
33#include "lpfc_hw.h" 35#include "lpfc_hw.h"
34#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
35#include "lpfc_nl.h" 38#include "lpfc_nl.h"
36#include "lpfc_disc.h" 39#include "lpfc_disc.h"
37#include "lpfc_scsi.h" 40#include "lpfc_scsi.h"
@@ -40,24 +43,7 @@
40#include "lpfc_logmsg.h" 43#include "lpfc_logmsg.h"
41#include "lpfc_compat.h" 44#include "lpfc_compat.h"
42#include "lpfc_debugfs.h" 45#include "lpfc_debugfs.h"
43 46#include "lpfc_vport.h"
44/*
45 * Define macro to log: Mailbox command x%x cannot issue Data
46 * This allows multiple uses of lpfc_msgBlk0311
47 * w/o perturbing log msg utility.
48 */
49#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
50 lpfc_printf_log(phba, \
51 KERN_INFO, \
52 LOG_MBOX | LOG_SLI, \
53 "(%d):0311 Mailbox command x%x cannot " \
54 "issue Data: x%x x%x x%x\n", \
55 pmbox->vport ? pmbox->vport->vpi : 0, \
56 pmbox->mb.mbxCommand, \
57 phba->pport->port_state, \
58 psli->sli_flag, \
59 flag)
60
61 47
62/* There are only four IOCB completion types. */ 48/* There are only four IOCB completion types. */
63typedef enum _lpfc_iocb_type { 49typedef enum _lpfc_iocb_type {
@@ -67,6 +53,350 @@ typedef enum _lpfc_iocb_type {
67 LPFC_ABORT_IOCB 53 LPFC_ABORT_IOCB
68} lpfc_iocb_type; 54} lpfc_iocb_type;
69 55
56
57/* Provide function prototypes local to this module. */
58static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
59 uint32_t);
60static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
61 uint8_t *, uint32_t *);
62
63static IOCB_t *
64lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
65{
66 return &iocbq->iocb;
67}
68
69/**
 70 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 71 * @q: The Work Queue to operate on.
 72 * @wqe: The Work Queue Entry to put on the Work Queue.
73 *
74 * This routine will copy the contents of @wqe to the next available entry on
75 * the @q. This function will then ring the Work Queue Doorbell to signal the
76 * HBA to start processing the Work Queue Entry. This function returns 0 if
77 * successful. If no entries are available on @q then this function will return
78 * -ENOMEM.
79 * The caller is expected to hold the hbalock when calling this routine.
80 **/
81static uint32_t
82lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
83{
84 union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
85 struct lpfc_register doorbell;
86 uint32_t host_index;
87
88 /* If the host has not yet processed the next entry then we are done */
89 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
90 return -ENOMEM;
91 /* set consumption flag every once in a while */
92 if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
93 bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
94
95 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
96
97 /* Update the host index before invoking device */
98 host_index = q->host_index;
99 q->host_index = ((q->host_index + 1) % q->entry_count);
100
101 /* Ring Doorbell */
102 doorbell.word0 = 0;
103 bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
104 bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
105 bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
106 writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
107 readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
108
109 return 0;
110}
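The fullness test in lpfc_sli4_wq_put — ((host_index + 1) % entry_count) == hba_index — is the standard one-slot-open circular-buffer check: the producer never catches the consumer, at the cost of one unused slot. A self-contained sketch with illustrative names (not the driver's structures):

#include <stdio.h>

/* Illustrative ring: host_index is the producer slot, hba_index the consumer. */
struct ring {
	unsigned int host_index;	/* next slot the host will write */
	unsigned int hba_index;		/* next slot the HBA will consume */
	unsigned int entry_count;	/* total slots in the ring */
};

/* Returns 1 when posting one more entry would collide with the consumer. */
static int ring_full(const struct ring *q)
{
	return ((q->host_index + 1) % q->entry_count) == q->hba_index;
}

int main(void)
{
	struct ring q = { .host_index = 3, .hba_index = 4, .entry_count = 8 };
	printf("full=%d\n", ring_full(&q));	/* prints full=1 */
	return 0;
}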
111
112/**
113 * lpfc_sli4_wq_release - Updates internal hba index for WQ
114 * @q: The Work Queue to operate on.
115 * @index: The index to advance the hba index to.
116 *
117 * This routine will update the HBA index of a queue to reflect consumption of
118 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
119 * an entry the host calls this function to update the queue's internal
120 * pointers. This routine returns the number of entries that were consumed by
121 * the HBA.
122 **/
123static uint32_t
124lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
125{
126 uint32_t released = 0;
127
128 if (q->hba_index == index)
129 return 0;
130 do {
131 q->hba_index = ((q->hba_index + 1) % q->entry_count);
132 released++;
133 } while (q->hba_index != index);
134 return released;
135}
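lpfc_sli4_wq_release is the consumer-side counterpart: it walks hba_index forward to the index the hardware reports and counts the hops. A sketch reusing the illustrative struct ring above:

/* Advance hba_index to 'index', returning how many entries that consumed. */
static unsigned int ring_release(struct ring *q, unsigned int index)
{
	unsigned int released = 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = (q->hba_index + 1) % q->entry_count;
		released++;
	} while (q->hba_index != index);
	return released;
}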
136
137/**
138 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue
139 * @q: The Mailbox Queue to operate on.
140 * @wqe: The Mailbox Queue Entry to put on the Work queue.
141 *
142 * This routine will copy the contents of @mqe to the next available entry on
143 * the @q. This function will then ring the Work Queue Doorbell to signal the
144 * HBA to start processing the Work Queue Entry. This function returns 0 if
145 * successful. If no entries are available on @q then this function will return
146 * -ENOMEM.
147 * The caller is expected to hold the hbalock when calling this routine.
148 **/
149static uint32_t
150lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
151{
152 struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
153 struct lpfc_register doorbell;
154 uint32_t host_index;
155
156 /* If the host has not yet processed the next entry then we are done */
157 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
158 return -ENOMEM;
159 lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
160 /* Save off the mailbox pointer for completion */
161 q->phba->mbox = (MAILBOX_t *)temp_mqe;
162
163 /* Update the host index before invoking device */
164 host_index = q->host_index;
165 q->host_index = ((q->host_index + 1) % q->entry_count);
166
167 /* Ring Doorbell */
168 doorbell.word0 = 0;
169 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
170 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
171 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
172 readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
173 return 0;
174}
175
176/**
177 * lpfc_sli4_mq_release - Updates internal hba index for MQ
178 * @q: The Mailbox Queue to operate on.
179 *
180 * This routine will update the HBA index of a queue to reflect consumption of
181 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
182 * an entry the host calls this function to update the queue's internal
183 * pointers. This routine returns the number of entries that were consumed by
184 * the HBA.
185 **/
186static uint32_t
187lpfc_sli4_mq_release(struct lpfc_queue *q)
188{
189 /* Clear the mailbox pointer for completion */
190 q->phba->mbox = NULL;
191 q->hba_index = ((q->hba_index + 1) % q->entry_count);
192 return 1;
193}
194
195/**
196 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
197 * @q: The Event Queue to get the first valid EQE from
198 *
199 * This routine will get the first valid Event Queue Entry from @q, update
200 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
201 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 202 * processed but not yet popped back to the HBA, then this routine will return NULL.
203 **/
204static struct lpfc_eqe *
205lpfc_sli4_eq_get(struct lpfc_queue *q)
206{
207 struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
208
209 /* If the next EQE is not valid then we are done */
210 if (!bf_get(lpfc_eqe_valid, eqe))
211 return NULL;
212 /* If the host has not yet processed the next entry then we are done */
213 if (((q->hba_index + 1) % q->entry_count) == q->host_index)
214 return NULL;
215
216 q->hba_index = ((q->hba_index + 1) % q->entry_count);
217 return eqe;
218}
219
220/**
221 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
222 * @q: The Event Queue that the host has completed processing for.
 223 * @arm: Indicates whether the host wants to arm this EQ.
224 *
225 * This routine will mark all Event Queue Entries on @q, from the last
226 * known completed entry to the last entry that was processed, as completed
 227 * by clearing the valid bit for each event queue entry. Then it will
228 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
229 * The internal host index in the @q will be updated by this routine to indicate
230 * that the host has finished processing the entries. The @arm parameter
231 * indicates that the queue should be rearmed when ringing the doorbell.
232 *
233 * This function will return the number of EQEs that were popped.
234 **/
235uint32_t
236lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
237{
238 uint32_t released = 0;
239 struct lpfc_eqe *temp_eqe;
240 struct lpfc_register doorbell;
241
242 /* while there are valid entries */
243 while (q->hba_index != q->host_index) {
244 temp_eqe = q->qe[q->host_index].eqe;
245 bf_set(lpfc_eqe_valid, temp_eqe, 0);
246 released++;
247 q->host_index = ((q->host_index + 1) % q->entry_count);
248 }
249 if (unlikely(released == 0 && !arm))
250 return 0;
251
252 /* ring doorbell for number popped */
253 doorbell.word0 = 0;
254 if (arm) {
255 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
256 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
257 }
258 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
259 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
260 bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
261 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
262 return released;
263}
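The doorbell writes above are built by packing several fields into one 32-bit word with the driver's bf_set() macros before a single writel(). The field layout below is hypothetical — only the shift-and-mask pattern is the point:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field positions; the real lpfc register layout differs. */
#define DB_ARM_SHIFT		29
#define DB_ARM_MASK		0x1u
#define DB_RELEASED_SHIFT	16
#define DB_RELEASED_MASK	0x1fffu
#define DB_QID_SHIFT		0
#define DB_QID_MASK		0xffffu

/* What a bf_set()-style macro boils down to: clear the field, then OR it in. */
static uint32_t db_set(uint32_t word, unsigned int shift, uint32_t mask,
		       uint32_t val)
{
	return (word & ~(mask << shift)) | ((val & mask) << shift);
}

int main(void)
{
	uint32_t word0 = 0;

	word0 = db_set(word0, DB_ARM_SHIFT, DB_ARM_MASK, 1);
	word0 = db_set(word0, DB_RELEASED_SHIFT, DB_RELEASED_MASK, 42);
	word0 = db_set(word0, DB_QID_SHIFT, DB_QID_MASK, 7);
	printf("doorbell word0 = 0x%08x\n", word0);
	return 0;
}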
264
265/**
266 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
267 * @q: The Completion Queue to get the first valid CQE from
268 *
269 * This routine will get the first valid Completion Queue Entry from @q, update
270 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
271 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 272 * processed but not yet popped back to the HBA, then this routine will return NULL.
273 **/
274static struct lpfc_cqe *
275lpfc_sli4_cq_get(struct lpfc_queue *q)
276{
277 struct lpfc_cqe *cqe;
278
279 /* If the next CQE is not valid then we are done */
280 if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
281 return NULL;
282 /* If the host has not yet processed the next entry then we are done */
283 if (((q->hba_index + 1) % q->entry_count) == q->host_index)
284 return NULL;
285
286 cqe = q->qe[q->hba_index].cqe;
287 q->hba_index = ((q->hba_index + 1) % q->entry_count);
288 return cqe;
289}
290
291/**
292 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
293 * @q: The Completion Queue that the host has completed processing for.
 294 * @arm: Indicates whether the host wants to arm this CQ.
295 *
296 * This routine will mark all Completion queue entries on @q, from the last
297 * known completed entry to the last entry that was processed, as completed
298 * by clearing the valid bit for each completion queue entry. Then it will
299 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
300 * The internal host index in the @q will be updated by this routine to indicate
301 * that the host has finished processing the entries. The @arm parameter
302 * indicates that the queue should be rearmed when ringing the doorbell.
303 *
304 * This function will return the number of CQEs that were released.
305 **/
306uint32_t
307lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
308{
309 uint32_t released = 0;
310 struct lpfc_cqe *temp_qe;
311 struct lpfc_register doorbell;
312
313 /* while there are valid entries */
314 while (q->hba_index != q->host_index) {
315 temp_qe = q->qe[q->host_index].cqe;
316 bf_set(lpfc_cqe_valid, temp_qe, 0);
317 released++;
318 q->host_index = ((q->host_index + 1) % q->entry_count);
319 }
320 if (unlikely(released == 0 && !arm))
321 return 0;
322
323 /* ring doorbell for number popped */
324 doorbell.word0 = 0;
325 if (arm)
326 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
327 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
328 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
329 bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
330 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
331 return released;
332}
333
334/**
335 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 336 * @hq: The Header Receive Queue to operate on.
 337 * @dq: The Data Receive Queue paired with @hq.
 338 * @hrqe: The Header RQE to post; @drqe: the matching Data RQE.
 339 *
 340 * This routine will copy the contents of @hrqe and @drqe to the next available
 341 * entries on @hq and @dq, then ring the Receive Queue Doorbell to signal the
 342 * HBA to start processing them. This function returns the index that the hrqe
 343 * was copied to if successful; if no entries are available it returns -EBUSY.
344 * The caller is expected to hold the hbalock when calling this routine.
345 **/
346static int
347lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
348 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
349{
350 struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
351 struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
352 struct lpfc_register doorbell;
353 int put_index = hq->host_index;
354
355 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
356 return -EINVAL;
357 if (hq->host_index != dq->host_index)
358 return -EINVAL;
359 /* If the host has not yet processed the next entry then we are done */
360 if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
361 return -EBUSY;
362 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
363 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
364
365 /* Update the host index to point to the next slot */
366 hq->host_index = ((hq->host_index + 1) % hq->entry_count);
367 dq->host_index = ((dq->host_index + 1) % dq->entry_count);
368
369 /* Ring The Header Receive Queue Doorbell */
370 if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
371 doorbell.word0 = 0;
372 bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
373 LPFC_RQ_POST_BATCH);
374 bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
375 writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
376 }
377 return put_index;
378}
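lpfc_sli4_rq_put treats the receive queue as two parallel rings — header and data — that must stay in lockstep, and it only rings the doorbell once per LPFC_RQ_POST_BATCH posts. A sketch of that shape, reusing the illustrative struct ring and ring_full() from earlier (BATCH stands in for the driver's constant):

#define BATCH 8

/* Post one header/data pair; returns the slot used, or -1 on error. */
static int rq_pair_put(struct ring *hq, struct ring *dq)
{
	int put_index = (int)hq->host_index;

	if (hq->host_index != dq->host_index)
		return -1;		/* rings fell out of lockstep */
	if (ring_full(hq))
		return -1;		/* no free slot */
	/* ...copy the header and data RQEs into the two slots here... */
	hq->host_index = (hq->host_index + 1) % hq->entry_count;
	dq->host_index = (dq->host_index + 1) % dq->entry_count;
	if (!(hq->host_index % BATCH)) {
		/* ring the doorbell, crediting BATCH new entries */
	}
	return put_index;
}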
379
380/**
381 * lpfc_sli4_rq_release - Updates internal hba index for RQ
382 * @q: The Header Receive Queue to operate on.
383 *
384 * This routine will update the HBA index of a queue to reflect consumption of
385 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
386 * consumed an entry the host calls this function to update the queue's
387 * internal pointers. This routine returns the number of entries that were
388 * consumed by the HBA.
389 **/
390static uint32_t
391lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
392{
393 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
394 return 0;
395 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
396 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
397 return 1;
398}
399
70/** 400/**
71 * lpfc_cmd_iocb - Get next command iocb entry in the ring 401 * lpfc_cmd_iocb - Get next command iocb entry in the ring
72 * @phba: Pointer to HBA context object. 402 * @phba: Pointer to HBA context object.
@@ -121,6 +451,76 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
121} 451}
122 452
123/** 453/**
454 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
455 * @phba: Pointer to HBA context object.
456 * @xritag: XRI value.
457 *
 458 * This function clears the sglq pointer from the array of active
 459 * sglq's. The xritag that is passed in is used to index into the
 460 * array. Before the xritag can be used it needs to be adjusted
 461 * by subtracting the xribase.
 462 *
 463 * Returns the sglq pointer on success, NULL on failure.
464 **/
465static struct lpfc_sglq *
466__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
467{
468 uint16_t adj_xri;
469 struct lpfc_sglq *sglq;
470 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
471 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
472 return NULL;
473 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
474 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
475 return sglq;
476}
477
478/**
479 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
480 * @phba: Pointer to HBA context object.
481 * @xritag: XRI value.
482 *
 483 * This function returns the sglq pointer from the array of active
 484 * sglq's. The xritag that is passed in is used to index into the
 485 * array. Before the xritag can be used it needs to be adjusted
 486 * by subtracting the xribase.
 487 *
 488 * Returns the sglq pointer on success, NULL on failure.
489 **/
490static struct lpfc_sglq *
491__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
492{
493 uint16_t adj_xri;
494 struct lpfc_sglq *sglq;
495 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
496 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
497 return NULL;
498 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
499 return sglq;
500}
501
502/**
503 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
504 * @phba: Pointer to HBA context object.
505 *
 506 * This function is called with the hbalock held. It gets a new
 507 * driver sglq object from the sglq list. If the list is not empty
 508 * it returns a pointer to the newly allocated sglq object,
 509 * otherwise it returns NULL.
510 **/
511static struct lpfc_sglq *
512__lpfc_sli_get_sglq(struct lpfc_hba *phba)
513{
514 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
515 struct lpfc_sglq *sglq = NULL;
516 uint16_t adj_xri;
 517 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
 if (!sglq)
 return NULL; /* sgl pool exhausted; avoids dereferencing a NULL sglq below */
518 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
519 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
520 return sglq;
521}
522
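All three sglq helpers above share one addressing step: subtract the configured XRI base from the hardware tag to get an index into the active-sglq array, then bounds-check it. A sketch of just that step, with illustrative parameters:

struct sglq;	/* opaque for the sketch */

static struct sglq *active_sglq(struct sglq **active_list,
				unsigned int xri_base, unsigned int max_xri,
				unsigned int xritag)
{
	unsigned int adj_xri = xritag - xri_base;

	if (adj_xri > max_xri)
		return NULL;	/* tag outside the configured range */
	return active_list[adj_xri];
}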
523/**
124 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 524 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
125 * @phba: Pointer to HBA context object. 525 * @phba: Pointer to HBA context object.
126 * 526 *
@@ -142,7 +542,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
142} 542}
143 543
144/** 544/**
145 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 545 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
146 * @phba: Pointer to HBA context object. 546 * @phba: Pointer to HBA context object.
147 * @iocbq: Pointer to driver iocb object. 547 * @iocbq: Pointer to driver iocb object.
148 * 548 *
@@ -150,9 +550,62 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
150 * iocb object to the iocb pool. The iotag in the iocb object 550 * iocb object to the iocb pool. The iotag in the iocb object
151 * does not change for each use of the iocb object. This function 551 * does not change for each use of the iocb object. This function
152 * clears all other fields of the iocb object when it is freed. 552 * clears all other fields of the iocb object when it is freed.
 553 * The sglq structure that holds the xritag and phys and virtual
 554 * mappings for the scatter gather list is retrieved from the
 555 * active array of sglq. The get of the sglq pointer also clears
 556 * the entry in the array. If the status of the IO indicates that
 557 * this IO was aborted then the sglq entry is put on the
 558 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 559 * IO has good status or fails for any other reason then the sglq
 560 * entry is added to the free list (lpfc_sgl_list).
153 **/ 561 **/
154static void 562static void
155__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 563__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
564{
565 struct lpfc_sglq *sglq;
566 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
567 unsigned long iflag;
568
569 if (iocbq->sli4_xritag == NO_XRI)
570 sglq = NULL;
571 else
572 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
573 if (sglq) {
574 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
575 || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
576 && (iocbq->iocb.un.ulpWord[4]
577 == IOERR_SLI_ABORTED))) {
578 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
579 iflag);
580 list_add(&sglq->list,
581 &phba->sli4_hba.lpfc_abts_els_sgl_list);
582 spin_unlock_irqrestore(
583 &phba->sli4_hba.abts_sgl_list_lock, iflag);
584 } else
585 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
586 }
587
588
589 /*
590 * Clean all volatile data fields, preserve iotag and node struct.
591 */
592 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
593 iocbq->sli4_xritag = NO_XRI;
594 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
595}
596
597/**
598 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
599 * @phba: Pointer to HBA context object.
600 * @iocbq: Pointer to driver iocb object.
601 *
602 * This function is called with hbalock held to release driver
603 * iocb object to the iocb pool. The iotag in the iocb object
604 * does not change for each use of the iocb object. This function
605 * clears all other fields of the iocb object when it is freed.
606 **/
607static void
608__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
156{ 609{
157 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 610 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
158 611
@@ -160,10 +613,27 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
160 * Clean all volatile data fields, preserve iotag and node struct. 613 * Clean all volatile data fields, preserve iotag and node struct.
161 */ 614 */
162 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 615 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
616 iocbq->sli4_xritag = NO_XRI;
163 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 617 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
164} 618}
165 619
166/** 620/**
621 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
622 * @phba: Pointer to HBA context object.
623 * @iocbq: Pointer to driver iocb object.
624 *
625 * This function is called with hbalock held to release driver
626 * iocb object to the iocb pool. The iotag in the iocb object
627 * does not change for each use of the iocb object. This function
628 * clears all other fields of the iocb object when it is freed.
629 **/
630static void
631__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
632{
633 phba->__lpfc_sli_release_iocbq(phba, iocbq);
634}
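This wrapper is the patch's recurring API jump table: the per-adapter struct carries a function pointer that is bound once, at setup, to the SLI-3 or SLI-4 variant, so hot paths never branch on the revision. A generic sketch of the pattern (the setup function and field names here are hypothetical):

struct hba;
typedef void (*release_fn)(struct hba *);

struct hba {
	int sli_rev;			/* 3 or 4 */
	release_fn release_iocbq;	/* bound once at init */
};

static void release_iocbq_s3(struct hba *h) { (void)h; /* SLI-3 path */ }
static void release_iocbq_s4(struct hba *h) { (void)h; /* SLI-4 path */ }

static void hba_bind_api(struct hba *h)
{
	h->release_iocbq = (h->sli_rev == 4) ? release_iocbq_s4
					     : release_iocbq_s3;
}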
635
636/**
167 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 637 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
168 * @phba: Pointer to HBA context object. 638 * @phba: Pointer to HBA context object.
169 * @iocbq: Pointer to driver iocb object. 639 * @iocbq: Pointer to driver iocb object.
@@ -281,6 +751,14 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
281 case CMD_GEN_REQUEST64_CR: 751 case CMD_GEN_REQUEST64_CR:
282 case CMD_GEN_REQUEST64_CX: 752 case CMD_GEN_REQUEST64_CX:
283 case CMD_XMIT_ELS_RSP64_CX: 753 case CMD_XMIT_ELS_RSP64_CX:
754 case DSSCMD_IWRITE64_CR:
755 case DSSCMD_IWRITE64_CX:
756 case DSSCMD_IREAD64_CR:
757 case DSSCMD_IREAD64_CX:
758 case DSSCMD_INVALIDATE_DEK:
759 case DSSCMD_SET_KEK:
760 case DSSCMD_GET_KEK_ID:
761 case DSSCMD_GEN_XFER:
284 type = LPFC_SOL_IOCB; 762 type = LPFC_SOL_IOCB;
285 break; 763 break;
286 case CMD_ABORT_XRI_CN: 764 case CMD_ABORT_XRI_CN:
@@ -348,7 +826,7 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
348 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 826 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
349 if (!pmb) 827 if (!pmb)
350 return -ENOMEM; 828 return -ENOMEM;
351 pmbox = &pmb->mb; 829 pmbox = &pmb->u.mb;
352 phba->link_state = LPFC_INIT_MBX_CMDS; 830 phba->link_state = LPFC_INIT_MBX_CMDS;
353 for (i = 0; i < psli->num_rings; i++) { 831 for (i = 0; i < psli->num_rings; i++) {
354 lpfc_config_ring(phba, i, pmb); 832 lpfc_config_ring(phba, i, pmb);
@@ -779,8 +1257,8 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
779 phba->hbqs[i].buffer_count = 0; 1257 phba->hbqs[i].buffer_count = 0;
780 } 1258 }
781 /* Return all HBQ buffer that are in-fly */ 1259 /* Return all HBQ buffer that are in-fly */
782 list_for_each_entry_safe(dmabuf, next_dmabuf, 1260 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
783 &phba->hbqbuf_in_list, list) { 1261 list) {
784 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1262 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
785 list_del(&hbq_buf->dbuf.list); 1263 list_del(&hbq_buf->dbuf.list);
786 if (hbq_buf->tag == -1) { 1264 if (hbq_buf->tag == -1) {
@@ -814,10 +1292,28 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
814 * pointer to the hbq entry if it successfully post the buffer 1292 * pointer to the hbq entry if it successfully post the buffer
815 * else it will return NULL. 1293 * else it will return NULL.
816 **/ 1294 **/
817static struct lpfc_hbq_entry * 1295static int
818lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 1296lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
819 struct hbq_dmabuf *hbq_buf) 1297 struct hbq_dmabuf *hbq_buf)
820{ 1298{
1299 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1300}
1301
1302/**
1303 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1304 * @phba: Pointer to HBA context object.
1305 * @hbqno: HBQ number.
1306 * @hbq_buf: Pointer to HBQ buffer.
1307 *
1308 * This function is called with the hbalock held to post a hbq buffer to the
1309 * firmware. If the function finds an empty slot in the HBQ, it will post the
1310 * buffer and place it on the hbq_buffer_list. The function will return zero if
1311 * it successfully post the buffer else it will return an error.
1312 **/
1313static int
1314lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1315 struct hbq_dmabuf *hbq_buf)
1316{
821 struct lpfc_hbq_entry *hbqe; 1317 struct lpfc_hbq_entry *hbqe;
822 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1318 dma_addr_t physaddr = hbq_buf->dbuf.phys;
823 1319
@@ -838,8 +1334,40 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
838 /* flush */ 1334 /* flush */
839 readl(phba->hbq_put + hbqno); 1335 readl(phba->hbq_put + hbqno);
840 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1336 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
841 } 1337 return 0;
842 return hbqe; 1338 } else
1339 return -ENOMEM;
1340}
1341
1342/**
1343 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1344 * @phba: Pointer to HBA context object.
1345 * @hbqno: HBQ number.
1346 * @hbq_buf: Pointer to HBQ buffer.
1347 *
1348 * This function is called with the hbalock held to post an RQE to the SLI4
1349 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1350 * the hbq_buffer_list and return zero, otherwise it will return an error.
1351 **/
1352static int
1353lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1354 struct hbq_dmabuf *hbq_buf)
1355{
1356 int rc;
1357 struct lpfc_rqe hrqe;
1358 struct lpfc_rqe drqe;
1359
1360 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1361 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1362 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1363 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1364 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1365 &hrqe, &drqe);
1366 if (rc < 0)
1367 return rc;
1368 hbq_buf->tag = rc;
1369 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1370 return 0;
843} 1371}
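The putPaddrLow/putPaddrHigh calls above split a 64-bit DMA address into the two 32-bit words an RQE carries. A sketch of the idiom (names illustrative, not the driver's macros):

#include <stdint.h>

static inline uint32_t paddr_lo(uint64_t phys) { return (uint32_t)phys; }
static inline uint32_t paddr_hi(uint64_t phys) { return (uint32_t)(phys >> 32); }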
844 1372
845/* HBQ for ELS and CT traffic. */ 1373/* HBQ for ELS and CT traffic. */
@@ -914,7 +1442,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
914 dbuf.list); 1442 dbuf.list);
915 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1443 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
916 (hbqno << 16)); 1444 (hbqno << 16));
917 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1445 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
918 phba->hbqs[hbqno].buffer_count++; 1446 phba->hbqs[hbqno].buffer_count++;
919 posted++; 1447 posted++;
920 } else 1448 } else
@@ -965,6 +1493,25 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
965} 1493}
966 1494
967/** 1495/**
1496 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
1497 * @phba: Pointer to HBA context object.
1498 * @hbqno: HBQ number.
1499 *
1500 * This function removes the first hbq buffer on an hbq list and returns a
1501 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1502 **/
1503static struct hbq_dmabuf *
1504lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1505{
1506 struct lpfc_dmabuf *d_buf;
1507
1508 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1509 if (!d_buf)
1510 return NULL;
1511 return container_of(d_buf, struct hbq_dmabuf, dbuf);
1512}
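lpfc_sli_hbqbuf_get pops a generic lpfc_dmabuf off the list and uses container_of() to recover the hbq_dmabuf that embeds it. A self-contained sketch of that pointer arithmetic with simplified types:

#include <stddef.h>

struct dmabuf { struct dmabuf *next; };
struct hbq_dmabuf { int tag; struct dmabuf dbuf; };

/* Step from the embedded member back to the enclosing struct. */
#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct hbq_dmabuf *to_hbq(struct dmabuf *d)
{
	return container_of_sketch(d, struct hbq_dmabuf, dbuf);
}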
1513
1514/**
968 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 1515 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
969 * @phba: Pointer to HBA context object. 1516 * @phba: Pointer to HBA context object.
970 * @tag: Tag of the hbq buffer. 1517 * @tag: Tag of the hbq buffer.
@@ -985,12 +1532,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
985 if (hbqno >= LPFC_MAX_HBQS) 1532 if (hbqno >= LPFC_MAX_HBQS)
986 return NULL; 1533 return NULL;
987 1534
1535 spin_lock_irq(&phba->hbalock);
988 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 1536 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
989 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 1537 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
990 if (hbq_buf->tag == tag) { 1538 if (hbq_buf->tag == tag) {
1539 spin_unlock_irq(&phba->hbalock);
991 return hbq_buf; 1540 return hbq_buf;
992 } 1541 }
993 } 1542 }
1543 spin_unlock_irq(&phba->hbalock);
994 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 1544 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
995 "1803 Bad hbq tag. Data: x%x x%x\n", 1545 "1803 Bad hbq tag. Data: x%x x%x\n",
996 tag, phba->hbqs[tag >> 16].buffer_count); 1546 tag, phba->hbqs[tag >> 16].buffer_count);
@@ -1013,9 +1563,8 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
1013 1563
1014 if (hbq_buffer) { 1564 if (hbq_buffer) {
1015 hbqno = hbq_buffer->tag >> 16; 1565 hbqno = hbq_buffer->tag >> 16;
1016 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1566 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
1017 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1567 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1018 }
1019 } 1568 }
1020} 1569}
1021 1570
@@ -1086,6 +1635,15 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1086 case MBX_HEARTBEAT: 1635 case MBX_HEARTBEAT:
1087 case MBX_PORT_CAPABILITIES: 1636 case MBX_PORT_CAPABILITIES:
1088 case MBX_PORT_IOV_CONTROL: 1637 case MBX_PORT_IOV_CONTROL:
1638 case MBX_SLI4_CONFIG:
1639 case MBX_SLI4_REQ_FTRS:
1640 case MBX_REG_FCFI:
1641 case MBX_UNREG_FCFI:
1642 case MBX_REG_VFI:
1643 case MBX_UNREG_VFI:
1644 case MBX_INIT_VPI:
1645 case MBX_INIT_VFI:
1646 case MBX_RESUME_RPI:
1089 ret = mbxCommand; 1647 ret = mbxCommand;
1090 break; 1648 break;
1091 default: 1649 default:
@@ -1106,7 +1664,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1106 * will wake up thread waiting on the wait queue pointed by context1 1664 * will wake up thread waiting on the wait queue pointed by context1
1107 * of the mailbox. 1665 * of the mailbox.
1108 **/ 1666 **/
1109static void 1667void
1110lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 1668lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
1111{ 1669{
1112 wait_queue_head_t *pdone_q; 1670 wait_queue_head_t *pdone_q;
@@ -1140,7 +1698,7 @@ void
1140lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1698lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1141{ 1699{
1142 struct lpfc_dmabuf *mp; 1700 struct lpfc_dmabuf *mp;
1143 uint16_t rpi; 1701 uint16_t rpi, vpi;
1144 int rc; 1702 int rc;
1145 1703
1146 mp = (struct lpfc_dmabuf *) (pmb->context1); 1704 mp = (struct lpfc_dmabuf *) (pmb->context1);
@@ -1150,24 +1708,30 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1150 kfree(mp); 1708 kfree(mp);
1151 } 1709 }
1152 1710
1711 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
1712 (phba->sli_rev == LPFC_SLI_REV4))
1713 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
1714
1153 /* 1715 /*
1154 * If a REG_LOGIN succeeded after node is destroyed or node 1716 * If a REG_LOGIN succeeded after node is destroyed or node
1155 * is in re-discovery driver need to cleanup the RPI. 1717 * is in re-discovery driver need to cleanup the RPI.
1156 */ 1718 */
1157 if (!(phba->pport->load_flag & FC_UNLOADING) && 1719 if (!(phba->pport->load_flag & FC_UNLOADING) &&
1158 pmb->mb.mbxCommand == MBX_REG_LOGIN64 && 1720 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
1159 !pmb->mb.mbxStatus) { 1721 !pmb->u.mb.mbxStatus) {
1160 1722 rpi = pmb->u.mb.un.varWords[0];
1161 rpi = pmb->mb.un.varWords[0]; 1723 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
1162 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb); 1724 lpfc_unreg_login(phba, vpi, rpi, pmb);
1163 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1725 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1164 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1726 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1165 if (rc != MBX_NOT_FINISHED) 1727 if (rc != MBX_NOT_FINISHED)
1166 return; 1728 return;
1167 } 1729 }
1168 1730
1169 mempool_free(pmb, phba->mbox_mem_pool); 1731 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
1170 return; 1732 lpfc_sli4_mbox_cmd_free(phba, pmb);
1733 else
1734 mempool_free(pmb, phba->mbox_mem_pool);
1171} 1735}
1172 1736
1173/** 1737/**
@@ -1204,7 +1768,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1204 if (pmb == NULL) 1768 if (pmb == NULL)
1205 break; 1769 break;
1206 1770
1207 pmbox = &pmb->mb; 1771 pmbox = &pmb->u.mb;
1208 1772
1209 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 1773 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
1210 if (pmb->vport) { 1774 if (pmb->vport) {
@@ -1233,9 +1797,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
 1233 /* Unknown mailbox command compl */ 1797
1234 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 1798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1235 "(%d):0323 Unknown Mailbox command " 1799 "(%d):0323 Unknown Mailbox command "
1236 "%x Cmpl\n", 1800 "x%x (x%x) Cmpl\n",
1237 pmb->vport ? pmb->vport->vpi : 0, 1801 pmb->vport ? pmb->vport->vpi : 0,
1238 pmbox->mbxCommand); 1802 pmbox->mbxCommand,
1803 lpfc_sli4_mbox_opcode_get(phba, pmb));
1239 phba->link_state = LPFC_HBA_ERROR; 1804 phba->link_state = LPFC_HBA_ERROR;
1240 phba->work_hs = HS_FFER3; 1805 phba->work_hs = HS_FFER3;
1241 lpfc_handle_eratt(phba); 1806 lpfc_handle_eratt(phba);
@@ -1250,29 +1815,29 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1250 LOG_MBOX | LOG_SLI, 1815 LOG_MBOX | LOG_SLI,
1251 "(%d):0305 Mbox cmd cmpl " 1816 "(%d):0305 Mbox cmd cmpl "
1252 "error - RETRYing Data: x%x " 1817 "error - RETRYing Data: x%x "
1253 "x%x x%x x%x\n", 1818 "(x%x) x%x x%x x%x\n",
1254 pmb->vport ? pmb->vport->vpi :0, 1819 pmb->vport ? pmb->vport->vpi :0,
1255 pmbox->mbxCommand, 1820 pmbox->mbxCommand,
1821 lpfc_sli4_mbox_opcode_get(phba,
1822 pmb),
1256 pmbox->mbxStatus, 1823 pmbox->mbxStatus,
1257 pmbox->un.varWords[0], 1824 pmbox->un.varWords[0],
1258 pmb->vport->port_state); 1825 pmb->vport->port_state);
1259 pmbox->mbxStatus = 0; 1826 pmbox->mbxStatus = 0;
1260 pmbox->mbxOwner = OWN_HOST; 1827 pmbox->mbxOwner = OWN_HOST;
1261 spin_lock_irq(&phba->hbalock);
1262 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1263 spin_unlock_irq(&phba->hbalock);
1264 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 1828 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1265 if (rc == MBX_SUCCESS) 1829 if (rc != MBX_NOT_FINISHED)
1266 continue; 1830 continue;
1267 } 1831 }
1268 } 1832 }
1269 1833
1270 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 1834 /* Mailbox cmd <cmd> Cmpl <cmpl> */
1271 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 1835 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
1272 "(%d):0307 Mailbox cmd x%x Cmpl x%p " 1836 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
1273 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 1837 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
1274 pmb->vport ? pmb->vport->vpi : 0, 1838 pmb->vport ? pmb->vport->vpi : 0,
1275 pmbox->mbxCommand, 1839 pmbox->mbxCommand,
1840 lpfc_sli4_mbox_opcode_get(phba, pmb),
1276 pmb->mbox_cmpl, 1841 pmb->mbox_cmpl,
1277 *((uint32_t *) pmbox), 1842 *((uint32_t *) pmbox),
1278 pmbox->un.varWords[0], 1843 pmbox->un.varWords[0],
@@ -1317,6 +1882,45 @@ lpfc_sli_get_buff(struct lpfc_hba *phba,
1317 return &hbq_entry->dbuf; 1882 return &hbq_entry->dbuf;
1318} 1883}
1319 1884
1885/**
1886 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
1887 * @phba: Pointer to HBA context object.
1888 * @pring: Pointer to driver SLI ring object.
1889 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
1890 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
1891 * @fch_type: the type for the first frame of the sequence.
1892 *
1893 * This function is called with no lock held. This function uses the r_ctl and
1894 * type of the received sequence to find the correct callback function to call
1895 * to process the sequence.
1896 **/
1897static int
1898lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1899 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
1900 uint32_t fch_type)
1901{
1902 int i;
1903
1904 /* unSolicited Responses */
1905 if (pring->prt[0].profile) {
1906 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1907 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1908 saveq);
1909 return 1;
1910 }
1911 /* We must search, based on rctl / type
1912 for the right routine */
1913 for (i = 0; i < pring->num_mask; i++) {
1914 if ((pring->prt[i].rctl == fch_r_ctl) &&
1915 (pring->prt[i].type == fch_type)) {
1916 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1917 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1918 (phba, pring, saveq);
1919 return 1;
1920 }
1921 }
1922 return 0;
1923}
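lpfc_complete_unsol_iocb dispatches a received sequence in two tiers: a slot-0 "profile" handler catches everything if present, otherwise the (rctl, type) pair is matched against the ring's mask table. A sketch of that dispatch shape with illustrative types:

struct seq;	/* opaque: the received sequence */
typedef void (*unsol_fn)(struct seq *);

struct prt_entry {
	int profile;		/* non-zero: handles every sequence */
	unsigned int rctl, type;
	unsol_fn handler;
};

static int dispatch_unsol(const struct prt_entry *prt, int num_mask,
			  unsigned int rctl, unsigned int type, struct seq *s)
{
	int i;

	if (prt[0].profile) {
		if (prt[0].handler)
			prt[0].handler(s);
		return 1;
	}
	for (i = 0; i < num_mask; i++) {
		if (prt[i].rctl == rctl && prt[i].type == type) {
			if (prt[i].handler)
				prt[i].handler(s);
			return 1;
		}
	}
	return 0;		/* unexpected rctl/type */
}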
1320 1924
1321/** 1925/**
1322 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 1926 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
@@ -1339,7 +1943,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1339 IOCB_t * irsp; 1943 IOCB_t * irsp;
1340 WORD5 * w5p; 1944 WORD5 * w5p;
1341 uint32_t Rctl, Type; 1945 uint32_t Rctl, Type;
1342 uint32_t match, i; 1946 uint32_t match;
1343 struct lpfc_iocbq *iocbq; 1947 struct lpfc_iocbq *iocbq;
1344 struct lpfc_dmabuf *dmzbuf; 1948 struct lpfc_dmabuf *dmzbuf;
1345 1949
@@ -1482,35 +2086,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1482 } 2086 }
1483 } 2087 }
1484 2088
1485 /* unSolicited Responses */ 2089 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
1486 if (pring->prt[0].profile) {
1487 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1488 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1489 saveq);
1490 match = 1;
1491 } else {
1492 /* We must search, based on rctl / type
1493 for the right routine */
1494 for (i = 0; i < pring->num_mask; i++) {
1495 if ((pring->prt[i].rctl == Rctl)
1496 && (pring->prt[i].type == Type)) {
1497 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1498 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1499 (phba, pring, saveq);
1500 match = 1;
1501 break;
1502 }
1503 }
1504 }
1505 if (match == 0) {
1506 /* Unexpected Rctl / Type received */
1507 /* Ring <ringno> handler: unexpected
1508 Rctl <Rctl> Type <Type> received */
1509 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2090 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1510 "0313 Ring %d handler: unexpected Rctl x%x " 2091 "0313 Ring %d handler: unexpected Rctl x%x "
1511 "Type x%x received\n", 2092 "Type x%x received\n",
1512 pring->ringno, Rctl, Type); 2093 pring->ringno, Rctl, Type);
1513 } 2094
1514 return 1; 2095 return 1;
1515} 2096}
1516 2097
@@ -1552,6 +2133,37 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
1552} 2133}
1553 2134
1554/** 2135/**
2136 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2137 * @phba: Pointer to HBA context object.
2138 * @pring: Pointer to driver SLI ring object.
2139 * @iotag: IOCB tag.
2140 *
2141 * This function looks up the iocb_lookup table to get the command iocb
2142 * corresponding to the given iotag. This function is called with the
2143 * hbalock held.
2144 * This function returns the command iocb object if it finds the command
2145 * iocb else returns NULL.
2146 **/
2147static struct lpfc_iocbq *
2148lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2149 struct lpfc_sli_ring *pring, uint16_t iotag)
2150{
2151 struct lpfc_iocbq *cmd_iocb;
2152
2153 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2154 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2155 list_del_init(&cmd_iocb->list);
2156 pring->txcmplq_cnt--;
2157 return cmd_iocb;
2158 }
2159
2160 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2161 "0372 iotag x%x is out off range: max iotag (x%x)\n",
2162 iotag, phba->sli.last_iotag);
2163 return NULL;
2164}
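The by-tag lookup above is a direct array index bounds-checked against the highest iotag handed out (the real function also unlinks the iocb from the txcmplq; this sketch shows only the lookup, with illustrative types):

struct iocbq;

static struct iocbq *lookup_by_tag(struct iocbq **table,
				   unsigned int last_iotag, unsigned int iotag)
{
	if (iotag != 0 && iotag <= last_iotag)
		return table[iotag];
	return NULL;		/* out of range */
}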
2165
2166/**
1555 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2167 * lpfc_sli_process_sol_iocb - process solicited iocb completion
1556 * @phba: Pointer to HBA context object. 2168 * @phba: Pointer to HBA context object.
1557 * @pring: Pointer to driver SLI ring object. 2169 * @pring: Pointer to driver SLI ring object.
@@ -1954,7 +2566,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1954 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2566 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1955 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2567 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1956 spin_unlock_irqrestore(&phba->hbalock, iflag); 2568 spin_unlock_irqrestore(&phba->hbalock, iflag);
1957 lpfc_rampdown_queue_depth(phba); 2569 phba->lpfc_rampdown_queue_depth(phba);
1958 spin_lock_irqsave(&phba->hbalock, iflag); 2570 spin_lock_irqsave(&phba->hbalock, iflag);
1959 } 2571 }
1960 2572
@@ -2068,39 +2680,215 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2068} 2680}
2069 2681
2070/** 2682/**
2071 * lpfc_sli_handle_slow_ring_event - Handle ring events for non-FCP rings 2683 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
2684 * @phba: Pointer to HBA context object.
2685 * @pring: Pointer to driver SLI ring object.
2686 * @rspiocbp: Pointer to driver response IOCB object.
2687 *
2688 * This function is called from the worker thread when there is a slow-path
2689 * response IOCB to process. This function chains all the response iocbs until
2690 * seeing the iocb with the LE bit set. The function will call
2691 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
2692 * completion of a command iocb. The function will call the
2693 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
2694 * The function frees the resources or calls the completion handler if this
2695 * iocb is an abort completion. The function returns NULL when the response
2696 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
2697 * this function shall chain the iocb on to the iocb_continueq and return the
2698 * response iocb passed in.
2699 **/
2700static struct lpfc_iocbq *
2701lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2702 struct lpfc_iocbq *rspiocbp)
2703{
2704 struct lpfc_iocbq *saveq;
2705 struct lpfc_iocbq *cmdiocbp;
2706 struct lpfc_iocbq *next_iocb;
2707 IOCB_t *irsp = NULL;
2708 uint32_t free_saveq;
2709 uint8_t iocb_cmd_type;
2710 lpfc_iocb_type type;
2711 unsigned long iflag;
2712 int rc;
2713
2714 spin_lock_irqsave(&phba->hbalock, iflag);
 2715 /* First add the response iocb to the continueq list */
2716 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
2717 pring->iocb_continueq_cnt++;
2718
 2719 /* Now, determine whether the list is complete for processing */
2720 irsp = &rspiocbp->iocb;
2721 if (irsp->ulpLe) {
2722 /*
2723 * By default, the driver expects to free all resources
2724 * associated with this iocb completion.
2725 */
2726 free_saveq = 1;
2727 saveq = list_get_first(&pring->iocb_continueq,
2728 struct lpfc_iocbq, list);
2729 irsp = &(saveq->iocb);
2730 list_del_init(&pring->iocb_continueq);
2731 pring->iocb_continueq_cnt = 0;
2732
2733 pring->stats.iocb_rsp++;
2734
2735 /*
2736 * If resource errors reported from HBA, reduce
2737 * queuedepths of the SCSI device.
2738 */
2739 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2740 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2741 spin_unlock_irqrestore(&phba->hbalock, iflag);
2742 phba->lpfc_rampdown_queue_depth(phba);
2743 spin_lock_irqsave(&phba->hbalock, iflag);
2744 }
2745
2746 if (irsp->ulpStatus) {
2747 /* Rsp ring <ringno> error: IOCB */
2748 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2749 "0328 Rsp Ring %d error: "
2750 "IOCB Data: "
2751 "x%x x%x x%x x%x "
2752 "x%x x%x x%x x%x "
2753 "x%x x%x x%x x%x "
2754 "x%x x%x x%x x%x\n",
2755 pring->ringno,
2756 irsp->un.ulpWord[0],
2757 irsp->un.ulpWord[1],
2758 irsp->un.ulpWord[2],
2759 irsp->un.ulpWord[3],
2760 irsp->un.ulpWord[4],
2761 irsp->un.ulpWord[5],
2762 *(((uint32_t *) irsp) + 6),
2763 *(((uint32_t *) irsp) + 7),
2764 *(((uint32_t *) irsp) + 8),
2765 *(((uint32_t *) irsp) + 9),
2766 *(((uint32_t *) irsp) + 10),
2767 *(((uint32_t *) irsp) + 11),
2768 *(((uint32_t *) irsp) + 12),
2769 *(((uint32_t *) irsp) + 13),
2770 *(((uint32_t *) irsp) + 14),
2771 *(((uint32_t *) irsp) + 15));
2772 }
2773
2774 /*
2775 * Fetch the IOCB command type and call the correct completion
2776 * routine. Solicited and Unsolicited IOCBs on the ELS ring
2777 * get freed back to the lpfc_iocb_list by the discovery
2778 * kernel thread.
2779 */
2780 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2781 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2782 switch (type) {
2783 case LPFC_SOL_IOCB:
2784 spin_unlock_irqrestore(&phba->hbalock, iflag);
2785 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
2786 spin_lock_irqsave(&phba->hbalock, iflag);
2787 break;
2788
2789 case LPFC_UNSOL_IOCB:
2790 spin_unlock_irqrestore(&phba->hbalock, iflag);
2791 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
2792 spin_lock_irqsave(&phba->hbalock, iflag);
2793 if (!rc)
2794 free_saveq = 0;
2795 break;
2796
2797 case LPFC_ABORT_IOCB:
2798 cmdiocbp = NULL;
2799 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
2800 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
2801 saveq);
2802 if (cmdiocbp) {
2803 /* Call the specified completion routine */
2804 if (cmdiocbp->iocb_cmpl) {
2805 spin_unlock_irqrestore(&phba->hbalock,
2806 iflag);
2807 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
2808 saveq);
2809 spin_lock_irqsave(&phba->hbalock,
2810 iflag);
2811 } else
2812 __lpfc_sli_release_iocbq(phba,
2813 cmdiocbp);
2814 }
2815 break;
2816
2817 case LPFC_UNKNOWN_IOCB:
2818 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2819 char adaptermsg[LPFC_MAX_ADPTMSG];
2820 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2821 memcpy(&adaptermsg[0], (uint8_t *)irsp,
2822 MAX_MSG_DATA);
2823 dev_warn(&((phba->pcidev)->dev),
2824 "lpfc%d: %s\n",
2825 phba->brd_no, adaptermsg);
2826 } else {
2827 /* Unknown IOCB command */
2828 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2829 "0335 Unknown IOCB "
2830 "command Data: x%x "
2831 "x%x x%x x%x\n",
2832 irsp->ulpCommand,
2833 irsp->ulpStatus,
2834 irsp->ulpIoTag,
2835 irsp->ulpContext);
2836 }
2837 break;
2838 }
2839
2840 if (free_saveq) {
2841 list_for_each_entry_safe(rspiocbp, next_iocb,
2842 &saveq->list, list) {
2843 list_del(&rspiocbp->list);
2844 __lpfc_sli_release_iocbq(phba, rspiocbp);
2845 }
2846 __lpfc_sli_release_iocbq(phba, saveq);
2847 }
2848 rspiocbp = NULL;
2849 }
2850 spin_unlock_irqrestore(&phba->hbalock, iflag);
2851 return rspiocbp;
2852}
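The control flow factored out into lpfc_sli_sp_handle_rspiocb is: chain response iocbs until one carries the LE (last entry) bit, then process the whole chain and return NULL; otherwise hand the caller back the iocb it passed in. A stripped-down sketch of that accumulate-until-LE shape (illustrative types; the real code uses kernel lists and dispatches by IOCB type):

struct rsp { int le; struct rsp *next; };

static struct rsp *chain_rsp(struct rsp **contq, struct rsp *r)
{
	r->next = *contq;	/* add to the continue queue */
	*contq = r;
	if (!r->le)
		return r;	/* chain not complete yet */
	/* ...process the completed chain here... */
	*contq = NULL;
	return NULL;		/* NULL: chain consumed */
}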
2853
2854/**
2855 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
2072 * @phba: Pointer to HBA context object. 2856 * @phba: Pointer to HBA context object.
2073 * @pring: Pointer to driver SLI ring object. 2857 * @pring: Pointer to driver SLI ring object.
2074 * @mask: Host attention register mask for this ring. 2858 * @mask: Host attention register mask for this ring.
2075 * 2859 *
2076 * This function is called from the worker thread when there is a ring 2860 * This routine wraps the actual slow_ring event process routine from the
2077 * event for non-fcp rings. The caller does not hold any lock . 2861 * API jump table function pointer from the lpfc_hba struct.
2078 * The function processes each response iocb in the response ring until it
2079 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
2080 * LE bit set. The function will call lpfc_sli_process_sol_iocb function if the
2081 * response iocb indicates a completion of a command iocb. The function
2082 * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited
2083 * iocb. The function frees the resources or calls the completion handler if
2084 * this iocb is an abort completion. The function returns 0 when the allocated
2085 * iocbs are not freed, otherwise returns 1.
2086 **/ 2862 **/
2087int 2863void
2088lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 2864lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2089 struct lpfc_sli_ring *pring, uint32_t mask) 2865 struct lpfc_sli_ring *pring, uint32_t mask)
2090{ 2866{
2867 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
2868}
2869
2870/**
2871 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
2872 * @phba: Pointer to HBA context object.
2873 * @pring: Pointer to driver SLI ring object.
2874 * @mask: Host attention register mask for this ring.
2875 *
2876 * This function is called from the worker thread when there is a ring event
2877 * for non-fcp rings. The caller does not hold any lock. The function will
2878 * remove each response iocb in the response ring and calls the handle
2879 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
2880 **/
2881static void
2882lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
2883 struct lpfc_sli_ring *pring, uint32_t mask)
2884{
2091 struct lpfc_pgp *pgp; 2885 struct lpfc_pgp *pgp;
2092 IOCB_t *entry; 2886 IOCB_t *entry;
2093 IOCB_t *irsp = NULL; 2887 IOCB_t *irsp = NULL;
2094 struct lpfc_iocbq *rspiocbp = NULL; 2888 struct lpfc_iocbq *rspiocbp = NULL;
2095 struct lpfc_iocbq *next_iocb;
2096 struct lpfc_iocbq *cmdiocbp;
2097 struct lpfc_iocbq *saveq;
2098 uint8_t iocb_cmd_type;
2099 lpfc_iocb_type type;
2100 uint32_t status, free_saveq;
2101 uint32_t portRspPut, portRspMax; 2889 uint32_t portRspPut, portRspMax;
2102 int rc = 1;
2103 unsigned long iflag; 2890 unsigned long iflag;
2891 uint32_t status;
2104 2892
2105 pgp = &phba->port_gp[pring->ringno]; 2893 pgp = &phba->port_gp[pring->ringno];
2106 spin_lock_irqsave(&phba->hbalock, iflag); 2894 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -2128,7 +2916,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2128 phba->work_hs = HS_FFER3; 2916 phba->work_hs = HS_FFER3;
2129 lpfc_handle_eratt(phba); 2917 lpfc_handle_eratt(phba);
2130 2918
2131 return 1; 2919 return;
2132 } 2920 }
2133 2921
2134 rmb(); 2922 rmb();
@@ -2173,138 +2961,10 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2173 2961
2174 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 2962 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2175 2963
2176 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 2964 spin_unlock_irqrestore(&phba->hbalock, iflag);
2177 2965 /* Handle the response IOCB */
2178 pring->iocb_continueq_cnt++; 2966 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
2179 if (irsp->ulpLe) { 2967 spin_lock_irqsave(&phba->hbalock, iflag);
2180 /*
2181 * By default, the driver expects to free all resources
2182 * associated with this iocb completion.
2183 */
2184 free_saveq = 1;
2185 saveq = list_get_first(&pring->iocb_continueq,
2186 struct lpfc_iocbq, list);
2187 irsp = &(saveq->iocb);
2188 list_del_init(&pring->iocb_continueq);
2189 pring->iocb_continueq_cnt = 0;
2190
2191 pring->stats.iocb_rsp++;
2192
2193 /*
2194 * If resource errors reported from HBA, reduce
2195 * queuedepths of the SCSI device.
2196 */
2197 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2198 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2199 spin_unlock_irqrestore(&phba->hbalock, iflag);
2200 lpfc_rampdown_queue_depth(phba);
2201 spin_lock_irqsave(&phba->hbalock, iflag);
2202 }
2203
2204 if (irsp->ulpStatus) {
2205 /* Rsp ring <ringno> error: IOCB */
2206 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2207 "0328 Rsp Ring %d error: "
2208 "IOCB Data: "
2209 "x%x x%x x%x x%x "
2210 "x%x x%x x%x x%x "
2211 "x%x x%x x%x x%x "
2212 "x%x x%x x%x x%x\n",
2213 pring->ringno,
2214 irsp->un.ulpWord[0],
2215 irsp->un.ulpWord[1],
2216 irsp->un.ulpWord[2],
2217 irsp->un.ulpWord[3],
2218 irsp->un.ulpWord[4],
2219 irsp->un.ulpWord[5],
2220 *(((uint32_t *) irsp) + 6),
2221 *(((uint32_t *) irsp) + 7),
2222 *(((uint32_t *) irsp) + 8),
2223 *(((uint32_t *) irsp) + 9),
2224 *(((uint32_t *) irsp) + 10),
2225 *(((uint32_t *) irsp) + 11),
2226 *(((uint32_t *) irsp) + 12),
2227 *(((uint32_t *) irsp) + 13),
2228 *(((uint32_t *) irsp) + 14),
2229 *(((uint32_t *) irsp) + 15));
2230 }
2231
2232 /*
2233 * Fetch the IOCB command type and call the correct
2234 * completion routine. Solicited and Unsolicited
2235 * IOCBs on the ELS ring get freed back to the
2236 * lpfc_iocb_list by the discovery kernel thread.
2237 */
2238 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2239 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2240 if (type == LPFC_SOL_IOCB) {
2241 spin_unlock_irqrestore(&phba->hbalock, iflag);
2242 rc = lpfc_sli_process_sol_iocb(phba, pring,
2243 saveq);
2244 spin_lock_irqsave(&phba->hbalock, iflag);
2245 } else if (type == LPFC_UNSOL_IOCB) {
2246 spin_unlock_irqrestore(&phba->hbalock, iflag);
2247 rc = lpfc_sli_process_unsol_iocb(phba, pring,
2248 saveq);
2249 spin_lock_irqsave(&phba->hbalock, iflag);
2250 if (!rc)
2251 free_saveq = 0;
2252 } else if (type == LPFC_ABORT_IOCB) {
2253 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
2254 ((cmdiocbp =
2255 lpfc_sli_iocbq_lookup(phba, pring,
2256 saveq)))) {
2257 /* Call the specified completion
2258 routine */
2259 if (cmdiocbp->iocb_cmpl) {
2260 spin_unlock_irqrestore(
2261 &phba->hbalock,
2262 iflag);
2263 (cmdiocbp->iocb_cmpl) (phba,
2264 cmdiocbp, saveq);
2265 spin_lock_irqsave(
2266 &phba->hbalock,
2267 iflag);
2268 } else
2269 __lpfc_sli_release_iocbq(phba,
2270 cmdiocbp);
2271 }
2272 } else if (type == LPFC_UNKNOWN_IOCB) {
2273 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2274
2275 char adaptermsg[LPFC_MAX_ADPTMSG];
2276
2277 memset(adaptermsg, 0,
2278 LPFC_MAX_ADPTMSG);
2279 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2280 MAX_MSG_DATA);
2281 dev_warn(&((phba->pcidev)->dev),
2282 "lpfc%d: %s\n",
2283 phba->brd_no, adaptermsg);
2284 } else {
2285 /* Unknown IOCB command */
2286 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2287 "0335 Unknown IOCB "
2288 "command Data: x%x "
2289 "x%x x%x x%x\n",
2290 irsp->ulpCommand,
2291 irsp->ulpStatus,
2292 irsp->ulpIoTag,
2293 irsp->ulpContext);
2294 }
2295 }
2296
2297 if (free_saveq) {
2298 list_for_each_entry_safe(rspiocbp, next_iocb,
2299 &saveq->list, list) {
2300 list_del(&rspiocbp->list);
2301 __lpfc_sli_release_iocbq(phba,
2302 rspiocbp);
2303 }
2304 __lpfc_sli_release_iocbq(phba, saveq);
2305 }
2306 rspiocbp = NULL;
2307 }
2308 2968
2309 2969	/*
2310 2970	* If the port response put pointer has not been updated, sync
@@ -2338,7 +2998,37 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2338 2998	}
2339 2999
2340 3000	spin_unlock_irqrestore(&phba->hbalock, iflag);
2341	return rc;
3001	return;
3002}
3003
3004/**
3005 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3006 * @phba: Pointer to HBA context object.
3007 * @pring: Pointer to driver SLI ring object.
3008 * @mask: Host attention register mask for this ring.
3009 *
3010 * This function is called from the worker thread when there is a pending
3011 * ELS response iocb on the driver internal slow-path response iocb worker
3012 * queue. The caller does not hold any lock. The function will remove each
3013 * response iocb from the response worker queue and call the handle
3014 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3015 **/
3016static void
3017lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3018 struct lpfc_sli_ring *pring, uint32_t mask)
3019{
3020 struct lpfc_iocbq *irspiocbq;
3021 unsigned long iflag;
3022
3023 while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
3024 /* Get the response iocb from the head of work queue */
3025 spin_lock_irqsave(&phba->hbalock, iflag);
3026 list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
3027 irspiocbq, struct lpfc_iocbq, list);
3028 spin_unlock_irqrestore(&phba->hbalock, iflag);
3029 /* Process the response iocb */
3030 lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
3031 }
2342 3032	}
2343 3033
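The _s4 handler above drains a driver-internal list rather than reading a hardware ring: entries are popped one at a time under hbalock and processed with the lock dropped. A minimal sketch of that drain pattern, using an illustrative item type and handler (demo_item, demo_handle, and demo_drain are not lpfc names):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_item {
	struct list_head list;
	/* payload omitted */
};

static void demo_handle(struct demo_item *item)
{
	kfree(item);		/* stand-in for real processing */
}

static void demo_drain(struct list_head *queue, spinlock_t *lock)
{
	struct demo_item *item;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	while (!list_empty(queue)) {
		/* Pop under the lock, process with the lock dropped. */
		item = list_first_entry(queue, struct demo_item, list);
		list_del_init(&item->list);
		spin_unlock_irqrestore(lock, flags);
		demo_handle(item);
		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}

Dropping the lock around the handler, as both this sketch and the routine above do, lets the handler take other locks or be preempted without holding up producers of the queue.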
2344 3034	/**
@@ -2420,7 +3110,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2420 3110	}
2421 3111
2422 3112	/**
2423	* lpfc_sli_brdready - Check for host status bits
3113	* lpfc_sli_brdready_s3 - Check for sli3 host ready status
2424 3114	* @phba: Pointer to HBA context object.
2425 3115	* @mask: Bit mask to be checked.
2426 3116	*
@@ -2432,8 +3122,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2432 3122	* function returns 1 when HBA fail to restart otherwise returns
2433 3123	* zero.
2434 3124	**/
2435	int
3125	static int
2436	lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3126	lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
2437 3127	{
2438 3128	uint32_t status;
2439 3129	int i = 0;
@@ -2477,6 +3167,56 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
2477 3167	return retval;
2478 3168	}
2479 3169
3170/**
3171 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3172 * @phba: Pointer to HBA context object.
3173 * @mask: Bit mask to be checked.
3174 *
3175 * This function checks the host status register to see if the HBA is
3176 * ready. This function will wait in a loop for the HBA to become ready.
3177 * If the HBA is not ready, the function will reset the HBA PCI
3178 * function again. The function returns 1 when the HBA fails to become
3179 * ready; otherwise it returns zero.
3180 **/
3181static int
3182lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3183{
3184 uint32_t status;
3185 int retval = 0;
3186
3187 /* Read the HBA Host Status Register */
3188 status = lpfc_sli4_post_status_check(phba);
3189
3190 if (status) {
3191 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3192 lpfc_sli_brdrestart(phba);
3193 status = lpfc_sli4_post_status_check(phba);
3194 }
3195
3196 /* Check to see if any errors occurred during init */
3197 if (status) {
3198 phba->link_state = LPFC_HBA_ERROR;
3199 retval = 1;
3200 } else
3201 phba->sli4_hba.intr_enable = 0;
3202
3203 return retval;
3204}
3205
3206/**
3207 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3208 * @phba: Pointer to HBA context object.
3209 * @mask: Bit mask to be checked.
3210 *
3211 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine,
3212 * dispatched through the API jump table function pointer in the lpfc_hba struct.
3213 **/
3214int
3215lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3216{
3217 return phba->lpfc_sli_brdready(phba, mask);
3218}
3219
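lpfc_sli_brdready() above is a thin wrapper around a per-HBA function pointer, so the SLI-3/SLI-4 decision is made once at attach time instead of on every call. A sketch of how such a jump table could be wired up (the helper name and switch shape here are illustrative, not the driver's actual setup routine; the pointer fields are the ones the wrappers in this patch dereference):

/* Illustrative only: select the SLI-rev specific handlers once the
 * interface type of the adapter is known. */
static void demo_sli_api_setup(struct lpfc_hba *phba, int sli_rev)
{
	switch (sli_rev) {
	case 3:
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
		break;
	case 4:
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
		break;
	}
}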
2480 3220	#define BARRIER_TEST_PATTERN (0xdeadbeef)
2481 3221
2482 3222	/**
@@ -2532,7 +3272,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
2532 3272	mdelay(1);
2533 3273
2534 3274	if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
2535	if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
3275	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
2536 3276	phba->pport->stopped)
2537 3277	goto restore_hc;
2538 3278	else
@@ -2613,7 +3353,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2613 3353	return 1;
2614 3354	}
2615 3355
2616	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
3356	spin_lock_irq(&phba->hbalock);
3357 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3358 spin_unlock_irq(&phba->hbalock);
2617 3359
2618 3360	mempool_free(pmb, phba->mbox_mem_pool);
2619 3361
@@ -2636,10 +3378,10 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2636 3378	}
2637 3379	spin_lock_irq(&phba->hbalock);
2638 3380	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3381 psli->mbox_active = NULL;
2639 3382	phba->link_flag &= ~LS_IGNORE_ERATT;
2640 3383	spin_unlock_irq(&phba->hbalock);
2641 3384
2642 psli->mbox_active = NULL;
2643 3385	lpfc_hba_down_post(phba);
2644 3386	phba->link_state = LPFC_HBA_ERROR;
2645 3387
@@ -2647,7 +3389,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
2647 3389	}
2648 3390
2649 3391	/**
2650	* lpfc_sli_brdreset - Reset the HBA
3392	* lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
2651 3393	* @phba: Pointer to HBA context object.
2652 3394	*
2653 3395	* This function resets the HBA by writing HC_INITFF to the control
@@ -2683,7 +3425,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2683 3425	(cfg_value &
2684 3426	~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
2685 3427
2686	psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
3428	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3429
2687 3430	/* Now toggle INITFF bit in the Host Control Register */
2688 3431	writel(HC_INITFF, phba->HCregaddr);
2689 3432	mdelay(1);
@@ -2710,7 +3453,66 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2710 3453	}
2711 3454
2712 3455	/**
2713	* lpfc_sli_brdrestart - Restart the HBA
3456	* lpfc_sli4_brdreset - Reset a sli-4 HBA
3457 * @phba: Pointer to HBA context object.
3458 *
3459 * This function resets a SLI4 HBA. This function disables PCI layer parity
3460 * checking while it resets the device. The caller is not required to hold
3461 * any locks.
3462 *
3463 * This function always returns 0.
3464 **/
3465int
3466lpfc_sli4_brdreset(struct lpfc_hba *phba)
3467{
3468 struct lpfc_sli *psli = &phba->sli;
3469 uint16_t cfg_value;
3470 uint8_t qindx;
3471
3472 /* Reset HBA */
3473 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3474 "0295 Reset HBA Data: x%x x%x\n",
3475 phba->pport->port_state, psli->sli_flag);
3476
3477 /* perform board reset */
3478 phba->fc_eventTag = 0;
3479 phba->pport->fc_myDID = 0;
3480 phba->pport->fc_prevDID = 0;
3481
3482 /* Turn off parity checking and serr during the physical reset */
3483 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3484 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3485 (cfg_value &
3486 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3487
3488 spin_lock_irq(&phba->hbalock);
3489 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3490 phba->fcf.fcf_flag = 0;
3491 /* Clean up the child queue list for the CQs */
3492 list_del_init(&phba->sli4_hba.mbx_wq->list);
3493 list_del_init(&phba->sli4_hba.els_wq->list);
3494 list_del_init(&phba->sli4_hba.hdr_rq->list);
3495 list_del_init(&phba->sli4_hba.dat_rq->list);
3496 list_del_init(&phba->sli4_hba.mbx_cq->list);
3497 list_del_init(&phba->sli4_hba.els_cq->list);
3498 list_del_init(&phba->sli4_hba.rxq_cq->list);
3499 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3500 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3501 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
3502 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3503 spin_unlock_irq(&phba->hbalock);
3504
3505 /* Now physically reset the device */
3506 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3507 "0389 Performing PCI function reset!\n");
3508 /* Perform FCoE PCI function reset */
3509 lpfc_pci_function_reset(phba);
3510
3511 return 0;
3512}
3513
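Both the SLI-3 and SLI-4 reset paths above mask PCI parity and SERR reporting around the physical reset so the reset pulse itself cannot raise spurious PCI errors. The read-modify-write in isolation, using the standard config-space accessors (the demo function name is illustrative):

#include <linux/pci.h>

/* Temporarily mask parity/SERR reporting before a device reset;
 * returns the saved PCI_COMMAND value for the caller to restore. */
static u16 demo_mask_pci_errors(struct pci_dev *pdev)
{
	u16 cfg_value;

	pci_read_config_word(pdev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(pdev, PCI_COMMAND,
			      cfg_value &
			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));
	return cfg_value;
}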
3514/**
3515 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
2714 3516	* @phba: Pointer to HBA context object.
2715 3517	*
2716 3518	* This function is called in the SLI initialization code path to
@@ -2722,8 +3524,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2722 3524	* The function does not guarantee completion of MBX_RESTART mailbox
2723 3525	* command before the return of this function.
2724 3526	**/
2725	int
3527	static int
2726	lpfc_sli_brdrestart(struct lpfc_hba *phba)
3528	lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
2727 3529	{
2728 3530	MAILBOX_t *mb;
2729 3531	struct lpfc_sli *psli;
@@ -2762,7 +3564,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2762 3564	lpfc_sli_brdreset(phba);
2763 3565	phba->pport->stopped = 0;
2764 3566	phba->link_state = LPFC_INIT_START;
2765
3567	phba->hba_flag = 0;
2766 3568	spin_unlock_irq(&phba->hbalock);
2767 3569
2768 3570	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -2777,6 +3579,55 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2777 3579	}
2778 3580
2779 3581	/**
3582 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
3583 * @phba: Pointer to HBA context object.
3584 *
3585 * This function is called in the SLI initialization code path to restart
3586 * a SLI4 HBA. The caller is not required to hold any lock.
3587 * At the end of the function, it calls the lpfc_hba_down_post function to
3588 * free any pending commands.
3589 **/
3590static int
3591lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3592{
3593 struct lpfc_sli *psli = &phba->sli;
3594
3595
3596 /* Restart HBA */
3597 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3598 "0296 Restart HBA Data: x%x x%x\n",
3599 phba->pport->port_state, psli->sli_flag);
3600
3601 lpfc_sli4_brdreset(phba);
3602
3603 spin_lock_irq(&phba->hbalock);
3604 phba->pport->stopped = 0;
3605 phba->link_state = LPFC_INIT_START;
3606 phba->hba_flag = 0;
3607 spin_unlock_irq(&phba->hbalock);
3608
3609 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3610 psli->stats_start = get_seconds();
3611
3612 lpfc_hba_down_post(phba);
3613
3614 return 0;
3615}
3616
3617/**
3618 * lpfc_sli_brdrestart - Wrapper func for restarting hba
3619 * @phba: Pointer to HBA context object.
3620 *
3621 * This routine wraps the actual SLI3 or SLI4 hba restart routine, dispatched
3622 * through the API jump table function pointer in the lpfc_hba struct.
3623 **/
3624int
3625lpfc_sli_brdrestart(struct lpfc_hba *phba)
3626{
3627 return phba->lpfc_sli_brdrestart(phba);
3628}
3629
3630/**
2780 3631	* lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
2781 3632	* @phba: Pointer to HBA context object.
2782 3633	*
@@ -2940,7 +3791,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2940 3791	if (!pmb)
2941 3792	return -ENOMEM;
2942 3793
2943	pmbox = &pmb->mb;
3794	pmbox = &pmb->u.mb;
2944 3795
2945 3796	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
2946 3797	phba->link_state = LPFC_INIT_MBX_CMDS;
@@ -2984,6 +3835,26 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2984 3835	}
2985 3836
2986 3837	/**
3838 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
3839 * @phba: Pointer to HBA context object.
3840 *
3841 * This function is called during the SLI initialization to configure
3842 * all the HBQs and post buffers to the HBQ. The caller is not
3843 * required to hold any locks. This function will return zero if successful,
3844 * else it will return a negative error code.
3845 **/
3846static int
3847lpfc_sli4_rb_setup(struct lpfc_hba *phba)
3848{
3849 phba->hbq_in_use = 1;
3850 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
3851 phba->hbq_count = 1;
3852 /* Initially populate or replenish the HBQs */
3853 lpfc_sli_hbqbuf_init_hbqs(phba, 0);
3854 return 0;
3855}
3856
3857/**
2987 3858	* lpfc_sli_config_port - Issue config port mailbox command
2988 3859	* @phba: Pointer to HBA context object.
2989 3860	* @sli_mode: sli mode - 2/3
@@ -3047,33 +3918,43 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3047 3918	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3048 3919	"0442 Adapter failed to init, mbxCmd x%x "
3049 3920	"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
3050	pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0);
3921	pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
3051 3922	spin_lock_irq(&phba->hbalock);
3052	phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
3923	phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
3053 3924	spin_unlock_irq(&phba->hbalock);
3054 3925	rc = -ENXIO;
3055	} else
3926	} else {
3927	/* Allow asynchronous mailbox command to go through */
3928	spin_lock_irq(&phba->hbalock);
3929	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
3930	spin_unlock_irq(&phba->hbalock);
3056 3931	done = 1;
3932	}
3057 3933	}
3058 3934	if (!done) {
3059 3935	rc = -EINVAL;
3060 3936	goto do_prep_failed;
3061 3937	}
3062	if (pmb->mb.un.varCfgPort.sli_mode == 3) {
3938	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
3063	if (!pmb->mb.un.varCfgPort.cMA) {
3939	if (!pmb->u.mb.un.varCfgPort.cMA) {
3064 3940	rc = -ENXIO;
3065 3941	goto do_prep_failed;
3066 3942	}
3067	if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) {
3943	if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
3068 3944	phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3069	phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi;
3945	phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
3946	phba->max_vports = (phba->max_vpi > phba->max_vports) ?
3947	phba->max_vpi : phba->max_vports;
3948
3070 3949	} else
3071 3950	phba->max_vpi = 0;
3072	if (pmb->mb.un.varCfgPort.gerbm)
3951	if (pmb->u.mb.un.varCfgPort.gdss)
3952	phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
3953	if (pmb->u.mb.un.varCfgPort.gerbm)
3073 3954	phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
3074	if (pmb->mb.un.varCfgPort.gcrp)
3955	if (pmb->u.mb.un.varCfgPort.gcrp)
3075 3956	phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
3076	if (pmb->mb.un.varCfgPort.ginb) {
3957	if (pmb->u.mb.un.varCfgPort.ginb) {
3077 3958	phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
3078 3959	phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
3079 3960	phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
@@ -3089,7 +3970,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3089 3970	}
3090 3971
3091 3972	if (phba->cfg_enable_bg) {
3092	if (pmb->mb.un.varCfgPort.gbg)
3973	if (pmb->u.mb.un.varCfgPort.gbg)
3093 3974	phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
3094 3975	else
3095 3976	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -3184,8 +4065,9 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
3184 4065	if (rc)
3185 4066	goto lpfc_sli_hba_setup_error;
3186 4067	}
3187
4068	spin_lock_irq(&phba->hbalock);
3188 4069	phba->sli.sli_flag |= LPFC_PROCESS_LA;
4070 spin_unlock_irq(&phba->hbalock);
3189 4071
3190 4072	rc = lpfc_config_port_post(phba);
3191 4073	if (rc)
@@ -3200,6 +4082,488 @@ lpfc_sli_hba_setup_error:
3200 4082	return rc;
3201 4083	}
3202 4084
4085/**
4086 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4087 * @phba: Pointer to HBA context object.
4088 * @mboxq: mailbox pointer.
4089 * This function issues a dump mailbox command to read config region
4090 * 23, parses the records in the region, and populates the driver
4091 * data structures.
4092 **/
4093static int
4094lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4095 LPFC_MBOXQ_t *mboxq)
4096{
4097 struct lpfc_dmabuf *mp;
4098 struct lpfc_mqe *mqe;
4099 uint32_t data_length;
4100 int rc;
4101
4102 /* Program the default value of vlan_id and fc_map */
4103 phba->valid_vlan = 0;
4104 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4105 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4106 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4107
4108 mqe = &mboxq->u.mqe;
4109 if (lpfc_dump_fcoe_param(phba, mboxq))
4110 return -ENOMEM;
4111
4112 mp = (struct lpfc_dmabuf *) mboxq->context1;
4113 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4114
4115 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4116 "(%d):2571 Mailbox cmd x%x Status x%x "
4117 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4118 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4119 "CQ: x%x x%x x%x x%x\n",
4120 mboxq->vport ? mboxq->vport->vpi : 0,
4121 bf_get(lpfc_mqe_command, mqe),
4122 bf_get(lpfc_mqe_status, mqe),
4123 mqe->un.mb_words[0], mqe->un.mb_words[1],
4124 mqe->un.mb_words[2], mqe->un.mb_words[3],
4125 mqe->un.mb_words[4], mqe->un.mb_words[5],
4126 mqe->un.mb_words[6], mqe->un.mb_words[7],
4127 mqe->un.mb_words[8], mqe->un.mb_words[9],
4128 mqe->un.mb_words[10], mqe->un.mb_words[11],
4129 mqe->un.mb_words[12], mqe->un.mb_words[13],
4130 mqe->un.mb_words[14], mqe->un.mb_words[15],
4131 mqe->un.mb_words[16], mqe->un.mb_words[50],
4132 mboxq->mcqe.word0,
4133 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4134 mboxq->mcqe.trailer);
4135
4136 if (rc) {
4137 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4138 kfree(mp);
4139 return -EIO;
4140 }
4141 data_length = mqe->un.mb_words[5];
4142 if (data_length > DMP_FCOEPARAM_RGN_SIZE)
4143 return -EIO;
4144
4145 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4146 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4147 kfree(mp);
4148 return 0;
4149}
4150
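Note the order of checks in the routine above: the mailbox status is tested first, and the device-reported region length is then bounded by the size of the posted buffer before anything is parsed. A condensed fragment of that trust-but-verify step, written to free the DMA buffer on every path (in the routine above the oversize-length return skips the free; names are taken from the function itself):

	data_length = mqe->un.mb_words[5];	/* device-supplied length */
	if (data_length > DMP_FCOEPARAM_RGN_SIZE) {
		/* never parse past the buffer actually posted */
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}
	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	return 0;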
4151/**
4152 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4153 * @phba: pointer to lpfc hba data structure.
4154 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4155 * @vpd: pointer to the memory to hold resulting port vpd data.
4156 * @vpd_size: On input, the number of bytes allocated to @vpd.
4157 * On output, the number of data bytes in @vpd.
4158 *
4159 * This routine executes a READ_REV SLI4 mailbox command. In
4160 * addition, this routine gets the port vpd data.
4161 *
4162 * Return codes
4163 * 0 - successful
4164 * ENOMEM - could not allocate memory.
4165 **/
4166static int
4167lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4168 uint8_t *vpd, uint32_t *vpd_size)
4169{
4170 int rc = 0;
4171 uint32_t dma_size;
4172 struct lpfc_dmabuf *dmabuf;
4173 struct lpfc_mqe *mqe;
4174
4175 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4176 if (!dmabuf)
4177 return -ENOMEM;
4178
4179 /*
4180 * Get a DMA buffer for the vpd data resulting from the READ_REV
4181 * mailbox command.
4182 */
4183 dma_size = *vpd_size;
4184 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4185 dma_size,
4186 &dmabuf->phys,
4187 GFP_KERNEL);
4188 if (!dmabuf->virt) {
4189 kfree(dmabuf);
4190 return -ENOMEM;
4191 }
4192 memset(dmabuf->virt, 0, dma_size);
4193
4194 /*
4195 * The SLI4 implementation of READ_REV conflicts at word1,
4196 * bits 31:16 and SLI4 adds vpd functionality not present
4197 * in SLI3. This code corrects the conflicts.
4198 */
4199 lpfc_read_rev(phba, mboxq);
4200 mqe = &mboxq->u.mqe;
4201 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4202 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4203 mqe->un.read_rev.word1 &= 0x0000FFFF;
4204 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4205 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4206
4207 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4208 if (rc) {
4209 dma_free_coherent(&phba->pcidev->dev, dma_size,
4210 dmabuf->virt, dmabuf->phys);
4211 return -EIO;
4212 }
4213
4214 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4215 "(%d):0380 Mailbox cmd x%x Status x%x "
4216 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4217 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4218 "CQ: x%x x%x x%x x%x\n",
4219 mboxq->vport ? mboxq->vport->vpi : 0,
4220 bf_get(lpfc_mqe_command, mqe),
4221 bf_get(lpfc_mqe_status, mqe),
4222 mqe->un.mb_words[0], mqe->un.mb_words[1],
4223 mqe->un.mb_words[2], mqe->un.mb_words[3],
4224 mqe->un.mb_words[4], mqe->un.mb_words[5],
4225 mqe->un.mb_words[6], mqe->un.mb_words[7],
4226 mqe->un.mb_words[8], mqe->un.mb_words[9],
4227 mqe->un.mb_words[10], mqe->un.mb_words[11],
4228 mqe->un.mb_words[12], mqe->un.mb_words[13],
4229 mqe->un.mb_words[14], mqe->un.mb_words[15],
4230 mqe->un.mb_words[16], mqe->un.mb_words[50],
4231 mboxq->mcqe.word0,
4232 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4233 mboxq->mcqe.trailer);
4234
4235 /*
4236 * The available vpd length cannot be bigger than the
4237 * DMA buffer passed to the port. Catch the less than
4238 * case and update the caller's size.
4239 */
4240 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4241 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4242
4243 lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
4244 dma_free_coherent(&phba->pcidev->dev, dma_size,
4245 dmabuf->virt, dmabuf->phys);
4246 kfree(dmabuf);
4247 return 0;
4248}
4249
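A caller of lpfc_sli4_read_rev() passes the buffer size in through *vpd_size and reads the valid data length back out of it, since the routine above may shrink it to the port's available vpd length. Illustrative usage from a hypothetical same-file caller (the actual consumer is lpfc_sli4_hba_setup() later in this patch; PAGE_SIZE as the allocation is taken from there):

	uint8_t *vpd;
	uint32_t vpd_size = PAGE_SIZE;	/* in: bytes allocated below */
	int rc;

	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd)
		return -ENOMEM;
	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	/* out: on success, vpd_size holds the number of valid bytes */
	if (!rc)
		lpfc_parse_vpd(phba, vpd, vpd_size);
	kfree(vpd);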
4250/**
4251 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4252 * @phba: pointer to lpfc hba data structure.
4253 *
4254 * This routine is called to explicitly arm the SLI4 device's completion and
4255 * event queues.
4256 **/
4257static void
4258lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4259{
4260 uint8_t fcp_eqidx;
4261
4262 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4263 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4264 lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
4265 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4266 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4267 LPFC_QUEUE_REARM);
4268 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4269 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4270 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4271 LPFC_QUEUE_REARM);
4272}
4273
4274/**
4275 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
4276 * @phba: Pointer to HBA context object.
4277 *
4278 * This function is the main SLI4 device initialization PCI function. This
4279 * function is called by the HBA initialization code, HBA reset code and
4280 * HBA error attention handler code. Caller is not required to hold any
4281 * locks.
4282 **/
4283int
4284lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4285{
4286 int rc;
4287 LPFC_MBOXQ_t *mboxq;
4288 struct lpfc_mqe *mqe;
4289 uint8_t *vpd;
4290 uint32_t vpd_size;
4291 uint32_t ftr_rsp = 0;
4292 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
4293 struct lpfc_vport *vport = phba->pport;
4294 struct lpfc_dmabuf *mp;
4295
4296 /* Perform a PCI function reset to start from clean */
4297 rc = lpfc_pci_function_reset(phba);
4298 if (unlikely(rc))
4299 return -ENODEV;
4300
4301 /* Check the HBA Host Status Register for readiness */
4302 rc = lpfc_sli4_post_status_check(phba);
4303 if (unlikely(rc))
4304 return -ENODEV;
4305 else {
4306 spin_lock_irq(&phba->hbalock);
4307 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
4308 spin_unlock_irq(&phba->hbalock);
4309 }
4310
4311 /*
4312 * Allocate a single mailbox container for initializing the
4313 * port.
4314 */
4315 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4316 if (!mboxq)
4317 return -ENOMEM;
4318
4319 /*
4320 * Continue initialization with default values even if driver failed
4321 * to read FCoE param config regions
4322 */
4323 if (lpfc_sli4_read_fcoe_params(phba, mboxq))
4324 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4325 "2570 Failed to read FCoE parameters \n");
4326
4327 /* Issue READ_REV to collect vpd and FW information. */
4328 vpd_size = PAGE_SIZE;
4329 vpd = kzalloc(vpd_size, GFP_KERNEL);
4330 if (!vpd) {
4331 rc = -ENOMEM;
4332 goto out_free_mbox;
4333 }
4334
4335 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
4336 if (unlikely(rc))
4337 goto out_free_vpd;
4338
4339 mqe = &mboxq->u.mqe;
4340 if ((bf_get(lpfc_mbx_rd_rev_sli_lvl,
4341 &mqe->un.read_rev) != LPFC_SLI_REV4) ||
4342 (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) {
4343 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4344 "0376 READ_REV Error. SLI Level %d "
4345 "FCoE enabled %d\n",
4346 bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev),
4347 bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev));
4348 rc = -EIO;
4349 goto out_free_vpd;
4350 }
4351 /* Single threaded at this point, no need for lock */
4352 spin_lock_irq(&phba->hbalock);
4353 phba->hba_flag |= HBA_FCOE_SUPPORT;
4354 spin_unlock_irq(&phba->hbalock);
4355 /*
4356 * Evaluate the read rev and vpd data. Populate the driver
4357 * state with the results. If this routine fails, the failure
4358 * is not fatal as the driver will use generic values.
4359 */
4360 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
4361 if (unlikely(!rc)) {
4362 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4363 "0377 Error %d parsing vpd. "
4364 "Using defaults.\n", rc);
4365 rc = 0;
4366 }
4367
4368 /* By now, we should determine the SLI revision, hard code for now */
4369 phba->sli_rev = LPFC_SLI_REV4;
4370
4371 /*
4372 * Discover the port's supported feature set and match it against the
4373 * hosts requests.
4374 */
4375 lpfc_request_features(phba, mboxq);
4376 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4377 if (unlikely(rc)) {
4378 rc = -EIO;
4379 goto out_free_vpd;
4380 }
4381
4382 /*
4383 * The port must support FCP initiator mode as this is the
4384 * only mode running in the host.
4385 */
4386 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
4387 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4388 "0378 No support for fcpi mode.\n");
4389 ftr_rsp++;
4390 }
4391
4392 /*
4393 * If the port cannot support the host's requested features
4394 * then turn off the global config parameters to disable the
4395 * feature in the driver. This is not a fatal error.
4396 */
4397 if ((phba->cfg_enable_bg) &&
4398 !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4399 ftr_rsp++;
4400
4401 if (phba->max_vpi && phba->cfg_enable_npiv &&
4402 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4403 ftr_rsp++;
4404
4405 if (ftr_rsp) {
4406 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4407 "0379 Feature Mismatch Data: x%08x %08x "
4408 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
4409 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
4410 phba->cfg_enable_npiv, phba->max_vpi);
4411 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4412 phba->cfg_enable_bg = 0;
4413 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4414 phba->cfg_enable_npiv = 0;
4415 }
4416
4417 /* These SLI3 features are assumed in SLI4 */
4418 spin_lock_irq(&phba->hbalock);
4419 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
4420 spin_unlock_irq(&phba->hbalock);
4421
4422 /* Read the port's service parameters. */
4423 lpfc_read_sparam(phba, mboxq, vport->vpi);
4424 mboxq->vport = vport;
4425 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4426 mp = (struct lpfc_dmabuf *) mboxq->context1;
4427 if (rc == MBX_SUCCESS) {
4428 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
4429 rc = 0;
4430 }
4431
4432 /*
4433 * This memory was allocated by the lpfc_read_sparam routine. Release
4434 * it to the mbuf pool.
4435 */
4436 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4437 kfree(mp);
4438 mboxq->context1 = NULL;
4439 if (unlikely(rc)) {
4440 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4441 "0382 READ_SPARAM command failed "
4442 "status %d, mbxStatus x%x\n",
4443 rc, bf_get(lpfc_mqe_status, mqe));
4444 phba->link_state = LPFC_HBA_ERROR;
4445 rc = -EIO;
4446 goto out_free_vpd;
4447 }
4448
4449 if (phba->cfg_soft_wwnn)
4450 u64_to_wwn(phba->cfg_soft_wwnn,
4451 vport->fc_sparam.nodeName.u.wwn);
4452 if (phba->cfg_soft_wwpn)
4453 u64_to_wwn(phba->cfg_soft_wwpn,
4454 vport->fc_sparam.portName.u.wwn);
4455 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
4456 sizeof(struct lpfc_name));
4457 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
4458 sizeof(struct lpfc_name));
4459
4460 /* Update the fc_host data structures with new wwn. */
4461 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4462 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4463
4464 /* Register SGL pool to the device using non-embedded mailbox command */
4465 rc = lpfc_sli4_post_sgl_list(phba);
4466 if (unlikely(rc)) {
4467 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4468 "0582 Error %d during sgl post operation", rc);
4469 rc = -ENODEV;
4470 goto out_free_vpd;
4471 }
4472
4473 /* Register SCSI SGL pool to the device */
4474 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4475 if (unlikely(rc)) {
4476 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4477 "0383 Error %d during scsi sgl post opeation",
4478 rc);
4479 /* Some Scsi buffers were moved to the abort scsi list */
4480 /* A pci function reset will repost them */
4481 rc = -ENODEV;
4482 goto out_free_vpd;
4483 }
4484
4485 /* Post the rpi header region to the device. */
4486 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
4487 if (unlikely(rc)) {
4488 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4489 "0393 Error %d during rpi post operation\n",
4490 rc);
4491 rc = -ENODEV;
4492 goto out_free_vpd;
4493 }
4494 /* Temporary initialization of lpfc_fip_flag to non-fip */
4495 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
4496
4497 /* Set up all the queues to the device */
4498 rc = lpfc_sli4_queue_setup(phba);
4499 if (unlikely(rc)) {
4500 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4501 "0381 Error %d during queue setup.\n ", rc);
4502 goto out_stop_timers;
4503 }
4504
4505 /* Arm the CQs and then EQs on device */
4506 lpfc_sli4_arm_cqeq_intr(phba);
4507
4508 /* Indicate device interrupt mode */
4509 phba->sli4_hba.intr_enable = 1;
4510
4511 /* Allow asynchronous mailbox command to go through */
4512 spin_lock_irq(&phba->hbalock);
4513 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4514 spin_unlock_irq(&phba->hbalock);
4515
4516 /* Post receive buffers to the device */
4517 lpfc_sli4_rb_setup(phba);
4518
4519 /* Start the ELS watchdog timer */
4520 /*
4521 * The driver for SLI4 is not yet ready to process timeouts
4522 * or interrupts. Once it is, the comment bars can be removed.
4523 */
4524 /* mod_timer(&vport->els_tmofunc,
4525 * jiffies + HZ * (phba->fc_ratov*2)); */
4526
4527 /* Start heart beat timer */
4528 mod_timer(&phba->hb_tmofunc,
4529 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
4530 phba->hb_outstanding = 0;
4531 phba->last_completion_time = jiffies;
4532
4533 /* Start error attention (ERATT) polling timer */
4534 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
4535
4536 /*
4537 * The port is ready, set the host's link state to LINK_DOWN
4538 * in preparation for link interrupts.
4539 */
4540 lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
4541 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4542 lpfc_set_loopback_flag(phba);
4543 /* Change driver state to LPFC_LINK_DOWN right before init link */
4544 spin_lock_irq(&phba->hbalock);
4545 phba->link_state = LPFC_LINK_DOWN;
4546 spin_unlock_irq(&phba->hbalock);
4547 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
4548 if (unlikely(rc != MBX_NOT_FINISHED)) {
4549 kfree(vpd);
4550 return 0;
4551 } else
4552 rc = -EIO;
4553
4554 /* Unset all the queues set up in this routine when error out */
4555 if (rc)
4556 lpfc_sli4_queue_unset(phba);
4557
4558out_stop_timers:
4559 if (rc)
4560 lpfc_stop_hba_timers(phba);
4561out_free_vpd:
4562 kfree(vpd);
4563out_free_mbox:
4564 mempool_free(mboxq, phba->mbox_mem_pool);
4565 return rc;
4566}
3203 4567
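lpfc_sli4_hba_setup() above unwinds through a label ladder on failure, undoing only what has already been set up, in reverse order of acquisition; the usual kernel error-path idiom. Reduced to its skeleton (all names illustrative):

static int demo_step_a(void);
static int demo_step_b(void);
static int demo_step_c(void);
static void demo_undo_a(void);
static void demo_undo_b(void);

static int demo_setup(void)
{
	int rc;

	rc = demo_step_a();
	if (rc)
		return rc;
	rc = demo_step_b();
	if (rc)
		goto out_undo_a;
	rc = demo_step_c();
	if (rc)
		goto out_undo_b;
	return 0;		/* success: keep everything */

out_undo_b:
	demo_undo_b();
out_undo_a:
	demo_undo_a();
	return rc;
}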
3204 4568	/**
3205 4569	* lpfc_mbox_timeout - Timeout call back function for mbox timer
@@ -3244,7 +4608,7 @@ void
3244 4608	lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3245 4609	{
3246 4610	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
3247	MAILBOX_t *mb = &pmbox->mb;
4611	MAILBOX_t *mb = &pmbox->u.mb;
3248 4612	struct lpfc_sli *psli = &phba->sli;
3249 4613	struct lpfc_sli_ring *pring;
3250 4614
@@ -3281,7 +4645,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3281 4645	spin_unlock_irq(&phba->pport->work_port_lock);
3282 4646	spin_lock_irq(&phba->hbalock);
3283 4647	phba->link_state = LPFC_LINK_UNKNOWN;
3284	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
4648	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3285 4649	spin_unlock_irq(&phba->hbalock);
3286 4650
3287 4651	pring = &psli->ring[psli->fcp_ring];
@@ -3289,32 +4653,20 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3289 4653
3290 4654	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3291 4655	"0345 Resetting board due to mailbox timeout\n");
3292	/*
4656
3293	* lpfc_offline calls lpfc_sli_hba_down which will clean up
4657	/* Reset the HBA device */
3294	* on oustanding mailbox commands.
4658	lpfc_reset_hba(phba);
3295 */
3296 /* If resets are disabled then set error state and return. */
3297 if (!phba->cfg_enable_hba_reset) {
3298 phba->link_state = LPFC_HBA_ERROR;
3299 return;
3300 }
3301 lpfc_offline_prep(phba);
3302 lpfc_offline(phba);
3303 lpfc_sli_brdrestart(phba);
3304 lpfc_online(phba);
3305 lpfc_unblock_mgmt_io(phba);
3306 return;
3307 4659	}
3308 4660
3309 4661	/**
3310	* lpfc_sli_issue_mbox - Issue a mailbox command to firmware
4662	* lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
3311 4663	* @phba: Pointer to HBA context object.
3312 4664	* @pmbox: Pointer to mailbox object.
3313 4665	* @flag: Flag indicating how the mailbox need to be processed.
3314 4666	*
3315 4667	* This function is called by discovery code and HBA management code
3316	* to submit a mailbox command to firmware. This function gets the
4668	* to submit a mailbox command to firmware with SLI-3 interface spec. This
3317	* hbalock to protect the data structures.
4669	* function gets the hbalock to protect the data structures.
3318 4670	* The mailbox command can be submitted in polling mode, in which case
3319 4671	* this function will wait in a polling loop for the completion of the
3320 4672	* mailbox.
@@ -3332,8 +4684,9 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3332 4684	* return codes the caller owns the mailbox command after the return of
3333 4685	* the function.
3334 4686	**/
3335	int
4687	static int
3336	lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
4688	lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
4689	uint32_t flag)
3337 4690	{
3338 4691	MAILBOX_t *mb;
3339 4692	struct lpfc_sli *psli = &phba->sli;
@@ -3349,6 +4702,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3349 4702	spin_lock_irqsave(&phba->hbalock, drvr_flag);
3350 4703	if (!pmbox) {
3351 4704	/* processing mbox queue from intr_handler */
4705 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
4706 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4707 return MBX_SUCCESS;
4708 }
3352 4709	processing_queue = 1;
3353 4710	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3354 4711	pmbox = lpfc_mbox_get(phba);
@@ -3365,7 +4722,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3365 4722	lpfc_printf_log(phba, KERN_ERR,
3366 4723	LOG_MBOX | LOG_VPORT,
3367 4724	"1806 Mbox x%x failed. No vport\n",
3368	pmbox->mb.mbxCommand);
4725	pmbox->u.mb.mbxCommand);
3369 4726	dump_stack();
3370 4727	goto out_not_finished;
3371 4728	}
@@ -3385,21 +4742,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3385 4742
3386 4743	psli = &phba->sli;
3387 4744
3388	mb = &pmbox->mb;
4745	mb = &pmbox->u.mb;
3389 4746	status = MBX_SUCCESS;
3390 4747
3391 4748	if (phba->link_state == LPFC_HBA_ERROR) {
3392 4749	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3393 4750
3394 4751	/* Mbox command <mbxCommand> cannot issue */
3395	LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
4752	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4753	"(%d):0311 Mailbox command x%x cannot "
4754	"issue Data: x%x x%x\n",
4755	pmbox->vport ? pmbox->vport->vpi : 0,
4756	pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
3396 4757	goto out_not_finished;
3397 4758	}
3398 4759
3399 4760	if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
3400 4761	!(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
3401 4762	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3402	LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
4763	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4764	"(%d):2528 Mailbox command x%x cannot "
4765	"issue Data: x%x x%x\n",
4766	pmbox->vport ? pmbox->vport->vpi : 0,
4767	pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
3403 4768	goto out_not_finished;
3404 4769	}
3405 4770
@@ -3413,14 +4778,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3413 4778	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3414 4779
3415 4780	/* Mbox command <mbxCommand> cannot issue */
3416	LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
4781	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4782	"(%d):2529 Mailbox command x%x "
4783	"cannot issue Data: x%x x%x\n",
4784	pmbox->vport ? pmbox->vport->vpi : 0,
4785	pmbox->u.mb.mbxCommand,
4786	psli->sli_flag, flag);
3417 4787	goto out_not_finished;
3418 4788	}
3419 4789
3420	if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
4790	if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
3421 4791	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3422 4792	/* Mbox command <mbxCommand> cannot issue */
3423	LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
4793	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4794	"(%d):2530 Mailbox command x%x "
4795	"cannot issue Data: x%x x%x\n",
4796	pmbox->vport ? pmbox->vport->vpi : 0,
4797	pmbox->u.mb.mbxCommand,
4798	psli->sli_flag, flag);
3424 4799	goto out_not_finished;
3425 4800	}
3426 4801
@@ -3462,12 +4837,17 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3462 4837
3463 4838	/* If we are not polling, we MUST be in SLI2 mode */
3464 4839	if (flag != MBX_POLL) {
3465	if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
4840	if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
3466 4841	(mb->mbxCommand != MBX_KILL_BOARD)) {
3467 4842	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3468 4843	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
3469 4844	/* Mbox command <mbxCommand> cannot issue */
3470	LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
4845	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4846	"(%d):2531 Mailbox command x%x "
4847	"cannot issue Data: x%x x%x\n",
4848	pmbox->vport ? pmbox->vport->vpi : 0,
4849	pmbox->u.mb.mbxCommand,
4850	psli->sli_flag, flag);
3471 4851	goto out_not_finished;
3472 4852	}
3473 4853	/* timeout active mbox command */
@@ -3506,7 +4886,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3506 4886	/* next set own bit for the adapter and copy over command word */
3507 4887	mb->mbxOwner = OWN_CHIP;
3508 4888
3509	if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
4889	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3510 4890	/* First copy command data to host SLIM area */
3511 4891	lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
3512 4892	} else {
@@ -3529,7 +4909,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3529 4909
3530 4910	if (mb->mbxCommand == MBX_CONFIG_PORT) {
3531 4911	/* switch over to host mailbox */
3532	psli->sli_flag |= LPFC_SLI2_ACTIVE;
4912	psli->sli_flag |= LPFC_SLI_ACTIVE;
3533 4913	}
3534 4914	}
3535 4915
@@ -3552,7 +4932,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3552 4932	writel(CA_MBATT, phba->CAregaddr);
3553 4933	readl(phba->CAregaddr); /* flush */
3554 4934
3555	if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
4935	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3556 4936	/* First read mbox status word */
3557 4937	word0 = *((uint32_t *)phba->mbox);
3558 4938	word0 = le32_to_cpu(word0);
@@ -3591,7 +4971,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3591 4971	spin_lock_irqsave(&phba->hbalock, drvr_flag);
3592 4972	}
3593 4973
3594	if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
4974	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3595 4975	/* First copy command data */
3596 4976	word0 = *((uint32_t *)phba->mbox);
3597 4977	word0 = le32_to_cpu(word0);
@@ -3604,7 +4984,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3604 4984	if (((slimword0 & OWN_CHIP) != OWN_CHIP)
3605 4985	&& slimmb->mbxStatus) {
3606 4986	psli->sli_flag &=
3607	~LPFC_SLI2_ACTIVE;
4987	~LPFC_SLI_ACTIVE;
3608 4988	word0 = slimword0;
3609 4989	}
3610 4990	}
@@ -3616,7 +4996,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3616 4996	ha_copy = readl(phba->HAregaddr);
3617 4997	}
3618 4998
3619	if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
4999	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
3620 5000	/* copy results back to user */
3621 5001	lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
3622 5002	} else {
@@ -3643,13 +5023,420 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
3643 5023
3644 5024	out_not_finished:
3645 5025	if (processing_queue) {
3646	pmbox->mb.mbxStatus = MBX_NOT_FINISHED;
5026	pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
3647 5027	lpfc_mbox_cmpl_put(phba, pmbox);
3648 5028	}
3649 5029	return MBX_NOT_FINISHED;
3650 5030	}
3651 5031
3652 5032	/**
5033 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
5034 * @phba: Pointer to HBA context object.
5035 * @mboxq: Pointer to mailbox object.
5036 *
5037 * The function posts a mailbox to the port. The mailbox is expected
5038 * to be completely filled in and ready for the port to operate on it.
5039 * This routine executes a synchronous completion operation on the
5040 * mailbox by polling for its completion.
5041 *
5042 * The caller must not be holding any locks when calling this routine.
5043 *
5044 * Returns:
5045 * MBX_SUCCESS - mailbox posted successfully
5046 * Any of the MBX error values.
5047 **/
5048static int
5049lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5050{
5051 int rc = MBX_SUCCESS;
5052 unsigned long iflag;
5053 uint32_t db_ready;
5054 uint32_t mcqe_status;
5055 uint32_t mbx_cmnd;
5056 unsigned long timeout;
5057 struct lpfc_sli *psli = &phba->sli;
5058 struct lpfc_mqe *mb = &mboxq->u.mqe;
5059 struct lpfc_bmbx_create *mbox_rgn;
5060 struct dma_address *dma_address;
5061 struct lpfc_register bmbx_reg;
5062
5063 /*
5064 * Only one mailbox can be active to the bootstrap mailbox region
5065 * at a time and there is no queueing provided.
5066 */
5067 spin_lock_irqsave(&phba->hbalock, iflag);
5068 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5069 spin_unlock_irqrestore(&phba->hbalock, iflag);
5070 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5071 "(%d):2532 Mailbox command x%x (x%x) "
5072 "cannot issue Data: x%x x%x\n",
5073 mboxq->vport ? mboxq->vport->vpi : 0,
5074 mboxq->u.mb.mbxCommand,
5075 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5076 psli->sli_flag, MBX_POLL);
5077 return MBXERR_ERROR;
5078 }
5079 /* The server grabs the token and owns it until release */
5080 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5081 phba->sli.mbox_active = mboxq;
5082 spin_unlock_irqrestore(&phba->hbalock, iflag);
5083
5084 /*
5085 * Initialize the bootstrap memory region to avoid stale data areas
5086 * in the mailbox post. Then copy the caller's mailbox contents to
5087 * the bmbx mailbox region.
5088 */
5089 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
5090 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
5091 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
5092 sizeof(struct lpfc_mqe));
5093
5094 /* Post the high mailbox dma address to the port and wait for ready. */
5095 dma_address = &phba->sli4_hba.bmbx.dma_address;
5096 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
5097
5098 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5099 * 1000) + jiffies;
5100 do {
5101 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5102 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5103 if (!db_ready)
5104 msleep(2);
5105
5106 if (time_after(jiffies, timeout)) {
5107 rc = MBXERR_ERROR;
5108 goto exit;
5109 }
5110 } while (!db_ready);
5111
5112 /* Post the low mailbox dma address to the port. */
5113 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
5114 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5115 * 1000) + jiffies;
5116 do {
5117 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5118 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5119 if (!db_ready)
5120 msleep(2);
5121
5122 if (time_after(jiffies, timeout)) {
5123 rc = MBXERR_ERROR;
5124 goto exit;
5125 }
5126 } while (!db_ready);
5127
5128 /*
5129 * Read the CQ to ensure the mailbox has completed.
5130 * If so, update the mailbox status so that the upper layers
5131 * can complete the request normally.
5132 */
5133 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
5134 sizeof(struct lpfc_mqe));
5135 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
5136 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
5137 sizeof(struct lpfc_mcqe));
5138 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
5139
5140 /* Prefix the mailbox status with range x4000 to note SLI4 status. */
5141 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
5142 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
5143 rc = MBXERR_ERROR;
5144 }
5145
5146 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5147 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
5148 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
5149 " x%x x%x CQ: x%x x%x x%x x%x\n",
5150 mboxq->vport ? mboxq->vport->vpi : 0,
5151 mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
5152 bf_get(lpfc_mqe_status, mb),
5153 mb->un.mb_words[0], mb->un.mb_words[1],
5154 mb->un.mb_words[2], mb->un.mb_words[3],
5155 mb->un.mb_words[4], mb->un.mb_words[5],
5156 mb->un.mb_words[6], mb->un.mb_words[7],
5157 mb->un.mb_words[8], mb->un.mb_words[9],
5158 mb->un.mb_words[10], mb->un.mb_words[11],
5159 mb->un.mb_words[12], mboxq->mcqe.word0,
5160 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5161 mboxq->mcqe.trailer);
5162exit:
5163 /* We are holding the token; no lock needed when releasing it */
5164 spin_lock_irqsave(&phba->hbalock, iflag);
5165 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5166 phba->sli.mbox_active = NULL;
5167 spin_unlock_irqrestore(&phba->hbalock, iflag);
5168 return rc;
5169}
5170
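The bootstrap-mailbox post above gates each doorbell write on a ready bit, bounded by a jiffies deadline derived from the per-command timeout. That polling pattern in isolation (the register layout is illustrative; bit 0 stands in for the lpfc_bmbx_rdy field, and the function name is not an lpfc one):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>

/* Poll a ready bit until it is set or @timeout_ms elapses.
 * Returns 0 when ready, -ETIMEDOUT otherwise. */
static int demo_wait_doorbell_ready(void __iomem *reg,
				    unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (!(readl(reg) & 0x1)) {	/* bit 0: illustrative ready bit */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		msleep(2);	/* same 2 ms backoff as the routine above */
	}
	return 0;
}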
5171/**
5172 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
5173 * @phba: Pointer to HBA context object.
5174 * @pmbox: Pointer to mailbox object.
5175 * @flag: Flag indicating how the mailbox needs to be processed.
5176 *
5177 * This function is called by discovery code and HBA management code to submit
5178 * a mailbox command to firmware with SLI-4 interface spec.
5179 *
5180 * Regardless of the return code, the caller owns the mailbox command
5181 * after the function returns.
5182 **/
5183static int
5184lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5185 uint32_t flag)
5186{
5187 struct lpfc_sli *psli = &phba->sli;
5188 unsigned long iflags;
5189 int rc;
5190
5191 /* Detect polling mode and jump to a handler */
5192 if (!phba->sli4_hba.intr_enable) {
5193 if (flag == MBX_POLL)
5194 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
5195 else
5196 rc = -EIO;
5197 if (rc != MBX_SUCCESS)
5198 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5199 "(%d):2541 Mailbox command x%x "
5200 "(x%x) cannot issue Data: x%x x%x\n",
5201 mboxq->vport ? mboxq->vport->vpi : 0,
5202 mboxq->u.mb.mbxCommand,
5203 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5204 psli->sli_flag, flag);
5205 return rc;
5206 } else if (flag == MBX_POLL) {
5207 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5208 "(%d):2542 Mailbox command x%x (x%x) "
5209 "cannot issue Data: x%x x%x\n",
5210 mboxq->vport ? mboxq->vport->vpi : 0,
5211 mboxq->u.mb.mbxCommand,
5212 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5213 psli->sli_flag, flag);
5214 return -EIO;
5215 }
5216
5217 /* Now, interrupt mode asynchronous mailbox command */
5218 rc = lpfc_mbox_cmd_check(phba, mboxq);
5219 if (rc) {
5220 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5221 "(%d):2543 Mailbox command x%x (x%x) "
5222 "cannot issue Data: x%x x%x\n",
5223 mboxq->vport ? mboxq->vport->vpi : 0,
5224 mboxq->u.mb.mbxCommand,
5225 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5226 psli->sli_flag, flag);
5227 goto out_not_finished;
5228 }
5229 rc = lpfc_mbox_dev_check(phba);
5230 if (unlikely(rc)) {
5231 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5232 "(%d):2544 Mailbox command x%x (x%x) "
5233 "cannot issue Data: x%x x%x\n",
5234 mboxq->vport ? mboxq->vport->vpi : 0,
5235 mboxq->u.mb.mbxCommand,
5236 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5237 psli->sli_flag, flag);
5238 goto out_not_finished;
5239 }
5240
5241 /* Put the mailbox command to the driver internal FIFO */
5242 psli->slistat.mbox_busy++;
5243 spin_lock_irqsave(&phba->hbalock, iflags);
5244 lpfc_mbox_put(phba, mboxq);
5245 spin_unlock_irqrestore(&phba->hbalock, iflags);
5246 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5247 "(%d):0354 Mbox cmd issue - Enqueue Data: "
5248 "x%x (x%x) x%x x%x x%x\n",
5249 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
5250 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5251 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5252 phba->pport->port_state,
5253 psli->sli_flag, MBX_NOWAIT);
5254 /* Wake up worker thread to transport mailbox command from head */
5255 lpfc_worker_wake_up(phba);
5256
5257 return MBX_BUSY;
5258
5259out_not_finished:
5260 return MBX_NOT_FINISHED;
5261}
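
A caller-side sketch may help here: with MBX_NOWAIT the command is queued for the worker thread and MBX_BUSY is the success code, while MBX_NOT_FINISHED leaves ownership with the caller. The names my_issue_example and my_mbox_cmpl below are hypothetical, not part of this patch:

static void my_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	/* Runs from the worker thread once firmware answers; check
	 * mboxq->u.mb.mbxStatus, then return the command to the pool. */
	mempool_free(mboxq, phba->mbox_mem_pool);
}

static int my_issue_example(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;

	mboxq->mbox_cmpl = my_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;		/* never queued; we still own it */
	}
	return 0;			/* MBX_BUSY: enqueued for the worker */
}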
5262
5263/**
5264 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
5265 * @phba: Pointer to HBA context object.
5266 *
5267 * This function is called by the worker thread to send a mailbox command
5268 * to the SLI4 HBA firmware.
5269 *
5270 **/
5271int
5272lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
5273{
5274 struct lpfc_sli *psli = &phba->sli;
5275 LPFC_MBOXQ_t *mboxq;
5276 int rc = MBX_SUCCESS;
5277 unsigned long iflags;
5278 struct lpfc_mqe *mqe;
5279 uint32_t mbx_cmnd;
5280
5281 /* Check interrupt mode before posting an async mailbox command */
5282 if (unlikely(!phba->sli4_hba.intr_enable))
5283 return MBX_NOT_FINISHED;
5284
5285 /* Check for mailbox command service token */
5286 spin_lock_irqsave(&phba->hbalock, iflags);
5287 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5288 spin_unlock_irqrestore(&phba->hbalock, iflags);
5289 return MBX_NOT_FINISHED;
5290 }
5291 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5292 spin_unlock_irqrestore(&phba->hbalock, iflags);
5293 return MBX_NOT_FINISHED;
5294 }
5295 if (unlikely(phba->sli.mbox_active)) {
5296 spin_unlock_irqrestore(&phba->hbalock, iflags);
5297 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5298 "0384 There is pending active mailbox cmd\n");
5299 return MBX_NOT_FINISHED;
5300 }
5301 /* Take the mailbox command service token */
5302 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5303
5304 /* Get the next mailbox command from head of queue */
5305 mboxq = lpfc_mbox_get(phba);
5306
5307 /* If no mailbox command is waiting to be posted, we're done */
5308 if (!mboxq) {
5309 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5310 spin_unlock_irqrestore(&phba->hbalock, iflags);
5311 return MBX_SUCCESS;
5312 }
5313 phba->sli.mbox_active = mboxq;
5314 spin_unlock_irqrestore(&phba->hbalock, iflags);
5315
5316 /* Check device readiness for posting mailbox command */
5317 rc = lpfc_mbox_dev_check(phba);
5318 if (unlikely(rc))
5319 /* Driver cleanup routine will clean up the pending mailbox */
5320 goto out_not_finished;
5321
5322 /* Prepare the mbox command to be posted */
5323 mqe = &mboxq->u.mqe;
5324 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
5325
5326 /* Start timer for the mbox_tmo and log some mailbox post messages */
5327 mod_timer(&psli->mbox_tmo, (jiffies +
5328 (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
5329
5330 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5331 "(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
5332 "x%x x%x\n",
5333 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
5334 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5335 phba->pport->port_state, psli->sli_flag);
5336
5337 if (mbx_cmnd != MBX_HEARTBEAT) {
5338 if (mboxq->vport) {
5339 lpfc_debugfs_disc_trc(mboxq->vport,
5340 LPFC_DISC_TRC_MBOX_VPORT,
5341 "MBOX Send vport: cmd:x%x mb:x%x x%x",
5342 mbx_cmnd, mqe->un.mb_words[0],
5343 mqe->un.mb_words[1]);
5344 } else {
5345 lpfc_debugfs_disc_trc(phba->pport,
5346 LPFC_DISC_TRC_MBOX,
5347 "MBOX Send: cmd:x%x mb:x%x x%x",
5348 mbx_cmnd, mqe->un.mb_words[0],
5349 mqe->un.mb_words[1]);
5350 }
5351 }
5352 psli->slistat.mbox_cmd++;
5353
5354 /* Post the mailbox command to the port */
5355 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
5356 if (rc != MBX_SUCCESS) {
5357 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5358 "(%d):2533 Mailbox command x%x (x%x) "
5359 "cannot issue Data: x%x x%x\n",
5360 mboxq->vport ? mboxq->vport->vpi : 0,
5361 mboxq->u.mb.mbxCommand,
5362 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5363 psli->sli_flag, MBX_NOWAIT);
5364 goto out_not_finished;
5365 }
5366
5367 return rc;
5368
5369out_not_finished:
5370 spin_lock_irqsave(&phba->hbalock, iflags);
5371 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
5372 __lpfc_mbox_cmpl_put(phba, mboxq);
5373 /* Release the token */
5374 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5375 phba->sli.mbox_active = NULL;
5376 spin_unlock_irqrestore(&phba->hbalock, iflags);
5377
5378 return MBX_NOT_FINISHED;
5379}
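
The LPFC_SLI_MBOX_ACTIVE bit behaves as a service token: at most one mailbox command is outstanding, and the bit is flipped only under hbalock. A standalone model of that token discipline (plain C with pthreads; all names invented for illustration):

#include <stdbool.h>
#include <pthread.h>

struct mbox_sys {
	pthread_mutex_t lock;		/* models hbalock */
	bool active;			/* models LPFC_SLI_MBOX_ACTIVE */
};

/* Try to take the token; fails if a command is already outstanding. */
static bool mbox_try_acquire(struct mbox_sys *s)
{
	bool got;

	pthread_mutex_lock(&s->lock);
	got = !s->active;
	if (got)
		s->active = true;
	pthread_mutex_unlock(&s->lock);
	return got;
}

static void mbox_release(struct mbox_sys *s)
{
	pthread_mutex_lock(&s->lock);
	s->active = false;
	pthread_mutex_unlock(&s->lock);
}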
5380
5381/**
5382 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
5383 * @phba: Pointer to HBA context object.
5384 * @pmbox: Pointer to mailbox object.
5385 * @flag: Flag indicating how the mailbox needs to be processed.
5386 *
5387 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine through
5388 * the API jump table function pointer in the lpfc_hba struct.
5389 *
5390 * Return codes: the caller owns the mailbox command after the function
5391 * returns.
5392 **/
5393int
5394lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
5395{
5396 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
5397}
5398
5399/**
5400 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
5401 * @phba: The hba struct for which this call is being executed.
5402 * @dev_grp: The HBA PCI-Device group number.
5403 *
5404 * This routine sets up the mbox interface API function jump table in @phba
5405 * struct.
5406 * Returns: 0 - success, -ENODEV - failure.
5407 **/
5408int
5409lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5410{
5411
5412 switch (dev_grp) {
5413 case LPFC_PCI_DEV_LP:
5414 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
5415 phba->lpfc_sli_handle_slow_ring_event =
5416 lpfc_sli_handle_slow_ring_event_s3;
5417 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
5418 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
5419 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
5420 break;
5421 case LPFC_PCI_DEV_OC:
5422 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
5423 phba->lpfc_sli_handle_slow_ring_event =
5424 lpfc_sli_handle_slow_ring_event_s4;
5425 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
5426 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
5427 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
5428 break;
5429 default:
5430 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5431 "1420 Invalid HBA PCI-device group: 0x%x\n",
5432 dev_grp);
5433 return -ENODEV;
5434 break;
5435 }
5436 return 0;
5437}
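
The same jump-table pattern can be reduced to a few lines of standalone C; the dev_grp values and function names below are illustrative stand-ins for LPFC_PCI_DEV_LP/LPFC_PCI_DEV_OC:

#include <stdio.h>

struct hba;
typedef int (*issue_fn)(struct hba *hba, unsigned int flag);

struct hba {
	issue_fn issue_mbox;	/* bound once at probe time */
};

static int issue_s3(struct hba *hba, unsigned int flag)
{
	return puts("SLI-3 path");
}

static int issue_s4(struct hba *hba, unsigned int flag)
{
	return puts("SLI-4 path");
}

static int api_setup(struct hba *hba, int dev_grp)
{
	switch (dev_grp) {
	case 0:			/* stand-in for LPFC_PCI_DEV_LP */
		hba->issue_mbox = issue_s3;
		return 0;
	case 1:			/* stand-in for LPFC_PCI_DEV_OC */
		hba->issue_mbox = issue_s4;
		return 0;
	default:
		return -1;
	}
}

Every later call site then reads hba->issue_mbox(hba, flag) regardless of device generation.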
5438
5439/**
3653 * __lpfc_sli_ringtx_put - Add an iocb to the txq 5440 * __lpfc_sli_ringtx_put - Add an iocb to the txq
3654 * @phba: Pointer to HBA context object. 5441 * @phba: Pointer to HBA context object.
3655 * @pring: Pointer to driver SLI ring object. 5442 * @pring: Pointer to driver SLI ring object.
@@ -3701,35 +5488,34 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3701} 5488}
3702 5489
3703/** 5490/**
3704 * __lpfc_sli_issue_iocb - Lockless version of lpfc_sli_issue_iocb 5491 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
3705 * @phba: Pointer to HBA context object. 5492 * @phba: Pointer to HBA context object.
3706 * @pring: Pointer to driver SLI ring object. 5493 * @ring_number: SLI ring number to issue iocb on.
3707 * @piocb: Pointer to command iocb. 5494 * @piocb: Pointer to command iocb.
3708 * @flag: Flag indicating if this command can be put into txq. 5495 * @flag: Flag indicating if this command can be put into txq.
3709 * 5496 *
3710 * __lpfc_sli_issue_iocb is used by other functions in the driver 5497 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
3711 * to issue an iocb command to the HBA. If the PCI slot is recovering 5498 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
3712 * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT 5499 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
3713 * flag is turned on, the function returns IOCB_ERROR. 5500 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
3714 * When the link is down, this function allows only iocbs for 5501 * this function allows only iocbs for posting buffers. This function finds
3715 * posting buffers. 5502 * next available slot in the command ring and posts the command to the
3716 * This function finds next available slot in the command ring and 5503 * available slot and writes the port attention register to request HBA start
3717 * posts the command to the available slot and writes the port 5504 * processing new iocb. If there is no slot available in the ring and
3718 * attention register to request HBA start processing new iocb. 5505 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
3719 * If there is no slot available in the ring and 5506 * the function returns IOCB_BUSY.
3720 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the 5507 *
3721 * txq, otherwise the function returns IOCB_BUSY. 5508 * This function is called with hbalock held. The function will return success
3722 * 5509 * after it successfully submits the iocb to firmware or after adding to the
3723 * This function is called with hbalock held. 5510 * txq.
3724 * The function will return success after it successfully submit the
3725 * iocb to firmware or after adding to the txq.
3726 **/ 5511 **/
3727static int 5512static int
3728__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 5513__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
3729 struct lpfc_iocbq *piocb, uint32_t flag) 5514 struct lpfc_iocbq *piocb, uint32_t flag)
3730{ 5515{
3731 struct lpfc_iocbq *nextiocb; 5516 struct lpfc_iocbq *nextiocb;
3732 IOCB_t *iocb; 5517 IOCB_t *iocb;
5518 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
3733 5519
3734 if (piocb->iocb_cmpl && (!piocb->vport) && 5520 if (piocb->iocb_cmpl && (!piocb->vport) &&
3735 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 5521 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@@ -3833,6 +5619,498 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3833 return IOCB_BUSY; 5619 return IOCB_BUSY;
3834} 5620}
3835 5621
5622/**
5623 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
5624 * @phba: Pointer to HBA context object.
5625 * @piocb: Pointer to command iocb.
5626 * @sglq: Pointer to the scatter gather queue object.
5627 *
5628 * This routine converts the bpl or bde that is in the IOCB
5629 * to a sgl list for the sli4 hardware. The physical address
5630 * of the bpl/bde is converted back to a virtual address.
5631 * If the IOCB contains a BPL then the list of BDE's is
5632 * converted to sli4_sge's. If the IOCB contains a single
5633 * BDE then it is converted to a single sli_sge.
5634 * The IOCB is still in cpu endianness so the contents of
5635 * the bpl can be used without byte swapping.
5636 *
5637 * Returns valid XRI = Success, NO_XRI = Failure.
5638**/
5639static uint16_t
5640lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5641 struct lpfc_sglq *sglq)
5642{
5643 uint16_t xritag = NO_XRI;
5644 struct ulp_bde64 *bpl = NULL;
5645 struct ulp_bde64 bde;
5646 struct sli4_sge *sgl = NULL;
5647 IOCB_t *icmd;
5648 int numBdes = 0;
5649 int i = 0;
5650
5651 if (!piocbq || !sglq)
5652 return xritag;
5653
5654 sgl = (struct sli4_sge *)sglq->sgl;
5655 icmd = &piocbq->iocb;
5656 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5657 numBdes = icmd->un.genreq64.bdl.bdeSize /
5658 sizeof(struct ulp_bde64);
5659 /* The addrHigh and addrLow fields within the IOCB
5660 * have not been byteswapped yet so there is no
5661 * need to swap them back.
5662 */
5663 bpl = (struct ulp_bde64 *)
5664 ((struct lpfc_dmabuf *)piocbq->context3)->virt;
5665
5666 if (!bpl)
5667 return xritag;
5668
5669 for (i = 0; i < numBdes; i++) {
5670 /* Should already be byte swapped. */
5671 sgl->addr_hi = bpl->addrHigh;
5672 sgl->addr_lo = bpl->addrLow;
5673 /* swap the size field back to the cpu so we
5674 * can assign it to the sgl.
5675 */
5676 bde.tus.w = le32_to_cpu(bpl->tus.w);
5677 bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
5678 if ((i+1) == numBdes)
5679 bf_set(lpfc_sli4_sge_last, sgl, 1);
5680 else
5681 bf_set(lpfc_sli4_sge_last, sgl, 0);
5682 sgl->word2 = cpu_to_le32(sgl->word2);
5683 sgl->word3 = cpu_to_le32(sgl->word3);
5684 bpl++;
5685 sgl++;
5686 }
5687 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
5688 /* The addrHigh and addrLow fields of the BDE have not
5689 * been byteswapped yet so they need to be swapped
5690 * before putting them in the sgl.
5691 */
5692 sgl->addr_hi =
5693 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
5694 sgl->addr_lo =
5695 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
5696 bf_set(lpfc_sli4_sge_len, sgl,
5697 icmd->un.genreq64.bdl.bdeSize);
5698 bf_set(lpfc_sli4_sge_last, sgl, 1);
5699 sgl->word2 = cpu_to_le32(sgl->word2);
5700 sgl->word3 = cpu_to_le32(sgl->word3);
5701 }
5702 return sglq->sli4_xritag;
5703}
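
The essential shape of the conversion is a copy loop that marks only the final entry with the 'last' bit. A simplified standalone model (these are not the driver's real structures):

#include <stddef.h>
#include <stdint.h>

struct bde { uint32_t addr_hi, addr_lo, len; };
struct sge { uint32_t addr_hi, addr_lo, len; int last; };

/* Copy n buffer descriptors; only the final SGE gets the last flag. */
static void bpl_to_sgl(const struct bde *bpl, struct sge *sgl, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		sgl[i].addr_hi = bpl[i].addr_hi;
		sgl[i].addr_lo = bpl[i].addr_lo;
		sgl[i].len     = bpl[i].len;
		sgl[i].last    = (i + 1 == n);
	}
}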
5704
5705/**
5706 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
5707 * @phba: Pointer to HBA context object.
5708 * @piocb: Pointer to command iocb.
5709 *
5710 * This routine performs round-robin distribution of SCSI commands across
5711 * the SLI4 FCP work queues.
5712 *
5713 * Return: index into the SLI4 fast-path FCP queue array.
5714 **/
5715static uint32_t
5716lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
5717{
5718 static uint32_t fcp_qidx;
5719
5720 return fcp_qidx++ % phba->cfg_fcp_wq_count;
5721}
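
Note the static counter is not serialized, so concurrent callers may occasionally compute the same index; that only perturbs the spread, never correctness. The arithmetic reduces to:

/* Standalone model: successive calls yield 0, 1, ..., n-1, 0, 1, ... */
static unsigned int next_wq_index(unsigned int *counter, unsigned int n)
{
	return (*counter)++ % n;
}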
5722
5723/**
5724 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
5725 * @phba: Pointer to HBA context object.
5726 * @piocb: Pointer to command iocb.
5727 * @wqe: Pointer to the work queue entry.
5728 *
5729 * This routine converts the iocb command to its Work Queue Entry
5730 * equivalent. The wqe pointer should not have any fields set when
5731 * this routine is called because it will memcpy over them.
5732 * This routine does not set the CQ_ID or the WQEC bits in the
5733 * wqe.
5734 *
5735 * Returns: 0 = Success, IOCB_ERROR = Failure.
5736 **/
5737static int
5738lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5739 union lpfc_wqe *wqe)
5740{
5741 uint32_t payload_len = 0;
5742 uint8_t ct = 0;
5743 uint32_t fip;
5744 uint32_t abort_tag;
5745 uint8_t command_type = ELS_COMMAND_NON_FIP;
5746 uint8_t cmnd;
5747 uint16_t xritag;
5748 struct ulp_bde64 *bpl = NULL;
5749
5750 fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
5751 /* The fcp commands will set command type */
5752 if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip))
5753 command_type = ELS_COMMAND_NON_FIP;
5754 else if (!(iocbq->iocb_flag & LPFC_IO_FCP))
5755 command_type = ELS_COMMAND_FIP;
5756 else if (iocbq->iocb_flag & LPFC_IO_FCP)
5757 command_type = FCP_COMMAND;
5758 else {
5759 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5760 "2019 Invalid cmd 0x%x\n",
5761 iocbq->iocb.ulpCommand);
5762 return IOCB_ERROR;
5763 }
5764 /* Some of the fields are in the right position already */
5765 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
5766 abort_tag = (uint32_t) iocbq->iotag;
5767 xritag = iocbq->sli4_xritag;
5768 wqe->words[7] = 0; /* The ct field has moved so reset */
5769 /* words0-2 bpl convert bde */
5770 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5771 bpl = (struct ulp_bde64 *)
5772 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
5773 if (!bpl)
5774 return IOCB_ERROR;
5775
5776 /* Should already be byte swapped. */
5777 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
5778 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
5779 /* swap the size field back to the cpu so we
5780 * can assign it to the sgl.
5781 */
5782 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5783 payload_len = wqe->generic.bde.tus.f.bdeSize;
5784 } else
5785 payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
5786
5787 iocbq->iocb.ulpIoTag = iocbq->iotag;
5788 cmnd = iocbq->iocb.ulpCommand;
5789
5790 switch (iocbq->iocb.ulpCommand) {
5791 case CMD_ELS_REQUEST64_CR:
5792 if (!iocbq->iocb.ulpLe) {
5793 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5794 "2007 Only Limited Edition cmd Format"
5795 " supported 0x%x\n",
5796 iocbq->iocb.ulpCommand);
5797 return IOCB_ERROR;
5798 }
5799 wqe->els_req.payload_len = payload_len;
5800 /* Els_request64 has a TMO */
5801 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
5802 iocbq->iocb.ulpTimeout);
5803 /* Need a VF for word 4 set the vf bit*/
5804 bf_set(els_req64_vf, &wqe->els_req, 0);
5805 /* And a VFID for word 12 */
5806 bf_set(els_req64_vfid, &wqe->els_req, 0);
5807 /*
5808 * Set ct field to 3, which indicates that the context_tag field
5809 * contains the FCFI and remote N_Port_ID is
5810 * in word 5.
5811 */
5812
5813 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
5814 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5815 iocbq->iocb.ulpContext);
5816
5817 if (iocbq->vport->fc_myDID != 0) {
5818 bf_set(els_req64_sid, &wqe->els_req,
5819 iocbq->vport->fc_myDID);
5820 bf_set(els_req64_sp, &wqe->els_req, 1);
5821 }
5822 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
5823 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5824 /* CCP CCPE PV PRI in word10 were set in the memcpy */
5825 break;
5826 case CMD_XMIT_SEQUENCE64_CR:
5827 /* word3 iocb=io_tag32 wqe=payload_offset */
5828 /* payload offset used for multiple outstanding
5829 * sequences on the same exchange
5830 */
5831 wqe->words[3] = 0;
5832 /* word4 relative_offset memcpy */
5833 /* word5 r_ctl/df_ctl memcpy */
5834 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5835 wqe->xmit_sequence.xmit_len = payload_len;
5836 break;
5837 case CMD_XMIT_BCAST64_CN:
5838 /* word3 iocb=iotag32 wqe=payload_len */
5839 wqe->words[3] = 0; /* no definition for this in wqe */
5840 /* word4 iocb=rsvd wqe=rsvd */
5841 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
5842 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
5843 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5844 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5845 break;
5846 case CMD_FCP_IWRITE64_CR:
5847 command_type = FCP_COMMAND_DATA_OUT;
5848 /* The struct for wqe fcp_iwrite has 3 fields that are somewhat
5849 * confusing.
5850 * word3 is payload_len: byte offset to the sgl entry for the
5851 * fcp_command.
5852 * word4 is total xfer len, same as the IOCB->ulpParameter.
5853 * word5 is initial xfer len 0 = wait for xfer-ready
5854 */
5855
5856 /* Always wait for xfer-ready before sending data */
5857 wqe->fcp_iwrite.initial_xfer_len = 0;
5858 /* word 4 (xfer length) should have been set on the memcpy */
5859
5860 /* allow write to fall through to read */
5861 case CMD_FCP_IREAD64_CR:
5862 /* FCP_CMD is always the 1st sgl entry */
5863 wqe->fcp_iread.payload_len =
5864 payload_len + sizeof(struct fcp_rsp);
5865
5866 /* word 4 (xfer length) should have been set on the memcpy */
5867
5868 bf_set(lpfc_wqe_gen_erp, &wqe->generic,
5869 iocbq->iocb.ulpFCP2Rcvy);
5870 bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
5871 /* The XC bit and the XS bit are similar. The driver never
5872 * tracked whether or not the exchange was previously open.
5873 * XC = Exchange create, 0 is create. 1 is already open.
5874 * XS = link cmd: 1 do not close the exchange after command.
5875 * XS = 0 close exchange when command completes.
5876 * The only time we would not set the XC bit is when the XS bit
5877 * is set and we are sending our 2nd or greater command on
5878 * this exchange.
5879 */
5880
5881 /* ALLOW read & write to fall through to ICMD64 */
5882 case CMD_FCP_ICMND64_CR:
5883 /* Always open the exchange */
5884 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5885
5886 wqe->words[10] &= 0xffff0000; /* zero out ebde count */
5887 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5888 break;
5889 case CMD_GEN_REQUEST64_CR:
5890 /* word3 command length is described as byte offset to the
5891 * rsp_data; it is always 16, sizeof(struct sli4_sge), since
5892 * sgl[0] = cmnd
5893 * sgl[1] = rsp.
5894 *
5895 */
5896 wqe->gen_req.command_len = payload_len;
5897 /* Word4 parameter copied in the memcpy */
5898 /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
5899 /* word6 context tag copied in memcpy */
5900 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
5901 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
5902 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5903 "2015 Invalid CT %x command 0x%x\n",
5904 ct, iocbq->iocb.ulpCommand);
5905 return IOCB_ERROR;
5906 }
5907 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
5908 bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
5909 iocbq->iocb.ulpTimeout);
5910
5911 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5912 command_type = OTHER_COMMAND;
5913 break;
5914 case CMD_XMIT_ELS_RSP64_CX:
5915 /* words0-2 BDE memcpy */
5916 /* word3 iocb=iotag32 wqe=rsvd */
5917 wqe->words[3] = 0;
5918 /* word4 iocb=did wqe=rsvd. */
5919 wqe->words[4] = 0;
5920 /* word5 iocb=rsvd wqe=did */
5921 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
5922 iocbq->iocb.un.elsreq64.remoteID);
5923
5924 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5925 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5926
5927 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5928 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
5929 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
5930 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5931 iocbq->vport->vpi + phba->vpi_base);
5932 command_type = OTHER_COMMAND;
5933 break;
5934 case CMD_CLOSE_XRI_CN:
5935 case CMD_ABORT_XRI_CN:
5936 case CMD_ABORT_XRI_CX:
5937 /* words 0-2 memcpy should be 0 (reserved) */
5938 /* port will send abts */
5939 if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
5940 /*
5941 * The link is down so the fw does not need to send abts
5942 * on the wire.
5943 */
5944 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
5945 else
5946 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
5947 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
5948 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
5949 wqe->words[5] = 0;
5950 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5951 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5952 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
5953 wqe->generic.abort_tag = abort_tag;
5954 /*
5955 * The abort handler will send us CMD_ABORT_XRI_CN or
5956 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
5957 */
5958 bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
5959 cmnd = CMD_ABORT_XRI_CX;
5960 command_type = OTHER_COMMAND;
5961 xritag = 0;
5962 break;
5963 case CMD_XRI_ABORTED_CX:
5964 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
5965 /* words0-2 are all 0's no bde */
5966 /* word3 and word4 are rsvrd */
5967 wqe->words[3] = 0;
5968 wqe->words[4] = 0;
5969 /* word5 iocb=rsvd wqe=did */
5970 /* There is no remote port id in the IOCB? */
5971 /* Let this fall through and fail */
5972 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
5973 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
5974 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
5975 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
5976 default:
5977 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5978 "2014 Invalid command 0x%x\n",
5979 iocbq->iocb.ulpCommand);
5980 return IOCB_ERROR;
5981 break;
5982
5983 }
5984 bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
5985 bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
5986 wqe->generic.abort_tag = abort_tag;
5987 bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
5988 bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
5989 bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
5990 bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
5991
5992 return 0;
5993}
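
The bf_set()/bf_get() accessors used throughout expand to shift-and-mask operations on a 32-bit word. A hedged standalone equivalent (the real lpfc macros take a generated field descriptor rather than explicit shift/mask arguments):

#include <stdint.h>

/* Clear the field's bits, then or-in the new value. */
static inline void bf_set32(uint32_t *word, unsigned int shift,
			    uint32_t mask, uint32_t val)
{
	*word = (*word & ~(mask << shift)) | ((val & mask) << shift);
}

static inline uint32_t bf_get32(uint32_t word, unsigned int shift,
				uint32_t mask)
{
	return (word >> shift) & mask;
}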
5994
5995/**
5996 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
5997 * @phba: Pointer to HBA context object.
5998 * @ring_number: SLI ring number to issue iocb on.
5999 * @piocb: Pointer to command iocb.
6000 * @flag: Flag indicating if this command can be put into txq.
6001 *
6002 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
6003 * an iocb command to an HBA with SLI-4 interface spec.
6004 *
6005 * This function is called with hbalock held. The function will return success
6006 * after it successfully submits the iocb to firmware or after adding to the
6007 * txq.
6008 **/
6009static int
6010__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6011 struct lpfc_iocbq *piocb, uint32_t flag)
6012{
6013 struct lpfc_sglq *sglq;
6014 uint16_t xritag;
6015 union lpfc_wqe wqe;
6016 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
6017 uint32_t fcp_wqidx;
6018
6019 if (piocb->sli4_xritag == NO_XRI) {
6020 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6021 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6022 sglq = NULL;
6023 else {
6024 sglq = __lpfc_sli_get_sglq(phba);
6025 if (!sglq)
6026 return IOCB_ERROR;
6027 piocb->sli4_xritag = sglq->sli4_xritag;
6028 }
6029 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
6030 sglq = NULL; /* These IOs already have an XRI and
6031 * a mapped sgl.
6032 */
6033 } else {
6034 /* This is a continuation of a command (CX), so this
6035 * sglq is on the active list
6036 */
6037 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
6038 if (!sglq)
6039 return IOCB_ERROR;
6040 }
6041
6042 if (sglq) {
6043 xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
6044 if (xritag != sglq->sli4_xritag)
6045 return IOCB_ERROR;
6046 }
6047
6048 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
6049 return IOCB_ERROR;
6050
6051 if (piocb->iocb_flag & LPFC_IO_FCP) {
6052 fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb);
6053 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
6054 return IOCB_ERROR;
6055 } else {
6056 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
6057 return IOCB_ERROR;
6058 }
6059 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
6060
6061 return 0;
6062}
6063
6064/**
6065 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
6066 *
6067 * This routine wraps the actual lockless IOCB issue routine through the
6068 * function pointer in the lpfc_hba struct.
6069 *
6070 * Return codes:
6071 * IOCB_ERROR - Error
6072 * IOCB_SUCCESS - Success
6073 * IOCB_BUSY - Busy
6074 **/
6075static inline int
6076__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6077 struct lpfc_iocbq *piocb, uint32_t flag)
6078{
6079 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
6080}
6081
6082/**
6083 * lpfc_sli_api_table_setup - Set up sli api function jump table
6084 * @phba: The hba struct for which this call is being executed.
6085 * @dev_grp: The HBA PCI-Device group number.
6086 *
6087 * This routine sets up the SLI interface API function jump table in @phba
6088 * struct.
6089 * Returns: 0 - success, -ENODEV - failure.
6090 **/
6091int
6092lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6093{
6094
6095 switch (dev_grp) {
6096 case LPFC_PCI_DEV_LP:
6097 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
6098 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
6099 break;
6100 case LPFC_PCI_DEV_OC:
6101 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
6102 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
6103 break;
6104 default:
6105 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6106 "1419 Invalid HBA PCI-device group: 0x%x\n",
6107 dev_grp);
6108 return -ENODEV;
6109 break;
6110 }
6111 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
6112 return 0;
6113}
3836 6114
3837/** 6115/**
3838 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 6116 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
@@ -3848,14 +6126,14 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3848 * functions which do not hold hbalock. 6126 * functions which do not hold hbalock.
3849 **/ 6127 **/
3850int 6128int
3851lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 6129lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
3852 struct lpfc_iocbq *piocb, uint32_t flag) 6130 struct lpfc_iocbq *piocb, uint32_t flag)
3853{ 6131{
3854 unsigned long iflags; 6132 unsigned long iflags;
3855 int rc; 6133 int rc;
3856 6134
3857 spin_lock_irqsave(&phba->hbalock, iflags); 6135 spin_lock_irqsave(&phba->hbalock, iflags);
3858 rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag); 6136 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
3859 spin_unlock_irqrestore(&phba->hbalock, iflags); 6137 spin_unlock_irqrestore(&phba->hbalock, iflags);
3860 6138
3861 return rc; 6139 return rc;
@@ -4148,6 +6426,52 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
4148} 6426}
4149 6427
4150/** 6428/**
6429 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
6430 * @phba: Pointer to HBA context object.
6431 *
6432 * This routine flushes the mailbox command subsystem. It will unconditionally
6433 * flush all the mailbox commands in the three possible stages in the mailbox
6434 * command sub-system: pending mailbox command queue; the outstanding mailbox
6435 * command; and the completed mailbox command queue. It is the caller's responsibility
6436 * to make sure that the driver is in the proper state to flush the mailbox
6437 * command sub-system. Namely, the posting of mailbox commands into the
6438 * pending mailbox command queue from the various clients must be stopped;
6439 * either the HBA is in a state in which it will never work on the outstanding
6440 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
6441 * mailbox command has been completed.
6442 **/
6443static void
6444lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
6445{
6446 LIST_HEAD(completions);
6447 struct lpfc_sli *psli = &phba->sli;
6448 LPFC_MBOXQ_t *pmb;
6449 unsigned long iflag;
6450
6451 /* Flush all the mailbox commands in the mbox system */
6452 spin_lock_irqsave(&phba->hbalock, iflag);
6453 /* The pending mailbox command queue */
6454 list_splice_init(&phba->sli.mboxq, &completions);
6455 /* The outstanding active mailbox command */
6456 if (psli->mbox_active) {
6457 list_add_tail(&psli->mbox_active->list, &completions);
6458 psli->mbox_active = NULL;
6459 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6460 }
6461 /* The completed mailbox command queue */
6462 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
6463 spin_unlock_irqrestore(&phba->hbalock, iflag);
6464
6465 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
6466 while (!list_empty(&completions)) {
6467 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
6468 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
6469 if (pmb->mbox_cmpl)
6470 pmb->mbox_cmpl(phba, pmb);
6471 }
6472}
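
Splicing everything onto a private list under the lock and running the completions after dropping it is the standard way to avoid invoking callbacks under a spinlock. Schematically (kernel idiom; struct my_entry, shared_list, and the complete() hook are invented for illustration):

LIST_HEAD(tmp);
unsigned long flags;
struct my_entry *entry;

spin_lock_irqsave(&lock, flags);
list_splice_init(&shared_list, &tmp);	/* shared_list is now empty */
spin_unlock_irqrestore(&lock, flags);

while (!list_empty(&tmp)) {
	entry = list_first_entry(&tmp, struct my_entry, list);
	list_del(&entry->list);
	entry->complete(entry);		/* callback runs with no lock held */
}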
6473
6474/**
4151 * lpfc_sli_host_down - Vport cleanup function 6475 * lpfc_sli_host_down - Vport cleanup function
4152 * @vport: Pointer to virtual port object. 6476 * @vport: Pointer to virtual port object.
4153 * 6477 *
@@ -4240,9 +6564,11 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
4240 struct lpfc_sli *psli = &phba->sli; 6564 struct lpfc_sli *psli = &phba->sli;
4241 struct lpfc_sli_ring *pring; 6565 struct lpfc_sli_ring *pring;
4242 struct lpfc_dmabuf *buf_ptr; 6566 struct lpfc_dmabuf *buf_ptr;
4243 LPFC_MBOXQ_t *pmb;
4244 int i;
4245 unsigned long flags = 0; 6567 unsigned long flags = 0;
6568 int i;
6569
6570 /* Shutdown the mailbox command sub-system */
6571 lpfc_sli_mbox_sys_shutdown(phba);
4246 6572
4247 lpfc_hba_down_prep(phba); 6573 lpfc_hba_down_prep(phba);
4248 6574
@@ -4287,28 +6613,42 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
4287 6613
4288 /* Return any active mbox cmds */ 6614 /* Return any active mbox cmds */
4289 del_timer_sync(&psli->mbox_tmo); 6615 del_timer_sync(&psli->mbox_tmo);
4290 spin_lock_irqsave(&phba->hbalock, flags);
4291 6616
4292 spin_lock(&phba->pport->work_port_lock); 6617 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
4293 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 6618 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
4294 spin_unlock(&phba->pport->work_port_lock); 6619 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
4295 6620
4296 /* Return any pending or completed mbox cmds */ 6621 return 1;
4297 list_splice_init(&phba->sli.mboxq, &completions); 6622}
4298 if (psli->mbox_active) { 6623
4299 list_add_tail(&psli->mbox_active->list, &completions); 6624/**
4300 psli->mbox_active = NULL; 6625 * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
4301 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6626 * @phba: Pointer to HBA context object.
4302 } 6627 *
4303 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 6628 * This function cleans up all queues, iocb, buffers, mailbox commands while
4304 spin_unlock_irqrestore(&phba->hbalock, flags); 6629 * shutting down the SLI4 HBA FCoE function. This function is called with no
6630 * lock held and always returns 1.
6631 *
6632 * This function does the following to cleanup driver FCoE function resources:
6633 * - Free discovery resources for each virtual port
6634 * - Cleanup any pending fabric iocbs
6635 * - Iterate through the iocb txq and free each entry in the list.
6636 * - Free up any buffer posted to the HBA.
6637 * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc.
6638 * - Free mailbox commands in the mailbox queue.
6639 **/
6640int
6641lpfc_sli4_hba_down(struct lpfc_hba *phba)
6642{
6643 /* Stop the SLI4 device port */
6644 lpfc_stop_port(phba);
6645
6646 /* Tear down the queues in the HBA */
6647 lpfc_sli4_queue_unset(phba);
6648
6649 /* unregister default FCFI from the HBA */
6650 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4305 6651
4306 while (!list_empty(&completions)) {
4307 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
4308 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
4309 if (pmb->mbox_cmpl)
4310 pmb->mbox_cmpl(phba,pmb);
4311 }
4312 return 1; 6652 return 1;
4313} 6653}
4314 6654
@@ -4639,7 +6979,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4639 iabt = &abtsiocbp->iocb; 6979 iabt = &abtsiocbp->iocb;
4640 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 6980 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
4641 iabt->un.acxri.abortContextTag = icmd->ulpContext; 6981 iabt->un.acxri.abortContextTag = icmd->ulpContext;
4642 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 6982 if (phba->sli_rev == LPFC_SLI_REV4)
6983 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
6984 else
6985 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
4643 iabt->ulpLe = 1; 6986 iabt->ulpLe = 1;
4644 iabt->ulpClass = icmd->ulpClass; 6987 iabt->ulpClass = icmd->ulpClass;
4645 6988
@@ -4655,7 +6998,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4655 "abort cmd iotag x%x\n", 6998 "abort cmd iotag x%x\n",
4656 iabt->un.acxri.abortContextTag, 6999 iabt->un.acxri.abortContextTag,
4657 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 7000 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
4658 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); 7001 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
4659 7002
4660 if (retval) 7003 if (retval)
4661 __lpfc_sli_release_iocbq(phba, abtsiocbp); 7004 __lpfc_sli_release_iocbq(phba, abtsiocbp);
@@ -4838,7 +7181,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
4838 cmd = &iocbq->iocb; 7181 cmd = &iocbq->iocb;
4839 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 7182 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
4840 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 7183 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
4841 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 7184 if (phba->sli_rev == LPFC_SLI_REV4)
7185 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
7186 else
7187 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
4842 abtsiocb->iocb.ulpLe = 1; 7188 abtsiocb->iocb.ulpLe = 1;
4843 abtsiocb->iocb.ulpClass = cmd->ulpClass; 7189 abtsiocb->iocb.ulpClass = cmd->ulpClass;
4844 abtsiocb->vport = phba->pport; 7190 abtsiocb->vport = phba->pport;
@@ -4850,7 +7196,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
4850 7196
4851 /* Setup callback routine and issue the command. */ 7197 /* Setup callback routine and issue the command. */
4852 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 7198 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4853 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); 7199 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
7200 abtsiocb, 0);
4854 if (ret_val == IOCB_ERROR) { 7201 if (ret_val == IOCB_ERROR) {
4855 lpfc_sli_release_iocbq(phba, abtsiocb); 7202 lpfc_sli_release_iocbq(phba, abtsiocb);
4856 errcnt++; 7203 errcnt++;
@@ -4931,7 +7278,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
4931 **/ 7278 **/
4932int 7279int
4933lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 7280lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
4934 struct lpfc_sli_ring *pring, 7281 uint32_t ring_number,
4935 struct lpfc_iocbq *piocb, 7282 struct lpfc_iocbq *piocb,
4936 struct lpfc_iocbq *prspiocbq, 7283 struct lpfc_iocbq *prspiocbq,
4937 uint32_t timeout) 7284 uint32_t timeout)
@@ -4962,7 +7309,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
4962 readl(phba->HCregaddr); /* flush */ 7309 readl(phba->HCregaddr); /* flush */
4963 } 7310 }
4964 7311
4965 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); 7312 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
4966 if (retval == IOCB_SUCCESS) { 7313 if (retval == IOCB_SUCCESS) {
4967 timeout_req = timeout * HZ; 7314 timeout_req = timeout * HZ;
4968 timeleft = wait_event_timeout(done_q, 7315 timeleft = wait_event_timeout(done_q,
@@ -5077,53 +7424,156 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
5077} 7424}
5078 7425
5079/** 7426/**
5080 * lpfc_sli_flush_mbox_queue - mailbox queue cleanup function 7427 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
5081 * @phba: Pointer to HBA context. 7428 * @phba: Pointer to HBA context.
5082 * 7429 *
5083 * This function is called to cleanup any pending mailbox 7430 * This function is called to shut down the driver's mailbox sub-system.
5084 * objects in the driver queue before bringing the HBA offline. 7431 * It first marks the mailbox sub-system as blocked to prevent
5085 * This function is called while resetting the HBA. 7432 * any asynchronous mailbox command from being issued off the pending mailbox
5086 * The function is called without any lock held. The function 7433 * command queue. If the mailbox command sub-system shutdown is due to
5087 * takes hbalock to update SLI data structure. 7434 * HBA error conditions such as EEH or ERATT, this routine shall invoke
5088 * This function returns 1 when there is an active mailbox 7435 * the mailbox sub-system flush routine to forcefully bring down the
5089 * command pending else returns 0. 7436 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
7437 * as with offline or HBA function reset), this routine will wait for the
7438 * outstanding mailbox command to complete before invoking the mailbox
7439 * sub-system flush routine to gracefully bring down mailbox sub-system.
5090 **/ 7440 **/
5091int 7441void
5092lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) 7442lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
5093{ 7443{
5094 struct lpfc_vport *vport = phba->pport; 7444 struct lpfc_sli *psli = &phba->sli;
5095 int i = 0; 7445 uint8_t actcmd = MBX_HEARTBEAT;
5096 uint32_t ha_copy; 7446 unsigned long timeout;
5097 7447
5098 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) { 7448 spin_lock_irq(&phba->hbalock);
5099 if (i++ > LPFC_MBOX_TMO * 1000) 7449 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
5100 return 1; 7450 spin_unlock_irq(&phba->hbalock);
5101 7451
5102 /* 7452 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
5103 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
5104 * did finish. This way we won't get the misleading
5105 * "Stray Mailbox Interrupt" message.
5106 */
5107 spin_lock_irq(&phba->hbalock); 7453 spin_lock_irq(&phba->hbalock);
5108 ha_copy = phba->work_ha; 7454 if (phba->sli.mbox_active)
5109 phba->work_ha &= ~HA_MBATT; 7455 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
5110 spin_unlock_irq(&phba->hbalock); 7456 spin_unlock_irq(&phba->hbalock);
7457 /* Determine how long we might wait for the active mailbox
7458 * command to be gracefully completed by firmware.
7459 */
7460 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
7461 1000) + jiffies;
7462 while (phba->sli.mbox_active) {
7463 /* Check active mailbox complete status every 2ms */
7464 msleep(2);
7465 if (time_after(jiffies, timeout))
7466 /* Timeout; let the mailbox flush routine
7467 * forcefully release the active mailbox command
7468 */
7469 break;
7470 }
7471 }
7472 lpfc_sli_mbox_sys_flush(phba);
7473}
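
The graceful wait above is the usual jiffies deadline idiom; condensed, with a hypothetical still_pending() predicate standing in for the mbox_active test:

unsigned long deadline = jiffies +
	msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000);

while (still_pending()) {
	msleep(2);			/* poll every 2 ms */
	if (time_after(jiffies, deadline))
		break;			/* give up; flush forcibly */
}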
7474
7475/**
7476 * lpfc_sli_eratt_read - read sli-3 error attention events
7477 * @phba: Pointer to HBA context.
7478 *
7479 * This function is called to read the SLI3 device error attention registers
7480 * for possible error attention events. The caller must hold the hbalock
7481 * with spin_lock_irq().
7482 *
7483 * This function returns 1 when there is Error Attention in the Host Attention
7484 * Register and returns 0 otherwise.
7485 **/
7486static int
7487lpfc_sli_eratt_read(struct lpfc_hba *phba)
7488{
7489 uint32_t ha_copy;
5111 7490
5112 if (ha_copy & HA_MBATT) 7491 /* Read chip Host Attention (HA) register */
5113 if (lpfc_sli_handle_mb_event(phba) == 0) 7492 ha_copy = readl(phba->HAregaddr);
5114 i = 0; 7493 if (ha_copy & HA_ERATT) {
7494 /* Read host status register to retrieve error event */
7495 lpfc_sli_read_hs(phba);
7496
7497 /* Check if a deferred error condition is active */
7498 if ((HS_FFER1 & phba->work_hs) &&
7499 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
7500 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
7501 spin_lock_irq(&phba->hbalock);
7502 phba->hba_flag |= DEFER_ERATT;
7503 spin_unlock_irq(&phba->hbalock);
7504 /* Clear all interrupt enable conditions */
7505 writel(0, phba->HCregaddr);
7506 readl(phba->HCregaddr);
7507 }
5115 7508
5116 msleep(1); 7509 /* Set the driver HA work bitmap */
7510 spin_lock_irq(&phba->hbalock);
7511 phba->work_ha |= HA_ERATT;
7512 /* Indicate polling handles this ERATT */
7513 phba->hba_flag |= HBA_ERATT_HANDLED;
7514 spin_unlock_irq(&phba->hbalock);
7515 return 1;
5117 } 7516 }
7517 return 0;
7518}
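
The deferred-error test above requires HS_FFER1 plus at least one of HS_FFER2..HS_FFER7. As a standalone predicate (mask values are stand-ins; the real ones are defined in the driver headers):

#include <stdbool.h>
#include <stdint.h>

static bool deferred_error(uint32_t work_hs, uint32_t ffer1,
			   uint32_t other_ffers)
{
	return (work_hs & ffer1) && (work_hs & other_ffers);
}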
7519
7520/**
7521 * lpfc_sli4_eratt_read - read sli-4 error attention events
7522 * @phba: Pointer to HBA context.
7523 *
7524 * This function is called to read the SLI4 device error attention registers
7525 * for possible error attention events. The caller must hold the hbalock
7526 * with spin_lock_irq().
7527 *
7528 * This function returns 1 when there is Error Attention in the Host Attention
7529 * Register and returns 0 otherwise.
7530 **/
7531static int
7532lpfc_sli4_eratt_read(struct lpfc_hba *phba)
7533{
7534 uint32_t uerr_sta_hi, uerr_sta_lo;
7535 uint32_t onlnreg0, onlnreg1;
5118 7536
5119 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; 7537 /* For now, use the SLI4 device internal unrecoverable error
7538 * registers for error attention. This can be changed later.
7539 */
7540 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
7541 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
7542 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
7543 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
7544 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
7545 if (uerr_sta_lo || uerr_sta_hi) {
7546 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7547 "1423 HBA Unrecoverable error: "
7548 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
7549 "online0_reg=0x%x, online1_reg=0x%x\n",
7550 uerr_sta_lo, uerr_sta_hi,
7551 onlnreg0, onlnreg1);
7552 /* TEMP: as the driver error recovery logic is not
7553 * fully developed, we just log the error message
7554 * and the device error attention action is now
7555 * temporarily disabled.
7556 */
7557 return 0;
7558 phba->work_status[0] = uerr_sta_lo;
7559 phba->work_status[1] = uerr_sta_hi;
7560 spin_lock_irq(&phba->hbalock);
7561 /* Set the driver HA work bitmap */
7562 phba->work_ha |= HA_ERATT;
7563 /* Indicate polling handles this ERATT */
7564 phba->hba_flag |= HBA_ERATT_HANDLED;
7565 spin_unlock_irq(&phba->hbalock);
7566 return 1;
7567 }
7568 }
7569 return 0;
5120} 7570}
5121 7571
5122/** 7572/**
5123 * lpfc_sli_check_eratt - check error attention events 7573 * lpfc_sli_check_eratt - check error attention events
5124 * @phba: Pointer to HBA context. 7574 * @phba: Pointer to HBA context.
5125 * 7575 *
5126 * This function is called form timer soft interrupt context to check HBA's 7576 * This function is called from timer soft interrupt context to check HBA's
5127 * error attention register bit for error attention events. 7577 * error attention register bit for error attention events.
5128 * 7578 *
5129 * This function returns 1 when there is Error Attention in the Host Attention 7579 * This function returns 1 when there is Error Attention in the Host Attention
@@ -5134,10 +7584,6 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5134{ 7584{
5135 uint32_t ha_copy; 7585 uint32_t ha_copy;
5136 7586
5137 /* If PCI channel is offline, don't process it */
5138 if (unlikely(pci_channel_offline(phba->pcidev)))
5139 return 0;
5140
5141 /* If somebody is waiting to handle an eratt, don't process it 7587 /* If somebody is waiting to handle an eratt, don't process it
5142 * here. The brdkill function will do this. 7588 * here. The brdkill function will do this.
5143 */ 7589 */
@@ -5161,56 +7607,84 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5161 return 0; 7607 return 0;
5162 } 7608 }
5163 7609
5164 /* Read chip Host Attention (HA) register */ 7610 /* If PCI channel is offline, don't process it */
5165 ha_copy = readl(phba->HAregaddr); 7611 if (unlikely(pci_channel_offline(phba->pcidev))) {
5166 if (ha_copy & HA_ERATT) {
5167 /* Read host status register to retrieve error event */
5168 lpfc_sli_read_hs(phba);
5169
5170 /* Check if there is a deferred error condition is active */
5171 if ((HS_FFER1 & phba->work_hs) &&
5172 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
5173 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
5174 phba->hba_flag |= DEFER_ERATT;
5175 /* Clear all interrupt enable conditions */
5176 writel(0, phba->HCregaddr);
5177 readl(phba->HCregaddr);
5178 }
5179
5180 /* Set the driver HA work bitmap */
5181 phba->work_ha |= HA_ERATT;
5182 /* Indicate polling handles this ERATT */
5183 phba->hba_flag |= HBA_ERATT_HANDLED;
5184 spin_unlock_irq(&phba->hbalock); 7612 spin_unlock_irq(&phba->hbalock);
5185 return 1; 7613 return 0;
7614 }
7615
7616 switch (phba->sli_rev) {
7617 case LPFC_SLI_REV2:
7618 case LPFC_SLI_REV3:
7619 /* Read chip Host Attention (HA) register */
7620 ha_copy = lpfc_sli_eratt_read(phba);
7621 break;
7622 case LPFC_SLI_REV4:
7623 /* Read devcie Uncoverable Error (UERR) registers */
7624 ha_copy = lpfc_sli4_eratt_read(phba);
7625 break;
7626 default:
7627 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7628 "0299 Invalid SLI revision (%d)\n",
7629 phba->sli_rev);
7630 ha_copy = 0;
7631 break;
5186 } 7632 }
5187 spin_unlock_irq(&phba->hbalock); 7633 spin_unlock_irq(&phba->hbalock);
7634
7635 return ha_copy;
7636}
7637
7638/**
7639 * lpfc_intr_state_check - Check device state for interrupt handling
7640 * @phba: Pointer to HBA context.
7641 *
7642 * This inline routine checks whether a device or its PCI slot is in a state
7643 * that the interrupt should be handled.
7644 *
7645 * This function returns 0 if the device or the PCI slot is in a state that
7646 * interrupt should be handled, otherwise -EIO.
7647 */
7648static inline int
7649lpfc_intr_state_check(struct lpfc_hba *phba)
7650{
7651 /* If the pci channel is offline, ignore all the interrupts */
7652 if (unlikely(pci_channel_offline(phba->pcidev)))
7653 return -EIO;
7654
7655 /* Update device level interrupt statistics */
7656 phba->sli.slistat.sli_intr++;
7657
7658 /* Ignore all interrupts during initialization. */
7659 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
7660 return -EIO;
7661
5188 return 0; 7662 return 0;
5189} 7663}
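
Hoisting the checks into this inline lets every interrupt handler open identically. A sketch of the intended call shape (my_isr is hypothetical; the real handlers follow in the hunks below):

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct lpfc_hba *phba = dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;	/* offline or still initializing */
	/* ... service the hardware event ... */
	return IRQ_HANDLED;
}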
5190 7664
5191/** 7665/**
5192 * lpfc_sp_intr_handler - The slow-path interrupt handler of lpfc driver 7666 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
5193 * @irq: Interrupt number. 7667 * @irq: Interrupt number.
5194 * @dev_id: The device context pointer. 7668 * @dev_id: The device context pointer.
5195 * 7669 *
5196 * This function is directly called from the PCI layer as an interrupt 7670 * This function is directly called from the PCI layer as an interrupt
5197 * service routine when the device is enabled with MSI-X multi-message 7671 * service routine when device with SLI-3 interface spec is enabled with
5198 * interrupt mode and there are slow-path events in the HBA. However, 7672 * MSI-X multi-message interrupt mode and there are slow-path events in
5199 * when the device is enabled with either MSI or Pin-IRQ interrupt mode, 7673 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
5200 * this function is called as part of the device-level interrupt handler. 7674 * interrupt mode, this function is called as part of the device-level
5201 * When the PCI slot is in error recovery or the HBA is undergoing 7675 * interrupt handler. When the PCI slot is in error recovery or the HBA
5202 * initialization, the interrupt handler will not process the interrupt. 7676 * is undergoing initialization, the interrupt handler will not process
5203 * The link attention and ELS ring attention events are handled by the 7677 * the interrupt. The link attention and ELS ring attention events are
5204 * worker thread. The interrupt handler signals the worker thread and 7678 * handled by the worker thread. The interrupt handler signals the worker
5205 * and returns for these events. This function is called without any 7679 * thread and returns for these events. This function is called without
5206 * lock held. It gets the hbalock to access and update SLI data 7680 * any lock held. It gets the hbalock to access and update SLI data
5207 * structures. 7681 * structures.
5208 * 7682 *
5209 * This function returns IRQ_HANDLED when interrupt is handled else it 7683 * This function returns IRQ_HANDLED when interrupt is handled else it
5210 * returns IRQ_NONE. 7684 * returns IRQ_NONE.
5211 **/ 7685 **/
5212irqreturn_t 7686irqreturn_t
5213lpfc_sp_intr_handler(int irq, void *dev_id) 7687lpfc_sli_sp_intr_handler(int irq, void *dev_id)
5214{ 7688{
5215 struct lpfc_hba *phba; 7689 struct lpfc_hba *phba;
5216 uint32_t ha_copy; 7690 uint32_t ha_copy;
@@ -5240,13 +7714,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5240 * individual interrupt handler in MSI-X multi-message interrupt mode 7714 * individual interrupt handler in MSI-X multi-message interrupt mode
5241 */ 7715 */
5242 if (phba->intr_type == MSIX) { 7716 if (phba->intr_type == MSIX) {
5243 /* If the pci channel is offline, ignore all the interrupts */ 7717 /* Check device state for handling interrupt */
5244 if (unlikely(pci_channel_offline(phba->pcidev))) 7718 if (lpfc_intr_state_check(phba))
5245 return IRQ_NONE;
5246 /* Update device-level interrupt statistics */
5247 phba->sli.slistat.sli_intr++;
5248 /* Ignore all interrupts during initialization. */
5249 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5250 return IRQ_NONE; 7719 return IRQ_NONE;
5251 /* Need to read HA REG for slow-path events */ 7720 /* Need to read HA REG for slow-path events */
5252 spin_lock_irqsave(&phba->hbalock, iflag); 7721 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -5271,7 +7740,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5271 * interrupt. 7740 * interrupt.
5272 */ 7741 */
5273 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 7742 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5274 spin_unlock_irq(&phba->hbalock); 7743 spin_unlock_irqrestore(&phba->hbalock, iflag);
5275 return IRQ_NONE; 7744 return IRQ_NONE;
5276 } 7745 }
5277 7746
@@ -5364,7 +7833,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5364 7833
5365 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 7834 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
5366 pmb = phba->sli.mbox_active; 7835 pmb = phba->sli.mbox_active;
5367 pmbox = &pmb->mb; 7836 pmbox = &pmb->u.mb;
5368 mbox = phba->mbox; 7837 mbox = phba->mbox;
5369 vport = pmb->vport; 7838 vport = pmb->vport;
5370 7839
@@ -5434,7 +7903,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
5434 LOG_MBOX | LOG_SLI, 7903 LOG_MBOX | LOG_SLI,
5435 "0350 rc should have" 7904 "0350 rc should have"
5436 "been MBX_BUSY"); 7905 "been MBX_BUSY");
5437 goto send_current_mbox; 7906 if (rc != MBX_NOT_FINISHED)
7907 goto send_current_mbox;
5438 } 7908 }
5439 } 7909 }
5440 spin_lock_irqsave( 7910 spin_lock_irqsave(
@@ -5471,29 +7941,29 @@ send_current_mbox:
5471 } 7941 }
5472 return IRQ_HANDLED; 7942 return IRQ_HANDLED;
5473 7943
5474} /* lpfc_sp_intr_handler */ 7944} /* lpfc_sli_sp_intr_handler */
5475 7945
5476/** 7946/**
5477 * lpfc_fp_intr_handler - The fast-path interrupt handler of lpfc driver 7947 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
5478 * @irq: Interrupt number. 7948 * @irq: Interrupt number.
5479 * @dev_id: The device context pointer. 7949 * @dev_id: The device context pointer.
5480 * 7950 *
5481 * This function is directly called from the PCI layer as an interrupt 7951 * This function is directly called from the PCI layer as an interrupt
5482 * service routine when the device is enabled with MSI-X multi-message 7952 * service routine when device with SLI-3 interface spec is enabled with
5483 * interrupt mode and there is a fast-path FCP IOCB ring event in the 7953 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
5484 * HBA. However, when the device is enabled with either MSI or Pin-IRQ 7954 * ring event in the HBA. However, when the device is enabled with either
5485 * interrupt mode, this function is called as part of the device-level 7955 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
5486 * interrupt handler. When the PCI slot is in error recovery or the HBA 7956 * device-level interrupt handler. When the PCI slot is in error recovery
5487 * is undergoing initialization, the interrupt handler will not process 7957 * or the HBA is undergoing initialization, the interrupt handler will not
5488 * the interrupt. The SCSI FCP fast-path ring events are handled in the 7958 * process the interrupt. The SCSI FCP fast-path ring events are handled in
5489 * interrupt context. This function is called without any lock held. It 7959 * the interrupt context. This function is called without any lock held.
5490 * gets the hbalock to access and update SLI data structures. 7960 * It gets the hbalock to access and update SLI data structures.
5491 * 7961 *
5492 * This function returns IRQ_HANDLED when interrupt is handled else it 7962 * This function returns IRQ_HANDLED when interrupt is handled else it
5493 * returns IRQ_NONE. 7963 * returns IRQ_NONE.
5494 **/ 7964 **/
5495irqreturn_t 7965irqreturn_t
5496lpfc_fp_intr_handler(int irq, void *dev_id) 7966lpfc_sli_fp_intr_handler(int irq, void *dev_id)
5497{ 7967{
5498 struct lpfc_hba *phba; 7968 struct lpfc_hba *phba;
5499 uint32_t ha_copy; 7969 uint32_t ha_copy;
@@ -5513,13 +7983,8 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5513 * individual interrupt handler in MSI-X multi-message interrupt mode 7983 * individual interrupt handler in MSI-X multi-message interrupt mode
5514 */ 7984 */
5515 if (phba->intr_type == MSIX) { 7985 if (phba->intr_type == MSIX) {
5516 /* If pci channel is offline, ignore all the interrupts */ 7986 /* Check device state for handling interrupt */
5517 if (unlikely(pci_channel_offline(phba->pcidev))) 7987 if (lpfc_intr_state_check(phba))
5518 return IRQ_NONE;
5519 /* Update device-level interrupt statistics */
5520 phba->sli.slistat.sli_intr++;
5521 /* Ignore all interrupts during initialization. */
5522 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5523 return IRQ_NONE; 7988 return IRQ_NONE;
5524 /* Need to read HA REG for FCP ring and other ring events */ 7989 /* Need to read HA REG for FCP ring and other ring events */
5525 ha_copy = readl(phba->HAregaddr); 7990 ha_copy = readl(phba->HAregaddr);
@@ -5530,7 +7995,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5530 * any interrupt. 7995 * any interrupt.
5531 */ 7996 */
5532 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 7997 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
5533 spin_unlock_irq(&phba->hbalock); 7998 spin_unlock_irqrestore(&phba->hbalock, iflag);
5534 return IRQ_NONE; 7999 return IRQ_NONE;
5535 } 8000 }
5536 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 8001 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
@@ -5566,26 +8031,27 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
5566 } 8031 }
5567 } 8032 }
5568 return IRQ_HANDLED; 8033 return IRQ_HANDLED;
5569} /* lpfc_fp_intr_handler */ 8034} /* lpfc_sli_fp_intr_handler */
5570 8035
5571/** 8036/**
5572 * lpfc_intr_handler - The device-level interrupt handler of lpfc driver 8037 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
5573 * @irq: Interrupt number. 8038 * @irq: Interrupt number.
5574 * @dev_id: The device context pointer. 8039 * @dev_id: The device context pointer.
5575 * 8040 *
5576 * This function is the device-level interrupt handler called from the PCI 8041 * This function is the HBA device-level interrupt handler to device with
5577 * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is 8042 * SLI-3 interface spec, called from the PCI layer when either MSI or
5578 * an event in the HBA which requires driver attention. This function 8043 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
5579 * invokes the slow-path interrupt attention handling function and fast-path 8044 * requires driver attention. This function invokes the slow-path interrupt
5580 * interrupt attention handling function in turn to process the relevant 8045 * attention handling function and fast-path interrupt attention handling
5581 * HBA attention events. This function is called without any lock held. It 8046 * function in turn to process the relevant HBA attention events. This
5582 * gets the hbalock to access and update SLI data structures. 8047 * function is called without any lock held. It gets the hbalock to access
8048 * and update SLI data structures.
5583 * 8049 *
5584 * This function returns IRQ_HANDLED when interrupt is handled, else it 8050 * This function returns IRQ_HANDLED when interrupt is handled, else it
5585 * returns IRQ_NONE. 8051 * returns IRQ_NONE.
5586 **/ 8052 **/
5587irqreturn_t 8053irqreturn_t
5588lpfc_intr_handler(int irq, void *dev_id) 8054lpfc_sli_intr_handler(int irq, void *dev_id)
5589{ 8055{
5590 struct lpfc_hba *phba; 8056 struct lpfc_hba *phba;
5591 irqreturn_t sp_irq_rc, fp_irq_rc; 8057 irqreturn_t sp_irq_rc, fp_irq_rc;
@@ -5600,15 +8066,8 @@ lpfc_intr_handler(int irq, void *dev_id)
5600 if (unlikely(!phba)) 8066 if (unlikely(!phba))
5601 return IRQ_NONE; 8067 return IRQ_NONE;
5602 8068
5603 /* If the pci channel is offline, ignore all the interrupts. */ 8069 /* Check device state for handling interrupt */
5604 if (unlikely(pci_channel_offline(phba->pcidev))) 8070 if (lpfc_intr_state_check(phba))
5605 return IRQ_NONE;
5606
5607 /* Update device level interrupt statistics */
5608 phba->sli.slistat.sli_intr++;
5609
5610 /* Ignore all interrupts during initialization. */
5611 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
5612 return IRQ_NONE; 8071 return IRQ_NONE;
5613 8072
5614 spin_lock(&phba->hbalock); 8073 spin_lock(&phba->hbalock);
@@ -5650,7 +8109,7 @@ lpfc_intr_handler(int irq, void *dev_id)
5650 status2 >>= (4*LPFC_ELS_RING); 8109 status2 >>= (4*LPFC_ELS_RING);
5651 8110
5652 if (status1 || (status2 & HA_RXMASK)) 8111 if (status1 || (status2 & HA_RXMASK))
5653 sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id); 8112 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
5654 else 8113 else
5655 sp_irq_rc = IRQ_NONE; 8114 sp_irq_rc = IRQ_NONE;
5656 8115
@@ -5670,10 +8129,3322 @@ lpfc_intr_handler(int irq, void *dev_id)
5670 status2 = 0; 8129 status2 = 0;
5671 8130
5672 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 8131 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
5673 fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id); 8132 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
5674 else 8133 else
5675 fp_irq_rc = IRQ_NONE; 8134 fp_irq_rc = IRQ_NONE;
5676 8135
5677 /* Return device-level interrupt handling status */ 8136 /* Return device-level interrupt handling status */
5678 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 8137 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
5679} /* lpfc_intr_handler */ 8138} /* lpfc_sli_intr_handler */
8139
8140/**
8141 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
8142 * @phba: pointer to lpfc hba data structure.
8143 *
8144 * This routine is invoked by the worker thread to process all the pending
8145 * SLI4 FCP abort XRI events.
8146 **/
8147void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
8148{
8149 struct lpfc_cq_event *cq_event;
8150
8151 /* First, declare the fcp xri abort event has been handled */
8152 spin_lock_irq(&phba->hbalock);
8153 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
8154 spin_unlock_irq(&phba->hbalock);
8155 /* Now, handle all the fcp xri abort events */
8156 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
8157 /* Get the first event from the head of the event queue */
8158 spin_lock_irq(&phba->hbalock);
8159 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
8160 cq_event, struct lpfc_cq_event, list);
8161 spin_unlock_irq(&phba->hbalock);
8162 /* Notify aborted XRI for FCP work queue */
8163 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8164 /* Free the event processed back to the free pool */
8165 lpfc_sli4_cq_event_release(phba, cq_event);
8166 }
8167}
8168
8169/**
8170 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
8171 * @phba: pointer to lpfc hba data structure.
8172 *
8173 * This routine is invoked by the worker thread to process all the pending
8174 * SLI4 els abort xri events.
8175 **/
8176void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
8177{
8178 struct lpfc_cq_event *cq_event;
8179
8180 /* First, declare the els xri abort event has been handled */
8181 spin_lock_irq(&phba->hbalock);
8182 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
8183 spin_unlock_irq(&phba->hbalock);
8184 /* Now, handle all the els xri abort events */
8185 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
8186 /* Get the first event from the head of the event queue */
8187 spin_lock_irq(&phba->hbalock);
8188 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
8189 cq_event, struct lpfc_cq_event, list);
8190 spin_unlock_irq(&phba->hbalock);
8191 /* Notify aborted XRI for ELS work queue */
8192 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
8193 /* Free the event processed back to the free pool */
8194 lpfc_sli4_cq_event_release(phba, cq_event);
8195 }
8196}
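
Both *_xri_abort_event_proc routines above follow the same drain pattern: clear the pending flag under hbalock, then repeatedly pop one event under the lock and handle it with the lock dropped. Below is a minimal sketch of that pattern using the standard <linux/list.h> primitives instead of the driver's list_remove_head() macro; demo_event, demo_drain(), and handle_one() are hypothetical names, not lpfc symbols, and the sketch assumes a single consumer (the worker thread), as in the code above.

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_event {
	struct list_head list;
	/* event payload omitted */
};

static void demo_drain(spinlock_t *lock, struct list_head *queue,
		       void (*handle_one)(struct demo_event *))
{
	struct demo_event *ev;

	while (!list_empty(queue)) {
		/* Pop exactly one event while holding the lock... */
		spin_lock_irq(lock);
		ev = list_first_entry(queue, struct demo_event, list);
		list_del(&ev->list);
		spin_unlock_irq(lock);
		/* ...then handle (and free) it with the lock dropped */
		handle_one(ev);
	}
}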
8197
8198static void
8199lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
8200 struct lpfc_iocbq *pIocbOut,
8201 struct lpfc_wcqe_complete *wcqe)
8202{
8203 size_t offset = offsetof(struct lpfc_iocbq, iocb);
8204
8205 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
8206 sizeof(struct lpfc_iocbq) - offset);
8207 memset(&pIocbIn->sli4_info, 0,
8208 sizeof(struct lpfc_sli4_rspiocb_info));
8209 /* Map WCQE parameters into irspiocb parameters */
8210 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
8211 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
8212 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
8213 pIocbIn->iocb.un.fcpi.fcpi_parm =
8214 pIocbOut->iocb.un.fcpi.fcpi_parm -
8215 wcqe->total_data_placed;
8216 else
8217 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8218 else
8219 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8220 /* Load in additional WCQE parameters */
8221 pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
8222 pIocbIn->sli4_info.bfield = 0;
8223 if (bf_get(lpfc_wcqe_c_xb, wcqe))
8224 pIocbIn->sli4_info.bfield |= LPFC_XB;
8225 if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
8226 pIocbIn->sli4_info.bfield |= LPFC_PV;
8227 pIocbIn->sli4_info.priority =
8228 bf_get(lpfc_wcqe_c_priority, wcqe);
8229 }
8230}
8231
8232/**
8233 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
8234 * @phba: Pointer to HBA context object.
8235 * @mcqe: Pointer to mailbox completion queue entry.
8236 *
8237 * This routine processes a mailbox completion queue entry with an
8238 * asynchronous event.
8239 *
8240 * Return: true if work posted to worker thread, otherwise false.
8241 **/
8242static bool
8243lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8244{
8245 struct lpfc_cq_event *cq_event;
8246 unsigned long iflags;
8247
8248 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8249 "0392 Async Event: word0:x%x, word1:x%x, "
8250 "word2:x%x, word3:x%x\n", mcqe->word0,
8251 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
8252
8253 /* Allocate a new internal CQ_EVENT entry */
8254 cq_event = lpfc_sli4_cq_event_alloc(phba);
8255 if (!cq_event) {
8256 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8257 "0394 Failed to allocate CQ_EVENT entry\n");
8258 return false;
8259 }
8260
8261 /* Move the CQE into an asynchronous event entry */
8262 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
8263 spin_lock_irqsave(&phba->hbalock, iflags);
8264 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
8265 /* Set the async event flag */
8266 phba->hba_flag |= ASYNC_EVENT;
8267 spin_unlock_irqrestore(&phba->hbalock, iflags);
8268
8269 return true;
8270}
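
The handler above is the producer half of that worker-thread handoff: copy the CQE into driver-owned memory, queue it under hbalock, and set a flag the worker will test. A hedged sketch of just the posting step, reusing the hypothetical demo_event from the earlier sketch (the flag word and bit are illustrative stand-ins for hba_flag and ASYNC_EVENT):

static bool demo_post_event(spinlock_t *lock, struct list_head *queue,
			    unsigned long *flag_word, unsigned long flag_bit,
			    struct demo_event *ev)
{
	unsigned long iflags;

	spin_lock_irqsave(lock, iflags);
	list_add_tail(&ev->list, queue);	/* FIFO, like the lpfc queues */
	*flag_word |= flag_bit;			/* e.g. ASYNC_EVENT */
	spin_unlock_irqrestore(lock, iflags);
	return true;				/* tell the caller work was posted */
}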
8271
8272/**
8273 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
8274 * @phba: Pointer to HBA context object.
8275 * @mcqe: Pointer to mailbox completion queue entry.
8276 *
8277 * This routine processes a mailbox completion queue entry with a mailbox
8278 * completion event.
8279 *
8280 * Return: true if work posted to worker thread, otherwise false.
8281 **/
8282static bool
8283lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
8284{
8285 uint32_t mcqe_status;
8286 MAILBOX_t *mbox, *pmbox;
8287 struct lpfc_mqe *mqe;
8288 struct lpfc_vport *vport;
8289 struct lpfc_nodelist *ndlp;
8290 struct lpfc_dmabuf *mp;
8291 unsigned long iflags;
8292 LPFC_MBOXQ_t *pmb;
8293 bool workposted = false;
8294 int rc;
8295
8296	/* If not a mailbox-complete MCQE, get out after checking mailbox consume */
8297 if (!bf_get(lpfc_trailer_completed, mcqe))
8298 goto out_no_mqe_complete;
8299
8300 /* Get the reference to the active mbox command */
8301 spin_lock_irqsave(&phba->hbalock, iflags);
8302 pmb = phba->sli.mbox_active;
8303 if (unlikely(!pmb)) {
8304 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
8305 "1832 No pending MBOX command to handle\n");
8306 spin_unlock_irqrestore(&phba->hbalock, iflags);
8307 goto out_no_mqe_complete;
8308 }
8309 spin_unlock_irqrestore(&phba->hbalock, iflags);
8310 mqe = &pmb->u.mqe;
8311 pmbox = (MAILBOX_t *)&pmb->u.mqe;
8312 mbox = phba->mbox;
8313 vport = pmb->vport;
8314
8315 /* Reset heartbeat timer */
8316 phba->last_completion_time = jiffies;
8317 del_timer(&phba->sli.mbox_tmo);
8318
8319 /* Move mbox data to caller's mailbox region, do endian swapping */
8320 if (pmb->mbox_cmpl && mbox)
8321 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
8322 /* Set the mailbox status with SLI4 range 0x4000 */
8323 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
8324 if (mcqe_status != MB_CQE_STATUS_SUCCESS)
8325 bf_set(lpfc_mqe_status, mqe,
8326 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8327
8328 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
8329 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
8330 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
8331 "MBOX dflt rpi: status:x%x rpi:x%x",
8332 mcqe_status,
8333 pmbox->un.varWords[0], 0);
8334 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
8335 mp = (struct lpfc_dmabuf *)(pmb->context1);
8336 ndlp = (struct lpfc_nodelist *)pmb->context2;
8337			/* Reg_LOGIN of dflt RPI was successful. Now let's get
8338			 * rid of the RPI using the same mbox buffer.
8339 */
8340 lpfc_unreg_login(phba, vport->vpi,
8341 pmbox->un.varWords[0], pmb);
8342 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
8343 pmb->context1 = mp;
8344 pmb->context2 = ndlp;
8345 pmb->vport = vport;
8346 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
8347 if (rc != MBX_BUSY)
8348 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
8349 LOG_SLI, "0385 rc should "
8350 "have been MBX_BUSY\n");
8351 if (rc != MBX_NOT_FINISHED)
8352 goto send_current_mbox;
8353 }
8354 }
8355 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
8356 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8357 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
8358
8359 /* There is mailbox completion work to do */
8360 spin_lock_irqsave(&phba->hbalock, iflags);
8361 __lpfc_mbox_cmpl_put(phba, pmb);
8362 phba->work_ha |= HA_MBATT;
8363 spin_unlock_irqrestore(&phba->hbalock, iflags);
8364 workposted = true;
8365
8366send_current_mbox:
8367 spin_lock_irqsave(&phba->hbalock, iflags);
8368 /* Release the mailbox command posting token */
8369 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8370	/* Setting the active mailbox pointer must be in sync with clearing the flag */
8371 phba->sli.mbox_active = NULL;
8372 spin_unlock_irqrestore(&phba->hbalock, iflags);
8373 /* Wake up worker thread to post the next pending mailbox command */
8374 lpfc_worker_wake_up(phba);
8375out_no_mqe_complete:
8376 if (bf_get(lpfc_trailer_consumed, mcqe))
8377 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
8378 return workposted;
8379}
8380
8381/**
8382 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
8383 * @phba: Pointer to HBA context object.
8384 * @cqe: Pointer to mailbox completion queue entry.
8385 *
8386 * This routine processes a mailbox completion queue entry; it invokes the
8387 * proper mailbox completion handling or asynchronous event handling routine
8388 * according to the MCQE's async bit.
8389 *
8390 * Return: true if work posted to worker thread, otherwise false.
8391 **/
8392static bool
8393lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8394{
8395 struct lpfc_mcqe mcqe;
8396 bool workposted;
8397
8398 /* Copy the mailbox MCQE and convert endian order as needed */
8399 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
8400
8401 /* Invoke the proper event handling routine */
8402 if (!bf_get(lpfc_trailer_async, &mcqe))
8403 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
8404 else
8405 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
8406 return workposted;
8407}
8408
8409/**
8410 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
8411 * @phba: Pointer to HBA context object.
8412 * @wcqe: Pointer to work-queue completion queue entry.
8413 *
8414 * This routine handles an ELS work-queue completion event.
8415 *
8416 * Return: true if work posted to worker thread, otherwise false.
8417 **/
8418static bool
8419lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
8420 struct lpfc_wcqe_complete *wcqe)
8421{
8422 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8423 struct lpfc_iocbq *cmdiocbq;
8424 struct lpfc_iocbq *irspiocbq;
8425 unsigned long iflags;
8426 bool workposted = false;
8427
8428 spin_lock_irqsave(&phba->hbalock, iflags);
8429 pring->stats.iocb_event++;
8430 /* Look up the ELS command IOCB and create pseudo response IOCB */
8431 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8432 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8433 spin_unlock_irqrestore(&phba->hbalock, iflags);
8434
8435 if (unlikely(!cmdiocbq)) {
8436 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8437 "0386 ELS complete with no corresponding "
8438 "cmdiocb: iotag (%d)\n",
8439 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8440 return workposted;
8441 }
8442
8443 /* Fake the irspiocbq and copy necessary response information */
8444 irspiocbq = lpfc_sli_get_iocbq(phba);
8445 if (!irspiocbq) {
8446 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8447 "0387 Failed to allocate an iocbq\n");
8448 return workposted;
8449 }
8450 lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
8451
8452 /* Add the irspiocb to the response IOCB work list */
8453 spin_lock_irqsave(&phba->hbalock, iflags);
8454 list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
8455 /* Indicate ELS ring attention */
8456 phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
8457 spin_unlock_irqrestore(&phba->hbalock, iflags);
8458 workposted = true;
8459
8460 return workposted;
8461}
8462
8463/**
8464 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
8465 * @phba: Pointer to HBA context object.
8466 * @wcqe: Pointer to work-queue completion queue entry.
8467 *
8468 * This routine handles a slow-path WQ entry consumed event by invoking the
8469 * proper WQ release routine on the slow-path WQ.
8470 **/
8471static void
8472lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
8473 struct lpfc_wcqe_release *wcqe)
8474{
8475 /* Check for the slow-path ELS work queue */
8476 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
8477 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
8478 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8479 else
8480 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8481 "2579 Slow-path wqe consume event carries "
8482 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
8483 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
8484 phba->sli4_hba.els_wq->queue_id);
8485}
8486
8487/**
8488 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
8489 * @phba: Pointer to HBA context object.
8490 * @cq: Pointer to a WQ completion queue.
8491 * @wcqe: Pointer to work-queue completion queue entry.
8492 *
8493 * This routine handles an XRI abort event.
8494 *
8495 * Return: true if work posted to worker thread, otherwise false.
8496 **/
8497static bool
8498lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
8499 struct lpfc_queue *cq,
8500 struct sli4_wcqe_xri_aborted *wcqe)
8501{
8502 bool workposted = false;
8503 struct lpfc_cq_event *cq_event;
8504 unsigned long iflags;
8505
8506 /* Allocate a new internal CQ_EVENT entry */
8507 cq_event = lpfc_sli4_cq_event_alloc(phba);
8508 if (!cq_event) {
8509 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8510 "0602 Failed to allocate CQ_EVENT entry\n");
8511 return false;
8512 }
8513
8514 /* Move the CQE into the proper xri abort event list */
8515 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
8516 switch (cq->subtype) {
8517 case LPFC_FCP:
8518 spin_lock_irqsave(&phba->hbalock, iflags);
8519 list_add_tail(&cq_event->list,
8520 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
8521 /* Set the fcp xri abort event flag */
8522 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
8523 spin_unlock_irqrestore(&phba->hbalock, iflags);
8524 workposted = true;
8525 break;
8526 case LPFC_ELS:
8527 spin_lock_irqsave(&phba->hbalock, iflags);
8528 list_add_tail(&cq_event->list,
8529 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
8530 /* Set the els xri abort event flag */
8531 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
8532 spin_unlock_irqrestore(&phba->hbalock, iflags);
8533 workposted = true;
8534 break;
8535 default:
8536 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8537 "0603 Invalid work queue CQE subtype (x%x)\n",
8538 cq->subtype);
8539 workposted = false;
8540 break;
8541 }
8542 return workposted;
8543}
8544
8545/**
8546 * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
8547 * @phba: Pointer to HBA context object.
8548 * @cq: Pointer to the completion queue.
8549 * @wcqe: Pointer to a completion queue entry.
8550 *
8551 * This routine processes a slow-path work-queue completion queue entry.
8552 *
8553 * Return: true if work posted to worker thread, otherwise false.
8554 **/
8555static bool
8556lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8557 struct lpfc_cqe *cqe)
8558{
8559 struct lpfc_wcqe_complete wcqe;
8560 bool workposted = false;
8561
8562 /* Copy the work queue CQE and convert endian order if needed */
8563 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8564
8565 /* Check and process for different type of WCQE and dispatch */
8566 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8567 case CQE_CODE_COMPL_WQE:
8568 /* Process the WQ complete event */
8569 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8570 (struct lpfc_wcqe_complete *)&wcqe);
8571 break;
8572 case CQE_CODE_RELEASE_WQE:
8573 /* Process the WQ release event */
8574 lpfc_sli4_sp_handle_rel_wcqe(phba,
8575 (struct lpfc_wcqe_release *)&wcqe);
8576 break;
8577 case CQE_CODE_XRI_ABORTED:
8578 /* Process the WQ XRI abort event */
8579 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8580 (struct sli4_wcqe_xri_aborted *)&wcqe);
8581 break;
8582 default:
8583 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8584 "0388 Not a valid WCQE code: x%x\n",
8585 bf_get(lpfc_wcqe_c_code, &wcqe));
8586 break;
8587 }
8588 return workposted;
8589}
8590
8591/**
8592 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
8593 * @phba: Pointer to HBA context object.
8594 * @cqe: Pointer to receive-queue completion queue entry.
8595 *
8596 * This routine processes a receive-queue completion queue entry.
8597 *
8598 * Return: true if work posted to worker thread, otherwise false.
8599 **/
8600static bool
8601lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8602{
8603 struct lpfc_rcqe rcqe;
8604 bool workposted = false;
8605 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
8606 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
8607 struct hbq_dmabuf *dma_buf;
8608 uint32_t status;
8609 unsigned long iflags;
8610
8611 /* Copy the receive queue CQE and convert endian order if needed */
8612 lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
8613 lpfc_sli4_rq_release(hrq, drq);
8614 if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
8615 goto out;
8616 if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
8617 goto out;
8618
8619 status = bf_get(lpfc_rcqe_status, &rcqe);
8620 switch (status) {
8621 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
8622 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8623 "2537 Receive Frame Truncated!!\n");
8624 case FC_STATUS_RQ_SUCCESS:
8625 spin_lock_irqsave(&phba->hbalock, iflags);
8626 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
8627 if (!dma_buf) {
8628 spin_unlock_irqrestore(&phba->hbalock, iflags);
8629 goto out;
8630 }
8631 memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
8632		/* save off the frame for the worker thread to process */
8633 list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
8634 /* Frame received */
8635 phba->hba_flag |= HBA_RECEIVE_BUFFER;
8636 spin_unlock_irqrestore(&phba->hbalock, iflags);
8637 workposted = true;
8638 break;
8639 case FC_STATUS_INSUFF_BUF_NEED_BUF:
8640 case FC_STATUS_INSUFF_BUF_FRM_DISC:
8641 /* Post more buffers if possible */
8642 spin_lock_irqsave(&phba->hbalock, iflags);
8643 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
8644 spin_unlock_irqrestore(&phba->hbalock, iflags);
8645 workposted = true;
8646 break;
8647 }
8648out:
8649 return workposted;
8650
8651}
8652
8653/**
8654 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
8655 * @phba: Pointer to HBA context object.
8656 * @eqe: Pointer to fast-path event queue entry.
8657 *
8658 * This routine processes an event queue entry from the slow-path event
8659 * queue. It checks the MajorCode and MinorCode to determine whether this
8660 * is a completion event on a completion queue; if not, an error is logged
8661 * and the routine returns. Otherwise, it gets to the corresponding
8662 * completion queue, processes all the entries on that completion queue,
8663 * rearms the completion queue, and then returns.
8664 *
8665 **/
8666static void
8667lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8668{
8669 struct lpfc_queue *cq = NULL, *childq, *speq;
8670 struct lpfc_cqe *cqe;
8671 bool workposted = false;
8672 int ecount = 0;
8673 uint16_t cqid;
8674
8675 if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
8676 bf_get(lpfc_eqe_minor_code, eqe) != 0) {
8677 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8678 "0359 Not a valid slow-path completion "
8679 "event: majorcode=x%x, minorcode=x%x\n",
8680 bf_get(lpfc_eqe_major_code, eqe),
8681 bf_get(lpfc_eqe_minor_code, eqe));
8682 return;
8683 }
8684
8685 /* Get the reference to the corresponding CQ */
8686 cqid = bf_get(lpfc_eqe_resource_id, eqe);
8687
8688 /* Search for completion queue pointer matching this cqid */
8689 speq = phba->sli4_hba.sp_eq;
8690 list_for_each_entry(childq, &speq->child_list, list) {
8691 if (childq->queue_id == cqid) {
8692 cq = childq;
8693 break;
8694 }
8695 }
8696 if (unlikely(!cq)) {
8697 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8698 "0365 Slow-path CQ identifier (%d) does "
8699 "not exist\n", cqid);
8700 return;
8701 }
8702
8703 /* Process all the entries to the CQ */
8704 switch (cq->type) {
8705 case LPFC_MCQ:
8706 while ((cqe = lpfc_sli4_cq_get(cq))) {
8707 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
8708 if (!(++ecount % LPFC_GET_QE_REL_INT))
8709 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8710 }
8711 break;
8712 case LPFC_WCQ:
8713 while ((cqe = lpfc_sli4_cq_get(cq))) {
8714 workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
8715 if (!(++ecount % LPFC_GET_QE_REL_INT))
8716 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8717 }
8718 break;
8719 case LPFC_RCQ:
8720 while ((cqe = lpfc_sli4_cq_get(cq))) {
8721 workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
8722 if (!(++ecount % LPFC_GET_QE_REL_INT))
8723 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8724 }
8725 break;
8726 default:
8727 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8728 "0370 Invalid completion queue type (%d)\n",
8729 cq->type);
8730 return;
8731 }
8732
8733 /* Catch the no cq entry condition, log an error */
8734 if (unlikely(ecount == 0))
8735 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8736 "0371 No entry from the CQ: identifier "
8737 "(x%x), type (%d)\n", cq->queue_id, cq->type);
8738
8739	/* In any case, flush and re-arm the CQ */
8740 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
8741
8742 /* wake up worker thread if there are works to be done */
8743 if (workposted)
8744 lpfc_worker_wake_up(phba);
8745}
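
All three CQ polling loops above use the same batched-release idiom: consumed entries are handed back to the port every LPFC_GET_QE_REL_INT entries without re-arming, and one final release re-arms the queue. A compact sketch of the idiom with hypothetical accessors (demo_cq_get(), demo_cq_release(), and the interval constant stand in for the lpfc queue helpers; the interval value is assumed):

/* Hypothetical stand-ins for lpfc_sli4_cq_get()/lpfc_sli4_cq_release() */
struct demo_cq;
void *demo_cq_get(struct demo_cq *cq);
void demo_cq_release(struct demo_cq *cq, bool rearm);
void demo_handle_cqe(struct demo_cq *cq, void *cqe);

#define DEMO_REL_INTERVAL 32	/* assumed; plays the LPFC_GET_QE_REL_INT role */

static int demo_poll_cq(struct demo_cq *cq)
{
	void *cqe;
	int ecount = 0;

	while ((cqe = demo_cq_get(cq))) {
		demo_handle_cqe(cq, cqe);
		/* Hand entries back in batches so the CQ cannot fill up,
		 * but do not re-arm the interrupt yet (NOARM). */
		if (!(++ecount % DEMO_REL_INTERVAL))
			demo_cq_release(cq, false);
	}
	/* The final release returns the remainder and re-arms the interrupt */
	demo_cq_release(cq, true);
	return ecount;
}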
8746
8747/**
8748 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
8749 * @wcqe: Pointer to work-queue completion queue entry.
8750 *
8751 * This routine processes a fast-path work queue completion entry from a
8752 * fast-path event queue for FCP command response completion.
8753 **/
8754static void
8755lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
8756 struct lpfc_wcqe_complete *wcqe)
8757{
8758 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
8759 struct lpfc_iocbq *cmdiocbq;
8760 struct lpfc_iocbq irspiocbq;
8761 unsigned long iflags;
8762
8763 spin_lock_irqsave(&phba->hbalock, iflags);
8764 pring->stats.iocb_event++;
8765 spin_unlock_irqrestore(&phba->hbalock, iflags);
8766
8767 /* Check for response status */
8768 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
8769 /* If resource errors reported from HBA, reduce queue
8770 * depth of the SCSI device.
8771 */
8772 if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
8773 IOSTAT_LOCAL_REJECT) &&
8774 (wcqe->parameter == IOERR_NO_RESOURCES)) {
8775 phba->lpfc_rampdown_queue_depth(phba);
8776 }
8777 /* Log the error status */
8778 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8779 "0373 FCP complete error: status=x%x, "
8780 "hw_status=x%x, total_data_specified=%d, "
8781 "parameter=x%x, word3=x%x\n",
8782 bf_get(lpfc_wcqe_c_status, wcqe),
8783 bf_get(lpfc_wcqe_c_hw_status, wcqe),
8784 wcqe->total_data_placed, wcqe->parameter,
8785 wcqe->word3);
8786 }
8787
8788 /* Look up the FCP command IOCB and create pseudo response IOCB */
8789 spin_lock_irqsave(&phba->hbalock, iflags);
8790 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8791 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8792 spin_unlock_irqrestore(&phba->hbalock, iflags);
8793 if (unlikely(!cmdiocbq)) {
8794 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8795 "0374 FCP complete with no corresponding "
8796 "cmdiocb: iotag (%d)\n",
8797 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8798 return;
8799 }
8800 if (unlikely(!cmdiocbq->iocb_cmpl)) {
8801 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8802 "0375 FCP cmdiocb not callback function "
8803 "iotag: (%d)\n",
8804 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8805 return;
8806 }
8807
8808 /* Fake the irspiocb and copy necessary response information */
8809 lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
8810
8811 /* Pass the cmd_iocb and the rsp state to the upper layer */
8812 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
8813}
8814
8815/**
8816 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
8817 * @phba: Pointer to HBA context object.
8818 * @cq: Pointer to completion queue.
8819 * @wcqe: Pointer to work-queue completion queue entry.
8820 *
8821 * This routine handles a fast-path WQ entry consumed event by invoking the
8822 * proper WQ release routine on the fast-path WQ.
8823 **/
8824static void
8825lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8826 struct lpfc_wcqe_release *wcqe)
8827{
8828 struct lpfc_queue *childwq;
8829 bool wqid_matched = false;
8830 uint16_t fcp_wqid;
8831
8832 /* Check for fast-path FCP work queue release */
8833 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
8834 list_for_each_entry(childwq, &cq->child_list, list) {
8835 if (childwq->queue_id == fcp_wqid) {
8836 lpfc_sli4_wq_release(childwq,
8837 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8838 wqid_matched = true;
8839 break;
8840 }
8841 }
8842 /* Report warning log message if no match found */
8843 if (wqid_matched != true)
8844 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8845 "2580 Fast-path wqe consume event carries "
8846 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
8847}
8848
8849/**
8850 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
8851 * @cq: Pointer to the completion queue.
8852 * @cqe: Pointer to fast-path completion queue entry.
8853 *
8854 * This routine processes a fast-path work queue completion entry from a
8855 * fast-path event queue for FCP command response completion.
8856 **/
8857static int
8858lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8859 struct lpfc_cqe *cqe)
8860{
8861 struct lpfc_wcqe_release wcqe;
8862 bool workposted = false;
8863
8864 /* Copy the work queue CQE and convert endian order if needed */
8865 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8866
8867 /* Check and process for different type of WCQE and dispatch */
8868 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8869 case CQE_CODE_COMPL_WQE:
8870 /* Process the WQ complete event */
8871 lpfc_sli4_fp_handle_fcp_wcqe(phba,
8872 (struct lpfc_wcqe_complete *)&wcqe);
8873 break;
8874 case CQE_CODE_RELEASE_WQE:
8875 /* Process the WQ release event */
8876 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
8877 (struct lpfc_wcqe_release *)&wcqe);
8878 break;
8879 case CQE_CODE_XRI_ABORTED:
8880 /* Process the WQ XRI abort event */
8881 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8882 (struct sli4_wcqe_xri_aborted *)&wcqe);
8883 break;
8884 default:
8885 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8886 "0144 Not a valid WCQE code: x%x\n",
8887 bf_get(lpfc_wcqe_c_code, &wcqe));
8888 break;
8889 }
8890 return workposted;
8891}
8892
8893/**
8894 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
8895 * @phba: Pointer to HBA context object.
8896 * @eqe: Pointer to fast-path event queue entry.
8897 *
8898 * This routine processes an event queue entry from the fast-path event
8899 * queue. It checks the MajorCode and MinorCode to determine whether this
8900 * is a completion event on a completion queue; if not, an error is logged
8901 * and the routine returns. Otherwise, it gets to the corresponding
8902 * completion queue, processes all the entries on that completion queue,
8903 * rearms the completion queue, and then returns.
8904 **/
8905static void
8906lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
8907 uint32_t fcp_cqidx)
8908{
8909 struct lpfc_queue *cq;
8910 struct lpfc_cqe *cqe;
8911 bool workposted = false;
8912 uint16_t cqid;
8913 int ecount = 0;
8914
8915 if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
8916 unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
8917 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8918 "0366 Not a valid fast-path completion "
8919 "event: majorcode=x%x, minorcode=x%x\n",
8920 bf_get(lpfc_eqe_major_code, eqe),
8921 bf_get(lpfc_eqe_minor_code, eqe));
8922 return;
8923 }
8924
8925 cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
8926 if (unlikely(!cq)) {
8927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8928 "0367 Fast-path completion queue does not "
8929 "exist\n");
8930 return;
8931 }
8932
8933 /* Get the reference to the corresponding CQ */
8934 cqid = bf_get(lpfc_eqe_resource_id, eqe);
8935 if (unlikely(cqid != cq->queue_id)) {
8936 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8937 "0368 Miss-matched fast-path completion "
8938 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
8939 cqid, cq->queue_id);
8940 return;
8941 }
8942
8943 /* Process all the entries to the CQ */
8944 while ((cqe = lpfc_sli4_cq_get(cq))) {
8945 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
8946 if (!(++ecount % LPFC_GET_QE_REL_INT))
8947 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8948 }
8949
8950 /* Catch the no cq entry condition */
8951 if (unlikely(ecount == 0))
8952 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8953 "0369 No entry from fast-path completion "
8954 "queue fcpcqid=%d\n", cq->queue_id);
8955
8956	/* In any case, flush and re-arm the CQ */
8957 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
8958
8959 /* wake up worker thread if there are works to be done */
8960 if (workposted)
8961 lpfc_worker_wake_up(phba);
8962}
8963
8964static void
8965lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
8966{
8967 struct lpfc_eqe *eqe;
8968
8969 /* walk all the EQ entries and drop on the floor */
8970 while ((eqe = lpfc_sli4_eq_get(eq)))
8971 ;
8972
8973 /* Clear and re-arm the EQ */
8974 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
8975}
8976
8977/**
8978 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
8979 * @irq: Interrupt number.
8980 * @dev_id: The device context pointer.
8981 *
8982 * This function is directly called from the PCI layer as an interrupt
8983 * service routine when device with SLI-4 interface spec is enabled with
8984 * MSI-X multi-message interrupt mode and there are slow-path events in
8985 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
8986 * interrupt mode, this function is called as part of the device-level
8987 * interrupt handler. When the PCI slot is in error recovery or the HBA is
8988 * undergoing initialization, the interrupt handler will not process the
8989 * interrupt. The link attention and ELS ring attention events are handled
8990 * by the worker thread. The interrupt handler signals the worker thread
8991 * and returns for these events. This function is called without any lock
8992 * held. It gets the hbalock to access and update SLI data structures.
8993 *
8994 * This function returns IRQ_HANDLED when interrupt is handled else it
8995 * returns IRQ_NONE.
8996 **/
8997irqreturn_t
8998lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
8999{
9000 struct lpfc_hba *phba;
9001 struct lpfc_queue *speq;
9002 struct lpfc_eqe *eqe;
9003 unsigned long iflag;
9004 int ecount = 0;
9005
9006 /*
9007 * Get the driver's phba structure from the dev_id
9008 */
9009 phba = (struct lpfc_hba *)dev_id;
9010
9011 if (unlikely(!phba))
9012 return IRQ_NONE;
9013
9014 /* Get to the EQ struct associated with this vector */
9015 speq = phba->sli4_hba.sp_eq;
9016
9017 /* Check device state for handling interrupt */
9018 if (unlikely(lpfc_intr_state_check(phba))) {
9019 /* Check again for link_state with lock held */
9020 spin_lock_irqsave(&phba->hbalock, iflag);
9021 if (phba->link_state < LPFC_LINK_DOWN)
9022 /* Flush, clear interrupt, and rearm the EQ */
9023 lpfc_sli4_eq_flush(phba, speq);
9024 spin_unlock_irqrestore(&phba->hbalock, iflag);
9025 return IRQ_NONE;
9026 }
9027
9028 /*
9029	 * Process all the events on the slow-path EQ
9030 */
9031 while ((eqe = lpfc_sli4_eq_get(speq))) {
9032 lpfc_sli4_sp_handle_eqe(phba, eqe);
9033 if (!(++ecount % LPFC_GET_QE_REL_INT))
9034 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
9035 }
9036
9037 /* Always clear and re-arm the slow-path EQ */
9038 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
9039
9040 /* Catch the no cq entry condition */
9041 if (unlikely(ecount == 0)) {
9042 if (phba->intr_type == MSIX)
9043			/* With MSI-X the vector is not shared, so an empty EQ is unexpected */
9044 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9045 "0357 MSI-X interrupt with no EQE\n");
9046 else
9047			/* On a shared INTx/MSI line, an empty EQ means the interrupt is not ours */
9048 return IRQ_NONE;
9049 }
9050
9051 return IRQ_HANDLED;
9052} /* lpfc_sli4_sp_intr_handler */
9053
9054/**
9055 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
9056 * @irq: Interrupt number.
9057 * @dev_id: The device context pointer.
9058 *
9059 * This function is directly called from the PCI layer as an interrupt
9060 * service routine when device with SLI-4 interface spec is enabled with
9061 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
9062 * ring event in the HBA. However, when the device is enabled with either
9063 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
9064 * device-level interrupt handler. When the PCI slot is in error recovery
9065 * or the HBA is undergoing initialization, the interrupt handler will not
9066 * process the interrupt. The SCSI FCP fast-path ring events are handled
9067 * in interrupt context. This function is called without any lock held.
9068 * It gets the hbalock to access and update SLI data structures. Note that
9069 * the FCP EQs map one-to-one onto the FCP CQs, so the FCP EQ index is
9070 * equal to the FCP CQ index.
9071 *
9072 * This function returns IRQ_HANDLED when interrupt is handled else it
9073 * returns IRQ_NONE.
9074 **/
9075irqreturn_t
9076lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
9077{
9078 struct lpfc_hba *phba;
9079 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
9080 struct lpfc_queue *fpeq;
9081 struct lpfc_eqe *eqe;
9082 unsigned long iflag;
9083 int ecount = 0;
9084 uint32_t fcp_eqidx;
9085
9086 /* Get the driver's phba structure from the dev_id */
9087 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
9088 phba = fcp_eq_hdl->phba;
9089 fcp_eqidx = fcp_eq_hdl->idx;
9090
9091 if (unlikely(!phba))
9092 return IRQ_NONE;
9093
9094 /* Get to the EQ struct associated with this vector */
9095 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
9096
9097 /* Check device state for handling interrupt */
9098 if (unlikely(lpfc_intr_state_check(phba))) {
9099 /* Check again for link_state with lock held */
9100 spin_lock_irqsave(&phba->hbalock, iflag);
9101 if (phba->link_state < LPFC_LINK_DOWN)
9102 /* Flush, clear interrupt, and rearm the EQ */
9103 lpfc_sli4_eq_flush(phba, fpeq);
9104 spin_unlock_irqrestore(&phba->hbalock, iflag);
9105 return IRQ_NONE;
9106 }
9107
9108 /*
9109	 * Process all the events on the FCP fast-path EQ
9110 */
9111 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9112 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
9113 if (!(++ecount % LPFC_GET_QE_REL_INT))
9114 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
9115 }
9116
9117 /* Always clear and re-arm the fast-path EQ */
9118 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
9119
9120 if (unlikely(ecount == 0)) {
9121 if (phba->intr_type == MSIX)
9122			/* With MSI-X the vector is not shared, so an empty EQ is unexpected */
9123 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9124 "0358 MSI-X interrupt with no EQE\n");
9125 else
9126			/* On a shared INTx/MSI line, an empty EQ means the interrupt is not ours */
9127 return IRQ_NONE;
9128 }
9129
9130 return IRQ_HANDLED;
9131} /* lpfc_sli4_fp_intr_handler */
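
For reference, the dev_id conventions visible in these two handlers (the slow-path handler receives the lpfc_hba itself; each fast-path handler receives a per-EQ lpfc_fcp_eq_hdl carrying the phba pointer and EQ index) imply MSI-X vector wiring along the following lines. This is only an illustrative sketch, not the driver's actual setup code; the vector numbers, irq names, and error handling are assumptions.

#include <linux/interrupt.h>

static int demo_request_sli4_irqs(unsigned int sp_vec, unsigned int fp_vec,
				  struct lpfc_hba *phba,
				  struct lpfc_fcp_eq_hdl *fcp_eq_hdl)
{
	int rc;

	/* Slow-path vector: dev_id is the phba itself */
	rc = request_irq(sp_vec, lpfc_sli4_sp_intr_handler, 0,
			 "demo-lpfc-sp", phba);
	if (rc)
		return rc;
	/* Fast-path vector: dev_id is the per-EQ handle (phba + EQ index) */
	rc = request_irq(fp_vec, lpfc_sli4_fp_intr_handler, 0,
			 "demo-lpfc-fp", fcp_eq_hdl);
	if (rc)
		free_irq(sp_vec, phba);
	return rc;
}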
9132
9133/**
9134 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
9135 * @irq: Interrupt number.
9136 * @dev_id: The device context pointer.
9137 *
9138 * This function is the device-level interrupt handler to device with SLI-4
9139 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
9140 * interrupt mode is enabled and there is an event in the HBA which requires
9141 * driver attention. This function invokes the slow-path interrupt attention
9142 * handling function and fast-path interrupt attention handling function in
9143 * turn to process the relevant HBA attention events. This function is called
9144 * without any lock held. It gets the hbalock to access and update SLI data
9145 * structures.
9146 *
9147 * This function returns IRQ_HANDLED when interrupt is handled, else it
9148 * returns IRQ_NONE.
9149 **/
9150irqreturn_t
9151lpfc_sli4_intr_handler(int irq, void *dev_id)
9152{
9153 struct lpfc_hba *phba;
9154 irqreturn_t sp_irq_rc, fp_irq_rc;
9155 bool fp_handled = false;
9156 uint32_t fcp_eqidx;
9157
9158 /* Get the driver's phba structure from the dev_id */
9159 phba = (struct lpfc_hba *)dev_id;
9160
9161 if (unlikely(!phba))
9162 return IRQ_NONE;
9163
9164 /*
9165 * Invokes slow-path host attention interrupt handling as appropriate.
9166 */
9167 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
9168
9169 /*
9170 * Invoke fast-path host attention interrupt handling as appropriate.
9171 */
9172 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
9173 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
9174 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
9175 if (fp_irq_rc == IRQ_HANDLED)
9176 fp_handled |= true;
9177 }
9178
9179 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc;
9180} /* lpfc_sli4_intr_handler */
9181
9182/**
9183 * lpfc_sli4_queue_free - free a queue structure and associated memory
9184 * @queue: The queue structure to free.
9185 *
9186 * This function frees a queue structure and the DMAable memory used for
9187 * the host resident queue. This function must be called after destroying the
9188 * queue on the HBA.
9189 **/
9190void
9191lpfc_sli4_queue_free(struct lpfc_queue *queue)
9192{
9193 struct lpfc_dmabuf *dmabuf;
9194
9195 if (!queue)
9196 return;
9197
9198 while (!list_empty(&queue->page_list)) {
9199 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
9200 list);
9201 dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
9202 dmabuf->virt, dmabuf->phys);
9203 kfree(dmabuf);
9204 }
9205 kfree(queue);
9206 return;
9207}
9208
9209/**
9210 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
9211 * @phba: The HBA that this queue is being created on.
9212 * @entry_size: The size of each queue entry for this queue.
9213 * @entry_count: The number of entries that this queue will handle.
9214 *
9215 * This function allocates a queue structure and the DMAable memory used for
9216 * the host resident queue. This function must be called before creating the
9217 * queue on the HBA.
9218 **/
9219struct lpfc_queue *
9220lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
9221 uint32_t entry_count)
9222{
9223 struct lpfc_queue *queue;
9224 struct lpfc_dmabuf *dmabuf;
9225 int x, total_qe_count;
9226 void *dma_pointer;
9227
9228
9229 queue = kzalloc(sizeof(struct lpfc_queue) +
9230 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
9231 if (!queue)
9232 return NULL;
9233 queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
9234 INIT_LIST_HEAD(&queue->list);
9235 INIT_LIST_HEAD(&queue->page_list);
9236 INIT_LIST_HEAD(&queue->child_list);
9237 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
9238 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9239 if (!dmabuf)
9240 goto out_fail;
9241 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9242 PAGE_SIZE, &dmabuf->phys,
9243 GFP_KERNEL);
9244 if (!dmabuf->virt) {
9245 kfree(dmabuf);
9246 goto out_fail;
9247 }
9248 dmabuf->buffer_tag = x;
9249 list_add_tail(&dmabuf->list, &queue->page_list);
9250 /* initialize queue's entry array */
9251 dma_pointer = dmabuf->virt;
9252 for (; total_qe_count < entry_count &&
9253 dma_pointer < (PAGE_SIZE + dmabuf->virt);
9254 total_qe_count++, dma_pointer += entry_size) {
9255 queue->qe[total_qe_count].address = dma_pointer;
9256 }
9257 }
9258 queue->entry_size = entry_size;
9259 queue->entry_count = entry_count;
9260 queue->phba = phba;
9261
9262 return queue;
9263out_fail:
9264 lpfc_sli4_queue_free(queue);
9265 return NULL;
9266}
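
The page accounting above rounds the total entry footprint up to whole pages: page_count = PAGE_ALIGN(entry_size * entry_count) / PAGE_SIZE. A few worked values, assuming 4 KiB pages (the entry sizes are illustrative, not values asserted by this patch):

/*
 *   entry_size x entry_count  ->  bytes  ->  page_count
 *            4 x        1024  ->   4096  ->  1
 *           16 x        1024  ->  16384  ->  4
 *           32 x         129  ->   4128  ->  2   (rounded up)
 */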
9267
9268/**
9269 * lpfc_eq_create - Create an Event Queue on the HBA
9270 * @phba: HBA structure that indicates port to create a queue on.
9271 * @eq: The queue structure to use to create the event queue.
9272 * @imax: The maximum interrupt per second limit.
9273 *
9274 * This function creates an event queue, as detailed in @eq, on a port,
9275 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
9276 *
9277 * The @phba struct is used to send mailbox command to HBA. The @eq struct
9278 * is used to get the entry count and entry size that are necessary to
9279 * determine the number of pages to allocate and use for this queue. This
9280 * function will send the EQ_CREATE mailbox command to the HBA to setup the
9281 * event queue. This function is synchronous and will wait for the mailbox
9282 * command to finish before continuing.
9283 *
9284 * On success this function will return a zero. If unable to allocate enough
9285 * memory this function will return ENOMEM. If the queue create mailbox command
9286 * fails this function will return ENXIO.
9287 **/
9288uint32_t
9289lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
9290{
9291 struct lpfc_mbx_eq_create *eq_create;
9292 LPFC_MBOXQ_t *mbox;
9293 int rc, length, status = 0;
9294 struct lpfc_dmabuf *dmabuf;
9295 uint32_t shdr_status, shdr_add_status;
9296 union lpfc_sli4_cfg_shdr *shdr;
9297 uint16_t dmult;
9298
9299 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9300 if (!mbox)
9301 return -ENOMEM;
9302 length = (sizeof(struct lpfc_mbx_eq_create) -
9303 sizeof(struct lpfc_sli4_cfg_mhdr));
9304 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9305 LPFC_MBOX_OPCODE_EQ_CREATE,
9306 length, LPFC_SLI4_MBX_EMBED);
9307 eq_create = &mbox->u.mqe.un.eq_create;
9308 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
9309 eq->page_count);
9310 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
9311 LPFC_EQE_SIZE);
9312 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
9313	/* Calculate the delay multiplier from the maximum interrupts per second */
9314 dmult = LPFC_DMULT_CONST/imax - 1;
9315 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
9316 dmult);
9317 switch (eq->entry_count) {
9318 default:
9319 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9320 "0360 Unsupported EQ count. (%d)\n",
9321 eq->entry_count);
9322 if (eq->entry_count < 256)
9323 return -EINVAL;
9324 /* otherwise default to smallest count (drop through) */
9325 case 256:
9326 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9327 LPFC_EQ_CNT_256);
9328 break;
9329 case 512:
9330 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9331 LPFC_EQ_CNT_512);
9332 break;
9333 case 1024:
9334 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9335 LPFC_EQ_CNT_1024);
9336 break;
9337 case 2048:
9338 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9339 LPFC_EQ_CNT_2048);
9340 break;
9341 case 4096:
9342 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9343 LPFC_EQ_CNT_4096);
9344 break;
9345 }
9346 list_for_each_entry(dmabuf, &eq->page_list, list) {
9347 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9348 putPaddrLow(dmabuf->phys);
9349 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9350 putPaddrHigh(dmabuf->phys);
9351 }
9352 mbox->vport = phba->pport;
9353 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9354 mbox->context1 = NULL;
9355 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9356 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
9357 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9358 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9359 if (shdr_status || shdr_add_status || rc) {
9360 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9361 "2500 EQ_CREATE mailbox failed with "
9362 "status x%x add_status x%x, mbx status x%x\n",
9363 shdr_status, shdr_add_status, rc);
9364 status = -ENXIO;
9365 }
9366 eq->type = LPFC_EQ;
9367 eq->subtype = LPFC_NONE;
9368 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
9369 if (eq->queue_id == 0xFFFF)
9370 status = -ENXIO;
9371 eq->host_index = 0;
9372 eq->hba_index = 0;
9373
9374 if (rc != MBX_TIMEOUT)
9375 mempool_free(mbox, phba->mbox_mem_pool);
9376 return status;
9377}
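
Putting lpfc_sli4_queue_alloc() and lpfc_eq_create() together, EQ bring-up looks roughly like the sketch below. The entry size, entry count, and interrupt ceiling are assumed values for illustration; note that the delay multiplier is derived inside lpfc_eq_create() as LPFC_DMULT_CONST / imax - 1, so imax must be non-zero.

static struct lpfc_queue *demo_setup_eq(struct lpfc_hba *phba)
{
	struct lpfc_queue *eq;

	/* Host-side allocation first: queue struct plus DMAable pages */
	eq = lpfc_sli4_queue_alloc(phba, 4 /* assumed EQE size */, 1024);
	if (!eq)
		return NULL;
	/* Then tell the port about it via the EQ_CREATE mailbox */
	if (lpfc_eq_create(phba, eq, 10000 /* assumed max intr/sec */)) {
		lpfc_sli4_queue_free(eq);
		return NULL;
	}
	return eq;
}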
9378
9379/**
9380 * lpfc_cq_create - Create a Completion Queue on the HBA
9381 * @phba: HBA structure that indicates port to create a queue on.
9382 * @cq: The queue structure to use to create the completion queue.
9383 * @eq: The event queue to bind this completion queue to.
9384 *
9385 * This function creates a completion queue, as detailed in @cq, on a port,
9386 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
9387 *
9388 * The @phba struct is used to send mailbox command to HBA. The @cq struct
9389 * is used to get the entry count and entry size that are necessary to
9390 * determine the number of pages to allocate and use for this queue. The @eq
9391 * is used to indicate which event queue to bind this completion queue to. This
9392 * function will send the CQ_CREATE mailbox command to the HBA to setup the
9393 * completion queue. This function is synchronous and will wait for the mailbox
9394 * command to finish before continuing.
9395 *
9396 * On success this function will return a zero. If unable to allocate enough
9397 * memory this function will return ENOMEM. If the queue create mailbox command
9398 * fails this function will return ENXIO.
9399 **/
9400uint32_t
9401lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
9402 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
9403{
9404 struct lpfc_mbx_cq_create *cq_create;
9405 struct lpfc_dmabuf *dmabuf;
9406 LPFC_MBOXQ_t *mbox;
9407 int rc, length, status = 0;
9408 uint32_t shdr_status, shdr_add_status;
9409 union lpfc_sli4_cfg_shdr *shdr;
9410
9411 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9412 if (!mbox)
9413 return -ENOMEM;
9414 length = (sizeof(struct lpfc_mbx_cq_create) -
9415 sizeof(struct lpfc_sli4_cfg_mhdr));
9416 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9417 LPFC_MBOX_OPCODE_CQ_CREATE,
9418 length, LPFC_SLI4_MBX_EMBED);
9419 cq_create = &mbox->u.mqe.un.cq_create;
9420 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
9421 cq->page_count);
9422 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
9423 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
9424 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
9425 switch (cq->entry_count) {
9426 default:
9427 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9428 "0361 Unsupported CQ count. (%d)\n",
9429 cq->entry_count);
9430 if (cq->entry_count < 256)
9431 return -EINVAL;
9432 /* otherwise default to smallest count (drop through) */
9433 case 256:
9434 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9435 LPFC_CQ_CNT_256);
9436 break;
9437 case 512:
9438 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9439 LPFC_CQ_CNT_512);
9440 break;
9441 case 1024:
9442 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9443 LPFC_CQ_CNT_1024);
9444 break;
9445 }
9446 list_for_each_entry(dmabuf, &cq->page_list, list) {
9447 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9448 putPaddrLow(dmabuf->phys);
9449 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9450 putPaddrHigh(dmabuf->phys);
9451 }
9452 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9453
9454 /* The IOCTL status is embedded in the mailbox subheader. */
9455 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
9456 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9457 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9458 if (shdr_status || shdr_add_status || rc) {
9459 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9460 "2501 CQ_CREATE mailbox failed with "
9461 "status x%x add_status x%x, mbx status x%x\n",
9462 shdr_status, shdr_add_status, rc);
9463 status = -ENXIO;
9464 goto out;
9465 }
9466 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9467 if (cq->queue_id == 0xFFFF) {
9468 status = -ENXIO;
9469 goto out;
9470 }
9471 /* link the cq onto the parent eq child list */
9472 list_add_tail(&cq->list, &eq->child_list);
9473 /* Set up completion queue's type and subtype */
9474 cq->type = type;
9475 cq->subtype = subtype;
9476 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9477 cq->host_index = 0;
9478 cq->hba_index = 0;
9479out:
9480
9481 if (rc != MBX_TIMEOUT)
9482 mempool_free(mbox, phba->mbox_mem_pool);
9483 return status;
9484}
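
A CQ then binds onto an existing EQ the same way; on success lpfc_cq_create() links the CQ onto eq->child_list, which is what lets lpfc_sli4_sp_handle_eqe() above find it by queue_id. A sketch continuing the one above; the LPFC_WCQ/LPFC_FCP pair mirrors the fast-path usage seen earlier in this patch, and the sizes remain illustrative.

static struct lpfc_queue *demo_setup_cq(struct lpfc_hba *phba,
					struct lpfc_queue *eq)
{
	struct lpfc_queue *cq;

	cq = lpfc_sli4_queue_alloc(phba, 16 /* assumed CQE size */, 1024);
	if (!cq)
		return NULL;
	/* On success the driver links the CQ onto eq->child_list */
	if (lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP)) {
		lpfc_sli4_queue_free(cq);
		return NULL;
	}
	return cq;
}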
9485
9486/**
9487 * lpfc_mq_create - Create a mailbox Queue on the HBA
9488 * @phba: HBA structure that indicates port to create a queue on.
9489 * @mq: The queue structure to use to create the mailbox queue.
9490 *
9491 * This function creates a mailbox queue, as detailed in @mq, on a port,
9492 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
9493 *
9494 * The @phba struct is used to send mailbox command to HBA. The @mq struct
9495 * is used to get the entry count and entry size that are necessary to
9496 * determine the number of pages to allocate and use for this queue. This
9497 * function will send the MQ_CREATE mailbox command to the HBA to setup the
9498 * mailbox queue. This function is asynchronous and will wait for the mailbox
9499 * command to finish before continuing.
9500 *
9501 * On success this function will return a zero. If unable to allocate enough
9502 * memory this function will return ENOMEM. If the queue create mailbox command
9503 * fails this function will return ENXIO.
9504 **/
9505uint32_t
9506lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
9507 struct lpfc_queue *cq, uint32_t subtype)
9508{
9509 struct lpfc_mbx_mq_create *mq_create;
9510 struct lpfc_dmabuf *dmabuf;
9511 LPFC_MBOXQ_t *mbox;
9512 int rc, length, status = 0;
9513 uint32_t shdr_status, shdr_add_status;
9514 union lpfc_sli4_cfg_shdr *shdr;
9515
9516 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9517 if (!mbox)
9518 return -ENOMEM;
9519 length = (sizeof(struct lpfc_mbx_mq_create) -
9520 sizeof(struct lpfc_sli4_cfg_mhdr));
9521 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9522 LPFC_MBOX_OPCODE_MQ_CREATE,
9523 length, LPFC_SLI4_MBX_EMBED);
9524 mq_create = &mbox->u.mqe.un.mq_create;
9525 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
9526 mq->page_count);
9527 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
9528 cq->queue_id);
9529 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
9530 switch (mq->entry_count) {
9531 default:
9532 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9533 "0362 Unsupported MQ count. (%d)\n",
9534 mq->entry_count);
9535 if (mq->entry_count < 16)
9536 return -EINVAL;
9537 /* otherwise default to smallest count (drop through) */
9538 case 16:
9539 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9540 LPFC_MQ_CNT_16);
9541 break;
9542 case 32:
9543 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9544 LPFC_MQ_CNT_32);
9545 break;
9546 case 64:
9547 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9548 LPFC_MQ_CNT_64);
9549 break;
9550 case 128:
9551 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9552 LPFC_MQ_CNT_128);
9553 break;
9554 }
9555 list_for_each_entry(dmabuf, &mq->page_list, list) {
9556 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9557 putPaddrLow(dmabuf->phys);
9558 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9559 putPaddrHigh(dmabuf->phys);
9560 }
9561 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9562 /* The IOCTL status is embedded in the mailbox subheader. */
9563 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
9564 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9565 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9566 if (shdr_status || shdr_add_status || rc) {
9567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9568 "2502 MQ_CREATE mailbox failed with "
9569 "status x%x add_status x%x, mbx status x%x\n",
9570 shdr_status, shdr_add_status, rc);
9571 status = -ENXIO;
9572 goto out;
9573 }
9574 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
9575 if (mq->queue_id == 0xFFFF) {
9576 status = -ENXIO;
9577 goto out;
9578 }
9579 mq->type = LPFC_MQ;
9580 mq->subtype = subtype;
9581 mq->host_index = 0;
9582 mq->hba_index = 0;
9583
9584 /* link the mq onto the parent cq child list */
9585 list_add_tail(&mq->list, &cq->child_list);
9586out:
9587 if (rc != MBX_TIMEOUT)
9588 mempool_free(mbox, phba->mbox_mem_pool);
9589 return status;
9590}
9591
9592/**
9593 * lpfc_wq_create - Create a Work Queue on the HBA
9594 * @phba: HBA structure that indicates port to create a queue on.
9595 * @wq: The queue structure to use to create the work queue.
9596 * @cq: The completion queue to bind this work queue to.
9597 * @subtype: The subtype of the work queue indicating its functionality.
9598 *
9599 * This function creates a work queue, as detailed in @wq, on a port, described
9600 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
9601 *
9602 * The @phba struct is used to send mailbox command to HBA. The @wq struct
9603 * is used to get the entry count and entry size that are necessary to
9604 * determine the number of pages to allocate and use for this queue. The @cq
9605 * is used to indicate which completion queue to bind this work queue to. This
9606 * function will send the WQ_CREATE mailbox command to the HBA to setup the
 * work queue. This function is synchronous and will wait for the mailbox
9608 * command to finish before continuing.
9609 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
9613 **/
9614uint32_t
9615lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
9616 struct lpfc_queue *cq, uint32_t subtype)
9617{
9618 struct lpfc_mbx_wq_create *wq_create;
9619 struct lpfc_dmabuf *dmabuf;
9620 LPFC_MBOXQ_t *mbox;
9621 int rc, length, status = 0;
9622 uint32_t shdr_status, shdr_add_status;
9623 union lpfc_sli4_cfg_shdr *shdr;
9624
9625 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9626 if (!mbox)
9627 return -ENOMEM;
9628 length = (sizeof(struct lpfc_mbx_wq_create) -
9629 sizeof(struct lpfc_sli4_cfg_mhdr));
9630 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9631 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
9632 length, LPFC_SLI4_MBX_EMBED);
9633 wq_create = &mbox->u.mqe.un.wq_create;
9634 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
9635 wq->page_count);
9636 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
9637 cq->queue_id);
9638 list_for_each_entry(dmabuf, &wq->page_list, list) {
9639 wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9640 putPaddrLow(dmabuf->phys);
9641 wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9642 putPaddrHigh(dmabuf->phys);
9643 }
9644 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9645 /* The IOCTL status is embedded in the mailbox subheader. */
9646 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
9647 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9648 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9649 if (shdr_status || shdr_add_status || rc) {
9650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9651 "2503 WQ_CREATE mailbox failed with "
9652 "status x%x add_status x%x, mbx status x%x\n",
9653 shdr_status, shdr_add_status, rc);
9654 status = -ENXIO;
9655 goto out;
9656 }
9657 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
9658 if (wq->queue_id == 0xFFFF) {
9659 status = -ENXIO;
9660 goto out;
9661 }
9662 wq->type = LPFC_WQ;
9663 wq->subtype = subtype;
9664 wq->host_index = 0;
9665 wq->hba_index = 0;
9666
9667 /* link the wq onto the parent cq child list */
9668 list_add_tail(&wq->list, &cq->child_list);
9669out:
	if (rc != MBX_TIMEOUT)
9671 mempool_free(mbox, phba->mbox_mem_pool);
9672 return status;
9673}
9674
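/*
 * Illustrative sketch (not part of the driver): a work queue hangs off
 * a completion queue, which itself hangs off an event queue, so
 * creation must run EQ -> CQ -> WQ. The lpfc_eq_create() and
 * lpfc_cq_create() signatures and the LPFC_WCQ/LPFC_FCP names are
 * assumptions for this example only.
 */
static uint32_t lpfc_example_setup_fcp_wq(struct lpfc_hba *phba,
					  struct lpfc_queue *eq,
					  struct lpfc_queue *cq,
					  struct lpfc_queue *wq,
					  uint16_t imax)
{
	uint32_t rc;

	rc = lpfc_eq_create(phba, eq, imax);
	if (rc)
		return rc;
	rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
	if (rc)
		return rc;
	/* On success the WQ is linked onto cq->child_list. */
	return lpfc_wq_create(phba, wq, cq, LPFC_FCP);
}
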
9675/**
9676 * lpfc_rq_create - Create a Receive Queue on the HBA
9677 * @phba: HBA structure that indicates port to create a queue on.
9678 * @hrq: The queue structure to use to create the header receive queue.
9679 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind these receive queues to.
 * @subtype: The subtype of the queues, indicating their functionality.
9681 *
 * This function creates a receive buffer queue pair, as detailed in @hrq and
9683 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
9684 * to the HBA.
9685 *
9686 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
 * structs are used to get the entry count that is necessary to determine the
 * number of pages to use for this queue. The @cq is used to indicate which
 * completion queue the buffers posted to these queues will be bound to.
9690 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
 * receive queue pair. This function is synchronous and will wait for the
9692 * mailbox command to finish before continuing.
9693 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
9697 **/
9698uint32_t
9699lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
9700 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
9701{
9702 struct lpfc_mbx_rq_create *rq_create;
9703 struct lpfc_dmabuf *dmabuf;
9704 LPFC_MBOXQ_t *mbox;
9705 int rc, length, status = 0;
9706 uint32_t shdr_status, shdr_add_status;
9707 union lpfc_sli4_cfg_shdr *shdr;
9708
9709 if (hrq->entry_count != drq->entry_count)
9710 return -EINVAL;
9711 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9712 if (!mbox)
9713 return -ENOMEM;
9714 length = (sizeof(struct lpfc_mbx_rq_create) -
9715 sizeof(struct lpfc_sli4_cfg_mhdr));
9716 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9717 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9718 length, LPFC_SLI4_MBX_EMBED);
9719 rq_create = &mbox->u.mqe.un.rq_create;
9720 switch (hrq->entry_count) {
9721 default:
9722 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9723 "2535 Unsupported RQ count. (%d)\n",
9724 hrq->entry_count);
		if (hrq->entry_count < 512) {
			mempool_free(mbox, phba->mbox_mem_pool);
			return -EINVAL;
		}
9727 /* otherwise default to smallest count (drop through) */
9728 case 512:
9729 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9730 LPFC_RQ_RING_SIZE_512);
9731 break;
9732 case 1024:
9733 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9734 LPFC_RQ_RING_SIZE_1024);
9735 break;
9736 case 2048:
9737 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9738 LPFC_RQ_RING_SIZE_2048);
9739 break;
9740 case 4096:
9741 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9742 LPFC_RQ_RING_SIZE_4096);
9743 break;
9744 }
9745 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9746 cq->queue_id);
9747 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9748 hrq->page_count);
9749 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9750 LPFC_HDR_BUF_SIZE);
9751 list_for_each_entry(dmabuf, &hrq->page_list, list) {
9752 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9753 putPaddrLow(dmabuf->phys);
9754 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9755 putPaddrHigh(dmabuf->phys);
9756 }
9757 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9758 /* The IOCTL status is embedded in the mailbox subheader. */
9759 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9760 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9761 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9762 if (shdr_status || shdr_add_status || rc) {
9763 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9764 "2504 RQ_CREATE mailbox failed with "
9765 "status x%x add_status x%x, mbx status x%x\n",
9766 shdr_status, shdr_add_status, rc);
9767 status = -ENXIO;
9768 goto out;
9769 }
9770 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9771 if (hrq->queue_id == 0xFFFF) {
9772 status = -ENXIO;
9773 goto out;
9774 }
9775 hrq->type = LPFC_HRQ;
9776 hrq->subtype = subtype;
9777 hrq->host_index = 0;
9778 hrq->hba_index = 0;
9779
9780 /* now create the data queue */
9781 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9782 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9783 length, LPFC_SLI4_MBX_EMBED);
9784 switch (drq->entry_count) {
9785 default:
9786 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9787 "2536 Unsupported RQ count. (%d)\n",
9788 drq->entry_count);
		if (drq->entry_count < 512) {
			mempool_free(mbox, phba->mbox_mem_pool);
			return -EINVAL;
		}
9791 /* otherwise default to smallest count (drop through) */
9792 case 512:
9793 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9794 LPFC_RQ_RING_SIZE_512);
9795 break;
9796 case 1024:
9797 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9798 LPFC_RQ_RING_SIZE_1024);
9799 break;
9800 case 2048:
9801 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9802 LPFC_RQ_RING_SIZE_2048);
9803 break;
9804 case 4096:
9805 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9806 LPFC_RQ_RING_SIZE_4096);
9807 break;
9808 }
9809 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9810 cq->queue_id);
9811 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9812 drq->page_count);
9813 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9814 LPFC_DATA_BUF_SIZE);
9815 list_for_each_entry(dmabuf, &drq->page_list, list) {
9816 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9817 putPaddrLow(dmabuf->phys);
9818 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9819 putPaddrHigh(dmabuf->phys);
9820 }
9821 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9822 /* The IOCTL status is embedded in the mailbox subheader. */
9823 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9824 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9825 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9826 if (shdr_status || shdr_add_status || rc) {
9827 status = -ENXIO;
9828 goto out;
9829 }
9830 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9831 if (drq->queue_id == 0xFFFF) {
9832 status = -ENXIO;
9833 goto out;
9834 }
9835 drq->type = LPFC_DRQ;
9836 drq->subtype = subtype;
9837 drq->host_index = 0;
9838 drq->hba_index = 0;
9839
9840 /* link the header and data RQs onto the parent cq child list */
9841 list_add_tail(&hrq->list, &cq->child_list);
9842 list_add_tail(&drq->list, &cq->child_list);
9843
9844out:
9845 if (rc != MBX_TIMEOUT)
9846 mempool_free(mbox, phba->mbox_mem_pool);
9847 return status;
9848}
9849
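/*
 * Illustrative sketch (not part of the driver): header and data receive
 * queues are created as a pair with identical entry counts (mismatched
 * pairs are rejected with -EINVAL) and both complete onto the same CQ.
 * The LPFC_RQE_SIZE and LPFC_USOL names and the lpfc_sli4_queue_alloc()
 * signature are assumptions for this example only.
 */
static int lpfc_example_setup_rq_pair(struct lpfc_hba *phba,
				      struct lpfc_queue *cq)
{
	struct lpfc_queue *hrq, *drq;

	hrq = lpfc_sli4_queue_alloc(phba, LPFC_RQE_SIZE, 512);
	if (!hrq)
		return -ENOMEM;
	drq = lpfc_sli4_queue_alloc(phba, LPFC_RQE_SIZE, 512);
	if (!drq) {
		lpfc_sli4_queue_free(hrq);
		return -ENOMEM;
	}
	/*
	 * One call creates both queues: @hrq receives the frame headers
	 * (LPFC_HDR_BUF_SIZE buffers) and @drq the frame payloads
	 * (LPFC_DATA_BUF_SIZE buffers).
	 */
	return lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
}
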
9850/**
 * lpfc_eq_destroy - Destroy an Event Queue on the HBA
 * @phba: HBA structure that indicates the port on which to destroy the queue.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @eq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
9861 **/
9862uint32_t
9863lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
9864{
9865 LPFC_MBOXQ_t *mbox;
9866 int rc, length, status = 0;
9867 uint32_t shdr_status, shdr_add_status;
9868 union lpfc_sli4_cfg_shdr *shdr;
9869
9870 if (!eq)
9871 return -ENODEV;
9872 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
9873 if (!mbox)
9874 return -ENOMEM;
9875 length = (sizeof(struct lpfc_mbx_eq_destroy) -
9876 sizeof(struct lpfc_sli4_cfg_mhdr));
9877 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9878 LPFC_MBOX_OPCODE_EQ_DESTROY,
9879 length, LPFC_SLI4_MBX_EMBED);
9880 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
9881 eq->queue_id);
9882 mbox->vport = eq->phba->pport;
9883 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9884
9885 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
9886 /* The IOCTL status is embedded in the mailbox subheader. */
9887 shdr = (union lpfc_sli4_cfg_shdr *)
9888 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
9889 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9890 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9891 if (shdr_status || shdr_add_status || rc) {
9892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9893 "2505 EQ_DESTROY mailbox failed with "
9894 "status x%x add_status x%x, mbx status x%x\n",
9895 shdr_status, shdr_add_status, rc);
9896 status = -ENXIO;
9897 }
9898
9899 /* Remove eq from any list */
9900 list_del_init(&eq->list);
9901 if (rc != MBX_TIMEOUT)
9902 mempool_free(mbox, eq->phba->mbox_mem_pool);
9903 return status;
9904}
9905
9906/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: HBA structure that indicates the port on which to destroy the queue.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @cq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
9917 **/
9918uint32_t
9919lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
9920{
9921 LPFC_MBOXQ_t *mbox;
9922 int rc, length, status = 0;
9923 uint32_t shdr_status, shdr_add_status;
9924 union lpfc_sli4_cfg_shdr *shdr;
9925
9926 if (!cq)
9927 return -ENODEV;
9928 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
9929 if (!mbox)
9930 return -ENOMEM;
9931 length = (sizeof(struct lpfc_mbx_cq_destroy) -
9932 sizeof(struct lpfc_sli4_cfg_mhdr));
9933 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9934 LPFC_MBOX_OPCODE_CQ_DESTROY,
9935 length, LPFC_SLI4_MBX_EMBED);
9936 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
9937 cq->queue_id);
9938 mbox->vport = cq->phba->pport;
9939 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9940 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
9941 /* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
9944 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9945 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9946 if (shdr_status || shdr_add_status || rc) {
9947 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9948 "2506 CQ_DESTROY mailbox failed with "
9949 "status x%x add_status x%x, mbx status x%x\n",
9950 shdr_status, shdr_add_status, rc);
9951 status = -ENXIO;
9952 }
9953 /* Remove cq from any list */
9954 list_del_init(&cq->list);
9955 if (rc != MBX_TIMEOUT)
9956 mempool_free(mbox, cq->phba->mbox_mem_pool);
9957 return status;
9958}
9959
9960/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates the port on which to destroy the queue.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
9971 **/
9972uint32_t
9973lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
9974{
9975 LPFC_MBOXQ_t *mbox;
9976 int rc, length, status = 0;
9977 uint32_t shdr_status, shdr_add_status;
9978 union lpfc_sli4_cfg_shdr *shdr;
9979
9980 if (!mq)
9981 return -ENODEV;
9982 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
9983 if (!mbox)
9984 return -ENOMEM;
9985 length = (sizeof(struct lpfc_mbx_mq_destroy) -
9986 sizeof(struct lpfc_sli4_cfg_mhdr));
9987 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9988 LPFC_MBOX_OPCODE_MQ_DESTROY,
9989 length, LPFC_SLI4_MBX_EMBED);
9990 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
9991 mq->queue_id);
9992 mbox->vport = mq->phba->pport;
9993 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9994 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
9995 /* The IOCTL status is embedded in the mailbox subheader. */
9996 shdr = (union lpfc_sli4_cfg_shdr *)
9997 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
9998 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9999 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10000 if (shdr_status || shdr_add_status || rc) {
10001 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10002 "2507 MQ_DESTROY mailbox failed with "
10003 "status x%x add_status x%x, mbx status x%x\n",
10004 shdr_status, shdr_add_status, rc);
10005 status = -ENXIO;
10006 }
10007 /* Remove mq from any list */
10008 list_del_init(&mq->list);
10009 if (rc != MBX_TIMEOUT)
10010 mempool_free(mbox, mq->phba->mbox_mem_pool);
10011 return status;
10012}
10013
10014/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: HBA structure that indicates the port on which to destroy the queue.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
10025 **/
10026uint32_t
10027lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
10028{
10029 LPFC_MBOXQ_t *mbox;
10030 int rc, length, status = 0;
10031 uint32_t shdr_status, shdr_add_status;
10032 union lpfc_sli4_cfg_shdr *shdr;
10033
10034 if (!wq)
10035 return -ENODEV;
10036 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
10037 if (!mbox)
10038 return -ENOMEM;
10039 length = (sizeof(struct lpfc_mbx_wq_destroy) -
10040 sizeof(struct lpfc_sli4_cfg_mhdr));
10041 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10042 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
10043 length, LPFC_SLI4_MBX_EMBED);
10044 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
10045 wq->queue_id);
10046 mbox->vport = wq->phba->pport;
10047 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10048 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
10049 shdr = (union lpfc_sli4_cfg_shdr *)
10050 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
10051 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10052 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10053 if (shdr_status || shdr_add_status || rc) {
10054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10055 "2508 WQ_DESTROY mailbox failed with "
10056 "status x%x add_status x%x, mbx status x%x\n",
10057 shdr_status, shdr_add_status, rc);
10058 status = -ENXIO;
10059 }
10060 /* Remove wq from any list */
10061 list_del_init(&wq->list);
10062 if (rc != MBX_TIMEOUT)
10063 mempool_free(mbox, wq->phba->mbox_mem_pool);
10064 return status;
10065}
10066
10067/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @phba: HBA structure that indicates the port on which to destroy the queues.
 * @hrq: The queue structure associated with the header receive queue.
 * @drq: The queue structure associated with the data receive queue.
 *
 * This function destroys the receive queue pair, as detailed in @hrq and
 * @drq, by sending mailbox commands, specific to the type of queue, to the
 * HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues to
 * destroy.
 *
 * On success this function will return zero. If a queue destroy mailbox
 * command fails this function will return -ENXIO.
10078 **/
10079uint32_t
10080lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
10081 struct lpfc_queue *drq)
10082{
10083 LPFC_MBOXQ_t *mbox;
10084 int rc, length, status = 0;
10085 uint32_t shdr_status, shdr_add_status;
10086 union lpfc_sli4_cfg_shdr *shdr;
10087
10088 if (!hrq || !drq)
10089 return -ENODEV;
10090 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
10091 if (!mbox)
10092 return -ENOMEM;
10093 length = (sizeof(struct lpfc_mbx_rq_destroy) -
10094 sizeof(struct mbox_header));
10095 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10096 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
10097 length, LPFC_SLI4_MBX_EMBED);
10098 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
10099 hrq->queue_id);
10100 mbox->vport = hrq->phba->pport;
10101 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10102 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
10103 /* The IOCTL status is embedded in the mailbox subheader. */
10104 shdr = (union lpfc_sli4_cfg_shdr *)
10105 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10106 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10107 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10108 if (shdr_status || shdr_add_status || rc) {
10109 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10110 "2509 RQ_DESTROY mailbox failed with "
10111 "status x%x add_status x%x, mbx status x%x\n",
10112 shdr_status, shdr_add_status, rc);
10113 if (rc != MBX_TIMEOUT)
10114 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10115 return -ENXIO;
10116 }
10117 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
10118 drq->queue_id);
10119 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
10120 shdr = (union lpfc_sli4_cfg_shdr *)
10121 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
10122 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10123 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10124 if (shdr_status || shdr_add_status || rc) {
10125 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10126 "2510 RQ_DESTROY mailbox failed with "
10127 "status x%x add_status x%x, mbx status x%x\n",
10128 shdr_status, shdr_add_status, rc);
10129 status = -ENXIO;
10130 }
10131 list_del_init(&hrq->list);
10132 list_del_init(&drq->list);
10133 if (rc != MBX_TIMEOUT)
10134 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10135 return status;
10136}
10137
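/*
 * Illustrative sketch (not part of the driver): teardown runs in the
 * reverse order of creation -- child queues (WQ/MQ/RQ) first, then the
 * parent CQ, then the EQ -- because each destroy also unlinks the queue
 * from its parent's child_list. All calls here are the destroy routines
 * defined above.
 */
static void lpfc_example_teardown_queues(struct lpfc_hba *phba,
					 struct lpfc_queue *wq,
					 struct lpfc_queue *cq,
					 struct lpfc_queue *eq)
{
	(void) lpfc_wq_destroy(phba, wq);
	(void) lpfc_cq_destroy(phba, cq);
	(void) lpfc_eq_destroy(phba, eq);
}
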
10138/**
10139 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: pointer to lpfc hba data structure.
10141 * @pdma_phys_addr0: Physical address of the 1st SGL page.
10142 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
10143 * @xritag: the xritag that ties this io to the SGL pages.
10144 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64 byte aligned.
 * If two SGL pages are mapped then the first one must have 256 entries
 * and the second can have between 1 and 256 entries.
10155 *
10156 * Return codes:
10157 * 0 - Success
10158 * -ENXIO, -ENOMEM - Failure
10159 **/
10160int
10161lpfc_sli4_post_sgl(struct lpfc_hba *phba,
10162 dma_addr_t pdma_phys_addr0,
10163 dma_addr_t pdma_phys_addr1,
10164 uint16_t xritag)
10165{
10166 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
10167 LPFC_MBOXQ_t *mbox;
10168 int rc;
10169 uint32_t shdr_status, shdr_add_status;
10170 union lpfc_sli4_cfg_shdr *shdr;
10171
10172 if (xritag == NO_XRI) {
10173 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10174 "0364 Invalid param:\n");
10175 return -EINVAL;
10176 }
10177
10178 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10179 if (!mbox)
10180 return -ENOMEM;
10181
10182 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10183 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
10184 sizeof(struct lpfc_mbx_post_sgl_pages) -
10185 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
10186
10187 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
10188 &mbox->u.mqe.un.post_sgl_pages;
10189 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
10190 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
10191
10192 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
10193 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
10194 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
10195 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
10196
10197 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
10198 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
10199 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
10200 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
10201 if (!phba->sli4_hba.intr_enable)
10202 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10203 else
10204 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10205 /* The IOCTL status is embedded in the mailbox subheader. */
10206 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
10207 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10208 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10209 if (rc != MBX_TIMEOUT)
10210 mempool_free(mbox, phba->mbox_mem_pool);
10211 if (shdr_status || shdr_add_status || rc) {
10212 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10213 "2511 POST_SGL mailbox failed with "
10214 "status x%x add_status x%x, mbx status x%x\n",
10215 shdr_status, shdr_add_status, rc);
10216 rc = -ENXIO;
10217 }
	return rc;
10219}
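
/*
 * Illustrative sketch (not part of the driver): posting the SGL for an
 * IO that needs at most 256 scatter gather entries, i.e. a single
 * 64-byte-aligned page, so the second page address is simply 0.
 */
static int lpfc_example_post_one_sgl(struct lpfc_hba *phba,
				     struct lpfc_dmabuf *sgl_page,
				     uint16_t xritag)
{
	/* Fewer than 256 SGEs to map: pdma_phys_addr1 must be 0. */
	return lpfc_sli4_post_sgl(phba, sgl_page->phys, 0, xritag);
}
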
10220/**
 * lpfc_sli4_remove_all_sgl_pages - Remove all sgl pages registered with the HBA
 * @phba: pointer to lpfc hba data structure.
10223 *
10224 * This routine will remove all of the sgl pages registered with the hba.
10225 *
10226 * Return codes:
10227 * 0 - Success
10228 * -ENXIO, -ENOMEM - Failure
10229 **/
10230int
10231lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
10232{
10233 LPFC_MBOXQ_t *mbox;
10234 int rc;
10235 uint32_t shdr_status, shdr_add_status;
10236 union lpfc_sli4_cfg_shdr *shdr;
10237
10238 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10239 if (!mbox)
10240 return -ENOMEM;
10241
10242 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10243 LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
10244 LPFC_SLI4_MBX_EMBED);
10245 if (!phba->sli4_hba.intr_enable)
10246 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10247 else
10248 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10249 /* The IOCTL status is embedded in the mailbox subheader. */
10250 shdr = (union lpfc_sli4_cfg_shdr *)
10251 &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
10252 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10253 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10254 if (rc != MBX_TIMEOUT)
10255 mempool_free(mbox, phba->mbox_mem_pool);
10256 if (shdr_status || shdr_add_status || rc) {
10257 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10258 "2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
10259 "status x%x add_status x%x, mbx status x%x\n",
10260 shdr_status, shdr_add_status, rc);
10261 rc = -ENXIO;
10262 }
10263 return rc;
10264}
10265
10266/**
10267 * lpfc_sli4_next_xritag - Get an xritag for the io
10268 * @phba: Pointer to HBA context object.
10269 *
 * This function gets an xritag for the iocb. If no unused xritag is
 * available, it will return NO_XRI (0xFFFF), which is not a valid xritag.
10274 * The caller is not required to hold any lock.
10275 **/
10276uint16_t
10277lpfc_sli4_next_xritag(struct lpfc_hba *phba)
10278{
10279 uint16_t xritag;
10280
10281 spin_lock_irq(&phba->hbalock);
10282 xritag = phba->sli4_hba.next_xri;
10283 if ((xritag != (uint16_t) -1) && xritag <
10284 (phba->sli4_hba.max_cfg_param.max_xri
10285 + phba->sli4_hba.max_cfg_param.xri_base)) {
10286 phba->sli4_hba.next_xri++;
10287 phba->sli4_hba.max_cfg_param.xri_used++;
10288 spin_unlock_irq(&phba->hbalock);
10289 return xritag;
10290 }
10291 spin_unlock_irq(&phba->hbalock);
10292
10293 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10294 "2004 Failed to allocate XRI.last XRITAG is %d"
10295 " Max XRI is %d, Used XRI is %d\n",
10296 phba->sli4_hba.next_xri,
10297 phba->sli4_hba.max_cfg_param.max_xri,
10298 phba->sli4_hba.max_cfg_param.xri_used);
	return NO_XRI;
10300}
10301
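/*
 * Illustrative sketch (not part of the driver): callers of
 * lpfc_sli4_next_xritag() must treat NO_XRI (0xFFFF) as allocation
 * failure before using the tag.
 */
static int lpfc_example_take_xri(struct lpfc_hba *phba, uint16_t *xritag)
{
	*xritag = lpfc_sli4_next_xritag(phba);
	if (*xritag == NO_XRI)
		return -ENOMEM;	/* XRI space exhausted */
	return 0;
}
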
10302/**
10303 * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
10304 * @phba: pointer to lpfc hba data structure.
10305 *
10306 * This routine is invoked to post a block of driver's sgl pages to the
10307 * HBA using non-embedded mailbox command. No Lock is held. This routine
10308 * is only called when the driver is loading and after all IO has been
10309 * stopped.
10310 **/
10311int
10312lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
10313{
10314 struct lpfc_sglq *sglq_entry;
10315 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
10316 struct sgl_page_pairs *sgl_pg_pairs;
10317 void *viraddr;
10318 LPFC_MBOXQ_t *mbox;
10319 uint32_t reqlen, alloclen, pg_pairs;
10320 uint32_t mbox_tmo;
10321 uint16_t xritag_start = 0;
10322 int els_xri_cnt, rc = 0;
10323 uint32_t shdr_status, shdr_add_status;
10324 union lpfc_sli4_cfg_shdr *shdr;
10325
10326 /* The number of sgls to be posted */
10327 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
10328
10329 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
10330 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10331 if (reqlen > PAGE_SIZE) {
10332 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10333 "2559 Block sgl registration required DMA "
10334 "size (%d) great than a page\n", reqlen);
10335 return -ENOMEM;
10336 }
10337 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10338 if (!mbox) {
10339 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10340 "2560 Failed to allocate mbox cmd memory\n");
10341 return -ENOMEM;
10342 }
10343
10344 /* Allocate DMA memory and set up the non-embedded mailbox command */
10345 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10346 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
10347 LPFC_SLI4_MBX_NEMBED);
10348
10349 if (alloclen < reqlen) {
10350 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10351 "0285 Allocated DMA memory size (%d) is "
10352 "less than the requested DMA memory "
10353 "size (%d)\n", alloclen, reqlen);
10354 lpfc_sli4_mbox_cmd_free(phba, mbox);
10355 return -ENOMEM;
10356 }
10357
10358 /* Get the first SGE entry from the non-embedded DMA memory */
10359 if (unlikely(!mbox->sge_array)) {
10360 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10361 "2525 Failed to get the non-embedded SGE "
10362 "virtual address\n");
10363 lpfc_sli4_mbox_cmd_free(phba, mbox);
10364 return -ENOMEM;
10365 }
10366 viraddr = mbox->sge_array->addr[0];
10367
10368 /* Set up the SGL pages in the non-embedded DMA pages */
10369 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
10370 sgl_pg_pairs = &sgl->sgl_pg_pairs;
10371
10372 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
10373 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
10374 /* Set up the sge entry */
10375 sgl_pg_pairs->sgl_pg0_addr_lo =
10376 cpu_to_le32(putPaddrLow(sglq_entry->phys));
10377 sgl_pg_pairs->sgl_pg0_addr_hi =
10378 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
10379 sgl_pg_pairs->sgl_pg1_addr_lo =
10380 cpu_to_le32(putPaddrLow(0));
10381 sgl_pg_pairs->sgl_pg1_addr_hi =
10382 cpu_to_le32(putPaddrHigh(0));
10383 /* Keep the first xritag on the list */
10384 if (pg_pairs == 0)
10385 xritag_start = sglq_entry->sli4_xritag;
10386 sgl_pg_pairs++;
10387 }
10388 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10389 pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs;
10390 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10391 /* Perform endian conversion if necessary */
10392 sgl->word0 = cpu_to_le32(sgl->word0);
10393
10394 if (!phba->sli4_hba.intr_enable)
10395 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10396 else {
10397 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
10398 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
10399 }
10400 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
10401 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10402 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10403 if (rc != MBX_TIMEOUT)
10404 lpfc_sli4_mbox_cmd_free(phba, mbox);
10405 if (shdr_status || shdr_add_status || rc) {
10406 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10407 "2513 POST_SGL_BLOCK mailbox command failed "
10408 "status x%x add_status x%x mbx status x%x\n",
10409 shdr_status, shdr_add_status, rc);
10410 rc = -ENXIO;
10411 }
10412 return rc;
10413}
10414
10415/**
10416 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
10417 * @phba: pointer to lpfc hba data structure.
10418 * @sblist: pointer to scsi buffer list.
 * @cnt: number of scsi buffers on the list.
10420 *
10421 * This routine is invoked to post a block of @count scsi sgl pages from a
10422 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
10423 * No Lock is held.
10424 *
10425 **/
10426int
10427lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
10428 int cnt)
10429{
10430 struct lpfc_scsi_buf *psb;
10431 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
10432 struct sgl_page_pairs *sgl_pg_pairs;
10433 void *viraddr;
10434 LPFC_MBOXQ_t *mbox;
10435 uint32_t reqlen, alloclen, pg_pairs;
10436 uint32_t mbox_tmo;
10437 uint16_t xritag_start = 0;
10438 int rc = 0;
10439 uint32_t shdr_status, shdr_add_status;
10440 dma_addr_t pdma_phys_bpl1;
10441 union lpfc_sli4_cfg_shdr *shdr;
10442
10443 /* Calculate the requested length of the dma memory */
10444 reqlen = cnt * sizeof(struct sgl_page_pairs) +
10445 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10446 if (reqlen > PAGE_SIZE) {
10447 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10448 "0217 Block sgl registration required DMA "
10449 "size (%d) great than a page\n", reqlen);
10450 return -ENOMEM;
10451 }
10452 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10453 if (!mbox) {
10454 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10455 "0283 Failed to allocate mbox cmd memory\n");
10456 return -ENOMEM;
10457 }
10458
10459 /* Allocate DMA memory and set up the non-embedded mailbox command */
10460 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10461 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
10462 LPFC_SLI4_MBX_NEMBED);
10463
10464 if (alloclen < reqlen) {
10465 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10466 "2561 Allocated DMA memory size (%d) is "
10467 "less than the requested DMA memory "
10468 "size (%d)\n", alloclen, reqlen);
10469 lpfc_sli4_mbox_cmd_free(phba, mbox);
10470 return -ENOMEM;
10471 }
10472
10473 /* Get the first SGE entry from the non-embedded DMA memory */
10474 if (unlikely(!mbox->sge_array)) {
10475 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10476 "2565 Failed to get the non-embedded SGE "
10477 "virtual address\n");
10478 lpfc_sli4_mbox_cmd_free(phba, mbox);
10479 return -ENOMEM;
10480 }
10481 viraddr = mbox->sge_array->addr[0];
10482
10483 /* Set up the SGL pages in the non-embedded DMA pages */
10484 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
10485 sgl_pg_pairs = &sgl->sgl_pg_pairs;
10486
10487 pg_pairs = 0;
10488 list_for_each_entry(psb, sblist, list) {
10489 /* Set up the sge entry */
10490 sgl_pg_pairs->sgl_pg0_addr_lo =
10491 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
10492 sgl_pg_pairs->sgl_pg0_addr_hi =
10493 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
10494 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
10495 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
10496 else
10497 pdma_phys_bpl1 = 0;
10498 sgl_pg_pairs->sgl_pg1_addr_lo =
10499 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
10500 sgl_pg_pairs->sgl_pg1_addr_hi =
10501 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
10502 /* Keep the first xritag on the list */
10503 if (pg_pairs == 0)
10504 xritag_start = psb->cur_iocbq.sli4_xritag;
10505 sgl_pg_pairs++;
10506 pg_pairs++;
10507 }
10508 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10509 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10510 /* Perform endian conversion if necessary */
10511 sgl->word0 = cpu_to_le32(sgl->word0);
10512
10513 if (!phba->sli4_hba.intr_enable)
10514 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10515 else {
10516 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
10517 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
10518 }
10519 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
10520 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10521 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10522 if (rc != MBX_TIMEOUT)
10523 lpfc_sli4_mbox_cmd_free(phba, mbox);
10524 if (shdr_status || shdr_add_status || rc) {
10525 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10526 "2564 POST_SGL_BLOCK mailbox command failed "
10527 "status x%x add_status x%x mbx status x%x\n",
10528 shdr_status, shdr_add_status, rc);
10529 rc = -ENXIO;
10530 }
10531 return rc;
10532}
10533
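/*
 * Illustrative sketch (not part of the driver): the non-embedded
 * mailbox payload must fit in one page, so a caller batching a long
 * SCSI buffer list has to cap each lpfc_sli4_post_scsi_sgl_block()
 * call at the count that keeps reqlen <= PAGE_SIZE, mirroring the
 * reqlen computation above.
 */
static inline int lpfc_example_max_sgl_batch(void)
{
	return (PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr) -
		sizeof(uint32_t)) / sizeof(struct sgl_page_pairs);
}
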
10534/**
10535 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
10536 * @phba: pointer to lpfc_hba struct that the frame was received on
10537 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10538 *
10539 * This function checks the fields in the @fc_hdr to see if the FC frame is a
10540 * valid type of frame that the LPFC driver will handle. This function will
 * return zero if the frame is a valid frame or a non-zero value when the
 * frame does not pass the check.
10543 **/
10544static int
10545lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
10546{
10547 char *rctl_names[] = FC_RCTL_NAMES_INIT;
10548 char *type_names[] = FC_TYPE_NAMES_INIT;
10549 struct fc_vft_header *fc_vft_hdr;
10550
10551 switch (fc_hdr->fh_r_ctl) {
10552 case FC_RCTL_DD_UNCAT: /* uncategorized information */
10553 case FC_RCTL_DD_SOL_DATA: /* solicited data */
10554 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
10555 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
10556 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
10557 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
10558 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
10559 case FC_RCTL_DD_CMD_STATUS: /* command status */
10560 case FC_RCTL_ELS_REQ: /* extended link services request */
10561 case FC_RCTL_ELS_REP: /* extended link services reply */
10562 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
10563 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
10564 case FC_RCTL_BA_NOP: /* basic link service NOP */
10565 case FC_RCTL_BA_ABTS: /* basic link service abort */
10566 case FC_RCTL_BA_RMC: /* remove connection */
10567 case FC_RCTL_BA_ACC: /* basic accept */
10568 case FC_RCTL_BA_RJT: /* basic reject */
10569 case FC_RCTL_BA_PRMT:
10570 case FC_RCTL_ACK_1: /* acknowledge_1 */
10571 case FC_RCTL_ACK_0: /* acknowledge_0 */
10572 case FC_RCTL_P_RJT: /* port reject */
10573 case FC_RCTL_F_RJT: /* fabric reject */
10574 case FC_RCTL_P_BSY: /* port busy */
10575 case FC_RCTL_F_BSY: /* fabric busy to data frame */
10576 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
10577 case FC_RCTL_LCR: /* link credit reset */
10578 case FC_RCTL_END: /* end */
10579 break;
10580 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
10581 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10582 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
10583 return lpfc_fc_frame_check(phba, fc_hdr);
10584 default:
10585 goto drop;
10586 }
10587 switch (fc_hdr->fh_type) {
10588 case FC_TYPE_BLS:
10589 case FC_TYPE_ELS:
10590 case FC_TYPE_FCP:
10591 case FC_TYPE_CT:
10592 break;
10593 case FC_TYPE_IP:
10594 case FC_TYPE_ILS:
10595 default:
10596 goto drop;
10597 }
10598 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10599 "2538 Received frame rctl:%s type:%s\n",
10600 rctl_names[fc_hdr->fh_r_ctl],
10601 type_names[fc_hdr->fh_type]);
10602 return 0;
10603drop:
10604 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
10605 "2539 Dropped frame rctl:%s type:%s\n",
10606 rctl_names[fc_hdr->fh_r_ctl],
10607 type_names[fc_hdr->fh_type]);
10608 return 1;
10609}
10610
10611/**
10612 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
10613 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10614 *
10615 * This function processes the FC header to retrieve the VFI from the VF
10616 * header, if one exists. This function will return the VFI if one exists
10617 * or 0 if no VSAN Header exists.
10618 **/
10619static uint32_t
10620lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
10621{
10622 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10623
10624 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
10625 return 0;
10626 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
10627}
10628
10629/**
10630 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
10631 * @phba: Pointer to the HBA structure to search for the vport on
10632 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
10633 * @fcfi: The FC Fabric ID that the frame came from
10634 *
10635 * This function searches the @phba for a vport that matches the content of the
10636 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
10637 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
10638 * returns the matching vport pointer or NULL if unable to match frame to a
10639 * vport.
10640 **/
10641static struct lpfc_vport *
10642lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
10643 uint16_t fcfi)
10644{
10645 struct lpfc_vport **vports;
10646 struct lpfc_vport *vport = NULL;
10647 int i;
10648 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
10649 fc_hdr->fh_d_id[1] << 8 |
10650 fc_hdr->fh_d_id[2]);
10651
10652 vports = lpfc_create_vport_work_array(phba);
10653 if (vports != NULL)
10654 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
10655 if (phba->fcf.fcfi == fcfi &&
10656 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
10657 vports[i]->fc_myDID == did) {
10658 vport = vports[i];
10659 break;
10660 }
10661 }
10662 lpfc_destroy_vport_work_array(phba, vports);
10663 return vport;
10664}
10665
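/*
 * Illustrative sketch (not part of the driver): FC addresses in the
 * frame header are three big-endian bytes; they are assembled into a
 * 24-bit value exactly as lpfc_fc_frame_to_vport() does for the D_ID
 * above.
 */
static inline uint32_t lpfc_example_fc_addr(const uint8_t addr[3])
{
	return (addr[0] << 16) | (addr[1] << 8) | addr[2];
}
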
10666/**
10667 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport on which this frame was received.
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
10669 *
10670 * This function searches through the existing incomplete sequences that have
10671 * been sent to this @vport. If the frame matches one of the incomplete
10672 * sequences then the dbuf in the @dmabuf is added to the list of frames that
10673 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
10675 * This function returns a pointer to the first dmabuf in the sequence list that
10676 * the frame was linked to.
10677 **/
10678static struct hbq_dmabuf *
10679lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10680{
10681 struct fc_frame_header *new_hdr;
10682 struct fc_frame_header *temp_hdr;
10683 struct lpfc_dmabuf *d_buf;
10684 struct lpfc_dmabuf *h_buf;
10685 struct hbq_dmabuf *seq_dmabuf = NULL;
10686 struct hbq_dmabuf *temp_dmabuf = NULL;
10687
10688 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10689 /* Use the hdr_buf to find the sequence that this frame belongs to */
10690 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
10691 temp_hdr = (struct fc_frame_header *)h_buf->virt;
10692 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
10693 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
10694 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
10695 continue;
10696 /* found a pending sequence that matches this frame */
10697 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10698 break;
10699 }
10700 if (!seq_dmabuf) {
10701 /*
10702 * This indicates first frame received for this sequence.
10703 * Queue the buffer on the vport's rcv_buffer_list.
10704 */
10705 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
10706 return dmabuf;
10707 }
10708 temp_hdr = seq_dmabuf->hbuf.virt;
10709 if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
10710 list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
10711 return dmabuf;
10712 }
10713 /* find the correct place in the sequence to insert this frame */
10714 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
10715 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10716 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
10717 /*
10718 * If the frame's sequence count is greater than the frame on
10719 * the list then insert the frame right after this frame
10720 */
10721 if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
10722 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
10723 return seq_dmabuf;
10724 }
10725 }
10726 return NULL;
10727}
10728
10729/**
10730 * lpfc_seq_complete - Indicates if a sequence is complete
10731 * @dmabuf: pointer to a dmabuf that describes the FC sequence
10732 *
10733 * This function checks the sequence, starting with the frame described by
10734 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three major things. 1) That the first
 * frame has a sequence count of zero. 2) There is a frame with the last frame
 * of sequence bit set. 3) That there are no holes in the sequence count. The
 * function will
10739 * return 1 when the sequence is complete, otherwise it will return 0.
10740 **/
10741static int
10742lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
10743{
10744 struct fc_frame_header *hdr;
10745 struct lpfc_dmabuf *d_buf;
10746 struct hbq_dmabuf *seq_dmabuf;
10747 uint32_t fctl;
10748 int seq_count = 0;
10749
10750 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure the first frame of the sequence has a sequence count of zero */
10752 if (hdr->fh_seq_cnt != seq_count)
10753 return 0;
10754 fctl = (hdr->fh_f_ctl[0] << 16 |
10755 hdr->fh_f_ctl[1] << 8 |
10756 hdr->fh_f_ctl[2]);
10757 /* If last frame of sequence we can return success. */
10758 if (fctl & FC_FC_END_SEQ)
10759 return 1;
10760 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
10761 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10762 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10763 /* If there is a hole in the sequence count then fail. */
10764 if (++seq_count != hdr->fh_seq_cnt)
10765 return 0;
10766 fctl = (hdr->fh_f_ctl[0] << 16 |
10767 hdr->fh_f_ctl[1] << 8 |
10768 hdr->fh_f_ctl[2]);
10769 /* If last frame of sequence we can return success. */
10770 if (fctl & FC_FC_END_SEQ)
10771 return 1;
10772 }
10773 return 0;
10774}
10775
10776/**
10777 * lpfc_prep_seq - Prep sequence for ULP processing
10778 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
10780 *
10781 * This function takes a sequence, described by a list of frames, and creates
10782 * a list of iocbq structures to describe the sequence. This iocbq list will be
10783 * used to issue to the generic unsolicited sequence handler. This routine
10784 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it throws out the received frames that were not
 * able to be described and returns a pointer to the first iocbq. If unable to
10787 * allocate any iocbqs (including the first) this function will return NULL.
10788 **/
10789static struct lpfc_iocbq *
10790lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10791{
10792 struct lpfc_dmabuf *d_buf, *n_buf;
10793 struct lpfc_iocbq *first_iocbq, *iocbq;
10794 struct fc_frame_header *fc_hdr;
10795 uint32_t sid;
10796
10797 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10798 /* remove from receive buffer list */
10799 list_del_init(&seq_dmabuf->hbuf.list);
10800 /* get the Remote Port's SID */
10801 sid = (fc_hdr->fh_s_id[0] << 16 |
10802 fc_hdr->fh_s_id[1] << 8 |
10803 fc_hdr->fh_s_id[2]);
10804 /* Get an iocbq struct to fill in. */
10805 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
10806 if (first_iocbq) {
10807 /* Initialize the first IOCB. */
10808 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
10809 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
10810 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
10811 first_iocbq->iocb.unsli3.rcvsli3.vpi =
10812 vport->vpi + vport->phba->vpi_base;
10813 /* put the first buffer into the first IOCBq */
10814 first_iocbq->context2 = &seq_dmabuf->dbuf;
10815 first_iocbq->context3 = NULL;
10816 first_iocbq->iocb.ulpBdeCount = 1;
10817 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10818 LPFC_DATA_BUF_SIZE;
10819 first_iocbq->iocb.un.rcvels.remoteID = sid;
10820 }
10821 iocbq = first_iocbq;
10822 /*
10823 * Each IOCBq can have two Buffers assigned, so go through the list
10824 * of buffers for this sequence and save two buffers in each IOCBq
10825 */
10826 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
10827 if (!iocbq) {
10828 lpfc_in_buf_free(vport->phba, d_buf);
10829 continue;
10830 }
10831 if (!iocbq->context3) {
10832 iocbq->context3 = d_buf;
10833 iocbq->iocb.ulpBdeCount++;
10834 iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
10835 LPFC_DATA_BUF_SIZE;
10836 } else {
10837 iocbq = lpfc_sli_get_iocbq(vport->phba);
10838 if (!iocbq) {
10839 if (first_iocbq) {
10840 first_iocbq->iocb.ulpStatus =
10841 IOSTAT_FCP_RSP_ERROR;
10842 first_iocbq->iocb.un.ulpWord[4] =
10843 IOERR_NO_RESOURCES;
10844 }
10845 lpfc_in_buf_free(vport->phba, d_buf);
10846 continue;
10847 }
10848 iocbq->context2 = d_buf;
10849 iocbq->context3 = NULL;
10850 iocbq->iocb.ulpBdeCount = 1;
10851 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10852 LPFC_DATA_BUF_SIZE;
10853 iocbq->iocb.un.rcvels.remoteID = sid;
10854 list_add_tail(&iocbq->list, &first_iocbq->list);
10855 }
10856 }
10857 return first_iocbq;
10858}
10859
10860/**
10861 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
10862 * @phba: Pointer to HBA context object.
10863 *
10864 * This function is called with no lock held. This function processes all
10865 * the received buffers and gives it to upper layers when a received buffer
10866 * indicates that it is the final frame in the sequence. The interrupt
10867 * service routine processes received buffers at interrupt contexts and adds
10868 * received dma buffers to the rb_pend_list queue and signals the worker thread.
10869 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
10870 * appropriate receive function when the final frame in a sequence is received.
10871 **/
10872int
10873lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
10874{
10875 LIST_HEAD(cmplq);
10876 struct hbq_dmabuf *dmabuf, *seq_dmabuf;
10877 struct fc_frame_header *fc_hdr;
10878 struct lpfc_vport *vport;
10879 uint32_t fcfi;
10880 struct lpfc_iocbq *iocbq;
10881
10882 /* Clear hba flag and get all received buffers into the cmplq */
10883 spin_lock_irq(&phba->hbalock);
10884 phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
10885 list_splice_init(&phba->rb_pend_list, &cmplq);
10886 spin_unlock_irq(&phba->hbalock);
10887
10888 /* Process each received buffer */
10889 while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
10890 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10891 /* check to see if this a valid type of frame */
10892 if (lpfc_fc_frame_check(phba, fc_hdr)) {
10893 lpfc_in_buf_free(phba, &dmabuf->dbuf);
10894 continue;
10895 }
10896 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
10897 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
10898 if (!vport) {
10899 /* throw out the frame */
10900 lpfc_in_buf_free(phba, &dmabuf->dbuf);
10901 continue;
10902 }
10903 /* Link this frame */
10904 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
10905 if (!seq_dmabuf) {
10906 /* unable to add frame to vport - throw it out */
10907 lpfc_in_buf_free(phba, &dmabuf->dbuf);
10908 continue;
10909 }
10910 /* If not last frame in sequence continue processing frames. */
10911 if (!lpfc_seq_complete(seq_dmabuf)) {
10912 /*
10913 * When saving off frames post a new one and mark this
10914 * frame to be freed when it is finished.
10915 **/
10916 lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
10917 dmabuf->tag = -1;
10918 continue;
10919 }
10920 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10921 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
10922 if (!lpfc_complete_unsol_iocb(phba,
10923 &phba->sli.ring[LPFC_ELS_RING],
10924 iocbq, fc_hdr->fh_r_ctl,
10925 fc_hdr->fh_type))
10926 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10927 "2540 Ring %d handler: unexpected Rctl "
10928 "x%x Type x%x received\n",
10929 LPFC_ELS_RING,
10930 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
	}
10932 return 0;
10933}
10934
10935/**
10936 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
10937 * @phba: pointer to lpfc hba data structure.
10938 *
10939 * This routine is invoked to post rpi header templates to the
10940 * HBA consistent with the SLI-4 interface spec. This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * PAGE_SIZE / 64 rpi context headers.
10943 *
 * This routine does not require any locks. Its usage is expected
 * to be at driver load or during reset recovery, when driver
 * execution is sequential.
10947 *
10948 * Return codes
 * 	0 - successful
 * 	-EIO - The mailbox failed to complete successfully.
10951 * When this error occurs, the driver is not guaranteed
10952 * to have any rpi regions posted to the device and
10953 * must either attempt to repost the regions or take a
10954 * fatal error.
10955 **/
10956int
10957lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
10958{
10959 struct lpfc_rpi_hdr *rpi_page;
10960 uint32_t rc = 0;
10961
10962 /* Post all rpi memory regions to the port. */
10963 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
10964 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
10965 if (rc != MBX_SUCCESS) {
10966 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10967 "2008 Error %d posting all rpi "
10968 "headers\n", rc);
10969 rc = -EIO;
10970 break;
10971 }
10972 }
10973
10974 return rc;
10975}
10976
10977/**
10978 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
10979 * @phba: pointer to lpfc hba data structure.
10980 * @rpi_page: pointer to the rpi memory region.
10981 *
10982 * This routine is invoked to post a single rpi header to the
10983 * HBA consistent with the SLI-4 interface spec. This memory region
10984 * maps up to 64 rpi context regions.
10985 *
10986 * Return codes
 * 	0 - successful
 * 	-ENOMEM - No available memory
 * 	-EIO - The mailbox failed to complete successfully.
10990 **/
10991int
10992lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
10993{
10994 LPFC_MBOXQ_t *mboxq;
10995 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
10996 uint32_t rc = 0;
10997 uint32_t mbox_tmo;
10998 uint32_t shdr_status, shdr_add_status;
10999 union lpfc_sli4_cfg_shdr *shdr;
11000
11001 /* The port is notified of the header region via a mailbox command. */
11002 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11003 if (!mboxq) {
11004 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11005 "2001 Unable to allocate memory for issuing "
11006 "SLI_CONFIG_SPECIAL mailbox command\n");
11007 return -ENOMEM;
11008 }
11009
11010 /* Post all rpi memory regions to the port. */
11011 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
11012 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
11013 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11014 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
11015 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
11016 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
11017 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
11018 hdr_tmpl, rpi_page->page_count);
11019 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
11020 rpi_page->start_rpi);
11021 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
11022 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
11023 if (!phba->sli4_hba.intr_enable)
11024 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11025 else
11026 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11027 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
11028 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11029 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11030 if (rc != MBX_TIMEOUT)
11031 mempool_free(mboxq, phba->mbox_mem_pool);
11032 if (shdr_status || shdr_add_status || rc) {
11033 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11034 "2514 POST_RPI_HDR mailbox failed with "
11035 "status x%x add_status x%x, mbx status x%x\n",
11036 shdr_status, shdr_add_status, rc);
11037 rc = -ENXIO;
11038 }
11039 return rc;
11040}
11041
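/*
 * Illustrative sketch (not part of the driver): SLI4 config mailboxes
 * in this file are issued by polling when interrupts are not yet
 * enabled (early initialization) and by sleeping on the completion
 * otherwise -- the same idiom lpfc_sli4_post_rpi_hdr() uses above.
 */
static int lpfc_example_issue_cfg_mbox(struct lpfc_hba *phba,
				       LPFC_MBOXQ_t *mboxq)
{
	uint32_t mbox_tmo;

	if (!phba->sli4_hba.intr_enable)
		return lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
	return lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
}
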
11042/**
11043 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
11044 * @phba: pointer to lpfc hba data structure.
11045 *
 * This routine is invoked to allocate an available rpi from the port's
 * valid rpi range. If the pool of available rpis runs low, it will also
 * attempt to allocate and post another rpi header page to the port.
 *
 * Returns
 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
11053 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
11054 **/
11055int
11056lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
11057{
11058 int rpi;
11059 uint16_t max_rpi, rpi_base, rpi_limit;
11060 uint16_t rpi_remaining;
11061 struct lpfc_rpi_hdr *rpi_hdr;
11062
11063 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
11064 rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
11065 rpi_limit = phba->sli4_hba.next_rpi;
11066
11067 /*
11068 * The valid rpi range is not guaranteed to be zero-based. Start
11069 * the search at the rpi_base as reported by the port.
11070 */
11071 spin_lock_irq(&phba->hbalock);
11072 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
11073 if (rpi >= rpi_limit || rpi < rpi_base)
11074 rpi = LPFC_RPI_ALLOC_ERROR;
11075 else {
11076 set_bit(rpi, phba->sli4_hba.rpi_bmask);
11077 phba->sli4_hba.max_cfg_param.rpi_used++;
11078 phba->sli4_hba.rpi_count++;
11079 }
11080
11081 /*
11082 * Don't try to allocate more rpi header regions if the device's
11083 * limit on available rpis has been exhausted.
11084 */
11085 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
11086 (phba->sli4_hba.rpi_count >= max_rpi)) {
11087 spin_unlock_irq(&phba->hbalock);
11088 return rpi;
11089 }
11090
11091 /*
11092 * If the driver is running low on rpi resources, allocate another
11093 * page now. Note that the next_rpi value is used because
11094 * it represents how many rpis have actually been provisioned,
11095 * whereas max_rpi is the maximum the device supports.
11096 */
11097 rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
11098 phba->sli4_hba.rpi_count;
11099 spin_unlock_irq(&phba->hbalock);
11100 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
11101 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
11102 if (!rpi_hdr) {
11103 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11104 "2002 Error Could not grow rpi "
11105 "count\n");
11106 } else {
11107 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
11108 }
11109 }
11110
11111 return rpi;
11112}
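/*
 * A self-contained userspace sketch of the allocation pattern above:
 * take a lock, find the first clear bit in [base, limit), set it, and
 * report failure when the range is exhausted.  The driver does the
 * same with find_next_zero_bit()/set_bit() under phba->hbalock; the
 * names and sizes below are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

#define ID_LIMIT 128
#define BITS_PER_WORD (8 * sizeof(unsigned long))

static unsigned long id_bmask[ID_LIMIT / BITS_PER_WORD];
static pthread_mutex_t id_lock = PTHREAD_MUTEX_INITIALIZER;

static int alloc_id(unsigned int base, unsigned int limit)
{
	unsigned int id;
	int ret = -1;

	pthread_mutex_lock(&id_lock);
	for (id = base; id < limit; id++) {
		unsigned long *word = &id_bmask[id / BITS_PER_WORD];
		unsigned long mask = 1UL << (id % BITS_PER_WORD);

		if (!(*word & mask)) {		/* first clear bit wins */
			*word |= mask;
			ret = (int)id;
			break;
		}
	}
	pthread_mutex_unlock(&id_lock);
	return ret;
}

static void free_id(unsigned int id)
{
	pthread_mutex_lock(&id_lock);
	id_bmask[id / BITS_PER_WORD] &= ~(1UL << (id % BITS_PER_WORD));
	pthread_mutex_unlock(&id_lock);
}

int main(void)
{
	int a = alloc_id(8, ID_LIMIT);	/* range need not be zero-based */
	int b = alloc_id(8, ID_LIMIT);

	printf("allocated %d then %d\n", a, b);	/* 8 then 9 */
	free_id((unsigned int)a);
	return 0;
}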
11113
11114/**
11115 * lpfc_sli4_free_rpi - Release an rpi for reuse.
11116 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free.
11117 *
11118 * This routine is invoked to release an rpi to the pool of
11119 * available rpis maintained by the driver.
11120 **/
11121void
11122lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
11123{
11124 spin_lock_irq(&phba->hbalock);
11125 clear_bit(rpi, phba->sli4_hba.rpi_bmask);
11126 phba->sli4_hba.rpi_count--;
11127 phba->sli4_hba.max_cfg_param.rpi_used--;
11128 spin_unlock_irq(&phba->hbalock);
11129}
11130
11131/**
11132 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
11133 * @phba: pointer to lpfc hba data structure.
11134 *
11135 * This routine is invoked to free the memory region that
11136 * tracked rpi availability via a bitmask.
11137 **/
11138void
11139lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
11140{
11141 kfree(phba->sli4_hba.rpi_bmask);
11142}
11143
11144/**
11145 * lpfc_sli4_resume_rpi - Resume an rpi with the port
11146 * @ndlp: pointer to the lpfc nodelist entry whose rpi is resumed.
11147 *
11148 * This routine is invoked to issue a RESUME_RPI mailbox command to
11149 * the port, resuming I/O on the rpi registered for the remote node.
11150 **/
11151int
11152lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
11153{
11154 LPFC_MBOXQ_t *mboxq;
11155 struct lpfc_hba *phba = ndlp->phba;
11156 int rc;
11157
11158 /* The rpi is resumed via a RESUME_RPI mailbox command. */
11159 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11160 if (!mboxq)
11161 return -ENOMEM;
11162
11163 /* Build and issue the RESUME_RPI mailbox command. */
11164 lpfc_resume_rpi(mboxq, ndlp);
11165 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11166 if (rc == MBX_NOT_FINISHED) {
11167 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11168 "2010 Resume RPI Mailbox failed "
11169 "status %d, mbxStatus x%x\n", rc,
11170 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11171 mempool_free(mboxq, phba->mbox_mem_pool);
11172 return -EIO;
11173 }
11174 return 0;
11175}
11176
11177/**
11178 * lpfc_sli4_init_vpi - Initialize a vpi with the port
11179 * @phba: pointer to lpfc hba data structure.
11180 * @vpi: vpi value to activate with the port.
11181 *
11182 * This routine is invoked to activate a vpi with the
11183 * port when the host intends to use vports with a
11184 * nonzero vpi.
11185 *
11186 * Returns:
11187 * 0 - success
11188 * negative errno value otherwise
11189 **/
11190int
11191lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11192{
11193 LPFC_MBOXQ_t *mboxq;
11194 int rc = 0;
11195 uint32_t mbox_tmo;
11196
11197 if (vpi == 0)
11198 return -EINVAL;
11199 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11200 if (!mboxq)
11201 return -ENOMEM;
11202 lpfc_init_vpi(mboxq, vpi);
11203 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
11204 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11205 if (rc != MBX_TIMEOUT)
11206 mempool_free(mboxq, phba->mbox_mem_pool);
11207 if (rc != MBX_SUCCESS) {
11208 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11209 "2022 INIT VPI Mailbox failed "
11210 "status %d, mbxStatus x%x\n", rc,
11211 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11212 rc = -EIO;
11213 }
11214 return rc;
11215}
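/*
 * Typical call site (mirrored from the lpfc_vport_create() hunk in
 * lpfc_vport.c later in this patch): on an SLI4 port a freshly
 * allocated nonzero vpi must be activated with INIT_VPI before the
 * vport can use it, and the vpi is handed back on failure.  Fragment
 * for illustration; phba, vpi, rc, and error_out come from the caller.
 */
	if (phba->sli_rev == LPFC_SLI_REV4) {
		rc = lpfc_sli4_init_vpi(phba, vpi);
		if (rc) {
			rc = VPORT_NORESOURCES;
			lpfc_free_vpi(phba, vpi);
			goto error_out;
		}
	}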
11216
11217/**
11218 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
11219 * @phba: pointer to lpfc hba data structure.
11220 * @mboxq: Pointer to mailbox object.
11221 *
11222 * This routine is the completion handler for the ADD_FCF mailbox
11223 * command. It checks the mailbox subheader status and frees the
11224 * nonembedded mailbox resources when the command completes.
11225 **/
11226static void
11227lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
11228{
11229 void *virt_addr;
11230 union lpfc_sli4_cfg_shdr *shdr;
11231 uint32_t shdr_status, shdr_add_status;
11232
11233 virt_addr = mboxq->sge_array->addr[0];
11234 /* The IOCTL status is embedded in the mailbox subheader. */
11235 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
11236 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11237 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11238
11239 if ((shdr_status || shdr_add_status) &&
11240 (shdr_status != STATUS_FCF_IN_USE))
11241 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11242 "2558 ADD_FCF_RECORD mailbox failed with "
11243 "status x%x add_status x%x\n",
11244 shdr_status, shdr_add_status);
11245
11246 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11247}
11248
11249/**
11250 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
11251 * @phba: pointer to lpfc hba data structure.
11252 * @fcf_record: pointer to the initialized fcf record to add.
11253 *
11254 * This routine is invoked to manually add a single FCF record. The caller
11255 * must pass a completely initialized FCF_Record. This routine takes
11256 * care of the nonembedded mailbox operations.
11257 **/
11258int
11259lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
11260{
11261 int rc = 0;
11262 LPFC_MBOXQ_t *mboxq;
11263 uint8_t *bytep;
11264 void *virt_addr;
11265 dma_addr_t phys_addr;
11266 struct lpfc_mbx_sge sge;
11267 uint32_t alloc_len, req_len;
11268 uint32_t fcfindex;
11269
11270 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11271 if (!mboxq) {
11272 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11273 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
11274 return -ENOMEM;
11275 }
11276
11277 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
11278 sizeof(uint32_t);
11279
11280 /* Allocate DMA memory and set up the non-embedded mailbox command */
11281 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11282 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
11283 req_len, LPFC_SLI4_MBX_NEMBED);
11284 if (alloc_len < req_len) {
11285 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11286 "2523 Allocated DMA memory size (x%x) is "
11287 "less than the requested DMA memory "
11288 "size (x%x)\n", alloc_len, req_len);
11289 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11290 return -ENOMEM;
11291 }
11292
11293 /*
11294 * Get the first SGE entry from the non-embedded DMA memory. This
11295 * routine only uses a single SGE.
11296 */
11297 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11298 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11299 if (unlikely(!mboxq->sge_array)) {
11300 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11301 "2526 Failed to get the non-embedded SGE "
11302 "virtual address\n");
11303 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11304 return -ENOMEM;
11305 }
11306 virt_addr = mboxq->sge_array->addr[0];
11307 /*
11308 * Configure the FCF record for FCFI 0. This is the driver's
11309 * hardcoded default and gets used in non-FIP mode.
11310 */
11311 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
11312 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11313 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
11314
11315 /*
11316 * Copy the fcf_index and the FCF Record Data. The data starts after
11317 * the FCoE header plus word10. The data copy needs to be endian
11318 * correct.
11319 */
11320 bytep += sizeof(uint32_t);
11321 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
11322 mboxq->vport = phba->pport;
11323 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
11324 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11325 if (rc == MBX_NOT_FINISHED) {
11326 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11327 "2515 ADD_FCF_RECORD mailbox failed with "
11328 "status 0x%x\n", rc);
11329 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11330 rc = -EIO;
11331 } else
11332 rc = 0;
11333
11334 return rc;
11335}
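/*
 * A self-contained sketch of the nonembedded payload layout built
 * above: the config subheader comes first, then the 32-bit fcf_index,
 * then the FCF record itself.  The struct sizes are illustrative
 * stand-ins, not the driver's real definitions.
 */
#include <stdint.h>
#include <stdio.h>

struct shdr_model { uint32_t words[4]; };	/* stand-in cfg subheader */
struct fcf_record_model { uint8_t bytes[208]; };	/* stand-in record */

int main(void)
{
	size_t idx_off = sizeof(struct shdr_model);
	size_t rec_off = idx_off + sizeof(uint32_t);
	size_t req_len = rec_off + sizeof(struct fcf_record_model);

	printf("fcf_index at %zu, record at %zu, req_len %zu\n",
	       idx_off, rec_off, req_len);
	return 0;
}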
11336
11337/**
11338 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
11339 * @phba: pointer to lpfc hba data structure.
11340 * @fcf_record: pointer to the fcf record to write the default data.
11341 * @fcf_index: FCF table entry index.
11342 *
11343 * This routine is invoked to build the driver's default FCF record. The
11344 * values used are hardcoded. This routine handles memory initialization.
11345 *
11346 **/
11347void
11348lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
11349 struct fcf_record *fcf_record,
11350 uint16_t fcf_index)
11351{
11352 memset(fcf_record, 0, sizeof(struct fcf_record));
11353 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
11354 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
11355 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
11356 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
11357 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
11358 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
11359 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
11360 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
11361 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
11362 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
11363 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
11364 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
11365 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
11366 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
11367 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
11368 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
11369 /* Set the VLAN bit map */
11370 if (phba->valid_vlan) {
11371 fcf_record->vlan_bitmap[phba->vlan_id / 8]
11372 = 1 << (phba->vlan_id % 8);
11373 }
11374}
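/*
 * A self-contained sketch of the VLAN bitmap encoding above: one bit
 * per VLAN id, eight ids per byte, so id N lands in byte N / 8 at bit
 * N % 8.  The bitmap size below is illustrative (4096 ids / 8).
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char vlan_bitmap[512];
	unsigned int vlan_id = 101;	/* any valid id works here */

	memset(vlan_bitmap, 0, sizeof(vlan_bitmap));
	vlan_bitmap[vlan_id / 8] = 1 << (vlan_id % 8);
	printf("vlan %u -> byte %u = 0x%02x\n",
	       vlan_id, vlan_id / 8, vlan_bitmap[vlan_id / 8]);
	return 0;
}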
11375
11376/**
11377 * lpfc_sli4_read_fcf_record - Read an FCF record from the port.
11378 * @phba: pointer to lpfc hba data structure.
11379 * @fcf_index: FCF table entry offset.
11380 *
11381 * This routine is invoked to read the FCF record from the
11382 * device at the given @fcf_index.
11383 **/
11384int
11385lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11386{
11387 int rc = 0, error;
11388 LPFC_MBOXQ_t *mboxq;
11389 void *virt_addr;
11390 dma_addr_t phys_addr;
11391 uint8_t *bytep;
11392 struct lpfc_mbx_sge sge;
11393 uint32_t alloc_len, req_len;
11394 struct lpfc_mbx_read_fcf_tbl *read_fcf;
11395
11396 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11397 if (!mboxq) {
11398 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11399 "2000 Failed to allocate mbox for "
11400 "READ_FCF cmd\n");
11401 return -ENOMEM;
11402 }
11403
11404 req_len = sizeof(struct fcf_record) +
11405 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
11406
11407 /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
11408 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11409 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
11410 LPFC_SLI4_MBX_NEMBED);
11411
11412 if (alloc_len < req_len) {
11413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11414 "0291 Allocated DMA memory size (x%x) is "
11415 "less than the requested DMA memory "
11416 "size (x%x)\n", alloc_len, req_len);
11417 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11418 return -ENOMEM;
11419 }
11420
11421 /* Get the first SGE entry from the non-embedded DMA memory. This
11422 * routine only uses a single SGE.
11423 */
11424 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11425 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11426 if (unlikely(!mboxq->sge_array)) {
11427 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11428 "2527 Failed to get the non-embedded SGE "
11429 "virtual address\n");
11430 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11431 return -ENOMEM;
11432 }
11433 virt_addr = mboxq->sge_array->addr[0];
11434 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
11435
11436 /* Set up command fields */
11437 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
11438 /* Perform necessary endian conversion */
11439 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11440 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
11441 mboxq->vport = phba->pport;
11442 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
11443 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11444 if (rc == MBX_NOT_FINISHED) {
11445 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11446 error = -EIO;
11447 } else
11448 error = 0;
11449 return error;
11450}
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 883938652a6..7d37eb7459b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -29,13 +29,23 @@ typedef enum _lpfc_ctx_cmd {
29 LPFC_CTX_HOST 29 LPFC_CTX_HOST
30} lpfc_ctx_cmd; 30} lpfc_ctx_cmd;
31 31
32/* This structure is used to carry the needed response IOCB states */
33struct lpfc_sli4_rspiocb_info {
34 uint8_t hw_status;
35 uint8_t bfield;
36#define LPFC_XB 0x1
37#define LPFC_PV 0x2
38 uint8_t priority;
39 uint8_t reserved;
40};
41
32/* This structure is used to handle IOCB requests / responses */ 42/* This structure is used to handle IOCB requests / responses */
33struct lpfc_iocbq { 43struct lpfc_iocbq {
34 /* lpfc_iocbqs are used in double linked lists */ 44 /* lpfc_iocbqs are used in double linked lists */
35 struct list_head list; 45 struct list_head list;
36 struct list_head clist; 46 struct list_head clist;
37 uint16_t iotag; /* pre-assigned IO tag */ 47 uint16_t iotag; /* pre-assigned IO tag */
38 uint16_t rsvd1; 48 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
39 49
40 IOCB_t iocb; /* IOCB cmd */ 50 IOCB_t iocb; /* IOCB cmd */
41 uint8_t retry; /* retry counter for IOCB cmd - if needed */ 51 uint8_t retry; /* retry counter for IOCB cmd - if needed */
@@ -65,7 +75,7 @@ struct lpfc_iocbq {
65 struct lpfc_iocbq *); 75 struct lpfc_iocbq *);
66 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 76 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
67 struct lpfc_iocbq *); 77 struct lpfc_iocbq *);
68 78 struct lpfc_sli4_rspiocb_info sli4_info;
69}; 79};
70 80
71#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ 81#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
@@ -81,14 +91,18 @@ struct lpfc_iocbq {
81typedef struct lpfcMboxq { 91typedef struct lpfcMboxq {
82 /* MBOXQs are used in single linked lists */ 92 /* MBOXQs are used in single linked lists */
83 struct list_head list; /* ptr to next mailbox command */ 93 struct list_head list; /* ptr to next mailbox command */
84 MAILBOX_t mb; /* Mailbox cmd */ 94 union {
85 struct lpfc_vport *vport;/* virutal port pointer */ 95 MAILBOX_t mb; /* Mailbox cmd */
96 struct lpfc_mqe mqe;
97 } u;
98 struct lpfc_vport *vport;/* virtual port pointer */
86 void *context1; /* caller context information */ 99 void *context1; /* caller context information */
87 void *context2; /* caller context information */ 100 void *context2; /* caller context information */
88 101
89 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *); 102 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
90 uint8_t mbox_flag; 103 uint8_t mbox_flag;
91 104 struct lpfc_mcqe mcqe;
105 struct lpfc_mbx_nembed_sge_virt *sge_array;
92} LPFC_MBOXQ_t; 106} LPFC_MBOXQ_t;
93 107
94#define MBX_POLL 1 /* poll mailbox till command done, then 108#define MBX_POLL 1 /* poll mailbox till command done, then
@@ -230,10 +244,11 @@ struct lpfc_sli {
230 244
231 /* Additional sli_flags */ 245 /* Additional sli_flags */
232#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */ 246#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */
233#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */ 247#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */
234#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ 248#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
235#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ 249#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
236#define LPFC_MENLO_MAINT 0x1000 /* need for menlo fw download */ 250#define LPFC_MENLO_MAINT 0x1000 /* need for menlo fw download */
251#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
237 252
238 struct lpfc_sli_ring ring[LPFC_MAX_RING]; 253 struct lpfc_sli_ring ring[LPFC_MAX_RING];
239 int fcp_ring; /* ring used for FCP initiator commands */ 254 int fcp_ring; /* ring used for FCP initiator commands */
@@ -261,6 +276,8 @@ struct lpfc_sli {
261 276
262#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox 277#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
263 command */ 278 command */
279#define LPFC_MBOX_SLI4_CONFIG_TMO 60 /* Sec tmo for outstanding mbox
280 command */
264#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write 281#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
265 * or erase cmds. This is especially 282 * or erase cmds. This is especially
266 * long because of the potential of 283 * long because of the potential of
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
new file mode 100644
index 00000000000..5196b46608d
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -0,0 +1,467 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21#define LPFC_ACTIVE_MBOX_WAIT_CNT 100
22#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
23#define LPFC_GET_QE_REL_INT 32
24#define LPFC_RPI_LOW_WATER_MARK 10
25/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
26#define LPFC_NEMBED_MBOX_SGL_CNT 254
27
28/* Multi-queue arrangement for fast-path FCP work queues */
29#define LPFC_FN_EQN_MAX 8
30#define LPFC_SP_EQN_DEF 1
31#define LPFC_FP_EQN_DEF 1
32#define LPFC_FP_EQN_MIN 1
33#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
34
35#define LPFC_FN_WQN_MAX 32
36#define LPFC_SP_WQN_DEF 1
37#define LPFC_FP_WQN_DEF 4
38#define LPFC_FP_WQN_MIN 1
39#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
40
41/*
42 * Provide the default FCF Record attributes used by the driver
 43 * when non-FIP mode is configured and there are no other default
 44 * FCF Record attributes.
45 */
46#define LPFC_FCOE_FCF_DEF_INDEX 0
47#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF
48#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF
49
50/* First 3 bytes of default FCF MAC is specified by FC_MAP */
51#define LPFC_FCOE_FCF_MAC3 0xFF
52#define LPFC_FCOE_FCF_MAC4 0xFF
53#define LPFC_FCOE_FCF_MAC5 0xFE
54#define LPFC_FCOE_FCF_MAP0 0x0E
55#define LPFC_FCOE_FCF_MAP1 0xFC
56#define LPFC_FCOE_FCF_MAP2 0x00
57#define LPFC_FCOE_MAX_RCV_SIZE 0x5AC
58#define LPFC_FCOE_FKA_ADV_PER 0
59#define LPFC_FCOE_FIP_PRIORITY 0x80
60
61enum lpfc_sli4_queue_type {
62 LPFC_EQ,
63 LPFC_GCQ,
64 LPFC_MCQ,
65 LPFC_WCQ,
66 LPFC_RCQ,
67 LPFC_MQ,
68 LPFC_WQ,
69 LPFC_HRQ,
70 LPFC_DRQ
71};
72
73/* The queue sub-type defines the functional purpose of the queue */
74enum lpfc_sli4_queue_subtype {
75 LPFC_NONE,
76 LPFC_MBOX,
77 LPFC_FCP,
78 LPFC_ELS,
79 LPFC_USOL
80};
81
82union sli4_qe {
83 void *address;
84 struct lpfc_eqe *eqe;
85 struct lpfc_cqe *cqe;
86 struct lpfc_mcqe *mcqe;
87 struct lpfc_wcqe_complete *wcqe_complete;
88 struct lpfc_wcqe_release *wcqe_release;
89 struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
90 struct lpfc_rcqe_complete *rcqe_complete;
91 struct lpfc_mqe *mqe;
92 union lpfc_wqe *wqe;
93 struct lpfc_rqe *rqe;
94};
95
96struct lpfc_queue {
97 struct list_head list;
98 enum lpfc_sli4_queue_type type;
99 enum lpfc_sli4_queue_subtype subtype;
100 struct lpfc_hba *phba;
101 struct list_head child_list;
102 uint32_t entry_count; /* Number of entries to support on the queue */
103 uint32_t entry_size; /* Size of each queue entry. */
104 uint32_t queue_id; /* Queue ID assigned by the hardware */
105 struct list_head page_list;
106 uint32_t page_count; /* Number of pages allocated for this queue */
107
108 uint32_t host_index; /* The host's index for putting or getting */
109 uint32_t hba_index; /* The last known hba index for get or put */
110 union sli4_qe qe[1]; /* array to index entries (must be last) */
111};
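/*
 * A self-contained sketch of the trailing-array idiom used by struct
 * lpfc_queue above (qe[1] "must be last"): allocate the header plus
 * n - 1 extra entries in one block and index qe[] up to entry_count.
 * Names are illustrative, not driver code.
 */
#include <stdio.h>
#include <stdlib.h>

struct q_model {
	unsigned int entry_count;
	void *qe[1];	/* grows with the allocation; must be last */
};

static struct q_model *q_alloc(unsigned int n)
{
	struct q_model *q;

	q = calloc(1, sizeof(*q) + (n - 1) * sizeof(q->qe[0]));
	if (q)
		q->entry_count = n;
	return q;
}

int main(void)
{
	struct q_model *q = q_alloc(256);

	if (!q)
		return 1;
	printf("%u entries at %p\n", q->entry_count, (void *)q->qe);
	free(q);
	return 0;
}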
112
113struct lpfc_cq_event {
114 struct list_head list;
115 union {
116 struct lpfc_mcqe mcqe_cmpl;
117 struct lpfc_acqe_link acqe_link;
118 struct lpfc_acqe_fcoe acqe_fcoe;
119 struct lpfc_acqe_dcbx acqe_dcbx;
120 struct lpfc_rcqe rcqe_cmpl;
121 struct sli4_wcqe_xri_aborted wcqe_axri;
122 } cqe;
123};
124
125struct lpfc_sli4_link {
126 uint8_t speed;
127 uint8_t duplex;
128 uint8_t status;
129 uint8_t physical;
130 uint8_t fault;
131};
132
133struct lpfc_fcf {
134 uint8_t fabric_name[8];
135 uint8_t mac_addr[6];
136 uint16_t fcf_indx;
137 uint16_t fcfi;
138 uint32_t fcf_flag;
139#define FCF_AVAILABLE 0x01 /* FCF available for discovery */
140#define FCF_REGISTERED 0x02 /* FCF registered with FW */
141#define FCF_DISCOVERED 0x04 /* FCF discovery started */
142#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */
 143#define FCF_IN_USE 0x10 /* At least one discovery completed */
144#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */
145 uint32_t priority;
146 uint32_t addr_mode;
147 uint16_t vlan_id;
148};
149
150#define LPFC_REGION23_SIGNATURE "RG23"
151#define LPFC_REGION23_VERSION 1
152#define LPFC_REGION23_LAST_REC 0xff
153struct lpfc_fip_param_hdr {
154 uint8_t type;
155#define FCOE_PARAM_TYPE 0xA0
156 uint8_t length;
157#define FCOE_PARAM_LENGTH 2
158 uint8_t parm_version;
159#define FIPP_VERSION 0x01
160 uint8_t parm_flags;
161#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6
162#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3
163#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags
164#define FIPP_MODE_ON 0x2
165#define FIPP_MODE_OFF 0x0
166#define FIPP_VLAN_VALID 0x1
167};
168
169struct lpfc_fcoe_params {
170 uint8_t fc_map[3];
171 uint8_t reserved1;
172 uint16_t vlan_tag;
173 uint8_t reserved[2];
174};
175
176struct lpfc_fcf_conn_hdr {
177 uint8_t type;
178#define FCOE_CONN_TBL_TYPE 0xA1
179 uint8_t length; /* words */
180 uint8_t reserved[2];
181};
182
183struct lpfc_fcf_conn_rec {
184 uint16_t flags;
185#define FCFCNCT_VALID 0x0001
186#define FCFCNCT_BOOT 0x0002
187#define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */
188#define FCFCNCT_FBNM_VALID 0x0008
189#define FCFCNCT_SWNM_VALID 0x0010
190#define FCFCNCT_VLAN_VALID 0x0020
191#define FCFCNCT_AM_VALID 0x0040
192#define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */
193#define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */
194
195 uint16_t vlan_tag;
196 uint8_t fabric_name[8];
197 uint8_t switch_name[8];
198};
199
200struct lpfc_fcf_conn_entry {
201 struct list_head list;
202 struct lpfc_fcf_conn_rec conn_rec;
203};
204
205/*
206 * Define the host's bootstrap mailbox. This structure contains
207 * the member attributes needed to create, use, and destroy the
208 * bootstrap mailbox region.
209 *
210 * The macro definitions for the bmbx data structure are defined
211 * in lpfc_hw4.h with the register definition.
212 */
213struct lpfc_bmbx {
214 struct lpfc_dmabuf *dmabuf;
215 struct dma_address dma_address;
216 void *avirt;
217 dma_addr_t aphys;
218 uint32_t bmbx_size;
219};
220
 221#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4B
222
223#define LPFC_EQE_SIZE_4B 4
224#define LPFC_EQE_SIZE_16B 16
225#define LPFC_CQE_SIZE 16
226#define LPFC_WQE_SIZE 64
227#define LPFC_MQE_SIZE 256
228#define LPFC_RQE_SIZE 8
229
230#define LPFC_EQE_DEF_COUNT 1024
231#define LPFC_CQE_DEF_COUNT 256
232#define LPFC_WQE_DEF_COUNT 64
233#define LPFC_MQE_DEF_COUNT 16
234#define LPFC_RQE_DEF_COUNT 512
235
236#define LPFC_QUEUE_NOARM false
237#define LPFC_QUEUE_REARM true
238
239
240/*
241 * SLI4 CT field defines
242 */
243#define SLI4_CT_RPI 0
244#define SLI4_CT_VPI 1
245#define SLI4_CT_VFI 2
246#define SLI4_CT_FCFI 3
247
248#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000
249
250/*
251 * SLI4 specific data structures
252 */
253struct lpfc_max_cfg_param {
254 uint16_t max_xri;
255 uint16_t xri_base;
256 uint16_t xri_used;
257 uint16_t max_rpi;
258 uint16_t rpi_base;
259 uint16_t rpi_used;
260 uint16_t max_vpi;
261 uint16_t vpi_base;
262 uint16_t vpi_used;
263 uint16_t max_vfi;
264 uint16_t vfi_base;
265 uint16_t vfi_used;
266 uint16_t max_fcfi;
267 uint16_t fcfi_base;
268 uint16_t fcfi_used;
269 uint16_t max_eq;
270 uint16_t max_rq;
271 uint16_t max_cq;
272 uint16_t max_wq;
273};
274
275struct lpfc_hba;
276/* SLI4 HBA multi-fcp queue handler struct */
277struct lpfc_fcp_eq_hdl {
278 uint32_t idx;
279 struct lpfc_hba *phba;
280};
281
282/* SLI4 HBA data structure entries */
283struct lpfc_sli4_hba {
284 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
285 PCI BAR0, config space registers */
286 void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
287 PCI BAR1, control registers */
288 void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
289 PCI BAR2, doorbell registers */
290 /* BAR0 PCI config space register memory map */
291 void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
292 void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
293 void __iomem *ONLINE0regaddr; /* Address to components of internal UE */
294 void __iomem *ONLINE1regaddr; /* Address to components of internal UE */
295#define LPFC_ONLINE_NERR 0xFFFFFFFF
296 void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
297 /* BAR1 FCoE function CSR register memory map */
298 void __iomem *STAregaddr; /* Address to HST_STATE register */
299 void __iomem *ISRregaddr; /* Address to HST_ISR register */
300 void __iomem *IMRregaddr; /* Address to HST_IMR register */
301 void __iomem *ISCRregaddr; /* Address to HST_ISCR register */
302 /* BAR2 VF-0 doorbell register memory map */
303 void __iomem *RQDBregaddr; /* Address to RQ_DOORBELL register */
304 void __iomem *WQDBregaddr; /* Address to WQ_DOORBELL register */
305 void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */
306 void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */
307 void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */
308
309 struct msix_entry *msix_entries;
310 uint32_t cfg_eqn;
311 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
312 /* Pointers to the constructed SLI4 queues */
313 struct lpfc_queue **fp_eq; /* Fast-path event queue */
314 struct lpfc_queue *sp_eq; /* Slow-path event queue */
315 struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
316 struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
317 struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
318 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
319 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
320 struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
321 struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
322 struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
323 struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
324
325 /* Setup information for various queue parameters */
326 int eq_esize;
327 int eq_ecount;
328 int cq_esize;
329 int cq_ecount;
330 int wq_esize;
331 int wq_ecount;
332 int mq_esize;
333 int mq_ecount;
334 int rq_esize;
335 int rq_ecount;
336#define LPFC_SP_EQ_MAX_INTR_SEC 10000
337#define LPFC_FP_EQ_MAX_INTR_SEC 10000
338
339 uint32_t intr_enable;
340 struct lpfc_bmbx bmbx;
341 struct lpfc_max_cfg_param max_cfg_param;
342 uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
343 uint16_t next_rpi;
344 uint16_t scsi_xri_max;
345 uint16_t scsi_xri_cnt;
346 struct list_head lpfc_free_sgl_list;
347 struct list_head lpfc_sgl_list;
348 struct lpfc_sglq **lpfc_els_sgl_array;
349 struct list_head lpfc_abts_els_sgl_list;
350 struct lpfc_scsi_buf **lpfc_scsi_psb_array;
351 struct list_head lpfc_abts_scsi_buf_list;
352 uint32_t total_sglq_bufs;
353 struct lpfc_sglq **lpfc_sglq_active_list;
354 struct list_head lpfc_rpi_hdr_list;
355 unsigned long *rpi_bmask;
356 uint16_t rpi_count;
357 struct lpfc_sli4_flags sli4_flags;
358 struct list_head sp_rspiocb_work_queue;
359 struct list_head sp_cqe_event_pool;
360 struct list_head sp_asynce_work_queue;
361 struct list_head sp_fcp_xri_aborted_work_queue;
362 struct list_head sp_els_xri_aborted_work_queue;
363 struct list_head sp_unsol_work_queue;
364 struct lpfc_sli4_link link_state;
365 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
366 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
367};
368
369enum lpfc_sge_type {
370 GEN_BUFF_TYPE,
371 SCSI_BUFF_TYPE
372};
373
374struct lpfc_sglq {
375 /* lpfc_sglqs are used in double linked lists */
376 struct list_head list;
377 struct list_head clist;
378 enum lpfc_sge_type buff_type; /* is this a scsi sgl */
379 uint16_t iotag; /* pre-assigned IO tag */
380 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
381 struct sli4_sge *sgl; /* pre-assigned SGL */
382 void *virt; /* virtual address. */
383 dma_addr_t phys; /* physical address */
384};
385
386struct lpfc_rpi_hdr {
387 struct list_head list;
388 uint32_t len;
389 struct lpfc_dmabuf *dmabuf;
390 uint32_t page_count;
391 uint32_t start_rpi;
392};
393
394/*
395 * SLI4 specific function prototypes
396 */
397int lpfc_pci_function_reset(struct lpfc_hba *);
398int lpfc_sli4_hba_setup(struct lpfc_hba *);
399int lpfc_sli4_hba_down(struct lpfc_hba *);
400int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
401 uint8_t, uint32_t, bool);
402void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
403void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
404void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
405 struct lpfc_mbx_sge *);
406
407void lpfc_sli4_hba_reset(struct lpfc_hba *);
408struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
409 uint32_t);
410void lpfc_sli4_queue_free(struct lpfc_queue *);
411uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
412uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
413 struct lpfc_queue *, uint32_t, uint32_t);
414uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
415 struct lpfc_queue *, uint32_t);
416uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
417 struct lpfc_queue *, uint32_t);
418uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
419 struct lpfc_queue *, struct lpfc_queue *, uint32_t);
420uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
421uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
422uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
423uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
424uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
425 struct lpfc_queue *);
426int lpfc_sli4_queue_setup(struct lpfc_hba *);
427void lpfc_sli4_queue_unset(struct lpfc_hba *);
428int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
429int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
430int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *);
431uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
432int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
433int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
434int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
435struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
436struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
437void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
438void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
439int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
440int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
441int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
442struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
443void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
444int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
445void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
446void lpfc_sli4_remove_rpis(struct lpfc_hba *);
447void lpfc_sli4_async_event_proc(struct lpfc_hba *);
448int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
449void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
450void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
451void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
452 struct sli4_wcqe_xri_aborted *);
453void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
454 struct sli4_wcqe_xri_aborted *);
455int lpfc_sli4_brdreset(struct lpfc_hba *);
456int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
457void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
458int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
459int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
460uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
461uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
462void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
463int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t);
464void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *);
465int lpfc_sli4_post_status_check(struct lpfc_hba *);
466uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
467
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e599519e307..6b8a148f0a5 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.1" 21#define LPFC_DRIVER_VERSION "8.3.2"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 917ad56b0af..a6313ee84ac 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -32,8 +32,10 @@
32#include <scsi/scsi_device.h> 32#include <scsi/scsi_device.h>
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h> 34#include <scsi/scsi_transport_fc.h>
35#include "lpfc_hw4.h"
35#include "lpfc_hw.h" 36#include "lpfc_hw.h"
36#include "lpfc_sli.h" 37#include "lpfc_sli.h"
38#include "lpfc_sli4.h"
37#include "lpfc_nl.h" 39#include "lpfc_nl.h"
38#include "lpfc_disc.h" 40#include "lpfc_disc.h"
39#include "lpfc_scsi.h" 41#include "lpfc_scsi.h"
@@ -89,6 +91,8 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
89 vpi = 0; 91 vpi = 0;
90 else 92 else
91 set_bit(vpi, phba->vpi_bmask); 93 set_bit(vpi, phba->vpi_bmask);
94 if (phba->sli_rev == LPFC_SLI_REV4)
95 phba->sli4_hba.max_cfg_param.vpi_used++;
92 spin_unlock_irq(&phba->hbalock); 96 spin_unlock_irq(&phba->hbalock);
93 return vpi; 97 return vpi;
94} 98}
@@ -96,8 +100,12 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
96static void 100static void
97lpfc_free_vpi(struct lpfc_hba *phba, int vpi) 101lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
98{ 102{
103 if (vpi == 0)
104 return;
99 spin_lock_irq(&phba->hbalock); 105 spin_lock_irq(&phba->hbalock);
100 clear_bit(vpi, phba->vpi_bmask); 106 clear_bit(vpi, phba->vpi_bmask);
107 if (phba->sli_rev == LPFC_SLI_REV4)
108 phba->sli4_hba.max_cfg_param.vpi_used--;
101 spin_unlock_irq(&phba->hbalock); 109 spin_unlock_irq(&phba->hbalock);
102} 110}
103 111
@@ -113,7 +121,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
113 if (!pmb) { 121 if (!pmb) {
114 return -ENOMEM; 122 return -ENOMEM;
115 } 123 }
116 mb = &pmb->mb; 124 mb = &pmb->u.mb;
117 125
118 lpfc_read_sparam(phba, pmb, vport->vpi); 126 lpfc_read_sparam(phba, pmb, vport->vpi);
119 /* 127 /*
@@ -243,23 +251,22 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
243 (vport->fc_flag & wait_flags) || 251 (vport->fc_flag & wait_flags) ||
244 ((vport->port_state > LPFC_VPORT_FAILED) && 252 ((vport->port_state > LPFC_VPORT_FAILED) &&
245 (vport->port_state < LPFC_VPORT_READY))) { 253 (vport->port_state < LPFC_VPORT_READY))) {
246 lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, 254 lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
247 "1833 Vport discovery quiesce Wait:" 255 "1833 Vport discovery quiesce Wait:"
248 " vpi x%x state x%x fc_flags x%x" 256 " state x%x fc_flags x%x"
249 " num_nodes x%x, waiting 1000 msecs" 257 " num_nodes x%x, waiting 1000 msecs"
250 " total wait msecs x%x\n", 258 " total wait msecs x%x\n",
251 vport->vpi, vport->port_state, 259 vport->port_state, vport->fc_flag,
252 vport->fc_flag, vport->num_disc_nodes, 260 vport->num_disc_nodes,
253 jiffies_to_msecs(jiffies - start_time)); 261 jiffies_to_msecs(jiffies - start_time));
254 msleep(1000); 262 msleep(1000);
255 } else { 263 } else {
256 /* Base case. Wait variants satisfied. Break out */ 264 /* Base case. Wait variants satisfied. Break out */
257 lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, 265 lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
258 "1834 Vport discovery quiesced:" 266 "1834 Vport discovery quiesced:"
259 " vpi x%x state x%x fc_flags x%x" 267 " state x%x fc_flags x%x"
260 " wait msecs x%x\n", 268 " wait msecs x%x\n",
261 vport->vpi, vport->port_state, 269 vport->port_state, vport->fc_flag,
262 vport->fc_flag,
263 jiffies_to_msecs(jiffies 270 jiffies_to_msecs(jiffies
264 - start_time)); 271 - start_time));
265 break; 272 break;
@@ -267,12 +274,10 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
267 } 274 }
268 275
269 if (time_after(jiffies, wait_time_max)) 276 if (time_after(jiffies, wait_time_max))
270 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 277 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
271 "1835 Vport discovery quiesce failed:" 278 "1835 Vport discovery quiesce failed:"
272 " vpi x%x state x%x fc_flags x%x" 279 " state x%x fc_flags x%x wait msecs x%x\n",
273 " wait msecs x%x\n", 280 vport->port_state, vport->fc_flag,
274 vport->vpi, vport->port_state,
275 vport->fc_flag,
276 jiffies_to_msecs(jiffies - start_time)); 281 jiffies_to_msecs(jiffies - start_time));
277} 282}
278 283
@@ -308,6 +313,21 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
308 goto error_out; 313 goto error_out;
309 } 314 }
310 315
316 /*
317 * In SLI4, the vpi must be activated before it can be used
318 * by the port.
319 */
320 if (phba->sli_rev == LPFC_SLI_REV4) {
321 rc = lpfc_sli4_init_vpi(phba, vpi);
322 if (rc) {
323 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
324 "1838 Failed to INIT_VPI on vpi %d "
325 "status %d\n", vpi, rc);
326 rc = VPORT_NORESOURCES;
327 lpfc_free_vpi(phba, vpi);
328 goto error_out;
329 }
330 }
311 331
312 /* Assign an unused board number */ 332 /* Assign an unused board number */
313 if ((instance = lpfc_get_instance()) < 0) { 333 if ((instance = lpfc_get_instance()) < 0) {
@@ -535,6 +555,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
535 "physical host\n"); 555 "physical host\n");
536 return VPORT_ERROR; 556 return VPORT_ERROR;
537 } 557 }
558
559 /* If the vport is a static vport fail the deletion. */
560 if ((vport->vport_flag & STATIC_VPORT) &&
561 !(phba->pport->load_flag & FC_UNLOADING)) {
562 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
563 "1837 vport_delete failed: Cannot delete "
564 "static vport.\n");
565 return VPORT_ERROR;
566 }
567
538 /* 568 /*
539 * If we are not unloading the driver then prevent the vport_delete 569 * If we are not unloading the driver then prevent the vport_delete
540 * from happening until after this vport's discovery is finished. 570 * from happening until after this vport's discovery is finished.
@@ -710,7 +740,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
710 struct lpfc_vport *port_iterator; 740 struct lpfc_vport *port_iterator;
711 struct lpfc_vport **vports; 741 struct lpfc_vport **vports;
712 int index = 0; 742 int index = 0;
713 vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *), 743 vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
714 GFP_KERNEL); 744 GFP_KERNEL);
715 if (vports == NULL) 745 if (vports == NULL)
716 return NULL; 746 return NULL;
@@ -734,7 +764,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
734 int i; 764 int i;
735 if (vports == NULL) 765 if (vports == NULL)
736 return; 766 return;
737 for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++) 767 for (i = 0; vports[i] != NULL && i <= phba->max_vports; i++)
738 scsi_host_put(lpfc_shost_from_vport(vports[i])); 768 scsi_host_put(lpfc_shost_from_vport(vports[i]));
739 kfree(vports); 769 kfree(vports);
740} 770}
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 795201fa0b4..512c2cc1a33 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -469,7 +469,7 @@ typedef struct {
469 u8 type; /* Type of the device */ 469 u8 type; /* Type of the device */
470 u8 cur_status; /* current status of the device */ 470 u8 cur_status; /* current status of the device */
471 u8 tag_depth; /* Level of tagging */ 471 u8 tag_depth; /* Level of tagging */
472 u8 sync_neg; /* sync negotiation - ENABLE or DISBALE */ 472 u8 sync_neg; /* sync negotiation - ENABLE or DISABLE */
473 u32 size; /* configurable size in terms of 512 byte 473 u32 size; /* configurable size in terms of 512 byte
474 blocks */ 474 blocks */
475}__attribute__ ((packed)) phys_drv; 475}__attribute__ ((packed)) phys_drv;
diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h
index 170399ef06f..b25b74764ec 100644
--- a/drivers/scsi/megaraid/mbox_defs.h
+++ b/drivers/scsi/megaraid/mbox_defs.h
@@ -686,7 +686,7 @@ typedef struct {
686 * @type : Type of the device 686 * @type : Type of the device
687 * @cur_status : current status of the device 687 * @cur_status : current status of the device
688 * @tag_depth : Level of tagging 688 * @tag_depth : Level of tagging
689 * @sync_neg : sync negotiation - ENABLE or DISBALE 689 * @sync_neg : sync negotiation - ENABLE or DISABLE
690 * @size : configurable size in terms of 512 byte 690 * @size : configurable size in terms of 512 byte
691 */ 691 */
692typedef struct { 692typedef struct {
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index babd4cc0cb2..286c185fa9e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -61,6 +61,7 @@
61#include <scsi/scsi_tcq.h> 61#include <scsi/scsi_tcq.h>
62#include <scsi/scsi_transport_sas.h> 62#include <scsi/scsi_transport_sas.h>
63#include <scsi/scsi_dbg.h> 63#include <scsi/scsi_dbg.h>
64#include <scsi/scsi_eh.h>
64 65
65#include "mpt2sas_debug.h" 66#include "mpt2sas_debug.h"
66 67
@@ -68,10 +69,10 @@
68#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
69#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
70#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
71#define MPT2SAS_DRIVER_VERSION "01.100.02.00" 72#define MPT2SAS_DRIVER_VERSION "01.100.03.00"
72#define MPT2SAS_MAJOR_VERSION 00 73#define MPT2SAS_MAJOR_VERSION 01
73#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
74#define MPT2SAS_BUILD_VERSION 02 75#define MPT2SAS_BUILD_VERSION 03
75#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 00
76 77
77/* 78/*
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index ba6ab170bdf..14e473d1fa7 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -473,7 +473,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
473} 473}
474 474
475/** 475/**
476 * _ctl_do_task_abort - assign an active smid to the abort_task 476 * _ctl_set_task_mid - assign an active smid to tm request
477 * @ioc: per adapter object 477 * @ioc: per adapter object
478 * @karg - (struct mpt2_ioctl_command) 478 * @karg - (struct mpt2_ioctl_command)
479 * @tm_request - pointer to mf from user space 479 * @tm_request - pointer to mf from user space
@@ -482,7 +482,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
482 * during failure, the reply frame is filled. 482 * during failure, the reply frame is filled.
483 */ 483 */
484static int 484static int
485_ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, 485_ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
486 Mpi2SCSITaskManagementRequest_t *tm_request) 486 Mpi2SCSITaskManagementRequest_t *tm_request)
487{ 487{
488 u8 found = 0; 488 u8 found = 0;
@@ -494,6 +494,14 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
494 Mpi2SCSITaskManagementReply_t *tm_reply; 494 Mpi2SCSITaskManagementReply_t *tm_reply;
495 u32 sz; 495 u32 sz;
496 u32 lun; 496 u32 lun;
497 char *desc = NULL;
498
499 if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
500 desc = "abort_task";
501 else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
502 desc = "query_task";
503 else
504 return 0;
497 505
498 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN); 506 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
499 507
@@ -517,13 +525,13 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
517 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 525 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
518 526
519 if (!found) { 527 if (!found) {
520 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: " 528 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
521 "DevHandle(0x%04x), lun(%d), no active mid!!\n", ioc->name, 529 "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
522 tm_request->DevHandle, lun)); 530 desc, tm_request->DevHandle, lun));
523 tm_reply = ioc->ctl_cmds.reply; 531 tm_reply = ioc->ctl_cmds.reply;
524 tm_reply->DevHandle = tm_request->DevHandle; 532 tm_reply->DevHandle = tm_request->DevHandle;
525 tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 533 tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
526 tm_reply->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; 534 tm_reply->TaskType = tm_request->TaskType;
527 tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4; 535 tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
528 tm_reply->VP_ID = tm_request->VP_ID; 536 tm_reply->VP_ID = tm_request->VP_ID;
529 tm_reply->VF_ID = tm_request->VF_ID; 537 tm_reply->VF_ID = tm_request->VF_ID;
@@ -535,9 +543,9 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
535 return 1; 543 return 1;
536 } 544 }
537 545
538 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: " 546 dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
539 "DevHandle(0x%04x), lun(%d), smid(%d)\n", ioc->name, 547 "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
540 tm_request->DevHandle, lun, tm_request->TaskMID)); 548 desc, tm_request->DevHandle, lun, tm_request->TaskMID));
541 return 0; 549 return 0;
542} 550}
543 551
@@ -739,8 +747,10 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
739 (Mpi2SCSITaskManagementRequest_t *)mpi_request; 747 (Mpi2SCSITaskManagementRequest_t *)mpi_request;
740 748
741 if (tm_request->TaskType == 749 if (tm_request->TaskType ==
742 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { 750 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
743 if (_ctl_do_task_abort(ioc, &karg, tm_request)) { 751 tm_request->TaskType ==
752 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
753 if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
744 mpt2sas_base_free_smid(ioc, smid); 754 mpt2sas_base_free_smid(ioc, smid);
745 goto out; 755 goto out;
746 } 756 }
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index e3a7967259e..2a01a5f2a84 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -197,12 +197,12 @@ static struct pci_device_id scsih_pci_table[] = {
197MODULE_DEVICE_TABLE(pci, scsih_pci_table); 197MODULE_DEVICE_TABLE(pci, scsih_pci_table);
198 198
199/** 199/**
200 * scsih_set_debug_level - global setting of ioc->logging_level. 200 * _scsih_set_debug_level - global setting of ioc->logging_level.
201 * 201 *
202 * Note: The logging levels are defined in mpt2sas_debug.h. 202 * Note: The logging levels are defined in mpt2sas_debug.h.
203 */ 203 */
204static int 204static int
205scsih_set_debug_level(const char *val, struct kernel_param *kp) 205_scsih_set_debug_level(const char *val, struct kernel_param *kp)
206{ 206{
207 int ret = param_set_int(val, kp); 207 int ret = param_set_int(val, kp);
208 struct MPT2SAS_ADAPTER *ioc; 208 struct MPT2SAS_ADAPTER *ioc;
@@ -215,7 +215,7 @@ scsih_set_debug_level(const char *val, struct kernel_param *kp)
215 ioc->logging_level = logging_level; 215 ioc->logging_level = logging_level;
216 return 0; 216 return 0;
217} 217}
218module_param_call(logging_level, scsih_set_debug_level, param_get_int, 218module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
219 &logging_level, 0644); 219 &logging_level, 0644);
220 220
221/** 221/**
@@ -884,6 +884,41 @@ _scsih_scsi_lookup_find_by_target(struct MPT2SAS_ADAPTER *ioc, int id,
884} 884}
885 885
886/** 886/**
887 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
888 * @ioc: per adapter object
889 * @id: target id
890 * @lun: lun number
891 * @channel: channel
892 * Context: This function will acquire ioc->scsi_lookup_lock.
893 *
894 * This will search for a matching channel:id:lun in the scsi_lookup array,
895 * returning 1 if found.
896 */
897static u8
898_scsih_scsi_lookup_find_by_lun(struct MPT2SAS_ADAPTER *ioc, int id,
899 unsigned int lun, int channel)
900{
901 u8 found;
902 unsigned long flags;
903 int i;
904
905 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
906 found = 0;
907 for (i = 0 ; i < ioc->request_depth; i++) {
908 if (ioc->scsi_lookup[i].scmd &&
909 (ioc->scsi_lookup[i].scmd->device->id == id &&
910 ioc->scsi_lookup[i].scmd->device->channel == channel &&
911 ioc->scsi_lookup[i].scmd->device->lun == lun)) {
912 found = 1;
913 goto out;
914 }
915 }
916 out:
917 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
918 return found;
919}
920
921/**
887 * _scsih_get_chain_buffer_dma - obtain block of chains (dma address) 922 * _scsih_get_chain_buffer_dma - obtain block of chains (dma address)
888 * @ioc: per adapter object 923 * @ioc: per adapter object
889 * @smid: system request message index 924 * @smid: system request message index
@@ -1047,14 +1082,14 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
1047} 1082}
1048 1083
1049/** 1084/**
1050 * scsih_change_queue_depth - setting device queue depth 1085 * _scsih_change_queue_depth - setting device queue depth
1051 * @sdev: scsi device struct 1086 * @sdev: scsi device struct
1052 * @qdepth: requested queue depth 1087 * @qdepth: requested queue depth
1053 * 1088 *
1054 * Returns queue depth. 1089 * Returns queue depth.
1055 */ 1090 */
1056static int 1091static int
1057scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) 1092_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1058{ 1093{
1059 struct Scsi_Host *shost = sdev->host; 1094 struct Scsi_Host *shost = sdev->host;
1060 int max_depth; 1095 int max_depth;
@@ -1079,14 +1114,14 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1079} 1114}
1080 1115
1081/** 1116/**
1082 * scsih_change_queue_depth - changing device queue tag type 1117 * _scsih_change_queue_depth - changing device queue tag type
1083 * @sdev: scsi device struct 1118 * @sdev: scsi device struct
1084 * @tag_type: requested tag type 1119 * @tag_type: requested tag type
1085 * 1120 *
1086 * Returns queue tag type. 1121 * Returns queue tag type.
1087 */ 1122 */
1088static int 1123static int
1089scsih_change_queue_type(struct scsi_device *sdev, int tag_type) 1124_scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
1090{ 1125{
1091 if (sdev->tagged_supported) { 1126 if (sdev->tagged_supported) {
1092 scsi_set_tag_type(sdev, tag_type); 1127 scsi_set_tag_type(sdev, tag_type);
@@ -1101,14 +1136,14 @@ scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
1101} 1136}
1102 1137
1103/** 1138/**
1104 * scsih_target_alloc - target add routine 1139 * _scsih_target_alloc - target add routine
1105 * @starget: scsi target struct 1140 * @starget: scsi target struct
1106 * 1141 *
1107 * Returns 0 if ok. Any other return is assumed to be an error and 1142 * Returns 0 if ok. Any other return is assumed to be an error and
1108 * the device is ignored. 1143 * the device is ignored.
1109 */ 1144 */
1110static int 1145static int
1111scsih_target_alloc(struct scsi_target *starget) 1146_scsih_target_alloc(struct scsi_target *starget)
1112{ 1147{
1113 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1148 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1114 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1149 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1163,13 +1198,13 @@ scsih_target_alloc(struct scsi_target *starget)
1163} 1198}
1164 1199
1165/** 1200/**
1166 * scsih_target_destroy - target destroy routine 1201 * _scsih_target_destroy - target destroy routine
1167 * @starget: scsi target struct 1202 * @starget: scsi target struct
1168 * 1203 *
1169 * Returns nothing. 1204 * Returns nothing.
1170 */ 1205 */
1171static void 1206static void
1172scsih_target_destroy(struct scsi_target *starget) 1207_scsih_target_destroy(struct scsi_target *starget)
1173{ 1208{
1174 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1209 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1175 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1210 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1212,14 +1247,14 @@ scsih_target_destroy(struct scsi_target *starget)
1212} 1247}
1213 1248
1214/** 1249/**
1215 * scsih_slave_alloc - device add routine 1250 * _scsih_slave_alloc - device add routine
1216 * @sdev: scsi device struct 1251 * @sdev: scsi device struct
1217 * 1252 *
1218 * Returns 0 if ok. Any other return is assumed to be an error and 1253 * Returns 0 if ok. Any other return is assumed to be an error and
1219 * the device is ignored. 1254 * the device is ignored.
1220 */ 1255 */
1221static int 1256static int
1222scsih_slave_alloc(struct scsi_device *sdev) 1257_scsih_slave_alloc(struct scsi_device *sdev)
1223{ 1258{
1224 struct Scsi_Host *shost; 1259 struct Scsi_Host *shost;
1225 struct MPT2SAS_ADAPTER *ioc; 1260 struct MPT2SAS_ADAPTER *ioc;
@@ -1273,13 +1308,13 @@ scsih_slave_alloc(struct scsi_device *sdev)
1273} 1308}
1274 1309
1275/** 1310/**
1276 * scsih_slave_destroy - device destroy routine 1311 * _scsih_slave_destroy - device destroy routine
1277 * @sdev: scsi device struct 1312 * @sdev: scsi device struct
1278 * 1313 *
1279 * Returns nothing. 1314 * Returns nothing.
1280 */ 1315 */
1281static void 1316static void
1282scsih_slave_destroy(struct scsi_device *sdev) 1317_scsih_slave_destroy(struct scsi_device *sdev)
1283{ 1318{
1284 struct MPT2SAS_TARGET *sas_target_priv_data; 1319 struct MPT2SAS_TARGET *sas_target_priv_data;
1285 struct scsi_target *starget; 1320 struct scsi_target *starget;
@@ -1295,13 +1330,13 @@ scsih_slave_destroy(struct scsi_device *sdev)
1295} 1330}
1296 1331
1297/** 1332/**
1298 * scsih_display_sata_capabilities - sata capabilities 1333 * _scsih_display_sata_capabilities - sata capabilities
1299 * @ioc: per adapter object 1334 * @ioc: per adapter object
1300 * @sas_device: the sas_device object 1335 * @sas_device: the sas_device object
1301 * @sdev: scsi device struct 1336 * @sdev: scsi device struct
1302 */ 1337 */
1303static void 1338static void
1304scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc, 1339_scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
1305 struct _sas_device *sas_device, struct scsi_device *sdev) 1340 struct _sas_device *sas_device, struct scsi_device *sdev)
1306{ 1341{
1307 Mpi2ConfigReply_t mpi_reply; 1342 Mpi2ConfigReply_t mpi_reply;
@@ -1401,14 +1436,14 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
1401} 1436}
1402 1437
1403/** 1438/**
1404 * scsih_slave_configure - device configure routine. 1439 * _scsih_slave_configure - device configure routine.
1405 * @sdev: scsi device struct 1440 * @sdev: scsi device struct
1406 * 1441 *
1407 * Returns 0 if ok. Any other return is assumed to be an error and 1442 * Returns 0 if ok. Any other return is assumed to be an error and
1408 * the device is ignored. 1443 * the device is ignored.
1409 */ 1444 */
1410static int 1445static int
1411scsih_slave_configure(struct scsi_device *sdev) 1446_scsih_slave_configure(struct scsi_device *sdev)
1412{ 1447{
1413 struct Scsi_Host *shost = sdev->host; 1448 struct Scsi_Host *shost = sdev->host;
1414 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1449 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1489,7 +1524,7 @@ scsih_slave_configure(struct scsi_device *sdev)
1489 r_level, raid_device->handle, 1524 r_level, raid_device->handle,
1490 (unsigned long long)raid_device->wwid, 1525 (unsigned long long)raid_device->wwid,
1491 raid_device->num_pds, ds); 1526 raid_device->num_pds, ds);
1492 scsih_change_queue_depth(sdev, qdepth); 1527 _scsih_change_queue_depth(sdev, qdepth);
1493 return 0; 1528 return 0;
1494 } 1529 }
1495 1530
@@ -1532,10 +1567,10 @@ scsih_slave_configure(struct scsi_device *sdev)
1532 sas_device->slot); 1567 sas_device->slot);
1533 1568
1534 if (!ssp_target) 1569 if (!ssp_target)
1535 scsih_display_sata_capabilities(ioc, sas_device, sdev); 1570 _scsih_display_sata_capabilities(ioc, sas_device, sdev);
1536 } 1571 }
1537 1572
1538 scsih_change_queue_depth(sdev, qdepth); 1573 _scsih_change_queue_depth(sdev, qdepth);
1539 1574
1540 if (ssp_target) 1575 if (ssp_target)
1541 sas_read_port_mode_page(sdev); 1576 sas_read_port_mode_page(sdev);
@@ -1543,7 +1578,7 @@ scsih_slave_configure(struct scsi_device *sdev)
1543} 1578}
1544 1579
1545/** 1580/**
1546 * scsih_bios_param - fetch head, sector, cylinder info for a disk 1581 * _scsih_bios_param - fetch head, sector, cylinder info for a disk
1547 * @sdev: scsi device struct 1582 * @sdev: scsi device struct
1548 * @bdev: pointer to block device context 1583 * @bdev: pointer to block device context
1549 * @capacity: device size (in 512 byte sectors) 1584 * @capacity: device size (in 512 byte sectors)
@@ -1555,7 +1590,7 @@ scsih_slave_configure(struct scsi_device *sdev)
1555 * Returns 0. 1590 * Returns 0.
1556 */ 1591 */
1557static int 1592static int
1558scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, 1593_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
1559 sector_t capacity, int params[]) 1594 sector_t capacity, int params[])
1560{ 1595{
1561 int heads; 1596 int heads;
@@ -1636,7 +1671,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
1636} 1671}
1637 1672
1638/** 1673/**
1639 * scsih_tm_done - tm completion routine 1674 * _scsih_tm_done - tm completion routine
1640 * @ioc: per adapter object 1675 * @ioc: per adapter object
1641 * @smid: system request message index 1676 * @smid: system request message index
1642 * @VF_ID: virtual function id 1677 * @VF_ID: virtual function id
@@ -1648,7 +1683,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
1648 * Return nothing. 1683 * Return nothing.
1649 */ 1684 */
1650static void 1685static void
1651scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) 1686_scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
1652{ 1687{
1653 MPI2DefaultReply_t *mpi_reply; 1688 MPI2DefaultReply_t *mpi_reply;
1654 1689
@@ -1823,13 +1858,13 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
1823} 1858}
1824 1859
1825/** 1860/**
1826 * scsih_abort - eh threads main abort routine 1861 * _scsih_abort - eh threads main abort routine
1827 * @scmd: pointer to scsi command object 1862 * @scmd: pointer to scsi command object
1828 * 1863 *
1829 * Returns SUCCESS if command aborted else FAILED 1864 * Returns SUCCESS if command aborted else FAILED
1830 */ 1865 */
1831static int 1866static int
1832scsih_abort(struct scsi_cmnd *scmd) 1867_scsih_abort(struct scsi_cmnd *scmd)
1833{ 1868{
1834 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 1869 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1835 struct MPT2SAS_DEVICE *sas_device_priv_data; 1870 struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -1889,15 +1924,86 @@ scsih_abort(struct scsi_cmnd *scmd)
1889 return r; 1924 return r;
1890} 1925}
1891 1926
1927/**
1928 * _scsih_dev_reset - eh threads main device reset routine
1929 * @scmd: pointer to scsi command object
1930 *
1931 * Returns SUCCESS if the logical unit was reset else FAILED
1932 */
1933static int
1934_scsih_dev_reset(struct scsi_cmnd *scmd)
1935{
1936 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1937 struct MPT2SAS_DEVICE *sas_device_priv_data;
1938 struct _sas_device *sas_device;
1939 unsigned long flags;
1940 u16 handle;
1941 int r;
1942
1943 printk(MPT2SAS_INFO_FMT "attempting device reset! scmd(%p)\n",
1944 ioc->name, scmd);
1945 scsi_print_command(scmd);
1946
1947 sas_device_priv_data = scmd->device->hostdata;
1948 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
1949 printk(MPT2SAS_INFO_FMT "device has been deleted! scmd(%p)\n",
1950 ioc->name, scmd);
1951 scmd->result = DID_NO_CONNECT << 16;
1952 scmd->scsi_done(scmd);
1953 r = SUCCESS;
1954 goto out;
1955 }
1956
1957 /* for hidden raid components obtain the volume_handle */
1958 handle = 0;
1959 if (sas_device_priv_data->sas_target->flags &
1960 MPT_TARGET_FLAGS_RAID_COMPONENT) {
1961 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1962 sas_device = _scsih_sas_device_find_by_handle(ioc,
1963 sas_device_priv_data->sas_target->handle);
1964 if (sas_device)
1965 handle = sas_device->volume_handle;
1966 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1967 } else
1968 handle = sas_device_priv_data->sas_target->handle;
1969
1970 if (!handle) {
1971 scmd->result = DID_RESET << 16;
1972 r = FAILED;
1973 goto out;
1974 }
1975
1976 mutex_lock(&ioc->tm_cmds.mutex);
1977 mpt2sas_scsih_issue_tm(ioc, handle, 0,
1978 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, scmd->device->lun,
1979 30);
1980
1981 /*
1982 * sanity check: see whether all commands to this device have been
1983 * completed
1984 */
1985 if (_scsih_scsi_lookup_find_by_lun(ioc, scmd->device->id,
1986 scmd->device->lun, scmd->device->channel))
1987 r = FAILED;
1988 else
1989 r = SUCCESS;
1990 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
1991 mutex_unlock(&ioc->tm_cmds.mutex);
1992
1993 out:
1994 printk(MPT2SAS_INFO_FMT "device reset: %s scmd(%p)\n",
1995 ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
1996 return r;
1997}
1892 1998
1893/** 1999/**
1894 * scsih_dev_reset - eh threads main device reset routine 2000 * _scsih_target_reset - eh threads main target reset routine
1895 * @scmd: pointer to scsi command object 2001 * @scmd: pointer to scsi command object
1896 * 2002 *
1897 * Returns SUCCESS if the device was reset else FAILED 2003 * Returns SUCCESS if the target was reset else FAILED
1898 */ 2004 */
1899static int 2005static int
1900scsih_dev_reset(struct scsi_cmnd *scmd) 2006_scsih_target_reset(struct scsi_cmnd *scmd)
1901{ 2007{
1902 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2008 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1903 struct MPT2SAS_DEVICE *sas_device_priv_data; 2009 struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -1912,7 +2018,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
1912 2018
1913 sas_device_priv_data = scmd->device->hostdata; 2019 sas_device_priv_data = scmd->device->hostdata;
1914 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 2020 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
1915 printk(MPT2SAS_INFO_FMT "device has been deleted! scmd(%p)\n", 2021 printk(MPT2SAS_INFO_FMT "target has been deleted! scmd(%p)\n",
1916 ioc->name, scmd); 2022 ioc->name, scmd);
1917 scmd->result = DID_NO_CONNECT << 16; 2023 scmd->result = DID_NO_CONNECT << 16;
1918 scmd->scsi_done(scmd); 2024 scmd->scsi_done(scmd);
@@ -1962,13 +2068,13 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
1962} 2068}
1963 2069
1964/** 2070/**
1965 * scsih_host_reset - eh threads main host reset routine 2071 * _scsih_host_reset - eh threads main host reset routine
1966 * @scmd: pointer to scsi command object 2072 * @scmd: pointer to scsi command object
1967 * 2073 *
1968 * Returns SUCCESS if the host was reset else FAILED 2074 * Returns SUCCESS if the host was reset else FAILED
1969 */ 2075 */
1970static int 2076static int
1971scsih_host_reset(struct scsi_cmnd *scmd) 2077_scsih_host_reset(struct scsi_cmnd *scmd)
1972{ 2078{
1973 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2079 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
1974 int r, retval; 2080 int r, retval;
@@ -2390,7 +2496,107 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
2390} 2496}
2391 2497
2392/** 2498/**
2393 * scsih_qcmd - main scsi request entry point 2499 * _scsih_setup_eedp - setup MPI request for EEDP transfer
2500 * @scmd: pointer to scsi command object
2501 * @mpi_request: pointer to the SCSI_IO request message frame
2502 *
2503 * Supports DIF protection types 1 and 3.
2504 *
2505 * Returns nothing
2506 */
2507static void
2508_scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
2509{
2510 u16 eedp_flags;
2511 unsigned char prot_op = scsi_get_prot_op(scmd);
2512 unsigned char prot_type = scsi_get_prot_type(scmd);
2513
2514 if (prot_type == SCSI_PROT_DIF_TYPE0 ||
2515 prot_type == SCSI_PROT_DIF_TYPE2 ||
2516 prot_op == SCSI_PROT_NORMAL)
2517 return;
2518
2519 if (prot_op == SCSI_PROT_READ_STRIP)
2520 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
2521 else if (prot_op == SCSI_PROT_WRITE_INSERT)
2522 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
2523 else
2524 return;
2525
2526 mpi_request->EEDPBlockSize = scmd->device->sector_size;
2527
2528 switch (prot_type) {
2529 case SCSI_PROT_DIF_TYPE1:
2530
2531 /*
2532 * enable ref/guard checking
2533 * auto increment ref tag
2534 */
2535 mpi_request->EEDPFlags = eedp_flags |
2536 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2537 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2538 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2539 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
2540 cpu_to_be32(scsi_get_lba(scmd));
2541
2542 break;
2543
2544 case SCSI_PROT_DIF_TYPE3:
2545
2546 /*
2547 * enable guard checking
2548 */
2549 mpi_request->EEDPFlags = eedp_flags |
2550 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
2551
2552 break;
2553 }
2554}
2555
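The new _scsih_setup_eedp() above picks EEDP flags per DIF type. Below is a minimal user-space sketch of that flag selection, assuming hypothetical EEDP_* stand-ins for the MPI2_SCSIIO_EEDPFLAGS_* constants; only the branching logic mirrors the driver.

/* A sketch, not the driver code: hypothetical EEDP_* bit values. */
#include <stdio.h>
#include <stdint.h>

enum {	/* stand-ins, not the real MPI2 encoding */
	EEDP_CHECK_REMOVE = 1 << 0,	/* READ_STRIP */
	EEDP_INSERT	  = 1 << 1,	/* WRITE_INSERT */
	EEDP_INC_REFTAG	  = 1 << 2,
	EEDP_CHECK_REFTAG = 1 << 3,
	EEDP_CHECK_GUARD  = 1 << 4,
};

enum dif_type { DIF_TYPE0, DIF_TYPE1, DIF_TYPE2, DIF_TYPE3 };

/* Returns 0 when EEDP must stay disabled for this combination. */
static uint16_t eedp_flags(enum dif_type type, int read_strip)
{
	uint16_t flags;

	if (type == DIF_TYPE0 || type == DIF_TYPE2)
		return 0;	/* the driver only handles types 1 and 3 */

	flags = read_strip ? EEDP_CHECK_REMOVE : EEDP_INSERT;

	if (type == DIF_TYPE1)	/* guard + ref tag, auto-increment ref tag */
		return flags | EEDP_INC_REFTAG | EEDP_CHECK_REFTAG |
		    EEDP_CHECK_GUARD;

	return flags | EEDP_CHECK_GUARD;	/* type 3: guard check only */
}

int main(void)
{
	printf("type1 read strip:   0x%04x\n", (unsigned)eedp_flags(DIF_TYPE1, 1));
	printf("type3 write insert: 0x%04x\n", (unsigned)eedp_flags(DIF_TYPE3, 0));
	return 0;
}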
2556/**
2557 * _scsih_eedp_error_handling - return sense code for EEDP errors
2558 * @scmd: pointer to scsi command object
2559 * @ioc_status: ioc status
2560 *
2561 * Returns nothing
2562 */
2563static void
2564_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
2565{
2566 u8 ascq;
2567 u8 sk;
2568 u8 host_byte;
2569
2570 switch (ioc_status) {
2571 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2572 ascq = 0x01;
2573 break;
2574 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2575 ascq = 0x02;
2576 break;
2577 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2578 ascq = 0x03;
2579 break;
2580 default:
2581 ascq = 0x00;
2582 break;
2583 }
2584
2585 if (scmd->sc_data_direction == DMA_TO_DEVICE) {
2586 sk = ILLEGAL_REQUEST;
2587 host_byte = DID_ABORT;
2588 } else {
2589 sk = ABORTED_COMMAND;
2590 host_byte = DID_OK;
2591 }
2592
2593 scsi_build_sense_buffer(0, scmd->sense_buffer, sk, 0x10, ascq);
2594 scmd->result = DRIVER_SENSE << 24 | (host_byte << 16) |
2595 SAM_STAT_CHECK_CONDITION;
2596}
2597
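_scsih_eedp_error_handling() above composes the SCSI result word as driver byte | host byte | status byte, with ASC 0x10 and an ASCQ of 1/2/3 for guard/app-tag/ref-tag failures. A hedged, self-contained sketch of that composition follows; the numeric constants are stand-ins for the kernel's DRIVER_SENSE, DID_* and SAM_STAT_* values.

/* A sketch with hypothetical stand-in constants. */
#include <stdio.h>
#include <stdint.h>

enum { IOC_EEDP_GUARD = 1, IOC_EEDP_APP = 2, IOC_EEDP_REF = 3 };

enum {	/* stand-ins for DRIVER_SENSE, DID_*, SAM_STAT_* */
	DRV_SENSE = 0x08, HOST_OK = 0x00, HOST_ABORT = 0x05,
	STAT_CHECK_CONDITION = 0x02,
};

static uint8_t eedp_ascq(int ioc_status)
{
	switch (ioc_status) {
	case IOC_EEDP_GUARD: return 0x01;	/* guard check failed */
	case IOC_EEDP_APP:   return 0x02;	/* app tag check failed */
	case IOC_EEDP_REF:   return 0x03;	/* ref tag check failed */
	default:	     return 0x00;
	}
}

/* Writes fail the initiator; reads surface a plain check condition. */
static uint32_t eedp_result(int ioc_status, int to_device)
{
	int host_byte = to_device ? HOST_ABORT : HOST_OK;

	(void)eedp_ascq(ioc_status);	/* would go into the sense buffer */
	return DRV_SENSE << 24 | host_byte << 16 | STAT_CHECK_CONDITION;
}

int main(void)
{
	printf("guard err on write: 0x%08x\n", (unsigned)eedp_result(IOC_EEDP_GUARD, 1));
	printf("ref err on read:    0x%08x\n", (unsigned)eedp_result(IOC_EEDP_REF, 0));
	return 0;
}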
2598/**
2599 * _scsih_qcmd - main scsi request entry point
2394 * @scmd: pointer to scsi command object 2600 * @scmd: pointer to scsi command object
2395 * @done: function pointer to be invoked on completion 2601 * @done: function pointer to be invoked on completion
2396 * 2602 *
@@ -2401,7 +2607,7 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
2401 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full 2607 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
2402 */ 2608 */
2403static int 2609static int
2404scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) 2610_scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2405{ 2611{
2406 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2612 struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2407 struct MPT2SAS_DEVICE *sas_device_priv_data; 2613 struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -2470,6 +2676,7 @@ scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
2470 } 2676 }
2471 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); 2677 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
2472 memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t)); 2678 memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
2679 _scsih_setup_eedp(scmd, mpi_request);
2473 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 2680 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2474 if (sas_device_priv_data->sas_target->flags & 2681 if (sas_device_priv_data->sas_target->flags &
2475 MPT_TARGET_FLAGS_RAID_COMPONENT) 2682 MPT_TARGET_FLAGS_RAID_COMPONENT)
@@ -2604,6 +2811,15 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
2604 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: 2811 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2605 desc_ioc_state = "scsi ext terminated"; 2812 desc_ioc_state = "scsi ext terminated";
2606 break; 2813 break;
2814 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2815 desc_ioc_state = "eedp guard error";
2816 break;
2817 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2818 desc_ioc_state = "eedp ref tag error";
2819 break;
2820 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2821 desc_ioc_state = "eedp app tag error";
2822 break;
2607 default: 2823 default:
2608 desc_ioc_state = "unknown"; 2824 desc_ioc_state = "unknown";
2609 break; 2825 break;
@@ -2783,7 +2999,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2783} 2999}
2784 3000
2785/** 3001/**
2786 * scsih_io_done - scsi request callback 3002 * _scsih_io_done - scsi request callback
2787 * @ioc: per adapter object 3003 * @ioc: per adapter object
2788 * @smid: system request message index 3004 * @smid: system request message index
2789 * @VF_ID: virtual function id 3005 * @VF_ID: virtual function id
@@ -2794,7 +3010,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2794 * Return nothing. 3010 * Return nothing.
2795 */ 3011 */
2796static void 3012static void
2797scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) 3013_scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
2798{ 3014{
2799 Mpi2SCSIIORequest_t *mpi_request; 3015 Mpi2SCSIIORequest_t *mpi_request;
2800 Mpi2SCSIIOReply_t *mpi_reply; 3016 Mpi2SCSIIOReply_t *mpi_reply;
@@ -2939,6 +3155,11 @@ scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
2939 scmd->result = DID_RESET << 16; 3155 scmd->result = DID_RESET << 16;
2940 break; 3156 break;
2941 3157
3158 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
3159 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
3160 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
3161 _scsih_eedp_error_handling(scmd, ioc_status);
3162 break;
2942 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3163 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2943 case MPI2_IOCSTATUS_INVALID_FUNCTION: 3164 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2944 case MPI2_IOCSTATUS_INVALID_SGL: 3165 case MPI2_IOCSTATUS_INVALID_SGL:
@@ -5130,18 +5351,19 @@ static struct scsi_host_template scsih_driver_template = {
5130 .module = THIS_MODULE, 5351 .module = THIS_MODULE,
5131 .name = "Fusion MPT SAS Host", 5352 .name = "Fusion MPT SAS Host",
5132 .proc_name = MPT2SAS_DRIVER_NAME, 5353 .proc_name = MPT2SAS_DRIVER_NAME,
5133 .queuecommand = scsih_qcmd, 5354 .queuecommand = _scsih_qcmd,
5134 .target_alloc = scsih_target_alloc, 5355 .target_alloc = _scsih_target_alloc,
5135 .slave_alloc = scsih_slave_alloc, 5356 .slave_alloc = _scsih_slave_alloc,
5136 .slave_configure = scsih_slave_configure, 5357 .slave_configure = _scsih_slave_configure,
5137 .target_destroy = scsih_target_destroy, 5358 .target_destroy = _scsih_target_destroy,
5138 .slave_destroy = scsih_slave_destroy, 5359 .slave_destroy = _scsih_slave_destroy,
5139 .change_queue_depth = scsih_change_queue_depth, 5360 .change_queue_depth = _scsih_change_queue_depth,
5140 .change_queue_type = scsih_change_queue_type, 5361 .change_queue_type = _scsih_change_queue_type,
5141 .eh_abort_handler = scsih_abort, 5362 .eh_abort_handler = _scsih_abort,
5142 .eh_device_reset_handler = scsih_dev_reset, 5363 .eh_device_reset_handler = _scsih_dev_reset,
5143 .eh_host_reset_handler = scsih_host_reset, 5364 .eh_target_reset_handler = _scsih_target_reset,
5144 .bios_param = scsih_bios_param, 5365 .eh_host_reset_handler = _scsih_host_reset,
5366 .bios_param = _scsih_bios_param,
5145 .can_queue = 1, 5367 .can_queue = 1,
5146 .this_id = -1, 5368 .this_id = -1,
5147 .sg_tablesize = MPT2SAS_SG_DEPTH, 5369 .sg_tablesize = MPT2SAS_SG_DEPTH,
@@ -5228,13 +5450,13 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
5228} 5450}
5229 5451
5230/** 5452/**
5231 * scsih_remove - detach and remove the scsi host 5453 * _scsih_remove - detach and remove the scsi host
5232 * @pdev: PCI device struct 5454 * @pdev: PCI device struct
5233 * 5455 *
5234 * Return nothing. 5456 * Return nothing.
5235 */ 5457 */
5236static void __devexit 5458static void __devexit
5237scsih_remove(struct pci_dev *pdev) 5459_scsih_remove(struct pci_dev *pdev)
5238{ 5460{
5239 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5461 struct Scsi_Host *shost = pci_get_drvdata(pdev);
5240 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 5462 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5442,14 +5664,14 @@ _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
5442} 5664}
5443 5665
5444/** 5666/**
5445 * scsih_probe - attach and add scsi host 5667 * _scsih_probe - attach and add scsi host
5446 * @pdev: PCI device struct 5668 * @pdev: PCI device struct
5447 * @id: pci device id 5669 * @id: pci device id
5448 * 5670 *
5449 * Returns 0 success, anything else error. 5671 * Returns 0 success, anything else error.
5450 */ 5672 */
5451static int 5673static int
5452scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) 5674_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5453{ 5675{
5454 struct MPT2SAS_ADAPTER *ioc; 5676 struct MPT2SAS_ADAPTER *ioc;
5455 struct Scsi_Host *shost; 5677 struct Scsi_Host *shost;
@@ -5503,6 +5725,9 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5503 goto out_add_shost_fail; 5725 goto out_add_shost_fail;
5504 } 5726 }
5505 5727
5728 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
5729 | SHOST_DIF_TYPE3_PROTECTION);
5730
5506 /* event thread */ 5731 /* event thread */
5507 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), 5732 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
5508 "fw_event%d", ioc->id); 5733 "fw_event%d", ioc->id);
@@ -5536,14 +5761,14 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5536 5761
5537#ifdef CONFIG_PM 5762#ifdef CONFIG_PM
5538/** 5763/**
5539 * scsih_suspend - power management suspend main entry point 5764 * _scsih_suspend - power management suspend main entry point
5540 * @pdev: PCI device struct 5765 * @pdev: PCI device struct
5541 * @state: PM state to transition to (usually PCI_D3) 5766 * @state: PM state to transition to (usually PCI_D3)
5542 * 5767 *
5543 * Returns 0 success, anything else error. 5768 * Returns 0 success, anything else error.
5544 */ 5769 */
5545static int 5770static int
5546scsih_suspend(struct pci_dev *pdev, pm_message_t state) 5771_scsih_suspend(struct pci_dev *pdev, pm_message_t state)
5547{ 5772{
5548 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5773 struct Scsi_Host *shost = pci_get_drvdata(pdev);
5549 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 5774 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5564,13 +5789,13 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
5564} 5789}
5565 5790
5566/** 5791/**
5567 * scsih_resume - power management resume main entry point 5792 * _scsih_resume - power management resume main entry point
5568 * @pdev: PCI device struct 5793 * @pdev: PCI device struct
5569 * 5794 *
5570 * Returns 0 success, anything else error. 5795 * Returns 0 success, anything else error.
5571 */ 5796 */
5572static int 5797static int
5573scsih_resume(struct pci_dev *pdev) 5798_scsih_resume(struct pci_dev *pdev)
5574{ 5799{
5575 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5800 struct Scsi_Host *shost = pci_get_drvdata(pdev);
5576 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 5801 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5599,22 +5824,22 @@ scsih_resume(struct pci_dev *pdev)
5599static struct pci_driver scsih_driver = { 5824static struct pci_driver scsih_driver = {
5600 .name = MPT2SAS_DRIVER_NAME, 5825 .name = MPT2SAS_DRIVER_NAME,
5601 .id_table = scsih_pci_table, 5826 .id_table = scsih_pci_table,
5602 .probe = scsih_probe, 5827 .probe = _scsih_probe,
5603 .remove = __devexit_p(scsih_remove), 5828 .remove = __devexit_p(_scsih_remove),
5604#ifdef CONFIG_PM 5829#ifdef CONFIG_PM
5605 .suspend = scsih_suspend, 5830 .suspend = _scsih_suspend,
5606 .resume = scsih_resume, 5831 .resume = _scsih_resume,
5607#endif 5832#endif
5608}; 5833};
5609 5834
5610 5835
5611/** 5836/**
5612 * scsih_init - main entry point for this driver. 5837 * _scsih_init - main entry point for this driver.
5613 * 5838 *
5614 * Returns 0 success, anything else error. 5839 * Returns 0 success, anything else error.
5615 */ 5840 */
5616static int __init 5841static int __init
5617scsih_init(void) 5842_scsih_init(void)
5618{ 5843{
5619 int error; 5844 int error;
5620 5845
@@ -5630,10 +5855,10 @@ scsih_init(void)
5630 mpt2sas_base_initialize_callback_handler(); 5855 mpt2sas_base_initialize_callback_handler();
5631 5856
5632 /* queuecommand callback handler */ 5857 /* queuecommand callback handler */
5633 scsi_io_cb_idx = mpt2sas_base_register_callback_handler(scsih_io_done); 5858 scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done);
5634 5859
5635 /* task management callback handler */ 5860 /* task management callback handler */
5636 tm_cb_idx = mpt2sas_base_register_callback_handler(scsih_tm_done); 5861 tm_cb_idx = mpt2sas_base_register_callback_handler(_scsih_tm_done);
5637 5862
5638 /* base internal commands callback handler */ 5863 /* base internal commands callback handler */
5639 base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done); 5864 base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done);
@@ -5659,12 +5884,12 @@ scsih_init(void)
5659} 5884}
5660 5885
5661/** 5886/**
5662 * scsih_exit - exit point for this driver (when it is a module). 5887 * _scsih_exit - exit point for this driver (when it is a module).
5663 * 5888 *
5664 * Returns nothing. 5889 * Returns nothing.
5665 */ 5890 */
5666static void __exit 5891static void __exit
5667scsih_exit(void) 5892_scsih_exit(void)
5668{ 5893{
5669 printk(KERN_INFO "mpt2sas version %s unloading\n", 5894 printk(KERN_INFO "mpt2sas version %s unloading\n",
5670 MPT2SAS_DRIVER_VERSION); 5895 MPT2SAS_DRIVER_VERSION);
@@ -5682,5 +5907,5 @@ scsih_exit(void)
5682 mpt2sas_ctl_exit(); 5907 mpt2sas_ctl_exit();
5683} 5908}
5684 5909
5685module_init(scsih_init); 5910module_init(_scsih_init);
5686module_exit(scsih_exit); 5911module_exit(_scsih_exit);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index e03dc0b1e1a..686695b155c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -264,7 +264,7 @@ struct rep_manu_reply{
264}; 264};
265 265
266/** 266/**
267 * transport_expander_report_manufacture - obtain SMP report_manufacture 267 * _transport_expander_report_manufacture - obtain SMP report_manufacture
268 * @ioc: per adapter object 268 * @ioc: per adapter object
269 * @sas_address: expander sas address 269 * @sas_address: expander sas address
270 * @edev: the sas_expander_device object 270 * @edev: the sas_expander_device object
@@ -274,7 +274,7 @@ struct rep_manu_reply{
274 * Returns 0 for success, non-zero for failure. 274 * Returns 0 for success, non-zero for failure.
275 */ 275 */
276static int 276static int
277transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, 277_transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
278 u64 sas_address, struct sas_expander_device *edev) 278 u64 sas_address, struct sas_expander_device *edev)
279{ 279{
280 Mpi2SmpPassthroughRequest_t *mpi_request; 280 Mpi2SmpPassthroughRequest_t *mpi_request;
@@ -578,7 +578,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
578 MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER || 578 MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
579 mpt2sas_port->remote_identify.device_type == 579 mpt2sas_port->remote_identify.device_type ==
580 MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) 580 MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
581 transport_expander_report_manufacture(ioc, 581 _transport_expander_report_manufacture(ioc,
582 mpt2sas_port->remote_identify.sas_address, 582 mpt2sas_port->remote_identify.sas_address,
583 rphy_to_expander_device(rphy)); 583 rphy_to_expander_device(rphy));
584 584
@@ -852,7 +852,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
852} 852}
853 853
854/** 854/**
855 * transport_get_linkerrors - fetch phy link error counters 855 * _transport_get_linkerrors - fetch phy link error counters
856 * @phy: The sas phy object 856 * @phy: The sas phy object
857 * 857 *
858 * Only support sas_host direct attached phys. 858 * Only support sas_host direct attached phys.
@@ -860,7 +860,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
860 * 860 *
861 */ 861 */
862static int 862static int
863transport_get_linkerrors(struct sas_phy *phy) 863_transport_get_linkerrors(struct sas_phy *phy)
864{ 864{
865 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); 865 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
866 struct _sas_phy *mpt2sas_phy; 866 struct _sas_phy *mpt2sas_phy;
@@ -903,14 +903,14 @@ transport_get_linkerrors(struct sas_phy *phy)
903} 903}
904 904
905/** 905/**
906 * transport_get_enclosure_identifier - obtain enclosure logical id 906 * _transport_get_enclosure_identifier - obtain enclosure logical id
907 * @rphy: the sas transport rphy object 907 * @rphy: the sas transport rphy object
908 * 908 *
909 * Obtain the enclosure logical id for an expander. 909 * Obtain the enclosure logical id for an expander.
910 * Returns 0 for success, non-zero for failure. 910 * Returns 0 for success, non-zero for failure.
911 */ 911 */
912static int 912static int
913transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) 913_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
914{ 914{
915 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); 915 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
916 struct _sas_node *sas_expander; 916 struct _sas_node *sas_expander;
@@ -929,13 +929,13 @@ transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
929} 929}
930 930
931/** 931/**
932 * transport_get_bay_identifier - obtain the device slot id 932 * _transport_get_bay_identifier - obtain the device slot id
933 * @rphy: the sas transport rphy object 933 * @rphy: the sas transport rphy object
934 * 934 *
935 * Returns the slot id for a device that resides inside an enclosure. 935 * Returns the slot id for a device that resides inside an enclosure.
936 */ 936 */
937static int 937static int
938transport_get_bay_identifier(struct sas_rphy *rphy) 938_transport_get_bay_identifier(struct sas_rphy *rphy)
939{ 939{
940 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); 940 struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
941 struct _sas_device *sas_device; 941 struct _sas_device *sas_device;
@@ -953,7 +953,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
953} 953}
954 954
955/** 955/**
956 * transport_phy_reset - reset a phy 956 * _transport_phy_reset - reset a phy
957 * @phy: The sas phy object 957 * @phy: The sas phy object
958 * @hard_reset: set for a hard reset, clear for a link reset 958 * @hard_reset: set for a hard reset, clear for a link reset
959 * 959 *
@@ -961,7 +961,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
961 * Returns 0 for success, non-zero for failure. 961 * Returns 0 for success, non-zero for failure.
962 */ 962 */
963static int 963static int
964transport_phy_reset(struct sas_phy *phy, int hard_reset) 964_transport_phy_reset(struct sas_phy *phy, int hard_reset)
965{ 965{
966 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); 966 struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
967 struct _sas_phy *mpt2sas_phy; 967 struct _sas_phy *mpt2sas_phy;
@@ -1002,7 +1002,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
1002} 1002}
1003 1003
1004/** 1004/**
1005 * transport_smp_handler - transport portal for smp passthru 1005 * _transport_smp_handler - transport portal for smp passthru
1006 * @shost: shost object 1006 * @shost: shost object
1007 * @rphy: sas transport rphy object 1007 * @rphy: sas transport rphy object
1008 * @req: the bsg request 1008 * @req: the bsg request
@@ -1012,7 +1012,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
1012 * smp_rep_general /sys/class/bsg/expander-5:0 1012 * smp_rep_general /sys/class/bsg/expander-5:0
1013 */ 1013 */
1014static int 1014static int
1015transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, 1015_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1016 struct request *req) 1016 struct request *req)
1017{ 1017{
1018 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1018 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1041,7 +1041,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1041 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1041 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1042 printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, " 1042 printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, "
1043 "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt, 1043 "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt,
1044 req->data_len, rsp->bio->bi_vcnt, rsp->data_len); 1044 blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
1045 return -EINVAL; 1045 return -EINVAL;
1046 } 1046 }
1047 1047
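These hunks replace direct reads of req->data_len with the blk_rq_bytes() accessor and report residuals through resid_len. A hedged, self-contained sketch of that accessor pattern follows; request_like and rq_bytes are hypothetical stand-ins, not the block layer's types.

/* Sketch only: hypothetical types modeling the accessor pattern. */
#include <stdio.h>

struct request_like {
	unsigned int __data_len;	/* owned by the block layer */
	unsigned int resid_len;		/* bytes left over at completion */
};

static unsigned int rq_bytes(const struct request_like *rq)
{
	/* callers stop touching the field and go through the helper,
	 * so the block layer is free to change how it stores the count */
	return rq->__data_len;
}

int main(void)
{
	struct request_like req = { .__data_len = 1024, .resid_len = 0 };

	printf("%u bytes\n", rq_bytes(&req));
	return 0;
}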
@@ -1104,7 +1104,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1104 *((u64 *)&mpi_request->SASAddress) = (rphy) ? 1104 *((u64 *)&mpi_request->SASAddress) = (rphy) ?
1105 cpu_to_le64(rphy->identify.sas_address) : 1105 cpu_to_le64(rphy->identify.sas_address) :
1106 cpu_to_le64(ioc->sas_hba.sas_address); 1106 cpu_to_le64(ioc->sas_hba.sas_address);
1107 mpi_request->RequestDataLength = cpu_to_le16(req->data_len - 4); 1107 mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
1108 psge = &mpi_request->SGL; 1108 psge = &mpi_request->SGL;
1109 1109
1110 /* WRITE sgel first */ 1110 /* WRITE sgel first */
@@ -1112,13 +1112,13 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1112 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); 1112 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1113 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1113 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1114 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), 1114 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
1115 req->data_len, PCI_DMA_BIDIRECTIONAL); 1115 blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
1116 if (!dma_addr_out) { 1116 if (!dma_addr_out) {
1117 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1117 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
1118 goto unmap; 1118 goto unmap;
1119 } 1119 }
1120 1120
1121 ioc->base_add_sg_single(psge, sgl_flags | (req->data_len - 4), 1121 ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4),
1122 dma_addr_out); 1122 dma_addr_out);
1123 1123
1124 /* incr sgel */ 1124 /* incr sgel */
@@ -1129,14 +1129,14 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1129 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | 1129 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1130 MPI2_SGE_FLAGS_END_OF_LIST); 1130 MPI2_SGE_FLAGS_END_OF_LIST);
1131 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1131 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1132 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), 1132 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
1133 rsp->data_len, PCI_DMA_BIDIRECTIONAL); 1133 blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
1134 if (!dma_addr_in) { 1134 if (!dma_addr_in) {
1135 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1135 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
1136 goto unmap; 1136 goto unmap;
1137 } 1137 }
1138 1138
1139 ioc->base_add_sg_single(psge, sgl_flags | (rsp->data_len + 4), 1139 ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4),
1140 dma_addr_in); 1140 dma_addr_in);
1141 1141
1142 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - " 1142 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - "
@@ -1170,9 +1170,8 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1170 1170
1171 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply)); 1171 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
1172 req->sense_len = sizeof(*mpi_reply); 1172 req->sense_len = sizeof(*mpi_reply);
1173 req->data_len = 0; 1173 req->resid_len = 0;
1174 rsp->data_len -= mpi_reply->ResponseDataLength; 1174 rsp->resid_len -= mpi_reply->ResponseDataLength;
1175
1176 } else { 1175 } else {
1177 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT 1176 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
1178 "%s - no reply\n", ioc->name, __func__)); 1177 "%s - no reply\n", ioc->name, __func__));
@@ -1188,10 +1187,10 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1188 1187
1189 unmap: 1188 unmap:
1190 if (dma_addr_out) 1189 if (dma_addr_out)
1191 pci_unmap_single(ioc->pdev, dma_addr_out, req->data_len, 1190 pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
1192 PCI_DMA_BIDIRECTIONAL); 1191 PCI_DMA_BIDIRECTIONAL);
1193 if (dma_addr_in) 1192 if (dma_addr_in)
1194 pci_unmap_single(ioc->pdev, dma_addr_in, rsp->data_len, 1193 pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
1195 PCI_DMA_BIDIRECTIONAL); 1194 PCI_DMA_BIDIRECTIONAL);
1196 1195
1197 out: 1196 out:
@@ -1201,11 +1200,11 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1201} 1200}
1202 1201
1203struct sas_function_template mpt2sas_transport_functions = { 1202struct sas_function_template mpt2sas_transport_functions = {
1204 .get_linkerrors = transport_get_linkerrors, 1203 .get_linkerrors = _transport_get_linkerrors,
1205 .get_enclosure_identifier = transport_get_enclosure_identifier, 1204 .get_enclosure_identifier = _transport_get_enclosure_identifier,
1206 .get_bay_identifier = transport_get_bay_identifier, 1205 .get_bay_identifier = _transport_get_bay_identifier,
1207 .phy_reset = transport_phy_reset, 1206 .phy_reset = _transport_phy_reset,
1208 .smp_handler = transport_smp_handler, 1207 .smp_handler = _transport_smp_handler,
1209}; 1208};
1210 1209
1211struct scsi_transport_template *mpt2sas_transport_template; 1210struct scsi_transport_template *mpt2sas_transport_template;
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
deleted file mode 100644
index e4acebd10d1..00000000000
--- a/drivers/scsi/mvsas.c
+++ /dev/null
@@ -1,3222 +0,0 @@
1/*
2 mvsas.c - Marvell 88SE6440 SAS/SATA support
3
4 Copyright 2007 Red Hat, Inc.
5 Copyright 2008 Marvell. <kewei@marvell.com>
6
7 This program is free software; you can redistribute it and/or
8 modify it under the terms of the GNU General Public License as
9 published by the Free Software Foundation; either version 2,
10 or (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty
14 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 See the GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public
18 License along with this program; see the file COPYING. If not,
19 write to the Free Software Foundation, 675 Mass Ave, Cambridge,
20 MA 02139, USA.
21
22 ---------------------------------------------------------------
23
24 Random notes:
25 * hardware supports controlling the endian-ness of data
26 structures. This permits elimination of all the le32_to_cpu()
27 and cpu_to_le32() conversions.
28
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/pci.h>
34#include <linux/interrupt.h>
35#include <linux/spinlock.h>
36#include <linux/delay.h>
37#include <linux/dma-mapping.h>
38#include <linux/ctype.h>
39#include <scsi/libsas.h>
40#include <scsi/scsi_tcq.h>
41#include <scsi/sas_ata.h>
42#include <asm/io.h>
43
44#define DRV_NAME "mvsas"
45#define DRV_VERSION "0.5.2"
46#define _MV_DUMP 0
47#define MVS_DISABLE_NVRAM
48#define MVS_DISABLE_MSI
49
50#define mr32(reg) readl(regs + MVS_##reg)
51#define mw32(reg,val) writel((val), regs + MVS_##reg)
52#define mw32_f(reg,val) do { \
53 writel((val), regs + MVS_##reg); \
54 readl(regs + MVS_##reg); \
55 } while (0)
56
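/*
 * Illustrative expansion of the accessor macros above (an editorial
 * sketch, not part of the original file); 'regs' is the driver's
 * mapped BAR4 base, and MVS_GBL_CTL/INT_EN/HBA_RST are defined later
 * in this listing:
 *
 *	u32 ctl = mr32(GBL_CTL);	 -> readl(regs + MVS_GBL_CTL)
 *	mw32(GBL_CTL, ctl | INT_EN);	 -> writel(ctl | INT_EN, regs + MVS_GBL_CTL)
 *	mw32_f(GBL_CTL, ctl | HBA_RST);	 -> write, then read back to post it
 */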
57#define MVS_ID_NOT_MAPPED 0x7f
58#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
59
60/* offset for D2H FIS in the Received FIS List Structure */
61#define SATA_RECEIVED_D2H_FIS(reg_set) \
62 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
63#define SATA_RECEIVED_PIO_FIS(reg_set) \
64 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
65#define UNASSOC_D2H_FIS(id) \
66 ((void *) mvi->rx_fis + 0x100 * id)
67
68#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \
69 for ((__mc) = (__lseq_mask), (__lseq) = 0; \
70 (__mc) != 0 && __rest; \
71 (++__lseq), (__mc) >>= 1)
72
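/*
 * Hedged usage sketch for the iterator above (not from the original
 * file): walk the phys named in a bitmask, bounding the loop with the
 * __rest condition the way the driver's call sites do; handle_phy is
 * a hypothetical handler.
 *
 *	u32 mc;
 *	int i, n = mvi->chip->n_phy;
 *
 *	for_each_phy(phy_mask, mc, i, i < n) {
 *		if (mc & 1)
 *			handle_phy(mvi, i);
 *	}
 */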
73/* driver compile-time configuration */
74enum driver_configuration {
75 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
76 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
77 /* software requires power-of-2
78 ring size */
79
80 MVS_SLOTS = 512, /* command slots */
81 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
82 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
83 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
84 MVS_OAF_SZ = 64, /* Open address frame buffer size */
85
86 MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */
87
88 MVS_QUEUE_SIZE = 30, /* Support Queue depth */
89 MVS_CAN_QUEUE = MVS_SLOTS - 1, /* SCSI Queue depth */
90};
91
92/* unchangeable hardware details */
93enum hardware_details {
94 MVS_MAX_PHYS = 8, /* max. possible phys */
95 MVS_MAX_PORTS = 8, /* max. possible ports */
96 MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100),
97};
98
99/* peripheral registers (BAR2) */
100enum peripheral_registers {
101 SPI_CTL = 0x10, /* EEPROM control */
102 SPI_CMD = 0x14, /* EEPROM command */
103 SPI_DATA = 0x18, /* EEPROM data */
104};
105
106enum peripheral_register_bits {
107 TWSI_RDY = (1U << 7), /* EEPROM interface ready */
108 TWSI_RD = (1U << 4), /* EEPROM read access */
109
110 SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
111};
112
113/* enhanced mode registers (BAR4) */
114enum hw_registers {
115 MVS_GBL_CTL = 0x04, /* global control */
116 MVS_GBL_INT_STAT = 0x08, /* global irq status */
117 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
118 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
119
120 MVS_CTL = 0x100, /* SAS/SATA port configuration */
121 MVS_PCS = 0x104, /* SAS/SATA port control/status */
122 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
123 MVS_CMD_LIST_HI = 0x10C,
124 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
125 MVS_RX_FIS_HI = 0x114,
126
127 MVS_TX_CFG = 0x120, /* TX configuration */
128 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
129 MVS_TX_HI = 0x128,
130
131 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
132 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
133 MVS_RX_CFG = 0x134, /* RX configuration */
134 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
135 MVS_RX_HI = 0x13C,
136 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
137
138 MVS_INT_COAL = 0x148, /* Int coalescing config */
139 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
140 MVS_INT_STAT = 0x150, /* Central int status */
141 MVS_INT_MASK = 0x154, /* Central int enable */
142 MVS_INT_STAT_SRS = 0x158, /* SATA register set status */
143 MVS_INT_MASK_SRS = 0x15C,
144
145 /* ports 1-3 follow after this */
146 MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
147 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
148 MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */
149 MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */
150
151 /* ports 1-3 follow after this */
152 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
153 MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
154
155 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
156 MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
157
158 /* ports 1-3 follow after this */
159 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
160 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
161 MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */
162 MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */
163
164 /* ports 1-3 follow after this */
165 MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
166 MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
167 MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */
168 MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */
169};
170
171enum hw_register_bits {
172 /* MVS_GBL_CTL */
173 INT_EN = (1U << 1), /* Global int enable */
174 HBA_RST = (1U << 0), /* HBA reset */
175
176 /* MVS_GBL_INT_STAT */
177 INT_XOR = (1U << 4), /* XOR engine event */
178 INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
179
180 /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
181 SATA_TARGET = (1U << 16), /* port0 SATA target enable */
182 MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
183 MODE_AUTO_DET_PORT6 = (1U << 14),
184 MODE_AUTO_DET_PORT5 = (1U << 13),
185 MODE_AUTO_DET_PORT4 = (1U << 12),
186 MODE_AUTO_DET_PORT3 = (1U << 11),
187 MODE_AUTO_DET_PORT2 = (1U << 10),
188 MODE_AUTO_DET_PORT1 = (1U << 9),
189 MODE_AUTO_DET_PORT0 = (1U << 8),
190 MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
191 MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
192 MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
193 MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
194 MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
195 MODE_SAS_PORT6_MASK = (1U << 6),
196 MODE_SAS_PORT5_MASK = (1U << 5),
197 MODE_SAS_PORT4_MASK = (1U << 4),
198 MODE_SAS_PORT3_MASK = (1U << 3),
199 MODE_SAS_PORT2_MASK = (1U << 2),
200 MODE_SAS_PORT1_MASK = (1U << 1),
201 MODE_SAS_PORT0_MASK = (1U << 0),
202 MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
203 MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
204 MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
205 MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
206
207 /* SAS_MODE value may be
208 * dictated (in hw) by values
209 * of SATA_TARGET & AUTO_DET
210 */
211
212 /* MVS_TX_CFG */
213 TX_EN = (1U << 16), /* Enable TX */
214 TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
215
216 /* MVS_RX_CFG */
217 RX_EN = (1U << 16), /* Enable RX */
218 RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
219
220 /* MVS_INT_COAL */
221 COAL_EN = (1U << 16), /* Enable int coalescing */
222
223 /* MVS_INT_STAT, MVS_INT_MASK */
224 CINT_I2C = (1U << 31), /* I2C event */
225 CINT_SW0 = (1U << 30), /* software event 0 */
226 CINT_SW1 = (1U << 29), /* software event 1 */
227 CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
228 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
229 CINT_MEM = (1U << 26), /* int mem parity err */
230 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
231 CINT_SRS = (1U << 3), /* SRS event */
232 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
233 CINT_DONE = (1U << 0), /* cmd completion */
234
235 /* shl for ports 1-3 */
236 CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
237 CINT_PORT = (1U << 8), /* port0 event */
238 CINT_PORT_MASK_OFFSET = 8,
239 CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
240
241 /* TX (delivery) ring bits */
242 TXQ_CMD_SHIFT = 29,
243 TXQ_CMD_SSP = 1, /* SSP protocol */
244 TXQ_CMD_SMP = 2, /* SMP protocol */
245 TXQ_CMD_STP = 3, /* STP/SATA protocol */
246 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
247 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
248 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
249 TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
250 TXQ_SRS_SHIFT = 20, /* SATA register set */
251 TXQ_SRS_MASK = 0x7f,
252 TXQ_PHY_SHIFT = 12, /* PHY bitmap */
253 TXQ_PHY_MASK = 0xff,
254 TXQ_SLOT_MASK = 0xfff, /* slot number */
255
256 /* RX (completion) ring bits */
257 RXQ_GOOD = (1U << 23), /* Response good */
258 RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
259 RXQ_CMD_RX = (1U << 20), /* target cmd received */
260 RXQ_ATTN = (1U << 19), /* attention */
261 RXQ_RSP = (1U << 18), /* response frame xfer'd */
262 RXQ_ERR = (1U << 17), /* err info rec xfer'd */
263 RXQ_DONE = (1U << 16), /* cmd complete */
264 RXQ_SLOT_MASK = 0xfff, /* slot number */
265
266 /* mvs_cmd_hdr bits */
267 MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
268 MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
269
270 /* SSP initiator only */
271 MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
272
273 /* SSP initiator or target */
274 MCH_SSP_FR_TASK = 0x1, /* TASK frame */
275
276 /* SSP target only */
277 MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
278 MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
279 MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
280 MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
281
282 MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
283 MCH_FBURST = (1U << 11), /* first burst (SSP) */
284 MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
285 MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
286 MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
287 MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
288 MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
289 MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
290 MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
291 MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
292
293 CCTL_RST = (1U << 5), /* port logic reset */
294
295 /* 0(LSB first), 1(MSB first) */
296 CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
297 CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
298 CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
299 CCTL_ENDIAN_CMD = (1U << 0), /* command table */
300
301 /* MVS_Px_SER_CTLSTAT (per-phy control) */
302 PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
303 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
304 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
305 PHY_RST = (1U << 0), /* phy reset */
306 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
307 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
308 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
309 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
310 (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
311 PHY_READY_MASK = (1U << 20),
312
313 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
314 PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
315 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
316 PHYEV_AN = (1U << 18), /* SATA async notification */
317 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
318 PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
319 PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
320 PHYEV_IU_BIG = (1U << 11), /* IU too long err */
321 PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
322 PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
323 PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
324 PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
325 PHYEV_PORT_SEL = (1U << 6), /* port selector present */
326 PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
327 PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
328 PHYEV_ID_FAIL = (1U << 3), /* identify failed */
329 PHYEV_ID_DONE = (1U << 2), /* identify done */
330 PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
331 PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
332
333 /* MVS_PCS */
334 PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
335 PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
336 PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */
337 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
338 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
339 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
340 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
341 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
342 PCS_CMD_RST = (1U << 1), /* reset cmd issue */
343 PCS_CMD_EN = (1U << 0), /* enable cmd issue */
344
345 /* Port n Attached Device Info */
346 PORT_DEV_SSP_TRGT = (1U << 19),
347 PORT_DEV_SMP_TRGT = (1U << 18),
348 PORT_DEV_STP_TRGT = (1U << 17),
349 PORT_DEV_SSP_INIT = (1U << 11),
350 PORT_DEV_SMP_INIT = (1U << 10),
351 PORT_DEV_STP_INIT = (1U << 9),
352 PORT_PHY_ID_MASK = (0xFFU << 24),
353 PORT_DEV_TRGT_MASK = (0x7U << 17),
354 PORT_DEV_INIT_MASK = (0x7U << 9),
355 PORT_DEV_TYPE_MASK = (0x7U << 0),
356
357 /* Port n PHY Status */
358 PHY_RDY = (1U << 2),
359 PHY_DW_SYNC = (1U << 1),
360 PHY_OOB_DTCTD = (1U << 0),
361
362 /* VSR */
363 /* PHYMODE 6 (CDB) */
364 PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */
365 PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */
366 PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/
367 PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */
368 PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */
369 PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */
370 PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */
371 PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */
372 PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */
373 PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */
374 PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */
375 PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */
376 PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */
377 PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */
378};
379
380enum mvs_info_flags {
381 MVF_MSI = (1U << 0), /* MSI is enabled */
382 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
383};
384
385enum sas_cmd_port_registers {
386 CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
387 CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
388 CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
389 CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
390 CMD_OOB_SPACE = 0x110, /* OOB space control register */
391 CMD_OOB_BURST = 0x114, /* OOB burst control register */
392 CMD_PHY_TIMER = 0x118, /* PHY timer control register */
393 CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
394 CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
395 CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
396 CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
397 CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
398 CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
399 CMD_ID_TEST = 0x134, /* ID test register */
400 CMD_PL_TIMER = 0x138, /* PL timer register */
401 CMD_WD_TIMER = 0x13c, /* WD timer register */
402 CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
403 CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
404 CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
405 CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
406 CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
407 CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
408 CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
409 CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
410 CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
411 CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memory BIST Status */
412 CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
413 CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
414 CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
415 CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
416 CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
417 CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
418 CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
419 CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
420 CMD_RESET_COUNT = 0x188, /* Reset Count */
421 CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
422 CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
423 CMD_PHY_CTL = 0x194, /* PHY Control and Status */
424 CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
425 CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
426 CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
427 CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
428 CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
429 CMD_HOST_CTL = 0x1AC, /* Host Control Status */
430 CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
431 CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
432 CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
433 CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
434 CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
435 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
436};
437
438/* SAS/SATA configuration port registers, aka phy registers */
439enum sas_sata_config_port_regs {
440 PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
441 PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
442 PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
443 PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
444 PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
445 PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
446 PHYR_SATA_CTL = 0x18, /* SATA control */
447 PHYR_PHY_STAT = 0x1C, /* PHY status */
448 PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */
449 PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */
450 PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */
451 PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */
452 PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
453 PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
454 PHYR_WIDE_PORT = 0x38, /* wide port participating */
455 PHYR_CURRENT0 = 0x80, /* current connection info 0 */
456 PHYR_CURRENT1 = 0x84, /* current connection info 1 */
457 PHYR_CURRENT2 = 0x88, /* current connection info 2 */
458};
459
460/* SAS/SATA Vendor Specific Port Registers */
461enum sas_sata_vsp_regs {
462 VSR_PHY_STAT = 0x00, /* Phy Status */
463 VSR_PHY_MODE1 = 0x01, /* phy tx */
464 VSR_PHY_MODE2 = 0x02, /* tx scc */
465 VSR_PHY_MODE3 = 0x03, /* pll */
466 VSR_PHY_MODE4 = 0x04, /* VCO */
467 VSR_PHY_MODE5 = 0x05, /* Rx */
468 VSR_PHY_MODE6 = 0x06, /* CDR */
469 VSR_PHY_MODE7 = 0x07, /* Impedance */
470 VSR_PHY_MODE8 = 0x08, /* Voltage */
471 VSR_PHY_MODE9 = 0x09, /* Test */
472 VSR_PHY_MODE10 = 0x0A, /* Power */
473 VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
474 VSR_PHY_VS0 = 0x0C, /* Vendor Specific 0 */
475 VSR_PHY_VS1 = 0x0D, /* Vendor Specific 1 */
476};
477
478enum pci_cfg_registers {
479 PCR_PHY_CTL = 0x40,
480 PCR_PHY_CTL2 = 0x90,
481 PCR_DEV_CTRL = 0xE8,
482};
483
484enum pci_cfg_register_bits {
485 PCTL_PWR_ON = (0xFU << 24),
486 PCTL_OFF = (0xFU << 12),
487 PRD_REQ_SIZE = (0x4000),
488 PRD_REQ_MASK = (0x00007000),
489};
490
491enum nvram_layout_offsets {
492 NVR_SIG = 0x00, /* 0xAA, 0x55 */
493 NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */
494};
495
496enum chip_flavors {
497 chip_6320,
498 chip_6440,
499 chip_6480,
500};
501
502enum port_type {
503 PORT_TYPE_SAS = (1L << 1),
504 PORT_TYPE_SATA = (1L << 0),
505};
506
507/* Command Table Format */
508enum ct_format {
509 /* SSP */
510 SSP_F_H = 0x00,
511 SSP_F_IU = 0x18,
512 SSP_F_MAX = 0x4D,
513 /* STP */
514 STP_CMD_FIS = 0x00,
515 STP_ATAPI_CMD = 0x40,
516 STP_F_MAX = 0x10,
517 /* SMP */
518 SMP_F_T = 0x00,
519 SMP_F_DEP = 0x01,
520 SMP_F_MAX = 0x101,
521};
522
523enum status_buffer {
524 SB_EIR_OFF = 0x00, /* Error Information Record */
525 SB_RFB_OFF = 0x08, /* Response Frame Buffer */
526 SB_RFB_MAX = 0x400, /* RFB size */
527};
528
529enum error_info_rec {
530 CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
531 CMD_PI_ERR = (1U << 30), /* Protection info error. see flags2 */
532 RSP_OVER = (1U << 29), /* rsp buffer overflow */
533 RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */
534 UNK_FIS = (1U << 27), /* unknown FIS */
535 DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */
536 SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */
537 TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */
538 R_ERR = (1U << 23), /* SATA returned R_ERR prim */
539 RD_OFS = (1U << 20), /* Read DATA frame invalid offset */
540 XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */
541 UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */
542 DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */
543 INTERLOCK = (1U << 15), /* interlock error */
544 NAK = (1U << 14), /* NAK rx'd */
545 ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */
546 CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */
547 OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */
548 PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */
549 NO_DEST = (1U << 9), /* I_T nexus lost, no destination */
550 STP_RES_BSY = (1U << 8), /* STP resources busy */
551 BREAK = (1U << 7), /* break received */
552 BAD_DEST = (1U << 6), /* bad destination */
553 BAD_PROTO = (1U << 5), /* protocol not supported */
554 BAD_RATE = (1U << 4), /* cxn rate not supported */
555 WRONG_DEST = (1U << 3), /* wrong destination error */
556 CREDIT_TO = (1U << 2), /* credit timeout */
557 WDOG_TO = (1U << 1), /* watchdog timeout */
558 BUF_PAR = (1U << 0), /* buffer parity error */
559};
560
561enum error_info_rec_2 {
562 SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */
563 GRD_CHK_ERR = (1U << 14), /* Guard Check Error */
564 APP_CHK_ERR = (1U << 13), /* Application Check error */
565 REF_CHK_ERR = (1U << 12), /* Reference Check Error */
566 USR_BLK_NM = (1U << 0), /* User Block Number */
567};
568
569struct mvs_chip_info {
570 u32 n_phy;
571 u32 srs_sz;
572 u32 slot_width;
573};
574
575struct mvs_err_info {
576 __le32 flags;
577 __le32 flags2;
578};
579
580struct mvs_prd {
581 __le64 addr; /* 64-bit buffer address */
582 __le32 reserved;
583 __le32 len; /* 16-bit length */
584};
585
586struct mvs_cmd_hdr {
587 __le32 flags; /* PRD tbl len; SAS, SATA ctl */
588 __le32 lens; /* cmd, max resp frame len */
589 __le32 tags; /* targ port xfer tag; tag */
590 __le32 data_len; /* data xfer len */
591 __le64 cmd_tbl; /* command table address */
592 __le64 open_frame; /* open addr frame address */
593 __le64 status_buf; /* status buffer address */
594 __le64 prd_tbl; /* PRD tbl address */
595 __le32 reserved[4];
596};
597
598struct mvs_port {
599 struct asd_sas_port sas_port;
600 u8 port_attached;
601 u8 taskfileset;
602 u8 wide_port_phymap;
603 struct list_head list;
604};
605
606struct mvs_phy {
607 struct mvs_port *port;
608 struct asd_sas_phy sas_phy;
609 struct sas_identify identify;
610 struct scsi_device *sdev;
611 u64 dev_sas_addr;
612 u64 att_dev_sas_addr;
613 u32 att_dev_info;
614 u32 dev_info;
615 u32 phy_type;
616 u32 phy_status;
617 u32 irq_status;
618 u32 frame_rcvd_size;
619 u8 frame_rcvd[32];
620 u8 phy_attached;
621 enum sas_linkrate minimum_linkrate;
622 enum sas_linkrate maximum_linkrate;
623};
624
625struct mvs_slot_info {
626 struct list_head list;
627 struct sas_task *task;
628 u32 n_elem;
629 u32 tx;
630
631 /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
632 * and PRD table
633 */
634 void *buf;
635 dma_addr_t buf_dma;
636#if _MV_DUMP
637 u32 cmd_size;
638#endif
639
640 void *response;
641 struct mvs_port *port;
642};
643
644struct mvs_info {
645 unsigned long flags;
646
647 spinlock_t lock; /* host-wide lock */
648 struct pci_dev *pdev; /* our device */
649 void __iomem *regs; /* enhanced mode registers */
650 void __iomem *peri_regs; /* peripheral registers */
651
652 u8 sas_addr[SAS_ADDR_SIZE];
653 struct sas_ha_struct sas; /* SCSI/SAS glue */
654 struct Scsi_Host *shost;
655
656 __le32 *tx; /* TX (delivery) DMA ring */
657 dma_addr_t tx_dma;
658 u32 tx_prod; /* cached next-producer idx */
659
660 __le32 *rx; /* RX (completion) DMA ring */
661 dma_addr_t rx_dma;
662 u32 rx_cons; /* RX consumer idx */
663
664 __le32 *rx_fis; /* RX'd FIS area */
665 dma_addr_t rx_fis_dma;
666
667 struct mvs_cmd_hdr *slot; /* DMA command header slots */
668 dma_addr_t slot_dma;
669
670 const struct mvs_chip_info *chip;
671
672 u8 tags[MVS_SLOTS];
673 struct mvs_slot_info slot_info[MVS_SLOTS];
674 /* further per-slot information */
675 struct mvs_phy phy[MVS_MAX_PHYS];
676 struct mvs_port port[MVS_MAX_PHYS];
677#ifdef MVS_USE_TASKLET
678 struct tasklet_struct tasklet;
679#endif
680};
681
682static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
683 void *funcdata);
684static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
685static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
686static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
687static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
688static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
689static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);
690
691static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
692static void mvs_detect_porttype(struct mvs_info *mvi, int i);
693static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
694static void mvs_release_task(struct mvs_info *mvi, int phy_no);
695
696static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
697static void mvs_scan_start(struct Scsi_Host *);
698static int mvs_slave_configure(struct scsi_device *sdev);
699
700static struct scsi_transport_template *mvs_stt;
701
702static const struct mvs_chip_info mvs_chips[] = {
703 [chip_6320] = { 2, 16, 9 },
704 [chip_6440] = { 4, 16, 9 },
705 [chip_6480] = { 8, 32, 10 },
706};
707
708static struct scsi_host_template mvs_sht = {
709 .module = THIS_MODULE,
710 .name = DRV_NAME,
711 .queuecommand = sas_queuecommand,
712 .target_alloc = sas_target_alloc,
713 .slave_configure = mvs_slave_configure,
714 .slave_destroy = sas_slave_destroy,
715 .scan_finished = mvs_scan_finished,
716 .scan_start = mvs_scan_start,
717 .change_queue_depth = sas_change_queue_depth,
718 .change_queue_type = sas_change_queue_type,
719 .bios_param = sas_bios_param,
720 .can_queue = 1,
721 .cmd_per_lun = 1,
722 .this_id = -1,
723 .sg_tablesize = SG_ALL,
724 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
725 .use_clustering = ENABLE_CLUSTERING,
726 .eh_device_reset_handler = sas_eh_device_reset_handler,
727 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
728 .slave_alloc = sas_slave_alloc,
729 .target_destroy = sas_target_destroy,
730 .ioctl = sas_ioctl,
731};
732
733static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
734{
735 u32 i;
736 u32 run;
737 u32 offset;
738
739 offset = 0;
740 while (size) {
741 printk("%08X : ", baseaddr + offset);
742 if (size >= 16)
743 run = 16;
744 else
745 run = size;
746 size -= run;
747 for (i = 0; i < 16; i++) {
748 if (i < run)
749 printk("%02X ", (u32)data[i]);
750 else
751 printk(" ");
752 }
753 printk(": ");
754 for (i = 0; i < run; i++)
755 printk("%c", isalnum(data[i]) ? data[i] : '.');
756 printk("\n");
757 data = &data[16];
758 offset += run;
759 }
760 printk("\n");
761}
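/*
 * Usage sketch (editorial, not driver code): mvs_hexdump(16,
 * (u8 *)slot->response, 0) prints a single 16-byte line with a
 * running offset column (starting at baseaddr), the bytes in hex,
 * and their printable characters on the right, as the error path
 * in mvs_slot_err() below does.
 */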
762
763#if _MV_DUMP
764static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
765 enum sas_protocol proto)
766{
767 u32 offset;
768 struct pci_dev *pdev = mvi->pdev;
769 struct mvs_slot_info *slot = &mvi->slot_info[tag];
770
771 offset = slot->cmd_size + MVS_OAF_SZ +
772 sizeof(struct mvs_prd) * slot->n_elem;
773 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
774 tag);
775 mvs_hexdump(32, (u8 *) slot->response,
776 (u32) slot->buf_dma + offset);
777}
778#endif
779
780static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
781 enum sas_protocol proto)
782{
783#if _MV_DUMP
784 u32 sz, w_ptr;
785 u64 addr;
786 void __iomem *regs = mvi->regs;
787 struct pci_dev *pdev = mvi->pdev;
788 struct mvs_slot_info *slot = &mvi->slot_info[tag];
789
790 /* Delivery Queue */
791 sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
792 w_ptr = slot->tx;
793 addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
794 dev_printk(KERN_DEBUG, &pdev->dev,
795 "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
796 dev_printk(KERN_DEBUG, &pdev->dev,
797 "Delivery Queue Base Address=0x%llX (PA)"
798 "(tx_dma=0x%llX), Entry=%04d\n",
799 addr, mvi->tx_dma, w_ptr);
800 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
801 (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
802 /* Command List */
803 addr = mvi->slot_dma;
804 dev_printk(KERN_DEBUG, &pdev->dev,
805 "Command List Base Address=0x%llX (PA)"
806 "(slot_dma=0x%llX), Header=%03d\n",
807 addr, slot->buf_dma, tag);
808 dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
809 /* mvs_cmd_hdr */
810 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
811 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
812 /* 1. command table area */
813 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
814 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
815 /* 2. open address frame area */
816 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
817 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
818 (u32) slot->buf_dma + slot->cmd_size);
819 /* 3. status buffer */
820 mvs_hba_sb_dump(mvi, tag, proto);
821 /* 4. PRD table */
822 dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
823 mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
824 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
825 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
826#endif
827}
828
829static void mvs_hba_cq_dump(struct mvs_info *mvi)
830{
831#if (_MV_DUMP > 2)
832 u64 addr;
833 void __iomem *regs = mvi->regs;
834 struct pci_dev *pdev = mvi->pdev;
835 u32 entry = mvi->rx_cons + 1;
836 u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
837
838 /* Completion Queue */
839 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
840 dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n",
841 mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
842 dev_printk(KERN_DEBUG, &pdev->dev,
843 "Completion List Base Address=0x%llX (PA), "
844 "CQ_Entry=%04d, CQ_WP=0x%08X\n",
845 addr, entry - 1, mvi->rx[0]);
846 mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
847 mvi->rx_dma + sizeof(u32) * entry);
848#endif
849}
850
851static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
852{
853 void __iomem *regs = mvi->regs;
854 u32 tmp;
855
856 tmp = mr32(GBL_CTL);
857
858 mw32(GBL_CTL, tmp | INT_EN);
859}
860
861static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
862{
863 void __iomem *regs = mvi->regs;
864 u32 tmp;
865
866 tmp = mr32(GBL_CTL);
867
868 mw32(GBL_CTL, tmp & ~INT_EN);
869}
870
871static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
872
873/* move to PCI layer or libata core? */
874static int pci_go_64(struct pci_dev *pdev)
875{
876 int rc;
877
878 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
879 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
880 if (rc) {
881 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
882 if (rc) {
883 dev_printk(KERN_ERR, &pdev->dev,
884 "64-bit DMA enable failed\n");
885 return rc;
886 }
887 }
888 } else {
889 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
890 if (rc) {
891 dev_printk(KERN_ERR, &pdev->dev,
892 "32-bit DMA enable failed\n");
893 return rc;
894 }
895 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
896 if (rc) {
897 dev_printk(KERN_ERR, &pdev->dev,
898 "32-bit consistent DMA enable failed\n");
899 return rc;
900 }
901 }
902
903 return rc;
904}
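/*
 * Note (a sketch, assuming a kernel that provides
 * dma_set_mask_and_coherent()): the fallback ladder above -- 64-bit
 * streaming mask with a 64- or 32-bit coherent mask, else 32-bit for
 * both -- can be written more compactly on later kernels as:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
 *		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 */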
905
906static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
907{
908 if (task->lldd_task) {
909 struct mvs_slot_info *slot;
910 slot = (struct mvs_slot_info *) task->lldd_task;
911 *tag = slot - mvi->slot_info;
912 return 1;
913 }
914 return 0;
915}
916
917static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
918{
919 void *bitmap = (void *) &mvi->tags;
920 clear_bit(tag, bitmap);
921}
922
923static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
924{
925 mvs_tag_clear(mvi, tag);
926}
927
928static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
929{
930 void *bitmap = (void *) &mvi->tags;
931 set_bit(tag, bitmap);
932}
933
934static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
935{
936 unsigned int index, tag;
937 void *bitmap = (void *) &mvi->tags;
938
939 index = find_first_zero_bit(bitmap, MVS_SLOTS);
940 tag = index;
941 if (tag >= MVS_SLOTS)
942 return -SAS_QUEUE_FULL;
943 mvs_tag_set(mvi, tag);
944 *tag_out = tag;
945 return 0;
946}
947
948static void mvs_tag_init(struct mvs_info *mvi)
949{
950 int i;
951 for (i = 0; i < MVS_SLOTS; ++i)
952 mvs_tag_clear(mvi, i);
953}
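/*
 * Tag lifecycle sketch (editorial): mvi->tags is a bitmap with one
 * bit per hardware slot, so a submission path typically does
 *
 *	u32 tag;
 *
 *	if (mvs_tag_alloc(mvi, &tag))	(all MVS_SLOTS slots busy)
 *		return -SAS_QUEUE_FULL;
 *	... fill mvi->slot_info[tag] and post the command ...
 *	mvs_tag_free(mvi, tag);		(on completion)
 *
 * as mvs_task_exec() and mvs_slot_free() below do.
 */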
954
955#ifndef MVS_DISABLE_NVRAM
956static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
957{
958 int timeout = 1000;
959
960 if (addr & ~SPI_ADDR_MASK)
961 return -EINVAL;
962
963 writel(addr, regs + SPI_CMD);
964 writel(TWSI_RD, regs + SPI_CTL);
965
966 while (timeout-- > 0) {
967 if (readl(regs + SPI_CTL) & TWSI_RDY) {
968 *data = readl(regs + SPI_DATA);
969 return 0;
970 }
971
972 udelay(10);
973 }
974
975 return -EBUSY;
976}
977
978static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
979 void *buf, u32 buflen)
980{
981 u32 addr_end, tmp_addr, i, j;
982 u32 tmp = 0;
983 int rc;
984 u8 *tmp8, *buf8 = buf;
985
986 addr_end = addr + buflen;
987 tmp_addr = ALIGN(addr, 4);
988 if (addr > 0xff)
989 return -EINVAL;
990
991 j = addr & 0x3;
992 if (j) {
993 rc = mvs_eep_read(regs, tmp_addr, &tmp);
994 if (rc)
995 return rc;
996
997 tmp8 = (u8 *)&tmp;
998 for (i = j; i < 4; i++)
999 *buf8++ = tmp8[i];
1000
1001 tmp_addr += 4;
1002 }
1003
1004 for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
1005 rc = mvs_eep_read(regs, tmp_addr, &tmp);
1006 if (rc)
1007 return rc;
1008
1009 memcpy(buf8, &tmp, 4);
1010 buf8 += 4;
1011 }
1012
1013 if (tmp_addr < addr_end) {
1014 rc = mvs_eep_read(regs, tmp_addr, &tmp);
1015 if (rc)
1016 return rc;
1017
1018 tmp8 = (u8 *)&tmp;
1019 j = addr_end - tmp_addr;
1020 for (i = 0; i < j; i++)
1021 *buf8++ = tmp8[i];
1022
1023 tmp_addr += 4;
1024 }
1025
1026 return 0;
1027}
1028#endif
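/*
 * Worked example for mvs_eep_read_buf() above (a sketch): the read
 * proceeds in up to three phases -- a leading partial dword when addr
 * is unaligned, whole aligned dwords in the middle, and a trailing
 * partial dword when addr + buflen is unaligned.  For an aligned
 * request such as addr = 0x04, buflen = 8, only the middle loop runs,
 * copying the dwords at 0x04 and 0x08.
 */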
1029
1030static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
1031 void *buf, u32 buflen)
1032{
1033#ifndef MVS_DISABLE_NVRAM
1034 void __iomem *regs = mvi->regs;
1035 int rc, i;
1036 u32 sum;
1037 u8 hdr[2], *tmp;
1038 const char *msg;
1039
1040 rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
1041 if (rc) {
1042 msg = "nvram hdr read failed";
1043 goto err_out;
1044 }
1045 rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
1046 if (rc) {
1047 msg = "nvram read failed";
1048 goto err_out;
1049 }
1050
1051 if (hdr[0] != 0x5A) {
1052 /* entry id */
1053 msg = "invalid nvram entry id";
1054 rc = -ENOENT;
1055 goto err_out;
1056 }
1057
1058 tmp = buf;
1059 sum = ((u32)hdr[0]) + ((u32)hdr[1]);
1060 for (i = 0; i < buflen; i++)
1061 sum += ((u32)tmp[i]);
1062
1063 if (sum) {
1064 msg = "nvram checksum failure";
1065 rc = -EILSEQ;
1066 goto err_out;
1067 }
1068
1069 return 0;
1070
1071err_out:
1072 dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg);
1073 return rc;
1074#else
1075 /* FIXME: for SAS target mode */
1076 memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
1077 return 0;
1078#endif
1079}
1080
1081static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
1082{
1083 struct mvs_phy *phy = &mvi->phy[i];
1084 struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
1085
1086 if (!phy->phy_attached)
1087 return;
1088
1089 if (sas_phy->phy) {
1090 struct sas_phy *sphy = sas_phy->phy;
1091
1092 sphy->negotiated_linkrate = sas_phy->linkrate;
1093 sphy->minimum_linkrate = phy->minimum_linkrate;
1094 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
1095 sphy->maximum_linkrate = phy->maximum_linkrate;
1096 sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
1097 }
1098
1099 if (phy->phy_type & PORT_TYPE_SAS) {
1100 struct sas_identify_frame *id;
1101
1102 id = (struct sas_identify_frame *)phy->frame_rcvd;
1103 id->dev_type = phy->identify.device_type;
1104 id->initiator_bits = SAS_PROTOCOL_ALL;
1105 id->target_bits = phy->identify.target_port_protocols;
1106 } else if (phy->phy_type & PORT_TYPE_SATA) {
1107 /* TODO */
1108 }
1109 mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
1110 mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
1111 PORTE_BYTES_DMAED);
1112}
1113
1114static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
1115{
1116 /* give the phy enabling interrupt event time to come in (1s
1117 * is empirically about all it takes) */
1118 if (time < HZ)
1119 return 0;
1120 /* Wait for discovery to finish */
1121 scsi_flush_work(shost);
1122 return 1;
1123}
1124
1125static void mvs_scan_start(struct Scsi_Host *shost)
1126{
1127 int i;
1128 struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
1129
1130 for (i = 0; i < mvi->chip->n_phy; ++i) {
1131 mvs_bytes_dmaed(mvi, i);
1132 }
1133}
1134
1135static int mvs_slave_configure(struct scsi_device *sdev)
1136{
1137 struct domain_device *dev = sdev_to_domain_dev(sdev);
1138 int ret = sas_slave_configure(sdev);
1139
1140 if (ret)
1141 return ret;
1142
1143 if (dev_is_sata(dev)) {
1144 /* struct ata_port *ap = dev->sata_dev.ap; */
1145 /* struct ata_device *adev = ap->link.device; */
1146
1147 /* clamp at no NCQ for the time being */
1148 /* adev->flags |= ATA_DFLAG_NCQ_OFF; */
1149 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
1150 }
1151 return 0;
1152}
1153
1154static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
1155{
1156 struct pci_dev *pdev = mvi->pdev;
1157 struct sas_ha_struct *sas_ha = &mvi->sas;
1158 struct mvs_phy *phy = &mvi->phy[phy_no];
1159 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1160
1161 phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no);
1162 /*
1163 * events now holds the port event; we still need to check
1164 * the per-port interrupt status.
1165 */
1166 dev_printk(KERN_DEBUG, &pdev->dev,
1167 "Port %d Event = %X\n",
1168 phy_no, phy->irq_status);
1169
1170 if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
1171 mvs_release_task(mvi, phy_no);
1172 if (!mvs_is_phy_ready(mvi, phy_no)) {
1173 sas_phy_disconnected(sas_phy);
1174 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1175 dev_printk(KERN_INFO, &pdev->dev,
1176 "Port %d Unplug Notice\n", phy_no);
1177
1178 } else
1179 mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
1180 }
1181 if (!(phy->irq_status & PHYEV_DEC_ERR)) {
1182 if (phy->irq_status & PHYEV_COMWAKE) {
1183 u32 tmp = mvs_read_port_irq_mask(mvi, phy_no);
1184 mvs_write_port_irq_mask(mvi, phy_no,
1185 tmp | PHYEV_SIG_FIS);
1186 }
1187 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
1188 phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
1189 if (phy->phy_status) {
1190 mvs_detect_porttype(mvi, phy_no);
1191
1192 if (phy->phy_type & PORT_TYPE_SATA) {
1193 u32 tmp = mvs_read_port_irq_mask(mvi,
1194 phy_no);
1195 tmp &= ~PHYEV_SIG_FIS;
1196 mvs_write_port_irq_mask(mvi,
1197 phy_no, tmp);
1198 }
1199
1200 mvs_update_phyinfo(mvi, phy_no, 0);
1201 sas_ha->notify_phy_event(sas_phy,
1202 PHYE_OOB_DONE);
1203 mvs_bytes_dmaed(mvi, phy_no);
1204 } else {
1205 dev_printk(KERN_DEBUG, &pdev->dev,
1206 "plugin interrupt but phy is gone\n");
1207 mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
1208 NULL);
1209 }
1210 } else if (phy->irq_status & PHYEV_BROAD_CH) {
1211 mvs_release_task(mvi, phy_no);
1212 sas_ha->notify_port_event(sas_phy,
1213 PORTE_BROADCAST_RCVD);
1214 }
1215 }
1216 mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status);
1217}
1218
1219static void mvs_int_sata(struct mvs_info *mvi)
1220{
1221 u32 tmp;
1222 void __iomem *regs = mvi->regs;
1223 tmp = mr32(INT_STAT_SRS);
1224 mw32(INT_STAT_SRS, tmp & 0xFFFF);
1225}
1226
1227static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
1228 u32 slot_idx)
1229{
1230 void __iomem *regs = mvi->regs;
1231 struct domain_device *dev = task->dev;
1232 struct asd_sas_port *sas_port = dev->port;
1233 struct mvs_port *port = mvi->slot_info[slot_idx].port;
1234 u32 reg_set, phy_mask;
1235
1236 if (!sas_protocol_ata(task->task_proto)) {
1237 reg_set = 0;
1238 phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
1239 sas_port->phy_mask;
1240 } else {
1241 reg_set = port->taskfileset;
1242 phy_mask = sas_port->phy_mask;
1243 }
1244 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx |
1245 (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) |
1246 (phy_mask << TXQ_PHY_SHIFT) |
1247 (reg_set << TXQ_SRS_SHIFT));
1248
1249 mw32(TX_PROD_IDX, mvi->tx_prod);
1250 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
1251}
1252
1253static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1254 u32 slot_idx, int err)
1255{
1256 struct mvs_port *port = mvi->slot_info[slot_idx].port;
1257 struct task_status_struct *tstat = &task->task_status;
1258 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
1259 int stat = SAM_GOOD;
1260
1261 resp->frame_len = sizeof(struct dev_to_host_fis);
1262 memcpy(&resp->ending_fis[0],
1263 SATA_RECEIVED_D2H_FIS(port->taskfileset),
1264 sizeof(struct dev_to_host_fis));
1265 tstat->buf_valid_size = sizeof(*resp);
1266 if (unlikely(err))
1267 stat = SAS_PROTO_RESPONSE;
1268 return stat;
1269}
1270
1271static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
1272{
1273 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1274 mvs_tag_clear(mvi, slot_idx);
1275}
1276
1277static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
1278 struct mvs_slot_info *slot, u32 slot_idx)
1279{
1280 if (!sas_protocol_ata(task->task_proto))
1281 if (slot->n_elem)
1282 pci_unmap_sg(mvi->pdev, task->scatter,
1283 slot->n_elem, task->data_dir);
1284
1285 switch (task->task_proto) {
1286 case SAS_PROTOCOL_SMP:
1287 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
1288 PCI_DMA_FROMDEVICE);
1289 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
1290 PCI_DMA_TODEVICE);
1291 break;
1292
1293 case SAS_PROTOCOL_SATA:
1294 case SAS_PROTOCOL_STP:
1295 case SAS_PROTOCOL_SSP:
1296 default:
1297 /* do nothing */
1298 break;
1299 }
1300 list_del(&slot->list);
1301 task->lldd_task = NULL;
1302 slot->task = NULL;
1303 slot->port = NULL;
1304}
1305
1306static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1307 u32 slot_idx)
1308{
1309 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1310 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
1311 u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
1312 int stat = SAM_CHECK_COND;
1313
1314 if (err_dw1 & SLOT_BSY_ERR) {
1315 stat = SAS_QUEUE_FULL;
1316 mvs_slot_reset(mvi, task, slot_idx);
1317 }
1318 switch (task->task_proto) {
1319 case SAS_PROTOCOL_SSP:
1320 break;
1321 case SAS_PROTOCOL_SMP:
1322 break;
1323 case SAS_PROTOCOL_SATA:
1324 case SAS_PROTOCOL_STP:
1325 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1326 if (err_dw0 & TFILE_ERR)
1327 stat = mvs_sata_done(mvi, task, slot_idx, 1);
1328 break;
1329 default:
1330 break;
1331 }
1332
1333 mvs_hexdump(16, (u8 *) slot->response, 0);
1334 return stat;
1335}
1336
1337static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1338{
1339 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1340 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1341 struct sas_task *task = slot->task;
1342 struct task_status_struct *tstat;
1343 struct mvs_port *port;
1344 bool aborted;
1345 void *to;
1346
1347 if (unlikely(!task || !task->lldd_task))
1348 return -1;
1349
1350 mvs_hba_cq_dump(mvi);
1351
1352 spin_lock(&task->task_state_lock);
1353 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
1354 if (!aborted) {
1355 task->task_state_flags &=
1356 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1357 task->task_state_flags |= SAS_TASK_STATE_DONE;
1358 }
1359 spin_unlock(&task->task_state_lock);
1360
1361 if (aborted) {
1362 mvs_slot_task_free(mvi, task, slot, slot_idx);
1363 mvs_slot_free(mvi, rx_desc);
1364 return -1;
1365 }
1366
1367 port = slot->port;
1368 tstat = &task->task_status;
1369 memset(tstat, 0, sizeof(*tstat));
1370 tstat->resp = SAS_TASK_COMPLETE;
1371
1372 if (unlikely(!port->port_attached || flags)) {
1373 mvs_slot_err(mvi, task, slot_idx);
1374 if (!sas_protocol_ata(task->task_proto))
1375 tstat->stat = SAS_PHY_DOWN;
1376 goto out;
1377 }
1378
1379 /* error info record present */
1380 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
1381 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1382 goto out;
1383 }
1384
1385 switch (task->task_proto) {
1386 case SAS_PROTOCOL_SSP:
1387 /* hw says status == 0, datapres == 0 */
1388 if (rx_desc & RXQ_GOOD) {
1389 tstat->stat = SAM_GOOD;
1390 tstat->resp = SAS_TASK_COMPLETE;
1391 }
1392 /* response frame present */
1393 else if (rx_desc & RXQ_RSP) {
1394 struct ssp_response_iu *iu =
1395 slot->response + sizeof(struct mvs_err_info);
1396 sas_ssp_task_response(&mvi->pdev->dev, task, iu);
1397 }
1398
1399 /* should never happen? */
1400 else
1401 tstat->stat = SAM_CHECK_COND;
1402 break;
1403
1404 case SAS_PROTOCOL_SMP: {
1405 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1406 tstat->stat = SAM_GOOD;
1407 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
1408 memcpy(to + sg_resp->offset,
1409 slot->response + sizeof(struct mvs_err_info),
1410 sg_dma_len(sg_resp));
1411 kunmap_atomic(to, KM_IRQ0);
1412 break;
1413 }
1414
1415 case SAS_PROTOCOL_SATA:
1416 case SAS_PROTOCOL_STP:
1417 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
1418 tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
1419 break;
1420 }
1421
1422 default:
1423 tstat->stat = SAM_CHECK_COND;
1424 break;
1425 }
1426
1427out:
1428 mvs_slot_task_free(mvi, task, slot, slot_idx);
1429 if (unlikely(tstat->stat != SAS_QUEUE_FULL))
1430 mvs_slot_free(mvi, rx_desc);
1431
1432 spin_unlock(&mvi->lock);
1433 task->task_done(task);
1434 spin_lock(&mvi->lock);
1435 return tstat->stat;
1436}
1437
1438static void mvs_release_task(struct mvs_info *mvi, int phy_no)
1439{
1440 struct list_head *pos, *n;
1441 struct mvs_slot_info *slot;
1442 struct mvs_phy *phy = &mvi->phy[phy_no];
1443 struct mvs_port *port = phy->port;
1444 u32 rx_desc;
1445
1446 if (!port)
1447 return;
1448
1449 list_for_each_safe(pos, n, &port->list) {
1450 slot = container_of(pos, struct mvs_slot_info, list);
1451 rx_desc = (u32) (slot - mvi->slot_info);
1452 mvs_slot_complete(mvi, rx_desc, 1);
1453 }
1454}
1455
1456static void mvs_int_full(struct mvs_info *mvi)
1457{
1458 void __iomem *regs = mvi->regs;
1459 u32 tmp, stat;
1460 int i;
1461
1462 stat = mr32(INT_STAT);
1463
1464 mvs_int_rx(mvi, false);
1465
1466 for (i = 0; i < MVS_MAX_PORTS; i++) {
1467 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
1468 if (tmp)
1469 mvs_int_port(mvi, i, tmp);
1470 }
1471
1472 if (stat & CINT_SRS)
1473 mvs_int_sata(mvi);
1474
1475 mw32(INT_STAT, stat);
1476}
1477
1478static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
1479{
1480 void __iomem *regs = mvi->regs;
1481 u32 rx_prod_idx, rx_desc;
1482 bool attn = false;
1483 struct pci_dev *pdev = mvi->pdev;
1484
1485 /* the first dword in the RX ring is special: it contains
1486 * a mirror of the hardware's RX producer index, so that
1487 * we don't have to stall the CPU reading that register.
1488 * The actual RX ring is offset by one dword, due to this.
1489 */
1490 rx_prod_idx = mvi->rx_cons;
1491 mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
1492 if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */
1493 return 0;
1494
1495 /* The CMPL_Q update may come late; read the register and try again.
1496 * Note: if interrupt coalescing is enabled, the register must be
1497 * read every time.
1498 */
1499 if (mvi->rx_cons == rx_prod_idx)
1500 mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
1501
1502 if (mvi->rx_cons == rx_prod_idx)
1503 return 0;
1504
1505 while (mvi->rx_cons != rx_prod_idx) {
1506
1507 /* increment our internal RX consumer pointer */
1508 rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
1509
1510 rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
1511
1512 if (likely(rx_desc & RXQ_DONE))
1513 mvs_slot_complete(mvi, rx_desc, 0);
1514 if (rx_desc & RXQ_ATTN) {
1515 attn = true;
1516 dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
1517 rx_desc);
1518 } else if (rx_desc & RXQ_ERR) {
1519 if (!(rx_desc & RXQ_DONE))
1520 mvs_slot_complete(mvi, rx_desc, 0);
1521 dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
1522 rx_desc);
1523 } else if (rx_desc & RXQ_SLOT_RESET) {
1524 dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n",
1525 rx_desc);
1526 mvs_slot_free(mvi, rx_desc);
1527 }
1528 }
1529
1530 if (attn && self_clear)
1531 mvs_int_full(mvi);
1532
1533 return 0;
1534}
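/*
 * Ring-walk example (a sketch, assuming a ring of 0x100 entries for
 * illustration): with a previous consumer index of 0xfe and a
 * mirrored producer index of 0x01, the loop above steps
 * 0xfe -> 0xff -> 0x00 -> 0x01 and reads the descriptors at
 * rx[0xff + 1], rx[0x00 + 1] and rx[0x01 + 1]; the "+ 1" skips the
 * producer-index mirror kept in rx[0].
 */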
1535
1536#ifdef MVS_USE_TASKLET
1537static void mvs_tasklet(unsigned long data)
1538{
1539 struct mvs_info *mvi = (struct mvs_info *) data;
1540 unsigned long flags;
1541
1542 spin_lock_irqsave(&mvi->lock, flags);
1543
1544#ifdef MVS_DISABLE_MSI
1545 mvs_int_full(mvi);
1546#else
1547 mvs_int_rx(mvi, true);
1548#endif
1549 spin_unlock_irqrestore(&mvi->lock, flags);
1550}
1551#endif
1552
1553static irqreturn_t mvs_interrupt(int irq, void *opaque)
1554{
1555 struct mvs_info *mvi = opaque;
1556 void __iomem *regs = mvi->regs;
1557 u32 stat;
1558
1559 stat = mr32(GBL_INT_STAT);
1560
1561 if (stat == 0 || stat == 0xffffffff)
1562 return IRQ_NONE;
1563
1564 /* clear CMD_CMPLT ASAP */
1565 mw32_f(INT_STAT, CINT_DONE);
1566
1567#ifndef MVS_USE_TASKLET
1568 spin_lock(&mvi->lock);
1569
1570 mvs_int_full(mvi);
1571
1572 spin_unlock(&mvi->lock);
1573#else
1574 tasklet_schedule(&mvi->tasklet);
1575#endif
1576 return IRQ_HANDLED;
1577}
1578
1579#ifndef MVS_DISABLE_MSI
1580static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
1581{
1582 struct mvs_info *mvi = opaque;
1583
1584#ifndef MVS_USE_TASKLET
1585 spin_lock(&mvi->lock);
1586
1587 mvs_int_rx(mvi, true);
1588
1589 spin_unlock(&mvi->lock);
1590#else
1591 tasklet_schedule(&mvi->tasklet);
1592#endif
1593 return IRQ_HANDLED;
1594}
1595#endif
1596
1597struct mvs_task_exec_info {
1598 struct sas_task *task;
1599 struct mvs_cmd_hdr *hdr;
1600 struct mvs_port *port;
1601 u32 tag;
1602 int n_elem;
1603};
1604
1605static int mvs_task_prep_smp(struct mvs_info *mvi,
1606 struct mvs_task_exec_info *tei)
1607{
1608 int elem, rc, i;
1609 struct sas_task *task = tei->task;
1610 struct mvs_cmd_hdr *hdr = tei->hdr;
1611 struct scatterlist *sg_req, *sg_resp;
1612 u32 req_len, resp_len, tag = tei->tag;
1613 void *buf_tmp;
1614 u8 *buf_oaf;
1615 dma_addr_t buf_tmp_dma;
1616 struct mvs_prd *buf_prd;
1617 struct scatterlist *sg;
1618 struct mvs_slot_info *slot = &mvi->slot_info[tag];
1619 struct asd_sas_port *sas_port = task->dev->port;
1620 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
1621#if _MV_DUMP
1622 u8 *buf_cmd;
1623 void *from;
1624#endif
1625 /*
1626 * DMA-map SMP request, response buffers
1627 */
1628 sg_req = &task->smp_task.smp_req;
1629 elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
1630 if (!elem)
1631 return -ENOMEM;
1632 req_len = sg_dma_len(sg_req);
1633
1634 sg_resp = &task->smp_task.smp_resp;
1635 elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
1636 if (!elem) {
1637 rc = -ENOMEM;
1638 goto err_out;
1639 }
1640 resp_len = sg_dma_len(sg_resp);
1641
1642 /* must be in dwords */
1643 if ((req_len & 0x3) || (resp_len & 0x3)) {
1644 rc = -EINVAL;
1645 goto err_out_2;
1646 }
1647
1648 /*
1649 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1650 */
1651
1652 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
1653 buf_tmp = slot->buf;
1654 buf_tmp_dma = slot->buf_dma;
1655
1656#if _MV_DUMP
1657 buf_cmd = buf_tmp;
1658 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1659 buf_tmp += req_len;
1660 buf_tmp_dma += req_len;
1661 slot->cmd_size = req_len;
1662#else
1663 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
1664#endif
1665
1666 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1667 buf_oaf = buf_tmp;
1668 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1669
1670 buf_tmp += MVS_OAF_SZ;
1671 buf_tmp_dma += MVS_OAF_SZ;
1672
1673 /* region 3: PRD table ********************************************* */
1674 buf_prd = buf_tmp;
1675 if (tei->n_elem)
1676 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1677 else
1678 hdr->prd_tbl = 0;
1679
1680 i = sizeof(struct mvs_prd) * tei->n_elem;
1681 buf_tmp += i;
1682 buf_tmp_dma += i;
1683
1684 /* region 4: status buffer (the larger the PRD, the smaller this buf) */
1685 slot->response = buf_tmp;
1686 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1687
1688 /*
1689 * Fill in TX ring and command slot header
1690 */
1691 slot->tx = mvi->tx_prod;
1692 mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
1693 TXQ_MODE_I | tag |
1694 (sas_port->phy_mask << TXQ_PHY_SHIFT));
1695
1696 hdr->flags |= flags;
1697 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
1698 hdr->tags = cpu_to_le32(tag);
1699 hdr->data_len = 0;
1700
1701 /* generate open address frame hdr (first 12 bytes) */
1702 buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
1703 buf_oaf[1] = task->dev->linkrate & 0xf;
1704 *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
1705 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1706
1707 /* fill in PRD (scatter/gather) table, if any */
1708 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1709 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1710 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1711 buf_prd++;
1712 }
1713
1714#if _MV_DUMP
1715 /* copy cmd table */
1716 from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
1717 memcpy(buf_cmd, from + sg_req->offset, req_len);
1718 kunmap_atomic(from, KM_IRQ0);
1719#endif
1720 return 0;
1721
1722err_out_2:
1723 pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
1724 PCI_DMA_FROMDEVICE);
1725err_out:
1726 pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
1727 PCI_DMA_TODEVICE);
1728 return rc;
1729}
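/*
 * Per-slot DMA buffer layout built above (a sketch; with _MV_DUMP
 * off, region 1 is skipped for SMP and hdr->cmd_tbl points straight
 * at the mapped request):
 *
 *	slot->buf -> +--------------------------------+  (buf_dma)
 *	             | 1. command table               |
 *	             | 2. open address frame (OAF)    |
 *	             | 3. PRD table, n_elem entries   |
 *	             | 4. status buffer               |
 *	             +--------------------------------+
 *
 * The same four-region carving is repeated by the ATA and SSP prep
 * routines below.
 */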
1730
1731static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
1732{
1733 void __iomem *regs = mvi->regs;
1734 u32 tmp, offs;
1735 u8 *tfs = &port->taskfileset;
1736
1737 if (*tfs == MVS_ID_NOT_MAPPED)
1738 return;
1739
1740 offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
1741 if (*tfs < 16) {
1742 tmp = mr32(PCS);
1743 mw32(PCS, tmp & ~offs);
1744 } else {
1745 tmp = mr32(CTL);
1746 mw32(CTL, tmp & ~offs);
1747 }
1748
1749 tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
1750 if (tmp)
1751 mw32(INT_STAT_SRS, tmp);
1752
1753 *tfs = MVS_ID_NOT_MAPPED;
1754}
1755
1756static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
1757{
1758 int i;
1759 u32 tmp, offs;
1760 void __iomem *regs = mvi->regs;
1761
1762 if (port->taskfileset != MVS_ID_NOT_MAPPED)
1763 return 0;
1764
1765 tmp = mr32(PCS);
1766
1767 for (i = 0; i < mvi->chip->srs_sz; i++) {
1768 if (i == 16)
1769 tmp = mr32(CTL);
1770 offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
1771 if (!(tmp & offs)) {
1772 port->taskfileset = i;
1773
1774 if (i < 16)
1775 mw32(PCS, tmp | offs);
1776 else
1777 mw32(CTL, tmp | offs);
1778 tmp = mr32(INT_STAT_SRS) & (1U << i);
1779 if (tmp)
1780 mw32(INT_STAT_SRS, tmp);
1781 return 0;
1782 }
1783 }
1784 return MVS_ID_NOT_MAPPED;
1785}
1786
1787static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
1788{
1789 struct ata_queued_cmd *qc = task->uldd_task;
1790
1791 if (qc) {
1792 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
1793 qc->tf.command == ATA_CMD_FPDMA_READ) {
1794 *tag = qc->tag;
1795 return 1;
1796 }
1797 }
1798
1799 return 0;
1800}
1801
1802static int mvs_task_prep_ata(struct mvs_info *mvi,
1803 struct mvs_task_exec_info *tei)
1804{
1805 struct sas_task *task = tei->task;
1806 struct domain_device *dev = task->dev;
1807 struct mvs_cmd_hdr *hdr = tei->hdr;
1808 struct asd_sas_port *sas_port = dev->port;
1809 struct mvs_slot_info *slot;
1810 struct scatterlist *sg;
1811 struct mvs_prd *buf_prd;
1812 struct mvs_port *port = tei->port;
1813 u32 tag = tei->tag;
1814 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
1815 void *buf_tmp;
1816 u8 *buf_cmd, *buf_oaf;
1817 dma_addr_t buf_tmp_dma;
1818 u32 i, req_len, resp_len;
1819 const u32 max_resp_len = SB_RFB_MAX;
1820
1821 if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
1822 return -EBUSY;
1823
1824 slot = &mvi->slot_info[tag];
1825 slot->tx = mvi->tx_prod;
1826 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
1827 (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
1828 (sas_port->phy_mask << TXQ_PHY_SHIFT) |
1829 (port->taskfileset << TXQ_SRS_SHIFT));
1830
1831 if (task->ata_task.use_ncq)
1832 flags |= MCH_FPDMA;
1833 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
1834 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
1835 flags |= MCH_ATAPI;
1836 }
1837
1838 /* FIXME: fill in port multiplier number */
1839
1840 hdr->flags = cpu_to_le32(flags);
1841
1842 /* FIXME: the low-order 5 bits carry the TAG when NCQ is enabled */
1843 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags))
1844 task->ata_task.fis.sector_count |= hdr->tags << 3;
1845 else
1846 hdr->tags = cpu_to_le32(tag);
1847 hdr->data_len = cpu_to_le32(task->total_xfer_len);
1848
1849 /*
1850 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1851 */
1852
1853 /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
1854 buf_cmd = buf_tmp = slot->buf;
1855 buf_tmp_dma = slot->buf_dma;
1856
1857 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1858
1859 buf_tmp += MVS_ATA_CMD_SZ;
1860 buf_tmp_dma += MVS_ATA_CMD_SZ;
1861#if _MV_DUMP
1862 slot->cmd_size = MVS_ATA_CMD_SZ;
1863#endif
1864
1865 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1866 /* used for STP. unused for SATA? */
1867 buf_oaf = buf_tmp;
1868 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1869
1870 buf_tmp += MVS_OAF_SZ;
1871 buf_tmp_dma += MVS_OAF_SZ;
1872
1873 /* region 3: PRD table ********************************************* */
1874 buf_prd = buf_tmp;
1875 if (tei->n_elem)
1876 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1877 else
1878 hdr->prd_tbl = 0;
1879
1880 i = sizeof(struct mvs_prd) * tei->n_elem;
1881 buf_tmp += i;
1882 buf_tmp_dma += i;
1883
1884 /* region 4: status buffer (the larger the PRD, the smaller this buf) */
1885 /* FIXME: probably unused for SATA; kept here just in case
1886 * we get an STP/SATA error information record
1887 */
1888 slot->response = buf_tmp;
1889 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1890
1891 req_len = sizeof(struct host_to_dev_fis);
1892 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
1893 sizeof(struct mvs_err_info) - i;
1894
1895 /* request, response lengths */
1896 resp_len = min(resp_len, max_resp_len);
1897 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
1898
1899 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
1900 /* fill in command FIS and ATAPI CDB */
1901 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
1902 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
1903 memcpy(buf_cmd + STP_ATAPI_CMD,
1904 task->ata_task.atapi_packet, 16);
1905
1906 /* generate open address frame hdr (first 12 bytes) */
1907 buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */
1908 buf_oaf[1] = task->dev->linkrate & 0xf;
1909 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
1910 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1911
1912 /* fill in PRD (scatter/gather) table, if any */
1913 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1914 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1915 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1916 buf_prd++;
1917 }
1918
1919 return 0;
1920}
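/*
 * NCQ note for the tag handling above: for ATA_CMD_FPDMA_READ and
 * ATA_CMD_FPDMA_WRITE the queued-command TAG travels in bits 7:3 of
 * the FIS sector count field, which is why mvs_get_ncq_tag()'s value
 * is shifted left by 3 and ORed into fis.sector_count rather than
 * using the driver's slot tag.
 */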
1921
1922static int mvs_task_prep_ssp(struct mvs_info *mvi,
1923 struct mvs_task_exec_info *tei)
1924{
1925 struct sas_task *task = tei->task;
1926 struct mvs_cmd_hdr *hdr = tei->hdr;
1927 struct mvs_port *port = tei->port;
1928 struct mvs_slot_info *slot;
1929 struct scatterlist *sg;
1930 struct mvs_prd *buf_prd;
1931 struct ssp_frame_hdr *ssp_hdr;
1932 void *buf_tmp;
1933 u8 *buf_cmd, *buf_oaf, fburst = 0;
1934 dma_addr_t buf_tmp_dma;
1935 u32 flags;
1936 u32 resp_len, req_len, i, tag = tei->tag;
1937 const u32 max_resp_len = SB_RFB_MAX;
1938 u8 phy_mask;
1939
1940 slot = &mvi->slot_info[tag];
1941
1942 phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
1943 task->dev->port->phy_mask;
1944 slot->tx = mvi->tx_prod;
1945 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
1946 (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
1947 (phy_mask << TXQ_PHY_SHIFT));
1948
1949 flags = MCH_RETRY;
1950 if (task->ssp_task.enable_first_burst) {
1951 flags |= MCH_FBURST;
1952 fburst = (1 << 7);
1953 }
1954 hdr->flags = cpu_to_le32(flags |
1955 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
1956 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));
1957
1958 hdr->tags = cpu_to_le32(tag);
1959 hdr->data_len = cpu_to_le32(task->total_xfer_len);
1960
1961 /*
1962 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1963 */
1964
1965 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
1966 buf_cmd = buf_tmp = slot->buf;
1967 buf_tmp_dma = slot->buf_dma;
1968
1969 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1970
1971 buf_tmp += MVS_SSP_CMD_SZ;
1972 buf_tmp_dma += MVS_SSP_CMD_SZ;
1973#if _MV_DUMP
1974 slot->cmd_size = MVS_SSP_CMD_SZ;
1975#endif
1976
1977 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1978 buf_oaf = buf_tmp;
1979 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1980
1981 buf_tmp += MVS_OAF_SZ;
1982 buf_tmp_dma += MVS_OAF_SZ;
1983
1984 /* region 3: PRD table ********************************************* */
1985 buf_prd = buf_tmp;
1986 if (tei->n_elem)
1987 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1988 else
1989 hdr->prd_tbl = 0;
1990
1991 i = sizeof(struct mvs_prd) * tei->n_elem;
1992 buf_tmp += i;
1993 buf_tmp_dma += i;
1994
1995 /* region 4: status buffer (the larger the PRD, the smaller this buf) */
1996 slot->response = buf_tmp;
1997 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1998
1999 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
2000 sizeof(struct mvs_err_info) - i;
2001 resp_len = min(resp_len, max_resp_len);
2002
2003 req_len = sizeof(struct ssp_frame_hdr) + 28;
2004
2005 /* request, response lengths */
2006 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
2007
2008 /* generate open address frame hdr (first 12 bytes) */
2009 buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */
2010 buf_oaf[1] = task->dev->linkrate & 0xf;
2011 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
2012 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
2013
2014 /* fill in SSP frame header (Command Table.SSP frame header) */
2015 ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
2016 ssp_hdr->frame_type = SSP_COMMAND;
2017 memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
2018 HASHED_SAS_ADDR_SIZE);
2019 memcpy(ssp_hdr->hashed_src_addr,
2020 task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
2021 ssp_hdr->tag = cpu_to_be16(tag);
2022
2023 /* fill in command frame IU */
2024 buf_cmd += sizeof(*ssp_hdr);
2025 memcpy(buf_cmd, &task->ssp_task.LUN, 8);
2026 buf_cmd[9] = fburst | task->ssp_task.task_attr |
2027 (task->ssp_task.task_prio << 3);
2028 memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
2029
2030 /* fill in PRD (scatter/gather) table, if any */
2031 for_each_sg(task->scatter, sg, tei->n_elem, i) {
2032 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
2033 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
2034 buf_prd++;
2035 }
2036
2037 return 0;
2038}
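/*
 * Byte layout of the 12-byte open address frame header generated
 * above (a sketch of the SAS open address frame format):
 *
 *	byte 0    : initiator bit (7), protocol (6:4), frame type (3:0)
 *	byte 1    : connection rate (3:0)
 *	bytes 2-3 : initiator connection tag
 *	bytes 4-11: destination SAS address
 */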
2039
2040static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
2041{
2042 struct domain_device *dev = task->dev;
2043 struct mvs_info *mvi = dev->port->ha->lldd_ha;
2044 struct pci_dev *pdev = mvi->pdev;
2045 void __iomem *regs = mvi->regs;
2046 struct mvs_task_exec_info tei;
2047 struct sas_task *t = task;
2048 struct mvs_slot_info *slot;
2049 u32 tag = 0xdeadbeef, rc, n_elem = 0;
2050 unsigned long flags;
2051 u32 n = num, pass = 0;
2052
2053 spin_lock_irqsave(&mvi->lock, flags);
2054 do {
2055 dev = t->dev;
2056 tei.port = &mvi->port[dev->port->id];
2057
2058 if (!tei.port->port_attached) {
2059 if (sas_protocol_ata(t->task_proto)) {
2060 rc = SAS_PHY_DOWN;
2061 goto out_done;
2062 } else {
2063 struct task_status_struct *ts = &t->task_status;
2064 ts->resp = SAS_TASK_UNDELIVERED;
2065 ts->stat = SAS_PHY_DOWN;
2066 t->task_done(t);
2067 if (n > 1)
2068 t = list_entry(t->list.next,
2069 struct sas_task, list);
2070 continue;
2071 }
2072 }
2073
2074 if (!sas_protocol_ata(t->task_proto)) {
2075 if (t->num_scatter) {
2076 n_elem = pci_map_sg(mvi->pdev, t->scatter,
2077 t->num_scatter,
2078 t->data_dir);
2079 if (!n_elem) {
2080 rc = -ENOMEM;
2081 goto err_out;
2082 }
2083 }
2084 } else {
2085 n_elem = t->num_scatter;
2086 }
2087
2088 rc = mvs_tag_alloc(mvi, &tag);
2089 if (rc)
2090 goto err_out;
2091
2092 slot = &mvi->slot_info[tag];
2093 t->lldd_task = NULL;
2094 slot->n_elem = n_elem;
2095 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
2096 tei.task = t;
2097 tei.hdr = &mvi->slot[tag];
2098 tei.tag = tag;
2099 tei.n_elem = n_elem;
2100
2101 switch (t->task_proto) {
2102 case SAS_PROTOCOL_SMP:
2103 rc = mvs_task_prep_smp(mvi, &tei);
2104 break;
2105 case SAS_PROTOCOL_SSP:
2106 rc = mvs_task_prep_ssp(mvi, &tei);
2107 break;
2108 case SAS_PROTOCOL_SATA:
2109 case SAS_PROTOCOL_STP:
2110 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
2111 rc = mvs_task_prep_ata(mvi, &tei);
2112 break;
2113 default:
2114 dev_printk(KERN_ERR, &pdev->dev,
2115 "unknown sas_task proto: 0x%x\n",
2116 t->task_proto);
2117 rc = -EINVAL;
2118 break;
2119 }
2120
2121 if (rc)
2122 goto err_out_tag;
2123
2124 slot->task = t;
2125 slot->port = tei.port;
2126 t->lldd_task = (void *) slot;
2127 list_add_tail(&slot->list, &slot->port->list);
2128 /* TODO: select normal or high priority */
2129
2130 spin_lock(&t->task_state_lock);
2131 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
2132 spin_unlock(&t->task_state_lock);
2133
2134 mvs_hba_memory_dump(mvi, tag, t->task_proto);
2135
2136 ++pass;
2137 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
2138 if (n > 1)
2139 t = list_entry(t->list.next, struct sas_task, list);
2140 } while (--n);
2141
2142 rc = 0;
2143 goto out_done;
2144
2145err_out_tag:
2146 mvs_tag_free(mvi, tag);
2147err_out:
2148 dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
2149 if (!sas_protocol_ata(t->task_proto))
2150 if (n_elem)
2151 pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
2152 t->data_dir);
2153out_done:
2154 if (pass)
2155 mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
2156 spin_unlock_irqrestore(&mvi->lock, flags);
2157 return rc;
2158}
2159
2160static int mvs_task_abort(struct sas_task *task)
2161{
2162 int rc;
2163 unsigned long flags;
2164 struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
2165 struct pci_dev *pdev = mvi->pdev;
2166 int tag;
2167
2168 spin_lock_irqsave(&task->task_state_lock, flags);
2169 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
2170 rc = TMF_RESP_FUNC_COMPLETE;
2171 spin_unlock_irqrestore(&task->task_state_lock, flags);
2172 goto out_done;
2173 }
2174 spin_unlock_irqrestore(&task->task_state_lock, flags);
2175
2176 switch (task->task_proto) {
2177 case SAS_PROTOCOL_SMP:
2178 dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort!\n");
2179 break;
2180 case SAS_PROTOCOL_SSP:
2181 dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort!\n");
2182 break;
2183 case SAS_PROTOCOL_SATA:
2184 case SAS_PROTOCOL_STP:
2185 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
2186 dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort!\n");
2187#if _MV_DUMP
2188 dev_printk(KERN_DEBUG, &pdev->dev, "Dump H2D FIS:\n");
2189 mvs_hexdump(sizeof(struct host_to_dev_fis),
2190 (void *)&task->ata_task.fis, 0);
2191 dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd:\n");
2192 mvs_hexdump(16, task->ata_task.atapi_packet, 0);
2193#endif
2194 spin_lock_irqsave(&task->task_state_lock, flags);
2195 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
2196 /* TODO */
2197 ;
2198 }
2199 spin_unlock_irqrestore(&task->task_state_lock, flags);
2200 break;
2201 }
2202 default:
2203 break;
2204 }
2205
2206 if (mvs_find_tag(mvi, task, &tag)) {
2207 spin_lock_irqsave(&mvi->lock, flags);
2208 mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag);
2209 spin_unlock_irqrestore(&mvi->lock, flags);
2210 }
2211 if (!mvs_task_exec(task, 1, GFP_ATOMIC))
2212 rc = TMF_RESP_FUNC_COMPLETE;
2213 else
2214 rc = TMF_RESP_FUNC_FAILED;
2215out_done:
2216 return rc;
2217}
2218
2219static void mvs_free(struct mvs_info *mvi)
2220{
2221 int i;
2222
2223 if (!mvi)
2224 return;
2225
2226 for (i = 0; i < MVS_SLOTS; i++) {
2227 struct mvs_slot_info *slot = &mvi->slot_info[i];
2228
2229 if (slot->buf)
2230 dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
2231 slot->buf, slot->buf_dma);
2232 }
2233
2234 if (mvi->tx)
2235 dma_free_coherent(&mvi->pdev->dev,
2236 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
2237 mvi->tx, mvi->tx_dma);
2238 if (mvi->rx_fis)
2239 dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
2240 mvi->rx_fis, mvi->rx_fis_dma);
2241 if (mvi->rx)
2242 dma_free_coherent(&mvi->pdev->dev,
2243 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
2244 mvi->rx, mvi->rx_dma);
2245 if (mvi->slot)
2246 dma_free_coherent(&mvi->pdev->dev,
2247 sizeof(*mvi->slot) * MVS_SLOTS,
2248 mvi->slot, mvi->slot_dma);
2249#ifdef MVS_ENABLE_PERI
2250 if (mvi->peri_regs)
2251 iounmap(mvi->peri_regs);
2252#endif
2253 if (mvi->regs)
2254 iounmap(mvi->regs);
2255 if (mvi->shost)
2256 scsi_host_put(mvi->shost);
2257 kfree(mvi->sas.sas_port);
2258 kfree(mvi->sas.sas_phy);
2259 kfree(mvi);
2260}
2261
2262/* FIXME: locking? */
2263static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
2264 void *funcdata)
2265{
2266 struct mvs_info *mvi = sas_phy->ha->lldd_ha;
2267 int rc = 0, phy_id = sas_phy->id;
2268 u32 tmp;
2269
2270 tmp = mvs_read_phy_ctl(mvi, phy_id);
2271
2272 switch (func) {
2273 case PHY_FUNC_SET_LINK_RATE:{
2274 struct sas_phy_linkrates *rates = funcdata;
2275 u32 lrmin = 0, lrmax = 0;
2276
2277 lrmin = (rates->minimum_linkrate << 8);
2278 lrmax = (rates->maximum_linkrate << 12);
2279
2280 if (lrmin) {
2281 tmp &= ~(0xf << 8);
2282 tmp |= lrmin;
2283 }
2284 if (lrmax) {
2285 tmp &= ~(0xf << 12);
2286 tmp |= lrmax;
2287 }
2288 mvs_write_phy_ctl(mvi, phy_id, tmp);
2289 break;
2290 }
2291
2292 case PHY_FUNC_HARD_RESET:
2293 if (tmp & PHY_RST_HARD)
2294 break;
2295 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
2296 break;
2297
2298 case PHY_FUNC_LINK_RESET:
2299 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
2300 break;
2301
2302 case PHY_FUNC_DISABLE:
2303 case PHY_FUNC_RELEASE_SPINUP_HOLD:
2304 default:
2305 rc = -EOPNOTSUPP;
2306 }
2307
2308 return rc;
2309}
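/*
 * Example for the SET_LINK_RATE case above (a sketch): the phy
 * control register keeps the minimum link rate in bits 11:8 and the
 * maximum in bits 15:12, so clamping a phy clears each nibble and
 * ORs in (rates->minimum_linkrate << 8) | (rates->maximum_linkrate << 12).
 */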
2310
2311static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
2312{
2313 struct mvs_phy *phy = &mvi->phy[phy_id];
2314 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2315
2316 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
2317 sas_phy->class = SAS;
2318 sas_phy->iproto = SAS_PROTOCOL_ALL;
2319 sas_phy->tproto = 0;
2320 sas_phy->type = PHY_TYPE_PHYSICAL;
2321 sas_phy->role = PHY_ROLE_INITIATOR;
2322 sas_phy->oob_mode = OOB_NOT_CONNECTED;
2323 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
2324
2325 sas_phy->id = phy_id;
2326 sas_phy->sas_addr = &mvi->sas_addr[0];
2327 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
2328 sas_phy->ha = &mvi->sas;
2329 sas_phy->lldd_phy = phy;
2330}
2331
2332static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
2333 const struct pci_device_id *ent)
2334{
2335 struct mvs_info *mvi;
2336 unsigned long res_start, res_len, res_flag;
2337 struct asd_sas_phy **arr_phy;
2338 struct asd_sas_port **arr_port;
2339 const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
2340 int i;
2341
2342 /*
2343 * alloc and init our per-HBA mvs_info struct
2344 */
2345
2346 mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
2347 if (!mvi)
2348 return NULL;
2349
2350 spin_lock_init(&mvi->lock);
2351#ifdef MVS_USE_TASKLET
2352 tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi);
2353#endif
2354 mvi->pdev = pdev;
2355 mvi->chip = chip;
2356
2357 if (pdev->device == 0x6440 && pdev->revision == 0)
2358 mvi->flags |= MVF_PHY_PWR_FIX;
2359
2360 /*
2361 * alloc and init SCSI, SAS glue
2362 */
2363
2364 mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
2365 if (!mvi->shost)
2366 goto err_out;
2367
2368 arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
2369 arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
2370 if (!arr_phy || !arr_port)
2371 goto err_out;
2372
2373 for (i = 0; i < MVS_MAX_PHYS; i++) {
2374 mvs_phy_init(mvi, i);
2375 arr_phy[i] = &mvi->phy[i].sas_phy;
2376 arr_port[i] = &mvi->port[i].sas_port;
2377 mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED;
2378 mvi->port[i].wide_port_phymap = 0;
2379 mvi->port[i].port_attached = 0;
2380 INIT_LIST_HEAD(&mvi->port[i].list);
2381 }
2382
2383 SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
2384 mvi->shost->transportt = mvs_stt;
2385 mvi->shost->max_id = 21;
2386 mvi->shost->max_lun = ~0;
2387 mvi->shost->max_channel = 0;
2388 mvi->shost->max_cmd_len = 16;
2389
2390 mvi->sas.sas_ha_name = DRV_NAME;
2391 mvi->sas.dev = &pdev->dev;
2392 mvi->sas.lldd_module = THIS_MODULE;
2393 mvi->sas.sas_addr = &mvi->sas_addr[0];
2394 mvi->sas.sas_phy = arr_phy;
2395 mvi->sas.sas_port = arr_port;
2396 mvi->sas.num_phys = chip->n_phy;
2397 mvi->sas.lldd_max_execute_num = 1;
2398 mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
2399 mvi->shost->can_queue = MVS_CAN_QUEUE;
2400 mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys;
2401 mvi->sas.lldd_ha = mvi;
2402 mvi->sas.core.shost = mvi->shost;
2403
2404 mvs_tag_init(mvi);
2405
2406 /*
2407 * ioremap main and peripheral registers
2408 */
2409
2410#ifdef MVS_ENABLE_PERI
2411 res_start = pci_resource_start(pdev, 2);
2412 res_len = pci_resource_len(pdev, 2);
2413 if (!res_start || !res_len)
2414 goto err_out;
2415
2416 mvi->peri_regs = ioremap_nocache(res_start, res_len);
2417 if (!mvi->peri_regs)
2418 goto err_out;
2419#endif
2420
2421 res_start = pci_resource_start(pdev, 4);
2422 res_len = pci_resource_len(pdev, 4);
2423 if (!res_start || !res_len)
2424 goto err_out;
2425
2426 res_flag = pci_resource_flags(pdev, 4);
2427 if (res_flag & IORESOURCE_CACHEABLE)
2428 mvi->regs = ioremap(res_start, res_len);
2429 else
2430 mvi->regs = ioremap_nocache(res_start, res_len);
2431
2432 if (!mvi->regs)
2433 goto err_out;
2434
2435 /*
2436 * alloc and init our DMA areas
2437 */
2438
2439 mvi->tx = dma_alloc_coherent(&pdev->dev,
2440 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
2441 &mvi->tx_dma, GFP_KERNEL);
2442 if (!mvi->tx)
2443 goto err_out;
2444 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
2445
2446 mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
2447 &mvi->rx_fis_dma, GFP_KERNEL);
2448 if (!mvi->rx_fis)
2449 goto err_out;
2450 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
2451
2452 mvi->rx = dma_alloc_coherent(&pdev->dev,
2453 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
2454 &mvi->rx_dma, GFP_KERNEL);
2455 if (!mvi->rx)
2456 goto err_out;
2457 memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
2458
2459 mvi->rx[0] = cpu_to_le32(0xfff);
2460 mvi->rx_cons = 0xfff;
2461
2462 mvi->slot = dma_alloc_coherent(&pdev->dev,
2463 sizeof(*mvi->slot) * MVS_SLOTS,
2464 &mvi->slot_dma, GFP_KERNEL);
2465 if (!mvi->slot)
2466 goto err_out;
2467 memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);
2468
2469 for (i = 0; i < MVS_SLOTS; i++) {
2470 struct mvs_slot_info *slot = &mvi->slot_info[i];
2471
2472 slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
2473 &slot->buf_dma, GFP_KERNEL);
2474 if (!slot->buf)
2475 goto err_out;
2476 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
2477 }
2478
2479 /* finally, read NVRAM to get our SAS address */
2480 if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
2481 goto err_out;
2482 return mvi;
2483
2484err_out:
2485 mvs_free(mvi);
2486 return NULL;
2487}
2488
2489static u32 mvs_cr32(void __iomem *regs, u32 addr)
2490{
2491 mw32(CMD_ADDR, addr);
2492 return mr32(CMD_DATA);
2493}
2494
2495static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
2496{
2497 mw32(CMD_ADDR, addr);
2498 mw32(CMD_DATA, val);
2499}
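/*
 * mvs_cr32()/mvs_cw32() implement an indirect address/data window:
 * the target offset is latched into CMD_ADDR and the payload is then
 * read from or written to CMD_DATA.  The two-step access is not
 * atomic, so callers must serialize against each other.  A hedged
 * sketch of a read-modify-write helper built on this pair
 * (hypothetical, not in the driver):
 */
static void mvs_crmw32(void __iomem *regs, u32 addr, u32 clear, u32 set)
{
	u32 tmp = mvs_cr32(regs, addr);

	tmp &= ~clear;
	tmp |= set;
	mvs_cw32(regs, addr, tmp);
}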
2500
2501static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
2502{
2503 void __iomem *regs = mvi->regs;
2504	return (port < 4) ? mr32(P0_SER_CTLSTAT + port * 4) :
2505			    mr32(P4_SER_CTLSTAT + (port - 4) * 4);
2506}
2507
2508static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
2509{
2510 void __iomem *regs = mvi->regs;
2511 if (port < 4)
2512 mw32(P0_SER_CTLSTAT + port * 4, val);
2513 else
2514 mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
2515}
2516
2517static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
2518{
2519 void __iomem *regs = mvi->regs + off;
2520 void __iomem *regs2 = mvi->regs + off2;
2521	return (port < 4) ? readl(regs + port * 8) :
2522			    readl(regs2 + (port - 4) * 8);
2523}
2524
2525static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
2526 u32 port, u32 val)
2527{
2528 void __iomem *regs = mvi->regs + off;
2529 void __iomem *regs2 = mvi->regs + off2;
2530 if (port < 4)
2531 writel(val, regs + port * 8);
2532 else
2533 writel(val, regs2 + (port - 4) * 8);
2534}
2535
2536static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
2537{
2538 return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
2539}
2540
2541static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
2542{
2543 mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
2544}
2545
2546static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
2547{
2548 mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
2549}
2550
2551static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
2552{
2553 return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
2554}
2555
2556static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
2557{
2558 mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
2559}
2560
2561static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
2562{
2563 mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
2564}
2565
2566static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
2567{
2568 return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
2569}
2570
2571static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
2572{
2573 mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
2574}
2575
2576static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
2577{
2578 return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
2579}
2580
2581static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
2582{
2583 mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
2584}
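/*
 * All of the per-port accessors above funnel through mvs_read_port()/
 * mvs_write_port(), which select between two register banks: ports
 * 0-3 use the P0_* offsets and ports 4-7 the P4_* offsets, each port
 * at an 8-byte stride.  Typical use, e.g. masking a single PHY event
 * bit (sketch only; the helper name is hypothetical):
 */
static void mvs_mask_phy_event(struct mvs_info *mvi, u32 port, u32 event)
{
	u32 mask = mvs_read_port_irq_mask(mvi, port);

	mvs_write_port_irq_mask(mvi, port, mask & ~event);
}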
2585
2586static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
2587{
2588 void __iomem *regs = mvi->regs;
2589 u32 tmp;
2590
2591 /* workaround for SATA R-ERR, to ignore phy glitch */
2592 tmp = mvs_cr32(regs, CMD_PHY_TIMER);
2593 tmp &= ~(1 << 9);
2594 tmp |= (1 << 10);
2595 mvs_cw32(regs, CMD_PHY_TIMER, tmp);
2596
2597 /* enable retry 127 times */
2598 mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);
2599
2600 /* extend open frame timeout to max */
2601 tmp = mvs_cr32(regs, CMD_SAS_CTL0);
2602 tmp &= ~0xffff;
2603 tmp |= 0x3fff;
2604 mvs_cw32(regs, CMD_SAS_CTL0, tmp);
2605
2606	/* workaround for WDTIMEOUT, set to 550 ms */
2607 mvs_cw32(regs, CMD_WD_TIMER, 0x86470);
2608
2609	/* do not halt on another port's operation during wide-port link change */
2610 mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);
2611
2612	/* workaround for the Seagate disk-not-found OOB sequence: receive
2613	 * COMINIT before sending out COMWAKE */
2614 tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
2615 tmp &= 0x0000ffff;
2616 tmp |= 0x00fa0000;
2617 mvs_cw32(regs, CMD_PHY_MODE_21, tmp);
2618
2619 tmp = mvs_cr32(regs, CMD_PHY_TIMER);
2620 tmp &= 0x1fffffff;
2621 tmp |= (2U << 29); /* 8 ms retry */
2622 mvs_cw32(regs, CMD_PHY_TIMER, tmp);
2623
2624 /* TEST - for phy decoding error, adjust voltage levels */
2625 mw32(P0_VSR_ADDR + 0, 0x8);
2626 mw32(P0_VSR_DATA + 0, 0x2F0);
2627
2628 mw32(P0_VSR_ADDR + 8, 0x8);
2629 mw32(P0_VSR_DATA + 8, 0x2F0);
2630
2631 mw32(P0_VSR_ADDR + 16, 0x8);
2632 mw32(P0_VSR_DATA + 16, 0x2F0);
2633
2634 mw32(P0_VSR_ADDR + 24, 0x8);
2635 mw32(P0_VSR_DATA + 24, 0x2F0);
2636
2637}
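/*
 * The four unrolled register pairs at the end of mvs_phy_hacks()
 * program VSR register 8 (voltage level) to 0x2F0 on each of ports
 * 0-3; the VSR address/data window repeats at an 8-byte stride.  An
 * equivalent loop -- a sketch assuming exactly four ports -- would be:
 */
static void __devinit mvs_set_phy_voltage(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;

	for (i = 0; i < 4; i++) {
		mw32(P0_VSR_ADDR + i * 8, 0x8);		/* select VSR reg 8 */
		mw32(P0_VSR_DATA + i * 8, 0x2F0);	/* voltage level */
	}
}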
2638
2639static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
2640{
2641 void __iomem *regs = mvi->regs;
2642 u32 tmp;
2643
2644 tmp = mr32(PCS);
2645 if (mvi->chip->n_phy <= 4)
2646 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
2647 else
2648 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
2649 mw32(PCS, tmp);
2650}
2651
2652static void mvs_detect_porttype(struct mvs_info *mvi, int i)
2653{
2654 void __iomem *regs = mvi->regs;
2655 u32 reg;
2656 struct mvs_phy *phy = &mvi->phy[i];
2657
2658 /* TODO check & save device type */
2659 reg = mr32(GBL_PORT_TYPE);
2660
2661 if (reg & MODE_SAS_SATA & (1 << i))
2662 phy->phy_type |= PORT_TYPE_SAS;
2663 else
2664 phy->phy_type |= PORT_TYPE_SATA;
2665}
2666
2667static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
2668{
2669 u32 *s = (u32 *) buf;
2670
2671 if (!s)
2672 return NULL;
2673
2674 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
2675 s[3] = mvs_read_port_cfg_data(mvi, i);
2676
2677 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
2678 s[2] = mvs_read_port_cfg_data(mvi, i);
2679
2680 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
2681 s[1] = mvs_read_port_cfg_data(mvi, i);
2682
2683 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
2684 s[0] = mvs_read_port_cfg_data(mvi, i);
2685
2686 return (void *)s;
2687}
2688
2689static u32 mvs_is_sig_fis_received(u32 irq_status)
2690{
2691 return irq_status & PHYEV_SIG_FIS;
2692}
2693
2694static void mvs_update_wideport(struct mvs_info *mvi, int i)
2695{
2696 struct mvs_phy *phy = &mvi->phy[i];
2697 struct mvs_port *port = phy->port;
2698 int j, no;
2699
2700 for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
2701 if (no & 1) {
2702 mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
2703 mvs_write_port_cfg_data(mvi, no,
2704 port->wide_port_phymap);
2705 } else {
2706 mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
2707 mvs_write_port_cfg_data(mvi, no, 0);
2708 }
2709}
2710
2711static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
2712{
2713 u32 tmp;
2714 struct mvs_phy *phy = &mvi->phy[i];
2715	struct mvs_port *port = phy->port;
2716
2717 tmp = mvs_read_phy_ctl(mvi, i);
2718
2719 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
2720 if (!port)
2721 phy->phy_attached = 1;
2722 return tmp;
2723 }
2724
2725 if (port) {
2726 if (phy->phy_type & PORT_TYPE_SAS) {
2727 port->wide_port_phymap &= ~(1U << i);
2728 if (!port->wide_port_phymap)
2729 port->port_attached = 0;
2730 mvs_update_wideport(mvi, i);
2731 } else if (phy->phy_type & PORT_TYPE_SATA)
2732 port->port_attached = 0;
2733 mvs_free_reg_set(mvi, phy->port);
2734 phy->port = NULL;
2735 phy->phy_attached = 0;
2736 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
2737 }
2738 return 0;
2739}
2740
2741static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
2742 int get_st)
2743{
2744 struct mvs_phy *phy = &mvi->phy[i];
2745 struct pci_dev *pdev = mvi->pdev;
2746 u32 tmp;
2747 u64 tmp64;
2748
2749 mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
2750 phy->dev_info = mvs_read_port_cfg_data(mvi, i);
2751
2752 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
2753 phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;
2754
2755 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
2756 phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
2757
2758 if (get_st) {
2759 phy->irq_status = mvs_read_port_irq_stat(mvi, i);
2760 phy->phy_status = mvs_is_phy_ready(mvi, i);
2761 }
2762
2763 if (phy->phy_status) {
2764 u32 phy_st;
2765 struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
2766
2767 mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
2768 phy_st = mvs_read_port_cfg_data(mvi, i);
2769
2770 sas_phy->linkrate =
2771 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
2772 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
2773 phy->minimum_linkrate =
2774 (phy->phy_status &
2775 PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
2776 phy->maximum_linkrate =
2777 (phy->phy_status &
2778 PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
2779
2780 if (phy->phy_type & PORT_TYPE_SAS) {
2781			/* Update attached_sas_addr */
2782 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
2783 phy->att_dev_sas_addr =
2784 (u64) mvs_read_port_cfg_data(mvi, i) << 32;
2785 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
2786 phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
2787 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
2788 phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
2789 phy->identify.device_type =
2790 phy->att_dev_info & PORT_DEV_TYPE_MASK;
2791
2792 if (phy->identify.device_type == SAS_END_DEV)
2793 phy->identify.target_port_protocols =
2794 SAS_PROTOCOL_SSP;
2795 else if (phy->identify.device_type != NO_DEVICE)
2796 phy->identify.target_port_protocols =
2797 SAS_PROTOCOL_SMP;
2798 if (phy_st & PHY_OOB_DTCTD)
2799 sas_phy->oob_mode = SAS_OOB_MODE;
2800 phy->frame_rcvd_size =
2801 sizeof(struct sas_identify_frame);
2802 } else if (phy->phy_type & PORT_TYPE_SATA) {
2803 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
2804 if (mvs_is_sig_fis_received(phy->irq_status)) {
2805 phy->att_dev_sas_addr = i; /* temp */
2806 if (phy_st & PHY_OOB_DTCTD)
2807 sas_phy->oob_mode = SATA_OOB_MODE;
2808 phy->frame_rcvd_size =
2809 sizeof(struct dev_to_host_fis);
2810 mvs_get_d2h_reg(mvi, i,
2811 (void *)sas_phy->frame_rcvd);
2812 } else {
2813 dev_printk(KERN_DEBUG, &pdev->dev,
2814 "No sig fis\n");
2815 phy->phy_type &= ~(PORT_TYPE_SATA);
2816 goto out_done;
2817 }
2818 }
2819 tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
2820 memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);
2821
2822 dev_printk(KERN_DEBUG, &pdev->dev,
2823 "phy[%d] Get Attached Address 0x%llX ,"
2824 " SAS Address 0x%llX\n",
2825 i,
2826 (unsigned long long)phy->att_dev_sas_addr,
2827 (unsigned long long)phy->dev_sas_addr);
2828 dev_printk(KERN_DEBUG, &pdev->dev,
2829 "Rate = %x , type = %d\n",
2830 sas_phy->linkrate, phy->phy_type);
2831
2832 /* workaround for HW phy decoding error on 1.5g disk drive */
2833 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
2834 tmp = mvs_read_port_vsr_data(mvi, i);
2835 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
2836 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
2837 SAS_LINK_RATE_1_5_GBPS)
2838 tmp &= ~PHY_MODE6_LATECLK;
2839 else
2840 tmp |= PHY_MODE6_LATECLK;
2841 mvs_write_port_vsr_data(mvi, i, tmp);
2842
2843 }
2844out_done:
2845 if (get_st)
2846 mvs_write_port_irq_stat(mvi, i, phy->irq_status);
2847}
2848
2849static void mvs_port_formed(struct asd_sas_phy *sas_phy)
2850{
2851 struct sas_ha_struct *sas_ha = sas_phy->ha;
2852 struct mvs_info *mvi = sas_ha->lldd_ha;
2853 struct asd_sas_port *sas_port = sas_phy->port;
2854 struct mvs_phy *phy = sas_phy->lldd_phy;
2855 struct mvs_port *port = &mvi->port[sas_port->id];
2856 unsigned long flags;
2857
2858 spin_lock_irqsave(&mvi->lock, flags);
2859 port->port_attached = 1;
2860 phy->port = port;
2861 port->taskfileset = MVS_ID_NOT_MAPPED;
2862 if (phy->phy_type & PORT_TYPE_SAS) {
2863 port->wide_port_phymap = sas_port->phy_mask;
2864 mvs_update_wideport(mvi, sas_phy->id);
2865 }
2866 spin_unlock_irqrestore(&mvi->lock, flags);
2867}
2868
2869static int mvs_I_T_nexus_reset(struct domain_device *dev)
2870{
2871 return TMF_RESP_FUNC_FAILED;
2872}
2873
2874static int __devinit mvs_hw_init(struct mvs_info *mvi)
2875{
2876 void __iomem *regs = mvi->regs;
2877 int i;
2878 u32 tmp, cctl;
2879
2880 /* make sure interrupts are masked immediately (paranoia) */
2881 mw32(GBL_CTL, 0);
2882 tmp = mr32(GBL_CTL);
2883
2884 /* Reset Controller */
2885 if (!(tmp & HBA_RST)) {
2886 if (mvi->flags & MVF_PHY_PWR_FIX) {
2887 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
2888 tmp &= ~PCTL_PWR_ON;
2889 tmp |= PCTL_OFF;
2890 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
2891
2892 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
2893 tmp &= ~PCTL_PWR_ON;
2894 tmp |= PCTL_OFF;
2895 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
2896 }
2897
2898 /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
2899 mw32_f(GBL_CTL, HBA_RST);
2900 }
2901
2902 /* wait for reset to finish; timeout is just a guess */
2903 i = 1000;
2904 while (i-- > 0) {
2905 msleep(10);
2906
2907 if (!(mr32(GBL_CTL) & HBA_RST))
2908 break;
2909 }
2910 if (mr32(GBL_CTL) & HBA_RST) {
2911 dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
2912 return -EBUSY;
2913 }
2914
2915 /* Init Chip */
2916 /* make sure RST is set; HBA_RST /should/ have done that for us */
2917 cctl = mr32(CTL);
2918 if (cctl & CCTL_RST)
2919 cctl &= ~CCTL_RST;
2920 else
2921 mw32_f(CTL, cctl | CCTL_RST);
2922
2923 /* write to device control _AND_ device status register? - A.C. */
2924 pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
2925 tmp &= ~PRD_REQ_MASK;
2926 tmp |= PRD_REQ_SIZE;
2927 pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
2928
2929 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
2930 tmp |= PCTL_PWR_ON;
2931 tmp &= ~PCTL_OFF;
2932 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
2933
2934 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
2935 tmp |= PCTL_PWR_ON;
2936 tmp &= ~PCTL_OFF;
2937 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
2938
2939 mw32_f(CTL, cctl);
2940
2941 /* reset control */
2942 mw32(PCS, 0); /*MVS_PCS */
2943
2944 mvs_phy_hacks(mvi);
2945
2946 mw32(CMD_LIST_LO, mvi->slot_dma);
2947 mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
2948
2949 mw32(RX_FIS_LO, mvi->rx_fis_dma);
2950 mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
2951
2952 mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
2953 mw32(TX_LO, mvi->tx_dma);
2954 mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);
2955
2956 mw32(RX_CFG, MVS_RX_RING_SZ);
2957 mw32(RX_LO, mvi->rx_dma);
2958 mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);
2959
2960 /* enable auto port detection */
2961 mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
2962 msleep(1100);
2963 /* init and reset phys */
2964 for (i = 0; i < mvi->chip->n_phy; i++) {
2965 u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
2966 u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);
2967
2968 mvs_detect_porttype(mvi, i);
2969
2970 /* set phy local SAS address */
2971 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
2972 mvs_write_port_cfg_data(mvi, i, lo);
2973 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
2974 mvs_write_port_cfg_data(mvi, i, hi);
2975
2976 /* reset phy */
2977 tmp = mvs_read_phy_ctl(mvi, i);
2978 tmp |= PHY_RST;
2979 mvs_write_phy_ctl(mvi, i, tmp);
2980 }
2981
2982 msleep(100);
2983
2984 for (i = 0; i < mvi->chip->n_phy; i++) {
2985 /* clear phy int status */
2986 tmp = mvs_read_port_irq_stat(mvi, i);
2987 tmp &= ~PHYEV_SIG_FIS;
2988 mvs_write_port_irq_stat(mvi, i, tmp);
2989
2990 /* set phy int mask */
2991 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
2992 PHYEV_ID_DONE | PHYEV_DEC_ERR;
2993 mvs_write_port_irq_mask(mvi, i, tmp);
2994
2995 msleep(100);
2996 mvs_update_phyinfo(mvi, i, 1);
2997 mvs_enable_xmt(mvi, i);
2998 }
2999
3000 /* FIXME: update wide port bitmaps */
3001
3002 /* little endian for open address and command table, etc. */
3003 /* A.C.
3004	 * it seems that (from the spec) turning on big-endian won't
3005	 * do us any good on big-endian machines; needs further confirmation
3006 */
3007 cctl = mr32(CTL);
3008 cctl |= CCTL_ENDIAN_CMD;
3009 cctl |= CCTL_ENDIAN_DATA;
3010 cctl &= ~CCTL_ENDIAN_OPEN;
3011 cctl |= CCTL_ENDIAN_RSP;
3012 mw32_f(CTL, cctl);
3013
3014 /* reset CMD queue */
3015 tmp = mr32(PCS);
3016 tmp |= PCS_CMD_RST;
3017 mw32(PCS, tmp);
3018	/* interrupt coalescing may cause a missed HW interrupt in some cases:
3019	 * the max coalescing count is 0x1ff while our max slot count is 0x200,
3020	 * so leave the count at 0 (coalescing disabled).
3021	 */
3022 tmp = 0;
3023 mw32(INT_COAL, tmp);
3024
3025 tmp = 0x100;
3026 mw32(INT_COAL_TMOUT, tmp);
3027
3028 /* ladies and gentlemen, start your engines */
3029 mw32(TX_CFG, 0);
3030 mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
3031 mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
3032 /* enable CMD/CMPL_Q/RESP mode */
3033 mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
3034
3035 /* enable completion queue interrupt */
3036 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS);
3037 mw32(INT_MASK, tmp);
3038
3039 /* Enable SRS interrupt */
3040 mw32(INT_MASK_SRS, 0xFF);
3041 return 0;
3042}
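/*
 * The "(dma >> 16) >> 16" idiom used throughout mvs_hw_init() yields
 * the upper 32 bits of a dma_addr_t without provoking a
 * shift-count-overflow warning when dma_addr_t is only 32 bits wide.
 * Expressed as a helper (a sketch, not driver API):
 */
static inline void mvs_write_dma64(void __iomem *lo, void __iomem *hi,
				   dma_addr_t dma)
{
	writel((u32)dma, lo);			/* low 32 bits */
	writel((u32)((dma >> 16) >> 16), hi);	/* high 32 bits, or 0 */
}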
3043
3044static void __devinit mvs_print_info(struct mvs_info *mvi)
3045{
3046 struct pci_dev *pdev = mvi->pdev;
3047 static int printed_version;
3048
3049 if (!printed_version++)
3050 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3051
3052 dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
3053 mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
3054}
3055
3056static int __devinit mvs_pci_init(struct pci_dev *pdev,
3057 const struct pci_device_id *ent)
3058{
3059 int rc;
3060 struct mvs_info *mvi;
3061 irq_handler_t irq_handler = mvs_interrupt;
3062
3063 rc = pci_enable_device(pdev);
3064 if (rc)
3065 return rc;
3066
3067 pci_set_master(pdev);
3068
3069 rc = pci_request_regions(pdev, DRV_NAME);
3070 if (rc)
3071 goto err_out_disable;
3072
3073 rc = pci_go_64(pdev);
3074 if (rc)
3075 goto err_out_regions;
3076
3077 mvi = mvs_alloc(pdev, ent);
3078 if (!mvi) {
3079 rc = -ENOMEM;
3080 goto err_out_regions;
3081 }
3082
3083 rc = mvs_hw_init(mvi);
3084 if (rc)
3085 goto err_out_mvi;
3086
3087#ifndef MVS_DISABLE_MSI
3088 if (!pci_enable_msi(pdev)) {
3089 u32 tmp;
3090 void __iomem *regs = mvi->regs;
3091 mvi->flags |= MVF_MSI;
3092 irq_handler = mvs_msi_interrupt;
3093 tmp = mr32(PCS);
3094 mw32(PCS, tmp | PCS_SELF_CLEAR);
3095 }
3096#endif
3097
3098 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
3099 if (rc)
3100 goto err_out_msi;
3101
3102 rc = scsi_add_host(mvi->shost, &pdev->dev);
3103 if (rc)
3104 goto err_out_irq;
3105
3106 rc = sas_register_ha(&mvi->sas);
3107 if (rc)
3108 goto err_out_shost;
3109
3110 pci_set_drvdata(pdev, mvi);
3111
3112 mvs_print_info(mvi);
3113
3114 mvs_hba_interrupt_enable(mvi);
3115
3116 scsi_scan_host(mvi->shost);
3117
3118 return 0;
3119
3120err_out_shost:
3121 scsi_remove_host(mvi->shost);
3122err_out_irq:
3123 free_irq(pdev->irq, mvi);
3124err_out_msi:
3125	if (mvi->flags & MVF_MSI)
3126 pci_disable_msi(pdev);
3127err_out_mvi:
3128 mvs_free(mvi);
3129err_out_regions:
3130 pci_release_regions(pdev);
3131err_out_disable:
3132 pci_disable_device(pdev);
3133 return rc;
3134}
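/*
 * mvs_pci_init() unwinds with the usual goto-ladder pattern: each
 * err_out_* label releases exactly what was acquired before the jump,
 * in reverse order of acquisition.  The condensed shape (pseudo-C
 * sketch, names generic):
 *
 *	rc = acquire_a();	if (rc) return rc;
 *	rc = acquire_b();	if (rc) goto err_a;
 *	rc = acquire_c();	if (rc) goto err_b;
 *	return 0;
 * err_b:	release_b();
 * err_a:	release_a();
 *	return rc;
 */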
3135
3136static void __devexit mvs_pci_remove(struct pci_dev *pdev)
3137{
3138 struct mvs_info *mvi = pci_get_drvdata(pdev);
3139
3140 pci_set_drvdata(pdev, NULL);
3141
3142 if (mvi) {
3143 sas_unregister_ha(&mvi->sas);
3144 mvs_hba_interrupt_disable(mvi);
3145 sas_remove_host(mvi->shost);
3146 scsi_remove_host(mvi->shost);
3147
3148 free_irq(pdev->irq, mvi);
3149 if (mvi->flags & MVF_MSI)
3150 pci_disable_msi(pdev);
3151 mvs_free(mvi);
3152 pci_release_regions(pdev);
3153 }
3154 pci_disable_device(pdev);
3155}
3156
3157static struct sas_domain_function_template mvs_transport_ops = {
3158 .lldd_execute_task = mvs_task_exec,
3159 .lldd_control_phy = mvs_phy_control,
3160 .lldd_abort_task = mvs_task_abort,
3161 .lldd_port_formed = mvs_port_formed,
3162 .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
3163};
3164
3165static struct pci_device_id __devinitdata mvs_pci_table[] = {
3166 { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
3167 { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
3168 {
3169 .vendor = PCI_VENDOR_ID_MARVELL,
3170 .device = 0x6440,
3171 .subvendor = PCI_ANY_ID,
3172 .subdevice = 0x6480,
3173 .class = 0,
3174 .class_mask = 0,
3175 .driver_data = chip_6480,
3176 },
3177 { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
3178 { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },
3179
3180 { } /* terminate list */
3181};
3182
3183static struct pci_driver mvs_pci_driver = {
3184 .name = DRV_NAME,
3185 .id_table = mvs_pci_table,
3186 .probe = mvs_pci_init,
3187 .remove = __devexit_p(mvs_pci_remove),
3188};
3189
3190static int __init mvs_init(void)
3191{
3192 int rc;
3193
3194 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
3195 if (!mvs_stt)
3196 return -ENOMEM;
3197
3198 rc = pci_register_driver(&mvs_pci_driver);
3199 if (rc)
3200 goto err_out;
3201
3202 return 0;
3203
3204err_out:
3205 sas_release_transport(mvs_stt);
3206 return rc;
3207}
3208
3209static void __exit mvs_exit(void)
3210{
3211 pci_unregister_driver(&mvs_pci_driver);
3212 sas_release_transport(mvs_stt);
3213}
3214
3215module_init(mvs_init);
3216module_exit(mvs_exit);
3217
3218MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
3219MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
3220MODULE_VERSION(DRV_VERSION);
3221MODULE_LICENSE("GPL");
3222MODULE_DEVICE_TABLE(pci, mvs_pci_table);
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
new file mode 100644
index 00000000000..6de7af27e50
--- /dev/null
+++ b/drivers/scsi/mvsas/Kconfig
@@ -0,0 +1,42 @@
1#
2# Kernel configuration file for 88SE64XX/88SE94XX SAS/SATA driver.
3#
4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com>
6#
7# This file is licensed under GPLv2.
8#
9# This file is part of the 88SE64XX/88SE94XX driver.
10#
11# The 88SE64XX/88SE94XX driver is free software; you can redistribute
12# it and/or modify it under the terms of the GNU General Public License
13# as published by the Free Software Foundation; version 2 of the
14# License.
15#
16# The 88SE64XX/88SE94XX driver is distributed in the hope that it will be
17# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19# General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with 88SE64XX/88SE94XX Driver; if not, write to the Free Software
23# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24#
25#
26
27config SCSI_MVSAS
28 tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support"
29 depends on PCI
30 select SCSI_SAS_LIBSAS
31 select FW_LOADER
32 help
33 This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX and 6Gb/s
34	  PCI-E 88SE94XX chip-based host adapters.
35
36config SCSI_MVSAS_DEBUG
37 bool "Compile in debug mode"
38 default y
39 depends on SCSI_MVSAS
40 help
41 Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode,
42 the driver prints some messages to the console.
diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile
new file mode 100644
index 00000000000..52ac4264677
--- /dev/null
+++ b/drivers/scsi/mvsas/Makefile
@@ -0,0 +1,32 @@
1#
2# Makefile for Marvell 88SE64xx/88SE94xx SAS/SATA driver.
3#
4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com>
6#
7# This file is licensed under GPLv2.
8#
9# This program is free software; you can redistribute it and/or
10# modify it under the terms of the GNU General Public License as
11# published by the Free Software Foundation; version 2 of the
12# License.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17# General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program; if not, write to the Free Software
21# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22# USA
23
24ifeq ($(CONFIG_SCSI_MVSAS_DEBUG),y)
25 EXTRA_CFLAGS += -DMV_DEBUG
26endif
27
28obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
29mvsas-y += mv_init.o \
30 mv_sas.o \
31 mv_64xx.o \
32 mv_94xx.o
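#
# The mvsas-y list above links four objects into a single composite
# module, mvsas.ko; obj-$(CONFIG_SCSI_MVSAS) then pulls it into the
# build only when the option is y or m.  For example (a sketch of an
# out-of-tree module build, paths assumed):
#
#   make -C /lib/modules/$(uname -r)/build M=$PWD modules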
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
new file mode 100644
index 00000000000..10a5077b6ae
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -0,0 +1,793 @@
1/*
2 * Marvell 88SE64xx hardware specific
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#include "mv_sas.h"
26#include "mv_64xx.h"
27#include "mv_chips.h"
28
29static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
30{
31 void __iomem *regs = mvi->regs;
32 u32 reg;
33 struct mvs_phy *phy = &mvi->phy[i];
34
35 /* TODO check & save device type */
36 reg = mr32(MVS_GBL_PORT_TYPE);
37 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
38 if (reg & MODE_SAS_SATA & (1 << i))
39 phy->phy_type |= PORT_TYPE_SAS;
40 else
41 phy->phy_type |= PORT_TYPE_SATA;
42}
43
44static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
45{
46 void __iomem *regs = mvi->regs;
47 u32 tmp;
48
49 tmp = mr32(MVS_PCS);
50 if (mvi->chip->n_phy <= 4)
51 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
52 else
53 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
54 mw32(MVS_PCS, tmp);
55}
56
57static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
58{
59 void __iomem *regs = mvi->regs;
60
61 mvs_phy_hacks(mvi);
62
63 if (!(mvi->flags & MVF_FLAG_SOC)) {
64 /* TEST - for phy decoding error, adjust voltage levels */
65 mw32(MVS_P0_VSR_ADDR + 0, 0x8);
66 mw32(MVS_P0_VSR_DATA + 0, 0x2F0);
67
68 mw32(MVS_P0_VSR_ADDR + 8, 0x8);
69 mw32(MVS_P0_VSR_DATA + 8, 0x2F0);
70
71 mw32(MVS_P0_VSR_ADDR + 16, 0x8);
72 mw32(MVS_P0_VSR_DATA + 16, 0x2F0);
73
74 mw32(MVS_P0_VSR_ADDR + 24, 0x8);
75 mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
76 } else {
77 int i;
78 /* disable auto port detection */
79 mw32(MVS_GBL_PORT_TYPE, 0);
80 for (i = 0; i < mvi->chip->n_phy; i++) {
81 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7);
82 mvs_write_port_vsr_data(mvi, i, 0x90000000);
83 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9);
84 mvs_write_port_vsr_data(mvi, i, 0x50f2);
85 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11);
86 mvs_write_port_vsr_data(mvi, i, 0x0e);
87 }
88 }
89}
90
91static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
92{
93 void __iomem *regs = mvi->regs;
94 u32 reg, tmp;
95
96 if (!(mvi->flags & MVF_FLAG_SOC)) {
97 if (phy_id < 4)
98 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
99 else
100 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
101
102 } else
103 reg = mr32(MVS_PHY_CTL);
104
105 tmp = reg;
106 if (phy_id < 4)
107 tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
108 else
109 tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS;
110
111 if (!(mvi->flags & MVF_FLAG_SOC)) {
112 if (phy_id < 4) {
113 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
114 mdelay(10);
115 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
116 } else {
117 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
118 mdelay(10);
119 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg);
120 }
121 } else {
122 mw32(MVS_PHY_CTL, tmp);
123 mdelay(10);
124 mw32(MVS_PHY_CTL, reg);
125 }
126}
127
128static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
129{
130 u32 tmp;
131 tmp = mvs_read_port_irq_stat(mvi, phy_id);
132 tmp &= ~PHYEV_RDY_CH;
133 mvs_write_port_irq_stat(mvi, phy_id, tmp);
134 tmp = mvs_read_phy_ctl(mvi, phy_id);
135 if (hard)
136 tmp |= PHY_RST_HARD;
137 else
138 tmp |= PHY_RST;
139 mvs_write_phy_ctl(mvi, phy_id, tmp);
140 if (hard) {
141 do {
142 tmp = mvs_read_phy_ctl(mvi, phy_id);
143 } while (tmp & PHY_RST_HARD);
144 }
145}
146
147static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
148{
149 void __iomem *regs = mvi->regs;
150 u32 tmp;
151 int i;
152
153 /* make sure interrupts are masked immediately (paranoia) */
154 mw32(MVS_GBL_CTL, 0);
155 tmp = mr32(MVS_GBL_CTL);
156
157 /* Reset Controller */
158 if (!(tmp & HBA_RST)) {
159 if (mvi->flags & MVF_PHY_PWR_FIX) {
160 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
161 tmp &= ~PCTL_PWR_OFF;
162 tmp |= PCTL_PHY_DSBL;
163 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
164
165 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
166 tmp &= ~PCTL_PWR_OFF;
167 tmp |= PCTL_PHY_DSBL;
168 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
169 }
170 }
171
172 /* make sure interrupts are masked immediately (paranoia) */
173 mw32(MVS_GBL_CTL, 0);
174 tmp = mr32(MVS_GBL_CTL);
175
176 /* Reset Controller */
177 if (!(tmp & HBA_RST)) {
178 /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
179 mw32_f(MVS_GBL_CTL, HBA_RST);
180 }
181
182 /* wait for reset to finish; timeout is just a guess */
183 i = 1000;
184 while (i-- > 0) {
185 msleep(10);
186
187 if (!(mr32(MVS_GBL_CTL) & HBA_RST))
188 break;
189 }
190 if (mr32(MVS_GBL_CTL) & HBA_RST) {
191 dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n");
192 return -EBUSY;
193 }
194 return 0;
195}
196
197static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
198{
199 void __iomem *regs = mvi->regs;
200 u32 tmp;
201 if (!(mvi->flags & MVF_FLAG_SOC)) {
202 u32 offs;
203 if (phy_id < 4)
204 offs = PCR_PHY_CTL;
205 else {
206 offs = PCR_PHY_CTL2;
207 phy_id -= 4;
208 }
209 pci_read_config_dword(mvi->pdev, offs, &tmp);
210 tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
211 pci_write_config_dword(mvi->pdev, offs, tmp);
212 } else {
213 tmp = mr32(MVS_PHY_CTL);
214 tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
215 mw32(MVS_PHY_CTL, tmp);
216 }
217}
218
219static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
220{
221 void __iomem *regs = mvi->regs;
222 u32 tmp;
223 if (!(mvi->flags & MVF_FLAG_SOC)) {
224 u32 offs;
225 if (phy_id < 4)
226 offs = PCR_PHY_CTL;
227 else {
228 offs = PCR_PHY_CTL2;
229 phy_id -= 4;
230 }
231 pci_read_config_dword(mvi->pdev, offs, &tmp);
232 tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
233 pci_write_config_dword(mvi->pdev, offs, tmp);
234 } else {
235 tmp = mr32(MVS_PHY_CTL);
236 tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
237 mw32(MVS_PHY_CTL, tmp);
238 }
239}
240
241static int __devinit mvs_64xx_init(struct mvs_info *mvi)
242{
243 void __iomem *regs = mvi->regs;
244 int i;
245 u32 tmp, cctl;
246
247 if (mvi->pdev && mvi->pdev->revision == 0)
248 mvi->flags |= MVF_PHY_PWR_FIX;
249 if (!(mvi->flags & MVF_FLAG_SOC)) {
250 mvs_show_pcie_usage(mvi);
251 tmp = mvs_64xx_chip_reset(mvi);
252 if (tmp)
253 return tmp;
254 } else {
255 tmp = mr32(MVS_PHY_CTL);
256 tmp &= ~PCTL_PWR_OFF;
257 tmp |= PCTL_PHY_DSBL;
258 mw32(MVS_PHY_CTL, tmp);
259 }
260
261 /* Init Chip */
262 /* make sure RST is set; HBA_RST /should/ have done that for us */
263 cctl = mr32(MVS_CTL) & 0xFFFF;
264 if (cctl & CCTL_RST)
265 cctl &= ~CCTL_RST;
266 else
267 mw32_f(MVS_CTL, cctl | CCTL_RST);
268
269 if (!(mvi->flags & MVF_FLAG_SOC)) {
270 /* write to device control _AND_ device status register */
271 pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
272 tmp &= ~PRD_REQ_MASK;
273 tmp |= PRD_REQ_SIZE;
274 pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
275
276 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
277 tmp &= ~PCTL_PWR_OFF;
278 tmp &= ~PCTL_PHY_DSBL;
279 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
280
281 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
282		tmp &= ~PCTL_PWR_OFF;
283 tmp &= ~PCTL_PHY_DSBL;
284 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
285 } else {
286 tmp = mr32(MVS_PHY_CTL);
287 tmp &= ~PCTL_PWR_OFF;
288 tmp |= PCTL_COM_ON;
289 tmp &= ~PCTL_PHY_DSBL;
290 tmp |= PCTL_LINK_RST;
291 mw32(MVS_PHY_CTL, tmp);
292 msleep(100);
293 tmp &= ~PCTL_LINK_RST;
294 mw32(MVS_PHY_CTL, tmp);
295 msleep(100);
296 }
297
298 /* reset control */
299 mw32(MVS_PCS, 0); /* MVS_PCS */
300 /* init phys */
301 mvs_64xx_phy_hacks(mvi);
302
303 /* enable auto port detection */
304 mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
305
306 mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
307 mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
308
309 mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
310 mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
311
312 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
313 mw32(MVS_TX_LO, mvi->tx_dma);
314 mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
315
316 mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
317 mw32(MVS_RX_LO, mvi->rx_dma);
318 mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
319
320 for (i = 0; i < mvi->chip->n_phy; i++) {
321 /* set phy local SAS address */
322 /* should set little endian SAS address to 64xx chip */
323 mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
324 cpu_to_be64(mvi->phy[i].dev_sas_addr));
325
326 mvs_64xx_enable_xmt(mvi, i);
327
328 mvs_64xx_phy_reset(mvi, i, 1);
329 msleep(500);
330 mvs_64xx_detect_porttype(mvi, i);
331 }
332 if (mvi->flags & MVF_FLAG_SOC) {
333 /* set select registers */
334 writel(0x0E008000, regs + 0x000);
335 writel(0x59000008, regs + 0x004);
336 writel(0x20, regs + 0x008);
337 writel(0x20, regs + 0x00c);
338 writel(0x20, regs + 0x010);
339 writel(0x20, regs + 0x014);
340 writel(0x20, regs + 0x018);
341 writel(0x20, regs + 0x01c);
342 }
343 for (i = 0; i < mvi->chip->n_phy; i++) {
344 /* clear phy int status */
345 tmp = mvs_read_port_irq_stat(mvi, i);
346 tmp &= ~PHYEV_SIG_FIS;
347 mvs_write_port_irq_stat(mvi, i, tmp);
348
349 /* set phy int mask */
350 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
351 PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
352 PHYEV_DEC_ERR;
353 mvs_write_port_irq_mask(mvi, i, tmp);
354
355 msleep(100);
356 mvs_update_phyinfo(mvi, i, 1);
357 }
358
359 /* FIXME: update wide port bitmaps */
360
361 /* little endian for open address and command table, etc. */
362 /*
363	 * it seems that (from the spec) turning on big-endian won't
364	 * do us any good on big-endian machines; needs further confirmation
365 */
366 cctl = mr32(MVS_CTL);
367 cctl |= CCTL_ENDIAN_CMD;
368 cctl |= CCTL_ENDIAN_DATA;
369 cctl &= ~CCTL_ENDIAN_OPEN;
370 cctl |= CCTL_ENDIAN_RSP;
371 mw32_f(MVS_CTL, cctl);
372
373 /* reset CMD queue */
374 tmp = mr32(MVS_PCS);
375 tmp |= PCS_CMD_RST;
376 mw32(MVS_PCS, tmp);
377	/* interrupt coalescing may cause a missed HW interrupt in some cases:
378	 * the max coalescing count is 0x1ff while our max slot count is 0x200,
379	 * so leave the count at 0 (coalescing disabled).
380	 */
381 tmp = 0;
382 mw32(MVS_INT_COAL, tmp);
383
384 tmp = 0x100;
385 mw32(MVS_INT_COAL_TMOUT, tmp);
386
387 /* ladies and gentlemen, start your engines */
388 mw32(MVS_TX_CFG, 0);
389 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
390 mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
391 /* enable CMD/CMPL_Q/RESP mode */
392 mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
393 PCS_CMD_EN | PCS_CMD_STOP_ERR);
394
395 /* enable completion queue interrupt */
396 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
397 CINT_DMA_PCIE);
398
399 mw32(MVS_INT_MASK, tmp);
400
401 /* Enable SRS interrupt */
402 mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
403
404 return 0;
405}
406
407static int mvs_64xx_ioremap(struct mvs_info *mvi)
408{
409 if (!mvs_ioremap(mvi, 4, 2))
410 return 0;
411 return -1;
412}
413
414static void mvs_64xx_iounmap(struct mvs_info *mvi)
415{
416 mvs_iounmap(mvi->regs);
417 mvs_iounmap(mvi->regs_ex);
418}
419
420static void mvs_64xx_interrupt_enable(struct mvs_info *mvi)
421{
422 void __iomem *regs = mvi->regs;
423 u32 tmp;
424
425 tmp = mr32(MVS_GBL_CTL);
426 mw32(MVS_GBL_CTL, tmp | INT_EN);
427}
428
429static void mvs_64xx_interrupt_disable(struct mvs_info *mvi)
430{
431 void __iomem *regs = mvi->regs;
432 u32 tmp;
433
434 tmp = mr32(MVS_GBL_CTL);
435 mw32(MVS_GBL_CTL, tmp & ~INT_EN);
436}
437
438static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq)
439{
440 void __iomem *regs = mvi->regs;
441 u32 stat;
442
443 if (!(mvi->flags & MVF_FLAG_SOC)) {
444 stat = mr32(MVS_GBL_INT_STAT);
445
446 if (stat == 0 || stat == 0xffffffff)
447 return 0;
448 } else
449 stat = 1;
450 return stat;
451}
452
453static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
454{
455 void __iomem *regs = mvi->regs;
456
457 /* clear CMD_CMPLT ASAP */
458 mw32_f(MVS_INT_STAT, CINT_DONE);
459#ifndef MVS_USE_TASKLET
460 spin_lock(&mvi->lock);
461#endif
462 mvs_int_full(mvi);
463#ifndef MVS_USE_TASKLET
464 spin_unlock(&mvi->lock);
465#endif
466 return IRQ_HANDLED;
467}
468
469static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx)
470{
471 u32 tmp;
472 mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32));
473 mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32));
474 do {
475 tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3));
476 } while (tmp & 1 << (slot_idx % 32));
477 do {
478 tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3));
479 } while (tmp & 1 << (slot_idx % 32));
480}
481
482static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
483 u32 tfs)
484{
485 void __iomem *regs = mvi->regs;
486 u32 tmp;
487
488 if (type == PORT_TYPE_SATA) {
489 tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
490 mw32(MVS_INT_STAT_SRS_0, tmp);
491 }
492 mw32(MVS_INT_STAT, CINT_CI_STOP);
493 tmp = mr32(MVS_PCS) | 0xFF00;
494 mw32(MVS_PCS, tmp);
495}
496
497static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
498{
499 void __iomem *regs = mvi->regs;
500 u32 tmp, offs;
501
502 if (*tfs == MVS_ID_NOT_MAPPED)
503 return;
504
505 offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
506 if (*tfs < 16) {
507 tmp = mr32(MVS_PCS);
508 mw32(MVS_PCS, tmp & ~offs);
509 } else {
510 tmp = mr32(MVS_CTL);
511 mw32(MVS_CTL, tmp & ~offs);
512 }
513
514 tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs);
515 if (tmp)
516 mw32(MVS_INT_STAT_SRS_0, tmp);
517
518 *tfs = MVS_ID_NOT_MAPPED;
519 return;
520}
521
522static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
523{
524 int i;
525 u32 tmp, offs;
526 void __iomem *regs = mvi->regs;
527
528 if (*tfs != MVS_ID_NOT_MAPPED)
529 return 0;
530
531 tmp = mr32(MVS_PCS);
532
533 for (i = 0; i < mvi->chip->srs_sz; i++) {
534 if (i == 16)
535 tmp = mr32(MVS_CTL);
536 offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
537 if (!(tmp & offs)) {
538 *tfs = i;
539
540 if (i < 16)
541 mw32(MVS_PCS, tmp | offs);
542 else
543 mw32(MVS_CTL, tmp | offs);
544 tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i);
545 if (tmp)
546 mw32(MVS_INT_STAT_SRS_0, tmp);
547 return 0;
548 }
549 }
550 return MVS_ID_NOT_MAPPED;
551}
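/*
 * SATA register sets are tracked as allocation bits: sets 0-15 live
 * in MVS_PCS and sets 16-31 in MVS_CTL, each at bit
 * ((set & 0x0f) + PCS_EN_SATA_REG_SHIFT).  A "is this set in use?"
 * probe mirroring the lookup above (hypothetical sketch):
 */
static bool mvs_64xx_reg_set_busy(struct mvs_info *mvi, u8 set)
{
	void __iomem *regs = mvi->regs;
	u32 bank = (set < 16) ? mr32(MVS_PCS) : mr32(MVS_CTL);

	return bank & (1U << ((set & 0x0f) + PCS_EN_SATA_REG_SHIFT));
}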
552
553void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
554{
555 int i;
556 struct scatterlist *sg;
557 struct mvs_prd *buf_prd = prd;
558 for_each_sg(scatter, sg, nr, i) {
559 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
560 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
561 buf_prd++;
562 }
563}
564
565static int mvs_64xx_oob_done(struct mvs_info *mvi, int i)
566{
567 u32 phy_st;
568 mvs_write_port_cfg_addr(mvi, i,
569 PHYR_PHY_STAT);
570 phy_st = mvs_read_port_cfg_data(mvi, i);
571 if (phy_st & PHY_OOB_DTCTD)
572 return 1;
573 return 0;
574}
575
576static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i,
577 struct sas_identify_frame *id)
579{
580 struct mvs_phy *phy = &mvi->phy[i];
581 struct asd_sas_phy *sas_phy = &phy->sas_phy;
582
583 sas_phy->linkrate =
584 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
585 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
586
587 phy->minimum_linkrate =
588 (phy->phy_status &
589 PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
590 phy->maximum_linkrate =
591 (phy->phy_status &
592 PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
593
594 mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
595 phy->dev_info = mvs_read_port_cfg_data(mvi, i);
596
597 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
598 phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
599
600 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
601 phy->att_dev_sas_addr =
602 (u64) mvs_read_port_cfg_data(mvi, i) << 32;
603 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
604 phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
605 phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr);
606}
607
608static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
609{
610 u32 tmp;
611 struct mvs_phy *phy = &mvi->phy[i];
612 /* workaround for HW phy decoding error on 1.5g disk drive */
613 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
614 tmp = mvs_read_port_vsr_data(mvi, i);
615 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
616 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
617 SAS_LINK_RATE_1_5_GBPS)
618 tmp &= ~PHY_MODE6_LATECLK;
619 else
620 tmp |= PHY_MODE6_LATECLK;
621 mvs_write_port_vsr_data(mvi, i, tmp);
622}
623
624void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
625 struct sas_phy_linkrates *rates)
626{
627 u32 lrmin = 0, lrmax = 0;
628 u32 tmp;
629
630 tmp = mvs_read_phy_ctl(mvi, phy_id);
631 lrmin = (rates->minimum_linkrate << 8);
632 lrmax = (rates->maximum_linkrate << 12);
633
634 if (lrmin) {
635 tmp &= ~(0xf << 8);
636 tmp |= lrmin;
637 }
638 if (lrmax) {
639 tmp &= ~(0xf << 12);
640 tmp |= lrmax;
641 }
642 mvs_write_phy_ctl(mvi, phy_id, tmp);
643 mvs_64xx_phy_reset(mvi, phy_id, 1);
644}
645
646static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
647{
648 u32 tmp;
649 void __iomem *regs = mvi->regs;
650 tmp = mr32(MVS_PCS);
651 mw32(MVS_PCS, tmp & 0xFFFF);
652 mw32(MVS_PCS, tmp);
653 tmp = mr32(MVS_CTL);
654 mw32(MVS_CTL, tmp & 0xFFFF);
655 mw32(MVS_CTL, tmp);
656}
657
658
659u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
660{
661 void __iomem *regs = mvi->regs_ex;
662 return ior32(SPI_DATA_REG_64XX);
663}
664
665void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
666{
667 void __iomem *regs = mvi->regs_ex;
668 iow32(SPI_DATA_REG_64XX, data);
669}
670
671
672int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
673 u32 *dwCmd,
674 u8 cmd,
675 u8 read,
676 u8 length,
677 u32 addr
678 )
679{
680 u32 dwTmp;
681
682 dwTmp = ((u32)cmd << 24) | ((u32)length << 19);
683 if (read)
684 dwTmp |= 1U<<23;
685
686 if (addr != MV_MAX_U32) {
687 dwTmp |= 1U<<22;
688 dwTmp |= (addr & 0x0003FFFF);
689 }
690
691 *dwCmd = dwTmp;
692 return 0;
693}
694
695
696int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
697{
698 void __iomem *regs = mvi->regs_ex;
699 int retry;
700
701 for (retry = 0; retry < 1; retry++) {
702 iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE);
703 iow32(SPI_CMD_REG_64XX, cmd);
704 iow32(SPI_CTRL_REG_64XX,
705 SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART);
706 }
707
708 return 0;
709}
710
711int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
712{
713 void __iomem *regs = mvi->regs_ex;
714 u32 i, dwTmp;
715
716 for (i = 0; i < timeout; i++) {
717 dwTmp = ior32(SPI_CTRL_REG_64XX);
718 if (!(dwTmp & SPI_CTRL_SPISTART))
719 return 0;
720 msleep(10);
721 }
722
723 return -1;
724}
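/*
 * Talking to the attached SPI flash is a four-step sequence: build
 * the command word, issue it, poll until SPISTART self-clears, then
 * read the data register.  A minimal 32-bit read built from the
 * helpers above (a sketch; the opcode 'op' and the timeout value are
 * assumptions, not driver constants):
 */
static int mvs_64xx_spi_read32(struct mvs_info *mvi, u8 op, u32 addr,
			       u32 *data)
{
	u32 cmd;

	mvs_64xx_spi_buildcmd(mvi, &cmd, op, 1, 4, addr);
	mvs_64xx_spi_issuecmd(mvi, cmd);
	if (mvs_64xx_spi_waitdataready(mvi, 100))
		return -1;
	*data = mvs_64xx_spi_read_data(mvi);
	return 0;
}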
725
726#ifndef DISABLE_HOTPLUG_DMA_FIX
727void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
728{
729 int i;
730 struct mvs_prd *buf_prd = prd;
731 buf_prd += from;
732 for (i = 0; i < MAX_SG_ENTRY - from; i++) {
733 buf_prd->addr = cpu_to_le64(buf_dma);
734 buf_prd->len = cpu_to_le32(buf_len);
735 ++buf_prd;
736 }
737}
738#endif
739
740const struct mvs_dispatch mvs_64xx_dispatch = {
741 "mv64xx",
742 mvs_64xx_init,
743 NULL,
744 mvs_64xx_ioremap,
745 mvs_64xx_iounmap,
746 mvs_64xx_isr,
747 mvs_64xx_isr_status,
748 mvs_64xx_interrupt_enable,
749 mvs_64xx_interrupt_disable,
750 mvs_read_phy_ctl,
751 mvs_write_phy_ctl,
752 mvs_read_port_cfg_data,
753 mvs_write_port_cfg_data,
754 mvs_write_port_cfg_addr,
755 mvs_read_port_vsr_data,
756 mvs_write_port_vsr_data,
757 mvs_write_port_vsr_addr,
758 mvs_read_port_irq_stat,
759 mvs_write_port_irq_stat,
760 mvs_read_port_irq_mask,
761 mvs_write_port_irq_mask,
762 mvs_get_sas_addr,
763 mvs_64xx_command_active,
764 mvs_64xx_issue_stop,
765 mvs_start_delivery,
766 mvs_rx_update,
767 mvs_int_full,
768 mvs_64xx_assign_reg_set,
769 mvs_64xx_free_reg_set,
770 mvs_get_prd_size,
771 mvs_get_prd_count,
772 mvs_64xx_make_prd,
773 mvs_64xx_detect_porttype,
774 mvs_64xx_oob_done,
775 mvs_64xx_fix_phy_info,
776 mvs_64xx_phy_work_around,
777 mvs_64xx_phy_set_link_rate,
778 mvs_hw_max_link_rate,
779 mvs_64xx_phy_disable,
780 mvs_64xx_phy_enable,
781 mvs_64xx_phy_reset,
782 mvs_64xx_stp_reset,
783 mvs_64xx_clear_active_cmds,
784 mvs_64xx_spi_read_data,
785 mvs_64xx_spi_write_data,
786 mvs_64xx_spi_buildcmd,
787 mvs_64xx_spi_issuecmd,
788 mvs_64xx_spi_waitdataready,
789#ifndef DISABLE_HOTPLUG_DMA_FIX
790 mvs_64xx_fix_dma,
791#endif
792};
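/*
 * mvs_64xx_dispatch is the per-chip-family ops table: the shared core
 * code calls hardware-specific entry points only through the struct
 * mvs_dispatch attached to each mvs_info, which is what lets 64xx and
 * 94xx parts share one driver.  A hedged call sketch (the field name
 * is assumed):
 *
 *	mvi->dispatch->interrupt_enable(mvi);
 */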
793
diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h
new file mode 100644
index 00000000000..42e947d9795
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_64xx.h
@@ -0,0 +1,151 @@
1/*
2 * Marvell 88SE64xx hardware specific header file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MVS64XX_REG_H_
26#define _MVS64XX_REG_H_
27
28#include <linux/types.h>
29
30#define MAX_LINK_RATE SAS_LINK_RATE_3_0_GBPS
31
32/* enhanced mode registers (BAR4) */
33enum hw_registers {
34 MVS_GBL_CTL = 0x04, /* global control */
35 MVS_GBL_INT_STAT = 0x08, /* global irq status */
36 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
37
38 MVS_PHY_CTL = 0x40, /* SOC PHY Control */
39 MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */
40
41 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
42
43 MVS_CTL = 0x100, /* SAS/SATA port configuration */
44 MVS_PCS = 0x104, /* SAS/SATA port control/status */
45 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
46 MVS_CMD_LIST_HI = 0x10C,
47 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
48 MVS_RX_FIS_HI = 0x114,
49
50 MVS_TX_CFG = 0x120, /* TX configuration */
51 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
52 MVS_TX_HI = 0x128,
53
54 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
55 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
56 MVS_RX_CFG = 0x134, /* RX configuration */
57 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
58 MVS_RX_HI = 0x13C,
59 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
60
61 MVS_INT_COAL = 0x148, /* Int coalescing config */
62 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
63 MVS_INT_STAT = 0x150, /* Central int status */
64 MVS_INT_MASK = 0x154, /* Central int enable */
65 MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */
66 MVS_INT_MASK_SRS_0 = 0x15C,
67
68 /* ports 1-3 follow after this */
69 MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
70 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
71 /* ports 5-7 follow after this */
72 MVS_P4_INT_STAT = 0x200, /* Port4 interrupt status */
73 MVS_P4_INT_MASK = 0x204, /* Port4 interrupt enable mask */
74
75 /* ports 1-3 follow after this */
76 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
77 /* ports 5-7 follow after this */
78 MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
79
80 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
81 MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
82
83 /* ports 1-3 follow after this */
84 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
85 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
86 /* ports 5-7 follow after this */
87 MVS_P4_CFG_ADDR = 0x230, /* Port4 config address */
88 MVS_P4_CFG_DATA = 0x234, /* Port4 config data */
89
90 /* ports 1-3 follow after this */
91 MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
92 MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
93 /* ports 5-7 follow after this */
94 MVS_P4_VSR_ADDR = 0x250, /* port4 VSR addr */
95 MVS_P4_VSR_DATA = 0x254, /* port4 VSR data */
96};
97
98enum pci_cfg_registers {
99 PCR_PHY_CTL = 0x40,
100 PCR_PHY_CTL2 = 0x90,
101 PCR_DEV_CTRL = 0xE8,
102 PCR_LINK_STAT = 0xF2,
103};
104
105/* SAS/SATA Vendor Specific Port Registers */
106enum sas_sata_vsp_regs {
107 VSR_PHY_STAT = 0x00, /* Phy Status */
108 VSR_PHY_MODE1 = 0x01, /* phy tx */
109 VSR_PHY_MODE2 = 0x02, /* tx scc */
110 VSR_PHY_MODE3 = 0x03, /* pll */
111 VSR_PHY_MODE4 = 0x04, /* VCO */
112 VSR_PHY_MODE5 = 0x05, /* Rx */
113 VSR_PHY_MODE6 = 0x06, /* CDR */
114 VSR_PHY_MODE7 = 0x07, /* Impedance */
115 VSR_PHY_MODE8 = 0x08, /* Voltage */
116 VSR_PHY_MODE9 = 0x09, /* Test */
117 VSR_PHY_MODE10 = 0x0A, /* Power */
118 VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
119	VSR_PHY_VS0 = 0x0C, /* Vendor Specific 0 */
120	VSR_PHY_VS1 = 0x0D, /* Vendor Specific 1 */
121};
122
123enum chip_register_bits {
124 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
125 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
126 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
127 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
128 (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
129};
130
131#define MAX_SG_ENTRY 64
132
133struct mvs_prd {
134 __le64 addr; /* 64-bit buffer address */
135 __le32 reserved;
136 __le32 len; /* 16-bit length */
137};
138
139#define SPI_CTRL_REG 0xc0
140#define SPI_CTRL_VENDOR_ENABLE (1U<<29)
141#define SPI_CTRL_SPIRDY (1U<<22)
142#define SPI_CTRL_SPISTART (1U<<20)
143
144#define SPI_CMD_REG 0xc4
145#define SPI_DATA_REG 0xc8
146
147#define SPI_CTRL_REG_64XX 0x10
148#define SPI_CMD_REG_64XX 0x14
149#define SPI_DATA_REG_64XX 0x18
150
151#endif
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
new file mode 100644
index 00000000000..0940fae19d2
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -0,0 +1,672 @@
1/*
2 * Marvell 88SE94xx hardware specific
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#include "mv_sas.h"
26#include "mv_94xx.h"
27#include "mv_chips.h"
28
29static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
30{
31 u32 reg;
32 struct mvs_phy *phy = &mvi->phy[i];
33 u32 phy_status;
34
35 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
36 reg = mvs_read_port_vsr_data(mvi, i);
37 phy_status = ((reg & 0x3f0000) >> 16) & 0xff;
38 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
39 switch (phy_status) {
40 case 0x10:
41 phy->phy_type |= PORT_TYPE_SAS;
42 break;
43 case 0x1d:
44 default:
45 phy->phy_type |= PORT_TYPE_SATA;
46 break;
47 }
48}
49
50static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
51{
52 void __iomem *regs = mvi->regs;
53 u32 tmp;
54
55 tmp = mr32(MVS_PCS);
56 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
57 mw32(MVS_PCS, tmp);
58}
59
60static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
61{
62 u32 tmp;
63
64 tmp = mvs_read_port_irq_stat(mvi, phy_id);
65 tmp &= ~PHYEV_RDY_CH;
66 mvs_write_port_irq_stat(mvi, phy_id, tmp);
67 if (hard) {
68 tmp = mvs_read_phy_ctl(mvi, phy_id);
69 tmp |= PHY_RST_HARD;
70 mvs_write_phy_ctl(mvi, phy_id, tmp);
71 do {
72 tmp = mvs_read_phy_ctl(mvi, phy_id);
73 } while (tmp & PHY_RST_HARD);
74 } else {
75 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT);
76 tmp = mvs_read_port_vsr_data(mvi, phy_id);
77 tmp |= PHY_RST;
78 mvs_write_port_vsr_data(mvi, phy_id, tmp);
79 }
80}
81
82static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
83{
84 u32 tmp;
85 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
86 tmp = mvs_read_port_vsr_data(mvi, phy_id);
87 mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000);
88}
89
90static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
91{
92 mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4);
93 mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
94 mvs_write_port_vsr_addr(mvi, phy_id, 0x104);
95 mvs_write_port_vsr_data(mvi, phy_id, 0x00018080);
96 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
97 mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff);
98}
99
100static int __devinit mvs_94xx_init(struct mvs_info *mvi)
101{
102 void __iomem *regs = mvi->regs;
103 int i;
104 u32 tmp, cctl;
105
106 mvs_show_pcie_usage(mvi);
107 if (mvi->flags & MVF_FLAG_SOC) {
108 tmp = mr32(MVS_PHY_CTL);
109 tmp &= ~PCTL_PWR_OFF;
110 tmp |= PCTL_PHY_DSBL;
111 mw32(MVS_PHY_CTL, tmp);
112 }
113
114 /* Init Chip */
115 /* make sure RST is set; HBA_RST /should/ have done that for us */
116 cctl = mr32(MVS_CTL) & 0xFFFF;
117 if (cctl & CCTL_RST)
118 cctl &= ~CCTL_RST;
119 else
120 mw32_f(MVS_CTL, cctl | CCTL_RST);
121
122 if (mvi->flags & MVF_FLAG_SOC) {
123 tmp = mr32(MVS_PHY_CTL);
124 tmp &= ~PCTL_PWR_OFF;
125 tmp |= PCTL_COM_ON;
126 tmp &= ~PCTL_PHY_DSBL;
127 tmp |= PCTL_LINK_RST;
128 mw32(MVS_PHY_CTL, tmp);
129 msleep(100);
130 tmp &= ~PCTL_LINK_RST;
131 mw32(MVS_PHY_CTL, tmp);
132 msleep(100);
133 }
134
135 /* reset control */
136 mw32(MVS_PCS, 0); /* MVS_PCS */
137 mw32(MVS_STP_REG_SET_0, 0);
138 mw32(MVS_STP_REG_SET_1, 0);
139
140 /* init phys */
141 mvs_phy_hacks(mvi);
142
143 /* disable Multiplexing, enable phy implemented */
144 mw32(MVS_PORTS_IMP, 0xFF);
145
146
147 mw32(MVS_PA_VSR_ADDR, 0x00000104);
148 mw32(MVS_PA_VSR_PORT, 0x00018080);
149 mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
150 mw32(MVS_PA_VSR_PORT, 0x0084ffff);
151
152	/* set LED to blink during I/O */
153 mw32(MVS_PA_VSR_ADDR, 0x00000030);
154 tmp = mr32(MVS_PA_VSR_PORT);
155 tmp &= 0xFFFF00FF;
156 tmp |= 0x00003300;
157 mw32(MVS_PA_VSR_PORT, tmp);
158
159 mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
160 mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
161
162 mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
163 mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
164
165 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
166 mw32(MVS_TX_LO, mvi->tx_dma);
167 mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
168
169 mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
170 mw32(MVS_RX_LO, mvi->rx_dma);
171 mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
172
173 for (i = 0; i < mvi->chip->n_phy; i++) {
174 mvs_94xx_phy_disable(mvi, i);
175 /* set phy local SAS address */
176 mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
177 (mvi->phy[i].dev_sas_addr));
178
179 mvs_94xx_enable_xmt(mvi, i);
180 mvs_94xx_phy_enable(mvi, i);
181
182 mvs_94xx_phy_reset(mvi, i, 1);
183 msleep(500);
184 mvs_94xx_detect_porttype(mvi, i);
185 }
186
187 if (mvi->flags & MVF_FLAG_SOC) {
188 /* set select registers */
189 writel(0x0E008000, regs + 0x000);
190 writel(0x59000008, regs + 0x004);
191 writel(0x20, regs + 0x008);
192 writel(0x20, regs + 0x00c);
193 writel(0x20, regs + 0x010);
194 writel(0x20, regs + 0x014);
195 writel(0x20, regs + 0x018);
196 writel(0x20, regs + 0x01c);
197 }
198 for (i = 0; i < mvi->chip->n_phy; i++) {
199 /* clear phy int status */
200 tmp = mvs_read_port_irq_stat(mvi, i);
201 tmp &= ~PHYEV_SIG_FIS;
202 mvs_write_port_irq_stat(mvi, i, tmp);
203
204 /* set phy int mask */
205 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
206 PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ;
207 mvs_write_port_irq_mask(mvi, i, tmp);
208
209 msleep(100);
210 mvs_update_phyinfo(mvi, i, 1);
211 }
212
213 /* FIXME: update wide port bitmaps */
214
215 /* little endian for open address and command table, etc. */
216 /*
217	 * it seems that (from the spec) turning on big-endian won't
218	 * do us any good on big-endian machines; needs further confirmation
219 */
220 cctl = mr32(MVS_CTL);
221 cctl |= CCTL_ENDIAN_CMD;
222 cctl |= CCTL_ENDIAN_DATA;
223 cctl &= ~CCTL_ENDIAN_OPEN;
224 cctl |= CCTL_ENDIAN_RSP;
225 mw32_f(MVS_CTL, cctl);
226
227 /* reset CMD queue */
228 tmp = mr32(MVS_PCS);
229 tmp |= PCS_CMD_RST;
230 mw32(MVS_PCS, tmp);
231	/* interrupt coalescing may cause a missed HW interrupt in some
232	 * cases; also the max coalescing count is 0x1ff while our max slot
233	 * count is 0x200, which would wrap the count to 0, so disable it.
234	 */
235 tmp = 0;
236 mw32(MVS_INT_COAL, tmp);
237
238 tmp = 0x100;
239 mw32(MVS_INT_COAL_TMOUT, tmp);
240
241 /* ladies and gentlemen, start your engines */
242 mw32(MVS_TX_CFG, 0);
243 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
244 mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
245 /* enable CMD/CMPL_Q/RESP mode */
246 mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
247 PCS_CMD_EN | PCS_CMD_STOP_ERR);
248
249 /* enable completion queue interrupt */
250 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
251 CINT_DMA_PCIE);
252 tmp |= CINT_PHY_MASK;
253 mw32(MVS_INT_MASK, tmp);
254
255 /* Enable SRS interrupt */
256 mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
257
258 return 0;
259}
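/*
 * Aside (illustrative sketch, not part of this patch): the
 * "(dma >> 16) >> 16" idiom used above splits a 64-bit bus address into
 * HI/LO halves without shifting by 32, which would be undefined when
 * dma_addr_t is only a 32-bit type on the build platform. The *_demo
 * helpers below just restate that split:
 */
static inline u32 mvs_dma_lo_demo(dma_addr_t dma)
{
	return (u32)dma;			/* low 32 bits */
}

static inline u32 mvs_dma_hi_demo(dma_addr_t dma)
{
	return (u32)((dma >> 16) >> 16);	/* high 32 bits, safe for 32-bit dma_addr_t */
}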
260
261static int mvs_94xx_ioremap(struct mvs_info *mvi)
262{
263 if (!mvs_ioremap(mvi, 2, -1)) {
264 mvi->regs_ex = mvi->regs + 0x10200;
265 mvi->regs += 0x20000;
266 if (mvi->id == 1)
267 mvi->regs += 0x4000;
268 return 0;
269 }
270 return -1;
271}
272
273static void mvs_94xx_iounmap(struct mvs_info *mvi)
274{
275 if (mvi->regs) {
276 mvi->regs -= 0x20000;
277 if (mvi->id == 1)
278 mvi->regs -= 0x4000;
279 mvs_iounmap(mvi->regs);
280 }
281}
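/*
 * Illustrative sketch (assumed helper, not in this patch): the BAR
 * layout implied by mvs_94xx_ioremap()/mvs_94xx_iounmap() above places
 * the shared extension registers at +0x10200 and each host's register
 * block at +0x20000, with the second host (id == 1) a further +0x4000.
 */
static inline void __iomem *mvs_94xx_host_base_demo(void __iomem *bar, int id)
{
	return bar + 0x20000 + (id == 1 ? 0x4000 : 0);
}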
282
283static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
284{
285 void __iomem *regs = mvi->regs_ex;
286 u32 tmp;
287
288 tmp = mr32(MVS_GBL_CTL);
289 tmp |= (IRQ_SAS_A | IRQ_SAS_B);
290 mw32(MVS_GBL_INT_STAT, tmp);
291 writel(tmp, regs + 0x0C);
292 writel(tmp, regs + 0x10);
293 writel(tmp, regs + 0x14);
294 writel(tmp, regs + 0x18);
295 mw32(MVS_GBL_CTL, tmp);
296}
297
298static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
299{
300 void __iomem *regs = mvi->regs_ex;
301 u32 tmp;
302
303 tmp = mr32(MVS_GBL_CTL);
304
305 tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
306 mw32(MVS_GBL_INT_STAT, tmp);
307 writel(tmp, regs + 0x0C);
308 writel(tmp, regs + 0x10);
309 writel(tmp, regs + 0x14);
310 writel(tmp, regs + 0x18);
311 mw32(MVS_GBL_CTL, tmp);
312}
313
314static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
315{
316 void __iomem *regs = mvi->regs_ex;
317 u32 stat = 0;
318 if (!(mvi->flags & MVF_FLAG_SOC)) {
319 stat = mr32(MVS_GBL_INT_STAT);
320
321 if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
322 return 0;
323 }
324 return stat;
325}
326
327static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
328{
329 void __iomem *regs = mvi->regs;
330
331 if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
332 ((stat & IRQ_SAS_B) && mvi->id == 1)) {
333 mw32_f(MVS_INT_STAT, CINT_DONE);
334 #ifndef MVS_USE_TASKLET
335 spin_lock(&mvi->lock);
336 #endif
337 mvs_int_full(mvi);
338 #ifndef MVS_USE_TASKLET
339 spin_unlock(&mvi->lock);
340 #endif
341 }
342 return IRQ_HANDLED;
343}
344
345static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
346{
347 u32 tmp;
348 mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32));
349 do {
350 tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3));
351 } while (tmp & 1 << (slot_idx % 32));
352}
353
354static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
355 u32 tfs)
356{
357 void __iomem *regs = mvi->regs;
358 u32 tmp;
359
360 if (type == PORT_TYPE_SATA) {
361 tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
362 mw32(MVS_INT_STAT_SRS_0, tmp);
363 }
364 mw32(MVS_INT_STAT, CINT_CI_STOP);
365 tmp = mr32(MVS_PCS) | 0xFF00;
366 mw32(MVS_PCS, tmp);
367}
368
369static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
370{
371 void __iomem *regs = mvi->regs;
372 u32 tmp;
373 u8 reg_set = *tfs;
374
375 if (*tfs == MVS_ID_NOT_MAPPED)
376 return;
377
378 mvi->sata_reg_set &= ~bit(reg_set);
379 if (reg_set < 32) {
380 w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
381 tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set;
382 if (tmp)
383 mw32(MVS_INT_STAT_SRS_0, tmp);
384 } else {
385		w_reg_set_enable(reg_set, (u32)(mvi->sata_reg_set >> 32));
386		tmp = mr32(MVS_INT_STAT_SRS_1) & (u32)(mvi->sata_reg_set >> 32);
387 if (tmp)
388 mw32(MVS_INT_STAT_SRS_1, tmp);
389 }
390
391 *tfs = MVS_ID_NOT_MAPPED;
392
393 return;
394}
395
396static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
397{
398 int i;
399 void __iomem *regs = mvi->regs;
400
401 if (*tfs != MVS_ID_NOT_MAPPED)
402 return 0;
403
404 i = mv_ffc64(mvi->sata_reg_set);
405	if (i >= 32) {
406 mvi->sata_reg_set |= bit(i);
407 w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
408 *tfs = i;
409 return 0;
410 } else if (i >= 0) {
411 mvi->sata_reg_set |= bit(i);
412 w_reg_set_enable(i, (u32)mvi->sata_reg_set);
413 *tfs = i;
414 return 0;
415 }
416 return MVS_ID_NOT_MAPPED;
417}
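/*
 * Illustrative caller pattern (a sketch under assumed usage, not driver
 * code): a SATA device borrows a register set for its lifetime, with the
 * tfs byte tracking the mapping and MVS_ID_NOT_MAPPED meaning "none held".
 */
static int mvs_94xx_reg_set_demo(struct mvs_info *mvi)
{
	u8 tfs = MVS_ID_NOT_MAPPED;

	if (mvs_94xx_assign_reg_set(mvi, &tfs) == MVS_ID_NOT_MAPPED)
		return -EBUSY;		/* all SATA register sets in use */
	/* ... issue SATA/STP commands against register set 'tfs' ... */
	mvs_94xx_free_reg_set(mvi, &tfs);
	return 0;
}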
418
419static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
420{
421 int i;
422 struct scatterlist *sg;
423 struct mvs_prd *buf_prd = prd;
424 for_each_sg(scatter, sg, nr, i) {
425 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
426 buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
427 buf_prd++;
428 }
429}
430
431static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
432{
433 u32 phy_st;
434 phy_st = mvs_read_phy_ctl(mvi, i);
435 if (phy_st & PHY_READY_MASK) /* phy ready */
436 return 1;
437 return 0;
438}
439
440static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
441 struct sas_identify_frame *id)
442{
443 int i;
444 u32 id_frame[7];
445
446 for (i = 0; i < 7; i++) {
447 mvs_write_port_cfg_addr(mvi, port_id,
448 CONFIG_ID_FRAME0 + i * 4);
449 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
450 }
451 memcpy(id, id_frame, 28);
452}
453
454static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
455 struct sas_identify_frame *id)
456{
457 int i;
458 u32 id_frame[7];
459
460 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
461 for (i = 0; i < 7; i++) {
462 mvs_write_port_cfg_addr(mvi, port_id,
463 CONFIG_ATT_ID_FRAME0 + i * 4);
464 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
465		mv_dprintk("94xx phy %d attached frame %d %x.\n",
466 port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
467 }
468 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
469 memcpy(id, id_frame, 28);
470}
471
472static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id)
473{
474 u32 att_dev_info = 0;
475
476 att_dev_info |= id->dev_type;
477 if (id->stp_iport)
478 att_dev_info |= PORT_DEV_STP_INIT;
479 if (id->smp_iport)
480 att_dev_info |= PORT_DEV_SMP_INIT;
481 if (id->ssp_iport)
482 att_dev_info |= PORT_DEV_SSP_INIT;
483 if (id->stp_tport)
484 att_dev_info |= PORT_DEV_STP_TRGT;
485 if (id->smp_tport)
486 att_dev_info |= PORT_DEV_SMP_TRGT;
487 if (id->ssp_tport)
488 att_dev_info |= PORT_DEV_SSP_TRGT;
489
490 att_dev_info |= (u32)id->phy_id<<24;
491 return att_dev_info;
492}
493
494static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id)
495{
496 return mvs_94xx_make_dev_info(id);
497}
498
499static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
500 struct sas_identify_frame *id)
501{
502 struct mvs_phy *phy = &mvi->phy[i];
503 struct asd_sas_phy *sas_phy = &phy->sas_phy;
504 mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
505 sas_phy->linkrate =
506 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
507 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
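	/* the hardware rate code starts at 0 for 1.5 Gbps; adding 8 maps it
	 * onto enum sas_linkrate, where SAS_LINK_RATE_1_5_GBPS == 8 */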
508 sas_phy->linkrate += 0x8;
509 mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
510 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
511 phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
512 mvs_94xx_get_dev_identify_frame(mvi, i, id);
513 phy->dev_info = mvs_94xx_make_dev_info(id);
514
515 if (phy->phy_type & PORT_TYPE_SAS) {
516 mvs_94xx_get_att_identify_frame(mvi, i, id);
517 phy->att_dev_info = mvs_94xx_make_att_info(id);
518 phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
519 } else {
520 phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
521 }
522
523}
524
525void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
526 struct sas_phy_linkrates *rates)
527{
528 /* TODO */
529}
530
531static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
532{
533 u32 tmp;
534 void __iomem *regs = mvi->regs;
535 tmp = mr32(MVS_STP_REG_SET_0);
536 mw32(MVS_STP_REG_SET_0, 0);
537 mw32(MVS_STP_REG_SET_0, tmp);
538 tmp = mr32(MVS_STP_REG_SET_1);
539 mw32(MVS_STP_REG_SET_1, 0);
540 mw32(MVS_STP_REG_SET_1, tmp);
541}
542
543
544u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
545{
546 void __iomem *regs = mvi->regs_ex - 0x10200;
547 return mr32(SPI_RD_DATA_REG_94XX);
548}
549
550void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
551{
552 void __iomem *regs = mvi->regs_ex - 0x10200;
553 mw32(SPI_RD_DATA_REG_94XX, data);
554}
555
556
557int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
558 u32 *dwCmd,
559 u8 cmd,
560 u8 read,
561 u8 length,
562 u32 addr
563 )
564{
565 void __iomem *regs = mvi->regs_ex - 0x10200;
566 u32 dwTmp;
567
568 dwTmp = ((u32)cmd << 8) | ((u32)length << 4);
569 if (read)
570 dwTmp |= SPI_CTRL_READ_94XX;
571
572 if (addr != MV_MAX_U32) {
573 mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL));
574 dwTmp |= SPI_ADDR_VLD_94XX;
575 }
576
577 *dwCmd = dwTmp;
578 return 0;
579}
580
581
582int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
583{
584 void __iomem *regs = mvi->regs_ex - 0x10200;
585 mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
586
587 return 0;
588}
589
590int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
591{
592 void __iomem *regs = mvi->regs_ex - 0x10200;
593 u32 i, dwTmp;
594
595 for (i = 0; i < timeout; i++) {
596 dwTmp = mr32(SPI_CTRL_REG_94XX);
597 if (!(dwTmp & SPI_CTRL_SpiStart_94XX))
598 return 0;
599 msleep(10);
600 }
601
602 return -1;
603}
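/*
 * A hedged sketch (not part of this patch) of how the four SPI
 * primitives above compose into one flash read; the 0x03 (READ) opcode
 * and the 100-poll timeout are assumptions for illustration only.
 */
static int mvs_94xx_spi_read_demo(struct mvs_info *mvi, u32 addr, u32 *data)
{
	u32 cmd;
	int rc;

	rc = mvs_94xx_spi_buildcmd(mvi, &cmd, 0x03, 1, 4, addr);
	if (rc)
		return rc;
	rc = mvs_94xx_spi_issuecmd(mvi, cmd);
	if (rc)
		return rc;
	rc = mvs_94xx_spi_waitdataready(mvi, 100);
	if (rc)
		return rc;	/* device never deasserted SpiStart */
	*data = mvs_94xx_spi_read_data(mvi);
	return 0;
}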
604
605#ifndef DISABLE_HOTPLUG_DMA_FIX
606void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
607{
608 int i;
609 struct mvs_prd *buf_prd = prd;
610 buf_prd += from;
611 for (i = 0; i < MAX_SG_ENTRY - from; i++) {
612 buf_prd->addr = cpu_to_le64(buf_dma);
613 buf_prd->im_len.len = cpu_to_le32(buf_len);
614 ++buf_prd;
615 }
616}
617#endif
618
619const struct mvs_dispatch mvs_94xx_dispatch = {
620 "mv94xx",
621 mvs_94xx_init,
622 NULL,
623 mvs_94xx_ioremap,
624 mvs_94xx_iounmap,
625 mvs_94xx_isr,
626 mvs_94xx_isr_status,
627 mvs_94xx_interrupt_enable,
628 mvs_94xx_interrupt_disable,
629 mvs_read_phy_ctl,
630 mvs_write_phy_ctl,
631 mvs_read_port_cfg_data,
632 mvs_write_port_cfg_data,
633 mvs_write_port_cfg_addr,
634 mvs_read_port_vsr_data,
635 mvs_write_port_vsr_data,
636 mvs_write_port_vsr_addr,
637 mvs_read_port_irq_stat,
638 mvs_write_port_irq_stat,
639 mvs_read_port_irq_mask,
640 mvs_write_port_irq_mask,
641 mvs_get_sas_addr,
642 mvs_94xx_command_active,
643 mvs_94xx_issue_stop,
644 mvs_start_delivery,
645 mvs_rx_update,
646 mvs_int_full,
647 mvs_94xx_assign_reg_set,
648 mvs_94xx_free_reg_set,
649 mvs_get_prd_size,
650 mvs_get_prd_count,
651 mvs_94xx_make_prd,
652 mvs_94xx_detect_porttype,
653 mvs_94xx_oob_done,
654 mvs_94xx_fix_phy_info,
655 NULL,
656 mvs_94xx_phy_set_link_rate,
657 mvs_hw_max_link_rate,
658 mvs_94xx_phy_disable,
659 mvs_94xx_phy_enable,
660 mvs_94xx_phy_reset,
661 NULL,
662 mvs_94xx_clear_active_cmds,
663 mvs_94xx_spi_read_data,
664 mvs_94xx_spi_write_data,
665 mvs_94xx_spi_buildcmd,
666 mvs_94xx_spi_issuecmd,
667 mvs_94xx_spi_waitdataready,
668#ifndef DISABLE_HOTPLUG_DMA_FIX
669 mvs_94xx_fix_dma,
670#endif
671};
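/*
 * Chip-independent code reaches the routines above only through this
 * table, via the MVS_CHIP_DISP accessor (see mv_chips.h and mv_init.c).
 * A sketch of the call-through pattern, matching the members used at
 * those call sites:
 *
 *	stat = MVS_CHIP_DISP->isr_status(mvi, irq);
 *	if (stat)
 *		MVS_CHIP_DISP->isr(mvi, irq, stat);
 */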
672
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
new file mode 100644
index 00000000000..23ed9b16466
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -0,0 +1,222 @@
1/*
2 * Marvell 88SE94xx hardware-specific header file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MVS94XX_REG_H_
26#define _MVS94XX_REG_H_
27
28#include <linux/types.h>
29
30#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS
31
32enum hw_registers {
33 MVS_GBL_CTL = 0x04, /* global control */
34 MVS_GBL_INT_STAT = 0x00, /* global irq status */
35 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
36
37 MVS_PHY_CTL = 0x40, /* SOC PHY Control */
38 MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */
39
40 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
41
42 MVS_CTL = 0x100, /* SAS/SATA port configuration */
43 MVS_PCS = 0x104, /* SAS/SATA port control/status */
44 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
45 MVS_CMD_LIST_HI = 0x10C,
46 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
47 MVS_RX_FIS_HI = 0x114,
48 MVS_STP_REG_SET_0 = 0x118, /* STP/SATA Register Set Enable */
49 MVS_STP_REG_SET_1 = 0x11C,
50 MVS_TX_CFG = 0x120, /* TX configuration */
51 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
52 MVS_TX_HI = 0x128,
53
54 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
55 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
56 MVS_RX_CFG = 0x134, /* RX configuration */
57 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
58 MVS_RX_HI = 0x13C,
59 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
60
61 MVS_INT_COAL = 0x148, /* Int coalescing config */
62 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
63 MVS_INT_STAT = 0x150, /* Central int status */
64 MVS_INT_MASK = 0x154, /* Central int enable */
65 MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */
66 MVS_INT_MASK_SRS_0 = 0x15C,
67 MVS_INT_STAT_SRS_1 = 0x160,
68 MVS_INT_MASK_SRS_1 = 0x164,
69 MVS_NON_NCQ_ERR_0 = 0x168, /* SRS Non-specific NCQ Error */
70 MVS_NON_NCQ_ERR_1 = 0x16C,
71 MVS_CMD_ADDR = 0x170, /* Command register port (addr) */
72 MVS_CMD_DATA = 0x174, /* Command register port (data) */
73 MVS_MEM_PARITY_ERR = 0x178, /* Memory parity error */
74
75 /* ports 1-3 follow after this */
76 MVS_P0_INT_STAT = 0x180, /* port0 interrupt status */
77 MVS_P0_INT_MASK = 0x184, /* port0 interrupt mask */
78 /* ports 5-7 follow after this */
79 MVS_P4_INT_STAT = 0x1A0, /* Port4 interrupt status */
80 MVS_P4_INT_MASK = 0x1A4, /* Port4 interrupt enable mask */
81
82 /* ports 1-3 follow after this */
83 MVS_P0_SER_CTLSTAT = 0x1D0, /* port0 serial control/status */
84 /* ports 5-7 follow after this */
85 MVS_P4_SER_CTLSTAT = 0x1E0, /* port4 serial control/status */
86
87 /* ports 1-3 follow after this */
88 MVS_P0_CFG_ADDR = 0x200, /* port0 phy register address */
89 MVS_P0_CFG_DATA = 0x204, /* port0 phy register data */
90 /* ports 5-7 follow after this */
91 MVS_P4_CFG_ADDR = 0x220, /* Port4 config address */
92 MVS_P4_CFG_DATA = 0x224, /* Port4 config data */
93
94 /* phys 1-3 follow after this */
95 MVS_P0_VSR_ADDR = 0x250, /* phy0 VSR address */
96 MVS_P0_VSR_DATA = 0x254, /* phy0 VSR data */
97 /* phys 1-3 follow after this */
98 /* multiplexing */
99 MVS_P4_VSR_ADDR = 0x250, /* phy4 VSR address */
100 MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */
101 MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */
102 MVS_PA_VSR_PORT = 0x294, /* All port VSR data */
103};
104
105enum pci_cfg_registers {
106 PCR_PHY_CTL = 0x40,
107 PCR_PHY_CTL2 = 0x90,
108 PCR_DEV_CTRL = 0x78,
109 PCR_LINK_STAT = 0x82,
110};
111
112/* SAS/SATA Vendor Specific Port Registers */
113enum sas_sata_vsp_regs {
114 VSR_PHY_STAT = 0x00 * 4, /* Phy Status */
115 VSR_PHY_MODE1 = 0x01 * 4, /* phy tx */
116 VSR_PHY_MODE2 = 0x02 * 4, /* tx scc */
117 VSR_PHY_MODE3 = 0x03 * 4, /* pll */
118 VSR_PHY_MODE4 = 0x04 * 4, /* VCO */
119 VSR_PHY_MODE5 = 0x05 * 4, /* Rx */
120 VSR_PHY_MODE6 = 0x06 * 4, /* CDR */
121 VSR_PHY_MODE7 = 0x07 * 4, /* Impedance */
122 VSR_PHY_MODE8 = 0x08 * 4, /* Voltage */
123 VSR_PHY_MODE9 = 0x09 * 4, /* Test */
124 VSR_PHY_MODE10 = 0x0A * 4, /* Power */
125 VSR_PHY_MODE11 = 0x0B * 4, /* Phy Mode */
126	VSR_PHY_VS0 = 0x0C * 4, /* Vendor Specific 0 */
127	VSR_PHY_VS1 = 0x0D * 4, /* Vendor Specific 1 */
128};
129
130enum chip_register_bits {
131 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
132 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
133 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12),
134 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
135 (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
136};
137
138enum pci_interrupt_cause {
139 /* MAIN_IRQ_CAUSE (R10200) Bits*/
140 IRQ_COM_IN_I2O_IOP0 = (1 << 0),
141 IRQ_COM_IN_I2O_IOP1 = (1 << 1),
142 IRQ_COM_IN_I2O_IOP2 = (1 << 2),
143 IRQ_COM_IN_I2O_IOP3 = (1 << 3),
144 IRQ_COM_OUT_I2O_HOS0 = (1 << 4),
145 IRQ_COM_OUT_I2O_HOS1 = (1 << 5),
146 IRQ_COM_OUT_I2O_HOS2 = (1 << 6),
147 IRQ_COM_OUT_I2O_HOS3 = (1 << 7),
148 IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8),
149 IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9),
150 IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10),
151 IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11),
152 IRQ_PCIF_DRBL0 = (1 << 12),
153 IRQ_PCIF_DRBL1 = (1 << 13),
154 IRQ_PCIF_DRBL2 = (1 << 14),
155 IRQ_PCIF_DRBL3 = (1 << 15),
156 IRQ_XOR_A = (1 << 16),
157 IRQ_XOR_B = (1 << 17),
158 IRQ_SAS_A = (1 << 18),
159 IRQ_SAS_B = (1 << 19),
160 IRQ_CPU_CNTRL = (1 << 20),
161 IRQ_GPIO = (1 << 21),
162 IRQ_UART = (1 << 22),
163 IRQ_SPI = (1 << 23),
164 IRQ_I2C = (1 << 24),
165 IRQ_SGPIO = (1 << 25),
166 IRQ_COM_ERR = (1 << 29),
167 IRQ_I2O_ERR = (1 << 30),
168 IRQ_PCIE_ERR = (1 << 31),
169};
170
171#define MAX_SG_ENTRY 255
172
173struct mvs_prd_imt {
174 __le32 len:22;
175 u8 _r_a:2;
176 u8 misc_ctl:4;
177 u8 inter_sel:4;
178};
179
180struct mvs_prd {
181 /* 64-bit buffer address */
182 __le64 addr;
183 /* 22-bit length */
184 struct mvs_prd_imt im_len;
185} __attribute__ ((packed));
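/*
 * Illustrative compile-time guard (an assumption, not in the original
 * patch): with the packed attribute, a PRD entry should be exactly
 * 12 bytes on the wire (8-byte address + 32 bits of length/control).
 */
static inline void mvs_prd_layout_check_demo(void)
{
	BUILD_BUG_ON(sizeof(struct mvs_prd) != 12);
}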
186
187#define SPI_CTRL_REG_94XX 0xc800
188#define SPI_ADDR_REG_94XX 0xc804
189#define SPI_WR_DATA_REG_94XX 0xc808
190#define SPI_RD_DATA_REG_94XX 0xc80c
191#define SPI_CTRL_READ_94XX (1U << 2)
192#define SPI_ADDR_VLD_94XX (1U << 1)
193#define SPI_CTRL_SpiStart_94XX (1U << 0)
194
195#define mv_ffc(x) ffz(x)
196
197static inline int
198mv_ffc64(u64 v)
199{
200 int i;
201 i = mv_ffc((u32)v);
202 if (i >= 0)
203 return i;
204 i = mv_ffc((u32)(v>>32));
205
206 if (i != 0)
207 return 32 + i;
208
209 return -1;
210}
211
212#define r_reg_set_enable(i) \
213 (((i) > 31) ? mr32(MVS_STP_REG_SET_1) : \
214 mr32(MVS_STP_REG_SET_0))
215
216#define w_reg_set_enable(i, tmp) \
217 (((i) > 31) ? mw32(MVS_STP_REG_SET_1, tmp) : \
218 mw32(MVS_STP_REG_SET_0, tmp))
219
220extern const struct mvs_dispatch mvs_94xx_dispatch;
221#endif
222
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
new file mode 100644
index 00000000000..a67e1c4172f
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -0,0 +1,280 @@
1/*
2 * Marvell 88SE64xx/88SE94xx register IO interface
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25
26#ifndef _MV_CHIPS_H_
27#define _MV_CHIPS_H_
28
29#define mr32(reg) readl(regs + reg)
30#define mw32(reg, val) writel((val), regs + reg)
31#define mw32_f(reg, val) do { \
32 mw32(reg, val); \
33 mr32(reg); \
34 } while (0)
35
36#define iow32(reg, val) outl(val, (unsigned long)(regs + reg))
37#define ior32(reg) inl((unsigned long)(regs + reg))
38#define iow16(reg, val) outw(val, (unsigned long)(regs + reg))
39#define ior16(reg) inw((unsigned long)(regs + reg))
40#define iow8(reg, val) outb(val, (unsigned long)(regs + reg))
41#define ior8(reg) inb((unsigned long)(regs + reg))
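
/*
 * Note on mw32_f(): the readl() after writel() flushes posted PCI
 * writes, guaranteeing the store has reached the device before the
 * macro returns. An open-coded equivalent (sketch only):
 */
static inline void mvs_w32_flush_demo(void __iomem *regs, u32 reg, u32 val)
{
	writel(val, regs + reg);
	(void)readl(regs + reg);	/* flush posted write */
}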
42
43static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr)
44{
45 void __iomem *regs = mvi->regs;
46 mw32(MVS_CMD_ADDR, addr);
47 return mr32(MVS_CMD_DATA);
48}
49
50static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val)
51{
52 void __iomem *regs = mvi->regs;
53 mw32(MVS_CMD_ADDR, addr);
54 mw32(MVS_CMD_DATA, val);
55}
56
57static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
58{
59 void __iomem *regs = mvi->regs;
60 return (port < 4) ? mr32(MVS_P0_SER_CTLSTAT + port * 4) :
61 mr32(MVS_P4_SER_CTLSTAT + (port - 4) * 4);
62}
63
64static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
65{
66 void __iomem *regs = mvi->regs;
67 if (port < 4)
68 mw32(MVS_P0_SER_CTLSTAT + port * 4, val);
69 else
70 mw32(MVS_P4_SER_CTLSTAT + (port - 4) * 4, val);
71}
72
73static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off,
74 u32 off2, u32 port)
75{
76 void __iomem *regs = mvi->regs + off;
77 void __iomem *regs2 = mvi->regs + off2;
78 return (port < 4) ? readl(regs + port * 8) :
79 readl(regs2 + (port - 4) * 8);
80}
81
82static inline void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
83 u32 port, u32 val)
84{
85 void __iomem *regs = mvi->regs + off;
86 void __iomem *regs2 = mvi->regs + off2;
87 if (port < 4)
88 writel(val, regs + port * 8);
89 else
90 writel(val, regs2 + (port - 4) * 8);
91}
92
93static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
94{
95 return mvs_read_port(mvi, MVS_P0_CFG_DATA,
96 MVS_P4_CFG_DATA, port);
97}
98
99static inline void mvs_write_port_cfg_data(struct mvs_info *mvi,
100 u32 port, u32 val)
101{
102 mvs_write_port(mvi, MVS_P0_CFG_DATA,
103 MVS_P4_CFG_DATA, port, val);
104}
105
106static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi,
107 u32 port, u32 addr)
108{
109 mvs_write_port(mvi, MVS_P0_CFG_ADDR,
110 MVS_P4_CFG_ADDR, port, addr);
111 mdelay(10);
112}
113
114static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
115{
116 return mvs_read_port(mvi, MVS_P0_VSR_DATA,
117 MVS_P4_VSR_DATA, port);
118}
119
120static inline void mvs_write_port_vsr_data(struct mvs_info *mvi,
121 u32 port, u32 val)
122{
123 mvs_write_port(mvi, MVS_P0_VSR_DATA,
124 MVS_P4_VSR_DATA, port, val);
125}
126
127static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi,
128 u32 port, u32 addr)
129{
130 mvs_write_port(mvi, MVS_P0_VSR_ADDR,
131 MVS_P4_VSR_ADDR, port, addr);
132 mdelay(10);
133}
134
135static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
136{
137 return mvs_read_port(mvi, MVS_P0_INT_STAT,
138 MVS_P4_INT_STAT, port);
139}
140
141static inline void mvs_write_port_irq_stat(struct mvs_info *mvi,
142 u32 port, u32 val)
143{
144 mvs_write_port(mvi, MVS_P0_INT_STAT,
145 MVS_P4_INT_STAT, port, val);
146}
147
148static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
149{
150 return mvs_read_port(mvi, MVS_P0_INT_MASK,
151 MVS_P4_INT_MASK, port);
152
153}
154
155static inline void mvs_write_port_irq_mask(struct mvs_info *mvi,
156 u32 port, u32 val)
157{
158 mvs_write_port(mvi, MVS_P0_INT_MASK,
159 MVS_P4_INT_MASK, port, val);
160}
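
/*
 * The helpers above wrap an address/data indirect access pattern:
 * program the port's address register, then read or write the matching
 * data register. Sketch (assumed helper, not driver code) fetching a
 * phy's SAS address low word using PHYR_ADDR_LO from mv_defs.h:
 */
static inline u32 mvs_read_phy_addr_lo_demo(struct mvs_info *mvi, u32 port)
{
	mvs_write_port_cfg_addr(mvi, port, PHYR_ADDR_LO);
	return mvs_read_port_cfg_data(mvi, port);
}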
161
162static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
163{
164 u32 tmp;
165
166 /* workaround for SATA R-ERR, to ignore phy glitch */
167 tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
168 tmp &= ~(1 << 9);
169 tmp |= (1 << 10);
170 mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
171
172 /* enable retry 127 times */
173 mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f);
174
175 /* extend open frame timeout to max */
176 tmp = mvs_cr32(mvi, CMD_SAS_CTL0);
177 tmp &= ~0xffff;
178 tmp |= 0x3fff;
179 mvs_cw32(mvi, CMD_SAS_CTL0, tmp);
180
181	/* workaround for WD timeout: set to 550 ms */
182 mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);
183
184 /* not to halt for different port op during wideport link change */
185 mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);
186
187	/* workaround for the Seagate disk not-found OOB sequence: receive
188	 * COMINIT before sending out COMWAKE */
189 tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
190 tmp &= 0x0000ffff;
191 tmp |= 0x00fa0000;
192 mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
193
194 tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
195 tmp &= 0x1fffffff;
196 tmp |= (2U << 29); /* 8 ms retry */
197 mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
198}
199
200static inline void mvs_int_sata(struct mvs_info *mvi)
201{
202 u32 tmp;
203 void __iomem *regs = mvi->regs;
204 tmp = mr32(MVS_INT_STAT_SRS_0);
205 if (tmp)
206 mw32(MVS_INT_STAT_SRS_0, tmp);
207 MVS_CHIP_DISP->clear_active_cmds(mvi);
208}
209
210static inline void mvs_int_full(struct mvs_info *mvi)
211{
212 void __iomem *regs = mvi->regs;
213 u32 tmp, stat;
214 int i;
215
216 stat = mr32(MVS_INT_STAT);
217 mvs_int_rx(mvi, false);
218
219 for (i = 0; i < mvi->chip->n_phy; i++) {
220 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
221 if (tmp)
222 mvs_int_port(mvi, i, tmp);
223 }
224
225 if (stat & CINT_SRS)
226 mvs_int_sata(mvi);
227
228 mw32(MVS_INT_STAT, stat);
229}
230
231static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx)
232{
233 void __iomem *regs = mvi->regs;
234 mw32(MVS_TX_PROD_IDX, tx);
235}
236
237static inline u32 mvs_rx_update(struct mvs_info *mvi)
238{
239 void __iomem *regs = mvi->regs;
240 return mr32(MVS_RX_CONS_IDX);
241}
242
243static inline u32 mvs_get_prd_size(void)
244{
245 return sizeof(struct mvs_prd);
246}
247
248static inline u32 mvs_get_prd_count(void)
249{
250 return MAX_SG_ENTRY;
251}
252
253static inline void mvs_show_pcie_usage(struct mvs_info *mvi)
254{
255 u16 link_stat, link_spd;
256 const char *spd[] = {
257 "UnKnown",
258 "2.5",
259 "5.0",
260 };
261 if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0)
262 return;
263
264 pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat);
265 link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS;
266 if (link_spd >= 3)
267 link_spd = 0;
268 dev_printk(KERN_INFO, mvi->dev,
269 "mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n",
270 (link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS,
271 spd[link_spd]);
272}
273
274static inline u32 mvs_hw_max_link_rate(void)
275{
276 return MAX_LINK_RATE;
277}
278
279#endif /* _MV_CHIPS_H_ */
280
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
new file mode 100644
index 00000000000..f8cb9defb96
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -0,0 +1,502 @@
1/*
2 * Marvell 88SE64xx/88SE94xx constants header file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MV_DEFS_H_
26#define _MV_DEFS_H_
27
28
29enum chip_flavors {
30 chip_6320,
31 chip_6440,
32 chip_6485,
33 chip_9480,
34 chip_9180,
35};
36
37/* driver compile-time configuration */
38enum driver_configuration {
39 MVS_SLOTS = 512, /* command slots */
40 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
41 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
42 /* software requires power-of-2
43 ring size */
44 MVS_SOC_SLOTS = 64,
45 MVS_SOC_TX_RING_SZ = MVS_SOC_SLOTS * 2,
46 MVS_SOC_RX_RING_SZ = MVS_SOC_SLOTS * 2,
47
48 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
49 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
50 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
51 MVS_OAF_SZ = 64, /* Open address frame buffer size */
52 MVS_QUEUE_SIZE = 32, /* Support Queue depth */
53 MVS_CAN_QUEUE = MVS_SLOTS - 2, /* SCSI Queue depth */
54 MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2,
55};
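
/*
 * The ring sizes above must be powers of two (see the comment in the
 * enum). An illustrative compile-time guard, not in the original patch:
 */
static inline void mvs_ring_size_check_demo(void)
{
	BUILD_BUG_ON(MVS_TX_RING_SZ & (MVS_TX_RING_SZ - 1));
	BUILD_BUG_ON(MVS_RX_RING_SZ & (MVS_RX_RING_SZ - 1));
}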
56
57/* unchangeable hardware details */
58enum hardware_details {
59 MVS_MAX_PHYS = 8, /* max. possible phys */
60 MVS_MAX_PORTS = 8, /* max. possible ports */
61 MVS_SOC_PHYS = 4, /* soc phys */
62 MVS_SOC_PORTS = 4, /* soc phys */
63 MVS_MAX_DEVICES = 1024, /* max supported device */
64};
65
66/* peripheral registers (BAR2) */
67enum peripheral_registers {
68 SPI_CTL = 0x10, /* EEPROM control */
69 SPI_CMD = 0x14, /* EEPROM command */
70 SPI_DATA = 0x18, /* EEPROM data */
71};
72
73enum peripheral_register_bits {
74 TWSI_RDY = (1U << 7), /* EEPROM interface ready */
75 TWSI_RD = (1U << 4), /* EEPROM read access */
76
77 SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
78};
79
80enum hw_register_bits {
81 /* MVS_GBL_CTL */
82 INT_EN = (1U << 1), /* Global int enable */
83 HBA_RST = (1U << 0), /* HBA reset */
84
85 /* MVS_GBL_INT_STAT */
86 INT_XOR = (1U << 4), /* XOR engine event */
87 INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
88
89 /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
90 SATA_TARGET = (1U << 16), /* port0 SATA target enable */
91 MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
92 MODE_AUTO_DET_PORT6 = (1U << 14),
93 MODE_AUTO_DET_PORT5 = (1U << 13),
94 MODE_AUTO_DET_PORT4 = (1U << 12),
95 MODE_AUTO_DET_PORT3 = (1U << 11),
96 MODE_AUTO_DET_PORT2 = (1U << 10),
97 MODE_AUTO_DET_PORT1 = (1U << 9),
98 MODE_AUTO_DET_PORT0 = (1U << 8),
99 MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
100 MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
101 MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
102 MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
103 MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
104 MODE_SAS_PORT6_MASK = (1U << 6),
105 MODE_SAS_PORT5_MASK = (1U << 5),
106 MODE_SAS_PORT4_MASK = (1U << 4),
107 MODE_SAS_PORT3_MASK = (1U << 3),
108 MODE_SAS_PORT2_MASK = (1U << 2),
109 MODE_SAS_PORT1_MASK = (1U << 1),
110 MODE_SAS_PORT0_MASK = (1U << 0),
111 MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
112 MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
113 MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
114 MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
115
116 /* SAS_MODE value may be
117 * dictated (in hw) by values
118 * of SATA_TARGET & AUTO_DET
119 */
120
121 /* MVS_TX_CFG */
122 TX_EN = (1U << 16), /* Enable TX */
123 TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
124
125 /* MVS_RX_CFG */
126 RX_EN = (1U << 16), /* Enable RX */
127 RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
128
129 /* MVS_INT_COAL */
130 COAL_EN = (1U << 16), /* Enable int coalescing */
131
132 /* MVS_INT_STAT, MVS_INT_MASK */
133 CINT_I2C = (1U << 31), /* I2C event */
134 CINT_SW0 = (1U << 30), /* software event 0 */
135 CINT_SW1 = (1U << 29), /* software event 1 */
136 CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
137 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
138 CINT_MEM = (1U << 26), /* int mem parity err */
139 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
140 CINT_SRS = (1U << 3), /* SRS event */
141 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
142 CINT_DONE = (1U << 0), /* cmd completion */
143
144 /* shl for ports 1-3 */
145 CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
146 CINT_PORT = (1U << 8), /* port0 event */
147 CINT_PORT_MASK_OFFSET = 8,
148 CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
149 CINT_PHY_MASK_OFFSET = 4,
150 CINT_PHY_MASK = (0x0F << CINT_PHY_MASK_OFFSET),
151
152 /* TX (delivery) ring bits */
153 TXQ_CMD_SHIFT = 29,
154 TXQ_CMD_SSP = 1, /* SSP protocol */
155 TXQ_CMD_SMP = 2, /* SMP protocol */
156 TXQ_CMD_STP = 3, /* STP/SATA protocol */
157 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
158 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
159 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
160 TXQ_MODE_TARGET = 0,
161 TXQ_MODE_INITIATOR = 1,
162 TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
163 TXQ_PRI_NORMAL = 0,
164 TXQ_PRI_HIGH = 1,
165 TXQ_SRS_SHIFT = 20, /* SATA register set */
166 TXQ_SRS_MASK = 0x7f,
167 TXQ_PHY_SHIFT = 12, /* PHY bitmap */
168 TXQ_PHY_MASK = 0xff,
169 TXQ_SLOT_MASK = 0xfff, /* slot number */
170
171 /* RX (completion) ring bits */
172 RXQ_GOOD = (1U << 23), /* Response good */
173 RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
174 RXQ_CMD_RX = (1U << 20), /* target cmd received */
175 RXQ_ATTN = (1U << 19), /* attention */
176 RXQ_RSP = (1U << 18), /* response frame xfer'd */
177 RXQ_ERR = (1U << 17), /* err info rec xfer'd */
178 RXQ_DONE = (1U << 16), /* cmd complete */
179 RXQ_SLOT_MASK = 0xfff, /* slot number */
180
181 /* mvs_cmd_hdr bits */
182 MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
183 MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
184
185 /* SSP initiator only */
186 MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
187
188 /* SSP initiator or target */
189 MCH_SSP_FR_TASK = 0x1, /* TASK frame */
190
191 /* SSP target only */
192 MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
193 MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
194 MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
195 MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
196
197 MCH_SSP_MODE_PASSTHRU = 1,
198 MCH_SSP_MODE_NORMAL = 0,
199 MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
200 MCH_FBURST = (1U << 11), /* first burst (SSP) */
201 MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
202 MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
203 MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
204 MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
205 MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
206 MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
207 MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
208 MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
209
210 CCTL_RST = (1U << 5), /* port logic reset */
211
212 /* 0(LSB first), 1(MSB first) */
213 CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
214 CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
215 CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
216 CCTL_ENDIAN_CMD = (1U << 0), /* command table */
217
218 /* MVS_Px_SER_CTLSTAT (per-phy control) */
219 PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
220 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
221 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
222 PHY_RST = (1U << 0), /* phy reset */
223 PHY_READY_MASK = (1U << 20),
224
225 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
226 PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
227	PHYEV_DCDR_ERR = (1U << 23), /* STP Decoder Error */
228 PHYEV_CRC_ERR = (1U << 22), /* STP CRC Error */
229 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
230 PHYEV_AN = (1U << 18), /* SATA async notification */
231 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
232 PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
233 PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
234 PHYEV_IU_BIG = (1U << 11), /* IU too long err */
235 PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
236 PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
237 PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
238 PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
239 PHYEV_PORT_SEL = (1U << 6), /* port selector present */
240 PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
241 PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
242 PHYEV_ID_FAIL = (1U << 3), /* identify failed */
243 PHYEV_ID_DONE = (1U << 2), /* identify done */
244 PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
245 PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
246
247 /* MVS_PCS */
248 PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
249 PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
250 PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6485 */
251 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
252 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
253 PCS_SATA_RETRY_2 = (1U << 6), /* For 9180 */
254 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
255 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
256 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
257 PCS_CMD_RST = (1U << 1), /* reset cmd issue */
258 PCS_CMD_EN = (1U << 0), /* enable cmd issue */
259
260 /* Port n Attached Device Info */
261 PORT_DEV_SSP_TRGT = (1U << 19),
262 PORT_DEV_SMP_TRGT = (1U << 18),
263 PORT_DEV_STP_TRGT = (1U << 17),
264 PORT_DEV_SSP_INIT = (1U << 11),
265 PORT_DEV_SMP_INIT = (1U << 10),
266 PORT_DEV_STP_INIT = (1U << 9),
267 PORT_PHY_ID_MASK = (0xFFU << 24),
268 PORT_SSP_TRGT_MASK = (0x1U << 19),
269 PORT_SSP_INIT_MASK = (0x1U << 11),
270 PORT_DEV_TRGT_MASK = (0x7U << 17),
271 PORT_DEV_INIT_MASK = (0x7U << 9),
272 PORT_DEV_TYPE_MASK = (0x7U << 0),
273
274 /* Port n PHY Status */
275 PHY_RDY = (1U << 2),
276 PHY_DW_SYNC = (1U << 1),
277 PHY_OOB_DTCTD = (1U << 0),
278
279 /* VSR */
280 /* PHYMODE 6 (CDB) */
281 PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */
282 PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */
283 PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/
284 PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */
285 PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */
286 PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */
287 PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */
288 PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */
289 PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */
290 PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */
291 PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */
292 PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */
293 PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */
294 PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */
295};
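
/*
 * Sketch (assumed helper, not driver code): decoding the Port n
 * Attached Device Info word with the masks above. Note that
 * mvs_94xx_make_dev_info() in mv_94xx.c packs the phy id into
 * bits 31:24 the same way.
 */
static inline u8 mvs_att_phy_id_demo(u32 att_dev_info)
{
	return (att_dev_info & PORT_PHY_ID_MASK) >> 24;
}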
296
297/* SAS/SATA configuration port registers, aka phy registers */
298enum sas_sata_config_port_regs {
299 PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
300 PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
301 PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
302 PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
303 PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
304 PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
305 PHYR_SATA_CTL = 0x18, /* SATA control */
306 PHYR_PHY_STAT = 0x1C, /* PHY status */
307 PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */
308 PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */
309 PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */
310 PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */
311 PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
312 PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
313 PHYR_WIDE_PORT = 0x38, /* wide port participating */
314 PHYR_CURRENT0 = 0x80, /* current connection info 0 */
315 PHYR_CURRENT1 = 0x84, /* current connection info 1 */
316 PHYR_CURRENT2 = 0x88, /* current connection info 2 */
317 CONFIG_ID_FRAME0 = 0x100, /* Port device ID frame register 0 */
318 CONFIG_ID_FRAME1 = 0x104, /* Port device ID frame register 1 */
319 CONFIG_ID_FRAME2 = 0x108, /* Port device ID frame register 2 */
320 CONFIG_ID_FRAME3 = 0x10c, /* Port device ID frame register 3 */
321 CONFIG_ID_FRAME4 = 0x110, /* Port device ID frame register 4 */
322 CONFIG_ID_FRAME5 = 0x114, /* Port device ID frame register 5 */
323 CONFIG_ID_FRAME6 = 0x118, /* Port device ID frame register 6 */
324 CONFIG_ATT_ID_FRAME0 = 0x11c, /* attached ID frame register 0 */
325 CONFIG_ATT_ID_FRAME1 = 0x120, /* attached ID frame register 1 */
326 CONFIG_ATT_ID_FRAME2 = 0x124, /* attached ID frame register 2 */
327 CONFIG_ATT_ID_FRAME3 = 0x128, /* attached ID frame register 3 */
328 CONFIG_ATT_ID_FRAME4 = 0x12c, /* attached ID frame register 4 */
329 CONFIG_ATT_ID_FRAME5 = 0x130, /* attached ID frame register 5 */
330 CONFIG_ATT_ID_FRAME6 = 0x134, /* attached ID frame register 6 */
331};
332
333enum sas_cmd_port_registers {
334 CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
335 CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
336 CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
337 CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
338 CMD_OOB_SPACE = 0x110, /* OOB space control register */
339 CMD_OOB_BURST = 0x114, /* OOB burst control register */
340 CMD_PHY_TIMER = 0x118, /* PHY timer control register */
341 CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
342 CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
343 CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
344 CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
345 CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
346 CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
347 CMD_ID_TEST = 0x134, /* ID test register */
348 CMD_PL_TIMER = 0x138, /* PL timer register */
349 CMD_WD_TIMER = 0x13c, /* WD timer register */
350 CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
351 CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
352 CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
353 CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
354 CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
355 CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
356 CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
357 CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
358 CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
359	CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memory BIST Status */
360 CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
361 CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
362 CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
363 CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
364 CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
365 CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
366 CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
367 CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
368 CMD_RESET_COUNT = 0x188, /* Reset Count */
369 CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
370 CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
371 CMD_PHY_CTL = 0x194, /* PHY Control and Status */
372 CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
373 CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
374 CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
375 CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
376 CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
377 CMD_HOST_CTL = 0x1AC, /* Host Control Status */
378 CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
379 CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
380 CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
381 CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
382 CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
383 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
384};
385
386enum mvs_info_flags {
387 MVF_MSI = (1U << 0), /* MSI is enabled */
388 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
389 MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */
390};
391
392enum mvs_event_flags {
393 PHY_PLUG_EVENT = (3U),
394 PHY_PLUG_IN = (1U << 0), /* phy plug in */
395 PHY_PLUG_OUT = (1U << 1), /* phy plug out */
396};
397
398enum mvs_port_type {
399 PORT_TGT_MASK = (1U << 5),
400 PORT_INIT_PORT = (1U << 4),
401 PORT_TGT_PORT = (1U << 3),
402 PORT_INIT_TGT_PORT = (PORT_INIT_PORT | PORT_TGT_PORT),
403 PORT_TYPE_SAS = (1U << 1),
404 PORT_TYPE_SATA = (1U << 0),
405};
406
407/* Command Table Format */
408enum ct_format {
409 /* SSP */
410 SSP_F_H = 0x00,
411 SSP_F_IU = 0x18,
412 SSP_F_MAX = 0x4D,
413 /* STP */
414 STP_CMD_FIS = 0x00,
415 STP_ATAPI_CMD = 0x40,
416 STP_F_MAX = 0x10,
417 /* SMP */
418 SMP_F_T = 0x00,
419 SMP_F_DEP = 0x01,
420 SMP_F_MAX = 0x101,
421};
422
423enum status_buffer {
424 SB_EIR_OFF = 0x00, /* Error Information Record */
425 SB_RFB_OFF = 0x08, /* Response Frame Buffer */
426 SB_RFB_MAX = 0x400, /* RFB size*/
427};
428
429enum error_info_rec {
430 CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
431 CMD_PI_ERR = (1U << 30), /* Protection info error. see flags2 */
432 RSP_OVER = (1U << 29), /* rsp buffer overflow */
433 RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */
434 UNK_FIS = (1U << 27), /* unknown FIS */
435 DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */
436 SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */
437 TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */
438 R_ERR = (1U << 23), /* SATA returned R_ERR prim */
439 RD_OFS = (1U << 20), /* Read DATA frame invalid offset */
440 XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */
441 UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */
442 DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */
443 INTERLOCK = (1U << 15), /* interlock error */
444 NAK = (1U << 14), /* NAK rx'd */
445 ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */
446 CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */
447 OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */
448 PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */
449 NO_DEST = (1U << 9), /* I_T nexus lost, no destination */
450 STP_RES_BSY = (1U << 8), /* STP resources busy */
451 BREAK = (1U << 7), /* break received */
452 BAD_DEST = (1U << 6), /* bad destination */
453 BAD_PROTO = (1U << 5), /* protocol not supported */
454 BAD_RATE = (1U << 4), /* cxn rate not supported */
455 WRONG_DEST = (1U << 3), /* wrong destination error */
456 CREDIT_TO = (1U << 2), /* credit timeout */
457 WDOG_TO = (1U << 1), /* watchdog timeout */
458 BUF_PAR = (1U << 0), /* buffer parity error */
459};
460
461enum error_info_rec_2 {
462 SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */
463 GRD_CHK_ERR = (1U << 14), /* Guard Check Error */
464 APP_CHK_ERR = (1U << 13), /* Application Check error */
465 REF_CHK_ERR = (1U << 12), /* Reference Check Error */
466 USR_BLK_NM = (1U << 0), /* User Block Number */
467};
468
469enum pci_cfg_register_bits {
470 PCTL_PWR_OFF = (0xFU << 24),
471 PCTL_COM_ON = (0xFU << 20),
472 PCTL_LINK_RST = (0xFU << 16),
473 PCTL_LINK_OFFS = (16),
474 PCTL_PHY_DSBL = (0xFU << 12),
475 PCTL_PHY_DSBL_OFFS = (12),
476 PRD_REQ_SIZE = (0x4000),
477 PRD_REQ_MASK = (0x00007000),
478 PLS_NEG_LINK_WD = (0x3FU << 4),
479 PLS_NEG_LINK_WD_OFFS = 4,
480 PLS_LINK_SPD = (0x0FU << 0),
481 PLS_LINK_SPD_OFFS = 0,
482};
483
484enum open_frame_protocol {
485 PROTOCOL_SMP = 0x0,
486 PROTOCOL_SSP = 0x1,
487 PROTOCOL_STP = 0x2,
488};
489
490/* define for response frame datapres field */
491enum datapres_field {
492 NO_DATA = 0,
493 RESPONSE_DATA = 1,
494 SENSE_DATA = 2,
495};
496
497/* define task management IU */
498struct mvs_tmf_task{
499 u8 tmf;
500 u16 tag_of_task_to_be_managed;
501};
502#endif
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
new file mode 100644
index 00000000000..8646a19f999
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -0,0 +1,703 @@
1/*
2 * Marvell 88SE64xx/88SE94xx pci init
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25
26#include "mv_sas.h"
27
28static struct scsi_transport_template *mvs_stt;
29static const struct mvs_chip_info mvs_chips[] = {
30 [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
31 [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
32 [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
33 [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
34 [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
35};
36
37#define SOC_SAS_NUM 2
38
39static struct scsi_host_template mvs_sht = {
40 .module = THIS_MODULE,
41 .name = DRV_NAME,
42 .queuecommand = sas_queuecommand,
43 .target_alloc = sas_target_alloc,
44 .slave_configure = mvs_slave_configure,
45 .slave_destroy = sas_slave_destroy,
46 .scan_finished = mvs_scan_finished,
47 .scan_start = mvs_scan_start,
48 .change_queue_depth = sas_change_queue_depth,
49 .change_queue_type = sas_change_queue_type,
50 .bios_param = sas_bios_param,
51 .can_queue = 1,
52 .cmd_per_lun = 1,
53 .this_id = -1,
54 .sg_tablesize = SG_ALL,
55 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
56 .use_clustering = ENABLE_CLUSTERING,
57 .eh_device_reset_handler = sas_eh_device_reset_handler,
58 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
59 .slave_alloc = mvs_slave_alloc,
60 .target_destroy = sas_target_destroy,
61 .ioctl = sas_ioctl,
62};
63
64static struct sas_domain_function_template mvs_transport_ops = {
65 .lldd_dev_found = mvs_dev_found,
66 .lldd_dev_gone = mvs_dev_gone,
67
68 .lldd_execute_task = mvs_queue_command,
69 .lldd_control_phy = mvs_phy_control,
70
71 .lldd_abort_task = mvs_abort_task,
72 .lldd_abort_task_set = mvs_abort_task_set,
73 .lldd_clear_aca = mvs_clear_aca,
74 .lldd_clear_task_set = mvs_clear_task_set,
75 .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
76 .lldd_lu_reset = mvs_lu_reset,
77 .lldd_query_task = mvs_query_task,
78
79 .lldd_port_formed = mvs_port_formed,
80 .lldd_port_deformed = mvs_port_deformed,
81
82};
83
84static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
85{
86 struct mvs_phy *phy = &mvi->phy[phy_id];
87 struct asd_sas_phy *sas_phy = &phy->sas_phy;
88
89 phy->mvi = mvi;
90 init_timer(&phy->timer);
91 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
92 sas_phy->class = SAS;
93 sas_phy->iproto = SAS_PROTOCOL_ALL;
94 sas_phy->tproto = 0;
95 sas_phy->type = PHY_TYPE_PHYSICAL;
96 sas_phy->role = PHY_ROLE_INITIATOR;
97 sas_phy->oob_mode = OOB_NOT_CONNECTED;
98 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
99
100 sas_phy->id = phy_id;
101 sas_phy->sas_addr = &mvi->sas_addr[0];
102 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
103 sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
104 sas_phy->lldd_phy = phy;
105}
106
107static void mvs_free(struct mvs_info *mvi)
108{
109 int i;
110 struct mvs_wq *mwq;
111 int slot_nr;
112
113 if (!mvi)
114 return;
115
116 if (mvi->flags & MVF_FLAG_SOC)
117 slot_nr = MVS_SOC_SLOTS;
118 else
119 slot_nr = MVS_SLOTS;
120
121 for (i = 0; i < mvi->tags_num; i++) {
122 struct mvs_slot_info *slot = &mvi->slot_info[i];
123 if (slot->buf)
124 dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
125 slot->buf, slot->buf_dma);
126 }
127
128 if (mvi->tx)
129 dma_free_coherent(mvi->dev,
130 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
131 mvi->tx, mvi->tx_dma);
132 if (mvi->rx_fis)
133 dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
134 mvi->rx_fis, mvi->rx_fis_dma);
135 if (mvi->rx)
136 dma_free_coherent(mvi->dev,
137 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
138 mvi->rx, mvi->rx_dma);
139 if (mvi->slot)
140 dma_free_coherent(mvi->dev,
141 sizeof(*mvi->slot) * slot_nr,
142 mvi->slot, mvi->slot_dma);
143#ifndef DISABLE_HOTPLUG_DMA_FIX
144 if (mvi->bulk_buffer)
145 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
146 mvi->bulk_buffer, mvi->bulk_buffer_dma);
147#endif
148
149 MVS_CHIP_DISP->chip_iounmap(mvi);
150 if (mvi->shost)
151 scsi_host_put(mvi->shost);
152 list_for_each_entry(mwq, &mvi->wq_list, entry)
153 cancel_delayed_work(&mwq->work_q);
154 kfree(mvi);
155}
156
157#ifdef MVS_USE_TASKLET
158struct tasklet_struct mv_tasklet;
159static void mvs_tasklet(unsigned long opaque)
160{
161 unsigned long flags;
162 u32 stat;
163 u16 core_nr, i = 0;
164
165 struct mvs_info *mvi;
166 struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;
167
168 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
169 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
170
171	/* a NULL mvs_info here would be a driver bug */
172	BUG_ON(!mvi);
173
174 for (i = 0; i < core_nr; i++) {
175 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
176 stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq);
177 if (stat)
178 MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
179 }
180
181}
182#endif
183
184static irqreturn_t mvs_interrupt(int irq, void *opaque)
185{
186 u32 core_nr, i = 0;
187 u32 stat;
188 struct mvs_info *mvi;
189 struct sas_ha_struct *sha = opaque;
190
191 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
192 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
193
194 if (unlikely(!mvi))
195 return IRQ_NONE;
196
197 stat = MVS_CHIP_DISP->isr_status(mvi, irq);
198 if (!stat)
199 return IRQ_NONE;
200
201#ifdef MVS_USE_TASKLET
202 tasklet_schedule(&mv_tasklet);
203#else
204 for (i = 0; i < core_nr; i++) {
205 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
206 MVS_CHIP_DISP->isr(mvi, irq, stat);
207 }
208#endif
209 return IRQ_HANDLED;
210}
211
212static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
213{
214 int i, slot_nr;
215
216 if (mvi->flags & MVF_FLAG_SOC)
217 slot_nr = MVS_SOC_SLOTS;
218 else
219 slot_nr = MVS_SLOTS;
220
221 spin_lock_init(&mvi->lock);
222 for (i = 0; i < mvi->chip->n_phy; i++) {
223 mvs_phy_init(mvi, i);
224 mvi->port[i].wide_port_phymap = 0;
225 mvi->port[i].port_attached = 0;
226 INIT_LIST_HEAD(&mvi->port[i].list);
227 }
228 for (i = 0; i < MVS_MAX_DEVICES; i++) {
229 mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
230 mvi->devices[i].dev_type = NO_DEVICE;
231 mvi->devices[i].device_id = i;
232 mvi->devices[i].dev_status = MVS_DEV_NORMAL;
233 }
234
235 /*
236 * alloc and init our DMA areas
237 */
238 mvi->tx = dma_alloc_coherent(mvi->dev,
239 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
240 &mvi->tx_dma, GFP_KERNEL);
241 if (!mvi->tx)
242 goto err_out;
243 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
244 mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
245 &mvi->rx_fis_dma, GFP_KERNEL);
246 if (!mvi->rx_fis)
247 goto err_out;
248 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
249
250 mvi->rx = dma_alloc_coherent(mvi->dev,
251 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
252 &mvi->rx_dma, GFP_KERNEL);
253 if (!mvi->rx)
254 goto err_out;
255 memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
256 mvi->rx[0] = cpu_to_le32(0xfff);
257 mvi->rx_cons = 0xfff;
258
259 mvi->slot = dma_alloc_coherent(mvi->dev,
260 sizeof(*mvi->slot) * slot_nr,
261 &mvi->slot_dma, GFP_KERNEL);
262 if (!mvi->slot)
263 goto err_out;
264 memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
265
266#ifndef DISABLE_HOTPLUG_DMA_FIX
267 mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
268 TRASH_BUCKET_SIZE,
269 &mvi->bulk_buffer_dma, GFP_KERNEL);
270 if (!mvi->bulk_buffer)
271 goto err_out;
272#endif
273 for (i = 0; i < slot_nr; i++) {
274 struct mvs_slot_info *slot = &mvi->slot_info[i];
275
276 slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
277 &slot->buf_dma, GFP_KERNEL);
278 if (!slot->buf) {
279			printk(KERN_DEBUG "failed to allocate slot->buf.\n");
280 goto err_out;
281 }
282 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
283 ++mvi->tags_num;
284 }
285 /* Initialize tags */
286 mvs_tag_init(mvi);
287 return 0;
288err_out:
289 return 1;
290}
291
292
293int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
294{
295 unsigned long res_start, res_len, res_flag, res_flag_ex = 0;
296 struct pci_dev *pdev = mvi->pdev;
297 if (bar_ex != -1) {
298 /*
299 * ioremap main and peripheral registers
300 */
301 res_start = pci_resource_start(pdev, bar_ex);
302 res_len = pci_resource_len(pdev, bar_ex);
303 if (!res_start || !res_len)
304 goto err_out;
305
306 res_flag_ex = pci_resource_flags(pdev, bar_ex);
307 if (res_flag_ex & IORESOURCE_MEM) {
308 if (res_flag_ex & IORESOURCE_CACHEABLE)
309 mvi->regs_ex = ioremap(res_start, res_len);
310 else
311 mvi->regs_ex = ioremap_nocache(res_start,
312 res_len);
313 } else
314 mvi->regs_ex = (void *)res_start;
315 if (!mvi->regs_ex)
316 goto err_out;
317 }
318
319 res_start = pci_resource_start(pdev, bar);
320 res_len = pci_resource_len(pdev, bar);
321 if (!res_start || !res_len)
322 goto err_out;
323
324 res_flag = pci_resource_flags(pdev, bar);
325 if (res_flag & IORESOURCE_CACHEABLE)
326 mvi->regs = ioremap(res_start, res_len);
327 else
328 mvi->regs = ioremap_nocache(res_start, res_len);
329
330 if (!mvi->regs) {
331 if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
332 iounmap(mvi->regs_ex);
333 mvi->regs_ex = NULL;
334 goto err_out;
335 }
336
337 return 0;
338err_out:
339 return -1;
340}
341
342void mvs_iounmap(void __iomem *regs)
343{
344 iounmap(regs);
345}
346
347static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
348 const struct pci_device_id *ent,
349 struct Scsi_Host *shost, unsigned int id)
350{
351 struct mvs_info *mvi;
352 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
353
354 mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info),
355 GFP_KERNEL);
356 if (!mvi)
357 return NULL;
358
359 mvi->pdev = pdev;
360 mvi->dev = &pdev->dev;
361 mvi->chip_id = ent->driver_data;
362 mvi->chip = &mvs_chips[mvi->chip_id];
363 INIT_LIST_HEAD(&mvi->wq_list);
364 mvi->irq = pdev->irq;
365
366 ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
367 ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
368
369 mvi->id = id;
370 mvi->sas = sha;
371 mvi->shost = shost;
372#ifdef MVS_USE_TASKLET
373 tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha);
374#endif
375
376 if (MVS_CHIP_DISP->chip_ioremap(mvi))
377 goto err_out;
378 if (!mvs_alloc(mvi, shost))
379 return mvi;
380err_out:
381 mvs_free(mvi);
382 return NULL;
383}
384
385/* move to PCI layer or libata core? */
386static int pci_go_64(struct pci_dev *pdev)
387{
388 int rc;
389
390 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
391 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
392 if (rc) {
393 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
394 if (rc) {
395 dev_printk(KERN_ERR, &pdev->dev,
 396				   "64-bit consistent DMA enable failed\n");
397 return rc;
398 }
399 }
400 } else {
401 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
402 if (rc) {
403 dev_printk(KERN_ERR, &pdev->dev,
404 "32-bit DMA enable failed\n");
405 return rc;
406 }
407 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
408 if (rc) {
409 dev_printk(KERN_ERR, &pdev->dev,
410 "32-bit consistent DMA enable failed\n");
411 return rc;
412 }
413 }
414
415 return rc;
416}
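/*
 * Usage sketch (an assumption based on the probe path below, not a
 * separate API): pci_go_64() is called once during probe, before any
 * DMA allocations. The fallback ladder above tries a 64-bit streaming
 * mask first, then a 32-bit consistent mask, then fully 32-bit DMA:
 *
 *	rc = pci_go_64(pdev);
 *	if (rc)
 *		goto err_out_regions;	// as in mvs_pci_init() below
 */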
417
418static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
419 const struct mvs_chip_info *chip_info)
420{
421 int phy_nr, port_nr; unsigned short core_nr;
422 struct asd_sas_phy **arr_phy;
423 struct asd_sas_port **arr_port;
424 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
425
426 core_nr = chip_info->n_host;
427 phy_nr = core_nr * chip_info->n_phy;
428 port_nr = phy_nr;
429
430 memset(sha, 0x00, sizeof(struct sas_ha_struct));
431 arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
432 arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
433 if (!arr_phy || !arr_port)
434 goto exit_free;
435
436 sha->sas_phy = arr_phy;
437 sha->sas_port = arr_port;
438
439 sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
440 if (!sha->lldd_ha)
441 goto exit_free;
442
443 ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
444
445 shost->transportt = mvs_stt;
446 shost->max_id = 128;
447 shost->max_lun = ~0;
448 shost->max_channel = 1;
449 shost->max_cmd_len = 16;
450
451 return 0;
452exit_free:
453 kfree(arr_phy);
454 kfree(arr_port);
455 return -1;
456
457}
458
459static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
460 const struct mvs_chip_info *chip_info)
461{
462 int can_queue, i = 0, j = 0;
463 struct mvs_info *mvi = NULL;
464 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
465 unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
466
467 for (j = 0; j < nr_core; j++) {
468 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
469 for (i = 0; i < chip_info->n_phy; i++) {
470 sha->sas_phy[j * chip_info->n_phy + i] =
471 &mvi->phy[i].sas_phy;
472 sha->sas_port[j * chip_info->n_phy + i] =
473 &mvi->port[i].sas_port;
474 }
475 }
476
477 sha->sas_ha_name = DRV_NAME;
478 sha->dev = mvi->dev;
479 sha->lldd_module = THIS_MODULE;
480 sha->sas_addr = &mvi->sas_addr[0];
481
482 sha->num_phys = nr_core * chip_info->n_phy;
483
484 sha->lldd_max_execute_num = 1;
485
486 if (mvi->flags & MVF_FLAG_SOC)
487 can_queue = MVS_SOC_CAN_QUEUE;
488 else
489 can_queue = MVS_CAN_QUEUE;
490
491 sha->lldd_queue_size = can_queue;
492 shost->can_queue = can_queue;
493 mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys;
494 sha->core.shost = mvi->shost;
495}
496
497static void mvs_init_sas_add(struct mvs_info *mvi)
498{
499 u8 i;
500 for (i = 0; i < mvi->chip->n_phy; i++) {
501 mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
 502		mvi->phy[i].dev_sas_addr =
 503			cpu_to_be64(mvi->phy[i].dev_sas_addr);
504 }
505
506 memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
507}
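/*
 * Byte-order sketch (assuming the fixed base WWN above): after
 * cpu_to_be64(), mvi->sas_addr[] holds the bytes
 *	50 05 04 30 11 ab 00 00
 * i.e. an NAA=5 IEEE-registered address with the same prefix as the
 * example in the commented-out mvs_get_sas_addr() in mv_sas.c.
 */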
508
509static int __devinit mvs_pci_init(struct pci_dev *pdev,
510 const struct pci_device_id *ent)
511{
512 unsigned int rc, nhost = 0;
513 struct mvs_info *mvi;
514 irq_handler_t irq_handler = mvs_interrupt;
515 struct Scsi_Host *shost = NULL;
516 const struct mvs_chip_info *chip;
517
518 dev_printk(KERN_INFO, &pdev->dev,
519 "mvsas: driver version %s\n", DRV_VERSION);
520 rc = pci_enable_device(pdev);
521 if (rc)
522 goto err_out_enable;
523
524 pci_set_master(pdev);
525
526 rc = pci_request_regions(pdev, DRV_NAME);
527 if (rc)
528 goto err_out_disable;
529
530 rc = pci_go_64(pdev);
531 if (rc)
532 goto err_out_regions;
533
534 shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
535 if (!shost) {
536 rc = -ENOMEM;
537 goto err_out_regions;
538 }
539
540 chip = &mvs_chips[ent->driver_data];
 541	SHOST_TO_SAS_HA(shost) =
 542		kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
543 if (!SHOST_TO_SAS_HA(shost)) {
 544		scsi_host_put(shost);
545 rc = -ENOMEM;
546 goto err_out_regions;
547 }
548
549 rc = mvs_prep_sas_ha_init(shost, chip);
550 if (rc) {
 551		scsi_host_put(shost);
552 rc = -ENOMEM;
553 goto err_out_regions;
554 }
555
556 pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
557
558 do {
559 mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
560 if (!mvi) {
561 rc = -ENOMEM;
562 goto err_out_regions;
563 }
564
565 mvs_init_sas_add(mvi);
566
567 mvi->instance = nhost;
568 rc = MVS_CHIP_DISP->chip_init(mvi);
569 if (rc) {
570 mvs_free(mvi);
571 goto err_out_regions;
572 }
573 nhost++;
574 } while (nhost < chip->n_host);
575
576 mvs_post_sas_ha_init(shost, chip);
577
578 rc = scsi_add_host(shost, &pdev->dev);
579 if (rc)
580 goto err_out_shost;
581
582 rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
583 if (rc)
584 goto err_out_shost;
585 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
586 DRV_NAME, SHOST_TO_SAS_HA(shost));
587 if (rc)
588 goto err_not_sas;
589
590 MVS_CHIP_DISP->interrupt_enable(mvi);
591
592 scsi_scan_host(mvi->shost);
593
594 return 0;
595
596err_not_sas:
597 sas_unregister_ha(SHOST_TO_SAS_HA(shost));
598err_out_shost:
599 scsi_remove_host(mvi->shost);
600err_out_regions:
601 pci_release_regions(pdev);
602err_out_disable:
603 pci_disable_device(pdev);
604err_out_enable:
605 return rc;
606}
607
608static void __devexit mvs_pci_remove(struct pci_dev *pdev)
609{
610 unsigned short core_nr, i = 0;
611 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
612 struct mvs_info *mvi = NULL;
613
614 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
615 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
616
617#ifdef MVS_USE_TASKLET
618 tasklet_kill(&mv_tasklet);
619#endif
620
621 pci_set_drvdata(pdev, NULL);
622 sas_unregister_ha(sha);
623 sas_remove_host(mvi->shost);
624 scsi_remove_host(mvi->shost);
625
626 MVS_CHIP_DISP->interrupt_disable(mvi);
627 free_irq(mvi->irq, sha);
628 for (i = 0; i < core_nr; i++) {
629 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
630 mvs_free(mvi);
631 }
632 kfree(sha->sas_phy);
633 kfree(sha->sas_port);
634 kfree(sha);
635 pci_release_regions(pdev);
636 pci_disable_device(pdev);
637 return;
638}
639
640static struct pci_device_id __devinitdata mvs_pci_table[] = {
641 { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
642 { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
643 {
644 .vendor = PCI_VENDOR_ID_MARVELL,
645 .device = 0x6440,
646 .subvendor = PCI_ANY_ID,
647 .subdevice = 0x6480,
648 .class = 0,
649 .class_mask = 0,
650 .driver_data = chip_6485,
651 },
652 { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
653 { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
654 { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
655 { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
656
657 { } /* terminate list */
658};
659
660static struct pci_driver mvs_pci_driver = {
661 .name = DRV_NAME,
662 .id_table = mvs_pci_table,
663 .probe = mvs_pci_init,
664 .remove = __devexit_p(mvs_pci_remove),
665};
666
667/* task handler */
668struct task_struct *mvs_th;
669static int __init mvs_init(void)
670{
671 int rc;
672 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
673 if (!mvs_stt)
674 return -ENOMEM;
675
676 rc = pci_register_driver(&mvs_pci_driver);
677
678 if (rc)
679 goto err_out;
680
681 return 0;
682
683err_out:
684 sas_release_transport(mvs_stt);
685 return rc;
686}
687
688static void __exit mvs_exit(void)
689{
690 pci_unregister_driver(&mvs_pci_driver);
691 sas_release_transport(mvs_stt);
692}
693
694module_init(mvs_init);
695module_exit(mvs_exit);
696
697MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
698MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
699MODULE_VERSION(DRV_VERSION);
700MODULE_LICENSE("GPL");
701#ifdef CONFIG_PCI
702MODULE_DEVICE_TABLE(pci, mvs_pci_table);
703#endif
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
new file mode 100644
index 00000000000..0d213864121
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -0,0 +1,2154 @@
1/*
2 * Marvell 88SE64xx/88SE94xx main function
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#include "mv_sas.h"
26
27static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
28{
29 if (task->lldd_task) {
30 struct mvs_slot_info *slot;
31 slot = task->lldd_task;
32 *tag = slot->slot_tag;
33 return 1;
34 }
35 return 0;
36}
37
38void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
39{
40 void *bitmap = &mvi->tags;
41 clear_bit(tag, bitmap);
42}
43
44void mvs_tag_free(struct mvs_info *mvi, u32 tag)
45{
46 mvs_tag_clear(mvi, tag);
47}
48
49void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
50{
51 void *bitmap = &mvi->tags;
52 set_bit(tag, bitmap);
53}
54
55inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
56{
57 unsigned int index, tag;
58 void *bitmap = &mvi->tags;
59
60 index = find_first_zero_bit(bitmap, mvi->tags_num);
61 tag = index;
62 if (tag >= mvi->tags_num)
63 return -SAS_QUEUE_FULL;
64 mvs_tag_set(mvi, tag);
65 *tag_out = tag;
66 return 0;
67}
68
69void mvs_tag_init(struct mvs_info *mvi)
70{
71 int i;
72 for (i = 0; i < mvi->tags_num; ++i)
73 mvs_tag_clear(mvi, i);
74}
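/*
 * Tag lifecycle, a minimal sketch (assuming mvi->tags is a bitmap
 * covering mvi->tags_num slots, as set up in mvs_alloc()):
 *
 *	u32 tag;
 *	if (mvs_tag_alloc(mvi, &tag))	// find_first_zero_bit + set_bit
 *		return -SAS_QUEUE_FULL;	// all slots busy
 *	... build the command in mvi->slot_info[tag] ...
 *	mvs_tag_free(mvi, tag);		// clear_bit: slot reusable
 */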
75
76void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
77{
78 u32 i;
79 u32 run;
80 u32 offset;
81
82 offset = 0;
83 while (size) {
84 printk(KERN_DEBUG"%08X : ", baseaddr + offset);
85 if (size >= 16)
86 run = 16;
87 else
88 run = size;
89 size -= run;
90 for (i = 0; i < 16; i++) {
91 if (i < run)
92 printk(KERN_DEBUG"%02X ", (u32)data[i]);
93 else
94 printk(KERN_DEBUG" ");
95 }
96 printk(KERN_DEBUG": ");
97 for (i = 0; i < run; i++)
98 printk(KERN_DEBUG"%c",
99 isalnum(data[i]) ? data[i] : '.');
100 printk(KERN_DEBUG"\n");
101 data = &data[16];
102 offset += run;
103 }
104 printk(KERN_DEBUG"\n");
105}
106
107#if (_MV_DUMP > 1)
108static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
109 enum sas_protocol proto)
110{
111 u32 offset;
112 struct mvs_slot_info *slot = &mvi->slot_info[tag];
113
114 offset = slot->cmd_size + MVS_OAF_SZ +
115 MVS_CHIP_DISP->prd_size() * slot->n_elem;
116 dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
117 tag);
118 mvs_hexdump(32, (u8 *) slot->response,
119 (u32) slot->buf_dma + offset);
120}
121#endif
122
123static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
124 enum sas_protocol proto)
125{
126#if (_MV_DUMP > 1)
127 u32 sz, w_ptr;
128 u64 addr;
129 struct mvs_slot_info *slot = &mvi->slot_info[tag];
130
131 /*Delivery Queue */
132 sz = MVS_CHIP_SLOT_SZ;
133 w_ptr = slot->tx;
134 addr = mvi->tx_dma;
135 dev_printk(KERN_DEBUG, mvi->dev,
136 "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
137 dev_printk(KERN_DEBUG, mvi->dev,
138 "Delivery Queue Base Address=0x%llX (PA)"
139 "(tx_dma=0x%llX), Entry=%04d\n",
140 addr, (unsigned long long)mvi->tx_dma, w_ptr);
141 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
142 (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
143 /*Command List */
144 addr = mvi->slot_dma;
145 dev_printk(KERN_DEBUG, mvi->dev,
146 "Command List Base Address=0x%llX (PA)"
147 "(slot_dma=0x%llX), Header=%03d\n",
148 addr, (unsigned long long)slot->buf_dma, tag);
149 dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
150 /*mvs_cmd_hdr */
151 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
152 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
153 /*1.command table area */
154 dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
155 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
156 /*2.open address frame area */
157 dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
158 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
159 (u32) slot->buf_dma + slot->cmd_size);
160 /*3.status buffer */
161 mvs_hba_sb_dump(mvi, tag, proto);
162 /*4.PRD table */
163 dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
164 mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
165 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
166 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
167#endif
168}
169
170static void mvs_hba_cq_dump(struct mvs_info *mvi)
171{
172#if (_MV_DUMP > 2)
173 u64 addr;
174 void __iomem *regs = mvi->regs;
175 u32 entry = mvi->rx_cons + 1;
176 u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
177
178 /*Completion Queue */
179 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
180 dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
181 mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
182 dev_printk(KERN_DEBUG, mvi->dev,
183 "Completion List Base Address=0x%llX (PA), "
184 "CQ_Entry=%04d, CQ_WP=0x%08X\n",
185 addr, entry - 1, mvi->rx[0]);
186 mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
187 mvi->rx_dma + sizeof(u32) * entry);
188#endif
189}
190
191void mvs_get_sas_addr(void *buf, u32 buflen)
192{
193 /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
194}
195
196struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
197{
198 unsigned long i = 0, j = 0, hi = 0;
199 struct sas_ha_struct *sha = dev->port->ha;
200 struct mvs_info *mvi = NULL;
201 struct asd_sas_phy *phy;
202
203 while (sha->sas_port[i]) {
204 if (sha->sas_port[i] == dev->port) {
205 phy = container_of(sha->sas_port[i]->phy_list.next,
206 struct asd_sas_phy, port_phy_el);
207 j = 0;
208 while (sha->sas_phy[j]) {
209 if (sha->sas_phy[j] == phy)
210 break;
211 j++;
212 }
213 break;
214 }
215 i++;
216 }
217 hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
218 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
219
220 return mvi;
221
222}
223
224/* FIXME */
225int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
226{
227 unsigned long i = 0, j = 0, n = 0, num = 0;
228 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
229 struct mvs_info *mvi = mvi_dev->mvi_info;
230 struct sas_ha_struct *sha = dev->port->ha;
231
232 while (sha->sas_port[i]) {
233 if (sha->sas_port[i] == dev->port) {
234 struct asd_sas_phy *phy;
235 list_for_each_entry(phy,
236 &sha->sas_port[i]->phy_list, port_phy_el) {
237 j = 0;
238 while (sha->sas_phy[j]) {
239 if (sha->sas_phy[j] == phy)
240 break;
241 j++;
242 }
243 phyno[n] = (j >= mvi->chip->n_phy) ?
244 (j - mvi->chip->n_phy) : j;
245 num++;
246 n++;
247 }
248 break;
249 }
250 i++;
251 }
252 return num;
253}
254
255static inline void mvs_free_reg_set(struct mvs_info *mvi,
256 struct mvs_device *dev)
257{
258 if (!dev) {
 259		mv_printk("device has been freed.\n");
260 return;
261 }
262 if (dev->runing_req != 0)
263 return;
264 if (dev->taskfileset == MVS_ID_NOT_MAPPED)
265 return;
266 MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
267}
268
269static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
270 struct mvs_device *dev)
271{
272 if (dev->taskfileset != MVS_ID_NOT_MAPPED)
273 return 0;
274 return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
275}
276
277void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
278{
279 u32 no;
280 for_each_phy(phy_mask, phy_mask, no) {
281 if (!(phy_mask & 1))
282 continue;
283 MVS_CHIP_DISP->phy_reset(mvi, no, hard);
284 }
285}
286
287/* FIXME: locking? */
288int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
289 void *funcdata)
290{
291 int rc = 0, phy_id = sas_phy->id;
292 u32 tmp, i = 0, hi;
293 struct sas_ha_struct *sha = sas_phy->ha;
294 struct mvs_info *mvi = NULL;
295
296 while (sha->sas_phy[i]) {
297 if (sha->sas_phy[i] == sas_phy)
298 break;
299 i++;
300 }
301 hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
302 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
303
304 switch (func) {
305 case PHY_FUNC_SET_LINK_RATE:
306 MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
307 break;
308
309 case PHY_FUNC_HARD_RESET:
310 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
311 if (tmp & PHY_RST_HARD)
312 break;
313 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1);
314 break;
315
316 case PHY_FUNC_LINK_RESET:
317 MVS_CHIP_DISP->phy_enable(mvi, phy_id);
318 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);
319 break;
320
321 case PHY_FUNC_DISABLE:
322 MVS_CHIP_DISP->phy_disable(mvi, phy_id);
323 break;
324 case PHY_FUNC_RELEASE_SPINUP_HOLD:
325 default:
326 rc = -EOPNOTSUPP;
327 }
328 msleep(200);
329 return rc;
330}
331
332void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
333 u32 off_lo, u32 off_hi, u64 sas_addr)
334{
335 u32 lo = (u32)sas_addr;
336 u32 hi = (u32)(sas_addr>>32);
337
338 MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
339 MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
340 MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
341 MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
342}
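/*
 * Call sketch (PHYR_ADDR_LO/PHYR_ADDR_HI are assumed register offsets
 * from the chip headers): program phy i's SAS address in two 32-bit
 * halves via the port configuration window:
 *
 *	mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
 *			 cpu_to_be64(mvi->phy[i].dev_sas_addr));
 */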
343
344static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
345{
346 struct mvs_phy *phy = &mvi->phy[i];
347 struct asd_sas_phy *sas_phy = &phy->sas_phy;
348 struct sas_ha_struct *sas_ha;
349 if (!phy->phy_attached)
350 return;
351
352 if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
353 && phy->phy_type & PORT_TYPE_SAS) {
354 return;
355 }
356
357 sas_ha = mvi->sas;
358 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
359
360 if (sas_phy->phy) {
361 struct sas_phy *sphy = sas_phy->phy;
362
363 sphy->negotiated_linkrate = sas_phy->linkrate;
364 sphy->minimum_linkrate = phy->minimum_linkrate;
365 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
366 sphy->maximum_linkrate = phy->maximum_linkrate;
367 sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
368 }
369
370 if (phy->phy_type & PORT_TYPE_SAS) {
371 struct sas_identify_frame *id;
372
373 id = (struct sas_identify_frame *)phy->frame_rcvd;
374 id->dev_type = phy->identify.device_type;
375 id->initiator_bits = SAS_PROTOCOL_ALL;
376 id->target_bits = phy->identify.target_port_protocols;
377 } else if (phy->phy_type & PORT_TYPE_SATA) {
378 /*Nothing*/
379 }
 380	mv_dprintk("phy %d bytes dmaed.\n", i + mvi->id * mvi->chip->n_phy);
381
382 sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
383
384 mvi->sas->notify_port_event(sas_phy,
385 PORTE_BYTES_DMAED);
386}
387
388int mvs_slave_alloc(struct scsi_device *scsi_dev)
389{
390 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
391 if (dev_is_sata(dev)) {
392 /* We don't need to rescan targets
 393		 * if the REPORT_LUNS request fails
394 */
395 if (scsi_dev->lun > 0)
396 return -ENXIO;
397 scsi_dev->tagged_supported = 1;
398 }
399
400 return sas_slave_alloc(scsi_dev);
401}
402
403int mvs_slave_configure(struct scsi_device *sdev)
404{
405 struct domain_device *dev = sdev_to_domain_dev(sdev);
406 int ret = sas_slave_configure(sdev);
407
408 if (ret)
409 return ret;
410 if (dev_is_sata(dev)) {
411 /* may set PIO mode */
412 #if MV_DISABLE_NCQ
413 struct ata_port *ap = dev->sata_dev.ap;
414 struct ata_device *adev = ap->link.device;
415 adev->flags |= ATA_DFLAG_NCQ_OFF;
416 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
417 #endif
418 }
419 return 0;
420}
421
422void mvs_scan_start(struct Scsi_Host *shost)
423{
424 int i, j;
425 unsigned short core_nr;
426 struct mvs_info *mvi;
427 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
428
429 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
430
431 for (j = 0; j < core_nr; j++) {
432 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
433 for (i = 0; i < mvi->chip->n_phy; ++i)
434 mvs_bytes_dmaed(mvi, i);
435 }
436}
437
438int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
439{
440 /* give the phy enabling interrupt event time to come in (1s
441 * is empirically about all it takes) */
442 if (time < HZ)
443 return 0;
444 /* Wait for discovery to finish */
445 scsi_flush_work(shost);
446 return 1;
447}
448
449static int mvs_task_prep_smp(struct mvs_info *mvi,
450 struct mvs_task_exec_info *tei)
451{
452 int elem, rc, i;
453 struct sas_task *task = tei->task;
454 struct mvs_cmd_hdr *hdr = tei->hdr;
455 struct domain_device *dev = task->dev;
456 struct asd_sas_port *sas_port = dev->port;
457 struct scatterlist *sg_req, *sg_resp;
458 u32 req_len, resp_len, tag = tei->tag;
459 void *buf_tmp;
460 u8 *buf_oaf;
461 dma_addr_t buf_tmp_dma;
462 void *buf_prd;
463 struct mvs_slot_info *slot = &mvi->slot_info[tag];
464 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
465#if _MV_DUMP
466 u8 *buf_cmd;
467 void *from;
468#endif
469 /*
470 * DMA-map SMP request, response buffers
471 */
472 sg_req = &task->smp_task.smp_req;
473 elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
474 if (!elem)
475 return -ENOMEM;
476 req_len = sg_dma_len(sg_req);
477
478 sg_resp = &task->smp_task.smp_resp;
479 elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
480 if (!elem) {
481 rc = -ENOMEM;
482 goto err_out;
483 }
484 resp_len = SB_RFB_MAX;
485
486 /* must be in dwords */
487 if ((req_len & 0x3) || (resp_len & 0x3)) {
488 rc = -EINVAL;
489 goto err_out_2;
490 }
491
492 /*
493 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
494 */
495
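	/*
	 * Layout sketch of the per-slot DMA buffer (region sizes assumed
	 * from mv_sas.h; the ATA and SSP prep functions below use the
	 * same scheme with their own command-table sizes):
	 *
	 *	slot->buf_dma ->+---------------------------+
	 *			| 1: command table          |
	 *			+---------------------------+
	 *			| 2: open address frame     | MVS_OAF_SZ
	 *			+---------------------------+
	 *			| 3: PRD table              | prd_size() * n_elem
	 *			+---------------------------+
	 *			| 4: status buffer          | capped at SB_RFB_MAX
	 *			+---------------------------+
	 */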
496 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
497 buf_tmp = slot->buf;
498 buf_tmp_dma = slot->buf_dma;
499
500#if _MV_DUMP
501 buf_cmd = buf_tmp;
502 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
503 buf_tmp += req_len;
504 buf_tmp_dma += req_len;
505 slot->cmd_size = req_len;
506#else
507 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
508#endif
509
510 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
511 buf_oaf = buf_tmp;
512 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
513
514 buf_tmp += MVS_OAF_SZ;
515 buf_tmp_dma += MVS_OAF_SZ;
516
517 /* region 3: PRD table *********************************** */
518 buf_prd = buf_tmp;
519 if (tei->n_elem)
520 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
521 else
522 hdr->prd_tbl = 0;
523
524 i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
525 buf_tmp += i;
526 buf_tmp_dma += i;
527
 528	/* region 4: status buffer (the larger the PRD, the smaller this buf) */
529 slot->response = buf_tmp;
530 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
531 if (mvi->flags & MVF_FLAG_SOC)
532 hdr->reserved[0] = 0;
533
534 /*
535 * Fill in TX ring and command slot header
536 */
537 slot->tx = mvi->tx_prod;
538 mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
539 TXQ_MODE_I | tag |
540 (sas_port->phy_mask << TXQ_PHY_SHIFT));
541
542 hdr->flags |= flags;
543 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
544 hdr->tags = cpu_to_le32(tag);
545 hdr->data_len = 0;
546
547 /* generate open address frame hdr (first 12 bytes) */
548 /* initiator, SMP, ftype 1h */
549 buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
550 buf_oaf[1] = dev->linkrate & 0xf;
551 *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
552 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
553
554 /* fill in PRD (scatter/gather) table, if any */
555 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
556
557#if _MV_DUMP
558 /* copy cmd table */
559 from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
560 memcpy(buf_cmd, from + sg_req->offset, req_len);
561 kunmap_atomic(from, KM_IRQ0);
562#endif
563 return 0;
564
565err_out_2:
566 dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
567 PCI_DMA_FROMDEVICE);
568err_out:
569 dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
570 PCI_DMA_TODEVICE);
571 return rc;
572}
573
574static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
575{
576 struct ata_queued_cmd *qc = task->uldd_task;
577
578 if (qc) {
579 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
580 qc->tf.command == ATA_CMD_FPDMA_READ) {
581 *tag = qc->tag;
582 return 1;
583 }
584 }
585
586 return 0;
587}
588
589static int mvs_task_prep_ata(struct mvs_info *mvi,
590 struct mvs_task_exec_info *tei)
591{
592 struct sas_task *task = tei->task;
593 struct domain_device *dev = task->dev;
594 struct mvs_device *mvi_dev = dev->lldd_dev;
595 struct mvs_cmd_hdr *hdr = tei->hdr;
596 struct asd_sas_port *sas_port = dev->port;
597 struct mvs_slot_info *slot;
598 void *buf_prd;
599 u32 tag = tei->tag, hdr_tag;
600 u32 flags, del_q;
601 void *buf_tmp;
602 u8 *buf_cmd, *buf_oaf;
603 dma_addr_t buf_tmp_dma;
604 u32 i, req_len, resp_len;
605 const u32 max_resp_len = SB_RFB_MAX;
606
607 if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
 608		mv_dprintk("Not enough register sets for dev %d.\n",
609 mvi_dev->device_id);
610 return -EBUSY;
611 }
612 slot = &mvi->slot_info[tag];
613 slot->tx = mvi->tx_prod;
614 del_q = TXQ_MODE_I | tag |
615 (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
616 (sas_port->phy_mask << TXQ_PHY_SHIFT) |
617 (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
618 mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
619
620#ifndef DISABLE_HOTPLUG_DMA_FIX
621 if (task->data_dir == DMA_FROM_DEVICE)
622 flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
623 else
624 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
625#else
626 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
627#endif
628 if (task->ata_task.use_ncq)
629 flags |= MCH_FPDMA;
630 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
631 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
632 flags |= MCH_ATAPI;
633 }
634
635 /* FIXME: fill in port multiplier number */
636
637 hdr->flags = cpu_to_le32(flags);
638
 639	/* FIXME: the low-order 5 bits carry the TAG if NCQ is enabled */
640 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
641 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
642 else
643 hdr_tag = tag;
644
645 hdr->tags = cpu_to_le32(hdr_tag);
646
647 hdr->data_len = cpu_to_le32(task->total_xfer_len);
648
649 /*
650 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
651 */
652
653 /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
654 buf_cmd = buf_tmp = slot->buf;
655 buf_tmp_dma = slot->buf_dma;
656
657 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
658
659 buf_tmp += MVS_ATA_CMD_SZ;
660 buf_tmp_dma += MVS_ATA_CMD_SZ;
661#if _MV_DUMP
662 slot->cmd_size = MVS_ATA_CMD_SZ;
663#endif
664
665 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
666 /* used for STP. unused for SATA? */
667 buf_oaf = buf_tmp;
668 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
669
670 buf_tmp += MVS_OAF_SZ;
671 buf_tmp_dma += MVS_OAF_SZ;
672
673 /* region 3: PRD table ********************************************* */
674 buf_prd = buf_tmp;
675
676 if (tei->n_elem)
677 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
678 else
679 hdr->prd_tbl = 0;
680 i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();
681
682 buf_tmp += i;
683 buf_tmp_dma += i;
684
 685	/* region 4: status buffer (the larger the PRD, the smaller this buf) */
686 /* FIXME: probably unused, for SATA. kept here just in case
687 * we get a STP/SATA error information record
688 */
689 slot->response = buf_tmp;
690 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
691 if (mvi->flags & MVF_FLAG_SOC)
692 hdr->reserved[0] = 0;
693
694 req_len = sizeof(struct host_to_dev_fis);
695 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
696 sizeof(struct mvs_err_info) - i;
697
698 /* request, response lengths */
699 resp_len = min(resp_len, max_resp_len);
700 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
701
702 if (likely(!task->ata_task.device_control_reg_update))
703 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
704 /* fill in command FIS and ATAPI CDB */
705 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
706 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
707 memcpy(buf_cmd + STP_ATAPI_CMD,
708 task->ata_task.atapi_packet, 16);
709
710 /* generate open address frame hdr (first 12 bytes) */
711 /* initiator, STP, ftype 1h */
712 buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
713 buf_oaf[1] = dev->linkrate & 0xf;
714 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
715 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
716
717 /* fill in PRD (scatter/gather) table, if any */
718 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
719#ifndef DISABLE_HOTPLUG_DMA_FIX
720 if (task->data_dir == DMA_FROM_DEVICE)
721 MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma,
722 TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
723#endif
724 return 0;
725}
726
727static int mvs_task_prep_ssp(struct mvs_info *mvi,
728 struct mvs_task_exec_info *tei, int is_tmf,
729 struct mvs_tmf_task *tmf)
730{
731 struct sas_task *task = tei->task;
732 struct mvs_cmd_hdr *hdr = tei->hdr;
733 struct mvs_port *port = tei->port;
734 struct domain_device *dev = task->dev;
735 struct mvs_device *mvi_dev = dev->lldd_dev;
736 struct asd_sas_port *sas_port = dev->port;
737 struct mvs_slot_info *slot;
738 void *buf_prd;
739 struct ssp_frame_hdr *ssp_hdr;
740 void *buf_tmp;
741 u8 *buf_cmd, *buf_oaf, fburst = 0;
742 dma_addr_t buf_tmp_dma;
743 u32 flags;
744 u32 resp_len, req_len, i, tag = tei->tag;
745 const u32 max_resp_len = SB_RFB_MAX;
746 u32 phy_mask;
747
748 slot = &mvi->slot_info[tag];
749
750 phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
751 sas_port->phy_mask) & TXQ_PHY_MASK;
752
753 slot->tx = mvi->tx_prod;
754 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
755 (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
756 (phy_mask << TXQ_PHY_SHIFT));
757
758 flags = MCH_RETRY;
759 if (task->ssp_task.enable_first_burst) {
760 flags |= MCH_FBURST;
761 fburst = (1 << 7);
762 }
763 if (is_tmf)
764 flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
765 else
766 flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
767 hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
768 hdr->tags = cpu_to_le32(tag);
769 hdr->data_len = cpu_to_le32(task->total_xfer_len);
770
771 /*
772 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
773 */
774
775 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
776 buf_cmd = buf_tmp = slot->buf;
777 buf_tmp_dma = slot->buf_dma;
778
779 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
780
781 buf_tmp += MVS_SSP_CMD_SZ;
782 buf_tmp_dma += MVS_SSP_CMD_SZ;
783#if _MV_DUMP
784 slot->cmd_size = MVS_SSP_CMD_SZ;
785#endif
786
787 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
788 buf_oaf = buf_tmp;
789 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
790
791 buf_tmp += MVS_OAF_SZ;
792 buf_tmp_dma += MVS_OAF_SZ;
793
794 /* region 3: PRD table ********************************************* */
795 buf_prd = buf_tmp;
796 if (tei->n_elem)
797 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
798 else
799 hdr->prd_tbl = 0;
800
801 i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
802 buf_tmp += i;
803 buf_tmp_dma += i;
804
 805	/* region 4: status buffer (the larger the PRD, the smaller this buf) */
806 slot->response = buf_tmp;
807 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
808 if (mvi->flags & MVF_FLAG_SOC)
809 hdr->reserved[0] = 0;
810
811 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
812 sizeof(struct mvs_err_info) - i;
813 resp_len = min(resp_len, max_resp_len);
814
815 req_len = sizeof(struct ssp_frame_hdr) + 28;
816
817 /* request, response lengths */
818 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
819
820 /* generate open address frame hdr (first 12 bytes) */
821 /* initiator, SSP, ftype 1h */
822 buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
823 buf_oaf[1] = dev->linkrate & 0xf;
824 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
825 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
826
827 /* fill in SSP frame header (Command Table.SSP frame header) */
828 ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
829
830 if (is_tmf)
831 ssp_hdr->frame_type = SSP_TASK;
832 else
833 ssp_hdr->frame_type = SSP_COMMAND;
834
835 memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
836 HASHED_SAS_ADDR_SIZE);
837 memcpy(ssp_hdr->hashed_src_addr,
838 dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
839 ssp_hdr->tag = cpu_to_be16(tag);
840
841 /* fill in IU for TASK and Command Frame */
842 buf_cmd += sizeof(*ssp_hdr);
843 memcpy(buf_cmd, &task->ssp_task.LUN, 8);
844
845 if (ssp_hdr->frame_type != SSP_TASK) {
846 buf_cmd[9] = fburst | task->ssp_task.task_attr |
847 (task->ssp_task.task_prio << 3);
848 memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
 849	} else {
850 buf_cmd[10] = tmf->tmf;
851 switch (tmf->tmf) {
852 case TMF_ABORT_TASK:
853 case TMF_QUERY_TASK:
854 buf_cmd[12] =
855 (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
856 buf_cmd[13] =
857 tmf->tag_of_task_to_be_managed & 0xff;
858 break;
859 default:
860 break;
861 }
862 }
863 /* fill in PRD (scatter/gather) table, if any */
864 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
865 return 0;
866}
867
868#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
869static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
870 struct completion *completion,int is_tmf,
871 struct mvs_tmf_task *tmf)
872{
873 struct domain_device *dev = task->dev;
874 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
875 struct mvs_info *mvi = mvi_dev->mvi_info;
876 struct mvs_task_exec_info tei;
877 struct sas_task *t = task;
878 struct mvs_slot_info *slot;
879 u32 tag = 0xdeadbeef, rc, n_elem = 0;
880 u32 n = num, pass = 0;
881 unsigned long flags = 0;
882
883 if (!dev->port) {
884 struct task_status_struct *tsm = &t->task_status;
885
886 tsm->resp = SAS_TASK_UNDELIVERED;
887 tsm->stat = SAS_PHY_DOWN;
888 t->task_done(t);
889 return 0;
890 }
891
892 spin_lock_irqsave(&mvi->lock, flags);
893 do {
894 dev = t->dev;
895 mvi_dev = dev->lldd_dev;
896 if (DEV_IS_GONE(mvi_dev)) {
897 if (mvi_dev)
898 mv_dprintk("device %d not ready.\n",
899 mvi_dev->device_id);
900 else
901 mv_dprintk("device %016llx not ready.\n",
902 SAS_ADDR(dev->sas_addr));
903
904 rc = SAS_PHY_DOWN;
905 goto out_done;
906 }
907
908 if (dev->port->id >= mvi->chip->n_phy)
909 tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy];
910 else
911 tei.port = &mvi->port[dev->port->id];
912
913 if (!tei.port->port_attached) {
914 if (sas_protocol_ata(t->task_proto)) {
 915				mv_dprintk("port %d has no "
 916					"attached device.\n", dev->port->id);
917 rc = SAS_PHY_DOWN;
918 goto out_done;
919 } else {
920 struct task_status_struct *ts = &t->task_status;
921 ts->resp = SAS_TASK_UNDELIVERED;
922 ts->stat = SAS_PHY_DOWN;
923 t->task_done(t);
924 if (n > 1)
925 t = list_entry(t->list.next,
926 struct sas_task, list);
927 continue;
928 }
929 }
930
931 if (!sas_protocol_ata(t->task_proto)) {
932 if (t->num_scatter) {
933 n_elem = dma_map_sg(mvi->dev,
934 t->scatter,
935 t->num_scatter,
936 t->data_dir);
937 if (!n_elem) {
938 rc = -ENOMEM;
939 goto err_out;
940 }
941 }
942 } else {
943 n_elem = t->num_scatter;
944 }
945
946 rc = mvs_tag_alloc(mvi, &tag);
947 if (rc)
948 goto err_out;
949
950 slot = &mvi->slot_info[tag];
951
952
953 t->lldd_task = NULL;
954 slot->n_elem = n_elem;
955 slot->slot_tag = tag;
956 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
957
958 tei.task = t;
959 tei.hdr = &mvi->slot[tag];
960 tei.tag = tag;
961 tei.n_elem = n_elem;
962 switch (t->task_proto) {
963 case SAS_PROTOCOL_SMP:
964 rc = mvs_task_prep_smp(mvi, &tei);
965 break;
966 case SAS_PROTOCOL_SSP:
967 rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
968 break;
969 case SAS_PROTOCOL_SATA:
970 case SAS_PROTOCOL_STP:
971 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
972 rc = mvs_task_prep_ata(mvi, &tei);
973 break;
974 default:
975 dev_printk(KERN_ERR, mvi->dev,
976 "unknown sas_task proto: 0x%x\n",
977 t->task_proto);
978 rc = -EINVAL;
979 break;
980 }
981
982 if (rc) {
983 mv_dprintk("rc is %x\n", rc);
984 goto err_out_tag;
985 }
986 slot->task = t;
987 slot->port = tei.port;
988 t->lldd_task = slot;
989 list_add_tail(&slot->entry, &tei.port->list);
990 /* TODO: select normal or high priority */
991 spin_lock(&t->task_state_lock);
992 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
993 spin_unlock(&t->task_state_lock);
994
995 mvs_hba_memory_dump(mvi, tag, t->task_proto);
996 mvi_dev->runing_req++;
997 ++pass;
998 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
999 if (n > 1)
1000 t = list_entry(t->list.next, struct sas_task, list);
1001 } while (--n);
1002 rc = 0;
1003 goto out_done;
1004
1005err_out_tag:
1006 mvs_tag_free(mvi, tag);
1007err_out:
1008
1009 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
1010 if (!sas_protocol_ata(t->task_proto))
1011 if (n_elem)
1012 dma_unmap_sg(mvi->dev, t->scatter, n_elem,
1013 t->data_dir);
1014out_done:
1015 if (likely(pass)) {
1016 MVS_CHIP_DISP->start_delivery(mvi,
1017 (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
1018 }
1019 spin_unlock_irqrestore(&mvi->lock, flags);
1020 return rc;
1021}
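/*
 * A note on the ring arithmetic above, assuming MVS_CHIP_SLOT_SZ is a
 * power of two: the mask form
 *	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
 * is equivalent to (tx_prod + 1) % MVS_CHIP_SLOT_SZ, so the delivery
 * queue producer wraps in place and start_delivery() is handed the
 * last written entry, (tx_prod - 1) masked the same way.
 */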
1022
1023int mvs_queue_command(struct sas_task *task, const int num,
1024 gfp_t gfp_flags)
1025{
1026 return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
1027}
1028
1029static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
1030{
1031 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1032 mvs_tag_clear(mvi, slot_idx);
1033}
1034
1035static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
1036 struct mvs_slot_info *slot, u32 slot_idx)
1037{
1038 if (!slot->task)
1039 return;
1040 if (!sas_protocol_ata(task->task_proto))
1041 if (slot->n_elem)
1042 dma_unmap_sg(mvi->dev, task->scatter,
1043 slot->n_elem, task->data_dir);
1044
1045 switch (task->task_proto) {
1046 case SAS_PROTOCOL_SMP:
1047 dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
1048 PCI_DMA_FROMDEVICE);
1049 dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
1050 PCI_DMA_TODEVICE);
1051 break;
1052
1053 case SAS_PROTOCOL_SATA:
1054 case SAS_PROTOCOL_STP:
1055 case SAS_PROTOCOL_SSP:
1056 default:
1057 /* do nothing */
1058 break;
1059 }
1060 list_del_init(&slot->entry);
1061 task->lldd_task = NULL;
1062 slot->task = NULL;
1063 slot->port = NULL;
1064 slot->slot_tag = 0xFFFFFFFF;
1065 mvs_slot_free(mvi, slot_idx);
1066}
1067
1068static void mvs_update_wideport(struct mvs_info *mvi, int i)
1069{
1070 struct mvs_phy *phy = &mvi->phy[i];
1071 struct mvs_port *port = phy->port;
1072 int j, no;
1073
1074 for_each_phy(port->wide_port_phymap, j, no) {
1075 if (j & 1) {
1076 MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
1077 PHYR_WIDE_PORT);
1078 MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
1079 port->wide_port_phymap);
1080 } else {
1081 MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
1082 PHYR_WIDE_PORT);
1083 MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
1084 0);
1085 }
1086 }
1087}
1088
1089static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
1090{
1091 u32 tmp;
1092 struct mvs_phy *phy = &mvi->phy[i];
1093 struct mvs_port *port = phy->port;
1094
1095 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
1096 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
1097 if (!port)
1098 phy->phy_attached = 1;
1099 return tmp;
1100 }
1101
1102 if (port) {
1103 if (phy->phy_type & PORT_TYPE_SAS) {
1104 port->wide_port_phymap &= ~(1U << i);
1105 if (!port->wide_port_phymap)
1106 port->port_attached = 0;
1107 mvs_update_wideport(mvi, i);
1108 } else if (phy->phy_type & PORT_TYPE_SATA)
1109 port->port_attached = 0;
1110 phy->port = NULL;
1111 phy->phy_attached = 0;
1112 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
1113 }
1114 return 0;
1115}
1116
1117static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
1118{
1119 u32 *s = (u32 *) buf;
1120
1121 if (!s)
1122 return NULL;
1123
1124 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
1125 s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1126
1127 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
1128 s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1129
1130 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
1131 s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1132
1133 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
1134 s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
1135
1136 /* Workaround: take some ATAPI devices for ATA */
1137 if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
1138 s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
1139
1140 return s;
1141}
1142
1143static u32 mvs_is_sig_fis_received(u32 irq_status)
1144{
1145 return irq_status & PHYEV_SIG_FIS;
1146}
1147
1148void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1149{
1150 struct mvs_phy *phy = &mvi->phy[i];
1151 struct sas_identify_frame *id;
1152
1153 id = (struct sas_identify_frame *)phy->frame_rcvd;
1154
1155 if (get_st) {
1156 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
1157 phy->phy_status = mvs_is_phy_ready(mvi, i);
1158 }
1159
1160 if (phy->phy_status) {
1161 int oob_done = 0;
1162 struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;
1163
1164 oob_done = MVS_CHIP_DISP->oob_done(mvi, i);
1165
1166 MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
1167 if (phy->phy_type & PORT_TYPE_SATA) {
1168 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
1169 if (mvs_is_sig_fis_received(phy->irq_status)) {
1170 phy->phy_attached = 1;
1171 phy->att_dev_sas_addr =
1172 i + mvi->id * mvi->chip->n_phy;
1173 if (oob_done)
1174 sas_phy->oob_mode = SATA_OOB_MODE;
1175 phy->frame_rcvd_size =
1176 sizeof(struct dev_to_host_fis);
1177 mvs_get_d2h_reg(mvi, i, id);
1178 } else {
1179 u32 tmp;
1180 dev_printk(KERN_DEBUG, mvi->dev,
1181 "Phy%d : No sig fis\n", i);
1182 tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
1183 MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
1184 tmp | PHYEV_SIG_FIS);
1185 phy->phy_attached = 0;
1186 phy->phy_type &= ~PORT_TYPE_SATA;
1187 MVS_CHIP_DISP->phy_reset(mvi, i, 0);
1188 goto out_done;
1189 }
1190 } else if (phy->phy_type & PORT_TYPE_SAS
1191 || phy->att_dev_info & PORT_SSP_INIT_MASK) {
1192 phy->phy_attached = 1;
1193 phy->identify.device_type =
1194 phy->att_dev_info & PORT_DEV_TYPE_MASK;
1195
1196 if (phy->identify.device_type == SAS_END_DEV)
1197 phy->identify.target_port_protocols =
1198 SAS_PROTOCOL_SSP;
1199 else if (phy->identify.device_type != NO_DEVICE)
1200 phy->identify.target_port_protocols =
1201 SAS_PROTOCOL_SMP;
1202 if (oob_done)
1203 sas_phy->oob_mode = SAS_OOB_MODE;
1204 phy->frame_rcvd_size =
1205 sizeof(struct sas_identify_frame);
1206 }
1207 memcpy(sas_phy->attached_sas_addr,
1208 &phy->att_dev_sas_addr, SAS_ADDR_SIZE);
1209
1210 if (MVS_CHIP_DISP->phy_work_around)
1211 MVS_CHIP_DISP->phy_work_around(mvi, i);
1212 }
1213 mv_dprintk("port %d attach dev info is %x\n",
1214 i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
1215 mv_dprintk("port %d attach sas addr is %llx\n",
1216 i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
1217out_done:
1218 if (get_st)
1219 MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
1220}
1221
1222static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1223{
1224 struct sas_ha_struct *sas_ha = sas_phy->ha;
1225 struct mvs_info *mvi = NULL; int i = 0, hi;
1226 struct mvs_phy *phy = sas_phy->lldd_phy;
1227 struct asd_sas_port *sas_port = sas_phy->port;
1228 struct mvs_port *port;
1229 unsigned long flags = 0;
1230 if (!sas_port)
1231 return;
1232
1233 while (sas_ha->sas_phy[i]) {
1234 if (sas_ha->sas_phy[i] == sas_phy)
1235 break;
1236 i++;
1237 }
1238 hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
1239 mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
1240 if (sas_port->id >= mvi->chip->n_phy)
1241 port = &mvi->port[sas_port->id - mvi->chip->n_phy];
1242 else
1243 port = &mvi->port[sas_port->id];
1244 if (lock)
1245 spin_lock_irqsave(&mvi->lock, flags);
1246 port->port_attached = 1;
1247 phy->port = port;
1248 if (phy->phy_type & PORT_TYPE_SAS) {
1249 port->wide_port_phymap = sas_port->phy_mask;
1250 mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
1251 mvs_update_wideport(mvi, sas_phy->id);
1252 }
1253 if (lock)
1254 spin_unlock_irqrestore(&mvi->lock, flags);
1255}
1256
1257static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
1258{
1259 /*Nothing*/
1260}
1261
1262
1263void mvs_port_formed(struct asd_sas_phy *sas_phy)
1264{
1265 mvs_port_notify_formed(sas_phy, 1);
1266}
1267
1268void mvs_port_deformed(struct asd_sas_phy *sas_phy)
1269{
1270 mvs_port_notify_deformed(sas_phy, 1);
1271}
1272
1273struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
1274{
1275 u32 dev;
1276 for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
1277 if (mvi->devices[dev].dev_type == NO_DEVICE) {
1278 mvi->devices[dev].device_id = dev;
1279 return &mvi->devices[dev];
1280 }
1281 }
1282
1283 if (dev == MVS_MAX_DEVICES)
1284		mv_printk("at most %d devices are supported, ignoring.\n",
1285 MVS_MAX_DEVICES);
1286
1287 return NULL;
1288}
1289
1290void mvs_free_dev(struct mvs_device *mvi_dev)
1291{
1292 u32 id = mvi_dev->device_id;
1293 memset(mvi_dev, 0, sizeof(*mvi_dev));
1294 mvi_dev->device_id = id;
1295 mvi_dev->dev_type = NO_DEVICE;
1296 mvi_dev->dev_status = MVS_DEV_NORMAL;
1297 mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
1298}
1299
1300int mvs_dev_found_notify(struct domain_device *dev, int lock)
1301{
1302 unsigned long flags = 0;
1303 int res = 0;
1304 struct mvs_info *mvi = NULL;
1305 struct domain_device *parent_dev = dev->parent;
1306 struct mvs_device *mvi_device;
1307
1308 mvi = mvs_find_dev_mvi(dev);
1309
1310 if (lock)
1311 spin_lock_irqsave(&mvi->lock, flags);
1312
1313 mvi_device = mvs_alloc_dev(mvi);
1314 if (!mvi_device) {
1315 res = -1;
1316 goto found_out;
1317 }
1318 dev->lldd_dev = mvi_device;
1319 mvi_device->dev_type = dev->dev_type;
1320 mvi_device->mvi_info = mvi;
1321 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
1322 int phy_id;
1323 u8 phy_num = parent_dev->ex_dev.num_phys;
1324 struct ex_phy *phy;
1325 for (phy_id = 0; phy_id < phy_num; phy_id++) {
1326 phy = &parent_dev->ex_dev.ex_phy[phy_id];
1327 if (SAS_ADDR(phy->attached_sas_addr) ==
1328 SAS_ADDR(dev->sas_addr)) {
1329 mvi_device->attached_phy = phy_id;
1330 break;
1331 }
1332 }
1333
1334 if (phy_id == phy_num) {
1335 mv_printk("Error: no attached dev:%016llx"
1336 "at ex:%016llx.\n",
1337 SAS_ADDR(dev->sas_addr),
1338 SAS_ADDR(parent_dev->sas_addr));
1339 res = -1;
1340 }
1341 }
1342
1343found_out:
1344 if (lock)
1345 spin_unlock_irqrestore(&mvi->lock, flags);
1346 return res;
1347}
1348
1349int mvs_dev_found(struct domain_device *dev)
1350{
1351 return mvs_dev_found_notify(dev, 1);
1352}
1353
1354void mvs_dev_gone_notify(struct domain_device *dev, int lock)
1355{
1356 unsigned long flags = 0;
1357	struct mvs_device *mvi_dev = dev->lldd_dev;
1358	struct mvs_info *mvi;
1359
1360	if (!mvi_dev) {
1361		mv_dprintk("found dev has already gone.\n");
1362		dev->lldd_dev = NULL;
1363		return;
1364	}
1365	mvi = mvi_dev->mvi_info;
1366	if (lock)
1367		spin_lock_irqsave(&mvi->lock, flags);
1368	mv_dprintk("found dev[%d:%x] is gone.\n",
1369		mvi_dev->device_id, mvi_dev->dev_type);
1370	mvs_free_reg_set(mvi, mvi_dev);
1371	mvs_free_dev(mvi_dev);
1372	dev->lldd_dev = NULL;
1373	if (lock)
1374		spin_unlock_irqrestore(&mvi->lock, flags);
1375}
1376
1377
1378void mvs_dev_gone(struct domain_device *dev)
1379{
1380 mvs_dev_gone_notify(dev, 1);
1381}
1382
1383static struct sas_task *mvs_alloc_task(void)
1384{
1385 struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL);
1386
1387 if (task) {
1388 INIT_LIST_HEAD(&task->list);
1389 spin_lock_init(&task->task_state_lock);
1390 task->task_state_flags = SAS_TASK_STATE_PENDING;
1391 init_timer(&task->timer);
1392 init_completion(&task->completion);
1393 }
1394 return task;
1395}
1396
1397static void mvs_free_task(struct sas_task *task)
1398{
1399 if (task) {
1400 BUG_ON(!list_empty(&task->list));
1401 kfree(task);
1402 }
1403}
1404
1405static void mvs_task_done(struct sas_task *task)
1406{
1407 if (!del_timer(&task->timer))
1408 return;
1409 complete(&task->completion);
1410}
1411
1412static void mvs_tmf_timedout(unsigned long data)
1413{
1414 struct sas_task *task = (struct sas_task *)data;
1415
1416 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1417 complete(&task->completion);
1418}
1419
1420/* XXX */
1421#define MVS_TASK_TIMEOUT 20
1422static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1423 void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
1424{
1425 int res, retry;
1426 struct sas_task *task = NULL;
1427
1428 for (retry = 0; retry < 3; retry++) {
1429 task = mvs_alloc_task();
1430 if (!task)
1431 return -ENOMEM;
1432
1433 task->dev = dev;
1434 task->task_proto = dev->tproto;
1435
1436 memcpy(&task->ssp_task, parameter, para_len);
1437 task->task_done = mvs_task_done;
1438
1439 task->timer.data = (unsigned long) task;
1440 task->timer.function = mvs_tmf_timedout;
1441 task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
1442 add_timer(&task->timer);
1443
1444 res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);
1445
1446 if (res) {
1447 del_timer(&task->timer);
1448			mv_printk("executing internal task failed: %d\n", res);
1449 goto ex_err;
1450 }
1451
1452 wait_for_completion(&task->completion);
1453 res = -TMF_RESP_FUNC_FAILED;
1454		/* Even if the TMF timed out, return directly. */
1455 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1456 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1457 mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
1458 goto ex_err;
1459 }
1460 }
1461
1462 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1463 task->task_status.stat == SAM_GOOD) {
1464 res = TMF_RESP_FUNC_COMPLETE;
1465 break;
1466 }
1467
1468 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1469 task->task_status.stat == SAS_DATA_UNDERRUN) {
1470 /* no error, but return the number of bytes of
1471 * underrun */
1472 res = task->task_status.residual;
1473 break;
1474 }
1475
1476 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1477 task->task_status.stat == SAS_DATA_OVERRUN) {
1478 mv_dprintk("blocked task error.\n");
1479 res = -EMSGSIZE;
1480 break;
1481 } else {
1482 mv_dprintk(" task to dev %016llx response: 0x%x "
1483 "status 0x%x\n",
1484 SAS_ADDR(dev->sas_addr),
1485 task->task_status.resp,
1486 task->task_status.stat);
1487 mvs_free_task(task);
1488 task = NULL;
1489
1490 }
1491 }
1492ex_err:
1493 BUG_ON(retry == 3 && task != NULL);
1494 if (task != NULL)
1495 mvs_free_task(task);
1496 return res;
1497}
1498
1499static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
1500 u8 *lun, struct mvs_tmf_task *tmf)
1501{
1502 struct sas_ssp_task ssp_task;
1503 DECLARE_COMPLETION_ONSTACK(completion);
1504 if (!(dev->tproto & SAS_PROTOCOL_SSP))
1505 return TMF_RESP_FUNC_ESUPP;
1506
1507 strncpy((u8 *)&ssp_task.LUN, lun, 8);
1508
1509 return mvs_exec_internal_tmf_task(dev, &ssp_task,
1510 sizeof(ssp_task), tmf);
1511}
1512
1513
1514/* Standard mandates link reset for ATA (type 0)
1515 * and hard reset for SSP (type 1), only for RECOVERY */
1516static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
1517{
1518 int rc;
1519 struct sas_phy *phy = sas_find_local_phy(dev);
1520 int reset_type = (dev->dev_type == SATA_DEV ||
1521 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1522 rc = sas_phy_reset(phy, reset_type);
1523 msleep(2000);
1524 return rc;
1525}
1526
1527/* mandatory SAM-3 */
1528int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1529{
1530 unsigned long flags;
1531	int i, phyno[WIDE_PORT_MAX_PHY], num, rc = TMF_RESP_FUNC_FAILED;
1532	struct mvs_tmf_task tmf_task;
1533	struct mvs_device *mvi_dev = dev->lldd_dev;
1534 struct mvs_info *mvi = mvi_dev->mvi_info;
1535
1536 tmf_task.tmf = TMF_LU_RESET;
1537 mvi_dev->dev_status = MVS_DEV_EH;
1538 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1539 if (rc == TMF_RESP_FUNC_COMPLETE) {
1540 num = mvs_find_dev_phyno(dev, phyno);
1541 spin_lock_irqsave(&mvi->lock, flags);
1542 for (i = 0; i < num; i++)
1543 mvs_release_task(mvi, phyno[i], dev);
1544 spin_unlock_irqrestore(&mvi->lock, flags);
1545 }
1546	/* If it failed, fall through to I_T nexus reset */
1547 mv_printk("%s for device[%x]:rc= %d\n", __func__,
1548 mvi_dev->device_id, rc);
1549 return rc;
1550}
1551
1552int mvs_I_T_nexus_reset(struct domain_device *dev)
1553{
1554 unsigned long flags;
1555	int i, phyno[WIDE_PORT_MAX_PHY], num, rc = TMF_RESP_FUNC_FAILED;
1556	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1557 struct mvs_info *mvi = mvi_dev->mvi_info;
1558
1559 if (mvi_dev->dev_status != MVS_DEV_EH)
1560 return TMF_RESP_FUNC_COMPLETE;
1561 rc = mvs_debug_I_T_nexus_reset(dev);
1562 mv_printk("%s for device[%x]:rc= %d\n",
1563 __func__, mvi_dev->device_id, rc);
1564
1565 /* housekeeper */
1566 num = mvs_find_dev_phyno(dev, phyno);
1567 spin_lock_irqsave(&mvi->lock, flags);
1568 for (i = 0; i < num; i++)
1569 mvs_release_task(mvi, phyno[i], dev);
1570 spin_unlock_irqrestore(&mvi->lock, flags);
1571
1572 return rc;
1573}
1574/* optional SAM-3 */
1575int mvs_query_task(struct sas_task *task)
1576{
1577 u32 tag;
1578 struct scsi_lun lun;
1579 struct mvs_tmf_task tmf_task;
1580 int rc = TMF_RESP_FUNC_FAILED;
1581
1582 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1583		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;
1584 struct domain_device *dev = task->dev;
1585 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1586 struct mvs_info *mvi = mvi_dev->mvi_info;
1587
1588 int_to_scsilun(cmnd->device->lun, &lun);
1589 rc = mvs_find_tag(mvi, task, &tag);
1590 if (rc == 0) {
1591 rc = TMF_RESP_FUNC_FAILED;
1592 return rc;
1593 }
1594
1595 tmf_task.tmf = TMF_QUERY_TASK;
1596 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1597
1598 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1599 switch (rc) {
1600 /* The task is still in Lun, release it then */
1601		/* The task is still in the LU; release it */
1602 /* The task is not in Lun or failed, reset the phy */
1603		/* The task is not in the LU or failed; reset the phy */
1604 case TMF_RESP_FUNC_COMPLETE:
1605 break;
1606 }
1607 }
1608 mv_printk("%s:rc= %d\n", __func__, rc);
1609 return rc;
1610}
1611
1612/* mandatory SAM-3, still need free task/slot info */
1613int mvs_abort_task(struct sas_task *task)
1614{
1615 struct scsi_lun lun;
1616 struct mvs_tmf_task tmf_task;
1617 struct domain_device *dev = task->dev;
1618 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1619 struct mvs_info *mvi = mvi_dev->mvi_info;
1620 int rc = TMF_RESP_FUNC_FAILED;
1621 unsigned long flags;
1622 u32 tag;
1623
1624 if (mvi->exp_req)
1625 mvi->exp_req--;
1626 spin_lock_irqsave(&task->task_state_lock, flags);
1627 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1628 spin_unlock_irqrestore(&task->task_state_lock, flags);
1629 rc = TMF_RESP_FUNC_COMPLETE;
1630 goto out;
1631 }
1632 spin_unlock_irqrestore(&task->task_state_lock, flags);
1633 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1634		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;
1635
1636 int_to_scsilun(cmnd->device->lun, &lun);
1637 rc = mvs_find_tag(mvi, task, &tag);
1638 if (rc == 0) {
1639 mv_printk("No such tag in %s\n", __func__);
1640 rc = TMF_RESP_FUNC_FAILED;
1641 return rc;
1642 }
1643
1644 tmf_task.tmf = TMF_ABORT_TASK;
1645 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1646
1647 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1648
1649		/* if successful, clear the task and forward the completion callback. */
1650 if (rc == TMF_RESP_FUNC_COMPLETE) {
1651 u32 slot_no;
1652 struct mvs_slot_info *slot;
1653
1654 if (task->lldd_task) {
1655 slot = task->lldd_task;
1656 slot_no = (u32) (slot - mvi->slot_info);
1657 mvs_slot_complete(mvi, slot_no, 1);
1658 }
1659 }
1660 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1661 task->task_proto & SAS_PROTOCOL_STP) {
1662		/* TODO: free register_set */
1663 } else {
1664 /* SMP */
1665
1666 }
1667out:
1668 if (rc != TMF_RESP_FUNC_COMPLETE)
1669 mv_printk("%s:rc= %d\n", __func__, rc);
1670 return rc;
1671}
1672
1673int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
1674{
1675 int rc = TMF_RESP_FUNC_FAILED;
1676 struct mvs_tmf_task tmf_task;
1677
1678 tmf_task.tmf = TMF_ABORT_TASK_SET;
1679 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1680
1681 return rc;
1682}
1683
1684int mvs_clear_aca(struct domain_device *dev, u8 *lun)
1685{
1686 int rc = TMF_RESP_FUNC_FAILED;
1687 struct mvs_tmf_task tmf_task;
1688
1689 tmf_task.tmf = TMF_CLEAR_ACA;
1690 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1691
1692 return rc;
1693}
1694
1695int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
1696{
1697 int rc = TMF_RESP_FUNC_FAILED;
1698 struct mvs_tmf_task tmf_task;
1699
1700 tmf_task.tmf = TMF_CLEAR_TASK_SET;
1701 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1702
1703 return rc;
1704}
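
For context, libsas reaches the TMF handlers above through the driver's domain function template; a minimal sketch of the wiring (the table name mvs_transport_ops is an assumption, the .lldd_* field names come from scsi/libsas.h):

	/* sketch: how libsas dispatches error handling into this file;
	 * the variable name is hypothetical, the fields are libsas'. */
	static struct sas_domain_function_template mvs_transport_ops = {
		.lldd_abort_task	= mvs_abort_task,
		.lldd_abort_task_set	= mvs_abort_task_set,
		.lldd_clear_aca		= mvs_clear_aca,
		.lldd_clear_task_set	= mvs_clear_task_set,
		.lldd_I_T_nexus_reset	= mvs_I_T_nexus_reset,
		.lldd_lu_reset		= mvs_lu_reset,
		.lldd_query_task	= mvs_query_task,
	};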
1705
1706static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1707 u32 slot_idx, int err)
1708{
1709 struct mvs_device *mvi_dev = task->dev->lldd_dev;
1710 struct task_status_struct *tstat = &task->task_status;
1711 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
1712 int stat = SAM_GOOD;
1713
1714
1715 resp->frame_len = sizeof(struct dev_to_host_fis);
1716 memcpy(&resp->ending_fis[0],
1717 SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
1718 sizeof(struct dev_to_host_fis));
1719 tstat->buf_valid_size = sizeof(*resp);
1720 if (unlikely(err))
1721 stat = SAS_PROTO_RESPONSE;
1722 return stat;
1723}
1724
1725static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1726 u32 slot_idx)
1727{
1728 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1729 int stat;
1730 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
1731 u32 tfs = 0;
1732 enum mvs_port_type type = PORT_TYPE_SAS;
1733
1734 if (err_dw0 & CMD_ISS_STPD)
1735 MVS_CHIP_DISP->issue_stop(mvi, type, tfs);
1736
1737 MVS_CHIP_DISP->command_active(mvi, slot_idx);
1738
1739 stat = SAM_CHECK_COND;
1740 switch (task->task_proto) {
1741 case SAS_PROTOCOL_SSP:
1742 stat = SAS_ABORTED_TASK;
1743 break;
1744 case SAS_PROTOCOL_SMP:
1745 stat = SAM_CHECK_COND;
1746 break;
1747
1748 case SAS_PROTOCOL_SATA:
1749 case SAS_PROTOCOL_STP:
1750 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1751 {
1752 if (err_dw0 == 0x80400002)
1753			mv_printk("unexpected reserved error 0x80400002\n");
1754
1755 task->ata_task.use_ncq = 0;
1756 stat = SAS_PROTO_RESPONSE;
1757 mvs_sata_done(mvi, task, slot_idx, 1);
1758
1759 }
1760 break;
1761 default:
1762 break;
1763 }
1764
1765 return stat;
1766}
1767
1768int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1769{
1770 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1771 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1772 struct sas_task *task = slot->task;
1773 struct mvs_device *mvi_dev = NULL;
1774 struct task_status_struct *tstat;
1775
1776 bool aborted;
1777 void *to;
1778 enum exec_status sts;
1779
1780 if (mvi->exp_req)
1781 mvi->exp_req--;
1782 if (unlikely(!task || !task->lldd_task))
1783 return -1;
1784
1785 tstat = &task->task_status;
1786 mvi_dev = task->dev->lldd_dev;
1787
1788 mvs_hba_cq_dump(mvi);
1789
1790 spin_lock(&task->task_state_lock);
1791 task->task_state_flags &=
1792 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1793 task->task_state_flags |= SAS_TASK_STATE_DONE;
1794	/* an abort may race with this completion */
1795 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
1796 spin_unlock(&task->task_state_lock);
1797
1798 memset(tstat, 0, sizeof(*tstat));
1799 tstat->resp = SAS_TASK_COMPLETE;
1800
1801 if (unlikely(aborted)) {
1802 tstat->stat = SAS_ABORTED_TASK;
1803 if (mvi_dev)
1804 mvi_dev->runing_req--;
1805 if (sas_protocol_ata(task->task_proto))
1806 mvs_free_reg_set(mvi, mvi_dev);
1807
1808 mvs_slot_task_free(mvi, task, slot, slot_idx);
1809 return -1;
1810 }
1811
1812 if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) {
1813		mv_dprintk("port has no device.\n");
1814 tstat->stat = SAS_PHY_DOWN;
1815 goto out;
1816 }
1817
1818 /*
1819 if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) {
1820		mv_dprintk("Find device[%016llx] RXQ_ERR %X, "
1821			"err info:%016llx\n",
1822 SAS_ADDR(task->dev->sas_addr),
1823 rx_desc, (u64)(*(u64 *) slot->response));
1824 }
1825 */
1826
1827 /* error info record present */
1828 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
1829 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1830 goto out;
1831 }
1832
1833 switch (task->task_proto) {
1834 case SAS_PROTOCOL_SSP:
1835 /* hw says status == 0, datapres == 0 */
1836 if (rx_desc & RXQ_GOOD) {
1837 tstat->stat = SAM_GOOD;
1838 tstat->resp = SAS_TASK_COMPLETE;
1839 }
1840 /* response frame present */
1841 else if (rx_desc & RXQ_RSP) {
1842 struct ssp_response_iu *iu = slot->response +
1843 sizeof(struct mvs_err_info);
1844 sas_ssp_task_response(mvi->dev, task, iu);
1845 } else
1846 tstat->stat = SAM_CHECK_COND;
1847 break;
1848
1849 case SAS_PROTOCOL_SMP: {
1850 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1851 tstat->stat = SAM_GOOD;
1852 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
1853 memcpy(to + sg_resp->offset,
1854 slot->response + sizeof(struct mvs_err_info),
1855 sg_dma_len(sg_resp));
1856 kunmap_atomic(to, KM_IRQ0);
1857 break;
1858 }
1859
1860 case SAS_PROTOCOL_SATA:
1861 case SAS_PROTOCOL_STP:
1862 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
1863 tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
1864 break;
1865 }
1866
1867 default:
1868 tstat->stat = SAM_CHECK_COND;
1869 break;
1870 }
1871
1872out:
1873 if (mvi_dev) {
1874 mvi_dev->runing_req--;
1875 if (sas_protocol_ata(task->task_proto))
1876 mvs_free_reg_set(mvi, mvi_dev);
1877 }
1878 mvs_slot_task_free(mvi, task, slot, slot_idx);
1879 sts = tstat->stat;
1880
1881 spin_unlock(&mvi->lock);
1882 if (task->task_done)
1883 task->task_done(task);
1884 else
1885		mv_dprintk("missing task_done callback.\n");
1886 spin_lock(&mvi->lock);
1887
1888 return sts;
1889}
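
Note the unlock/lock bracket around task_done() above: completion callbacks may re-enter the LLDD (for instance to queue another command), so they must never run under the host-wide lock. The pattern in isolation, as a sketch:

	/* sketch: call an upper-layer completion without holding
	 * mvi->lock, which the callback may itself try to take */
	spin_unlock(&mvi->lock);
	if (task->task_done)
		task->task_done(task);
	spin_lock(&mvi->lock);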
1890
1891void mvs_release_task(struct mvs_info *mvi,
1892 int phy_no, struct domain_device *dev)
1893{
1894 int i = 0; u32 slot_idx;
1895 struct mvs_phy *phy;
1896 struct mvs_port *port;
1897 struct mvs_slot_info *slot, *slot2;
1898
1899 phy = &mvi->phy[phy_no];
1900 port = phy->port;
1901 if (!port)
1902 return;
1903
1904 list_for_each_entry_safe(slot, slot2, &port->list, entry) {
1905 struct sas_task *task;
1906 slot_idx = (u32) (slot - mvi->slot_info);
1907 task = slot->task;
1908
1909 if (dev && task->dev != dev)
1910 continue;
1911
1912 mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
1913 slot_idx, slot->slot_tag, task);
1914
1915 if (task->task_proto & SAS_PROTOCOL_SSP) {
1916 mv_printk("attached with SSP task CDB[");
1917 for (i = 0; i < 16; i++)
1918 mv_printk(" %02x", task->ssp_task.cdb[i]);
1919 mv_printk(" ]\n");
1920 }
1921
1922 mvs_slot_complete(mvi, slot_idx, 1);
1923 }
1924}
1925
1926static void mvs_phy_disconnected(struct mvs_phy *phy)
1927{
1928 phy->phy_attached = 0;
1929 phy->att_dev_info = 0;
1930 phy->att_dev_sas_addr = 0;
1931}
1932
1933static void mvs_work_queue(struct work_struct *work)
1934{
1935 struct delayed_work *dw = container_of(work, struct delayed_work, work);
1936 struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
1937 struct mvs_info *mvi = mwq->mvi;
1938 unsigned long flags;
1939
1940 spin_lock_irqsave(&mvi->lock, flags);
1941 if (mwq->handler & PHY_PLUG_EVENT) {
1942 u32 phy_no = (unsigned long) mwq->data;
1943 struct sas_ha_struct *sas_ha = mvi->sas;
1944 struct mvs_phy *phy = &mvi->phy[phy_no];
1945 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1946
1947 if (phy->phy_event & PHY_PLUG_OUT) {
1948 u32 tmp;
1949 struct sas_identify_frame *id;
1950 id = (struct sas_identify_frame *)phy->frame_rcvd;
1951 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
1952 phy->phy_event &= ~PHY_PLUG_OUT;
1953 if (!(tmp & PHY_READY_MASK)) {
1954 sas_phy_disconnected(sas_phy);
1955 mvs_phy_disconnected(phy);
1956 sas_ha->notify_phy_event(sas_phy,
1957 PHYE_LOSS_OF_SIGNAL);
1958 mv_dprintk("phy%d Removed Device\n", phy_no);
1959 } else {
1960 MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
1961 mvs_update_phyinfo(mvi, phy_no, 1);
1962 mvs_bytes_dmaed(mvi, phy_no);
1963 mvs_port_notify_formed(sas_phy, 0);
1964 mv_dprintk("phy%d Attached Device\n", phy_no);
1965 }
1966 }
1967 }
1968 list_del(&mwq->entry);
1969 spin_unlock_irqrestore(&mvi->lock, flags);
1970 kfree(mwq);
1971}
1972
1973static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
1974{
1975 struct mvs_wq *mwq;
1976 int ret = 0;
1977
1978 mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
1979 if (mwq) {
1980 mwq->mvi = mvi;
1981 mwq->data = data;
1982 mwq->handler = handler;
1983 MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
1984 list_add_tail(&mwq->entry, &mvi->wq_list);
1985 schedule_delayed_work(&mwq->work_q, HZ * 2);
1986 } else
1987 ret = -ENOMEM;
1988
1989 return ret;
1990}
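
mvs_handle_event() is the producer side of the mvs_work_queue() consumer above; the hot-plug path later in this file queues the phy number exactly this way, e.g.:

	/* sketch: defer hot-plug handling of phy 3 by roughly 2s */
	mvs_handle_event(mvi, (void *)(unsigned long)3, PHY_PLUG_EVENT);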
1991
1992static void mvs_sig_time_out(unsigned long tphy)
1993{
1994 struct mvs_phy *phy = (struct mvs_phy *)tphy;
1995 struct mvs_info *mvi = phy->mvi;
1996 u8 phy_no;
1997
1998 for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
1999 if (&mvi->phy[phy_no] == phy) {
2000 mv_dprintk("Get signature time out, reset phy %d\n",
2001 phy_no+mvi->id*mvi->chip->n_phy);
2002 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
2003 }
2004 }
2005}
2006
2007static void mvs_sig_remove_timer(struct mvs_phy *phy)
2008{
2009 if (phy->timer.function)
2010 del_timer(&phy->timer);
2011 phy->timer.function = NULL;
2012}
2013
2014void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2015{
2016 u32 tmp;
2017 struct sas_ha_struct *sas_ha = mvi->sas;
2018 struct mvs_phy *phy = &mvi->phy[phy_no];
2019 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2020
2021 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
2022 mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
2023 MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
2024 mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
2025 phy->irq_status);
2026
2027 /*
2028	 * 'events' now carries the port event;
2029	 * we need to check the per-port interrupt status as well.
2030 */
2031
2032 if (phy->irq_status & PHYEV_DCDR_ERR)
2033 mv_dprintk("port %d STP decoding error.\n",
2034 phy_no+mvi->id*mvi->chip->n_phy);
2035
2036 if (phy->irq_status & PHYEV_POOF) {
2037 if (!(phy->phy_event & PHY_PLUG_OUT)) {
2038 int dev_sata = phy->phy_type & PORT_TYPE_SATA;
2039 int ready;
2040 mvs_release_task(mvi, phy_no, NULL);
2041 phy->phy_event |= PHY_PLUG_OUT;
2042 mvs_handle_event(mvi,
2043 (void *)(unsigned long)phy_no,
2044 PHY_PLUG_EVENT);
2045 ready = mvs_is_phy_ready(mvi, phy_no);
2046 if (!ready)
2047 mv_dprintk("phy%d Unplug Notice\n",
2048 phy_no +
2049 mvi->id * mvi->chip->n_phy);
2050 if (ready || dev_sata) {
2051 if (MVS_CHIP_DISP->stp_reset)
2052 MVS_CHIP_DISP->stp_reset(mvi,
2053 phy_no);
2054 else
2055 MVS_CHIP_DISP->phy_reset(mvi,
2056 phy_no, 0);
2057 return;
2058 }
2059 }
2060 }
2061
2062 if (phy->irq_status & PHYEV_COMWAKE) {
2063 tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
2064 MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
2065 tmp | PHYEV_SIG_FIS);
2066 if (phy->timer.function == NULL) {
2067 phy->timer.data = (unsigned long)phy;
2068 phy->timer.function = mvs_sig_time_out;
2069 phy->timer.expires = jiffies + 10*HZ;
2070 add_timer(&phy->timer);
2071 }
2072 }
2073 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
2074 phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
2075 mvs_sig_remove_timer(phy);
2076 mv_dprintk("notify plug in on phy[%d]\n", phy_no);
2077 if (phy->phy_status) {
2078 mdelay(10);
2079 MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
2080 if (phy->phy_type & PORT_TYPE_SATA) {
2081 tmp = MVS_CHIP_DISP->read_port_irq_mask(
2082 mvi, phy_no);
2083 tmp &= ~PHYEV_SIG_FIS;
2084 MVS_CHIP_DISP->write_port_irq_mask(mvi,
2085 phy_no, tmp);
2086 }
2087 mvs_update_phyinfo(mvi, phy_no, 0);
2088 mvs_bytes_dmaed(mvi, phy_no);
2089			/* check whether the driver needs to handle this hot plug */
2090 if (phy->phy_event & PHY_PLUG_OUT) {
2091 mvs_port_notify_formed(sas_phy, 0);
2092 phy->phy_event &= ~PHY_PLUG_OUT;
2093 }
2094 } else {
2095 mv_dprintk("plugin interrupt but phy%d is gone\n",
2096 phy_no + mvi->id*mvi->chip->n_phy);
2097 }
2098 } else if (phy->irq_status & PHYEV_BROAD_CH) {
2099 mv_dprintk("port %d broadcast change.\n",
2100 phy_no + mvi->id*mvi->chip->n_phy);
2101		/* workaround for Samsung disk drives */
2102 mdelay(1000);
2103 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
2104 }
2105 MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
2106}
2107
2108int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
2109{
2110 u32 rx_prod_idx, rx_desc;
2111 bool attn = false;
2112
2113 /* the first dword in the RX ring is special: it contains
2114 * a mirror of the hardware's RX producer index, so that
2115 * we don't have to stall the CPU reading that register.
2116 * The actual RX ring is offset by one dword, due to this.
2117 */
2118 rx_prod_idx = mvi->rx_cons;
2119 mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
2120 if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */
2121 return 0;
2122
2123	/* The completion entry may arrive late: re-read the producer
2124	 * index from the register and try again.  Note: if interrupt
2125	 * coalescing is enabled, the register must be read every time.
2126 */
2127 if (unlikely(mvi->rx_cons == rx_prod_idx))
2128 mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;
2129
2130 if (mvi->rx_cons == rx_prod_idx)
2131 return 0;
2132
2133 while (mvi->rx_cons != rx_prod_idx) {
2134 /* increment our internal RX consumer pointer */
2135 rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
2136 rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
2137
2138 if (likely(rx_desc & RXQ_DONE))
2139 mvs_slot_complete(mvi, rx_desc, 0);
2140 if (rx_desc & RXQ_ATTN) {
2141 attn = true;
2142 } else if (rx_desc & RXQ_ERR) {
2143 if (!(rx_desc & RXQ_DONE))
2144 mvs_slot_complete(mvi, rx_desc, 0);
2145 } else if (rx_desc & RXQ_SLOT_RESET) {
2146 mvs_slot_free(mvi, rx_desc);
2147 }
2148 }
2149
2150 if (attn && self_clear)
2151 MVS_CHIP_DISP->int_full(mvi);
2152 return 0;
2153}
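
The ring walk above relies on MVS_RX_RING_SZ being a power of two and on slot 0 of the ring mirroring the hardware's producer index; distilled into a sketch (helper name hypothetical):

	/* sketch: descriptor i lives at mvi->rx[i + 1]; the consumer
	 * index wraps with a power-of-two mask */
	static inline u32 mvs_rx_next(u32 idx)
	{
		return (idx + 1) & (MVS_RX_RING_SZ - 1);
	}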
2154
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
new file mode 100644
index 00000000000..aa2270af1ba
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -0,0 +1,406 @@
1/*
2 * Marvell 88SE64xx/88SE94xx main function header file
3 *
4 * Copyright 2007 Red Hat, Inc.
5 * Copyright 2008 Marvell. <kewei@marvell.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; version 2 of the
12 * License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23*/
24
25#ifndef _MV_SAS_H_
26#define _MV_SAS_H_
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/spinlock.h>
31#include <linux/delay.h>
32#include <linux/types.h>
33#include <linux/ctype.h>
34#include <linux/dma-mapping.h>
35#include <linux/pci.h>
36#include <linux/platform_device.h>
37#include <linux/interrupt.h>
38#include <linux/irq.h>
39#include <linux/vmalloc.h>
40#include <scsi/libsas.h>
41#include <scsi/scsi_tcq.h>
42#include <scsi/sas_ata.h>
43#include <linux/version.h>
44#include "mv_defs.h"
45
46#define DRV_NAME "mvsas"
47#define DRV_VERSION "0.8.2"
48#define _MV_DUMP 0
49#define MVS_ID_NOT_MAPPED 0x7f
50/* #define DISABLE_HOTPLUG_DMA_FIX */
51#define MAX_EXP_RUNNING_REQ 2
52#define WIDE_PORT_MAX_PHY 4
53#define MV_DISABLE_NCQ 0
54#define mv_printk(fmt, arg ...) \
55	printk(KERN_DEBUG "%s %d:" fmt, __FILE__, __LINE__, ## arg)
56#ifdef MV_DEBUG
57#define mv_dprintk(format, arg...) \
58	printk(KERN_DEBUG "%s %d:" format, __FILE__, __LINE__, ## arg)
59#else
60#define mv_dprintk(format, arg...)
61#endif
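
Because the non-debug variant of mv_dprintk() expands to nothing, side effects in its arguments silently disappear; a cautionary sketch:

	/* with MV_DEBUG unset this whole statement vanishes,
	 * taking the i++ side effect with it; keep arguments pure */
	mv_dprintk("phy %d\n", i++);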
62#define MV_MAX_U32 0xffffffff
63
64extern struct mvs_tgt_initiator mvs_tgt;
65extern struct mvs_info *tgt_mvi;
66extern const struct mvs_dispatch mvs_64xx_dispatch;
67extern const struct mvs_dispatch mvs_94xx_dispatch;
68
69#define DEV_IS_EXPANDER(type) \
70 ((type == EDGE_DEV) || (type == FANOUT_DEV))
71
72#define bit(n) ((u32)1 << n)
73
74#define for_each_phy(__lseq_mask, __mc, __lseq) \
75 for ((__mc) = (__lseq_mask), (__lseq) = 0; \
76	(__mc) != 0; \
77 (++__lseq), (__mc) >>= 1)
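
for_each_phy walks a phy bitmask by shifting __mc right while counting __lseq up, so the loop body tests the low bit; a usage sketch (do_phy_work is hypothetical):

	/* sketch: visit phys 0 and 2 of mask 0x5 */
	u32 mc;
	int phy;
	for_each_phy(0x5, mc, phy)
		if (mc & 1)
			do_phy_work(phy);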
78
79#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f)
80#define UNASSOC_D2H_FIS(id) \
81 ((void *) mvi->rx_fis + 0x100 * id)
82#define SATA_RECEIVED_FIS_LIST(reg_set) \
83 ((void *) mvi->rx_fis + mvi->chip->fis_offs + 0x100 * reg_set)
84#define SATA_RECEIVED_SDB_FIS(reg_set) \
85 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x58)
86#define SATA_RECEIVED_D2H_FIS(reg_set) \
87 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x40)
88#define SATA_RECEIVED_PIO_FIS(reg_set) \
89 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x20)
90#define SATA_RECEIVED_DMA_FIS(reg_set) \
91 (SATA_RECEIVED_FIS_LIST(reg_set) + 0x00)
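
Each SATA register set owns a 0x100-byte slice of the RX FIS area, with a fixed offset per FIS type; for example the D2H FIS that mvs_sata_done() copies resolves as:

	/* sketch: SATA_RECEIVED_D2H_FIS(rs) expands to
	 * mvi->rx_fis + mvi->chip->fis_offs + 0x100 * rs + 0x40 */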
92
93enum dev_status {
94 MVS_DEV_NORMAL = 0x0,
95 MVS_DEV_EH = 0x1,
96};
97
98
99struct mvs_info;
100
101struct mvs_dispatch {
102 char *name;
103 int (*chip_init)(struct mvs_info *mvi);
104 int (*spi_init)(struct mvs_info *mvi);
105 int (*chip_ioremap)(struct mvs_info *mvi);
106 void (*chip_iounmap)(struct mvs_info *mvi);
107 irqreturn_t (*isr)(struct mvs_info *mvi, int irq, u32 stat);
108 u32 (*isr_status)(struct mvs_info *mvi, int irq);
109 void (*interrupt_enable)(struct mvs_info *mvi);
110 void (*interrupt_disable)(struct mvs_info *mvi);
111
112 u32 (*read_phy_ctl)(struct mvs_info *mvi, u32 port);
113 void (*write_phy_ctl)(struct mvs_info *mvi, u32 port, u32 val);
114
115 u32 (*read_port_cfg_data)(struct mvs_info *mvi, u32 port);
116 void (*write_port_cfg_data)(struct mvs_info *mvi, u32 port, u32 val);
117 void (*write_port_cfg_addr)(struct mvs_info *mvi, u32 port, u32 addr);
118
119 u32 (*read_port_vsr_data)(struct mvs_info *mvi, u32 port);
120 void (*write_port_vsr_data)(struct mvs_info *mvi, u32 port, u32 val);
121 void (*write_port_vsr_addr)(struct mvs_info *mvi, u32 port, u32 addr);
122
123 u32 (*read_port_irq_stat)(struct mvs_info *mvi, u32 port);
124 void (*write_port_irq_stat)(struct mvs_info *mvi, u32 port, u32 val);
125
126 u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
127 void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
128
129 void (*get_sas_addr)(void *buf, u32 buflen);
130 void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
131 void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
132 u32 tfs);
133 void (*start_delivery)(struct mvs_info *mvi, u32 tx);
134 u32 (*rx_update)(struct mvs_info *mvi);
135 void (*int_full)(struct mvs_info *mvi);
136 u8 (*assign_reg_set)(struct mvs_info *mvi, u8 *tfs);
137 void (*free_reg_set)(struct mvs_info *mvi, u8 *tfs);
138 u32 (*prd_size)(void);
139 u32 (*prd_count)(void);
140 void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
141 void (*detect_porttype)(struct mvs_info *mvi, int i);
142 int (*oob_done)(struct mvs_info *mvi, int i);
143 void (*fix_phy_info)(struct mvs_info *mvi, int i,
144 struct sas_identify_frame *id);
145 void (*phy_work_around)(struct mvs_info *mvi, int i);
146 void (*phy_set_link_rate)(struct mvs_info *mvi, u32 phy_id,
147 struct sas_phy_linkrates *rates);
148 u32 (*phy_max_link_rate)(void);
149 void (*phy_disable)(struct mvs_info *mvi, u32 phy_id);
150 void (*phy_enable)(struct mvs_info *mvi, u32 phy_id);
151 void (*phy_reset)(struct mvs_info *mvi, u32 phy_id, int hard);
152 void (*stp_reset)(struct mvs_info *mvi, u32 phy_id);
153 void (*clear_active_cmds)(struct mvs_info *mvi);
154 u32 (*spi_read_data)(struct mvs_info *mvi);
155 void (*spi_write_data)(struct mvs_info *mvi, u32 data);
156 int (*spi_buildcmd)(struct mvs_info *mvi,
157 u32 *dwCmd,
158 u8 cmd,
159 u8 read,
160 u8 length,
161 u32 addr
162 );
163 int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
164 int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
165#ifndef DISABLE_HOTPLUG_DMA_FIX
166 void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd);
167#endif
168
169};
170
171struct mvs_chip_info {
172 u32 n_host;
173 u32 n_phy;
174 u32 fis_offs;
175 u32 fis_count;
176 u32 srs_sz;
177 u32 slot_width;
178 const struct mvs_dispatch *dispatch;
179};
180#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
181#define MVS_RX_FISL_SZ \
182 (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
183#define MVS_CHIP_DISP (mvi->chip->dispatch)
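
All per-chip (64xx vs 94xx) differences funnel through this dispatch pointer; a representative call, mirroring uses earlier in the file (note the macro hard-codes a local named mvi):

	/* sketch: hard-reset a phy through the per-chip dispatch table */
	MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);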
184
185struct mvs_err_info {
186 __le32 flags;
187 __le32 flags2;
188};
189
190struct mvs_cmd_hdr {
191 __le32 flags; /* PRD tbl len; SAS, SATA ctl */
192 __le32 lens; /* cmd, max resp frame len */
193 __le32 tags; /* targ port xfer tag; tag */
194 __le32 data_len; /* data xfer len */
195 __le64 cmd_tbl; /* command table address */
196 __le64 open_frame; /* open addr frame address */
197 __le64 status_buf; /* status buffer address */
198 __le64 prd_tbl; /* PRD tbl address */
199 __le32 reserved[4];
200};
201
202struct mvs_port {
203 struct asd_sas_port sas_port;
204 u8 port_attached;
205 u8 wide_port_phymap;
206 struct list_head list;
207};
208
209struct mvs_phy {
210 struct mvs_info *mvi;
211 struct mvs_port *port;
212 struct asd_sas_phy sas_phy;
213 struct sas_identify identify;
214 struct scsi_device *sdev;
215 struct timer_list timer;
216 u64 dev_sas_addr;
217 u64 att_dev_sas_addr;
218 u32 att_dev_info;
219 u32 dev_info;
220 u32 phy_type;
221 u32 phy_status;
222 u32 irq_status;
223 u32 frame_rcvd_size;
224 u8 frame_rcvd[32];
225 u8 phy_attached;
226 u8 phy_mode;
227 u8 reserved[2];
228 u32 phy_event;
229 enum sas_linkrate minimum_linkrate;
230 enum sas_linkrate maximum_linkrate;
231};
232
233struct mvs_device {
234 struct list_head dev_entry;
235 enum sas_dev_type dev_type;
236 struct mvs_info *mvi_info;
237 struct domain_device *sas_device;
238 u32 attached_phy;
239 u32 device_id;
240 u32 runing_req;
241 u8 taskfileset;
242 u8 dev_status;
243 u16 reserved;
244};
245
246struct mvs_slot_info {
247 struct list_head entry;
248 union {
249 struct sas_task *task;
250 void *tdata;
251 };
252 u32 n_elem;
253 u32 tx;
254 u32 slot_tag;
255
256 /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
257 * and PRD table
258 */
259 void *buf;
260 dma_addr_t buf_dma;
261#if _MV_DUMP
262 u32 cmd_size;
263#endif
264 void *response;
265 struct mvs_port *port;
266 struct mvs_device *device;
267 void *open_frame;
268};
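
Slots live in the slot_info[] flexible array at the tail of struct mvs_info below, so code such as mvs_abort_task() and mvs_release_task() recovers a slot's index by pointer arithmetic:

	/* sketch: slot index from a slot pointer */
	u32 slot_idx = (u32)(slot - mvi->slot_info);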
269
270struct mvs_info {
271 unsigned long flags;
272
273 /* host-wide lock */
274 spinlock_t lock;
275
276 /* our device */
277 struct pci_dev *pdev;
278 struct device *dev;
279
280 /* enhanced mode registers */
281 void __iomem *regs;
282
283 /* peripheral or soc registers */
284 void __iomem *regs_ex;
285 u8 sas_addr[SAS_ADDR_SIZE];
286
287 /* SCSI/SAS glue */
288 struct sas_ha_struct *sas;
289 struct Scsi_Host *shost;
290
291 /* TX (delivery) DMA ring */
292 __le32 *tx;
293 dma_addr_t tx_dma;
294
295 /* cached next-producer idx */
296 u32 tx_prod;
297
298 /* RX (completion) DMA ring */
299 __le32 *rx;
300 dma_addr_t rx_dma;
301
302 /* RX consumer idx */
303 u32 rx_cons;
304
305 /* RX'd FIS area */
306 __le32 *rx_fis;
307 dma_addr_t rx_fis_dma;
308
309 /* DMA command header slots */
310 struct mvs_cmd_hdr *slot;
311 dma_addr_t slot_dma;
312
313 u32 chip_id;
314 const struct mvs_chip_info *chip;
315
316 int tags_num;
317 DECLARE_BITMAP(tags, MVS_SLOTS);
318 /* further per-slot information */
319 struct mvs_phy phy[MVS_MAX_PHYS];
320 struct mvs_port port[MVS_MAX_PHYS];
321 u32 irq;
322 u32 exp_req;
323 u32 id;
324 u64 sata_reg_set;
325 struct list_head *hba_list;
326 struct list_head soc_entry;
327 struct list_head wq_list;
328 unsigned long instance;
329 u16 flashid;
330 u32 flashsize;
331 u32 flashsectSize;
332
333 void *addon;
334 struct mvs_device devices[MVS_MAX_DEVICES];
335#ifndef DISABLE_HOTPLUG_DMA_FIX
336 void *bulk_buffer;
337 dma_addr_t bulk_buffer_dma;
338#define TRASH_BUCKET_SIZE 0x20000
339#endif
340 struct mvs_slot_info slot_info[0];
341};
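
The trailing slot_info[0] member means each mvs_info is sized for its chip's slot count in one allocation; a hedged sketch of the sizing (the allocation site is not part of this hunk):

	/* sketch: one allocation covers the HBA struct plus its slots;
	 * 'chip' is the mvs_chip_info selected for the device */
	struct mvs_info *mvi = kzalloc(sizeof(*mvi) +
			(1U << chip->slot_width) * sizeof(struct mvs_slot_info),
			GFP_KERNEL);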
342
343struct mvs_prv_info{
344 u8 n_host;
345 u8 n_phy;
346 u16 reserve;
347 struct mvs_info *mvi[2];
348};
349
350struct mvs_wq {
351 struct delayed_work work_q;
352 struct mvs_info *mvi;
353 void *data;
354 int handler;
355 struct list_head entry;
356};
357
358struct mvs_task_exec_info {
359 struct sas_task *task;
360 struct mvs_cmd_hdr *hdr;
361 struct mvs_port *port;
362 u32 tag;
363 int n_elem;
364};
365
366
367/******************** function prototype *********************/
368void mvs_get_sas_addr(void *buf, u32 buflen);
369void mvs_tag_clear(struct mvs_info *mvi, u32 tag);
370void mvs_tag_free(struct mvs_info *mvi, u32 tag);
371void mvs_tag_set(struct mvs_info *mvi, unsigned int tag);
372int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out);
373void mvs_tag_init(struct mvs_info *mvi);
374void mvs_iounmap(void __iomem *regs);
375int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
376void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
377int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
378 void *funcdata);
379void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
380 u32 off_lo, u32 off_hi, u64 sas_addr);
381int mvs_slave_alloc(struct scsi_device *scsi_dev);
382int mvs_slave_configure(struct scsi_device *sdev);
383void mvs_scan_start(struct Scsi_Host *shost);
384int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
385int mvs_queue_command(struct sas_task *task, const int num,
386 gfp_t gfp_flags);
387int mvs_abort_task(struct sas_task *task);
388int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
389int mvs_clear_aca(struct domain_device *dev, u8 *lun);
390int mvs_clear_task_set(struct domain_device *dev, u8 *lun);
391void mvs_port_formed(struct asd_sas_phy *sas_phy);
392void mvs_port_deformed(struct asd_sas_phy *sas_phy);
393int mvs_dev_found(struct domain_device *dev);
394void mvs_dev_gone(struct domain_device *dev);
395int mvs_lu_reset(struct domain_device *dev, u8 *lun);
396int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags);
397int mvs_I_T_nexus_reset(struct domain_device *dev);
398int mvs_query_task(struct sas_task *task);
399void mvs_release_task(struct mvs_info *mvi, int phy_no,
400 struct domain_device *dev);
401void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
402void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
403int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
404void mvs_hexdump(u32 size, u8 *data, u32 baseaddr);
405#endif
406
diff --git a/drivers/scsi/osd/Kbuild b/drivers/scsi/osd/Kbuild
index 0e207aa67d1..5fd73d77c3a 100644
--- a/drivers/scsi/osd/Kbuild
+++ b/drivers/scsi/osd/Kbuild
@@ -11,31 +11,6 @@
11# it under the terms of the GNU General Public License version 2 11# it under the terms of the GNU General Public License version 2
12# 12#
13 13
14ifneq ($(OSD_INC),)
15# we are built out-of-tree Kconfigure everything as on
16
17CONFIG_SCSI_OSD_INITIATOR=m
18ccflags-y += -DCONFIG_SCSI_OSD_INITIATOR -DCONFIG_SCSI_OSD_INITIATOR_MODULE
19
20CONFIG_SCSI_OSD_ULD=m
21ccflags-y += -DCONFIG_SCSI_OSD_ULD -DCONFIG_SCSI_OSD_ULD_MODULE
22
23# CONFIG_SCSI_OSD_DPRINT_SENSE =
24# 0 - no print of errors
25# 1 - print errors
26# 2 - errors + warrnings
27ccflags-y += -DCONFIG_SCSI_OSD_DPRINT_SENSE=1
28
29# Uncomment to turn debug on
30# ccflags-y += -DCONFIG_SCSI_OSD_DEBUG
31
32# if we are built out-of-tree and the hosting kernel has OSD headers
33# then "ccflags-y +=" will not pick the out-off-tree headers. Only by doing
34# this it will work. This might break in future kernels
35LINUXINCLUDE := -I$(OSD_INC) $(LINUXINCLUDE)
36
37endif
38
39# libosd.ko - osd-initiator library 14# libosd.ko - osd-initiator library
40libosd-y := osd_initiator.o 15libosd-y := osd_initiator.o
41obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o 16obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o
diff --git a/drivers/scsi/osd/Makefile b/drivers/scsi/osd/Makefile
deleted file mode 100755
index d905344f83b..00000000000
--- a/drivers/scsi/osd/Makefile
+++ /dev/null
@@ -1,37 +0,0 @@
1#
2# Makefile for the OSD modules (out of tree)
3#
4# Copyright (C) 2008 Panasas Inc. All rights reserved.
5#
6# Authors:
7# Boaz Harrosh <bharrosh@panasas.com>
8# Benny Halevy <bhalevy@panasas.com>
9#
10# This program is free software; you can redistribute it and/or modify
11# it under the terms of the GNU General Public License version 2
12#
13# This Makefile is used to call the kernel Makefile in case of an out-of-tree
14# build.
15# $KSRC should point to a Kernel source tree otherwise host's default is
16# used. (eg. /lib/modules/`uname -r`/build)
17
18# include path for out-of-tree Headers
19OSD_INC ?= `pwd`/../../../include
20
21# allow users to override these
22# e.g. to compile for a kernel that you aren't currently running
23KSRC ?= /lib/modules/$(shell uname -r)/build
24KBUILD_OUTPUT ?=
25ARCH ?=
26V ?= 0
27
28# this is the basic Kbuild out-of-tree invocation, with the M= option
29KBUILD_BASE = +$(MAKE) -C $(KSRC) M=`pwd` KBUILD_OUTPUT=$(KBUILD_OUTPUT) ARCH=$(ARCH) V=$(V)
30
31all: libosd
32
33libosd: ;
34 $(KBUILD_BASE) OSD_INC=$(OSD_INC) modules
35
36clean:
37 $(KBUILD_BASE) clean
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 1ce6b24abab..7a117c18114 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -118,39 +118,39 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
118 _osd_ver_desc(or)); 118 _osd_ver_desc(or));
119 119
120 pFirst = get_attrs[a++].val_ptr; 120 pFirst = get_attrs[a++].val_ptr;
121 OSD_INFO("OSD_ATTR_RI_VENDOR_IDENTIFICATION [%s]\n", 121 OSD_INFO("VENDOR_IDENTIFICATION [%s]\n",
122 (char *)pFirst); 122 (char *)pFirst);
123 123
124 pFirst = get_attrs[a++].val_ptr; 124 pFirst = get_attrs[a++].val_ptr;
125 OSD_INFO("OSD_ATTR_RI_PRODUCT_IDENTIFICATION [%s]\n", 125 OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
126 (char *)pFirst); 126 (char *)pFirst);
127 127
128 pFirst = get_attrs[a++].val_ptr; 128 pFirst = get_attrs[a++].val_ptr;
129 OSD_INFO("OSD_ATTR_RI_PRODUCT_MODEL [%s]\n", 129 OSD_INFO("PRODUCT_MODEL [%s]\n",
130 (char *)pFirst); 130 (char *)pFirst);
131 131
132 pFirst = get_attrs[a++].val_ptr; 132 pFirst = get_attrs[a++].val_ptr;
133 OSD_INFO("OSD_ATTR_RI_PRODUCT_REVISION_LEVEL [%u]\n", 133 OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
134 pFirst ? get_unaligned_be32(pFirst) : ~0U); 134 pFirst ? get_unaligned_be32(pFirst) : ~0U);
135 135
136 pFirst = get_attrs[a++].val_ptr; 136 pFirst = get_attrs[a++].val_ptr;
137 OSD_INFO("OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER [%s]\n", 137 OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n",
138 (char *)pFirst); 138 (char *)pFirst);
139 139
140 pFirst = get_attrs[a].val_ptr; 140 pFirst = get_attrs[a].val_ptr;
141 OSD_INFO("OSD_ATTR_RI_OSD_NAME [%s]\n", (char *)pFirst); 141 OSD_INFO("OSD_NAME [%s]\n", (char *)pFirst);
142 a++; 142 a++;
143 143
144 pFirst = get_attrs[a++].val_ptr; 144 pFirst = get_attrs[a++].val_ptr;
145 OSD_INFO("OSD_ATTR_RI_TOTAL_CAPACITY [0x%llx]\n", 145 OSD_INFO("TOTAL_CAPACITY [0x%llx]\n",
146 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); 146 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
147 147
148 pFirst = get_attrs[a++].val_ptr; 148 pFirst = get_attrs[a++].val_ptr;
149 OSD_INFO("OSD_ATTR_RI_USED_CAPACITY [0x%llx]\n", 149 OSD_INFO("USED_CAPACITY [0x%llx]\n",
150 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); 150 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
151 151
152 pFirst = get_attrs[a++].val_ptr; 152 pFirst = get_attrs[a++].val_ptr;
153 OSD_INFO("OSD_ATTR_RI_NUMBER_OF_PARTITIONS [%llu]\n", 153 OSD_INFO("NUMBER_OF_PARTITIONS [%llu]\n",
154 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); 154 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
155 155
156 if (a >= nelem) 156 if (a >= nelem)
@@ -158,7 +158,7 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
158 158
159 /* FIXME: Where are the time utilities */ 159 /* FIXME: Where are the time utilities */
160 pFirst = get_attrs[a++].val_ptr; 160 pFirst = get_attrs[a++].val_ptr;
161 OSD_INFO("OSD_ATTR_RI_CLOCK [0x%02x%02x%02x%02x%02x%02x]\n", 161 OSD_INFO("CLOCK [0x%02x%02x%02x%02x%02x%02x]\n",
162 ((char *)pFirst)[0], ((char *)pFirst)[1], 162 ((char *)pFirst)[0], ((char *)pFirst)[1],
163 ((char *)pFirst)[2], ((char *)pFirst)[3], 163 ((char *)pFirst)[2], ((char *)pFirst)[3],
164 ((char *)pFirst)[4], ((char *)pFirst)[5]); 164 ((char *)pFirst)[4], ((char *)pFirst)[5]);
@@ -169,7 +169,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
169 169
170 hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1, 170 hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
171 sid_dump, sizeof(sid_dump), true); 171 sid_dump, sizeof(sid_dump), true);
172 OSD_INFO("OSD_ATTR_RI_OSD_SYSTEM_ID(%d) [%s]\n", len, sid_dump); 172 OSD_INFO("OSD_SYSTEM_ID(%d)\n"
173 " [%s]\n", len, sid_dump);
173 a++; 174 a++;
174 } 175 }
175out: 176out:
@@ -669,7 +670,7 @@ static int _osd_req_list_objects(struct osd_request *or,
669 __be16 action, const struct osd_obj_id *obj, osd_id initial_id, 670 __be16 action, const struct osd_obj_id *obj, osd_id initial_id,
670 struct osd_obj_id_list *list, unsigned nelem) 671 struct osd_obj_id_list *list, unsigned nelem)
671{ 672{
672 struct request_queue *q = or->osd_dev->scsi_device->request_queue; 673 struct request_queue *q = osd_request_queue(or->osd_dev);
673 u64 len = nelem * sizeof(osd_id) + sizeof(*list); 674 u64 len = nelem * sizeof(osd_id) + sizeof(*list);
674 struct bio *bio; 675 struct bio *bio;
675 676
@@ -778,16 +779,32 @@ EXPORT_SYMBOL(osd_req_remove_object);
778*/ 779*/
779 780
780void osd_req_write(struct osd_request *or, 781void osd_req_write(struct osd_request *or,
781 const struct osd_obj_id *obj, struct bio *bio, u64 offset) 782 const struct osd_obj_id *obj, u64 offset,
783 struct bio *bio, u64 len)
782{ 784{
783 _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, bio->bi_size); 785 _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
784 WARN_ON(or->out.bio || or->out.total_bytes); 786 WARN_ON(or->out.bio || or->out.total_bytes);
785 bio->bi_rw |= (1 << BIO_RW); 787 WARN_ON(0 == bio_rw_flagged(bio, BIO_RW));
786 or->out.bio = bio; 788 or->out.bio = bio;
787 or->out.total_bytes = bio->bi_size; 789 or->out.total_bytes = len;
788} 790}
789EXPORT_SYMBOL(osd_req_write); 791EXPORT_SYMBOL(osd_req_write);
790 792
793int osd_req_write_kern(struct osd_request *or,
794 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
795{
796 struct request_queue *req_q = osd_request_queue(or->osd_dev);
797 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
798
799 if (IS_ERR(bio))
800 return PTR_ERR(bio);
801
802 bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
803 osd_req_write(or, obj, offset, bio, len);
804 return 0;
805}
806EXPORT_SYMBOL(osd_req_write_kern);
807
791/*TODO: void osd_req_append(struct osd_request *, 808/*TODO: void osd_req_append(struct osd_request *,
792 const struct osd_obj_id *, struct bio *data_out); */ 809 const struct osd_obj_id *, struct bio *data_out); */
793/*TODO: void osd_req_create_write(struct osd_request *, 810/*TODO: void osd_req_create_write(struct osd_request *,
@@ -813,16 +830,31 @@ void osd_req_flush_object(struct osd_request *or,
813EXPORT_SYMBOL(osd_req_flush_object); 830EXPORT_SYMBOL(osd_req_flush_object);
814 831
815void osd_req_read(struct osd_request *or, 832void osd_req_read(struct osd_request *or,
816 const struct osd_obj_id *obj, struct bio *bio, u64 offset) 833 const struct osd_obj_id *obj, u64 offset,
834 struct bio *bio, u64 len)
817{ 835{
818 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, bio->bi_size); 836 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
819 WARN_ON(or->in.bio || or->in.total_bytes); 837 WARN_ON(or->in.bio || or->in.total_bytes);
820 bio->bi_rw &= ~(1 << BIO_RW); 838 WARN_ON(1 == bio_rw_flagged(bio, BIO_RW));
821 or->in.bio = bio; 839 or->in.bio = bio;
822 or->in.total_bytes = bio->bi_size; 840 or->in.total_bytes = len;
823} 841}
824EXPORT_SYMBOL(osd_req_read); 842EXPORT_SYMBOL(osd_req_read);
825 843
844int osd_req_read_kern(struct osd_request *or,
845 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
846{
847 struct request_queue *req_q = osd_request_queue(or->osd_dev);
848 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
849
850 if (IS_ERR(bio))
851 return PTR_ERR(bio);
852
853 osd_req_read(or, obj, offset, bio, len);
854 return 0;
855}
856EXPORT_SYMBOL(osd_req_read_kern);
857
826void osd_req_get_attributes(struct osd_request *or, 858void osd_req_get_attributes(struct osd_request *or,
827 const struct osd_obj_id *obj) 859 const struct osd_obj_id *obj)
828{ 860{
@@ -889,26 +921,6 @@ int osd_req_add_set_attr_list(struct osd_request *or,
889} 921}
890EXPORT_SYMBOL(osd_req_add_set_attr_list); 922EXPORT_SYMBOL(osd_req_add_set_attr_list);
891 923
892static int _append_map_kern(struct request *req,
893 void *buff, unsigned len, gfp_t flags)
894{
895 struct bio *bio;
896 int ret;
897
898 bio = bio_map_kern(req->q, buff, len, flags);
899 if (IS_ERR(bio)) {
900 OSD_ERR("Failed bio_map_kern(%p, %d) => %ld\n", buff, len,
901 PTR_ERR(bio));
902 return PTR_ERR(bio);
903 }
904 ret = blk_rq_append_bio(req->q, req, bio);
905 if (ret) {
906 OSD_ERR("Failed blk_rq_append_bio(%p) => %d\n", bio, ret);
907 bio_put(bio);
908 }
909 return ret;
910}
911
912static int _req_append_segment(struct osd_request *or, 924static int _req_append_segment(struct osd_request *or,
913 unsigned padding, struct _osd_req_data_segment *seg, 925 unsigned padding, struct _osd_req_data_segment *seg,
914 struct _osd_req_data_segment *last_seg, struct _osd_io_info *io) 926 struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
@@ -924,14 +936,14 @@ static int _req_append_segment(struct osd_request *or,
924 else 936 else
925 pad_buff = io->pad_buff; 937 pad_buff = io->pad_buff;
926 938
927 ret = _append_map_kern(io->req, pad_buff, padding, 939 ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
928 or->alloc_flags); 940 or->alloc_flags);
929 if (ret) 941 if (ret)
930 return ret; 942 return ret;
931 io->total_bytes += padding; 943 io->total_bytes += padding;
932 } 944 }
933 945
934 ret = _append_map_kern(io->req, seg->buff, seg->total_bytes, 946 ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
935 or->alloc_flags); 947 or->alloc_flags);
936 if (ret) 948 if (ret)
937 return ret; 949 return ret;
@@ -1233,7 +1245,7 @@ static inline void osd_sec_parms_set_in_offset(bool is_v1,
1233} 1245}
1234 1246
1235static int _osd_req_finalize_data_integrity(struct osd_request *or, 1247static int _osd_req_finalize_data_integrity(struct osd_request *or,
1236 bool has_in, bool has_out, const u8 *cap_key) 1248 bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key)
1237{ 1249{
1238 struct osd_security_parameters *sec_parms = _osd_req_sec_params(or); 1250 struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
1239 int ret; 1251 int ret;
@@ -1248,8 +1260,7 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
1248 }; 1260 };
1249 unsigned pad; 1261 unsigned pad;
1250 1262
1251 or->out_data_integ.data_bytes = cpu_to_be64( 1263 or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
1252 or->out.bio ? or->out.bio->bi_size : 0);
1253 or->out_data_integ.set_attributes_bytes = cpu_to_be64( 1264 or->out_data_integ.set_attributes_bytes = cpu_to_be64(
1254 or->set_attr.total_bytes); 1265 or->set_attr.total_bytes);
1255 or->out_data_integ.get_attributes_bytes = cpu_to_be64( 1266 or->out_data_integ.get_attributes_bytes = cpu_to_be64(
@@ -1293,6 +1304,21 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
1293/* 1304/*
1294 * osd_finalize_request and helpers 1305 * osd_finalize_request and helpers
1295 */ 1306 */
1307static struct request *_make_request(struct request_queue *q, bool has_write,
1308 struct _osd_io_info *oii, gfp_t flags)
1309{
1310 if (oii->bio)
1311 return blk_make_request(q, oii->bio, flags);
1312 else {
1313 struct request *req;
1314
1315 req = blk_get_request(q, has_write ? WRITE : READ, flags);
1316 if (unlikely(!req))
1317 return ERR_PTR(-ENOMEM);
1318
1319 return req;
1320 }
1321}
1296 1322
1297static int _init_blk_request(struct osd_request *or, 1323static int _init_blk_request(struct osd_request *or,
1298 bool has_in, bool has_out) 1324 bool has_in, bool has_out)
@@ -1301,14 +1327,18 @@ static int _init_blk_request(struct osd_request *or,
1301 struct scsi_device *scsi_device = or->osd_dev->scsi_device; 1327 struct scsi_device *scsi_device = or->osd_dev->scsi_device;
1302 struct request_queue *q = scsi_device->request_queue; 1328 struct request_queue *q = scsi_device->request_queue;
1303 struct request *req; 1329 struct request *req;
1304 int ret = -ENOMEM; 1330 int ret;
1305 1331
1306 req = blk_get_request(q, has_out, flags); 1332 req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
1307 if (!req) 1333 if (IS_ERR(req)) {
1334 ret = PTR_ERR(req);
1308 goto out; 1335 goto out;
1336 }
1309 1337
1310 or->request = req; 1338 or->request = req;
1311 req->cmd_type = REQ_TYPE_BLOCK_PC; 1339 req->cmd_type = REQ_TYPE_BLOCK_PC;
1340 req->cmd_flags |= REQ_QUIET;
1341
1312 req->timeout = or->timeout; 1342 req->timeout = or->timeout;
1313 req->retries = or->retries; 1343 req->retries = or->retries;
1314 req->sense = or->sense; 1344 req->sense = or->sense;
@@ -1318,9 +1348,10 @@ static int _init_blk_request(struct osd_request *or,
1318 or->out.req = req; 1348 or->out.req = req;
1319 if (has_in) { 1349 if (has_in) {
1320 /* allocate bidi request */ 1350 /* allocate bidi request */
1321 req = blk_get_request(q, READ, flags); 1351 req = _make_request(q, false, &or->in, flags);
1322 if (!req) { 1352 if (IS_ERR(req)) {
1323 OSD_DEBUG("blk_get_request for bidi failed\n"); 1353 OSD_DEBUG("blk_get_request for bidi failed\n");
1354 ret = PTR_ERR(req);
1324 goto out; 1355 goto out;
1325 } 1356 }
1326 req->cmd_type = REQ_TYPE_BLOCK_PC; 1357 req->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1341,6 +1372,7 @@ int osd_finalize_request(struct osd_request *or,
1341{ 1372{
1342 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); 1373 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1343 bool has_in, has_out; 1374 bool has_in, has_out;
1375 u64 out_data_bytes = or->out.total_bytes;
1344 int ret; 1376 int ret;
1345 1377
1346 if (options & OSD_REQ_FUA) 1378 if (options & OSD_REQ_FUA)
@@ -1364,26 +1396,6 @@ int osd_finalize_request(struct osd_request *or,
1364 return ret; 1396 return ret;
1365 } 1397 }
1366 1398
1367 if (or->out.bio) {
1368 ret = blk_rq_append_bio(or->request->q, or->out.req,
1369 or->out.bio);
1370 if (ret) {
1371 OSD_DEBUG("blk_rq_append_bio out failed\n");
1372 return ret;
1373 }
1374 OSD_DEBUG("out bytes=%llu (bytes_req=%u)\n",
1375 _LLU(or->out.total_bytes), or->out.req->data_len);
1376 }
1377 if (or->in.bio) {
1378 ret = blk_rq_append_bio(or->request->q, or->in.req, or->in.bio);
1379 if (ret) {
1380 OSD_DEBUG("blk_rq_append_bio in failed\n");
1381 return ret;
1382 }
1383 OSD_DEBUG("in bytes=%llu (bytes_req=%u)\n",
1384 _LLU(or->in.total_bytes), or->in.req->data_len);
1385 }
1386
1387 or->out.pad_buff = sg_out_pad_buffer; 1399 or->out.pad_buff = sg_out_pad_buffer;
1388 or->in.pad_buff = sg_in_pad_buffer; 1400 or->in.pad_buff = sg_in_pad_buffer;
1389 1401
@@ -1410,7 +1422,8 @@ int osd_finalize_request(struct osd_request *or,
1410 } 1422 }
1411 } 1423 }
1412 1424
1413 ret = _osd_req_finalize_data_integrity(or, has_in, has_out, cap_key); 1425 ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
1426 out_data_bytes, cap_key);
1414 if (ret) 1427 if (ret)
1415 return ret; 1428 return ret;
1416 1429
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index 22b59e13ba8..0bdef339090 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -49,6 +49,7 @@
49#include <linux/device.h> 49#include <linux/device.h>
50#include <linux/idr.h> 50#include <linux/idr.h>
51#include <linux/major.h> 51#include <linux/major.h>
52#include <linux/file.h>
52 53
53#include <scsi/scsi.h> 54#include <scsi/scsi.h>
54#include <scsi/scsi_driver.h> 55#include <scsi/scsi_driver.h>
@@ -175,10 +176,9 @@ static const struct file_operations osd_fops = {
175 176
176struct osd_dev *osduld_path_lookup(const char *name) 177struct osd_dev *osduld_path_lookup(const char *name)
177{ 178{
178 struct path path; 179 struct osd_uld_device *oud;
179 struct inode *inode; 180 struct osd_dev *od;
180 struct cdev *cdev; 181 struct file *file;
181 struct osd_uld_device *uninitialized_var(oud);
182 int error; 182 int error;
183 183
184 if (!name || !*name) { 184 if (!name || !*name) {
@@ -186,52 +186,46 @@ struct osd_dev *osduld_path_lookup(const char *name)
186 return ERR_PTR(-EINVAL); 186 return ERR_PTR(-EINVAL);
187 } 187 }
188 188
189 error = kern_path(name, LOOKUP_FOLLOW, &path); 189 od = kzalloc(sizeof(*od), GFP_KERNEL);
190 if (error) { 190 if (!od)
191 OSD_ERR("path_lookup of %s failed=>%d\n", name, error); 191 return ERR_PTR(-ENOMEM);
192 return ERR_PTR(error);
193 }
194 192
195 inode = path.dentry->d_inode; 193 file = filp_open(name, O_RDWR, 0);
196 error = -EINVAL; /* Not the right device e.g osd_uld_device */ 194 if (IS_ERR(file)) {
197 if (!S_ISCHR(inode->i_mode)) { 195 error = PTR_ERR(file);
198 OSD_DEBUG("!S_ISCHR()\n"); 196 goto free_od;
199 goto out;
200 } 197 }
201 198
202 cdev = inode->i_cdev; 199 if (file->f_op != &osd_fops){
203 if (!cdev) { 200 error = -EINVAL;
204 OSD_ERR("Before mounting an OSD Based filesystem\n"); 201 goto close_file;
205 OSD_ERR(" user-mode must open+close the %s device\n", name);
206 OSD_ERR(" Example: bash: echo < %s\n", name);
207 goto out;
208 } 202 }
209 203
210 /* The Magic wand. Is it our char-dev */ 204 oud = file->private_data;
211 /* TODO: Support sg devices */
212 if (cdev->owner != THIS_MODULE) {
213 OSD_ERR("Error mounting %s - is not an OSD device\n", name);
214 goto out;
215 }
216 205
217 oud = container_of(cdev, struct osd_uld_device, cdev); 206 *od = oud->od;
207 od->file = file;
218 208
219 __uld_get(oud); 209 return od;
220 error = 0;
221 210
222out: 211close_file:
223 path_put(&path); 212 fput(file);
224 return error ? ERR_PTR(error) : &oud->od; 213free_od:
214 kfree(od);
215 return ERR_PTR(error);
225} 216}
226EXPORT_SYMBOL(osduld_path_lookup); 217EXPORT_SYMBOL(osduld_path_lookup);
227 218
228void osduld_put_device(struct osd_dev *od) 219void osduld_put_device(struct osd_dev *od)
229{ 220{
230 if (od) {
231 struct osd_uld_device *oud = container_of(od,
232 struct osd_uld_device, od);
233 221
234 __uld_put(oud); 222 if (od && !IS_ERR(od)) {
223 struct osd_uld_device *oud = od->file->private_data;
224
225 BUG_ON(od->scsi_device != oud->od.scsi_device);
226
227 fput(od->file);
228 kfree(od);
235 } 229 }
236} 230}
237EXPORT_SYMBOL(osduld_put_device); 231EXPORT_SYMBOL(osduld_put_device);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 5defe5ea5ed..8371d917a9a 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -17,9 +17,12 @@
17* General Public License for more details. 17* General Public License for more details.
18* 18*
19******************************************************************************/ 19******************************************************************************/
20#define QLA1280_VERSION "3.26" 20#define QLA1280_VERSION "3.27"
21/***************************************************************************** 21/*****************************************************************************
22 Revision History: 22 Revision History:
23 Rev 3.27, February 10, 2009, Michael Reed
24 - General code cleanup.
25 - Improve error recovery.
23 Rev 3.26, January 16, 2006 Jes Sorensen 26 Rev 3.26, January 16, 2006 Jes Sorensen
24 - Ditch all < 2.6 support 27 - Ditch all < 2.6 support
25 Rev 3.25.1, February 10, 2005 Christoph Hellwig 28 Rev 3.25.1, February 10, 2005 Christoph Hellwig
@@ -435,7 +438,6 @@ static int qla1280_mailbox_command(struct scsi_qla_host *,
435 uint8_t, uint16_t *); 438 uint8_t, uint16_t *);
436static int qla1280_bus_reset(struct scsi_qla_host *, int); 439static int qla1280_bus_reset(struct scsi_qla_host *, int);
437static int qla1280_device_reset(struct scsi_qla_host *, int, int); 440static int qla1280_device_reset(struct scsi_qla_host *, int, int);
438static int qla1280_abort_device(struct scsi_qla_host *, int, int, int);
439static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int); 441static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
440static int qla1280_abort_isp(struct scsi_qla_host *); 442static int qla1280_abort_isp(struct scsi_qla_host *);
441#ifdef QLA_64BIT_PTR 443#ifdef QLA_64BIT_PTR
@@ -698,7 +700,7 @@ qla1280_info(struct Scsi_Host *host)
698} 700}
699 701
700/************************************************************************** 702/**************************************************************************
701 * qla1200_queuecommand 703 * qla1280_queuecommand
702 * Queue a command to the controller. 704 * Queue a command to the controller.
703 * 705 *
704 * Note: 706 * Note:
@@ -713,12 +715,14 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
713{ 715{
714 struct Scsi_Host *host = cmd->device->host; 716 struct Scsi_Host *host = cmd->device->host;
715 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; 717 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
716 struct srb *sp = (struct srb *)&cmd->SCp; 718 struct srb *sp = (struct srb *)CMD_SP(cmd);
717 int status; 719 int status;
718 720
719 cmd->scsi_done = fn; 721 cmd->scsi_done = fn;
720 sp->cmd = cmd; 722 sp->cmd = cmd;
721 sp->flags = 0; 723 sp->flags = 0;
724 sp->wait = NULL;
725 CMD_HANDLE(cmd) = (unsigned char *)NULL;
722 726
723 qla1280_print_scsi_cmd(5, cmd); 727 qla1280_print_scsi_cmd(5, cmd);
724 728
@@ -738,21 +742,11 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
738 742
739enum action { 743enum action {
740 ABORT_COMMAND, 744 ABORT_COMMAND,
741 ABORT_DEVICE,
742 DEVICE_RESET, 745 DEVICE_RESET,
743 BUS_RESET, 746 BUS_RESET,
744 ADAPTER_RESET, 747 ADAPTER_RESET,
745 FAIL
746}; 748};
747 749
748/* timer action for error action processor */
749static void qla1280_error_wait_timeout(unsigned long __data)
750{
751 struct scsi_cmnd *cmd = (struct scsi_cmnd *)__data;
752 struct srb *sp = (struct srb *)CMD_SP(cmd);
753
754 complete(sp->wait);
755}
756 750
757static void qla1280_mailbox_timeout(unsigned long __data) 751static void qla1280_mailbox_timeout(unsigned long __data)
758{ 752{
@@ -767,8 +761,67 @@ static void qla1280_mailbox_timeout(unsigned long __data)
767 complete(ha->mailbox_wait); 761 complete(ha->mailbox_wait);
768} 762}
769 763
764static int
765_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
766 struct completion *wait)
767{
768 int status = FAILED;
769 struct scsi_cmnd *cmd = sp->cmd;
770
771 spin_unlock_irq(ha->host->host_lock);
772 wait_for_completion_timeout(wait, 4*HZ);
773 spin_lock_irq(ha->host->host_lock);
774 sp->wait = NULL;
775 if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
776 status = SUCCESS;
777 (*cmd->scsi_done)(cmd);
778 }
779 return status;
780}
781
782static int
783qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
784{
785 DECLARE_COMPLETION_ONSTACK(wait);
786
787 sp->wait = &wait;
788 return _qla1280_wait_for_single_command(ha, sp, &wait);
789}
790
791static int
792qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
793{
794 int cnt;
795 int status;
796 struct srb *sp;
797 struct scsi_cmnd *cmd;
798
799 status = SUCCESS;
800
801 /*
802 * Wait for all commands with the designated bus/target
803 * to be completed by the firmware
804 */
805 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
806 sp = ha->outstanding_cmds[cnt];
807 if (sp) {
808 cmd = sp->cmd;
809
810 if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
811 continue;
812 if (target >= 0 && SCSI_TCN_32(cmd) != target)
813 continue;
814
815 status = qla1280_wait_for_single_command(ha, sp);
816 if (status == FAILED)
817 break;
818 }
819 }
820 return status;
821}
822
770/************************************************************************** 823/**************************************************************************
771 * qla1200_error_action 824 * qla1280_error_action
772 * The function will attempt to perform a specified error action and 825 * The function will attempt to perform a specified error action and
773 * wait for the results (or time out). 826 * wait for the results (or time out).
774 * 827 *
@@ -780,11 +833,6 @@ static void qla1280_mailbox_timeout(unsigned long __data)
780 * Returns: 833 * Returns:
781 * SUCCESS or FAILED 834 * SUCCESS or FAILED
782 * 835 *
783 * Note:
784 * Resetting the bus always succeeds - is has to, otherwise the
785 * kernel will panic! Try a surgical technique - sending a BUS
786 * DEVICE RESET message - on the offending target before pulling
787 * the SCSI bus reset line.
788 **************************************************************************/ 836 **************************************************************************/
789static int 837static int
790qla1280_error_action(struct scsi_cmnd *cmd, enum action action) 838qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
@@ -792,13 +840,19 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
792 struct scsi_qla_host *ha; 840 struct scsi_qla_host *ha;
793 int bus, target, lun; 841 int bus, target, lun;
794 struct srb *sp; 842 struct srb *sp;
795 uint16_t data; 843 int i, found;
796 unsigned char *handle; 844 int result=FAILED;
797 int result, i; 845 int wait_for_bus=-1;
846 int wait_for_target = -1;
798 DECLARE_COMPLETION_ONSTACK(wait); 847 DECLARE_COMPLETION_ONSTACK(wait);
799 struct timer_list timer; 848
849 ENTER("qla1280_error_action");
800 850
801 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata); 851 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
852 sp = (struct srb *)CMD_SP(cmd);
853 bus = SCSI_BUS_32(cmd);
854 target = SCSI_TCN_32(cmd);
855 lun = SCSI_LUN_32(cmd);
802 856
803 dprintk(4, "error_action %i, istatus 0x%04x\n", action, 857 dprintk(4, "error_action %i, istatus 0x%04x\n", action,
804 RD_REG_WORD(&ha->iobase->istatus)); 858 RD_REG_WORD(&ha->iobase->istatus));
@@ -807,99 +861,47 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
807 RD_REG_WORD(&ha->iobase->host_cmd), 861 RD_REG_WORD(&ha->iobase->host_cmd),
808 RD_REG_WORD(&ha->iobase->ictrl), jiffies); 862 RD_REG_WORD(&ha->iobase->ictrl), jiffies);
809 863
810 ENTER("qla1280_error_action");
811 if (qla1280_verbose) 864 if (qla1280_verbose)
812 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, " 865 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
813 "Handle=0x%p, action=0x%x\n", 866 "Handle=0x%p, action=0x%x\n",
814 ha->host_no, cmd, CMD_HANDLE(cmd), action); 867 ha->host_no, cmd, CMD_HANDLE(cmd), action);
815 868
816 if (cmd == NULL) {
817 printk(KERN_WARNING "(scsi?:?:?:?) Reset called with NULL "
818 "si_Cmnd pointer, failing.\n");
819 LEAVE("qla1280_error_action");
820 return FAILED;
821 }
822
823 ha = (struct scsi_qla_host *)cmd->device->host->hostdata;
824 sp = (struct srb *)CMD_SP(cmd);
825 handle = CMD_HANDLE(cmd);
826
827 /* Check for pending interrupts. */
828 data = qla1280_debounce_register(&ha->iobase->istatus);
829 /*
830 * The io_request_lock is held when the reset handler is called, hence
831 * the interrupt handler cannot be running in parallel as it also
832 * grabs the lock. /Jes
833 */
834 if (data & RISC_INT)
835 qla1280_isr(ha, &ha->done_q);
836
837 /* 869 /*
838 * Determine the suggested action that the mid-level driver wants 870 * Check to see if we have the command in the outstanding_cmds[]
839 * us to perform. 871 * array. If not then it must have completed before this error
872 * action was initiated. If the error_action isn't ABORT_COMMAND
873 * then the driver must proceed with the requested action.
840 */ 874 */
841 if (handle == (unsigned char *)INVALID_HANDLE || handle == NULL) { 875 found = -1;
842 if(action == ABORT_COMMAND) { 876 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
843 /* we never got this command */ 877 if (sp == ha->outstanding_cmds[i]) {
844 printk(KERN_INFO "qla1280: Aborting a NULL handle\n"); 878 found = i;
845 return SUCCESS; /* no action - we don't have command */ 879 sp->wait = &wait; /* we'll wait for it to complete */
880 break;
846 } 881 }
847 } else {
848 sp->wait = &wait;
849 } 882 }
850 883
851 bus = SCSI_BUS_32(cmd); 884 if (found < 0) { /* driver doesn't have command */
852 target = SCSI_TCN_32(cmd); 885 result = SUCCESS;
853 lun = SCSI_LUN_32(cmd); 886 if (qla1280_verbose) {
887 printk(KERN_INFO
888 "scsi(%ld:%d:%d:%d): specified command has "
889 "already completed.\n", ha->host_no, bus,
890 target, lun);
891 }
892 }
854 893
855 /* Overloading result. Here it means the success or fail of the
856 * *issue* of the action. When we return from the routine, it must
857 * mean the actual success or fail of the action */
858 result = FAILED;
859 switch (action) { 894 switch (action) {
860 case FAIL:
861 break;
862 895
863 case ABORT_COMMAND: 896 case ABORT_COMMAND:
864 if ((sp->flags & SRB_ABORT_PENDING)) { 897 dprintk(1, "qla1280: RISC aborting command\n");
865 printk(KERN_WARNING 898 /*
866 "scsi(): Command has a pending abort " 899 * The abort might fail due to race when the host_lock
867 "message - ABORT_PENDING.\n"); 900 * is released to issue the abort. As such, we
868 /* This should technically be impossible since we 901 * don't bother to check the return status.
869 * now wait for abort completion */ 902 */
870 break; 903 if (found >= 0)
871 } 904 qla1280_abort_command(ha, sp, found);
872
873 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
874 if (sp == ha->outstanding_cmds[i]) {
875 dprintk(1, "qla1280: RISC aborting command\n");
876 if (qla1280_abort_command(ha, sp, i) == 0)
877 result = SUCCESS;
878 else {
879 /*
880 * Since we don't know what might
881 * have happend to the command, it
882 * is unsafe to remove it from the
883 * device's queue at this point.
884 * Wait and let the escalation
885 * process take care of it.
886 */
887 printk(KERN_WARNING
888 "scsi(%li:%i:%i:%i): Unable"
889 " to abort command!\n",
890 ha->host_no, bus, target, lun);
891 }
892 }
893 }
894 break;
895
896 case ABORT_DEVICE:
897 if (qla1280_verbose)
898 printk(KERN_INFO
899 "scsi(%ld:%d:%d:%d): Queueing abort device "
900 "command.\n", ha->host_no, bus, target, lun);
901 if (qla1280_abort_device(ha, bus, target, lun) == 0)
902 result = SUCCESS;
903 break; 905 break;
904 906
905 case DEVICE_RESET: 907 case DEVICE_RESET:
@@ -907,16 +909,21 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
907 printk(KERN_INFO 909 printk(KERN_INFO
908 "scsi(%ld:%d:%d:%d): Queueing device reset " 910 "scsi(%ld:%d:%d:%d): Queueing device reset "
909 "command.\n", ha->host_no, bus, target, lun); 911 "command.\n", ha->host_no, bus, target, lun);
910 if (qla1280_device_reset(ha, bus, target) == 0) 912 if (qla1280_device_reset(ha, bus, target) == 0) {
911 result = SUCCESS; 913 /* issued device reset, set wait conditions */
914 wait_for_bus = bus;
915 wait_for_target = target;
916 }
912 break; 917 break;
913 918
914 case BUS_RESET: 919 case BUS_RESET:
915 if (qla1280_verbose) 920 if (qla1280_verbose)
916 printk(KERN_INFO "qla1280(%ld:%d): Issued bus " 921 printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
917 "reset.\n", ha->host_no, bus); 922 "reset.\n", ha->host_no, bus);
918 if (qla1280_bus_reset(ha, bus) == 0) 923 if (qla1280_bus_reset(ha, bus) == 0) {
919 result = SUCCESS; 924 /* issued bus reset, set wait conditions */
925 wait_for_bus = bus;
926 }
920 break; 927 break;
921 928
922 case ADAPTER_RESET: 929 case ADAPTER_RESET:
@@ -929,55 +936,48 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
929 "continue automatically\n", ha->host_no); 936 "continue automatically\n", ha->host_no);
930 } 937 }
931 ha->flags.reset_active = 1; 938 ha->flags.reset_active = 1;
932 /* 939
933 * We restarted all of the commands automatically, so the 940 if (qla1280_abort_isp(ha) != 0) { /* it's dead */
 934 * mid-level code can expect completions momentarily. 941 result = FAILED;
935 */ 942 }
936 if (qla1280_abort_isp(ha) == 0)
937 result = SUCCESS;
938 943
939 ha->flags.reset_active = 0; 944 ha->flags.reset_active = 0;
940 } 945 }
941 946
942 if (!list_empty(&ha->done_q)) 947 /*
943 qla1280_done(ha); 948 * At this point, the host_lock has been released and retaken
944 949 * by the issuance of the mailbox command.
945 /* If we didn't manage to issue the action, or we have no 950 * Wait for the command passed in by the mid-layer if it
946 * command to wait for, exit here */ 951 * was found by the driver. It might have been returned
947 if (result == FAILED || handle == NULL || 952 * between eh recovery steps, hence the check of the "found"
948 handle == (unsigned char *)INVALID_HANDLE) { 953 * variable.
949 /* 954 */
950 * Clear completion queue to avoid qla1280_done() trying
951 * to complete the command at a later stage after we
952 * have exited the current context
953 */
954 sp->wait = NULL;
955 goto leave;
956 }
957 955
958 /* set up a timer just in case we're really jammed */ 956 if (found >= 0)
959 init_timer(&timer); 957 result = _qla1280_wait_for_single_command(ha, sp, &wait);
960 timer.expires = jiffies + 4*HZ;
961 timer.data = (unsigned long)cmd;
962 timer.function = qla1280_error_wait_timeout;
963 add_timer(&timer);
964 958
965 /* wait for the action to complete (or the timer to expire) */ 959 if (action == ABORT_COMMAND && result != SUCCESS) {
966 spin_unlock_irq(ha->host->host_lock); 960 printk(KERN_WARNING
967 wait_for_completion(&wait); 961 "scsi(%li:%i:%i:%i): "
968 del_timer_sync(&timer); 962 "Unable to abort command!\n",
969 spin_lock_irq(ha->host->host_lock); 963 ha->host_no, bus, target, lun);
970 sp->wait = NULL; 964 }
971 965
972 /* the only action we might get a fail for is abort */ 966 /*
973 if (action == ABORT_COMMAND) { 967 * If the command passed in by the mid-layer has been
974 if(sp->flags & SRB_ABORTED) 968 * returned by the board, then wait for any additional
975 result = SUCCESS; 969 * commands which are supposed to complete based upon
976 else 970 * the error action.
977 result = FAILED; 971 *
972 * All commands are unconditionally returned during a
973 * call to qla1280_abort_isp(), ADAPTER_RESET. No need
974 * to wait for them.
975 */
976 if (result == SUCCESS && wait_for_bus >= 0) {
977 result = qla1280_wait_for_pending_commands(ha,
978 wait_for_bus, wait_for_target);
978 } 979 }
979 980
980 leave:
981 dprintk(1, "RESET returning %d\n", result); 981 dprintk(1, "RESET returning %d\n", result);
982 982
983 LEAVE("qla1280_error_action"); 983 LEAVE("qla1280_error_action");
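
The reworked handler drops the private four-second timer and instead parks a completion on the srb, leaving the bounded wait to _qla1280_wait_for_single_command(), whose body falls outside this hunk. The sketch below shows what such a helper plausibly looks like; the details are an assumption reconstructed from the call sites above (host_lock held on entry, COMPLETED_HANDLE stamped by the completion paths further down):

static int
_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
				 struct completion *wait)
{
	int status = FAILED;
	struct scsi_cmnd *cmd = sp->cmd;

	/* drop host_lock so the ISR and qla1280_done() can run */
	spin_unlock_irq(ha->host->host_lock);
	wait_for_completion_timeout(wait, 4*HZ);	/* bounded wait */
	spin_lock_irq(ha->host->host_lock);
	sp->wait = NULL;

	/* COMPLETED_HANDLE => the ISP returned the command while we slept */
	if (CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
		status = SUCCESS;
		(*cmd->scsi_done)(cmd);	/* done() was deferred to the waiter */
	}
	return status;
}
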
@@ -1280,13 +1280,12 @@ qla1280_done(struct scsi_qla_host *ha)
1280 switch ((CMD_RESULT(cmd) >> 16)) { 1280 switch ((CMD_RESULT(cmd) >> 16)) {
1281 case DID_RESET: 1281 case DID_RESET:
1282 /* Issue marker command. */ 1282 /* Issue marker command. */
1283 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); 1283 if (!ha->flags.abort_isp_active)
1284 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
1284 break; 1285 break;
1285 case DID_ABORT: 1286 case DID_ABORT:
1286 sp->flags &= ~SRB_ABORT_PENDING; 1287 sp->flags &= ~SRB_ABORT_PENDING;
1287 sp->flags |= SRB_ABORTED; 1288 sp->flags |= SRB_ABORTED;
1288 if (sp->flags & SRB_TIMEOUT)
1289 CMD_RESULT(sp->cmd) = DID_TIME_OUT << 16;
1290 break; 1289 break;
1291 default: 1290 default:
1292 break; 1291 break;
@@ -1296,12 +1295,11 @@ qla1280_done(struct scsi_qla_host *ha)
1296 scsi_dma_unmap(cmd); 1295 scsi_dma_unmap(cmd);
1297 1296
1298 /* Call the mid-level driver interrupt handler */ 1297 /* Call the mid-level driver interrupt handler */
1299 CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE;
1300 ha->actthreads--; 1298 ha->actthreads--;
1301 1299
1302 (*(cmd)->scsi_done)(cmd); 1300 if (sp->wait == NULL)
1303 1301 (*(cmd)->scsi_done)(cmd);
1304 if(sp->wait != NULL) 1302 else
1305 complete(sp->wait); 1303 complete(sp->wait);
1306 } 1304 }
1307 LEAVE("qla1280_done"); 1305 LEAVE("qla1280_done");
@@ -2417,9 +2415,6 @@ static int
2417qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) 2415qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2418{ 2416{
2419 struct device_reg __iomem *reg = ha->iobase; 2417 struct device_reg __iomem *reg = ha->iobase;
2420#if 0
2421 LIST_HEAD(done_q);
2422#endif
2423 int status = 0; 2418 int status = 0;
2424 int cnt; 2419 int cnt;
2425 uint16_t *optr, *iptr; 2420 uint16_t *optr, *iptr;
@@ -2493,19 +2488,9 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2493 mr = MAILBOX_REGISTER_COUNT; 2488 mr = MAILBOX_REGISTER_COUNT;
2494 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t)); 2489 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2495 2490
2496#if 0
2497 /* Go check for any response interrupts pending. */
2498 qla1280_isr(ha, &done_q);
2499#endif
2500
2501 if (ha->flags.reset_marker) 2491 if (ha->flags.reset_marker)
2502 qla1280_rst_aen(ha); 2492 qla1280_rst_aen(ha);
2503 2493
2504#if 0
2505 if (!list_empty(&done_q))
2506 qla1280_done(ha, &done_q);
2507#endif
2508
2509 if (status) 2494 if (status)
2510 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = " 2495 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2511 "0x%x ****\n", mb[0]); 2496 "0x%x ****\n", mb[0]);
@@ -2641,41 +2626,6 @@ qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2641} 2626}
2642 2627
2643/* 2628/*
2644 * qla1280_abort_device
2645 * Issue an abort message to the device
2646 *
2647 * Input:
2648 * ha = adapter block pointer.
2649 * bus = SCSI BUS.
2650 * target = SCSI ID.
2651 * lun = SCSI LUN.
2652 *
2653 * Returns:
2654 * 0 = success
2655 */
2656static int
2657qla1280_abort_device(struct scsi_qla_host *ha, int bus, int target, int lun)
2658{
2659 uint16_t mb[MAILBOX_REGISTER_COUNT];
2660 int status;
2661
2662 ENTER("qla1280_abort_device");
2663
2664 mb[0] = MBC_ABORT_DEVICE;
2665 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2666 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2667
2668 /* Issue marker command. */
2669 qla1280_marker(ha, bus, target, lun, MK_SYNC_ID_LUN);
2670
2671 if (status)
2672 dprintk(2, "qla1280_abort_device: **** FAILED ****\n");
2673
2674 LEAVE("qla1280_abort_device");
2675 return status;
2676}
2677
2678/*
2679 * qla1280_abort_command 2629 * qla1280_abort_command
2680 * Abort command aborts a specified IOCB. 2630 * Abort command aborts a specified IOCB.
2681 * 2631 *
@@ -2833,7 +2783,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2833 2783
2834 /* If room for request in request ring. */ 2784 /* If room for request in request ring. */
2835 if ((req_cnt + 2) >= ha->req_q_cnt) { 2785 if ((req_cnt + 2) >= ha->req_q_cnt) {
2836 status = 1; 2786 status = SCSI_MLQUEUE_HOST_BUSY;
2837 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt=" 2787 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
2838 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, 2788 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
2839 req_cnt); 2789 req_cnt);
@@ -2845,7 +2795,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2845 ha->outstanding_cmds[cnt] != NULL; cnt++); 2795 ha->outstanding_cmds[cnt] != NULL; cnt++);
2846 2796
2847 if (cnt >= MAX_OUTSTANDING_COMMANDS) { 2797 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2848 status = 1; 2798 status = SCSI_MLQUEUE_HOST_BUSY;
2849 dprintk(2, "qla1280_start_scsi: NO ROOM IN " 2799 dprintk(2, "qla1280_start_scsi: NO ROOM IN "
2850 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt); 2800 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2851 goto out; 2801 goto out;
@@ -3108,7 +3058,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3108 ha->req_q_cnt, seg_cnt); 3058 ha->req_q_cnt, seg_cnt);
3109 /* If room for request in request ring. */ 3059 /* If room for request in request ring. */
3110 if ((req_cnt + 2) >= ha->req_q_cnt) { 3060 if ((req_cnt + 2) >= ha->req_q_cnt) {
3111 status = 1; 3061 status = SCSI_MLQUEUE_HOST_BUSY;
3112 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, " 3062 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3113 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index, 3063 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3114 ha->req_q_cnt, req_cnt); 3064 ha->req_q_cnt, req_cnt);
@@ -3120,7 +3070,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3120 (ha->outstanding_cmds[cnt] != 0); cnt++) ; 3070 (ha->outstanding_cmds[cnt] != 0); cnt++) ;
3121 3071
3122 if (cnt >= MAX_OUTSTANDING_COMMANDS) { 3072 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3123 status = 1; 3073 status = SCSI_MLQUEUE_HOST_BUSY;
3124 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING " 3074 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3125 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt); 3075 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3126 goto out; 3076 goto out;
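
Both start_scsi variants now hand SCSI_MLQUEUE_HOST_BUSY back when the request ring or the outstanding-commands array is full, rather than the bare 1 they used to return, so the midlayer requeues the command instead of erroring it out. A minimal sketch of the convention (example_hba and ring_has_room() are hypothetical, and the queuecommand prototype is abbreviated):

struct example_hba {			/* hypothetical per-host state */
	int req_q_cnt;			/* free request-ring slots */
};

static bool ring_has_room(struct example_hba *hba, int req_cnt)
{
	return (req_cnt + 2) < hba->req_q_cnt;	/* keep slack, as above */
}

static int example_queuecommand(struct scsi_cmnd *cmd)
{
	struct example_hba *hba = shost_priv(cmd->device->host);

	if (!ring_has_room(hba, 1))
		return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer will retry */

	/* ... build the IOCB and notify the ISP ... */
	return 0;
}
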
@@ -3487,6 +3437,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3487 3437
3488 /* Save ISP completion status */ 3438 /* Save ISP completion status */
3489 CMD_RESULT(sp->cmd) = 0; 3439 CMD_RESULT(sp->cmd) = 0;
3440 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3490 3441
3491 /* Place block on done queue */ 3442 /* Place block on done queue */
3492 list_add_tail(&sp->list, done_q); 3443 list_add_tail(&sp->list, done_q);
@@ -3495,7 +3446,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3495 * If we get here we have a real problem! 3446 * If we get here we have a real problem!
3496 */ 3447 */
3497 printk(KERN_WARNING 3448 printk(KERN_WARNING
3498 "qla1280: ISP invalid handle"); 3449 "qla1280: ISP invalid handle\n");
3499 } 3450 }
3500 } 3451 }
3501 break; 3452 break;
@@ -3753,6 +3704,8 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3753 } 3704 }
3754 } 3705 }
3755 3706
3707 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3708
3756 /* Place command on done queue. */ 3709 /* Place command on done queue. */
3757 list_add_tail(&sp->list, done_q); 3710 list_add_tail(&sp->list, done_q);
3758 out: 3711 out:
@@ -3808,6 +3761,8 @@ qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3808 CMD_RESULT(sp->cmd) = DID_ERROR << 16; 3761 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3809 } 3762 }
3810 3763
3764 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3765
3811 /* Place command on done queue. */ 3766 /* Place command on done queue. */
3812 list_add_tail(&sp->list, done_q); 3767 list_add_tail(&sp->list, done_q);
3813 } 3768 }
@@ -3858,19 +3813,16 @@ qla1280_abort_isp(struct scsi_qla_host *ha)
3858 struct scsi_cmnd *cmd; 3813 struct scsi_cmnd *cmd;
3859 sp = ha->outstanding_cmds[cnt]; 3814 sp = ha->outstanding_cmds[cnt];
3860 if (sp) { 3815 if (sp) {
3861
3862 cmd = sp->cmd; 3816 cmd = sp->cmd;
3863 CMD_RESULT(cmd) = DID_RESET << 16; 3817 CMD_RESULT(cmd) = DID_RESET << 16;
3864 3818 CMD_HANDLE(cmd) = COMPLETED_HANDLE;
3865 sp->cmd = NULL;
3866 ha->outstanding_cmds[cnt] = NULL; 3819 ha->outstanding_cmds[cnt] = NULL;
3867 3820 list_add_tail(&sp->list, &ha->done_q);
3868 (*cmd->scsi_done)(cmd);
3869
3870 sp->flags = 0;
3871 } 3821 }
3872 } 3822 }
3873 3823
3824 qla1280_done(ha);
3825
3874 status = qla1280_load_firmware(ha); 3826 status = qla1280_load_firmware(ha);
3875 if (status) 3827 if (status)
3876 goto out; 3828 goto out;
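
qla1280_abort_isp() no longer completes commands inline with scsi_done(); every outstanding srb is failed with DID_RESET, stamped COMPLETED_HANDLE, and parked on done_q so that a single qla1280_done() pass finishes them all and, crucially, routes any command with sp->wait set to complete() so a sleeping error handler is woken. Condensed shape of the flush, as it reads after the patch:

for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
	struct srb *sp = ha->outstanding_cmds[cnt];

	if (!sp)
		continue;
	CMD_RESULT(sp->cmd) = DID_RESET << 16;
	CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;	/* no longer with the ISP */
	ha->outstanding_cmds[cnt] = NULL;
	list_add_tail(&sp->list, &ha->done_q);
}
qla1280_done(ha);	/* completes directly or wakes the eh waiter */
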
@@ -3955,13 +3907,6 @@ qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3955 3907
3956 if (scsi_control == SCSI_PHASE_INVALID) { 3908 if (scsi_control == SCSI_PHASE_INVALID) {
3957 ha->bus_settings[bus].scsi_bus_dead = 1; 3909 ha->bus_settings[bus].scsi_bus_dead = 1;
3958#if 0
3959 CMD_RESULT(cp) = DID_NO_CONNECT << 16;
3960 CMD_HANDLE(cp) = INVALID_HANDLE;
3961 /* ha->actthreads--; */
3962
3963 (*(cp)->scsi_done)(cp);
3964#endif
3965 return 1; /* bus is dead */ 3910 return 1; /* bus is dead */
3966 } else { 3911 } else {
3967 ha->bus_settings[bus].scsi_bus_dead = 0; 3912 ha->bus_settings[bus].scsi_bus_dead = 0;
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
index d7c44b8d2b4..834884b9eed 100644
--- a/drivers/scsi/qla1280.h
+++ b/drivers/scsi/qla1280.h
@@ -88,7 +88,8 @@
88 88
89/* Maximum outstanding commands in ISP queues */ 89/* Maximum outstanding commands in ISP queues */
90#define MAX_OUTSTANDING_COMMANDS 512 90#define MAX_OUTSTANDING_COMMANDS 512
91#define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS + 2) 91#define COMPLETED_HANDLE ((unsigned char *) \
92 (MAX_OUTSTANDING_COMMANDS + 2))
92 93
93/* ISP request and response entry counts (37-65535) */ 94/* ISP request and response entry counts (37-65535) */
94#define REQUEST_ENTRY_CNT 255 /* Number of request entries. */ 95#define REQUEST_ENTRY_CNT 255 /* Number of request entries. */
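
COMPLETED_HANDLE replaces INVALID_HANDLE with a value cast to a pointer one past anything the firmware can hand back, so it doubles as a per-command marker: once the completion paths store it in the command's handle field, a single compare answers whether the ISP has already returned the command. A sketch of the check, assuming the CMD_HANDLE() accessor from qla1280.c:

if (CMD_HANDLE(cmd) == COMPLETED_HANDLE)
	return SUCCESS;	/* already back from the ISP; nothing to abort */
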
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b09993a0657..0f879620150 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -97,7 +97,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
97 return 0; 97 return 0;
98 98
99 if (IS_NOCACHE_VPD_TYPE(ha)) 99 if (IS_NOCACHE_VPD_TYPE(ha))
100 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_nvram << 2, 100 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
101 ha->nvram_size); 101 ha->nvram_size);
102 return memory_read_from_buffer(buf, count, &off, ha->nvram, 102 return memory_read_from_buffer(buf, count, &off, ha->nvram,
103 ha->nvram_size); 103 ha->nvram_size);
@@ -692,6 +692,109 @@ static struct bin_attribute sysfs_edc_status_attr = {
692 .read = qla2x00_sysfs_read_edc_status, 692 .read = qla2x00_sysfs_read_edc_status,
693}; 693};
694 694
695static ssize_t
696qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
697 struct bin_attribute *bin_attr,
698 char *buf, loff_t off, size_t count)
699{
700 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
701 struct device, kobj)));
702 struct qla_hw_data *ha = vha->hw;
703 int rval;
704 uint16_t actual_size;
705
706 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
707 return 0;
708
709 if (ha->xgmac_data)
710 goto do_read;
711
712 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
713 &ha->xgmac_data_dma, GFP_KERNEL);
714 if (!ha->xgmac_data) {
715 qla_printk(KERN_WARNING, ha,
716 "Unable to allocate memory for XGMAC read-data.\n");
717 return 0;
718 }
719
720do_read:
721 actual_size = 0;
722 memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
723
724 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
725 XGMAC_DATA_SIZE, &actual_size);
726 if (rval != QLA_SUCCESS) {
727 qla_printk(KERN_WARNING, ha,
728 "Unable to read XGMAC data (%x).\n", rval);
729 count = 0;
730 }
731
732 count = actual_size > count ? count: actual_size;
733 memcpy(buf, ha->xgmac_data, count);
734
735 return count;
736}
737
738static struct bin_attribute sysfs_xgmac_stats_attr = {
739 .attr = {
740 .name = "xgmac_stats",
741 .mode = S_IRUSR,
742 },
743 .size = 0,
744 .read = qla2x00_sysfs_read_xgmac_stats,
745};
746
747static ssize_t
748qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
749 struct bin_attribute *bin_attr,
750 char *buf, loff_t off, size_t count)
751{
752 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
753 struct device, kobj)));
754 struct qla_hw_data *ha = vha->hw;
755 int rval;
756 uint16_t actual_size;
757
758 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
759 return 0;
760
761 if (ha->dcbx_tlv)
762 goto do_read;
763
764 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
765 &ha->dcbx_tlv_dma, GFP_KERNEL);
766 if (!ha->dcbx_tlv) {
767 qla_printk(KERN_WARNING, ha,
768 "Unable to allocate memory for DCBX TLV read-data.\n");
769 return 0;
770 }
771
772do_read:
773 actual_size = 0;
774 memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
775
776 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
777 DCBX_TLV_DATA_SIZE);
778 if (rval != QLA_SUCCESS) {
779 qla_printk(KERN_WARNING, ha,
780 "Unable to read DCBX TLV data (%x).\n", rval);
781 count = 0;
782 }
783
784 memcpy(buf, ha->dcbx_tlv, count);
785
786 return count;
787}
788
789static struct bin_attribute sysfs_dcbx_tlv_attr = {
790 .attr = {
791 .name = "dcbx_tlv",
792 .mode = S_IRUSR,
793 },
794 .size = 0,
795 .read = qla2x00_sysfs_read_dcbx_tlv,
796};
797
695static struct sysfs_entry { 798static struct sysfs_entry {
696 char *name; 799 char *name;
697 struct bin_attribute *attr; 800 struct bin_attribute *attr;
@@ -706,6 +809,8 @@ static struct sysfs_entry {
706 { "reset", &sysfs_reset_attr, }, 809 { "reset", &sysfs_reset_attr, },
707 { "edc", &sysfs_edc_attr, 2 }, 810 { "edc", &sysfs_edc_attr, 2 },
708 { "edc_status", &sysfs_edc_status_attr, 2 }, 811 { "edc_status", &sysfs_edc_status_attr, 2 },
812 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
813 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
709 { NULL }, 814 { NULL },
710}; 815};
711 816
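
Both new binary reads share one shape: allocate the DMA buffer the first time the file is read, cache it on qla_hw_data, zero it, let firmware DMA the data in, then copy out at most min(actual, count) bytes. A trimmed sketch of the pattern follows; fetch_stats() is hypothetical, standing in for qla2x00_get_xgmac_stats()/qla2x00_get_dcbx_params():

static ssize_t example_bin_read(struct device *dev, void **cache,
				dma_addr_t *cache_dma, char *buf, size_t count)
{
	uint16_t actual = 0;

	if (!*cache) {	/* first read: allocate once, keep for the adapter */
		*cache = dma_alloc_coherent(dev, PAGE_SIZE, cache_dma,
		    GFP_KERNEL);
		if (!*cache)
			return 0;	/* sysfs: short read, not an errno */
	}

	memset(*cache, 0, PAGE_SIZE);
	if (fetch_stats(dev, *cache_dma, PAGE_SIZE, &actual) != 0)
		return 0;

	count = min_t(size_t, count, actual);
	memcpy(buf, *cache, count);
	return count;
}
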
@@ -721,6 +826,8 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
721 continue; 826 continue;
722 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw)) 827 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
723 continue; 828 continue;
829 if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
830 continue;
724 831
725 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 832 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
726 iter->attr); 833 iter->attr);
@@ -743,6 +850,8 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
743 continue; 850 continue;
744 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha)) 851 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
745 continue; 852 continue;
853 if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
854 continue;
746 855
747 sysfs_remove_bin_file(&host->shost_gendev.kobj, 856 sysfs_remove_bin_file(&host->shost_gendev.kobj,
748 iter->attr); 857 iter->attr);
@@ -1088,6 +1197,58 @@ qla2x00_flash_block_size_show(struct device *dev,
1088 return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size); 1197 return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1089} 1198}
1090 1199
1200static ssize_t
1201qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1202 char *buf)
1203{
1204 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1205
1206 if (!IS_QLA81XX(vha->hw))
1207 return snprintf(buf, PAGE_SIZE, "\n");
1208
1209 return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1210}
1211
1212static ssize_t
1213qla2x00_vn_port_mac_address_show(struct device *dev,
1214 struct device_attribute *attr, char *buf)
1215{
1216 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1217
1218 if (!IS_QLA81XX(vha->hw))
1219 return snprintf(buf, PAGE_SIZE, "\n");
1220
1221 return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
1222 vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
1223 vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
1224 vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
1225}
1226
1227static ssize_t
1228qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1229 char *buf)
1230{
1231 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1232
1233 return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1234}
1235
1236static ssize_t
1237qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1238 char *buf)
1239{
1240 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1241 int rval;
1242 uint16_t state[5];
1243
1244 rval = qla2x00_get_firmware_state(vha, state);
1245 if (rval != QLA_SUCCESS)
1246 memset(state, -1, sizeof(state));
1247
1248 return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
1249 state[1], state[2], state[3], state[4]);
1250}
1251
1091static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); 1252static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1092static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 1253static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1093static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); 1254static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
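
The four new host attributes are all read-only DEVICE_ATTRs cut from the same cloth: recover the vha from the class device, gate ISP81xx-only data behind IS_QLA81XX(), and snprintf one value per file. For illustration only, the same style applied to fcoe_fcf_idx (a field the qla_def.h hunk below adds; this particular attribute is not part of the patch):

static ssize_t
qla2x00_fcf_idx_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (!IS_QLA81XX(vha->hw))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_fcf_idx);
}
static DEVICE_ATTR(fcf_idx, S_IRUGO, qla2x00_fcf_idx_show, NULL);

Registration then amounts to one more entry in qla2x00_host_attrs[].
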
@@ -1116,6 +1277,11 @@ static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
1116static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL); 1277static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
1117static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show, 1278static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
1118 NULL); 1279 NULL);
1280static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
1281static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1282 qla2x00_vn_port_mac_address_show, NULL);
1283static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1284static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1119 1285
1120struct device_attribute *qla2x00_host_attrs[] = { 1286struct device_attribute *qla2x00_host_attrs[] = {
1121 &dev_attr_driver_version, 1287 &dev_attr_driver_version,
@@ -1138,6 +1304,10 @@ struct device_attribute *qla2x00_host_attrs[] = {
1138 &dev_attr_mpi_version, 1304 &dev_attr_mpi_version,
1139 &dev_attr_phy_version, 1305 &dev_attr_phy_version,
1140 &dev_attr_flash_block_size, 1306 &dev_attr_flash_block_size,
1307 &dev_attr_vlan_id,
1308 &dev_attr_vn_port_mac_address,
1309 &dev_attr_fabric_param,
1310 &dev_attr_fw_state,
1141 NULL, 1311 NULL,
1142}; 1312};
1143 1313
@@ -1313,7 +1483,8 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
1313 * At this point all fcport's software-states are cleared. Perform any 1483 * At this point all fcport's software-states are cleared. Perform any
1314 * final cleanup of firmware resources (PCBs and XCBs). 1484 * final cleanup of firmware resources (PCBs and XCBs).
1315 */ 1485 */
1316 if (fcport->loop_id != FC_NO_LOOP_ID) 1486 if (fcport->loop_id != FC_NO_LOOP_ID &&
1487 !test_bit(UNLOADING, &fcport->vha->dpc_flags))
1317 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, 1488 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1318 fcport->loop_id, fcport->d_id.b.domain, 1489 fcport->loop_id, fcport->d_id.b.domain,
1319 fcport->d_id.b.area, fcport->d_id.b.al_pa); 1490 fcport->d_id.b.area, fcport->d_id.b.al_pa);
@@ -1437,11 +1608,13 @@ static int
1437qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) 1608qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1438{ 1609{
1439 int ret = 0; 1610 int ret = 0;
1440 int cnt = 0; 1611 uint8_t qos = 0;
1441 uint8_t qos = QLA_DEFAULT_QUE_QOS;
1442 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); 1612 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1443 scsi_qla_host_t *vha = NULL; 1613 scsi_qla_host_t *vha = NULL;
1444 struct qla_hw_data *ha = base_vha->hw; 1614 struct qla_hw_data *ha = base_vha->hw;
1615 uint16_t options = 0;
1616 int cnt;
1617 struct req_que *req = ha->req_q_map[0];
1445 1618
1446 ret = qla24xx_vport_create_req_sanity_check(fc_vport); 1619 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1447 if (ret) { 1620 if (ret) {
@@ -1497,23 +1670,39 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1497 1670
1498 qla24xx_vport_disable(fc_vport, disable); 1671 qla24xx_vport_disable(fc_vport, disable);
1499 1672
1500 /* Create a queue pair for the vport */ 1673 if (ql2xmultique_tag) {
1501 if (ha->mqenable) { 1674 req = ha->req_q_map[1];
1502 if (ha->npiv_info) { 1675 goto vport_queue;
1503 for (; cnt < ha->nvram_npiv_size; cnt++) { 1676 } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
1504 if (ha->npiv_info[cnt].port_name == 1677 goto vport_queue;
1505 vha->port_name && 1678 /* Create a request queue in QoS mode for the vport */
1506 ha->npiv_info[cnt].node_name == 1679 for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
1507 vha->node_name) { 1680 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
1508 qos = ha->npiv_info[cnt].q_qos; 1681 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
1509 break; 1682 8) == 0) {
1510 } 1683 qos = ha->npiv_info[cnt].q_qos;
1511 } 1684 break;
1685 }
1686 }
1687 if (qos) {
1688 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1689 qos);
1690 if (!ret)
1691 qla_printk(KERN_WARNING, ha,
1692 "Can't create request queue for vp_idx:%d\n",
1693 vha->vp_idx);
1694 else {
1695 DEBUG2(qla_printk(KERN_INFO, ha,
1696 "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
1697 ret, qos, vha->vp_idx));
1698 req = ha->req_q_map[ret];
1512 } 1699 }
1513 qla25xx_create_queues(vha, qos);
1514 } 1700 }
1515 1701
1702vport_queue:
1703 vha->req = req;
1516 return 0; 1704 return 0;
1705
1517vport_create_failed_2: 1706vport_create_failed_2:
1518 qla24xx_disable_vp(vha); 1707 qla24xx_disable_vp(vha);
1519 qla24xx_deallocate_vp_id(vha); 1708 qla24xx_deallocate_vp_id(vha);
@@ -1554,8 +1743,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1554 vha->host_no, vha->vp_idx, vha)); 1743 vha->host_no, vha->vp_idx, vha));
1555 } 1744 }
1556 1745
1557 if (ha->mqenable) { 1746 if (vha->req->id && !ql2xmultique_tag) {
1558 if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS) 1747 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1559 qla_printk(KERN_WARNING, ha, 1748 qla_printk(KERN_WARNING, ha,
1560 "Queue delete failed.\n"); 1749 "Queue delete failed.\n");
1561 } 1750 }
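
Vport creation now binds every vport to a request queue: multiqueue-tag mode shares queue 1, and otherwise a QoS request queue is created only when the NVRAM NPIV table carries an entry whose WWNs match the vport's, with the comparison done as an explicit memcmp() over the 8-byte names. Pulled out as a helper for clarity (a sketch; field names as used in the hunk, WWN_SIZE == 8):

static bool npiv_entry_matches(const struct qla_npiv_entry *npiv,
			       const uint8_t *port_name,
			       const uint8_t *node_name)
{
	return memcmp(npiv->port_name, port_name, WWN_SIZE) == 0 &&
	       memcmp(npiv->node_name, node_name, WWN_SIZE) == 0;
}
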
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 34760f8d4f1..4a990f4da4e 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -149,11 +149,9 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
149 int rval = QLA_SUCCESS; 149 int rval = QLA_SUCCESS;
150 uint32_t cnt; 150 uint32_t cnt;
151 151
152 if (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE)
153 return rval;
154
155 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE); 152 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
156 for (cnt = 30000; (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0 && 153 for (cnt = 30000;
154 ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) &&
157 rval == QLA_SUCCESS; cnt--) { 155 rval == QLA_SUCCESS; cnt--) {
158 if (cnt) 156 if (cnt)
159 udelay(100); 157 udelay(100);
@@ -351,7 +349,7 @@ static inline void *
351qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) 349qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
352{ 350{
353 uint32_t cnt, que_idx; 351 uint32_t cnt, que_idx;
354 uint8_t req_cnt, rsp_cnt, que_cnt; 352 uint8_t que_cnt;
355 struct qla2xxx_mq_chain *mq = ptr; 353 struct qla2xxx_mq_chain *mq = ptr;
356 struct device_reg_25xxmq __iomem *reg; 354 struct device_reg_25xxmq __iomem *reg;
357 355
@@ -363,9 +361,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
363 mq->type = __constant_htonl(DUMP_CHAIN_MQ); 361 mq->type = __constant_htonl(DUMP_CHAIN_MQ);
364 mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain)); 362 mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
365 363
366 req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues); 364 que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
367 rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); 365 ha->max_req_queues : ha->max_rsp_queues;
368 que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
369 mq->count = htonl(que_cnt); 366 mq->count = htonl(que_cnt);
370 for (cnt = 0; cnt < que_cnt; cnt++) { 367 for (cnt = 0; cnt < que_cnt; cnt++) {
371 reg = (struct device_reg_25xxmq *) ((void *) 368 reg = (struct device_reg_25xxmq *) ((void *)
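
qla24xx_pause_risc() no longer short-circuits on HCCRX_RISC_PAUSE (the stale bit definition is deleted from qla_fw.h further down); it always issues SET_RISC_PAUSE and then polls host_status for HSRX_RISC_PAUSED in a bounded udelay loop. The generic form of such a poll, using plain readl/writel in place of the driver's register wrappers:

/* write a command, then poll a status bit with a fixed budget
 * (30000 x 100us here, roughly 3 seconds) before giving up */
static int pause_and_wait(void __iomem *cmd_reg, void __iomem *status_reg,
			  u32 set_cmd, u32 done_bit)
{
	u32 cnt;

	writel(set_cmd, cmd_reg);
	for (cnt = 30000; cnt; cnt--) {
		if (readl(status_reg) & done_bit)
			return 0;
		udelay(100);
	}
	return -ETIMEDOUT;
}
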
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 714ee67567e..00aa48d975a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -93,6 +93,7 @@
93#define LSD(x) ((uint32_t)((uint64_t)(x))) 93#define LSD(x) ((uint32_t)((uint64_t)(x)))
94#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) 94#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
95 95
96#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y)))
96 97
97/* 98/*
98 * I/O register 99 * I/O register
@@ -179,6 +180,7 @@
179#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */ 180#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */
180#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ 181#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
181#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ 182#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
183#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
182 184
183struct req_que; 185struct req_que;
184 186
@@ -186,7 +188,6 @@ struct req_que;
186 * SCSI Request Block 188 * SCSI Request Block
187 */ 189 */
188typedef struct srb { 190typedef struct srb {
189 struct req_que *que;
190 struct fc_port *fcport; 191 struct fc_port *fcport;
191 192
192 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ 193 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
@@ -2008,7 +2009,7 @@ typedef struct vport_params {
2008#define VP_RET_CODE_NOT_FOUND 6 2009#define VP_RET_CODE_NOT_FOUND 6
2009 2010
2010struct qla_hw_data; 2011struct qla_hw_data;
2011 2012struct rsp_que;
2012/* 2013/*
2013 * ISP operations 2014 * ISP operations
2014 */ 2015 */
@@ -2030,10 +2031,9 @@ struct isp_operations {
2030 void (*enable_intrs) (struct qla_hw_data *); 2031 void (*enable_intrs) (struct qla_hw_data *);
2031 void (*disable_intrs) (struct qla_hw_data *); 2032 void (*disable_intrs) (struct qla_hw_data *);
2032 2033
2033 int (*abort_command) (struct scsi_qla_host *, srb_t *, 2034 int (*abort_command) (srb_t *);
2034 struct req_que *); 2035 int (*target_reset) (struct fc_port *, unsigned int, int);
2035 int (*target_reset) (struct fc_port *, unsigned int); 2036 int (*lun_reset) (struct fc_port *, unsigned int, int);
2036 int (*lun_reset) (struct fc_port *, unsigned int);
2037 int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, 2037 int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t,
2038 uint8_t, uint8_t, uint16_t *, uint8_t); 2038 uint8_t, uint8_t, uint16_t *, uint8_t);
2039 int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t, 2039 int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t,
@@ -2079,7 +2079,6 @@ struct isp_operations {
2079#define QLA_PCI_MSIX_CONTROL 0xa2 2079#define QLA_PCI_MSIX_CONTROL 0xa2
2080 2080
2081struct scsi_qla_host; 2081struct scsi_qla_host;
2082struct rsp_que;
2083 2082
2084struct qla_msix_entry { 2083struct qla_msix_entry {
2085 int have_irq; 2084 int have_irq;
@@ -2140,7 +2139,6 @@ struct qla_statistics {
2140#define MBC_INITIALIZE_MULTIQ 0x1f 2139#define MBC_INITIALIZE_MULTIQ 0x1f
2141#define QLA_QUE_PAGE 0X1000 2140#define QLA_QUE_PAGE 0X1000
2142#define QLA_MQ_SIZE 32 2141#define QLA_MQ_SIZE 32
2143#define QLA_MAX_HOST_QUES 16
2144#define QLA_MAX_QUEUES 256 2142#define QLA_MAX_QUEUES 256
2145#define ISP_QUE_REG(ha, id) \ 2143#define ISP_QUE_REG(ha, id) \
2146 ((ha->mqenable) ? \ 2144 ((ha->mqenable) ? \
@@ -2170,6 +2168,8 @@ struct rsp_que {
2170 struct qla_hw_data *hw; 2168 struct qla_hw_data *hw;
2171 struct qla_msix_entry *msix; 2169 struct qla_msix_entry *msix;
2172 struct req_que *req; 2170 struct req_que *req;
2171 srb_t *status_srb; /* status continuation entry */
2172 struct work_struct q_work;
2173}; 2173};
2174 2174
2175/* Request queue data structure */ 2175/* Request queue data structure */
@@ -2222,6 +2222,8 @@ struct qla_hw_data {
2222 uint32_t fce_enabled :1; 2222 uint32_t fce_enabled :1;
2223 uint32_t fac_supported :1; 2223 uint32_t fac_supported :1;
2224 uint32_t chip_reset_done :1; 2224 uint32_t chip_reset_done :1;
2225 uint32_t port0 :1;
2226 uint32_t running_gold_fw :1;
2225 } flags; 2227 } flags;
2226 2228
2227 /* This spinlock is used to protect "io transactions", you must 2229 /* This spinlock is used to protect "io transactions", you must
@@ -2246,7 +2248,8 @@ struct qla_hw_data {
2246 struct rsp_que **rsp_q_map; 2248 struct rsp_que **rsp_q_map;
2247 unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; 2249 unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
2248 unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; 2250 unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
2249 uint16_t max_queues; 2251 uint8_t max_req_queues;
2252 uint8_t max_rsp_queues;
2250 struct qla_npiv_entry *npiv_info; 2253 struct qla_npiv_entry *npiv_info;
2251 uint16_t nvram_npiv_size; 2254 uint16_t nvram_npiv_size;
2252 2255
@@ -2255,6 +2258,9 @@ struct qla_hw_data {
2255#define FLOGI_MID_SUPPORT BIT_10 2258#define FLOGI_MID_SUPPORT BIT_10
2256#define FLOGI_VSAN_SUPPORT BIT_12 2259#define FLOGI_VSAN_SUPPORT BIT_12
2257#define FLOGI_SP_SUPPORT BIT_13 2260#define FLOGI_SP_SUPPORT BIT_13
2261
2262 uint8_t port_no; /* Physical port of adapter */
2263
2258 /* Timeout timers. */ 2264 /* Timeout timers. */
2259 uint8_t loop_down_abort_time; /* port down timer */ 2265 uint8_t loop_down_abort_time; /* port down timer */
2260 atomic_t loop_down_timer; /* loop down timer */ 2266 atomic_t loop_down_timer; /* loop down timer */
@@ -2392,6 +2398,14 @@ struct qla_hw_data {
2392 dma_addr_t edc_data_dma; 2398 dma_addr_t edc_data_dma;
2393 uint16_t edc_data_len; 2399 uint16_t edc_data_len;
2394 2400
2401#define XGMAC_DATA_SIZE PAGE_SIZE
2402 void *xgmac_data;
2403 dma_addr_t xgmac_data_dma;
2404
2405#define DCBX_TLV_DATA_SIZE PAGE_SIZE
2406 void *dcbx_tlv;
2407 dma_addr_t dcbx_tlv_dma;
2408
2395 struct task_struct *dpc_thread; 2409 struct task_struct *dpc_thread;
2396 uint8_t dpc_active; /* DPC routine is active */ 2410 uint8_t dpc_active; /* DPC routine is active */
2397 2411
@@ -2510,6 +2524,7 @@ struct qla_hw_data {
2510 uint32_t flt_region_vpd; 2524 uint32_t flt_region_vpd;
2511 uint32_t flt_region_nvram; 2525 uint32_t flt_region_nvram;
2512 uint32_t flt_region_npiv_conf; 2526 uint32_t flt_region_npiv_conf;
2527 uint32_t flt_region_gold_fw;
2513 2528
2514 /* Needed for BEACON */ 2529 /* Needed for BEACON */
2515 uint16_t beacon_blink_led; 2530 uint16_t beacon_blink_led;
@@ -2536,6 +2551,7 @@ struct qla_hw_data {
2536 struct qla_chip_state_84xx *cs84xx; 2551 struct qla_chip_state_84xx *cs84xx;
2537 struct qla_statistics qla_stats; 2552 struct qla_statistics qla_stats;
2538 struct isp_operations *isp_ops; 2553 struct isp_operations *isp_ops;
2554 struct workqueue_struct *wq;
2539}; 2555};
2540 2556
2541/* 2557/*
@@ -2545,6 +2561,8 @@ typedef struct scsi_qla_host {
2545 struct list_head list; 2561 struct list_head list;
2546 struct list_head vp_fcports; /* list of fcports */ 2562 struct list_head vp_fcports; /* list of fcports */
2547 struct list_head work_list; 2563 struct list_head work_list;
2564 spinlock_t work_lock;
2565
2548 /* Commonly used flags and state information. */ 2566 /* Commonly used flags and state information. */
2549 struct Scsi_Host *host; 2567 struct Scsi_Host *host;
2550 unsigned long host_no; 2568 unsigned long host_no;
@@ -2591,8 +2609,6 @@ typedef struct scsi_qla_host {
2591#define SWITCH_FOUND BIT_0 2609#define SWITCH_FOUND BIT_0
2592#define DFLG_NO_CABLE BIT_1 2610#define DFLG_NO_CABLE BIT_1
2593 2611
2594 srb_t *status_srb; /* Status continuation entry. */
2595
2596 /* ISP configuration data. */ 2612 /* ISP configuration data. */
2597 uint16_t loop_id; /* Host adapter loop id */ 2613 uint16_t loop_id; /* Host adapter loop id */
2598 2614
@@ -2618,6 +2634,11 @@ typedef struct scsi_qla_host {
2618 uint8_t node_name[WWN_SIZE]; 2634 uint8_t node_name[WWN_SIZE];
2619 uint8_t port_name[WWN_SIZE]; 2635 uint8_t port_name[WWN_SIZE];
2620 uint8_t fabric_node_name[WWN_SIZE]; 2636 uint8_t fabric_node_name[WWN_SIZE];
2637
2638 uint16_t fcoe_vlan_id;
2639 uint16_t fcoe_fcf_idx;
2640 uint8_t fcoe_vn_port_mac[6];
2641
2621 uint32_t vp_abort_cnt; 2642 uint32_t vp_abort_cnt;
2622 2643
2623 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ 2644 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
@@ -2643,7 +2664,7 @@ typedef struct scsi_qla_host {
2643#define VP_ERR_FAB_LOGOUT 4 2664#define VP_ERR_FAB_LOGOUT 4
2644#define VP_ERR_ADAP_NORESOURCES 5 2665#define VP_ERR_ADAP_NORESOURCES 5
2645 struct qla_hw_data *hw; 2666 struct qla_hw_data *hw;
2646 int req_ques[QLA_MAX_HOST_QUES]; 2667 struct req_que *req;
2647} scsi_qla_host_t; 2668} scsi_qla_host_t;
2648 2669
2649/* 2670/*
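
MAKE_HANDLE packs the request-queue number into bits 31:16 of the 32-bit IOCB handle and the outstanding-command index into bits 15:0, which is what lets the completion path recover both the queue and the slot from the handle the firmware echoes back. The split made explicit (the unpack expressions are illustrative, not macros from the patch):

uint32_t handle = MAKE_HANDLE(que_id, index);	/* queue 31:16, index 15:0 */
uint16_t que = (uint16_t)(handle >> 16);	/* which request queue */
uint16_t idx = (uint16_t)(handle & 0xffff);	/* outstanding_cmds slot */
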
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 96ccb9642ba..dfde2dd865c 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -878,7 +878,6 @@ struct device_reg_24xx {
878 /* HCCR statuses. */ 878 /* HCCR statuses. */
879#define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */ 879#define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */
880#define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */ 880#define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */
881#define HCCRX_RISC_PAUSE BIT_4 /* RISC Pause mode bit. */
882 /* HCCR commands. */ 881 /* HCCR commands. */
883 /* NOOP. */ 882 /* NOOP. */
884#define HCCRX_NOOP 0x00000000 883#define HCCRX_NOOP 0x00000000
@@ -1241,6 +1240,7 @@ struct qla_flt_header {
1241#define FLT_REG_HW_EVENT_1 0x1f 1240#define FLT_REG_HW_EVENT_1 0x1f
1242#define FLT_REG_NPIV_CONF_0 0x29 1241#define FLT_REG_NPIV_CONF_0 0x29
1243#define FLT_REG_NPIV_CONF_1 0x2a 1242#define FLT_REG_NPIV_CONF_1 0x2a
1243#define FLT_REG_GOLD_FW 0x2f
1244 1244
1245struct qla_flt_region { 1245struct qla_flt_region {
1246 uint32_t code; 1246 uint32_t code;
@@ -1405,6 +1405,8 @@ struct access_chip_rsp_84xx {
1405#define MBC_IDC_ACK 0x101 1405#define MBC_IDC_ACK 0x101
1406#define MBC_RESTART_MPI_FW 0x3d 1406#define MBC_RESTART_MPI_FW 0x3d
1407#define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. */ 1407#define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. */
1408#define MBC_GET_XGMAC_STATS 0x7a
1409#define MBC_GET_DCBX_PARAMS 0x51
1408 1410
1409/* Flash access control option field bit definitions */ 1411/* Flash access control option field bit definitions */
1410#define FAC_OPT_FORCE_SEMAPHORE BIT_15 1412#define FAC_OPT_FORCE_SEMAPHORE BIT_15
@@ -1711,7 +1713,7 @@ struct ex_init_cb_81xx {
1711#define FA_VPD0_ADDR_81 0xD0000 1713#define FA_VPD0_ADDR_81 0xD0000
1712#define FA_VPD1_ADDR_81 0xD0400 1714#define FA_VPD1_ADDR_81 0xD0400
1713#define FA_NVRAM0_ADDR_81 0xD0080 1715#define FA_NVRAM0_ADDR_81 0xD0080
1714#define FA_NVRAM1_ADDR_81 0xD0480 1716#define FA_NVRAM1_ADDR_81 0xD0180
1715#define FA_FEATURE_ADDR_81 0xD4000 1717#define FA_FEATURE_ADDR_81 0xD4000
1716#define FA_FLASH_DESCR_ADDR_81 0xD8000 1718#define FA_FLASH_DESCR_ADDR_81 0xD8000
1717#define FA_FLASH_LAYOUT_ADDR_81 0xD8400 1719#define FA_FLASH_LAYOUT_ADDR_81 0xD8400
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 528913f6bed..65b12d82867 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -65,8 +65,11 @@ extern int ql2xfdmienable;
65extern int ql2xallocfwdump; 65extern int ql2xallocfwdump;
66extern int ql2xextended_error_logging; 66extern int ql2xextended_error_logging;
67extern int ql2xqfullrampup; 67extern int ql2xqfullrampup;
68extern int ql2xqfulltracking;
68extern int ql2xiidmaenable; 69extern int ql2xiidmaenable;
69extern int ql2xmaxqueues; 70extern int ql2xmaxqueues;
71extern int ql2xmultique_tag;
72extern int ql2xfwloadbin;
70 73
71extern int qla2x00_loop_reset(scsi_qla_host_t *); 74extern int qla2x00_loop_reset(scsi_qla_host_t *);
72extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 75extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -145,7 +148,7 @@ qla2x00_dump_ram(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
145extern int 148extern int
146qla2x00_execute_fw(scsi_qla_host_t *, uint32_t); 149qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
147 150
148extern void 151extern int
149qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *, 152qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *,
150 uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *); 153 uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *);
151 154
@@ -165,13 +168,13 @@ extern int
165qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); 168qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
166 169
167extern int 170extern int
168qla2x00_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); 171qla2x00_abort_command(srb_t *);
169 172
170extern int 173extern int
171qla2x00_abort_target(struct fc_port *, unsigned int); 174qla2x00_abort_target(struct fc_port *, unsigned int, int);
172 175
173extern int 176extern int
174qla2x00_lun_reset(struct fc_port *, unsigned int); 177qla2x00_lun_reset(struct fc_port *, unsigned int, int);
175 178
176extern int 179extern int
177qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, 180qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
@@ -236,9 +239,11 @@ extern int
236qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, 239qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
237 dma_addr_t); 240 dma_addr_t);
238 241
239extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); 242extern int qla24xx_abort_command(srb_t *);
240extern int qla24xx_abort_target(struct fc_port *, unsigned int); 243extern int
241extern int qla24xx_lun_reset(struct fc_port *, unsigned int); 244qla24xx_abort_target(struct fc_port *, unsigned int, int);
245extern int
246qla24xx_lun_reset(struct fc_port *, unsigned int, int);
242 247
243extern int 248extern int
244qla2x00_system_error(scsi_qla_host_t *); 249qla2x00_system_error(scsi_qla_host_t *);
@@ -288,6 +293,18 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *, int);
288extern int 293extern int
289qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t); 294qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t);
290 295
296extern int
297qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *);
298
299extern int
300qla2x00_get_dcbx_params(scsi_qla_host_t *, dma_addr_t, uint16_t);
301
302extern int
303qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *);
304
305extern int
306qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
307
291/* 308/*
292 * Global Function Prototypes in qla_isr.c source file. 309 * Global Function Prototypes in qla_isr.c source file.
293 */ 310 */
@@ -295,8 +312,8 @@ extern irqreturn_t qla2100_intr_handler(int, void *);
295extern irqreturn_t qla2300_intr_handler(int, void *); 312extern irqreturn_t qla2300_intr_handler(int, void *);
296extern irqreturn_t qla24xx_intr_handler(int, void *); 313extern irqreturn_t qla24xx_intr_handler(int, void *);
297extern void qla2x00_process_response_queue(struct rsp_que *); 314extern void qla2x00_process_response_queue(struct rsp_que *);
298extern void qla24xx_process_response_queue(struct rsp_que *); 315extern void
299 316qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *);
300extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *); 317extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
301extern void qla2x00_free_irqs(scsi_qla_host_t *); 318extern void qla2x00_free_irqs(scsi_qla_host_t *);
302 319
@@ -401,19 +418,21 @@ extern int qla25xx_request_irq(struct rsp_que *);
401extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *); 418extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
402extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *); 419extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
403extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, 420extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
404 uint16_t, uint8_t, uint8_t); 421 uint16_t, int, uint8_t);
405extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, 422extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
406 uint16_t); 423 uint16_t, int);
407extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t); 424extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t);
408extern void qla2x00_init_response_q_entries(struct rsp_que *); 425extern void qla2x00_init_response_q_entries(struct rsp_que *);
409extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *); 426extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
410extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *); 427extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
411extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t); 428extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t);
412extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t); 429extern int qla25xx_delete_queues(struct scsi_qla_host *);
413extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t); 430extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
414extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t); 431extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
415extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); 432extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
416extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); 433extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
417extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 434extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
418extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 435extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
436extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *);
437
419#endif /* _QLA_GBL_H */ 438#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 557f58d5bf8..917534b9f22 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1107,7 +1107,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1107 return ret; 1107 return ret;
1108 1108
1109 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, 1109 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
1110 mb, BIT_1); 1110 mb, BIT_1|BIT_0);
1111 if (mb[0] != MBS_COMMAND_COMPLETE) { 1111 if (mb[0] != MBS_COMMAND_COMPLETE) {
1112 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: " 1112 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
1113 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n", 1113 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
@@ -1879,6 +1879,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1879 case BIT_13: 1879 case BIT_13:
1880 list[i].fp_speed = PORT_SPEED_4GB; 1880 list[i].fp_speed = PORT_SPEED_4GB;
1881 break; 1881 break;
1882 case BIT_12:
1883 list[i].fp_speed = PORT_SPEED_10GB;
1884 break;
1882 case BIT_11: 1885 case BIT_11:
1883 list[i].fp_speed = PORT_SPEED_8GB; 1886 list[i].fp_speed = PORT_SPEED_8GB;
1884 break; 1887 break;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index bd7dd84c064..26202612932 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -634,7 +634,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
634 goto chip_diag_failed; 634 goto chip_diag_failed;
635 635
636 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n", 636 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
637 ha->host_no)); 637 vha->host_no));
638 638
639 /* Reset RISC processor. */ 639 /* Reset RISC processor. */
640 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 640 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@@ -655,7 +655,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
655 goto chip_diag_failed; 655 goto chip_diag_failed;
656 656
657 /* Check product ID of chip */ 657 /* Check product ID of chip */
658 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", ha->host_no)); 658 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no));
659 659
660 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 660 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
661 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 661 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
@@ -730,9 +730,6 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
730 struct qla_hw_data *ha = vha->hw; 730 struct qla_hw_data *ha = vha->hw;
731 struct req_que *req = ha->req_q_map[0]; 731 struct req_que *req = ha->req_q_map[0];
732 732
733 /* Perform RISC reset. */
734 qla24xx_reset_risc(vha);
735
736 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; 733 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
737 734
738 rval = qla2x00_mbx_reg_test(vha); 735 rval = qla2x00_mbx_reg_test(vha);
@@ -786,7 +783,6 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
786 sizeof(uint32_t); 783 sizeof(uint32_t);
787 if (ha->mqenable) 784 if (ha->mqenable)
788 mq_size = sizeof(struct qla2xxx_mq_chain); 785 mq_size = sizeof(struct qla2xxx_mq_chain);
789
790 /* Allocate memory for Fibre Channel Event Buffer. */ 786 /* Allocate memory for Fibre Channel Event Buffer. */
791 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) 787 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
792 goto try_eft; 788 goto try_eft;
@@ -850,8 +846,7 @@ cont_alloc:
850 rsp_q_size = rsp->length * sizeof(response_t); 846 rsp_q_size = rsp->length * sizeof(response_t);
851 847
852 dump_size = offsetof(struct qla2xxx_fw_dump, isp); 848 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
853 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + 849 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
854 eft_size;
855 ha->chain_offset = dump_size; 850 ha->chain_offset = dump_size;
856 dump_size += mq_size + fce_size; 851 dump_size += mq_size + fce_size;
857 852
@@ -891,6 +886,56 @@ cont_alloc:
891 htonl(offsetof(struct qla2xxx_fw_dump, isp)); 886 htonl(offsetof(struct qla2xxx_fw_dump, isp));
892} 887}
893 888
889static int
890qla81xx_mpi_sync(scsi_qla_host_t *vha)
891{
892#define MPS_MASK 0xe0
893 int rval;
894 uint16_t dc;
895 uint32_t dw;
896 struct qla_hw_data *ha = vha->hw;
897
898 if (!IS_QLA81XX(vha->hw))
899 return QLA_SUCCESS;
900
901 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
902 if (rval != QLA_SUCCESS) {
903 DEBUG2(qla_printk(KERN_WARNING, ha,
904 "Sync-MPI: Unable to acquire semaphore.\n"));
905 goto done;
906 }
907
908 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
909 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
910 if (rval != QLA_SUCCESS) {
911 DEBUG2(qla_printk(KERN_WARNING, ha,
912 "Sync-MPI: Unable to read sync.\n"));
913 goto done_release;
914 }
915
916 dc &= MPS_MASK;
917 if (dc == (dw & MPS_MASK))
918 goto done_release;
919
920 dw &= ~MPS_MASK;
921 dw |= dc;
922 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
923 if (rval != QLA_SUCCESS) {
924 DEBUG2(qla_printk(KERN_WARNING, ha,
925 "Sync-MPI: Unable to gain sync.\n"));
926 }
927
928done_release:
929 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
930 if (rval != QLA_SUCCESS) {
931 DEBUG2(qla_printk(KERN_WARNING, ha,
932 "Sync-MPI: Unable to release semaphore.\n"));
933 }
934
935done:
936 return rval;
937}
938
894/** 939/**
895 * qla2x00_setup_chip() - Load and start RISC firmware. 940 * qla2x00_setup_chip() - Load and start RISC firmware.
896 * @ha: HA context 941 * @ha: HA context
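
qla81xx_mpi_sync() reconciles the MPS bits between the PCIe config-space view (config word 0x54) and the MPI firmware's copy (RISC RAM word 0x7a15), doing the read-modify-write only when the two disagree and bracketing it with a firmware semaphore at word 0x7c00. The skeleton, with declarations and the DEBUG2 error reporting stripped:

if (qla2x00_write_ram_word(vha, 0x7c00, 1) != QLA_SUCCESS)
	goto done;				/* semaphore not acquired */

pci_read_config_word(vha->hw->pdev, 0x54, &dc);
if (qla2x00_read_ram_word(vha, 0x7a15, &dw) == QLA_SUCCESS &&
    (dw & MPS_MASK) != (dc & MPS_MASK)) {
	dw = (dw & ~MPS_MASK) | (dc & MPS_MASK);	/* adopt PCI's value */
	qla2x00_write_ram_word(vha, 0x7a15, dw);
}
qla2x00_write_ram_word(vha, 0x7c00, 0);		/* always release */
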
@@ -915,6 +960,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
915 spin_unlock_irqrestore(&ha->hardware_lock, flags); 960 spin_unlock_irqrestore(&ha->hardware_lock, flags);
916 } 961 }
917 962
963 qla81xx_mpi_sync(vha);
964
918 /* Load firmware sequences */ 965 /* Load firmware sequences */
919 rval = ha->isp_ops->load_risc(vha, &srisc_address); 966 rval = ha->isp_ops->load_risc(vha, &srisc_address);
920 if (rval == QLA_SUCCESS) { 967 if (rval == QLA_SUCCESS) {
@@ -931,13 +978,16 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
931 /* Retrieve firmware information. */ 978 /* Retrieve firmware information. */
932 if (rval == QLA_SUCCESS) { 979 if (rval == QLA_SUCCESS) {
933 fw_major_version = ha->fw_major_version; 980 fw_major_version = ha->fw_major_version;
934 qla2x00_get_fw_version(vha, 981 rval = qla2x00_get_fw_version(vha,
935 &ha->fw_major_version, 982 &ha->fw_major_version,
936 &ha->fw_minor_version, 983 &ha->fw_minor_version,
937 &ha->fw_subminor_version, 984 &ha->fw_subminor_version,
938 &ha->fw_attributes, &ha->fw_memory_size, 985 &ha->fw_attributes, &ha->fw_memory_size,
939 ha->mpi_version, &ha->mpi_capabilities, 986 ha->mpi_version, &ha->mpi_capabilities,
940 ha->phy_version); 987 ha->phy_version);
988 if (rval != QLA_SUCCESS)
989 goto failed;
990
941 ha->flags.npiv_supported = 0; 991 ha->flags.npiv_supported = 0;
942 if (IS_QLA2XXX_MIDTYPE(ha) && 992 if (IS_QLA2XXX_MIDTYPE(ha) &&
943 (ha->fw_attributes & BIT_2)) { 993 (ha->fw_attributes & BIT_2)) {
@@ -989,7 +1039,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
989 ha->fw_subminor_version); 1039 ha->fw_subminor_version);
990 } 1040 }
991 } 1041 }
992 1042failed:
993 if (rval) { 1043 if (rval) {
994 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", 1044 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
995 vha->host_no)); 1045 vha->host_no));
@@ -1013,12 +1063,14 @@ qla2x00_init_response_q_entries(struct rsp_que *rsp)
1013 uint16_t cnt; 1063 uint16_t cnt;
1014 response_t *pkt; 1064 response_t *pkt;
1015 1065
1066 rsp->ring_ptr = rsp->ring;
1067 rsp->ring_index = 0;
1068 rsp->status_srb = NULL;
1016 pkt = rsp->ring_ptr; 1069 pkt = rsp->ring_ptr;
1017 for (cnt = 0; cnt < rsp->length; cnt++) { 1070 for (cnt = 0; cnt < rsp->length; cnt++) {
1018 pkt->signature = RESPONSE_PROCESSED; 1071 pkt->signature = RESPONSE_PROCESSED;
1019 pkt++; 1072 pkt++;
1020 } 1073 }
1021
1022} 1074}
1023 1075
1024/** 1076/**
@@ -1176,7 +1228,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1176 if (ha->flags.msix_enabled) { 1228 if (ha->flags.msix_enabled) {
1177 msix = &ha->msix_entries[1]; 1229 msix = &ha->msix_entries[1];
1178 DEBUG2_17(printk(KERN_INFO 1230 DEBUG2_17(printk(KERN_INFO
1179 "Reistering vector 0x%x for base que\n", msix->entry)); 1231 "Registering vector 0x%x for base que\n", msix->entry));
1180 icb->msix = cpu_to_le16(msix->entry); 1232 icb->msix = cpu_to_le16(msix->entry);
1181 } 1233 }
1182 /* Use alternate PCI bus number */ 1234 /* Use alternate PCI bus number */
@@ -1230,14 +1282,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1230 spin_lock_irqsave(&ha->hardware_lock, flags); 1282 spin_lock_irqsave(&ha->hardware_lock, flags);
1231 1283
1232 /* Clear outstanding commands array. */ 1284 /* Clear outstanding commands array. */
1233 for (que = 0; que < ha->max_queues; que++) { 1285 for (que = 0; que < ha->max_req_queues; que++) {
1234 req = ha->req_q_map[que]; 1286 req = ha->req_q_map[que];
1235 if (!req) 1287 if (!req)
1236 continue; 1288 continue;
1237 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) 1289 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
1238 req->outstanding_cmds[cnt] = NULL; 1290 req->outstanding_cmds[cnt] = NULL;
1239 1291
1240 req->current_outstanding_cmd = 0; 1292 req->current_outstanding_cmd = 1;
1241 1293
1242 /* Initialize firmware. */ 1294 /* Initialize firmware. */
1243 req->ring_ptr = req->ring; 1295 req->ring_ptr = req->ring;
@@ -1245,13 +1297,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1245 req->cnt = req->length; 1297 req->cnt = req->length;
1246 } 1298 }
1247 1299
1248 for (que = 0; que < ha->max_queues; que++) { 1300 for (que = 0; que < ha->max_rsp_queues; que++) {
1249 rsp = ha->rsp_q_map[que]; 1301 rsp = ha->rsp_q_map[que];
1250 if (!rsp) 1302 if (!rsp)
1251 continue; 1303 continue;
1252 rsp->ring_ptr = rsp->ring;
1253 rsp->ring_index = 0;
1254
1255 /* Initialize response queue entries */ 1304 /* Initialize response queue entries */
1256 qla2x00_init_response_q_entries(rsp); 1305 qla2x00_init_response_q_entries(rsp);
1257 } 1306 }
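
Two related changes in this hunk: requests and responses now get separate queue counts (max_req_queues / max_rsp_queues), and the outstanding-command scan starts at index 1 with current_outstanding_cmd preset to 1, reserving handle 0 as the "no command" marker; the per-queue ring bookkeeping (ring_ptr, ring_index, status_srb) moves into qla2x00_init_response_q_entries itself. A minimal sketch of a handle allocator that honors that reservation; MAX_CMDS and the array are illustrative:

    #include <stdio.h>

    #define MAX_CMDS 32          /* stand-in for MAX_OUTSTANDING_COMMANDS */

    static void *outstanding[MAX_CMDS];
    static unsigned current_handle = 1;     /* handle 0 is reserved */

    /* Find the next free slot, wrapping past 0; returns 0 on exhaustion. */
    static unsigned alloc_handle(void *cmd)
    {
        for (unsigned i = 1; i < MAX_CMDS; i++) {
            unsigned h = current_handle + i;
            if (h >= MAX_CMDS)
                h = h - MAX_CMDS + 1;       /* wrap to 1, never to 0 */
            if (!outstanding[h]) {
                outstanding[h] = cmd;
                current_handle = h;
                return h;
            }
        }
        return 0;                           /* 0 doubles as "no handle" */
    }

    int main(void)
    {
        int dummy;
        printf("first handle = %u\n", alloc_handle(&dummy));
        return 0;
    }
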
@@ -1307,7 +1356,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1307 unsigned long wtime, mtime, cs84xx_time; 1356 unsigned long wtime, mtime, cs84xx_time;
1308 uint16_t min_wait; /* Minimum wait time if loop is down */ 1357 uint16_t min_wait; /* Minimum wait time if loop is down */
1309 uint16_t wait_time; /* Wait time if loop is coming ready */ 1358 uint16_t wait_time; /* Wait time if loop is coming ready */
1310 uint16_t state[3]; 1359 uint16_t state[5];
1311 struct qla_hw_data *ha = vha->hw; 1360 struct qla_hw_data *ha = vha->hw;
1312 1361
1313 rval = QLA_SUCCESS; 1362 rval = QLA_SUCCESS;
@@ -1406,8 +1455,9 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1406 vha->host_no, state[0], jiffies)); 1455 vha->host_no, state[0], jiffies));
1407 } while (1); 1456 } while (1);
1408 1457
1409 DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1458 DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n",
1410 vha->host_no, state[0], jiffies)); 1459 vha->host_no, state[0], state[1], state[2], state[3], state[4],
1460 jiffies));
1411 1461
1412 if (rval) { 1462 if (rval) {
1413 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", 1463 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
@@ -1541,6 +1591,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
1541 char *st, *en; 1591 char *st, *en;
1542 uint16_t index; 1592 uint16_t index;
1543 struct qla_hw_data *ha = vha->hw; 1593 struct qla_hw_data *ha = vha->hw;
1594 int use_tbl = !IS_QLA25XX(ha) && !IS_QLA81XX(ha);
1544 1595
1545 if (memcmp(model, BINZERO, len) != 0) { 1596 if (memcmp(model, BINZERO, len) != 0) {
1546 strncpy(ha->model_number, model, len); 1597 strncpy(ha->model_number, model, len);
@@ -1553,14 +1604,16 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
1553 } 1604 }
1554 1605
1555 index = (ha->pdev->subsystem_device & 0xff); 1606 index = (ha->pdev->subsystem_device & 0xff);
1556 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1607 if (use_tbl &&
1608 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1557 index < QLA_MODEL_NAMES) 1609 index < QLA_MODEL_NAMES)
1558 strncpy(ha->model_desc, 1610 strncpy(ha->model_desc,
1559 qla2x00_model_name[index * 2 + 1], 1611 qla2x00_model_name[index * 2 + 1],
1560 sizeof(ha->model_desc) - 1); 1612 sizeof(ha->model_desc) - 1);
1561 } else { 1613 } else {
1562 index = (ha->pdev->subsystem_device & 0xff); 1614 index = (ha->pdev->subsystem_device & 0xff);
1563 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1615 if (use_tbl &&
1616 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1564 index < QLA_MODEL_NAMES) { 1617 index < QLA_MODEL_NAMES) {
1565 strcpy(ha->model_number, 1618 strcpy(ha->model_number,
1566 qla2x00_model_name[index * 2]); 1619 qla2x00_model_name[index * 2]);
@@ -2061,8 +2114,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2061 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 2114 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2062 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2115 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2063 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2116 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2064 if (test_bit(RSCN_UPDATE, &save_flags)) 2117 if (test_bit(RSCN_UPDATE, &save_flags)) {
2065 set_bit(RSCN_UPDATE, &vha->dpc_flags); 2118 set_bit(RSCN_UPDATE, &vha->dpc_flags);
2119 vha->flags.rscn_queue_overflow = 1;
2120 }
2066 } 2121 }
2067 2122
2068 return (rval); 2123 return (rval);
@@ -2110,7 +2165,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2110 goto cleanup_allocation; 2165 goto cleanup_allocation;
2111 2166
2112 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n", 2167 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n",
2113 ha->host_no, entries)); 2168 vha->host_no, entries));
2114 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list, 2169 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list,
2115 entries * sizeof(struct gid_list_info))); 2170 entries * sizeof(struct gid_list_info)));
2116 2171
@@ -2243,7 +2298,8 @@ static void
2243qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 2298qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2244{ 2299{
2245#define LS_UNKNOWN 2 2300#define LS_UNKNOWN 2
2246 static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; 2301 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
2302 char *link_speed;
2247 int rval; 2303 int rval;
2248 uint16_t mb[6]; 2304 uint16_t mb[6];
2249 struct qla_hw_data *ha = vha->hw; 2305 struct qla_hw_data *ha = vha->hw;
@@ -2266,10 +2322,15 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2266 fcport->port_name[6], fcport->port_name[7], rval, 2322 fcport->port_name[6], fcport->port_name[7], rval,
2267 fcport->fp_speed, mb[0], mb[1])); 2323 fcport->fp_speed, mb[0], mb[1]));
2268 } else { 2324 } else {
2325 link_speed = link_speeds[LS_UNKNOWN];
2326 if (fcport->fp_speed < 5)
2327 link_speed = link_speeds[fcport->fp_speed];
2328 else if (fcport->fp_speed == 0x13)
2329 link_speed = link_speeds[5];
2269 DEBUG2(qla_printk(KERN_INFO, ha, 2330 DEBUG2(qla_printk(KERN_INFO, ha,
2270 "iIDMA adjusted to %s GB/s on " 2331 "iIDMA adjusted to %s GB/s on "
2271 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", 2332 "%02x%02x%02x%02x%02x%02x%02x%02x.\n",
2272 link_speeds[fcport->fp_speed], fcport->port_name[0], 2333 link_speed, fcport->port_name[0],
2273 fcport->port_name[1], fcport->port_name[2], 2334 fcport->port_name[1], fcport->port_name[2],
2274 fcport->port_name[3], fcport->port_name[4], 2335 fcport->port_name[3], fcport->port_name[4],
2275 fcport->port_name[5], fcport->port_name[6], 2336 fcport->port_name[5], fcport->port_name[6],
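
The iIDMA hunk replaces a direct, unchecked index into link_speeds[] with a guarded lookup: fp_speed values 0-4 go through the table, the out-of-band code 0x13 (presumably the firmware's 10 Gb speed code; the diff itself does not spell that out) maps to the new "10" entry, and everything else falls back to "?". The same rule, standalone:

    #include <stdio.h>

    #define LS_UNKNOWN 2

    static const char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };

    static const char *speed_to_str(unsigned fp_speed)
    {
        if (fp_speed < 5)
            return link_speeds[fp_speed];
        if (fp_speed == 0x13)          /* firmware 10 Gb code (assumed) */
            return link_speeds[5];
        return link_speeds[LS_UNKNOWN];
    }

    int main(void)
    {
        printf("%s %s %s\n",
               speed_to_str(4), speed_to_str(0x13), speed_to_str(9));
        return 0;
    }
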
@@ -3180,9 +3241,14 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
3180{ 3241{
3181 int rval = QLA_SUCCESS; 3242 int rval = QLA_SUCCESS;
3182 uint32_t wait_time; 3243 uint32_t wait_time;
3183 struct qla_hw_data *ha = vha->hw; 3244 struct req_que *req;
3184 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 3245 struct rsp_que *rsp;
3185 struct rsp_que *rsp = req->rsp; 3246
3247 if (ql2xmultique_tag)
3248 req = vha->hw->req_q_map[0];
3249 else
3250 req = vha->req;
3251 rsp = req->rsp;
3186 3252
3187 atomic_set(&vha->loop_state, LOOP_UPDATE); 3253 atomic_set(&vha->loop_state, LOOP_UPDATE);
3188 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3254 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
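
Both here and in qla24xx_configure_vhba further down, the old per-vha req_ques[0] indirection gives way to one rule: with the ql2xmultique_tag module parameter set, work goes through the base request queue (req_q_map[0]); otherwise each host uses its own vha->req. A sketch of that selection as a helper, with the structs reduced to what the rule needs:

    #include <stdio.h>

    struct req_que { int id; };

    struct qla_hw   { struct req_que *req_q_map[4]; };
    struct qla_host { struct qla_hw *hw; struct req_que *req; };

    static int ql2xmultique_tag;    /* module parameter in the real driver */

    /* Pick the queue a marker/IOCB should be posted to. */
    static struct req_que *select_req_que(struct qla_host *vha)
    {
        if (ql2xmultique_tag)
            return vha->hw->req_q_map[0];   /* base queue handles it */
        return vha->req;                    /* per-host default queue */
    }

    int main(void)
    {
        struct req_que base = { 0 }, own = { 1 };
        struct qla_hw hw = { { &base } };
        struct qla_host vha = { &hw, &own };

        printf("queue %d\n", select_req_que(&vha)->id); /* 1; 0 with tag */
        return 0;
    }
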
@@ -3448,7 +3514,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
3448 int ret = -1; 3514 int ret = -1;
3449 int i; 3515 int i;
3450 3516
3451 for (i = 1; i < ha->max_queues; i++) { 3517 for (i = 1; i < ha->max_rsp_queues; i++) {
3452 rsp = ha->rsp_q_map[i]; 3518 rsp = ha->rsp_q_map[i];
3453 if (rsp) { 3519 if (rsp) {
3454 rsp->options &= ~BIT_0; 3520 rsp->options &= ~BIT_0;
@@ -3462,6 +3528,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
3462 "%s Rsp que:%d inited\n", __func__, 3528 "%s Rsp que:%d inited\n", __func__,
3463 rsp->id)); 3529 rsp->id));
3464 } 3530 }
3531 }
3532 for (i = 1; i < ha->max_req_queues; i++) {
3465 req = ha->req_q_map[i]; 3533 req = ha->req_q_map[i];
3466 if (req) { 3534 if (req) {
3467 /* Clear outstanding commands array. */ 3535 /* Clear outstanding commands array. */
@@ -3566,14 +3634,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3566 nv = ha->nvram; 3634 nv = ha->nvram;
3567 3635
3568 /* Determine NVRAM starting address. */ 3636 /* Determine NVRAM starting address. */
3569 ha->nvram_size = sizeof(struct nvram_24xx); 3637 if (ha->flags.port0) {
3570 ha->nvram_base = FA_NVRAM_FUNC0_ADDR; 3638 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
3571 ha->vpd_size = FA_NVRAM_VPD_SIZE; 3639 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
3572 ha->vpd_base = FA_NVRAM_VPD0_ADDR; 3640 } else {
3573 if (PCI_FUNC(ha->pdev->devfn)) {
3574 ha->nvram_base = FA_NVRAM_FUNC1_ADDR; 3641 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
3575 ha->vpd_base = FA_NVRAM_VPD1_ADDR; 3642 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
3576 } 3643 }
3644 ha->nvram_size = sizeof(struct nvram_24xx);
3645 ha->vpd_size = FA_NVRAM_VPD_SIZE;
3577 3646
3578 /* Get VPD data into cache */ 3647 /* Get VPD data into cache */
3579 ha->vpd = ha->nvram + VPD_OFFSET; 3648 ha->vpd = ha->nvram + VPD_OFFSET;
@@ -3587,7 +3656,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3587 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 3656 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
3588 chksum += le32_to_cpu(*dptr++); 3657 chksum += le32_to_cpu(*dptr++);
3589 3658
3590 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); 3659 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
3591 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 3660 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
3592 3661
3593 /* Bad NVRAM data, set defaults parameters. */ 3662 /* Bad NVRAM data, set defaults parameters. */
@@ -3612,7 +3681,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3612 nv->exchange_count = __constant_cpu_to_le16(0); 3681 nv->exchange_count = __constant_cpu_to_le16(0);
3613 nv->hard_address = __constant_cpu_to_le16(124); 3682 nv->hard_address = __constant_cpu_to_le16(124);
3614 nv->port_name[0] = 0x21; 3683 nv->port_name[0] = 0x21;
3615 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); 3684 nv->port_name[1] = 0x00 + ha->port_no;
3616 nv->port_name[2] = 0x00; 3685 nv->port_name[2] = 0x00;
3617 nv->port_name[3] = 0xe0; 3686 nv->port_name[3] = 0xe0;
3618 nv->port_name[4] = 0x8b; 3687 nv->port_name[4] = 0x8b;
@@ -3798,11 +3867,11 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
3798} 3867}
3799 3868
3800static int 3869static int
3801qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr) 3870qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
3871 uint32_t faddr)
3802{ 3872{
3803 int rval = QLA_SUCCESS; 3873 int rval = QLA_SUCCESS;
3804 int segments, fragment; 3874 int segments, fragment;
3805 uint32_t faddr;
3806 uint32_t *dcode, dlen; 3875 uint32_t *dcode, dlen;
3807 uint32_t risc_addr; 3876 uint32_t risc_addr;
3808 uint32_t risc_size; 3877 uint32_t risc_size;
@@ -3811,12 +3880,11 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3811 struct req_que *req = ha->req_q_map[0]; 3880 struct req_que *req = ha->req_q_map[0];
3812 3881
3813 qla_printk(KERN_INFO, ha, 3882 qla_printk(KERN_INFO, ha,
3814 "FW: Loading from flash (%x)...\n", ha->flt_region_fw); 3883 "FW: Loading from flash (%x)...\n", faddr);
3815 3884
3816 rval = QLA_SUCCESS; 3885 rval = QLA_SUCCESS;
3817 3886
3818 segments = FA_RISC_CODE_SEGMENTS; 3887 segments = FA_RISC_CODE_SEGMENTS;
3819 faddr = ha->flt_region_fw;
3820 dcode = (uint32_t *)req->ring; 3888 dcode = (uint32_t *)req->ring;
3821 *srisc_addr = 0; 3889 *srisc_addr = 0;
3822 3890
@@ -4104,6 +4172,9 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4104{ 4172{
4105 int rval; 4173 int rval;
4106 4174
4175 if (ql2xfwloadbin == 1)
4176 return qla81xx_load_risc(vha, srisc_addr);
4177
4107 /* 4178 /*
4108 * FW Load priority: 4179 * FW Load priority:
4109 * 1) Firmware via request-firmware interface (.bin file). 4180 * 1) Firmware via request-firmware interface (.bin file).
@@ -4113,24 +4184,45 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4113 if (rval == QLA_SUCCESS) 4184 if (rval == QLA_SUCCESS)
4114 return rval; 4185 return rval;
4115 4186
4116 return qla24xx_load_risc_flash(vha, srisc_addr); 4187 return qla24xx_load_risc_flash(vha, srisc_addr,
4188 vha->hw->flt_region_fw);
4117} 4189}
4118 4190
4119int 4191int
4120qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) 4192qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4121{ 4193{
4122 int rval; 4194 int rval;
4195 struct qla_hw_data *ha = vha->hw;
4196
4197 if (ql2xfwloadbin == 2)
4198 goto try_blob_fw;
4123 4199
4124 /* 4200 /*
4125 * FW Load priority: 4201 * FW Load priority:
4126 * 1) Firmware residing in flash. 4202 * 1) Firmware residing in flash.
4127 * 2) Firmware via request-firmware interface (.bin file). 4203 * 2) Firmware via request-firmware interface (.bin file).
4204 * 3) Golden-Firmware residing in flash -- limited operation.
4128 */ 4205 */
4129 rval = qla24xx_load_risc_flash(vha, srisc_addr); 4206 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
4130 if (rval == QLA_SUCCESS) 4207 if (rval == QLA_SUCCESS)
4131 return rval; 4208 return rval;
4132 4209
4133 return qla24xx_load_risc_blob(vha, srisc_addr); 4210try_blob_fw:
4211 rval = qla24xx_load_risc_blob(vha, srisc_addr);
4212 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
4213 return rval;
4214
4215 qla_printk(KERN_ERR, ha,
4216 "FW: Attempting to fallback to golden firmware...\n");
4217 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
4218 if (rval != QLA_SUCCESS)
4219 return rval;
4220
4221 qla_printk(KERN_ERR, ha,
4222 "FW: Please update operational firmware...\n");
4223 ha->flags.running_gold_fw = 1;
4224
4225 return rval;
4134} 4226}
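
Taken together, the two load_risc hunks build a priority chain with operator overrides: ql2xfwloadbin==1 routes the 24xx path through the flash-first 81xx routine, ql2xfwloadbin==2 makes the 81xx path skip flash and try the request_firmware blob first, and if the blob also fails while a golden-firmware flash region exists, the driver boots that limited image and sets running_gold_fw. A standalone model of the chain; the loader stubs and region values are illustrative:

    #include <stdio.h>

    enum { QLA_SUCCESS = 0, QLA_FAILED = 1 };

    static int load_flash(unsigned region) { (void)region; return QLA_FAILED; }
    static int load_blob(void)             { return QLA_FAILED; }

    static unsigned flt_region_fw = 0x20000, flt_region_gold_fw = 0x40000;
    static int ql2xfwloadbin, running_gold_fw;

    static int load_risc_81xx(void)
    {
        int rval;

        if (ql2xfwloadbin != 2) {               /* 1) operational flash image */
            rval = load_flash(flt_region_fw);
            if (rval == QLA_SUCCESS)
                return rval;
        }
        rval = load_blob();                     /* 2) request_firmware blob */
        if (rval == QLA_SUCCESS || !flt_region_gold_fw)
            return rval;

        rval = load_flash(flt_region_gold_fw);  /* 3) golden fallback image */
        if (rval == QLA_SUCCESS)
            running_gold_fw = 1;                /* limited operation */
        return rval;
    }

    int main(void)
    {
        printf("rval=%d gold=%d\n", load_risc_81xx(), running_gold_fw);
        return 0;
    }
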
4135 4227
4136void 4228void
@@ -4146,7 +4238,7 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4146 4238
4147 ret = qla2x00_stop_firmware(vha); 4239 ret = qla2x00_stop_firmware(vha);
4148 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && 4240 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4149 retries ; retries--) { 4241 ret != QLA_INVALID_COMMAND && retries ; retries--) {
4150 ha->isp_ops->reset_chip(vha); 4242 ha->isp_ops->reset_chip(vha);
4151 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) 4243 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
4152 continue; 4244 continue;
@@ -4165,13 +4257,19 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
4165 uint16_t mb[MAILBOX_REGISTER_COUNT]; 4257 uint16_t mb[MAILBOX_REGISTER_COUNT];
4166 struct qla_hw_data *ha = vha->hw; 4258 struct qla_hw_data *ha = vha->hw;
4167 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4259 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4168 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 4260 struct req_que *req;
4169 struct rsp_que *rsp = req->rsp; 4261 struct rsp_que *rsp;
4170 4262
4171 if (!vha->vp_idx) 4263 if (!vha->vp_idx)
4172 return -EINVAL; 4264 return -EINVAL;
4173 4265
4174 rval = qla2x00_fw_ready(base_vha); 4266 rval = qla2x00_fw_ready(base_vha);
4267 if (ql2xmultique_tag)
4268 req = ha->req_q_map[0];
4269 else
4270 req = vha->req;
4271 rsp = req->rsp;
4272
4175 if (rval == QLA_SUCCESS) { 4273 if (rval == QLA_SUCCESS) {
4176 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 4274 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4177 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 4275 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -4305,7 +4403,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4305 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 4403 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4306 chksum += le32_to_cpu(*dptr++); 4404 chksum += le32_to_cpu(*dptr++);
4307 4405
4308 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); 4406 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
4309 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 4407 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
4310 4408
4311 /* Bad NVRAM data, set defaults parameters. */ 4409 /* Bad NVRAM data, set defaults parameters. */
@@ -4329,7 +4427,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4329 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); 4427 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4330 nv->exchange_count = __constant_cpu_to_le16(0); 4428 nv->exchange_count = __constant_cpu_to_le16(0);
4331 nv->port_name[0] = 0x21; 4429 nv->port_name[0] = 0x21;
4332 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); 4430 nv->port_name[1] = 0x00 + ha->port_no;
4333 nv->port_name[2] = 0x00; 4431 nv->port_name[2] = 0x00;
4334 nv->port_name[3] = 0xe0; 4432 nv->port_name[3] = 0xe0;
4335 nv->port_name[4] = 0x8b; 4433 nv->port_name[4] = 0x8b;
@@ -4358,12 +4456,12 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4358 nv->max_luns_per_target = __constant_cpu_to_le16(128); 4456 nv->max_luns_per_target = __constant_cpu_to_le16(128);
4359 nv->port_down_retry_count = __constant_cpu_to_le16(30); 4457 nv->port_down_retry_count = __constant_cpu_to_le16(30);
4360 nv->link_down_timeout = __constant_cpu_to_le16(30); 4458 nv->link_down_timeout = __constant_cpu_to_le16(30);
4361 nv->enode_mac[0] = 0x01; 4459 nv->enode_mac[0] = 0x00;
4362 nv->enode_mac[1] = 0x02; 4460 nv->enode_mac[1] = 0x02;
4363 nv->enode_mac[2] = 0x03; 4461 nv->enode_mac[2] = 0x03;
4364 nv->enode_mac[3] = 0x04; 4462 nv->enode_mac[3] = 0x04;
4365 nv->enode_mac[4] = 0x05; 4463 nv->enode_mac[4] = 0x05;
4366 nv->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); 4464 nv->enode_mac[5] = 0x06 + ha->port_no;
4367 4465
4368 rval = 1; 4466 rval = 1;
4369 } 4467 }
@@ -4396,7 +4494,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
4396 icb->enode_mac[2] = 0x03; 4494 icb->enode_mac[2] = 0x03;
4397 icb->enode_mac[3] = 0x04; 4495 icb->enode_mac[3] = 0x04;
4398 icb->enode_mac[4] = 0x05; 4496 icb->enode_mac[4] = 0x05;
4399 icb->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); 4497 icb->enode_mac[5] = 0x06 + ha->port_no;
4400 } 4498 }
4401 4499
4402 /* Use extended-initialization control block. */ 4500 /* Use extended-initialization control block. */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a8abbb95730..13396beae2c 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -15,6 +15,7 @@ static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
15 struct rsp_que *rsp); 15 struct rsp_que *rsp);
16static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *); 16static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
17 17
18static void qla25xx_set_que(srb_t *, struct rsp_que **);
18/** 19/**
19 * qla2x00_get_cmd_direction() - Determine control_flag data direction. 20 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
20 * @cmd: SCSI command 21 * @cmd: SCSI command
@@ -92,9 +93,10 @@ qla2x00_calc_iocbs_64(uint16_t dsds)
92 * Returns a pointer to the Continuation Type 0 IOCB packet. 93 * Returns a pointer to the Continuation Type 0 IOCB packet.
93 */ 94 */
94static inline cont_entry_t * 95static inline cont_entry_t *
95qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha) 96qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
96{ 97{
97 cont_entry_t *cont_pkt; 98 cont_entry_t *cont_pkt;
99 struct req_que *req = vha->req;
98 /* Adjust ring index. */ 100 /* Adjust ring index. */
99 req->ring_index++; 101 req->ring_index++;
100 if (req->ring_index == req->length) { 102 if (req->ring_index == req->length) {
@@ -120,10 +122,11 @@ qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
120 * Returns a pointer to the continuation type 1 IOCB packet. 122 * Returns a pointer to the continuation type 1 IOCB packet.
121 */ 123 */
122static inline cont_a64_entry_t * 124static inline cont_a64_entry_t *
123qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha) 125qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
124{ 126{
125 cont_a64_entry_t *cont_pkt; 127 cont_a64_entry_t *cont_pkt;
126 128
129 struct req_que *req = vha->req;
127 /* Adjust ring index. */ 130 /* Adjust ring index. */
128 req->ring_index++; 131 req->ring_index++;
129 if (req->ring_index == req->length) { 132 if (req->ring_index == req->length) {
@@ -159,7 +162,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
159 struct scsi_cmnd *cmd; 162 struct scsi_cmnd *cmd;
160 struct scatterlist *sg; 163 struct scatterlist *sg;
161 int i; 164 int i;
162 struct req_que *req;
163 165
164 cmd = sp->cmd; 166 cmd = sp->cmd;
165 167
@@ -174,8 +176,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
174 } 176 }
175 177
176 vha = sp->fcport->vha; 178 vha = sp->fcport->vha;
177 req = sp->que;
178
179 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 179 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
180 180
181 /* Three DSDs are available in the Command Type 2 IOCB */ 181 /* Three DSDs are available in the Command Type 2 IOCB */
@@ -192,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
192 * Seven DSDs are available in the Continuation 192 * Seven DSDs are available in the Continuation
193 * Type 0 IOCB. 193 * Type 0 IOCB.
194 */ 194 */
195 cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha); 195 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
196 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address; 196 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
197 avail_dsds = 7; 197 avail_dsds = 7;
198 } 198 }
@@ -220,7 +220,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
220 struct scsi_cmnd *cmd; 220 struct scsi_cmnd *cmd;
221 struct scatterlist *sg; 221 struct scatterlist *sg;
222 int i; 222 int i;
223 struct req_que *req;
224 223
225 cmd = sp->cmd; 224 cmd = sp->cmd;
226 225
@@ -235,8 +234,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
235 } 234 }
236 235
237 vha = sp->fcport->vha; 236 vha = sp->fcport->vha;
238 req = sp->que;
239
240 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 237 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
241 238
242 /* Two DSDs are available in the Command Type 3 IOCB */ 239 /* Two DSDs are available in the Command Type 3 IOCB */
@@ -254,7 +251,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
254 * Five DSDs are available in the Continuation 251 * Five DSDs are available in the Continuation
255 * Type 1 IOCB. 252 * Type 1 IOCB.
256 */ 253 */
257 cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); 254 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
258 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 255 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
259 avail_dsds = 5; 256 avail_dsds = 5;
260 } 257 }
@@ -353,7 +350,6 @@ qla2x00_start_scsi(srb_t *sp)
353 /* Build command packet */ 350 /* Build command packet */
354 req->current_outstanding_cmd = handle; 351 req->current_outstanding_cmd = handle;
355 req->outstanding_cmds[handle] = sp; 352 req->outstanding_cmds[handle] = sp;
356 sp->que = req;
357 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 353 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
358 req->cnt -= req_cnt; 354 req->cnt -= req_cnt;
359 355
@@ -453,6 +449,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
453 mrk24->lun[2] = MSB(lun); 449 mrk24->lun[2] = MSB(lun);
454 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); 450 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
455 mrk24->vp_index = vha->vp_idx; 451 mrk24->vp_index = vha->vp_idx;
452 mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
456 } else { 453 } else {
457 SET_TARGET_ID(ha, mrk->target, loop_id); 454 SET_TARGET_ID(ha, mrk->target, loop_id);
458 mrk->lun = cpu_to_le16(lun); 455 mrk->lun = cpu_to_le16(lun);
@@ -531,9 +528,6 @@ qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
531 for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++) 528 for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
532 *dword_ptr++ = 0; 529 *dword_ptr++ = 0;
533 530
534 /* Set system defined field. */
535 pkt->sys_define = (uint8_t)req->ring_index;
536
537 /* Set entry count. */ 531 /* Set entry count. */
538 pkt->entry_count = 1; 532 pkt->entry_count = 1;
539 533
@@ -656,7 +650,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
656 } 650 }
657 651
658 vha = sp->fcport->vha; 652 vha = sp->fcport->vha;
659 req = sp->que; 653 req = vha->req;
660 654
661 /* Set transfer direction */ 655 /* Set transfer direction */
662 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 656 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -687,7 +681,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
687 * Five DSDs are available in the Continuation 681 * Five DSDs are available in the Continuation
688 * Type 1 IOCB. 682 * Type 1 IOCB.
689 */ 683 */
690 cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); 684 cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
691 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 685 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
692 avail_dsds = 5; 686 avail_dsds = 5;
693 } 687 }
@@ -724,19 +718,13 @@ qla24xx_start_scsi(srb_t *sp)
724 struct scsi_cmnd *cmd = sp->cmd; 718 struct scsi_cmnd *cmd = sp->cmd;
725 struct scsi_qla_host *vha = sp->fcport->vha; 719 struct scsi_qla_host *vha = sp->fcport->vha;
726 struct qla_hw_data *ha = vha->hw; 720 struct qla_hw_data *ha = vha->hw;
727 uint16_t que_id;
728 721
729 /* Setup device pointers. */ 722 /* Setup device pointers. */
730 ret = 0; 723 ret = 0;
731 que_id = vha->req_ques[0];
732 724
733 req = ha->req_q_map[que_id]; 725 qla25xx_set_que(sp, &rsp);
734 sp->que = req; 726 req = vha->req;
735 727
736 if (req->rsp)
737 rsp = req->rsp;
738 else
739 rsp = ha->rsp_q_map[que_id];
740 /* So we know we haven't pci_map'ed anything yet */ 728 /* So we know we haven't pci_map'ed anything yet */
741 tot_dsds = 0; 729 tot_dsds = 0;
742 730
@@ -794,7 +782,7 @@ qla24xx_start_scsi(srb_t *sp)
794 req->cnt -= req_cnt; 782 req->cnt -= req_cnt;
795 783
796 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 784 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
797 cmd_pkt->handle = handle; 785 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
798 786
799 /* Zero out remaining portion of packet. */ 787 /* Zero out remaining portion of packet. */
800 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 788 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -823,6 +811,8 @@ qla24xx_start_scsi(srb_t *sp)
823 811
824 /* Set total data segment count. */ 812 /* Set total data segment count. */
825 cmd_pkt->entry_count = (uint8_t)req_cnt; 813 cmd_pkt->entry_count = (uint8_t)req_cnt;
814 /* Specify response queue number where completion should happen */
815 cmd_pkt->entry_status = (uint8_t) rsp->id;
826 wmb(); 816 wmb();
827 817
828 /* Adjust ring index. */ 818 /* Adjust ring index. */
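
With multiple request queues, a bare ring index no longer identifies a command, so qla24xx_start_scsi stamps the IOCB with MAKE_HANDLE(req->id, handle) and repurposes the entry_status byte to name the response queue (rsp->id) that should see the completion; the qla_isr.c hunks below split the handle back apart with MSW()/LSW() to find the right req_q_map slot. The diff never shows MAKE_HANDLE's definition; assuming the obvious queue-in-the-upper-halfword packing that the MSW/LSW decode implies, a sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed packing: queue id in the upper 16 bits, slot in the lower. */
    #define MAKE_HANDLE(q, h) \
        ((uint32_t)(((uint32_t)(q) << 16) | (uint16_t)(h)))
    #define LSW(x) ((uint16_t)(x))
    #define MSW(x) ((uint16_t)((uint32_t)(x) >> 16))

    int main(void)
    {
        uint32_t iocb_handle = MAKE_HANDLE(3, 42);   /* queue 3, slot 42 */

        /* Completion side: recover queue and slot from the status entry. */
        uint16_t que = MSW(iocb_handle);
        uint32_t handle = LSW(iocb_handle);

        printf("que=%u handle=%u\n", (unsigned)que, (unsigned)handle);
        return 0;
    }
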
@@ -842,7 +832,7 @@ qla24xx_start_scsi(srb_t *sp)
842 /* Manage unprocessed RIO/ZIO commands in response queue. */ 832 /* Manage unprocessed RIO/ZIO commands in response queue. */
843 if (vha->flags.process_response_queue && 833 if (vha->flags.process_response_queue &&
844 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 834 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
845 qla24xx_process_response_queue(rsp); 835 qla24xx_process_response_queue(vha, rsp);
846 836
847 spin_unlock_irqrestore(&ha->hardware_lock, flags); 837 spin_unlock_irqrestore(&ha->hardware_lock, flags);
848 return QLA_SUCCESS; 838 return QLA_SUCCESS;
@@ -855,3 +845,16 @@ queuing_error:
855 845
856 return QLA_FUNCTION_FAILED; 846 return QLA_FUNCTION_FAILED;
857} 847}
848
849static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
850{
851 struct scsi_cmnd *cmd = sp->cmd;
852 struct qla_hw_data *ha = sp->fcport->vha->hw;
853 int affinity = cmd->request->cpu;
854
855 if (ql2xmultique_tag && affinity >= 0 &&
856 affinity < ha->max_rsp_queues - 1)
857 *rsp = ha->rsp_q_map[affinity + 1];
858 else
859 *rsp = ha->rsp_q_map[0];
860}
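
qla25xx_set_que steers completions by CPU: with ql2xmultique_tag set and a valid cmd->request->cpu, affinity n selects response queue n+1 (queue 0 stays the base queue); an unset affinity or single-queue mode falls back to the base queue. The same rule, modeled standalone:

    #include <stdio.h>

    struct rsp_que { int id; };

    #define MAX_RSP_QUEUES 4
    static struct rsp_que rsp_q_map[MAX_RSP_QUEUES] = { {0}, {1}, {2}, {3} };
    static int ql2xmultique_tag = 1;

    static struct rsp_que *set_que(int affinity)
    {
        if (ql2xmultique_tag && affinity >= 0 &&
            affinity < MAX_RSP_QUEUES - 1)
            return &rsp_q_map[affinity + 1];   /* per-CPU response queue */
        return &rsp_q_map[0];                  /* base queue fallback */
    }

    int main(void)
    {
        /* CPU 2 -> queue 3; an unset affinity (-1) -> base queue 0. */
        printf("%d %d\n", set_que(2)->id, set_que(-1)->id);
        return 0;
    }
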
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d04981848e5..c8d0a176fea 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -13,10 +13,9 @@ static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13static void qla2x00_process_completed_request(struct scsi_qla_host *, 13static void qla2x00_process_completed_request(struct scsi_qla_host *,
14 struct req_que *, uint32_t); 14 struct req_que *, uint32_t);
15static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 15static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
16static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); 16static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
17static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 17static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18 sts_entry_t *); 18 sts_entry_t *);
19static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
20 19
21/** 20/**
22 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 21 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -51,7 +50,7 @@ qla2100_intr_handler(int irq, void *dev_id)
51 status = 0; 50 status = 0;
52 51
53 spin_lock(&ha->hardware_lock); 52 spin_lock(&ha->hardware_lock);
54 vha = qla2x00_get_rsp_host(rsp); 53 vha = pci_get_drvdata(ha->pdev);
55 for (iter = 50; iter--; ) { 54 for (iter = 50; iter--; ) {
56 hccr = RD_REG_WORD(&reg->hccr); 55 hccr = RD_REG_WORD(&reg->hccr);
57 if (hccr & HCCR_RISC_PAUSE) { 56 if (hccr & HCCR_RISC_PAUSE) {
@@ -147,7 +146,7 @@ qla2300_intr_handler(int irq, void *dev_id)
147 status = 0; 146 status = 0;
148 147
149 spin_lock(&ha->hardware_lock); 148 spin_lock(&ha->hardware_lock);
150 vha = qla2x00_get_rsp_host(rsp); 149 vha = pci_get_drvdata(ha->pdev);
151 for (iter = 50; iter--; ) { 150 for (iter = 50; iter--; ) {
152 stat = RD_REG_DWORD(&reg->u.isp2300.host_status); 151 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
153 if (stat & HSR_RISC_PAUSED) { 152 if (stat & HSR_RISC_PAUSED) {
@@ -685,7 +684,7 @@ skip_rio:
685 vha->host_no)); 684 vha->host_no));
686 685
687 if (IS_FWI2_CAPABLE(ha)) 686 if (IS_FWI2_CAPABLE(ha))
688 qla24xx_process_response_queue(rsp); 687 qla24xx_process_response_queue(vha, rsp);
689 else 688 else
690 qla2x00_process_response_queue(rsp); 689 qla2x00_process_response_queue(rsp);
691 break; 690 break;
@@ -766,7 +765,10 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
766 struct qla_hw_data *ha = vha->hw; 765 struct qla_hw_data *ha = vha->hw;
767 struct req_que *req = NULL; 766 struct req_que *req = NULL;
768 767
769 req = ha->req_q_map[vha->req_ques[0]]; 768 if (!ql2xqfulltracking)
769 return;
770
771 req = vha->req;
770 if (!req) 772 if (!req)
771 return; 773 return;
772 if (req->max_q_depth <= sdev->queue_depth) 774 if (req->max_q_depth <= sdev->queue_depth)
@@ -808,6 +810,9 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
808 fc_port_t *fcport; 810 fc_port_t *fcport;
809 struct scsi_device *sdev; 811 struct scsi_device *sdev;
810 812
813 if (!ql2xqfulltracking)
814 return;
815
811 sdev = sp->cmd->device; 816 sdev = sp->cmd->device;
812 if (sdev->queue_depth >= req->max_q_depth) 817 if (sdev->queue_depth >= req->max_q_depth)
813 return; 818 return;
@@ -858,8 +863,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
858 qla2x00_ramp_up_queue_depth(vha, req, sp); 863 qla2x00_ramp_up_queue_depth(vha, req, sp);
859 qla2x00_sp_compl(ha, sp); 864 qla2x00_sp_compl(ha, sp);
860 } else { 865 } else {
861 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n", 866 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
862 vha->host_no)); 867 " handle(%d)\n", vha->host_no, req->id, index));
863 qla_printk(KERN_WARNING, ha, 868 qla_printk(KERN_WARNING, ha,
864 "Invalid ISP SCSI completion handle\n"); 869 "Invalid ISP SCSI completion handle\n");
865 870
@@ -881,7 +886,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
881 uint16_t handle_cnt; 886 uint16_t handle_cnt;
882 uint16_t cnt; 887 uint16_t cnt;
883 888
884 vha = qla2x00_get_rsp_host(rsp); 889 vha = pci_get_drvdata(ha->pdev);
885 890
886 if (!vha->flags.online) 891 if (!vha->flags.online)
887 return; 892 return;
@@ -926,7 +931,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
926 } 931 }
927 break; 932 break;
928 case STATUS_CONT_TYPE: 933 case STATUS_CONT_TYPE:
929 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); 934 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
930 break; 935 break;
931 default: 936 default:
932 /* Type Not Supported. */ 937 /* Type Not Supported. */
@@ -945,7 +950,8 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
945} 950}
946 951
947static inline void 952static inline void
948qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len) 953qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
954 struct rsp_que *rsp)
949{ 955{
950 struct scsi_cmnd *cp = sp->cmd; 956 struct scsi_cmnd *cp = sp->cmd;
951 957
@@ -962,7 +968,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
962 sp->request_sense_ptr += sense_len; 968 sp->request_sense_ptr += sense_len;
963 sp->request_sense_length -= sense_len; 969 sp->request_sense_length -= sense_len;
964 if (sp->request_sense_length != 0) 970 if (sp->request_sense_length != 0)
965 sp->fcport->vha->status_srb = sp; 971 rsp->status_srb = sp;
966 972
967 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " 973 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
968 "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no, 974 "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
@@ -992,7 +998,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
992 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; 998 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
993 uint8_t *rsp_info, *sense_data; 999 uint8_t *rsp_info, *sense_data;
994 struct qla_hw_data *ha = vha->hw; 1000 struct qla_hw_data *ha = vha->hw;
995 struct req_que *req = rsp->req; 1001 uint32_t handle;
1002 uint16_t que;
1003 struct req_que *req;
996 1004
997 sts = (sts_entry_t *) pkt; 1005 sts = (sts_entry_t *) pkt;
998 sts24 = (struct sts_entry_24xx *) pkt; 1006 sts24 = (struct sts_entry_24xx *) pkt;
@@ -1003,18 +1011,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1003 comp_status = le16_to_cpu(sts->comp_status); 1011 comp_status = le16_to_cpu(sts->comp_status);
1004 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1012 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1005 } 1013 }
1006 1014 handle = (uint32_t) LSW(sts->handle);
1015 que = MSW(sts->handle);
1016 req = ha->req_q_map[que];
1007 /* Fast path completion. */ 1017 /* Fast path completion. */
1008 if (comp_status == CS_COMPLETE && scsi_status == 0) { 1018 if (comp_status == CS_COMPLETE && scsi_status == 0) {
1009 qla2x00_process_completed_request(vha, req, sts->handle); 1019 qla2x00_process_completed_request(vha, req, handle);
1010 1020
1011 return; 1021 return;
1012 } 1022 }
1013 1023
1014 /* Validate handle. */ 1024 /* Validate handle. */
1015 if (sts->handle < MAX_OUTSTANDING_COMMANDS) { 1025 if (handle < MAX_OUTSTANDING_COMMANDS) {
1016 sp = req->outstanding_cmds[sts->handle]; 1026 sp = req->outstanding_cmds[handle];
1017 req->outstanding_cmds[sts->handle] = NULL; 1027 req->outstanding_cmds[handle] = NULL;
1018 } else 1028 } else
1019 sp = NULL; 1029 sp = NULL;
1020 1030
@@ -1030,7 +1040,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1030 cp = sp->cmd; 1040 cp = sp->cmd;
1031 if (cp == NULL) { 1041 if (cp == NULL) {
1032 DEBUG2(printk("scsi(%ld): Command already returned back to OS " 1042 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
1033 "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp)); 1043 "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp));
1034 qla_printk(KERN_WARNING, ha, 1044 qla_printk(KERN_WARNING, ha,
1035 "Command is NULL: already returned to OS (sp=%p)\n", sp); 1045 "Command is NULL: already returned to OS (sp=%p)\n", sp);
1036 1046
@@ -1121,6 +1131,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1121 scsi_status)); 1131 scsi_status));
1122 1132
1123 /* Adjust queue depth for all luns on the port. */ 1133 /* Adjust queue depth for all luns on the port. */
1134 if (!ql2xqfulltracking)
1135 break;
1124 fcport->last_queue_full = jiffies; 1136 fcport->last_queue_full = jiffies;
1125 starget_for_each_device(cp->device->sdev_target, 1137 starget_for_each_device(cp->device->sdev_target,
1126 fcport, qla2x00_adjust_sdev_qdepth_down); 1138 fcport, qla2x00_adjust_sdev_qdepth_down);
@@ -1133,7 +1145,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1133 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1145 if (!(scsi_status & SS_SENSE_LEN_VALID))
1134 break; 1146 break;
1135 1147
1136 qla2x00_handle_sense(sp, sense_data, sense_len); 1148 qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1137 break; 1149 break;
1138 1150
1139 case CS_DATA_UNDERRUN: 1151 case CS_DATA_UNDERRUN:
@@ -1179,6 +1191,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1179 * Adjust queue depth for all luns on the 1191 * Adjust queue depth for all luns on the
1180 * port. 1192 * port.
1181 */ 1193 */
1194 if (!ql2xqfulltracking)
1195 break;
1182 fcport->last_queue_full = jiffies; 1196 fcport->last_queue_full = jiffies;
1183 starget_for_each_device( 1197 starget_for_each_device(
1184 cp->device->sdev_target, fcport, 1198 cp->device->sdev_target, fcport,
@@ -1192,12 +1206,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1192 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1206 if (!(scsi_status & SS_SENSE_LEN_VALID))
1193 break; 1207 break;
1194 1208
1195 qla2x00_handle_sense(sp, sense_data, sense_len); 1209 qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1196 } else { 1210 } else {
1197 /* 1211 /*
1198 * If RISC reports underrun and target does not report 1212 * If RISC reports underrun and target does not report
1199 * it then we must have a lost frame, so tell upper 1213 * it then we must have a lost frame, so tell upper
1200 * layer to retry it by reporting a bus busy. 1214 * layer to retry it by reporting an error.
1201 */ 1215 */
1202 if (!(scsi_status & SS_RESIDUAL_UNDER)) { 1216 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1203 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " 1217 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
@@ -1207,7 +1221,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1207 cp->device->id, cp->device->lun, resid, 1221 cp->device->id, cp->device->lun, resid,
1208 scsi_bufflen(cp))); 1222 scsi_bufflen(cp)));
1209 1223
1210 cp->result = DID_BUS_BUSY << 16; 1224 cp->result = DID_ERROR << 16;
1211 break; 1225 break;
1212 } 1226 }
1213 1227
@@ -1334,7 +1348,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1334 } 1348 }
1335 1349
1336 /* Place command on done queue. */ 1350 /* Place command on done queue. */
1337 if (vha->status_srb == NULL) 1351 if (rsp->status_srb == NULL)
1338 qla2x00_sp_compl(ha, sp); 1352 qla2x00_sp_compl(ha, sp);
1339} 1353}
1340 1354
@@ -1346,11 +1360,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1346 * Extended sense data. 1360 * Extended sense data.
1347 */ 1361 */
1348static void 1362static void
1349qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt) 1363qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1350{ 1364{
1351 uint8_t sense_sz = 0; 1365 uint8_t sense_sz = 0;
1352 struct qla_hw_data *ha = vha->hw; 1366 struct qla_hw_data *ha = rsp->hw;
1353 srb_t *sp = vha->status_srb; 1367 srb_t *sp = rsp->status_srb;
1354 struct scsi_cmnd *cp; 1368 struct scsi_cmnd *cp;
1355 1369
1356 if (sp != NULL && sp->request_sense_length != 0) { 1370 if (sp != NULL && sp->request_sense_length != 0) {
@@ -1362,7 +1376,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1362 "cmd is NULL: already returned to OS (sp=%p)\n", 1376 "cmd is NULL: already returned to OS (sp=%p)\n",
1363 sp); 1377 sp);
1364 1378
1365 vha->status_srb = NULL; 1379 rsp->status_srb = NULL;
1366 return; 1380 return;
1367 } 1381 }
1368 1382
@@ -1383,7 +1397,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1383 1397
1384 /* Place command on done queue. */ 1398 /* Place command on done queue. */
1385 if (sp->request_sense_length == 0) { 1399 if (sp->request_sense_length == 0) {
1386 vha->status_srb = NULL; 1400 rsp->status_srb = NULL;
1387 qla2x00_sp_compl(ha, sp); 1401 qla2x00_sp_compl(ha, sp);
1388 } 1402 }
1389 } 1403 }
@@ -1399,7 +1413,9 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1399{ 1413{
1400 srb_t *sp; 1414 srb_t *sp;
1401 struct qla_hw_data *ha = vha->hw; 1415 struct qla_hw_data *ha = vha->hw;
1402 struct req_que *req = rsp->req; 1416 uint32_t handle = LSW(pkt->handle);
1417 uint16_t que = MSW(pkt->handle);
1418 struct req_que *req = ha->req_q_map[que];
1403#if defined(QL_DEBUG_LEVEL_2) 1419#if defined(QL_DEBUG_LEVEL_2)
1404 if (pkt->entry_status & RF_INV_E_ORDER) 1420 if (pkt->entry_status & RF_INV_E_ORDER)
1405 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); 1421 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
@@ -1417,14 +1433,14 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1417#endif 1433#endif
1418 1434
1419 /* Validate handle. */ 1435 /* Validate handle. */
1420 if (pkt->handle < MAX_OUTSTANDING_COMMANDS) 1436 if (handle < MAX_OUTSTANDING_COMMANDS)
1421 sp = req->outstanding_cmds[pkt->handle]; 1437 sp = req->outstanding_cmds[handle];
1422 else 1438 else
1423 sp = NULL; 1439 sp = NULL;
1424 1440
1425 if (sp) { 1441 if (sp) {
1426 /* Free outstanding command slot. */ 1442 /* Free outstanding command slot. */
1427 req->outstanding_cmds[pkt->handle] = NULL; 1443 req->outstanding_cmds[handle] = NULL;
1428 1444
1429 /* Bad payload or header */ 1445 /* Bad payload or header */
1430 if (pkt->entry_status & 1446 if (pkt->entry_status &
@@ -1486,13 +1502,10 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1486 * qla24xx_process_response_queue() - Process response queue entries. 1502 * qla24xx_process_response_queue() - Process response queue entries.
1487 * @ha: SCSI driver HA context 1503 * @ha: SCSI driver HA context
1488 */ 1504 */
1489void 1505void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1490qla24xx_process_response_queue(struct rsp_que *rsp) 1506 struct rsp_que *rsp)
1491{ 1507{
1492 struct sts_entry_24xx *pkt; 1508 struct sts_entry_24xx *pkt;
1493 struct scsi_qla_host *vha;
1494
1495 vha = qla2x00_get_rsp_host(rsp);
1496 1509
1497 if (!vha->flags.online) 1510 if (!vha->flags.online)
1498 return; 1511 return;
@@ -1523,7 +1536,7 @@ qla24xx_process_response_queue(struct rsp_que *rsp)
1523 qla2x00_status_entry(vha, rsp, pkt); 1536 qla2x00_status_entry(vha, rsp, pkt);
1524 break; 1537 break;
1525 case STATUS_CONT_TYPE: 1538 case STATUS_CONT_TYPE:
1526 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); 1539 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1527 break; 1540 break;
1528 case VP_RPT_ID_IOCB_TYPE: 1541 case VP_RPT_ID_IOCB_TYPE:
1529 qla24xx_report_id_acquisition(vha, 1542 qla24xx_report_id_acquisition(vha,
@@ -1626,7 +1639,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1626 status = 0; 1639 status = 0;
1627 1640
1628 spin_lock(&ha->hardware_lock); 1641 spin_lock(&ha->hardware_lock);
1629 vha = qla2x00_get_rsp_host(rsp); 1642 vha = pci_get_drvdata(ha->pdev);
1630 for (iter = 50; iter--; ) { 1643 for (iter = 50; iter--; ) {
1631 stat = RD_REG_DWORD(&reg->host_status); 1644 stat = RD_REG_DWORD(&reg->host_status);
1632 if (stat & HSRX_RISC_PAUSED) { 1645 if (stat & HSRX_RISC_PAUSED) {
@@ -1664,7 +1677,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1664 break; 1677 break;
1665 case 0x13: 1678 case 0x13:
1666 case 0x14: 1679 case 0x14:
1667 qla24xx_process_response_queue(rsp); 1680 qla24xx_process_response_queue(vha, rsp);
1668 break; 1681 break;
1669 default: 1682 default:
1670 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1683 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1692,6 +1705,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1692 struct qla_hw_data *ha; 1705 struct qla_hw_data *ha;
1693 struct rsp_que *rsp; 1706 struct rsp_que *rsp;
1694 struct device_reg_24xx __iomem *reg; 1707 struct device_reg_24xx __iomem *reg;
1708 struct scsi_qla_host *vha;
1695 1709
1696 rsp = (struct rsp_que *) dev_id; 1710 rsp = (struct rsp_que *) dev_id;
1697 if (!rsp) { 1711 if (!rsp) {
@@ -1704,7 +1718,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1704 1718
1705 spin_lock_irq(&ha->hardware_lock); 1719 spin_lock_irq(&ha->hardware_lock);
1706 1720
1707 qla24xx_process_response_queue(rsp); 1721 vha = qla25xx_get_host(rsp);
1722 qla24xx_process_response_queue(vha, rsp);
1708 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1723 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1709 1724
1710 spin_unlock_irq(&ha->hardware_lock); 1725 spin_unlock_irq(&ha->hardware_lock);
@@ -1717,7 +1732,6 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
1717{ 1732{
1718 struct qla_hw_data *ha; 1733 struct qla_hw_data *ha;
1719 struct rsp_que *rsp; 1734 struct rsp_que *rsp;
1720 struct device_reg_24xx __iomem *reg;
1721 1735
1722 rsp = (struct rsp_que *) dev_id; 1736 rsp = (struct rsp_que *) dev_id;
1723 if (!rsp) { 1737 if (!rsp) {
@@ -1726,13 +1740,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
1726 return IRQ_NONE; 1740 return IRQ_NONE;
1727 } 1741 }
1728 ha = rsp->hw; 1742 ha = rsp->hw;
1729 reg = &ha->iobase->isp24;
1730 1743
1731 spin_lock_irq(&ha->hardware_lock); 1744 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
1732
1733 qla24xx_process_response_queue(rsp);
1734
1735 spin_unlock_irq(&ha->hardware_lock);
1736 1745
1737 return IRQ_HANDLED; 1746 return IRQ_HANDLED;
1738} 1747}
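
The multiqueue MSI-X handler no longer drains the response ring under hardware_lock in interrupt context; it hands the ring to a work item pinned to the queue's processor with queue_work_on(rsp->id - 1, ha->wq, &rsp->q_work), so each response queue completes on its own CPU. A userspace model of that hand-off using pthreads (queue_work_on itself is kernel API; the thread here plays the per-CPU worker; compile with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  kick = PTHREAD_COND_INITIALIZER;
    static int pending;

    /* The "work item": drains the response queue outside IRQ context. */
    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!pending)
            pthread_cond_wait(&kick, &lock);
        pending = 0;
        pthread_mutex_unlock(&lock);
        printf("response queue drained in worker\n");
        return NULL;
    }

    /* The "interrupt handler": just schedules the work and returns. */
    static void msix_rsp_q(void)
    {
        pthread_mutex_lock(&lock);
        pending = 1;
        pthread_cond_signal(&kick);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, worker, NULL);
        msix_rsp_q();
        pthread_join(t, NULL);
        return 0;
    }
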
@@ -1760,7 +1769,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1760 status = 0; 1769 status = 0;
1761 1770
1762 spin_lock_irq(&ha->hardware_lock); 1771 spin_lock_irq(&ha->hardware_lock);
1763 vha = qla2x00_get_rsp_host(rsp); 1772 vha = pci_get_drvdata(ha->pdev);
1764 do { 1773 do {
1765 stat = RD_REG_DWORD(&reg->host_status); 1774 stat = RD_REG_DWORD(&reg->host_status);
1766 if (stat & HSRX_RISC_PAUSED) { 1775 if (stat & HSRX_RISC_PAUSED) {
@@ -1798,7 +1807,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1798 break; 1807 break;
1799 case 0x13: 1808 case 0x13:
1800 case 0x14: 1809 case 0x14:
1801 qla24xx_process_response_queue(rsp); 1810 qla24xx_process_response_queue(vha, rsp);
1802 break; 1811 break;
1803 default: 1812 default:
1804 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1813 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1822,31 +1831,14 @@ qla24xx_msix_default(int irq, void *dev_id)
1822/* Interrupt handling helpers. */ 1831/* Interrupt handling helpers. */
1823 1832
1824struct qla_init_msix_entry { 1833struct qla_init_msix_entry {
1825 uint16_t entry;
1826 uint16_t index;
1827 const char *name; 1834 const char *name;
1828 irq_handler_t handler; 1835 irq_handler_t handler;
1829}; 1836};
1830 1837
1831static struct qla_init_msix_entry base_queue = { 1838static struct qla_init_msix_entry msix_entries[3] = {
1832 .entry = 0, 1839 { "qla2xxx (default)", qla24xx_msix_default },
1833 .index = 0, 1840 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
1834 .name = "qla2xxx (default)", 1841 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
1835 .handler = qla24xx_msix_default,
1836};
1837
1838static struct qla_init_msix_entry base_rsp_queue = {
1839 .entry = 1,
1840 .index = 1,
1841 .name = "qla2xxx (rsp_q)",
1842 .handler = qla24xx_msix_rsp_q,
1843};
1844
1845static struct qla_init_msix_entry multi_rsp_queue = {
1846 .entry = 1,
1847 .index = 1,
1848 .name = "qla2xxx (multi_q)",
1849 .handler = qla25xx_msix_rsp_q,
1850}; 1842};
1851 1843
1852static void 1844static void
@@ -1873,7 +1865,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
1873 int i, ret; 1865 int i, ret;
1874 struct msix_entry *entries; 1866 struct msix_entry *entries;
1875 struct qla_msix_entry *qentry; 1867 struct qla_msix_entry *qentry;
1876 struct qla_init_msix_entry *msix_queue;
1877 1868
1878 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 1869 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
1879 GFP_KERNEL); 1870 GFP_KERNEL);
@@ -1900,7 +1891,7 @@ msix_failed:
1900 ha->msix_count, ret); 1891 ha->msix_count, ret);
1901 goto msix_out; 1892 goto msix_out;
1902 } 1893 }
1903 ha->max_queues = ha->msix_count - 1; 1894 ha->max_rsp_queues = ha->msix_count - 1;
1904 } 1895 }
1905 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 1896 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
1906 ha->msix_count, GFP_KERNEL); 1897 ha->msix_count, GFP_KERNEL);
@@ -1918,45 +1909,27 @@ msix_failed:
1918 qentry->rsp = NULL; 1909 qentry->rsp = NULL;
1919 } 1910 }
1920 1911
1921 /* Enable MSI-X for AENs for queue 0 */ 1912 /* Enable MSI-X vectors for the base queue */
1922 qentry = &ha->msix_entries[0]; 1913 for (i = 0; i < 2; i++) {
1923 ret = request_irq(qentry->vector, base_queue.handler, 0, 1914 qentry = &ha->msix_entries[i];
1924 base_queue.name, rsp); 1915 ret = request_irq(qentry->vector, msix_entries[i].handler,
1925 if (ret) { 1916 0, msix_entries[i].name, rsp);
1926 qla_printk(KERN_WARNING, ha, 1917 if (ret) {
1918 qla_printk(KERN_WARNING, ha,
1927 "MSI-X: Unable to register handler -- %x/%d.\n", 1919 "MSI-X: Unable to register handler -- %x/%d.\n",
1928 qentry->vector, ret); 1920 qentry->vector, ret);
1929 qla24xx_disable_msix(ha); 1921 qla24xx_disable_msix(ha);
1930 goto msix_out; 1922 ha->mqenable = 0;
1923 goto msix_out;
1924 }
1925 qentry->have_irq = 1;
1926 qentry->rsp = rsp;
1927 rsp->msix = qentry;
1931 } 1928 }
1932 qentry->have_irq = 1;
1933 qentry->rsp = rsp;
1934 1929
1935 /* Enable MSI-X vector for response queue update for queue 0 */ 1930 /* Enable MSI-X vector for response queue update for queue 0 */
1936 if (ha->max_queues > 1 && ha->mqiobase) { 1931 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
1937 ha->mqenable = 1; 1932 ha->mqenable = 1;
1938 msix_queue = &multi_rsp_queue;
1939 qla_printk(KERN_INFO, ha,
1940 "MQ enabled, Number of Queue Resources: %d \n",
1941 ha->max_queues);
1942 } else {
1943 ha->mqenable = 0;
1944 msix_queue = &base_rsp_queue;
1945 }
1946
1947 qentry = &ha->msix_entries[1];
1948 ret = request_irq(qentry->vector, msix_queue->handler, 0,
1949 msix_queue->name, rsp);
1950 if (ret) {
1951 qla_printk(KERN_WARNING, ha,
1952 "MSI-X: Unable to register handler -- %x/%d.\n",
1953 qentry->vector, ret);
1954 qla24xx_disable_msix(ha);
1955 ha->mqenable = 0;
1956 goto msix_out;
1957 }
1958 qentry->have_irq = 1;
1959 qentry->rsp = rsp;
1960 1933
1961msix_out: 1934msix_out:
1962 kfree(entries); 1935 kfree(entries);
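
The three ad-hoc qla_init_msix_entry structs collapse into a single msix_entries[3] table, and registration becomes a loop over the first two slots (default handler plus base response queue), with slot 2 reserved for extra response queues via qla25xx_request_irq below. A sketch of the table-driven registration; request_irq is stubbed so only the loop's failure-unwind shape is shown:

    #include <stdio.h>

    typedef int (*irq_handler_t)(int irq, void *dev);

    struct init_msix_entry {
        const char *name;
        irq_handler_t handler;
    };

    static int default_h(int i, void *d) { (void)i; (void)d; return 0; }
    static int rsp_q_h(int i, void *d)   { (void)i; (void)d; return 0; }
    static int multiq_h(int i, void *d)  { (void)i; (void)d; return 0; }

    static struct init_msix_entry msix_entries[3] = {
        { "qla2xxx (default)", default_h },
        { "qla2xxx (rsp_q)",   rsp_q_h },
        { "qla2xxx (multiq)",  multiq_h },  /* used per extra queue later */
    };

    /* Stub: pretend every vector registers cleanly. */
    static int request_irq_stub(int vec, irq_handler_t h, const char *name)
    {
        printf("vector %d -> %s\n", vec, name);
        (void)h;
        return 0;
    }

    int main(void)
    {
        for (int i = 0; i < 2; i++) {
            if (request_irq_stub(i, msix_entries[i].handler,
                                 msix_entries[i].name))
                return 1;   /* real code disables MSI-X and bails here */
        }
        return 0;
    }
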
@@ -2063,35 +2036,11 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
2063 } 2036 }
2064} 2037}
2065 2038
2066static struct scsi_qla_host *
2067qla2x00_get_rsp_host(struct rsp_que *rsp)
2068{
2069 srb_t *sp;
2070 struct qla_hw_data *ha = rsp->hw;
2071 struct scsi_qla_host *vha = NULL;
2072 struct sts_entry_24xx *pkt;
2073 struct req_que *req;
2074
2075 if (rsp->id) {
2076 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2077 req = rsp->req;
2078 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2079 sp = req->outstanding_cmds[pkt->handle];
2080 if (sp)
2081 vha = sp->fcport->vha;
2082 }
2083 }
2084 if (!vha)
2085 /* handle it in base queue */
2086 vha = pci_get_drvdata(ha->pdev);
2087
2088 return vha;
2089}
2090 2039
2091int qla25xx_request_irq(struct rsp_que *rsp) 2040int qla25xx_request_irq(struct rsp_que *rsp)
2092{ 2041{
2093 struct qla_hw_data *ha = rsp->hw; 2042 struct qla_hw_data *ha = rsp->hw;
2094 struct qla_init_msix_entry *intr = &multi_rsp_queue; 2043 struct qla_init_msix_entry *intr = &msix_entries[2];
2095 struct qla_msix_entry *msix = rsp->msix; 2044 struct qla_msix_entry *msix = rsp->msix;
2096 int ret; 2045 int ret;
2097 2046
@@ -2106,3 +2055,30 @@ int qla25xx_request_irq(struct rsp_que *rsp)
2106 msix->rsp = rsp; 2055 msix->rsp = rsp;
2107 return ret; 2056 return ret;
2108} 2057}
2058
2059struct scsi_qla_host *
2060qla25xx_get_host(struct rsp_que *rsp)
2061{
2062 srb_t *sp;
2063 struct qla_hw_data *ha = rsp->hw;
2064 struct scsi_qla_host *vha = NULL;
2065 struct sts_entry_24xx *pkt;
2066 struct req_que *req;
2067 uint16_t que;
2068 uint32_t handle;
2069
2070 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2071 que = MSW(pkt->handle);
2072 handle = (uint32_t) LSW(pkt->handle);
2073 req = ha->req_q_map[que];
2074 if (handle < MAX_OUTSTANDING_COMMANDS) {
2075 sp = req->outstanding_cmds[handle];
2076 if (sp)
2077 return sp->fcport->vha;
2078 else
2079 goto base_que;
2080 }
2081base_que:
2082 vha = pci_get_drvdata(ha->pdev);
2083 return vha;
2084}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index e67c1660bf4..451ece0760b 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -408,7 +408,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
408 * Context: 408 * Context:
409 * Kernel context. 409 * Kernel context.
410 */ 410 */
411void 411int
412qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, 412qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
413 uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi, 413 uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
414 uint32_t *mpi_caps, uint8_t *phy) 414 uint32_t *mpi_caps, uint8_t *phy)
@@ -427,6 +427,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
427 mcp->flags = 0; 427 mcp->flags = 0;
428 mcp->tov = MBX_TOV_SECONDS; 428 mcp->tov = MBX_TOV_SECONDS;
429 rval = qla2x00_mailbox_command(vha, mcp); 429 rval = qla2x00_mailbox_command(vha, mcp);
430 if (rval != QLA_SUCCESS)
431 goto failed;
430 432
431 /* Return mailbox data. */ 433 /* Return mailbox data. */
432 *major = mcp->mb[1]; 434 *major = mcp->mb[1];
@@ -446,7 +448,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
446 phy[1] = mcp->mb[9] >> 8; 448 phy[1] = mcp->mb[9] >> 8;
447 phy[2] = mcp->mb[9] & 0xff; 449 phy[2] = mcp->mb[9] & 0xff;
448 } 450 }
449 451failed:
450 if (rval != QLA_SUCCESS) { 452 if (rval != QLA_SUCCESS) {
451 /*EMPTY*/ 453 /*EMPTY*/
452 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 454 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
@@ -455,6 +457,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
455 /*EMPTY*/ 457 /*EMPTY*/
456 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 458 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
457 } 459 }
460 return rval;
458} 461}
459 462
460/* 463/*
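
The qla2x00_get_fw_version() change above converts a void routine into one that returns the mailbox status, short-circuiting past the result unpacking with goto failed. A compilable standalone sketch of that error-path shape (the function names are stand-ins, not the driver's):

#include <stdio.h>

#define QLA_SUCCESS          0
#define QLA_FUNCTION_FAILED  1

static int mailbox_command(void) { return QLA_FUNCTION_FAILED; }

static int get_fw_version(int *major)
{
	int rval = mailbox_command();
	if (rval != QLA_SUCCESS)
		goto failed;

	*major = 4;			/* unpack results only on success */
failed:
	if (rval != QLA_SUCCESS)
		printf("failed=%d\n", rval);
	return rval;
}

int main(void)
{
	int major = 0;
	return get_fw_version(&major);
}
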
@@ -748,20 +751,20 @@ qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
748 * Kernel context. 751 * Kernel context.
749 */ 752 */
750int 753int
751qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) 754qla2x00_abort_command(srb_t *sp)
752{ 755{
753 unsigned long flags = 0; 756 unsigned long flags = 0;
754 fc_port_t *fcport;
755 int rval; 757 int rval;
756 uint32_t handle = 0; 758 uint32_t handle = 0;
757 mbx_cmd_t mc; 759 mbx_cmd_t mc;
758 mbx_cmd_t *mcp = &mc; 760 mbx_cmd_t *mcp = &mc;
761 fc_port_t *fcport = sp->fcport;
762 scsi_qla_host_t *vha = fcport->vha;
759 struct qla_hw_data *ha = vha->hw; 763 struct qla_hw_data *ha = vha->hw;
764 struct req_que *req = vha->req;
760 765
761 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no)); 766 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no));
762 767
763 fcport = sp->fcport;
764
765 spin_lock_irqsave(&ha->hardware_lock, flags); 768 spin_lock_irqsave(&ha->hardware_lock, flags);
766 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 769 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
767 if (req->outstanding_cmds[handle] == sp) 770 if (req->outstanding_cmds[handle] == sp)
@@ -800,7 +803,7 @@ qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
800} 803}
801 804
802int 805int
803qla2x00_abort_target(struct fc_port *fcport, unsigned int l) 806qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
804{ 807{
805 int rval, rval2; 808 int rval, rval2;
806 mbx_cmd_t mc; 809 mbx_cmd_t mc;
@@ -813,8 +816,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
813 816
814 l = l; 817 l = l;
815 vha = fcport->vha; 818 vha = fcport->vha;
816 req = vha->hw->req_q_map[0]; 819 req = vha->hw->req_q_map[tag];
817 rsp = vha->hw->rsp_q_map[0]; 820 rsp = vha->hw->rsp_q_map[tag];
818 mcp->mb[0] = MBC_ABORT_TARGET; 821 mcp->mb[0] = MBC_ABORT_TARGET;
819 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; 822 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
820 if (HAS_EXTENDED_IDS(vha->hw)) { 823 if (HAS_EXTENDED_IDS(vha->hw)) {
@@ -850,7 +853,7 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
850} 853}
851 854
852int 855int
853qla2x00_lun_reset(struct fc_port *fcport, unsigned int l) 856qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
854{ 857{
855 int rval, rval2; 858 int rval, rval2;
856 mbx_cmd_t mc; 859 mbx_cmd_t mc;
@@ -862,8 +865,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
862 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); 865 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
863 866
864 vha = fcport->vha; 867 vha = fcport->vha;
865 req = vha->hw->req_q_map[0]; 868 req = vha->hw->req_q_map[tag];
866 rsp = vha->hw->rsp_q_map[0]; 869 rsp = vha->hw->rsp_q_map[tag];
867 mcp->mb[0] = MBC_LUN_RESET; 870 mcp->mb[0] = MBC_LUN_RESET;
868 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 871 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
869 if (HAS_EXTENDED_IDS(vha->hw)) 872 if (HAS_EXTENDED_IDS(vha->hw))
@@ -931,6 +934,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
931 mcp->mb[9] = vha->vp_idx; 934 mcp->mb[9] = vha->vp_idx;
932 mcp->out_mb = MBX_9|MBX_0; 935 mcp->out_mb = MBX_9|MBX_0;
933 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 936 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
937 if (IS_QLA81XX(vha->hw))
938 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
934 mcp->tov = MBX_TOV_SECONDS; 939 mcp->tov = MBX_TOV_SECONDS;
935 mcp->flags = 0; 940 mcp->flags = 0;
936 rval = qla2x00_mailbox_command(vha, mcp); 941 rval = qla2x00_mailbox_command(vha, mcp);
@@ -952,9 +957,19 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
952 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n", 957 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n",
953 vha->host_no, rval)); 958 vha->host_no, rval));
954 } else { 959 } else {
955 /*EMPTY*/
956 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", 960 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
957 vha->host_no)); 961 vha->host_no));
962
963 if (IS_QLA81XX(vha->hw)) {
964 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
965 vha->fcoe_fcf_idx = mcp->mb[10];
966 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
967 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
968 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
969 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
970 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
971 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
972 }
958 } 973 }
959 974
 960 	return rval; 975 	return rval;
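
The QLA81XX branch added above unpacks the FCoE VLAN id, FCF index and VN-port MAC from the extra mailbox registers, two MAC bytes per 16-bit word. A standalone sketch of the byte unpacking, with assumed register values for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example mailbox contents (assumed values, not real registers). */
	uint16_t mb11 = 0x4455, mb12 = 0x2233, mb13 = 0x0011;
	uint8_t mac[6];

	mac[5] = mb11 >> 8;	mac[4] = mb11 & 0xff;
	mac[3] = mb12 >> 8;	mac[2] = mb12 & 0xff;
	mac[1] = mb13 >> 8;	mac[0] = mb13 & 0xff;

	for (int i = 0; i < 6; i++)
		printf("mac[%d]=%02x\n", i, mac[i]);
	return 0;
}
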
@@ -1252,7 +1267,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1252 1267
1253 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1268 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1254 mcp->out_mb = MBX_0; 1269 mcp->out_mb = MBX_0;
1255 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1270 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1256 mcp->tov = MBX_TOV_SECONDS; 1271 mcp->tov = MBX_TOV_SECONDS;
1257 mcp->flags = 0; 1272 mcp->flags = 0;
1258 rval = qla2x00_mailbox_command(vha, mcp); 1273 rval = qla2x00_mailbox_command(vha, mcp);
@@ -1261,6 +1276,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1261 states[0] = mcp->mb[1]; 1276 states[0] = mcp->mb[1];
1262 states[1] = mcp->mb[2]; 1277 states[1] = mcp->mb[2];
1263 states[2] = mcp->mb[3]; 1278 states[2] = mcp->mb[3];
1279 states[3] = mcp->mb[4];
1280 states[4] = mcp->mb[5];
1264 1281
1265 if (rval != QLA_SUCCESS) { 1282 if (rval != QLA_SUCCESS) {
1266 /*EMPTY*/ 1283 /*EMPTY*/
@@ -1480,9 +1497,17 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1480 dma_addr_t lg_dma; 1497 dma_addr_t lg_dma;
1481 uint32_t iop[2]; 1498 uint32_t iop[2];
1482 struct qla_hw_data *ha = vha->hw; 1499 struct qla_hw_data *ha = vha->hw;
1500 struct req_que *req;
1501 struct rsp_que *rsp;
1483 1502
1484 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1503 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1485 1504
1505 if (ql2xmultique_tag)
1506 req = ha->req_q_map[0];
1507 else
1508 req = vha->req;
1509 rsp = req->rsp;
1510
1486 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1511 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1487 if (lg == NULL) { 1512 if (lg == NULL) {
1488 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", 1513 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n",
@@ -1493,6 +1518,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1493 1518
1494 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1519 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1495 lg->entry_count = 1; 1520 lg->entry_count = 1;
1521 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1496 lg->nport_handle = cpu_to_le16(loop_id); 1522 lg->nport_handle = cpu_to_le16(loop_id);
1497 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI); 1523 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
1498 if (opt & BIT_0) 1524 if (opt & BIT_0)
@@ -1741,6 +1767,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1741 struct logio_entry_24xx *lg; 1767 struct logio_entry_24xx *lg;
1742 dma_addr_t lg_dma; 1768 dma_addr_t lg_dma;
1743 struct qla_hw_data *ha = vha->hw; 1769 struct qla_hw_data *ha = vha->hw;
1770 struct req_que *req;
1771 struct rsp_que *rsp;
1744 1772
1745 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1773 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1746 1774
@@ -1752,8 +1780,14 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1752 } 1780 }
1753 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1781 memset(lg, 0, sizeof(struct logio_entry_24xx));
1754 1782
1783 if (ql2xmaxqueues > 1)
1784 req = ha->req_q_map[0];
1785 else
1786 req = vha->req;
1787 rsp = req->rsp;
1755 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1788 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1756 lg->entry_count = 1; 1789 lg->entry_count = 1;
1790 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1757 lg->nport_handle = cpu_to_le16(loop_id); 1791 lg->nport_handle = cpu_to_le16(loop_id);
1758 lg->control_flags = 1792 lg->control_flags =
1759 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 1793 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
@@ -1864,9 +1898,6 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
1864 mbx_cmd_t mc; 1898 mbx_cmd_t mc;
1865 mbx_cmd_t *mcp = &mc; 1899 mbx_cmd_t *mcp = &mc;
1866 1900
1867 if (IS_QLA81XX(vha->hw))
1868 return QLA_SUCCESS;
1869
1870 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", 1901 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
1871 vha->host_no)); 1902 vha->host_no));
1872 1903
@@ -2195,21 +2226,21 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2195} 2226}
2196 2227
2197int 2228int
2198qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) 2229qla24xx_abort_command(srb_t *sp)
2199{ 2230{
2200 int rval; 2231 int rval;
2201 fc_port_t *fcport;
2202 unsigned long flags = 0; 2232 unsigned long flags = 0;
2203 2233
2204 struct abort_entry_24xx *abt; 2234 struct abort_entry_24xx *abt;
2205 dma_addr_t abt_dma; 2235 dma_addr_t abt_dma;
2206 uint32_t handle; 2236 uint32_t handle;
2237 fc_port_t *fcport = sp->fcport;
2238 struct scsi_qla_host *vha = fcport->vha;
2207 struct qla_hw_data *ha = vha->hw; 2239 struct qla_hw_data *ha = vha->hw;
2240 struct req_que *req = vha->req;
2208 2241
2209 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2242 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2210 2243
2211 fcport = sp->fcport;
2212
2213 spin_lock_irqsave(&ha->hardware_lock, flags); 2244 spin_lock_irqsave(&ha->hardware_lock, flags);
2214 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2245 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
2215 if (req->outstanding_cmds[handle] == sp) 2246 if (req->outstanding_cmds[handle] == sp)
@@ -2231,6 +2262,7 @@ qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
2231 2262
2232 abt->entry_type = ABORT_IOCB_TYPE; 2263 abt->entry_type = ABORT_IOCB_TYPE;
2233 abt->entry_count = 1; 2264 abt->entry_count = 1;
2265 abt->handle = MAKE_HANDLE(req->id, abt->handle);
2234 abt->nport_handle = cpu_to_le16(fcport->loop_id); 2266 abt->nport_handle = cpu_to_le16(fcport->loop_id);
2235 abt->handle_to_abort = handle; 2267 abt->handle_to_abort = handle;
2236 abt->port_id[0] = fcport->d_id.b.al_pa; 2268 abt->port_id[0] = fcport->d_id.b.al_pa;
@@ -2272,7 +2304,7 @@ struct tsk_mgmt_cmd {
2272 2304
2273static int 2305static int
2274__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, 2306__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2275 unsigned int l) 2307 unsigned int l, int tag)
2276{ 2308{
2277 int rval, rval2; 2309 int rval, rval2;
2278 struct tsk_mgmt_cmd *tsk; 2310 struct tsk_mgmt_cmd *tsk;
@@ -2286,8 +2318,11 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2286 2318
2287 vha = fcport->vha; 2319 vha = fcport->vha;
2288 ha = vha->hw; 2320 ha = vha->hw;
2289 req = ha->req_q_map[0]; 2321 req = vha->req;
2290 rsp = ha->rsp_q_map[0]; 2322 if (ql2xmultique_tag)
2323 rsp = ha->rsp_q_map[tag + 1];
2324 else
2325 rsp = req->rsp;
2291 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 2326 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2292 if (tsk == NULL) { 2327 if (tsk == NULL) {
2293 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " 2328 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
@@ -2298,6 +2333,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2298 2333
2299 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 2334 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
2300 tsk->p.tsk.entry_count = 1; 2335 tsk->p.tsk.entry_count = 1;
2336 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
2301 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 2337 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
2302 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 2338 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2303 tsk->p.tsk.control_flags = cpu_to_le32(type); 2339 tsk->p.tsk.control_flags = cpu_to_le32(type);
@@ -2344,15 +2380,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2344} 2380}
2345 2381
2346int 2382int
2347qla24xx_abort_target(struct fc_port *fcport, unsigned int l) 2383qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
2348{ 2384{
2349 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l); 2385 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
2350} 2386}
2351 2387
2352int 2388int
2353qla24xx_lun_reset(struct fc_port *fcport, unsigned int l) 2389qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
2354{ 2390{
2355 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l); 2391 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
2356} 2392}
2357 2393
2358int 2394int
@@ -2446,6 +2482,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2446 if (rval != QLA_SUCCESS) { 2482 if (rval != QLA_SUCCESS) {
2447 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2483 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2448 vha->host_no, rval)); 2484 vha->host_no, rval));
2485 if (mcp->mb[0] == MBS_INVALID_COMMAND)
2486 rval = QLA_INVALID_COMMAND;
2449 } else { 2487 } else {
2450 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2488 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2451 } 2489 }
@@ -2717,8 +2755,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2717 if (vp_idx == 0) 2755 if (vp_idx == 0)
2718 return; 2756 return;
2719 2757
2720 if (MSB(stat) == 1) 2758 if (MSB(stat) == 1) {
2759 DEBUG2(printk("scsi(%ld): Could not acquire ID for "
2760 "VP[%d].\n", vha->host_no, vp_idx));
2721 return; 2761 return;
2762 }
2722 2763
2723 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) 2764 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
2724 if (vp_idx == vp->vp_idx) 2765 if (vp_idx == vp->vp_idx)
@@ -3141,6 +3182,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3141 WRT_REG_DWORD(&reg->req_q_in, 0); 3182 WRT_REG_DWORD(&reg->req_q_in, 0);
3142 WRT_REG_DWORD(&reg->req_q_out, 0); 3183 WRT_REG_DWORD(&reg->req_q_out, 0);
3143 } 3184 }
3185 req->req_q_in = &reg->req_q_in;
3186 req->req_q_out = &reg->req_q_out;
3144 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3187 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3145 3188
3146 rval = qla2x00_mailbox_command(vha, mcp); 3189 rval = qla2x00_mailbox_command(vha, mcp);
@@ -3167,7 +3210,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3167 mcp->mb[6] = MSW(MSD(rsp->dma)); 3210 mcp->mb[6] = MSW(MSD(rsp->dma));
3168 mcp->mb[7] = LSW(MSD(rsp->dma)); 3211 mcp->mb[7] = LSW(MSD(rsp->dma));
3169 mcp->mb[5] = rsp->length; 3212 mcp->mb[5] = rsp->length;
3170 mcp->mb[11] = rsp->vp_idx;
3171 mcp->mb[14] = rsp->msix->entry; 3213 mcp->mb[14] = rsp->msix->entry;
3172 mcp->mb[13] = rsp->rid; 3214 mcp->mb[13] = rsp->rid;
3173 3215
@@ -3179,7 +3221,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3179 mcp->mb[8] = 0; 3221 mcp->mb[8] = 0;
3180 /* que out ptr index */ 3222 /* que out ptr index */
3181 mcp->mb[9] = 0; 3223 mcp->mb[9] = 0;
3182 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7 3224 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
3183 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3225 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3184 mcp->in_mb = MBX_0; 3226 mcp->in_mb = MBX_0;
3185 mcp->flags = MBX_DMA_OUT; 3227 mcp->flags = MBX_DMA_OUT;
@@ -3384,7 +3426,7 @@ qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
3384 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3426 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
3385 vha->host_no, rval, mcp->mb[0])); 3427 vha->host_no, rval, mcp->mb[0]));
3386 } else { 3428 } else {
3387 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 3429 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3388 } 3430 }
3389 3431
3390 	return rval; 3432 	return rval;
@@ -3428,3 +3470,141 @@ qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
3428 3470
3429 return rval; 3471 return rval;
3430} 3472}
3473
3474int
3475qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3476 uint16_t size_in_bytes, uint16_t *actual_size)
3477{
3478 int rval;
3479 mbx_cmd_t mc;
3480 mbx_cmd_t *mcp = &mc;
3481
3482 if (!IS_QLA81XX(vha->hw))
3483 return QLA_FUNCTION_FAILED;
3484
3485 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3486
3487 mcp->mb[0] = MBC_GET_XGMAC_STATS;
3488 mcp->mb[2] = MSW(stats_dma);
3489 mcp->mb[3] = LSW(stats_dma);
3490 mcp->mb[6] = MSW(MSD(stats_dma));
3491 mcp->mb[7] = LSW(MSD(stats_dma));
3492 mcp->mb[8] = size_in_bytes >> 2;
3493 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3494 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3495 mcp->tov = MBX_TOV_SECONDS;
3496 mcp->flags = 0;
3497 rval = qla2x00_mailbox_command(vha, mcp);
3498
3499 if (rval != QLA_SUCCESS) {
3500 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
3501 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
3502 mcp->mb[0], mcp->mb[1], mcp->mb[2]));
3503 } else {
3504 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3505
3506 *actual_size = mcp->mb[2] << 2;
3507 }
3508
3509 return rval;
3510}
3511
3512int
3513qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3514 uint16_t size)
3515{
3516 int rval;
3517 mbx_cmd_t mc;
3518 mbx_cmd_t *mcp = &mc;
3519
3520 if (!IS_QLA81XX(vha->hw))
3521 return QLA_FUNCTION_FAILED;
3522
3523 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3524
3525 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
3526 mcp->mb[1] = 0;
3527 mcp->mb[2] = MSW(tlv_dma);
3528 mcp->mb[3] = LSW(tlv_dma);
3529 mcp->mb[6] = MSW(MSD(tlv_dma));
3530 mcp->mb[7] = LSW(MSD(tlv_dma));
3531 mcp->mb[8] = size;
3532 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3533 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3534 mcp->tov = MBX_TOV_SECONDS;
3535 mcp->flags = 0;
3536 rval = qla2x00_mailbox_command(vha, mcp);
3537
3538 if (rval != QLA_SUCCESS) {
3539 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
3540 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
3541 mcp->mb[0], mcp->mb[1], mcp->mb[2]));
3542 } else {
3543 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3544 }
3545
3546 return rval;
3547}
3548
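
Both new commands above (MBC_GET_XGMAC_STATS and MBC_GET_DCBX_PARAMS) hand the controller a 64-bit DMA address spread over four 16-bit mailbox registers: mb[2]/mb[3] carry the low dword, mb[6]/mb[7] the high dword. A standalone sketch of that split, with the LSW/MSW/MSD helpers mirrored locally:

#include <stdint.h>
#include <stdio.h>

#define LSW(x)  ((uint16_t)(x))
#define MSW(x)  ((uint16_t)(((uint32_t)(x)) >> 16))
#define LSD(x)  ((uint32_t)(x))
#define MSD(x)  ((uint32_t)(((uint64_t)(x)) >> 32))

int main(void)
{
	uint64_t dma = 0x1122334455667788ULL;	/* example bus address */

	/* Same register layout the two mailbox commands above use. */
	uint16_t mb2 = MSW(LSD(dma)), mb3 = LSW(LSD(dma));
	uint16_t mb6 = MSW(MSD(dma)), mb7 = LSW(MSD(dma));

	printf("mb2=%04x mb3=%04x mb6=%04x mb7=%04x\n", mb2, mb3, mb6, mb7);
	return 0;
}
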
3549int
3550qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3551{
3552 int rval;
3553 mbx_cmd_t mc;
3554 mbx_cmd_t *mcp = &mc;
3555
3556 if (!IS_FWI2_CAPABLE(vha->hw))
3557 return QLA_FUNCTION_FAILED;
3558
3559 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3560
3561 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
3562 mcp->mb[1] = LSW(risc_addr);
3563 mcp->mb[8] = MSW(risc_addr);
3564 mcp->out_mb = MBX_8|MBX_1|MBX_0;
3565 mcp->in_mb = MBX_3|MBX_2|MBX_0;
3566 mcp->tov = 30;
3567 mcp->flags = 0;
3568 rval = qla2x00_mailbox_command(vha, mcp);
3569 if (rval != QLA_SUCCESS) {
3570 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
3571 vha->host_no, rval, mcp->mb[0]));
3572 } else {
3573 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3574 *data = mcp->mb[3] << 16 | mcp->mb[2];
3575 }
3576
3577 return rval;
3578}
3579
3580int
3581qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3582{
3583 int rval;
3584 mbx_cmd_t mc;
3585 mbx_cmd_t *mcp = &mc;
3586
3587 if (!IS_FWI2_CAPABLE(vha->hw))
3588 return QLA_FUNCTION_FAILED;
3589
3590 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3591
3592 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
3593 mcp->mb[1] = LSW(risc_addr);
3594 mcp->mb[2] = LSW(data);
3595 mcp->mb[3] = MSW(data);
3596 mcp->mb[8] = MSW(risc_addr);
3597 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
3598 mcp->in_mb = MBX_0;
3599 mcp->tov = 30;
3600 mcp->flags = 0;
3601 rval = qla2x00_mailbox_command(vha, mcp);
3602 if (rval != QLA_SUCCESS) {
3603 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
3604 vha->host_no, rval, mcp->mb[0]));
3605 } else {
3606 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
3607 }
3608
3609 return rval;
3610}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 51716c7e300..650bcef08f2 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -398,9 +398,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
398 398
399 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); 399 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
400 400
401 memset(vha->req_ques, 0, sizeof(vha->req_ques)); 401 vha->req = base_vha->req;
402 vha->req_ques[0] = ha->req_q_map[0]->id; 402 host->can_queue = base_vha->req->length + 128;
403 host->can_queue = ha->req_q_map[0]->length + 128;
404 host->this_id = 255; 403 host->this_id = 255;
405 host->cmd_per_lun = 3; 404 host->cmd_per_lun = 3;
406 host->max_cmd_len = MAX_CMDSZ; 405 host->max_cmd_len = MAX_CMDSZ;
@@ -515,76 +514,53 @@ int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos)
515 514
516/* Delete all queues for a given vhost */ 515/* Delete all queues for a given vhost */
517int 516int
518qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no) 517qla25xx_delete_queues(struct scsi_qla_host *vha)
519{ 518{
520 int cnt, ret = 0; 519 int cnt, ret = 0;
521 struct req_que *req = NULL; 520 struct req_que *req = NULL;
522 struct rsp_que *rsp = NULL; 521 struct rsp_que *rsp = NULL;
523 struct qla_hw_data *ha = vha->hw; 522 struct qla_hw_data *ha = vha->hw;
524 523
525 if (que_no) { 524 /* Delete request queues */
526 /* Delete request queue */ 525 for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
527 req = ha->req_q_map[que_no]; 526 req = ha->req_q_map[cnt];
528 if (req) { 527 if (req) {
529 rsp = req->rsp;
530 ret = qla25xx_delete_req_que(vha, req); 528 ret = qla25xx_delete_req_que(vha, req);
531 if (ret != QLA_SUCCESS) { 529 if (ret != QLA_SUCCESS) {
532 qla_printk(KERN_WARNING, ha, 530 qla_printk(KERN_WARNING, ha,
533 "Couldn't delete req que %d\n", req->id); 531 "Couldn't delete req que %d\n",
532 req->id);
534 return ret; 533 return ret;
535 } 534 }
536 /* Delete associated response queue */
537 if (rsp) {
538 ret = qla25xx_delete_rsp_que(vha, rsp);
539 if (ret != QLA_SUCCESS) {
540 qla_printk(KERN_WARNING, ha,
541 "Couldn't delete rsp que %d\n",
542 rsp->id);
543 return ret;
544 }
545 }
546 } 535 }
547 } else { /* delete all queues of this host */ 536 }
548 for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) { 537
549 /* Delete request queues */ 538 /* Delete response queues */
550 req = ha->req_q_map[vha->req_ques[cnt]]; 539 for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
551 if (req && req->id) { 540 rsp = ha->rsp_q_map[cnt];
552 rsp = req->rsp; 541 if (rsp) {
553 ret = qla25xx_delete_req_que(vha, req); 542 ret = qla25xx_delete_rsp_que(vha, rsp);
554 if (ret != QLA_SUCCESS) { 543 if (ret != QLA_SUCCESS) {
555 qla_printk(KERN_WARNING, ha, 544 qla_printk(KERN_WARNING, ha,
556 "Couldn't delete req que %d\n", 545 "Couldn't delete rsp que %d\n",
557 vha->req_ques[cnt]); 546 rsp->id);
558 return ret; 547 return ret;
559 }
560 vha->req_ques[cnt] = ha->req_q_map[0]->id;
561 /* Delete associated response queue */
562 if (rsp && rsp->id) {
563 ret = qla25xx_delete_rsp_que(vha, rsp);
564 if (ret != QLA_SUCCESS) {
565 qla_printk(KERN_WARNING, ha,
566 "Couldn't delete rsp que %d\n",
567 rsp->id);
568 return ret;
569 }
570 }
571 } 548 }
572 } 549 }
573 } 550 }
574 qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n",
575 vha->vp_idx);
576 return ret; 551 return ret;
577} 552}
578 553
579int 554int
580qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, 555qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
581 uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos) 556 uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
582{ 557{
583 int ret = 0; 558 int ret = 0;
584 struct req_que *req = NULL; 559 struct req_que *req = NULL;
585 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 560 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
586 uint16_t que_id = 0; 561 uint16_t que_id = 0;
587 device_reg_t __iomem *reg; 562 device_reg_t __iomem *reg;
563 uint32_t cnt;
588 564
589 req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 565 req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
590 if (req == NULL) { 566 if (req == NULL) {
@@ -604,8 +580,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
604 } 580 }
605 581
606 mutex_lock(&ha->vport_lock); 582 mutex_lock(&ha->vport_lock);
607 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues); 583 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
608 if (que_id >= ha->max_queues) { 584 if (que_id >= ha->max_req_queues) {
609 mutex_unlock(&ha->vport_lock); 585 mutex_unlock(&ha->vport_lock);
610 qla_printk(KERN_INFO, ha, "No resources to create " 586 qla_printk(KERN_INFO, ha, "No resources to create "
611 "additional request queue\n"); 587 "additional request queue\n");
@@ -617,10 +593,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
617 req->vp_idx = vp_idx; 593 req->vp_idx = vp_idx;
618 req->qos = qos; 594 req->qos = qos;
619 595
620 if (ha->rsp_q_map[rsp_que]) { 596 if (rsp_que < 0)
597 req->rsp = NULL;
598 else
621 req->rsp = ha->rsp_q_map[rsp_que]; 599 req->rsp = ha->rsp_q_map[rsp_que];
622 req->rsp->req = req;
623 }
624 /* Use alternate PCI bus number */ 600 /* Use alternate PCI bus number */
625 if (MSB(req->rid)) 601 if (MSB(req->rid))
626 options |= BIT_4; 602 options |= BIT_4;
@@ -628,13 +604,16 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
628 if (LSB(req->rid)) 604 if (LSB(req->rid))
629 options |= BIT_5; 605 options |= BIT_5;
630 req->options = options; 606 req->options = options;
607
608 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
609 req->outstanding_cmds[cnt] = NULL;
610 req->current_outstanding_cmd = 1;
611
631 req->ring_ptr = req->ring; 612 req->ring_ptr = req->ring;
632 req->ring_index = 0; 613 req->ring_index = 0;
633 req->cnt = req->length; 614 req->cnt = req->length;
634 req->id = que_id; 615 req->id = que_id;
635 reg = ISP_QUE_REG(ha, que_id); 616 reg = ISP_QUE_REG(ha, que_id);
636 req->req_q_in = &reg->isp25mq.req_q_in;
637 req->req_q_out = &reg->isp25mq.req_q_out;
638 req->max_q_depth = ha->req_q_map[0]->max_q_depth; 617 req->max_q_depth = ha->req_q_map[0]->max_q_depth;
639 mutex_unlock(&ha->vport_lock); 618 mutex_unlock(&ha->vport_lock);
640 619
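
The request-queue setup above clears the outstanding-command table and starts current_outstanding_cmd at 1: handle 0 is reserved, so a zero handle can always be treated as invalid. A simplified standalone sketch of how such a slot table hands out handles, searching from the last one issued and wrapping past slot 0 (local names; the driver's real allocator lives in the I/O path):

#include <stdio.h>

#define MAX_OUTSTANDING_COMMANDS 8

struct req_que {
	void *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
	unsigned int current_outstanding_cmd;
};

static int alloc_handle(struct req_que *req, void *sp)
{
	unsigned int h = req->current_outstanding_cmd;

	for (int i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
		h++;
		if (h == MAX_OUTSTANDING_COMMANDS)
			h = 1;			/* wrap, skipping handle 0 */
		if (!req->outstanding_cmds[h]) {
			req->outstanding_cmds[h] = sp;
			req->current_outstanding_cmd = h;
			return h;
		}
	}
	return 0;				/* ring full */
}

int main(void)
{
	struct req_que req = { .current_outstanding_cmd = 1 };
	int cmd = 42;

	printf("handle=%d\n", alloc_handle(&req, &cmd));
	return 0;
}
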
@@ -654,10 +633,19 @@ que_failed:
654 return 0; 633 return 0;
655} 634}
656 635
636static void qla_do_work(struct work_struct *work)
637{
638 struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
639 struct scsi_qla_host *vha;
640
641 vha = qla25xx_get_host(rsp);
642 qla24xx_process_response_queue(vha, rsp);
643}
644
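
qla_do_work() above receives only the embedded work_struct from the workqueue and recovers its rsp_que with container_of(). A standalone illustration of that idiom, with the macro defined locally:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };
struct rsp_que { int id; struct work q_work; };

static void do_work(struct work *w)
{
	struct rsp_que *rsp = container_of(w, struct rsp_que, q_work);

	printf("work item belongs to response queue %d\n", rsp->id);
}

int main(void)
{
	struct rsp_que rsp = { .id = 7 };

	do_work(&rsp.q_work);	/* only the embedded member is passed around */
	return 0;
}
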
657/* create response queue */ 645/* create response queue */
658int 646int
659qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, 647qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
660 uint8_t vp_idx, uint16_t rid) 648 uint8_t vp_idx, uint16_t rid, int req)
661{ 649{
662 int ret = 0; 650 int ret = 0;
663 struct rsp_que *rsp = NULL; 651 struct rsp_que *rsp = NULL;
@@ -672,7 +660,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
672 goto que_failed; 660 goto que_failed;
673 } 661 }
674 662
675 rsp->length = RESPONSE_ENTRY_CNT_2300; 663 rsp->length = RESPONSE_ENTRY_CNT_MQ;
676 rsp->ring = dma_alloc_coherent(&ha->pdev->dev, 664 rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
677 (rsp->length + 1) * sizeof(response_t), 665 (rsp->length + 1) * sizeof(response_t),
678 &rsp->dma, GFP_KERNEL); 666 &rsp->dma, GFP_KERNEL);
@@ -683,8 +671,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
683 } 671 }
684 672
685 mutex_lock(&ha->vport_lock); 673 mutex_lock(&ha->vport_lock);
686 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); 674 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
687 if (que_id >= ha->max_queues) { 675 if (que_id >= ha->max_rsp_queues) {
688 mutex_unlock(&ha->vport_lock); 676 mutex_unlock(&ha->vport_lock);
689 qla_printk(KERN_INFO, ha, "No resources to create " 677 qla_printk(KERN_INFO, ha, "No resources to create "
690 "additional response queue\n"); 678 "additional response queue\n");
@@ -708,8 +696,6 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
708 if (LSB(rsp->rid)) 696 if (LSB(rsp->rid))
709 options |= BIT_5; 697 options |= BIT_5;
710 rsp->options = options; 698 rsp->options = options;
711 rsp->ring_ptr = rsp->ring;
712 rsp->ring_index = 0;
713 rsp->id = que_id; 699 rsp->id = que_id;
714 reg = ISP_QUE_REG(ha, que_id); 700 reg = ISP_QUE_REG(ha, que_id);
715 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; 701 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
@@ -728,9 +714,14 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
728 mutex_unlock(&ha->vport_lock); 714 mutex_unlock(&ha->vport_lock);
729 goto que_failed; 715 goto que_failed;
730 } 716 }
717 if (req >= 0)
718 rsp->req = ha->req_q_map[req];
719 else
720 rsp->req = NULL;
731 721
732 qla2x00_init_response_q_entries(rsp); 722 qla2x00_init_response_q_entries(rsp);
733 723 if (rsp->hw->wq)
724 INIT_WORK(&rsp->q_work, qla_do_work);
734 return rsp->id; 725 return rsp->id;
735 726
736que_failed: 727que_failed:
@@ -744,14 +735,16 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
744 uint16_t options = 0; 735 uint16_t options = 0;
745 uint8_t ret = 0; 736 uint8_t ret = 0;
746 struct qla_hw_data *ha = vha->hw; 737 struct qla_hw_data *ha = vha->hw;
738 struct rsp_que *rsp;
747 739
748 options |= BIT_1; 740 options |= BIT_1;
749 ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0); 741 ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0, -1);
750 if (!ret) { 742 if (!ret) {
751 qla_printk(KERN_WARNING, ha, "Response Que create failed\n"); 743 qla_printk(KERN_WARNING, ha, "Response Que create failed\n");
752 return ret; 744 return ret;
753 } else 745 } else
754 qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret); 746 qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret);
747 rsp = ha->rsp_q_map[ret];
755 748
756 options = 0; 749 options = 0;
757 if (qos & BIT_7) 750 if (qos & BIT_7)
@@ -759,10 +752,11 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
759 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret, 752 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret,
760 qos & ~BIT_7); 753 qos & ~BIT_7);
761 if (ret) { 754 if (ret) {
762 vha->req_ques[0] = ret; 755 vha->req = ha->req_q_map[ret];
763 qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret); 756 qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret);
764 } else 757 } else
765 qla_printk(KERN_WARNING, ha, "Request Que create failed\n"); 758 qla_printk(KERN_WARNING, ha, "Request Que create failed\n");
759 rsp->req = ha->req_q_map[ret];
766 760
767 return ret; 761 return ret;
768} 762}
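
In both creation routines, queue ids come from find_first_zero_bit() over the qid bitmap while vport_lock is held, and probe now marks id 0 as taken for the base pair. A standalone sketch of the allocation idea with a hand-rolled single-word bitmap (the kernel helpers operate on arbitrary-length bitmaps, and set_bit() is atomic):

#include <stdio.h>

#define MAX_QUEUES 8

/* Minimal stand-in for find_first_zero_bit() over one word. */
static int first_zero_bit(unsigned long map, int max)
{
	for (int i = 0; i < max; i++)
		if (!(map & (1UL << i)))
			return i;
	return max;
}

int main(void)
{
	unsigned long qid_map = 0x0b;	/* ids 0, 1 and 3 already taken */
	int id = first_zero_bit(qid_map, MAX_QUEUES);

	if (id >= MAX_QUEUES) {
		printf("no resources for an additional queue\n");
		return 1;
	}
	qid_map |= 1UL << id;		/* claim it, as set_bit() would */
	printf("allocated queue id %d\n", id);
	return 0;
}
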
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e4fdcdad80d..dcf011679c8 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -77,6 +77,14 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
77MODULE_PARM_DESC(ql2xmaxqdepth, 77MODULE_PARM_DESC(ql2xmaxqdepth,
78 "Maximum queue depth to report for target devices."); 78 "Maximum queue depth to report for target devices.");
79 79
80int ql2xqfulltracking = 1;
81module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR);
82MODULE_PARM_DESC(ql2xqfulltracking,
83 "Controls whether the driver tracks queue full status "
84 "returns and dynamically adjusts a scsi device's queue "
85 "depth. Default is 1, perform tracking. Set to 0 to "
86 "disable dynamic tracking and adjustment of queue depth.");
87
80int ql2xqfullrampup = 120; 88int ql2xqfullrampup = 120;
81module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR); 89module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
82MODULE_PARM_DESC(ql2xqfullrampup, 90MODULE_PARM_DESC(ql2xqfullrampup,
@@ -96,6 +104,23 @@ MODULE_PARM_DESC(ql2xmaxqueues,
96 "Enables MQ settings " 104 "Enables MQ settings "
97 "Default is 1 for single queue. Set it to number \ 105 "Default is 1 for single queue. Set it to number \
98 of queues in MQ mode."); 106 of queues in MQ mode.");
107
108int ql2xmultique_tag;
109module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR);
110MODULE_PARM_DESC(ql2xmultique_tag,
111 "Enables CPU affinity settings for the driver "
112 "Default is 0 for no affinity of request and response IO. "
113 "Set it to 1 to turn on the cpu affinity.");
114
115int ql2xfwloadbin;
116module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
117MODULE_PARM_DESC(ql2xfwloadbin,
118 "Option to specify location from which to load ISP firmware:\n"
119 " 2 -- load firmware via the request_firmware() (hotplug)\n"
120 " interface.\n"
121 " 1 -- load firmware from flash.\n"
122 " 0 -- use default semantics.\n");
123
99/* 124/*
100 * SCSI host template entry points 125 * SCSI host template entry points
101 */ 126 */
@@ -187,7 +212,7 @@ static void qla2x00_sp_free_dma(srb_t *);
187/* -------------------------------------------------------------------------- */ 212/* -------------------------------------------------------------------------- */
188static int qla2x00_alloc_queues(struct qla_hw_data *ha) 213static int qla2x00_alloc_queues(struct qla_hw_data *ha)
189{ 214{
190 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues, 215 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
191 GFP_KERNEL); 216 GFP_KERNEL);
192 if (!ha->req_q_map) { 217 if (!ha->req_q_map) {
193 qla_printk(KERN_WARNING, ha, 218 qla_printk(KERN_WARNING, ha,
@@ -195,7 +220,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
195 goto fail_req_map; 220 goto fail_req_map;
196 } 221 }
197 222
198 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues, 223 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
199 GFP_KERNEL); 224 GFP_KERNEL);
200 if (!ha->rsp_q_map) { 225 if (!ha->rsp_q_map) {
201 qla_printk(KERN_WARNING, ha, 226 qla_printk(KERN_WARNING, ha,
@@ -213,16 +238,8 @@ fail_req_map:
213 return -ENOMEM; 238 return -ENOMEM;
214} 239}
215 240
216static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req, 241static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
217 struct rsp_que *rsp)
218{ 242{
219 if (rsp && rsp->ring)
220 dma_free_coherent(&ha->pdev->dev,
221 (rsp->length + 1) * sizeof(response_t),
222 rsp->ring, rsp->dma);
223
224 kfree(rsp);
225 rsp = NULL;
226 if (req && req->ring) 243 if (req && req->ring)
227 dma_free_coherent(&ha->pdev->dev, 244 dma_free_coherent(&ha->pdev->dev,
228 (req->length + 1) * sizeof(request_t), 245 (req->length + 1) * sizeof(request_t),
@@ -232,22 +249,77 @@ static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req,
232 req = NULL; 249 req = NULL;
233} 250}
234 251
252static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
253{
254 if (rsp && rsp->ring)
255 dma_free_coherent(&ha->pdev->dev,
256 (rsp->length + 1) * sizeof(response_t),
257 rsp->ring, rsp->dma);
258
259 kfree(rsp);
260 rsp = NULL;
261}
262
235static void qla2x00_free_queues(struct qla_hw_data *ha) 263static void qla2x00_free_queues(struct qla_hw_data *ha)
236{ 264{
237 struct req_que *req; 265 struct req_que *req;
238 struct rsp_que *rsp; 266 struct rsp_que *rsp;
239 int cnt; 267 int cnt;
240 268
241 for (cnt = 0; cnt < ha->max_queues; cnt++) { 269 for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
242 rsp = ha->rsp_q_map[cnt];
243 req = ha->req_q_map[cnt]; 270 req = ha->req_q_map[cnt];
244 qla2x00_free_que(ha, req, rsp); 271 qla2x00_free_req_que(ha, req);
272 }
273 kfree(ha->req_q_map);
274 ha->req_q_map = NULL;
275
276 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
277 rsp = ha->rsp_q_map[cnt];
278 qla2x00_free_rsp_que(ha, rsp);
245 } 279 }
246 kfree(ha->rsp_q_map); 280 kfree(ha->rsp_q_map);
247 ha->rsp_q_map = NULL; 281 ha->rsp_q_map = NULL;
282}
248 283
249 kfree(ha->req_q_map); 284static int qla25xx_setup_mode(struct scsi_qla_host *vha)
250 ha->req_q_map = NULL; 285{
286 uint16_t options = 0;
287 int ques, req, ret;
288 struct qla_hw_data *ha = vha->hw;
289
290 if (ql2xmultique_tag) {
291 /* CPU affinity mode */
292 ha->wq = create_workqueue("qla2xxx_wq");
293 /* create a request queue for IO */
294 options |= BIT_7;
295 req = qla25xx_create_req_que(ha, options, 0, 0, -1,
296 QLA_DEFAULT_QUE_QOS);
297 if (!req) {
298 qla_printk(KERN_WARNING, ha,
299 "Can't create request queue\n");
300 goto fail;
301 }
302 vha->req = ha->req_q_map[req];
303 options |= BIT_1;
304 for (ques = 1; ques < ha->max_rsp_queues; ques++) {
305 ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
306 if (!ret) {
307 qla_printk(KERN_WARNING, ha,
308 "Response Queue create failed\n");
309 goto fail2;
310 }
311 }
312 DEBUG2(qla_printk(KERN_INFO, ha,
313 "CPU affinity mode enabled, no. of response"
314 " queues:%d, no. of request queues:%d\n",
315 ha->max_rsp_queues, ha->max_req_queues));
316 }
317 return 0;
318fail2:
319 qla25xx_delete_queues(vha);
320fail:
321 ha->mqenable = 0;
322 return 1;
251} 323}
252 324
253static char * 325static char *
@@ -387,7 +459,6 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
387 459
388 sp->fcport = fcport; 460 sp->fcport = fcport;
389 sp->cmd = cmd; 461 sp->cmd = cmd;
390 sp->que = ha->req_q_map[0];
391 sp->flags = 0; 462 sp->flags = 0;
392 CMD_SP(cmd) = (void *)sp; 463 CMD_SP(cmd) = (void *)sp;
393 cmd->scsi_done = done; 464 cmd->scsi_done = done;
@@ -612,7 +683,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
612void 683void
613qla2x00_abort_fcport_cmds(fc_port_t *fcport) 684qla2x00_abort_fcport_cmds(fc_port_t *fcport)
614{ 685{
615 int cnt, que, id; 686 int cnt;
616 unsigned long flags; 687 unsigned long flags;
617 srb_t *sp; 688 srb_t *sp;
618 scsi_qla_host_t *vha = fcport->vha; 689 scsi_qla_host_t *vha = fcport->vha;
@@ -620,32 +691,27 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport)
620 struct req_que *req; 691 struct req_que *req;
621 692
622 spin_lock_irqsave(&ha->hardware_lock, flags); 693 spin_lock_irqsave(&ha->hardware_lock, flags);
623 for (que = 0; que < QLA_MAX_HOST_QUES; que++) { 694 req = vha->req;
624 id = vha->req_ques[que]; 695 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
625 req = ha->req_q_map[id]; 696 sp = req->outstanding_cmds[cnt];
626 if (!req) 697 if (!sp)
698 continue;
699 if (sp->fcport != fcport)
627 continue; 700 continue;
628 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
629 sp = req->outstanding_cmds[cnt];
630 if (!sp)
631 continue;
632 if (sp->fcport != fcport)
633 continue;
634 701
635 spin_unlock_irqrestore(&ha->hardware_lock, flags); 702 spin_unlock_irqrestore(&ha->hardware_lock, flags);
636 if (ha->isp_ops->abort_command(vha, sp, req)) { 703 if (ha->isp_ops->abort_command(sp)) {
704 DEBUG2(qla_printk(KERN_WARNING, ha,
705 "Abort failed -- %lx\n",
706 sp->cmd->serial_number));
707 } else {
708 if (qla2x00_eh_wait_on_command(sp->cmd) !=
709 QLA_SUCCESS)
637 DEBUG2(qla_printk(KERN_WARNING, ha, 710 DEBUG2(qla_printk(KERN_WARNING, ha,
638 "Abort failed -- %lx\n", 711 "Abort failed while waiting -- %lx\n",
639 sp->cmd->serial_number)); 712 sp->cmd->serial_number));
640 } else {
641 if (qla2x00_eh_wait_on_command(sp->cmd) !=
642 QLA_SUCCESS)
643 DEBUG2(qla_printk(KERN_WARNING, ha,
644 "Abort failed while waiting -- %lx\n",
645 sp->cmd->serial_number));
646 }
647 spin_lock_irqsave(&ha->hardware_lock, flags);
648 } 713 }
714 spin_lock_irqsave(&ha->hardware_lock, flags);
649 } 715 }
650 spin_unlock_irqrestore(&ha->hardware_lock, flags); 716 spin_unlock_irqrestore(&ha->hardware_lock, flags);
651} 717}
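
The flattened loop above keeps the established discipline: hardware_lock is dropped around the blocking abort/wait calls and retaken before touching the next slot. A user-space analogue of that walk/drop/retake pattern, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

#define SLOTS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int outstanding[SLOTS] = { 0, 1, 0, 1 };

/* Stand-in for a call that may sleep and must not hold the lock. */
static void abort_cmd(int slot) { printf("aborting slot %d\n", slot); }

int main(void)
{
	pthread_mutex_lock(&lock);
	for (int i = 0; i < SLOTS; i++) {
		if (!outstanding[i])
			continue;
		/* Drop the lock around the blocking call, then retake it;
		 * real code must revalidate the slot after re-entry. */
		pthread_mutex_unlock(&lock);
		abort_cmd(i);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return 0;
}
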
@@ -693,7 +759,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
693 unsigned long flags; 759 unsigned long flags;
694 int wait = 0; 760 int wait = 0;
695 struct qla_hw_data *ha = vha->hw; 761 struct qla_hw_data *ha = vha->hw;
696 struct req_que *req; 762 struct req_que *req = vha->req;
697 srb_t *spt; 763 srb_t *spt;
698 764
699 qla2x00_block_error_handler(cmd); 765 qla2x00_block_error_handler(cmd);
@@ -709,7 +775,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
709 spt = (srb_t *) CMD_SP(cmd); 775 spt = (srb_t *) CMD_SP(cmd);
710 if (!spt) 776 if (!spt)
711 return SUCCESS; 777 return SUCCESS;
712 req = spt->que;
713 778
 714 	/* Check active list for the command. */ 779 	/* Check active list for the command. */
715 spin_lock_irqsave(&ha->hardware_lock, flags); 780 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -726,7 +791,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
726 " pid=%ld.\n", __func__, vha->host_no, sp, serial)); 791 " pid=%ld.\n", __func__, vha->host_no, sp, serial));
727 792
728 spin_unlock_irqrestore(&ha->hardware_lock, flags); 793 spin_unlock_irqrestore(&ha->hardware_lock, flags);
729 if (ha->isp_ops->abort_command(vha, sp, req)) { 794 if (ha->isp_ops->abort_command(sp)) {
730 DEBUG2(printk("%s(%ld): abort_command " 795 DEBUG2(printk("%s(%ld): abort_command "
731 "mbx failed.\n", __func__, vha->host_no)); 796 "mbx failed.\n", __func__, vha->host_no));
732 ret = FAILED; 797 ret = FAILED;
@@ -777,7 +842,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
777 return status; 842 return status;
778 843
779 spin_lock_irqsave(&ha->hardware_lock, flags); 844 spin_lock_irqsave(&ha->hardware_lock, flags);
780 req = sp->que; 845 req = vha->req;
781 for (cnt = 1; status == QLA_SUCCESS && 846 for (cnt = 1; status == QLA_SUCCESS &&
782 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 847 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
783 sp = req->outstanding_cmds[cnt]; 848 sp = req->outstanding_cmds[cnt];
@@ -820,7 +885,7 @@ static char *reset_errors[] = {
820 885
821static int 886static int
822__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, 887__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
823 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int)) 888 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
824{ 889{
825 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 890 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
826 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 891 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
@@ -841,7 +906,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
841 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) 906 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
842 goto eh_reset_failed; 907 goto eh_reset_failed;
843 err = 2; 908 err = 2;
844 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS) 909 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
910 != QLA_SUCCESS)
845 goto eh_reset_failed; 911 goto eh_reset_failed;
846 err = 3; 912 err = 3;
847 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, 913 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
@@ -996,6 +1062,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
996 if (qla2x00_vp_abort_isp(vha)) 1062 if (qla2x00_vp_abort_isp(vha))
997 goto eh_host_reset_lock; 1063 goto eh_host_reset_lock;
998 } else { 1064 } else {
1065 if (ha->wq)
1066 flush_workqueue(ha->wq);
1067
999 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1068 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1000 if (qla2x00_abort_isp(base_vha)) { 1069 if (qla2x00_abort_isp(base_vha)) {
1001 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1070 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
@@ -1037,7 +1106,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1037 struct fc_port *fcport; 1106 struct fc_port *fcport;
1038 struct qla_hw_data *ha = vha->hw; 1107 struct qla_hw_data *ha = vha->hw;
1039 1108
1040 if (ha->flags.enable_lip_full_login && !vha->vp_idx) { 1109 if (ha->flags.enable_lip_full_login && !vha->vp_idx &&
1110 !IS_QLA81XX(ha)) {
1041 ret = qla2x00_full_login_lip(vha); 1111 ret = qla2x00_full_login_lip(vha);
1042 if (ret != QLA_SUCCESS) { 1112 if (ret != QLA_SUCCESS) {
1043 DEBUG2_3(printk("%s(%ld): failed: " 1113 DEBUG2_3(printk("%s(%ld): failed: "
@@ -1064,7 +1134,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1064 if (fcport->port_type != FCT_TARGET) 1134 if (fcport->port_type != FCT_TARGET)
1065 continue; 1135 continue;
1066 1136
1067 ret = ha->isp_ops->target_reset(fcport, 0); 1137 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1068 if (ret != QLA_SUCCESS) { 1138 if (ret != QLA_SUCCESS) {
1069 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1139 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1070 "target_reset=%d d_id=%x.\n", __func__, 1140 "target_reset=%d d_id=%x.\n", __func__,
@@ -1088,7 +1158,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1088 struct req_que *req; 1158 struct req_que *req;
1089 1159
1090 spin_lock_irqsave(&ha->hardware_lock, flags); 1160 spin_lock_irqsave(&ha->hardware_lock, flags);
1091 for (que = 0; que < ha->max_queues; que++) { 1161 for (que = 0; que < ha->max_req_queues; que++) {
1092 req = ha->req_q_map[que]; 1162 req = ha->req_q_map[que];
1093 if (!req) 1163 if (!req)
1094 continue; 1164 continue;
@@ -1123,7 +1193,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1123 scsi_qla_host_t *vha = shost_priv(sdev->host); 1193 scsi_qla_host_t *vha = shost_priv(sdev->host);
1124 struct qla_hw_data *ha = vha->hw; 1194 struct qla_hw_data *ha = vha->hw;
1125 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1195 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1126 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 1196 struct req_que *req = vha->req;
1127 1197
1128 if (sdev->tagged_supported) 1198 if (sdev->tagged_supported)
1129 scsi_activate_tcq(sdev, req->max_q_depth); 1199 scsi_activate_tcq(sdev, req->max_q_depth);
@@ -1511,6 +1581,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
1511 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1581 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1512 break; 1582 break;
1513 } 1583 }
1584
1585 /* Get adapter physical port no from interrupt pin register. */
1586 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
1587 if (ha->port_no & 1)
1588 ha->flags.port0 = 1;
1589 else
1590 ha->flags.port0 = 0;
1514} 1591}
1515 1592
1516static int 1593static int
@@ -1518,6 +1595,7 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
1518{ 1595{
1519 resource_size_t pio; 1596 resource_size_t pio;
1520 uint16_t msix; 1597 uint16_t msix;
1598 int cpus;
1521 1599
1522 if (pci_request_selected_regions(ha->pdev, ha->bars, 1600 if (pci_request_selected_regions(ha->pdev, ha->bars,
1523 QLA2XXX_DRIVER_NAME)) { 1601 QLA2XXX_DRIVER_NAME)) {
@@ -1571,8 +1649,9 @@ skip_pio:
1571 } 1649 }
1572 1650
1573 /* Determine queue resources */ 1651 /* Determine queue resources */
1574 ha->max_queues = 1; 1652 ha->max_req_queues = ha->max_rsp_queues = 1;
1575 if (ql2xmaxqueues <= 1 || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) 1653 if ((ql2xmaxqueues <= 1 || ql2xmultique_tag < 1) &&
1654 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
1576 goto mqiobase_exit; 1655 goto mqiobase_exit;
1577 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 1656 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1578 pci_resource_len(ha->pdev, 3)); 1657 pci_resource_len(ha->pdev, 3));
@@ -1582,18 +1661,24 @@ skip_pio:
1582 ha->msix_count = msix; 1661 ha->msix_count = msix;
1583 /* Max queues are bounded by available msix vectors */ 1662 /* Max queues are bounded by available msix vectors */
1584 /* queue 0 uses two msix vectors */ 1663 /* queue 0 uses two msix vectors */
1585 if (ha->msix_count - 1 < ql2xmaxqueues) 1664 if (ql2xmultique_tag) {
1586 ha->max_queues = ha->msix_count - 1; 1665 cpus = num_online_cpus();
1587 else if (ql2xmaxqueues > QLA_MQ_SIZE) 1666 ha->max_rsp_queues = (ha->msix_count - 1 - cpus) ?
1588 ha->max_queues = QLA_MQ_SIZE; 1667 (cpus + 1) : (ha->msix_count - 1);
1589 else 1668 ha->max_req_queues = 2;
1590 ha->max_queues = ql2xmaxqueues; 1669 } else if (ql2xmaxqueues > 1) {
1670 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1671 QLA_MQ_SIZE : ql2xmaxqueues;
1672 DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
1673 " of request queues:%d\n", ha->max_req_queues));
1674 }
1591 qla_printk(KERN_INFO, ha, 1675 qla_printk(KERN_INFO, ha,
1592 "MSI-X vector count: %d\n", msix); 1676 "MSI-X vector count: %d\n", msix);
1593 } 1677 } else
1678 qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
1594 1679
1595mqiobase_exit: 1680mqiobase_exit:
1596 ha->msix_count = ha->max_queues + 1; 1681 ha->msix_count = ha->max_rsp_queues + 1;
1597 return (0); 1682 return (0);
1598 1683
1599iospace_error_exit: 1684iospace_error_exit:
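
The CPU-affinity branch above budgets MSI-X vectors: queue 0 uses two vectors, then ideally one response queue per online CPU plus the base queue. A standalone sketch reproducing the patch's expression verbatim; note it is a truthiness test, so it only falls back to msix_count - 1 when the vector budget equals cpus + 1 exactly:

#include <stdio.h>

static int max_rsp_queues(int msix_count, int cpus)
{
	return (msix_count - 1 - cpus) ? (cpus + 1) : (msix_count - 1);
}

int main(void)
{
	printf("%d\n", max_rsp_queues(16, 4));	/* 5: base + one per CPU */
	printf("%d\n", max_rsp_queues(5, 4));	/* 4: vector-limited */
	return 0;
}
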
@@ -1605,6 +1690,9 @@ qla2xxx_scan_start(struct Scsi_Host *shost)
1605{ 1690{
1606 scsi_qla_host_t *vha = shost_priv(shost); 1691 scsi_qla_host_t *vha = shost_priv(shost);
1607 1692
1693 if (vha->hw->flags.running_gold_fw)
1694 return;
1695
1608 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1696 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1609 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1697 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1610 set_bit(RSCN_UPDATE, &vha->dpc_flags); 1698 set_bit(RSCN_UPDATE, &vha->dpc_flags);
@@ -1768,6 +1856,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1768 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 1856 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
1769 ha->gid_list_info_size = 8; 1857 ha->gid_list_info_size = 8;
1770 ha->optrom_size = OPTROM_SIZE_81XX; 1858 ha->optrom_size = OPTROM_SIZE_81XX;
1859 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
1771 ha->isp_ops = &qla81xx_isp_ops; 1860 ha->isp_ops = &qla81xx_isp_ops;
1772 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; 1861 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
1773 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; 1862 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
@@ -1803,14 +1892,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1803 1892
1804 ret = -ENOMEM; 1893 ret = -ENOMEM;
1805 qla2x00_mem_free(ha); 1894 qla2x00_mem_free(ha);
1806 qla2x00_free_que(ha, req, rsp); 1895 qla2x00_free_req_que(ha, req);
1896 qla2x00_free_rsp_que(ha, rsp);
1807 goto probe_hw_failed; 1897 goto probe_hw_failed;
1808 } 1898 }
1809 1899
1810 pci_set_drvdata(pdev, base_vha); 1900 pci_set_drvdata(pdev, base_vha);
1811 1901
1812 host = base_vha->host; 1902 host = base_vha->host;
1813 base_vha->req_ques[0] = req->id; 1903 base_vha->req = req;
1814 host->can_queue = req->length + 128; 1904 host->can_queue = req->length + 128;
1815 if (IS_QLA2XXX_MIDTYPE(ha)) 1905 if (IS_QLA2XXX_MIDTYPE(ha))
1816 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; 1906 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
@@ -1841,7 +1931,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1841 } 1931 }
1842 ha->rsp_q_map[0] = rsp; 1932 ha->rsp_q_map[0] = rsp;
1843 ha->req_q_map[0] = req; 1933 ha->req_q_map[0] = req;
1844 1934 rsp->req = req;
1935 req->rsp = rsp;
1936 set_bit(0, ha->req_qid_map);
1937 set_bit(0, ha->rsp_qid_map);
1845 /* FWI2-capable only. */ 1938 /* FWI2-capable only. */
1846 req->req_q_in = &ha->iobase->isp24.req_q_in; 1939 req->req_q_in = &ha->iobase->isp24.req_q_in;
1847 req->req_q_out = &ha->iobase->isp24.req_q_out; 1940 req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -1866,6 +1959,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1866 goto probe_failed; 1959 goto probe_failed;
1867 } 1960 }
1868 1961
1962 if (ha->mqenable)
1963 if (qla25xx_setup_mode(base_vha))
1964 qla_printk(KERN_WARNING, ha,
1965 "Can't create queues, falling back to single"
1966 " queue mode\n");
1967
1968 if (ha->flags.running_gold_fw)
1969 goto skip_dpc;
1970
1869 /* 1971 /*
1870 * Startup the kernel thread for this host adapter 1972 * Startup the kernel thread for this host adapter
1871 */ 1973 */
@@ -1878,6 +1980,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1878 goto probe_failed; 1980 goto probe_failed;
1879 } 1981 }
1880 1982
1983skip_dpc:
1881 list_add_tail(&base_vha->list, &ha->vp_list); 1984 list_add_tail(&base_vha->list, &ha->vp_list);
1882 base_vha->host->irq = ha->pdev->irq; 1985 base_vha->host->irq = ha->pdev->irq;
1883 1986
@@ -1917,8 +2020,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1917 return 0; 2020 return 0;
1918 2021
1919probe_init_failed: 2022probe_init_failed:
1920 qla2x00_free_que(ha, req, rsp); 2023 qla2x00_free_req_que(ha, req);
1921 ha->max_queues = 0; 2024 qla2x00_free_rsp_que(ha, rsp);
2025 ha->max_req_queues = ha->max_rsp_queues = 0;
1922 2026
1923probe_failed: 2027probe_failed:
1924 if (base_vha->timer_active) 2028 if (base_vha->timer_active)
@@ -1976,6 +2080,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
1976 2080
1977 base_vha->flags.online = 0; 2081 base_vha->flags.online = 0;
1978 2082
2083 /* Flush the work queue and remove it */
2084 if (ha->wq) {
2085 flush_workqueue(ha->wq);
2086 destroy_workqueue(ha->wq);
2087 ha->wq = NULL;
2088 }
2089
1979 /* Kill the kernel thread for this host */ 2090 /* Kill the kernel thread for this host */
1980 if (ha->dpc_thread) { 2091 if (ha->dpc_thread) {
1981 struct task_struct *t = ha->dpc_thread; 2092 struct task_struct *t = ha->dpc_thread;
@@ -2017,6 +2128,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2017{ 2128{
2018 struct qla_hw_data *ha = vha->hw; 2129 struct qla_hw_data *ha = vha->hw;
2019 2130
2131 qla25xx_delete_queues(vha);
2132
2020 if (ha->flags.fce_enabled) 2133 if (ha->flags.fce_enabled)
2021 qla2x00_disable_fce_trace(vha, NULL, NULL); 2134 qla2x00_disable_fce_trace(vha, NULL, NULL);
2022 2135
@@ -2329,6 +2442,14 @@ qla2x00_mem_free(struct qla_hw_data *ha)
2329 vfree(ha->fw_dump); 2442 vfree(ha->fw_dump);
2330 } 2443 }
2331 2444
2445 if (ha->dcbx_tlv)
2446 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
2447 ha->dcbx_tlv, ha->dcbx_tlv_dma);
2448
2449 if (ha->xgmac_data)
2450 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
2451 ha->xgmac_data, ha->xgmac_data_dma);
2452
2332 if (ha->sns_cmd) 2453 if (ha->sns_cmd)
2333 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 2454 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2334 ha->sns_cmd, ha->sns_cmd_dma); 2455 ha->sns_cmd, ha->sns_cmd_dma);
@@ -2412,6 +2533,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
2412 INIT_LIST_HEAD(&vha->work_list); 2533 INIT_LIST_HEAD(&vha->work_list);
2413 INIT_LIST_HEAD(&vha->list); 2534 INIT_LIST_HEAD(&vha->list);
2414 2535
2536 spin_lock_init(&vha->work_lock);
2537
2415 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 2538 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
2416 return vha; 2539 return vha;
2417 2540
@@ -2420,13 +2543,11 @@ fail:
2420} 2543}
2421 2544
2422static struct qla_work_evt * 2545static struct qla_work_evt *
2423qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type, 2546qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
2424 int locked)
2425{ 2547{
2426 struct qla_work_evt *e; 2548 struct qla_work_evt *e;
2427 2549
2428 e = kzalloc(sizeof(struct qla_work_evt), locked ? GFP_ATOMIC: 2550 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
2429 GFP_KERNEL);
2430 if (!e) 2551 if (!e)
2431 return NULL; 2552 return NULL;
2432 2553
@@ -2437,17 +2558,15 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type,
2437} 2558}
2438 2559
2439static int 2560static int
2440qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e, int locked) 2561qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
2441{ 2562{
2442 unsigned long uninitialized_var(flags); 2563 unsigned long flags;
2443 struct qla_hw_data *ha = vha->hw;
2444 2564
2445 if (!locked) 2565 spin_lock_irqsave(&vha->work_lock, flags);
2446 spin_lock_irqsave(&ha->hardware_lock, flags);
2447 list_add_tail(&e->list, &vha->work_list); 2566 list_add_tail(&e->list, &vha->work_list);
2567 spin_unlock_irqrestore(&vha->work_lock, flags);
2448 qla2xxx_wake_dpc(vha); 2568 qla2xxx_wake_dpc(vha);
2449 if (!locked) 2569
2450 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2451 return QLA_SUCCESS; 2570 return QLA_SUCCESS;
2452} 2571}
2453 2572
@@ -2457,13 +2576,13 @@ qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
2457{ 2576{
2458 struct qla_work_evt *e; 2577 struct qla_work_evt *e;
2459 2578
2460 e = qla2x00_alloc_work(vha, QLA_EVT_AEN, 1); 2579 e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
2461 if (!e) 2580 if (!e)
2462 return QLA_FUNCTION_FAILED; 2581 return QLA_FUNCTION_FAILED;
2463 2582
2464 e->u.aen.code = code; 2583 e->u.aen.code = code;
2465 e->u.aen.data = data; 2584 e->u.aen.data = data;
2466 return qla2x00_post_work(vha, e, 1); 2585 return qla2x00_post_work(vha, e);
2467} 2586}
2468 2587
2469int 2588int
@@ -2471,25 +2590,27 @@ qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
2471{ 2590{
2472 struct qla_work_evt *e; 2591 struct qla_work_evt *e;
2473 2592
2474 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK, 1); 2593 e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
2475 if (!e) 2594 if (!e)
2476 return QLA_FUNCTION_FAILED; 2595 return QLA_FUNCTION_FAILED;
2477 2596
2478 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 2597 memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
2479 return qla2x00_post_work(vha, e, 1); 2598 return qla2x00_post_work(vha, e);
2480} 2599}
2481 2600
2482static void 2601static void
2483qla2x00_do_work(struct scsi_qla_host *vha) 2602qla2x00_do_work(struct scsi_qla_host *vha)
2484{ 2603{
2485 struct qla_work_evt *e; 2604 struct qla_work_evt *e, *tmp;
2486 struct qla_hw_data *ha = vha->hw; 2605 unsigned long flags;
2606 LIST_HEAD(work);
2487 2607
2488 spin_lock_irq(&ha->hardware_lock); 2608 spin_lock_irqsave(&vha->work_lock, flags);
2489 while (!list_empty(&vha->work_list)) { 2609 list_splice_init(&vha->work_list, &work);
2490 e = list_entry(vha->work_list.next, struct qla_work_evt, list); 2610 spin_unlock_irqrestore(&vha->work_lock, flags);
2611
2612 list_for_each_entry_safe(e, tmp, &work, list) {
2491 list_del_init(&e->list); 2613 list_del_init(&e->list);
2492 spin_unlock_irq(&ha->hardware_lock);
2493 2614
2494 switch (e->type) { 2615 switch (e->type) {
2495 case QLA_EVT_AEN: 2616 case QLA_EVT_AEN:
@@ -2502,10 +2623,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
2502 } 2623 }
2503 if (e->flags & QLA_EVT_FLAG_FREE) 2624 if (e->flags & QLA_EVT_FLAG_FREE)
2504 kfree(e); 2625 kfree(e);
2505 spin_lock_irq(&ha->hardware_lock);
2506 } 2626 }
2507 spin_unlock_irq(&ha->hardware_lock);
2508} 2627}
2628
2509/* Relogins all the fcports of a vport 2629/* Relogins all the fcports of a vport
2510 * Context: dpc thread 2630 * Context: dpc thread
2511 */ 2631 */
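
The qla2x00_do_work() rework above is the standard splice-and-drain pattern: hold the new work_lock only long enough to move the whole pending list onto a private head, then process the entries lock-free. A minimal self-contained sketch of the pattern (names here are illustrative, not the driver's):

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct evt {
		struct list_head list;
		int type;
	};

	static LIST_HEAD(pending);
	static DEFINE_SPINLOCK(pending_lock);

	static void drain_pending(void)
	{
		struct evt *e, *tmp;
		unsigned long flags;
		LIST_HEAD(work);	/* private, on-stack list head */

		/* One short critical section to steal every queued event. */
		spin_lock_irqsave(&pending_lock, flags);
		list_splice_init(&pending, &work);
		spin_unlock_irqrestore(&pending_lock, flags);

		/* Handlers run unlocked; producers may queue new events meanwhile. */
		list_for_each_entry_safe(e, tmp, &work, list) {
			list_del_init(&e->list);
			/* ... dispatch on e->type ... */
			kfree(e);
		}
	}

This is also why qla2x00_alloc_work() loses its "locked" parameter: posting no longer nests inside ha->hardware_lock, so GFP_ATOMIC is the only allocation mode needed.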
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 152ecfc26cd..6260505dceb 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -219,8 +219,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
219 wait_cnt = NVR_WAIT_CNT; 219 wait_cnt = NVR_WAIT_CNT;
220 do { 220 do {
221 if (!--wait_cnt) { 221 if (!--wait_cnt) {
222 DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n", 222 DEBUG9_10(qla_printk(KERN_WARNING, ha,
223 __func__, vha->host_no)); 223 "NVRAM didn't go ready...\n"));
224 break; 224 break;
225 } 225 }
226 NVRAM_DELAY(); 226 NVRAM_DELAY();
@@ -349,7 +349,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
349 wait_cnt = NVR_WAIT_CNT; 349 wait_cnt = NVR_WAIT_CNT;
350 do { 350 do {
351 if (!--wait_cnt) { 351 if (!--wait_cnt) {
352 DEBUG9_10(qla_printk( 352 DEBUG9_10(qla_printk(KERN_WARNING, ha,
353 "NVRAM didn't go ready...\n")); 353 "NVRAM didn't go ready...\n"));
354 break; 354 break;
355 } 355 }
@@ -408,7 +408,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
408 wait_cnt = NVR_WAIT_CNT; 408 wait_cnt = NVR_WAIT_CNT;
409 do { 409 do {
410 if (!--wait_cnt) { 410 if (!--wait_cnt) {
411 DEBUG9_10(qla_printk("NVRAM didn't go ready...\n")); 411 DEBUG9_10(qla_printk(KERN_WARNING, ha,
412 "NVRAM didn't go ready...\n"));
412 break; 413 break;
413 } 414 }
414 NVRAM_DELAY(); 415 NVRAM_DELAY();
@@ -701,32 +702,35 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
701 break; 702 break;
702 case FLT_REG_VPD_0: 703 case FLT_REG_VPD_0:
703 ha->flt_region_vpd_nvram = start; 704 ha->flt_region_vpd_nvram = start;
704 if (!(PCI_FUNC(ha->pdev->devfn) & 1)) 705 if (ha->flags.port0)
705 ha->flt_region_vpd = start; 706 ha->flt_region_vpd = start;
706 break; 707 break;
707 case FLT_REG_VPD_1: 708 case FLT_REG_VPD_1:
708 if (PCI_FUNC(ha->pdev->devfn) & 1) 709 if (!ha->flags.port0)
709 ha->flt_region_vpd = start; 710 ha->flt_region_vpd = start;
710 break; 711 break;
711 case FLT_REG_NVRAM_0: 712 case FLT_REG_NVRAM_0:
712 if (!(PCI_FUNC(ha->pdev->devfn) & 1)) 713 if (ha->flags.port0)
713 ha->flt_region_nvram = start; 714 ha->flt_region_nvram = start;
714 break; 715 break;
715 case FLT_REG_NVRAM_1: 716 case FLT_REG_NVRAM_1:
716 if (PCI_FUNC(ha->pdev->devfn) & 1) 717 if (!ha->flags.port0)
717 ha->flt_region_nvram = start; 718 ha->flt_region_nvram = start;
718 break; 719 break;
719 case FLT_REG_FDT: 720 case FLT_REG_FDT:
720 ha->flt_region_fdt = start; 721 ha->flt_region_fdt = start;
721 break; 722 break;
722 case FLT_REG_NPIV_CONF_0: 723 case FLT_REG_NPIV_CONF_0:
723 if (!(PCI_FUNC(ha->pdev->devfn) & 1)) 724 if (ha->flags.port0)
724 ha->flt_region_npiv_conf = start; 725 ha->flt_region_npiv_conf = start;
725 break; 726 break;
726 case FLT_REG_NPIV_CONF_1: 727 case FLT_REG_NPIV_CONF_1:
727 if (PCI_FUNC(ha->pdev->devfn) & 1) 728 if (!ha->flags.port0)
728 ha->flt_region_npiv_conf = start; 729 ha->flt_region_npiv_conf = start;
729 break; 730 break;
731 case FLT_REG_GOLD_FW:
732 ha->flt_region_gold_fw = start;
733 break;
730 } 734 }
731 } 735 }
732 goto done; 736 goto done;
@@ -744,12 +748,12 @@ no_flash_data:
744 ha->flt_region_fw = def_fw[def]; 748 ha->flt_region_fw = def_fw[def];
745 ha->flt_region_boot = def_boot[def]; 749 ha->flt_region_boot = def_boot[def];
746 ha->flt_region_vpd_nvram = def_vpd_nvram[def]; 750 ha->flt_region_vpd_nvram = def_vpd_nvram[def];
747 ha->flt_region_vpd = !(PCI_FUNC(ha->pdev->devfn) & 1) ? 751 ha->flt_region_vpd = ha->flags.port0 ?
748 def_vpd0[def]: def_vpd1[def]; 752 def_vpd0[def]: def_vpd1[def];
749 ha->flt_region_nvram = !(PCI_FUNC(ha->pdev->devfn) & 1) ? 753 ha->flt_region_nvram = ha->flags.port0 ?
750 def_nvram0[def]: def_nvram1[def]; 754 def_nvram0[def]: def_nvram1[def];
751 ha->flt_region_fdt = def_fdt[def]; 755 ha->flt_region_fdt = def_fdt[def];
752 ha->flt_region_npiv_conf = !(PCI_FUNC(ha->pdev->devfn) & 1) ? 756 ha->flt_region_npiv_conf = ha->flags.port0 ?
753 def_npiv_conf0[def]: def_npiv_conf1[def]; 757 def_npiv_conf0[def]: def_npiv_conf1[def];
754done: 758done:
755 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x " 759 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
@@ -924,6 +928,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
924 struct fc_vport_identifiers vid; 928 struct fc_vport_identifiers vid;
925 struct fc_vport *vport; 929 struct fc_vport *vport;
926 930
931 memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
932
927 flags = le16_to_cpu(entry->flags); 933 flags = le16_to_cpu(entry->flags);
928 if (flags == 0xffff) 934 if (flags == 0xffff)
929 continue; 935 continue;
@@ -937,9 +943,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
937 vid.port_name = wwn_to_u64(entry->port_name); 943 vid.port_name = wwn_to_u64(entry->port_name);
938 vid.node_name = wwn_to_u64(entry->node_name); 944 vid.node_name = wwn_to_u64(entry->node_name);
939 945
940 memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry)); 946 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx "
941
942 DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
943 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, 947 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
944 vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id), 948 vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id),
945 entry->q_qos, entry->f_qos)); 949 entry->q_qos, entry->f_qos));
@@ -955,7 +959,6 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
955 } 959 }
956done: 960done:
957 kfree(data); 961 kfree(data);
958 ha->npiv_info = NULL;
959} 962}
960 963
961static int 964static int
@@ -1079,8 +1082,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1079 0xff0000) | ((fdata >> 16) & 0xff)); 1082 0xff0000) | ((fdata >> 16) & 0xff));
1080 ret = qla24xx_erase_sector(vha, fdata); 1083 ret = qla24xx_erase_sector(vha, fdata);
1081 if (ret != QLA_SUCCESS) { 1084 if (ret != QLA_SUCCESS) {
1082 DEBUG9(qla_printk("Unable to erase sector: " 1085 DEBUG9(qla_printk(KERN_WARNING, ha,
1083 "address=%x.\n", faddr)); 1086 "Unable to erase sector: address=%x.\n",
1087 faddr));
1084 break; 1088 break;
1085 } 1089 }
1086 } 1090 }
@@ -1240,8 +1244,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1240 ret = qla24xx_write_flash_dword(ha, 1244 ret = qla24xx_write_flash_dword(ha,
1241 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr)); 1245 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
1242 if (ret != QLA_SUCCESS) { 1246 if (ret != QLA_SUCCESS) {
1243 DEBUG9(qla_printk("Unable to program nvram address=%x " 1247 DEBUG9(qla_printk(KERN_WARNING, ha,
1244 "data=%x.\n", naddr, *dwptr)); 1248 "Unable to program nvram address=%x data=%x.\n",
1249 naddr, *dwptr));
1245 break; 1250 break;
1246 } 1251 }
1247 } 1252 }
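
The qla_sup.c hunks replace every open-coded PCI_FUNC(ha->pdev->devfn) & 1 test with a cached ha->flags.port0 bit. Where that bit is set is not shown in this diff; a plausible one-time initialization during probe would be (an assumption, simply mirroring the expression being replaced):

	/* Assumed init, elsewhere in the series: port 0 is the even PCI function. */
	ha->flags.port0 = !(PCI_FUNC(ha->pdev->devfn) & 1);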
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 19d1afc3a34..b63feaf4312 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.01-k1" 10#define QLA2XXX_VERSION "8.03.01-k3"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 166417a6afb..2de5f3ad640 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1225,8 +1225,8 @@ EXPORT_SYMBOL(__scsi_device_lookup_by_target);
1225 * @starget: SCSI target pointer 1225 * @starget: SCSI target pointer
1226 * @lun: SCSI Logical Unit Number 1226 * @lun: SCSI Logical Unit Number
1227 * 1227 *
1228 * Description: Looks up the scsi_device with the specified @channel, @id, @lun 1228 * Description: Looks up the scsi_device with the specified @lun for a given
1229 * for a given host. The returned scsi_device has an additional reference that 1229 * @starget. The returned scsi_device has an additional reference that
1230 * needs to be released with scsi_device_put once you're done with it. 1230 * needs to be released with scsi_device_put once you're done with it.
1231 **/ 1231 **/
1232struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, 1232struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 213123b0486..41a21772df1 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -887,7 +887,7 @@ static int resp_start_stop(struct scsi_cmnd * scp,
887static sector_t get_sdebug_capacity(void) 887static sector_t get_sdebug_capacity(void)
888{ 888{
889 if (scsi_debug_virtual_gb > 0) 889 if (scsi_debug_virtual_gb > 0)
890 return 2048 * 1024 * scsi_debug_virtual_gb; 890 return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb;
891 else 891 else
892 return sdebug_store_sectors; 892 return sdebug_store_sectors;
893} 893}
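
The scsi_debug cast fixes a 32-bit overflow: 2048 * 1024 is the number of 512-byte sectors per GiB (2^21 * 512 = 2^30 bytes), and the product is otherwise computed in 32-bit integer arithmetic, which wraps once virtual_gb reaches 2048 (2^21 * 2^11 = 2^32). Promoting one operand to sector_t makes the multiply 64-bit wherever sector_t is 64 bits wide (large-block-device configs). In miniature:

	unsigned int virtual_gb = 2048;
	unsigned int wrapped  = 2048 * 1024 * virtual_gb;		/* 2^32 wraps to 0 */
	sector_t     capacity = 2048 * 1024 * (sector_t)virtual_gb;	/* 0x100000000 */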
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 0c2c73be197..a1689353d7f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -641,9 +641,9 @@ EXPORT_SYMBOL(scsi_eh_prep_cmnd);
641/** 641/**
642 * scsi_eh_restore_cmnd - Restore scsi command info as part of error recovery 642 * scsi_eh_restore_cmnd - Restore scsi command info as part of error recovery
643 * @scmd: SCSI command structure to restore 643 * @scmd: SCSI command structure to restore
644 * @ses: saved information from a corresponding call to scsi_prep_eh_cmnd 644 * @ses: saved information from a corresponding call to scsi_eh_prep_cmnd
645 * 645 *
646 * Undo any damage done by the above scsi_prep_eh_cmnd(). 646 * Undo any damage done by the above scsi_eh_prep_cmnd().
647 */ 647 */
648void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses) 648void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
649{ 649{
@@ -1451,28 +1451,21 @@ static void eh_lock_door_done(struct request *req, int uptodate)
1451 * @sdev: SCSI device to prevent medium removal 1451 * @sdev: SCSI device to prevent medium removal
1452 * 1452 *
1453 * Locking: 1453 * Locking:
1454 * We must be called from process context; scsi_allocate_request() 1454 * We must be called from process context.
1455 * may sleep.
1456 * 1455 *
1457 * Notes: 1456 * Notes:
1458 * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the 1457 * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the
1459 * head of the device's request queue, and continue. 1458 * head of the device's request queue, and continue.
1460 *
1461 * Bugs:
1462 * scsi_allocate_request() may sleep waiting for existing requests to
1463 * be processed. However, since we haven't kicked off any request
1464 * processing for this host, this may deadlock.
1465 *
1466 * If scsi_allocate_request() fails for what ever reason, we
1467 * completely forget to lock the door.
1468 */ 1459 */
1469static void scsi_eh_lock_door(struct scsi_device *sdev) 1460static void scsi_eh_lock_door(struct scsi_device *sdev)
1470{ 1461{
1471 struct request *req; 1462 struct request *req;
1472 1463
1464 /*
1465 * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
1466 * request becomes available
1467 */
1473 req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL); 1468 req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
1474 if (!req)
1475 return;
1476 1469
1477 req->cmd[0] = ALLOW_MEDIUM_REMOVAL; 1470 req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
1478 req->cmd[1] = 0; 1471 req->cmd[1] = 0;
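
Dropping the NULL check is safe because blk_get_request() with GFP_KERNEL implies __GFP_WAIT: it sleeps until a request frees up instead of failing. The rest of scsi_eh_lock_door() is elided in this hunk; a hedged sketch of how such an asynchronous BLOCK_PC request is typically finished off in this era's API (eh_lock_door_done is the completion named in the hunk header above; the remaining field values are assumptions, not patch text):

	req->cmd[2] = 0;
	req->cmd[3] = 0;
	req->cmd[4] = SCSI_REMOVAL_PREVENT;
	req->cmd[5] = 0;
	req->cmd_len = COMMAND_SIZE(req->cmd[0]);

	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= REQ_QUIET;	/* no error logging for this command */
	req->timeout = 10 * HZ;
	req->retries = 5;

	blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);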
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index bb218c8b6e9..30f3275e119 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -240,11 +240,11 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
240 * is invalid. Prevent the garbage from being misinterpreted 240 * is invalid. Prevent the garbage from being misinterpreted
241 * and prevent security leaks by zeroing out the excess data. 241 * and prevent security leaks by zeroing out the excess data.
242 */ 242 */
243 if (unlikely(req->data_len > 0 && req->data_len <= bufflen)) 243 if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
244 memset(buffer + (bufflen - req->data_len), 0, req->data_len); 244 memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
245 245
246 if (resid) 246 if (resid)
247 *resid = req->data_len; 247 *resid = req->resid_len;
248 ret = req->errors; 248 ret = req->errors;
249 out: 249 out:
250 blk_put_request(req); 250 blk_put_request(req);
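
The data_len -> resid_len switch reflects a block-layer rename in this series: the residual (bytes not transferred) now lives in req->resid_len, while the total request size comes from blk_rq_bytes(). Under that convention a completion path can compute, for example:

	/* bytes actually transferred = total - residual (sketch) */
	unsigned int done = blk_rq_bytes(req) - req->resid_len;

which is why the zero-fill above offsets from bufflen by resid_len rather than by the old data_len.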
@@ -546,14 +546,9 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
546 * to queue the remainder of them. 546 * to queue the remainder of them.
547 */ 547 */
548 if (blk_end_request(req, error, bytes)) { 548 if (blk_end_request(req, error, bytes)) {
549 int leftover = (req->hard_nr_sectors << 9);
550
551 if (blk_pc_request(req))
552 leftover = req->data_len;
553
554 /* kill remainder if no retries */ 549 /* kill remainder if no retries */
555 if (error && scsi_noretry_cmd(cmd)) 550 if (error && scsi_noretry_cmd(cmd))
556 blk_end_request(req, error, leftover); 551 blk_end_request_all(req, error);
557 else { 552 else {
558 if (requeue) { 553 if (requeue) {
559 /* 554 /*
@@ -673,34 +668,6 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
673EXPORT_SYMBOL(scsi_release_buffers); 668EXPORT_SYMBOL(scsi_release_buffers);
674 669
675/* 670/*
676 * Bidi commands Must be complete as a whole, both sides at once.
677 * If part of the bytes were written and lld returned
678 * scsi_in()->resid and/or scsi_out()->resid this information will be left
679 * in req->data_len and req->next_rq->data_len. The upper-layer driver can
680 * decide what to do with this information.
681 */
682static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
683{
684 struct request *req = cmd->request;
685 unsigned int dlen = req->data_len;
686 unsigned int next_dlen = req->next_rq->data_len;
687
688 req->data_len = scsi_out(cmd)->resid;
689 req->next_rq->data_len = scsi_in(cmd)->resid;
690
691 /* The req and req->next_rq have not been completed */
692 BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
693
694 scsi_release_buffers(cmd);
695
696 /*
697 * This will goose the queue request function at the end, so we don't
698 * need to worry about launching another command.
699 */
700 scsi_next_command(cmd);
701}
702
703/*
704 * Function: scsi_io_completion() 671 * Function: scsi_io_completion()
705 * 672 *
706 * Purpose: Completion processing for block device I/O requests. 673 * Purpose: Completion processing for block device I/O requests.
@@ -739,7 +706,6 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
739void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 706void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
740{ 707{
741 int result = cmd->result; 708 int result = cmd->result;
742 int this_count;
743 struct request_queue *q = cmd->device->request_queue; 709 struct request_queue *q = cmd->device->request_queue;
744 struct request *req = cmd->request; 710 struct request *req = cmd->request;
745 int error = 0; 711 int error = 0;
@@ -773,12 +739,22 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
773 if (!sense_deferred) 739 if (!sense_deferred)
774 error = -EIO; 740 error = -EIO;
775 } 741 }
742
743 req->resid_len = scsi_get_resid(cmd);
744
776 if (scsi_bidi_cmnd(cmd)) { 745 if (scsi_bidi_cmnd(cmd)) {
777 /* will also release_buffers */ 746 /*
778 scsi_end_bidi_request(cmd); 747 * Bidi commands must be complete as a whole,
748 * both sides at once.
749 */
750 req->next_rq->resid_len = scsi_in(cmd)->resid;
751
752 blk_end_request_all(req, 0);
753
754 scsi_release_buffers(cmd);
755 scsi_next_command(cmd);
779 return; 756 return;
780 } 757 }
781 req->data_len = scsi_get_resid(cmd);
782 } 758 }
783 759
784 BUG_ON(blk_bidi_rq(req)); /* bidi not supported for !blk_pc_request yet */ 760 BUG_ON(blk_bidi_rq(req)); /* bidi not supported for !blk_pc_request yet */
@@ -787,9 +763,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
787 * Next deal with any sectors which we were able to correctly 763 * Next deal with any sectors which we were able to correctly
788 * handle. 764 * handle.
789 */ 765 */
790 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, " 766 SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
791 "%d bytes done.\n", 767 "%d bytes done.\n",
792 req->nr_sectors, good_bytes)); 768 blk_rq_sectors(req), good_bytes));
793 769
794 /* 770 /*
795 * Recovered errors need reporting, but they're always treated 771 * Recovered errors need reporting, but they're always treated
@@ -812,7 +788,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
812 */ 788 */
813 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) 789 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
814 return; 790 return;
815 this_count = blk_rq_bytes(req);
816 791
817 error = -EIO; 792 error = -EIO;
818 793
@@ -922,7 +897,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
922 if (driver_byte(result) & DRIVER_SENSE) 897 if (driver_byte(result) & DRIVER_SENSE)
923 scsi_print_sense("", cmd); 898 scsi_print_sense("", cmd);
924 } 899 }
925 blk_end_request(req, -EIO, blk_rq_bytes(req)); 900 blk_end_request_all(req, -EIO);
926 scsi_next_command(cmd); 901 scsi_next_command(cmd);
927 break; 902 break;
928 case ACTION_REPREP: 903 case ACTION_REPREP:
@@ -965,10 +940,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
965 count = blk_rq_map_sg(req->q, req, sdb->table.sgl); 940 count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
966 BUG_ON(count > sdb->table.nents); 941 BUG_ON(count > sdb->table.nents);
967 sdb->table.nents = count; 942 sdb->table.nents = count;
968 if (blk_pc_request(req)) 943 sdb->length = blk_rq_bytes(req);
969 sdb->length = req->data_len;
970 else
971 sdb->length = req->nr_sectors << 9;
972 return BLKPREP_OK; 944 return BLKPREP_OK;
973} 945}
974 946
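
scsi_init_sgtable() no longer needs the blk_pc_request() split when sizing the data buffer: blk_rq_bytes() returns the transfer length for BLOCK_PC and filesystem requests alike. Roughly, the old and new forms:

	/* old: two code paths */
	if (blk_pc_request(req))
		sdb->length = req->data_len;		/* BLOCK_PC: bytes */
	else
		sdb->length = req->nr_sectors << 9;	/* fs: sectors -> bytes */

	/* new: one accessor covers both */
	sdb->length = blk_rq_bytes(req);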
@@ -1087,22 +1059,21 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1087 if (unlikely(ret)) 1059 if (unlikely(ret))
1088 return ret; 1060 return ret;
1089 } else { 1061 } else {
1090 BUG_ON(req->data_len); 1062 BUG_ON(blk_rq_bytes(req));
1091 BUG_ON(req->data);
1092 1063
1093 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 1064 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1094 req->buffer = NULL; 1065 req->buffer = NULL;
1095 } 1066 }
1096 1067
1097 cmd->cmd_len = req->cmd_len; 1068 cmd->cmd_len = req->cmd_len;
1098 if (!req->data_len) 1069 if (!blk_rq_bytes(req))
1099 cmd->sc_data_direction = DMA_NONE; 1070 cmd->sc_data_direction = DMA_NONE;
1100 else if (rq_data_dir(req) == WRITE) 1071 else if (rq_data_dir(req) == WRITE)
1101 cmd->sc_data_direction = DMA_TO_DEVICE; 1072 cmd->sc_data_direction = DMA_TO_DEVICE;
1102 else 1073 else
1103 cmd->sc_data_direction = DMA_FROM_DEVICE; 1074 cmd->sc_data_direction = DMA_FROM_DEVICE;
1104 1075
1105 cmd->transfersize = req->data_len; 1076 cmd->transfersize = blk_rq_bytes(req);
1106 cmd->allowed = req->retries; 1077 cmd->allowed = req->retries;
1107 return BLKPREP_OK; 1078 return BLKPREP_OK;
1108} 1079}
@@ -1212,7 +1183,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1212 break; 1183 break;
1213 case BLKPREP_DEFER: 1184 case BLKPREP_DEFER:
1214 /* 1185 /*
1215 * If we defer, the elv_next_request() returns NULL, but the 1186 * If we defer, the blk_peek_request() returns NULL, but the
1216 * queue must be restarted, so we plug here if no returning 1187 * queue must be restarted, so we plug here if no returning
1217 * command will automatically do that. 1188 * command will automatically do that.
1218 */ 1189 */
@@ -1388,7 +1359,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1388 struct scsi_target *starget = scsi_target(sdev); 1359 struct scsi_target *starget = scsi_target(sdev);
1389 struct Scsi_Host *shost = sdev->host; 1360 struct Scsi_Host *shost = sdev->host;
1390 1361
1391 blkdev_dequeue_request(req); 1362 blk_start_request(req);
1392 1363
1393 if (unlikely(cmd == NULL)) { 1364 if (unlikely(cmd == NULL)) {
1394 printk(KERN_CRIT "impossible request in %s.\n", 1365 printk(KERN_CRIT "impossible request in %s.\n",
@@ -1480,7 +1451,7 @@ static void scsi_request_fn(struct request_queue *q)
1480 1451
1481 if (!sdev) { 1452 if (!sdev) {
1482 printk("scsi: killing requests for dead queue\n"); 1453 printk("scsi: killing requests for dead queue\n");
1483 while ((req = elv_next_request(q)) != NULL) 1454 while ((req = blk_peek_request(q)) != NULL)
1484 scsi_kill_request(req, q); 1455 scsi_kill_request(req, q);
1485 return; 1456 return;
1486 } 1457 }
@@ -1501,7 +1472,7 @@ static void scsi_request_fn(struct request_queue *q)
1501 * that the request is fully prepared even if we cannot 1472 * that the request is fully prepared even if we cannot
1502 * accept it. 1473 * accept it.
1503 */ 1474 */
1504 req = elv_next_request(q); 1475 req = blk_peek_request(q);
1505 if (!req || !scsi_dev_queue_ready(q, sdev)) 1476 if (!req || !scsi_dev_queue_ready(q, sdev))
1506 break; 1477 break;
1507 1478
@@ -1517,7 +1488,7 @@ static void scsi_request_fn(struct request_queue *q)
1517 * Remove the request from the request list. 1488 * Remove the request from the request list.
1518 */ 1489 */
1519 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) 1490 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1520 blkdev_dequeue_request(req); 1491 blk_start_request(req);
1521 sdev->device_busy++; 1492 sdev->device_busy++;
1522 1493
1523 spin_unlock(q->queue_lock); 1494 spin_unlock(q->queue_lock);
@@ -2441,20 +2412,18 @@ int
2441scsi_internal_device_unblock(struct scsi_device *sdev) 2412scsi_internal_device_unblock(struct scsi_device *sdev)
2442{ 2413{
2443 struct request_queue *q = sdev->request_queue; 2414 struct request_queue *q = sdev->request_queue;
2444 int err;
2445 unsigned long flags; 2415 unsigned long flags;
2446 2416
2447 /* 2417 /*
2448 * Try to transition the scsi device to SDEV_RUNNING 2418 * Try to transition the scsi device to SDEV_RUNNING
2449 * and goose the device queue if successful. 2419 * and goose the device queue if successful.
2450 */ 2420 */
2451 err = scsi_device_set_state(sdev, SDEV_RUNNING); 2421 if (sdev->sdev_state == SDEV_BLOCK)
2452 if (err) { 2422 sdev->sdev_state = SDEV_RUNNING;
2453 err = scsi_device_set_state(sdev, SDEV_CREATED); 2423 else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
2454 2424 sdev->sdev_state = SDEV_CREATED;
2455 if (err) 2425 else
2456 return err; 2426 return -EINVAL;
2457 }
2458 2427
2459 spin_lock_irqsave(q->queue_lock, flags); 2428 spin_lock_irqsave(q->queue_lock, flags);
2460 blk_start_queue(q); 2429 blk_start_queue(q);
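
The rewritten scsi_internal_device_unblock() encodes exactly two legal transitions instead of retrying scsi_device_set_state(); an unblock from any other state now returns -EINVAL rather than being silently coerced. The same logic, expressed as a switch for clarity (a sketch, not the patch text):

	switch (sdev->sdev_state) {
	case SDEV_BLOCK:		/* blocked after scan completed */
		sdev->sdev_state = SDEV_RUNNING;
		break;
	case SDEV_CREATED_BLOCK:	/* blocked before scan completed */
		sdev->sdev_state = SDEV_CREATED;
		break;
	default:
		return -EINVAL;
	}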
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 6f51ca485f3..c4478380140 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -115,12 +115,12 @@ MODULE_PARM_DESC(max_report_luns,
115 "REPORT LUNS maximum number of LUNS received (should be" 115 "REPORT LUNS maximum number of LUNS received (should be"
116 " between 1 and 16384)"); 116 " between 1 and 16384)");
117 117
118static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ+3; 118static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
119 119
120module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR); 120module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
121MODULE_PARM_DESC(inq_timeout, 121MODULE_PARM_DESC(inq_timeout,
122 "Timeout (in seconds) waiting for devices to answer INQUIRY." 122 "Timeout (in seconds) waiting for devices to answer INQUIRY."
123 " Default is 5. Some non-compliant devices need more."); 123 " Default is 20. Some devices may need more; most need less.");
124 124
125/* This lock protects only this list */ 125/* This lock protects only this list */
126static DEFINE_SPINLOCK(async_scan_lock); 126static DEFINE_SPINLOCK(async_scan_lock);
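
For reference, SCSI_TIMEOUT is (2*HZ) in this era, so the old default worked out to 2 + 3 = 5 seconds and the new expression gives 2 + 18 = 20 seconds, matching the updated module-parameter description.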
@@ -425,6 +425,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
425 INIT_LIST_HEAD(&starget->devices); 425 INIT_LIST_HEAD(&starget->devices);
426 starget->state = STARGET_CREATED; 426 starget->state = STARGET_CREATED;
427 starget->scsi_level = SCSI_2; 427 starget->scsi_level = SCSI_2;
428 starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
428 retry: 429 retry:
429 spin_lock_irqsave(shost->host_lock, flags); 430 spin_lock_irqsave(shost->host_lock, flags);
430 431
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 48ba413f7f6..10303272ba4 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -387,7 +387,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
387 * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the 387 * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the
388 * length for us. 388 * length for us.
389 */ 389 */
390 cmd->sdb.length = rq->data_len; 390 cmd->sdb.length = blk_rq_bytes(rq);
391 391
392 return 0; 392 return 0;
393 393
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 09479545529..f3e664628d7 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -37,7 +37,6 @@
37#define ISCSI_TRANSPORT_VERSION "2.0-870" 37#define ISCSI_TRANSPORT_VERSION "2.0-870"
38 38
39struct iscsi_internal { 39struct iscsi_internal {
40 int daemon_pid;
41 struct scsi_transport_template t; 40 struct scsi_transport_template t;
42 struct iscsi_transport *iscsi_transport; 41 struct iscsi_transport *iscsi_transport;
43 struct list_head list; 42 struct list_head list;
@@ -357,7 +356,7 @@ int iscsi_session_chkready(struct iscsi_cls_session *session)
357 err = 0; 356 err = 0;
358 break; 357 break;
359 case ISCSI_SESSION_FAILED: 358 case ISCSI_SESSION_FAILED:
360 err = DID_TRANSPORT_DISRUPTED << 16; 359 err = DID_IMM_RETRY << 16;
361 break; 360 break;
362 case ISCSI_SESSION_FREE: 361 case ISCSI_SESSION_FREE:
363 err = DID_TRANSPORT_FAILFAST << 16; 362 err = DID_TRANSPORT_FAILFAST << 16;
@@ -938,23 +937,9 @@ iscsi_if_transport_lookup(struct iscsi_transport *tt)
938} 937}
939 938
940static int 939static int
941iscsi_broadcast_skb(struct sk_buff *skb, gfp_t gfp) 940iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
942{ 941{
943 return netlink_broadcast(nls, skb, 0, 1, gfp); 942 return nlmsg_multicast(nls, skb, 0, group, gfp);
944}
945
946static int
947iscsi_unicast_skb(struct sk_buff *skb, int pid)
948{
949 int rc;
950
951 rc = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
952 if (rc < 0) {
953 printk(KERN_ERR "iscsi: can not unicast skb (%d)\n", rc);
954 return rc;
955 }
956
957 return 0;
958} 943}
959 944
960int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 945int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
@@ -980,7 +965,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
980 return -ENOMEM; 965 return -ENOMEM;
981 } 966 }
982 967
983 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); 968 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
984 ev = NLMSG_DATA(nlh); 969 ev = NLMSG_DATA(nlh);
985 memset(ev, 0, sizeof(*ev)); 970 memset(ev, 0, sizeof(*ev));
986 ev->transport_handle = iscsi_handle(conn->transport); 971 ev->transport_handle = iscsi_handle(conn->transport);
@@ -991,10 +976,45 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
991 memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); 976 memcpy(pdu, hdr, sizeof(struct iscsi_hdr));
992 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); 977 memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size);
993 978
994 return iscsi_unicast_skb(skb, priv->daemon_pid); 979 return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
995} 980}
996EXPORT_SYMBOL_GPL(iscsi_recv_pdu); 981EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
997 982
983int iscsi_offload_mesg(struct Scsi_Host *shost,
984 struct iscsi_transport *transport, uint32_t type,
985 char *data, uint16_t data_size)
986{
987 struct nlmsghdr *nlh;
988 struct sk_buff *skb;
989 struct iscsi_uevent *ev;
990 int len = NLMSG_SPACE(sizeof(*ev) + data_size);
991
992 skb = alloc_skb(len, GFP_NOIO);
993 if (!skb) {
994 printk(KERN_ERR "cannot deliver iscsi offload message: OOM\n");
995 return -ENOMEM;
996 }
997
998 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
999 ev = NLMSG_DATA(nlh);
1000 memset(ev, 0, sizeof(*ev));
1001 ev->type = type;
1002 ev->transport_handle = iscsi_handle(transport);
1003 switch (type) {
1004 case ISCSI_KEVENT_PATH_REQ:
1005 ev->r.req_path.host_no = shost->host_no;
1006 break;
1007 case ISCSI_KEVENT_IF_DOWN:
1008 ev->r.notify_if_down.host_no = shost->host_no;
1009 break;
1010 }
1011
1012 memcpy((char *)ev + sizeof(*ev), data, data_size);
1013
1014 return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_NOIO);
1015}
1016EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
1017
998void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) 1018void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
999{ 1019{
1000 struct nlmsghdr *nlh; 1020 struct nlmsghdr *nlh;
@@ -1014,7 +1034,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1014 return; 1034 return;
1015 } 1035 }
1016 1036
1017 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); 1037 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1018 ev = NLMSG_DATA(nlh); 1038 ev = NLMSG_DATA(nlh);
1019 ev->transport_handle = iscsi_handle(conn->transport); 1039 ev->transport_handle = iscsi_handle(conn->transport);
1020 ev->type = ISCSI_KEVENT_CONN_ERROR; 1040 ev->type = ISCSI_KEVENT_CONN_ERROR;
@@ -1022,7 +1042,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1022 ev->r.connerror.cid = conn->cid; 1042 ev->r.connerror.cid = conn->cid;
1023 ev->r.connerror.sid = iscsi_conn_get_sid(conn); 1043 ev->r.connerror.sid = iscsi_conn_get_sid(conn);
1024 1044
1025 iscsi_broadcast_skb(skb, GFP_ATOMIC); 1045 iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
1026 1046
1027 iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n", 1047 iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
1028 error); 1048 error);
@@ -1030,8 +1050,8 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1030EXPORT_SYMBOL_GPL(iscsi_conn_error_event); 1050EXPORT_SYMBOL_GPL(iscsi_conn_error_event);
1031 1051
1032static int 1052static int
1033iscsi_if_send_reply(int pid, int seq, int type, int done, int multi, 1053iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
1034 void *payload, int size) 1054 void *payload, int size)
1035{ 1055{
1036 struct sk_buff *skb; 1056 struct sk_buff *skb;
1037 struct nlmsghdr *nlh; 1057 struct nlmsghdr *nlh;
@@ -1045,10 +1065,10 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
1045 return -ENOMEM; 1065 return -ENOMEM;
1046 } 1066 }
1047 1067
1048 nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0); 1068 nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
1049 nlh->nlmsg_flags = flags; 1069 nlh->nlmsg_flags = flags;
1050 memcpy(NLMSG_DATA(nlh), payload, size); 1070 memcpy(NLMSG_DATA(nlh), payload, size);
1051 return iscsi_unicast_skb(skb, pid); 1071 return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
1052} 1072}
1053 1073
1054static int 1074static int
@@ -1085,7 +1105,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
1085 return -ENOMEM; 1105 return -ENOMEM;
1086 } 1106 }
1087 1107
1088 nlhstat = __nlmsg_put(skbstat, priv->daemon_pid, 0, 0, 1108 nlhstat = __nlmsg_put(skbstat, 0, 0, 0,
1089 (len - sizeof(*nlhstat)), 0); 1109 (len - sizeof(*nlhstat)), 0);
1090 evstat = NLMSG_DATA(nlhstat); 1110 evstat = NLMSG_DATA(nlhstat);
1091 memset(evstat, 0, sizeof(*evstat)); 1111 memset(evstat, 0, sizeof(*evstat));
@@ -1109,7 +1129,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
1109 skb_trim(skbstat, NLMSG_ALIGN(actual_size)); 1129 skb_trim(skbstat, NLMSG_ALIGN(actual_size));
1110 nlhstat->nlmsg_len = actual_size; 1130 nlhstat->nlmsg_len = actual_size;
1111 1131
1112 err = iscsi_unicast_skb(skbstat, priv->daemon_pid); 1132 err = iscsi_multicast_skb(skbstat, ISCSI_NL_GRP_ISCSID,
1133 GFP_ATOMIC);
1113 } while (err < 0 && err != -ECONNREFUSED); 1134 } while (err < 0 && err != -ECONNREFUSED);
1114 1135
1115 return err; 1136 return err;
@@ -1143,7 +1164,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
1143 return -ENOMEM; 1164 return -ENOMEM;
1144 } 1165 }
1145 1166
1146 nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); 1167 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1147 ev = NLMSG_DATA(nlh); 1168 ev = NLMSG_DATA(nlh);
1148 ev->transport_handle = iscsi_handle(session->transport); 1169 ev->transport_handle = iscsi_handle(session->transport);
1149 1170
@@ -1172,7 +1193,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
1172 * this will occur if the daemon is not up, so we just warn 1193 * this will occur if the daemon is not up, so we just warn
1173 * the user; when the daemon is restarted it will handle it 1194 * the user; when the daemon is restarted it will handle it
1174 */ 1195 */
1175 rc = iscsi_broadcast_skb(skb, GFP_KERNEL); 1196 rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
1176 if (rc == -ESRCH) 1197 if (rc == -ESRCH)
1177 iscsi_cls_session_printk(KERN_ERR, session, 1198 iscsi_cls_session_printk(KERN_ERR, session,
1178 "Cannot notify userspace of session " 1199 "Cannot notify userspace of session "
@@ -1268,26 +1289,54 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
1268 return err; 1289 return err;
1269} 1290}
1270 1291
1292static int iscsi_if_ep_connect(struct iscsi_transport *transport,
1293 struct iscsi_uevent *ev, int msg_type)
1294{
1295 struct iscsi_endpoint *ep;
1296 struct sockaddr *dst_addr;
1297 struct Scsi_Host *shost = NULL;
1298 int non_blocking, err = 0;
1299
1300 if (!transport->ep_connect)
1301 return -EINVAL;
1302
1303 if (msg_type == ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST) {
1304 shost = scsi_host_lookup(ev->u.ep_connect_through_host.host_no);
1305 if (!shost) {
1306 printk(KERN_ERR "ep connect failed. Could not find "
1307 "host no %u\n",
1308 ev->u.ep_connect_through_host.host_no);
1309 return -ENODEV;
1310 }
1311 non_blocking = ev->u.ep_connect_through_host.non_blocking;
1312 } else
1313 non_blocking = ev->u.ep_connect.non_blocking;
1314
1315 dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
1316 ep = transport->ep_connect(shost, dst_addr, non_blocking);
1317 if (IS_ERR(ep)) {
1318 err = PTR_ERR(ep);
1319 goto release_host;
1320 }
1321
1322 ev->r.ep_connect_ret.handle = ep->id;
1323release_host:
1324 if (shost)
1325 scsi_host_put(shost);
1326 return err;
1327}
1328
1271static int 1329static int
1272iscsi_if_transport_ep(struct iscsi_transport *transport, 1330iscsi_if_transport_ep(struct iscsi_transport *transport,
1273 struct iscsi_uevent *ev, int msg_type) 1331 struct iscsi_uevent *ev, int msg_type)
1274{ 1332{
1275 struct iscsi_endpoint *ep; 1333 struct iscsi_endpoint *ep;
1276 struct sockaddr *dst_addr;
1277 int rc = 0; 1334 int rc = 0;
1278 1335
1279 switch (msg_type) { 1336 switch (msg_type) {
1337 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
1280 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: 1338 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
1281 if (!transport->ep_connect) 1339 rc = iscsi_if_ep_connect(transport, ev, msg_type);
1282 return -EINVAL;
1283
1284 dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
1285 ep = transport->ep_connect(dst_addr,
1286 ev->u.ep_connect.non_blocking);
1287 if (IS_ERR(ep))
1288 return PTR_ERR(ep);
1289
1290 ev->r.ep_connect_ret.handle = ep->id;
1291 break; 1340 break;
1292 case ISCSI_UEVENT_TRANSPORT_EP_POLL: 1341 case ISCSI_UEVENT_TRANSPORT_EP_POLL:
1293 if (!transport->ep_poll) 1342 if (!transport->ep_poll)
@@ -1365,7 +1414,31 @@ iscsi_set_host_param(struct iscsi_transport *transport,
1365} 1414}
1366 1415
1367static int 1416static int
1368iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 1417iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
1418{
1419 struct Scsi_Host *shost;
1420 struct iscsi_path *params;
1421 int err;
1422
1423 if (!transport->set_path)
1424 return -ENOSYS;
1425
1426 shost = scsi_host_lookup(ev->u.set_path.host_no);
1427 if (!shost) {
1428 printk(KERN_ERR "set path could not find host no %u\n",
1429 ev->u.set_path.host_no);
1430 return -ENODEV;
1431 }
1432
1433 params = (struct iscsi_path *)((char *)ev + sizeof(*ev));
1434 err = transport->set_path(shost, params);
1435
1436 scsi_host_put(shost);
1437 return err;
1438}
1439
1440static int
1441iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
1369{ 1442{
1370 int err = 0; 1443 int err = 0;
1371 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 1444 struct iscsi_uevent *ev = NLMSG_DATA(nlh);
@@ -1375,6 +1448,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1375 struct iscsi_cls_conn *conn; 1448 struct iscsi_cls_conn *conn;
1376 struct iscsi_endpoint *ep = NULL; 1449 struct iscsi_endpoint *ep = NULL;
1377 1450
1451 if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE)
1452 *group = ISCSI_NL_GRP_UIP;
1453 else
1454 *group = ISCSI_NL_GRP_ISCSID;
1455
1378 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); 1456 priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
1379 if (!priv) 1457 if (!priv)
1380 return -EINVAL; 1458 return -EINVAL;
@@ -1383,8 +1461,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1383 if (!try_module_get(transport->owner)) 1461 if (!try_module_get(transport->owner))
1384 return -EINVAL; 1462 return -EINVAL;
1385 1463
1386 priv->daemon_pid = NETLINK_CREDS(skb)->pid;
1387
1388 switch (nlh->nlmsg_type) { 1464 switch (nlh->nlmsg_type) {
1389 case ISCSI_UEVENT_CREATE_SESSION: 1465 case ISCSI_UEVENT_CREATE_SESSION:
1390 err = iscsi_if_create_session(priv, ep, ev, 1466 err = iscsi_if_create_session(priv, ep, ev,
@@ -1469,6 +1545,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1469 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: 1545 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
1470 case ISCSI_UEVENT_TRANSPORT_EP_POLL: 1546 case ISCSI_UEVENT_TRANSPORT_EP_POLL:
1471 case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: 1547 case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
1548 case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
1472 err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type); 1549 err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type);
1473 break; 1550 break;
1474 case ISCSI_UEVENT_TGT_DSCVR: 1551 case ISCSI_UEVENT_TGT_DSCVR:
@@ -1477,6 +1554,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1477 case ISCSI_UEVENT_SET_HOST_PARAM: 1554 case ISCSI_UEVENT_SET_HOST_PARAM:
1478 err = iscsi_set_host_param(transport, ev); 1555 err = iscsi_set_host_param(transport, ev);
1479 break; 1556 break;
1557 case ISCSI_UEVENT_PATH_UPDATE:
1558 err = iscsi_set_path(transport, ev);
1559 break;
1480 default: 1560 default:
1481 err = -ENOSYS; 1561 err = -ENOSYS;
1482 break; 1562 break;
@@ -1499,6 +1579,7 @@ iscsi_if_rx(struct sk_buff *skb)
1499 uint32_t rlen; 1579 uint32_t rlen;
1500 struct nlmsghdr *nlh; 1580 struct nlmsghdr *nlh;
1501 struct iscsi_uevent *ev; 1581 struct iscsi_uevent *ev;
1582 uint32_t group;
1502 1583
1503 nlh = nlmsg_hdr(skb); 1584 nlh = nlmsg_hdr(skb);
1504 if (nlh->nlmsg_len < sizeof(*nlh) || 1585 if (nlh->nlmsg_len < sizeof(*nlh) ||
@@ -1511,7 +1592,7 @@ iscsi_if_rx(struct sk_buff *skb)
1511 if (rlen > skb->len) 1592 if (rlen > skb->len)
1512 rlen = skb->len; 1593 rlen = skb->len;
1513 1594
1514 err = iscsi_if_recv_msg(skb, nlh); 1595 err = iscsi_if_recv_msg(skb, nlh, &group);
1515 if (err) { 1596 if (err) {
1516 ev->type = ISCSI_KEVENT_IF_ERROR; 1597 ev->type = ISCSI_KEVENT_IF_ERROR;
1517 ev->iferror = err; 1598 ev->iferror = err;
@@ -1525,8 +1606,7 @@ iscsi_if_rx(struct sk_buff *skb)
1525 */ 1606 */
1526 if (ev->type == ISCSI_UEVENT_GET_STATS && !err) 1607 if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
1527 break; 1608 break;
1528 err = iscsi_if_send_reply( 1609 err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
1529 NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
1530 nlh->nlmsg_type, 0, 0, ev, sizeof(*ev)); 1610 nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
1531 } while (err < 0 && err != -ECONNREFUSED); 1611 } while (err < 0 && err != -ECONNREFUSED);
1532 skb_pull(skb, rlen); 1612 skb_pull(skb, rlen);
@@ -1774,7 +1854,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
1774 if (!priv) 1854 if (!priv)
1775 return NULL; 1855 return NULL;
1776 INIT_LIST_HEAD(&priv->list); 1856 INIT_LIST_HEAD(&priv->list);
1777 priv->daemon_pid = -1;
1778 priv->iscsi_transport = tt; 1857 priv->iscsi_transport = tt;
1779 priv->t.user_scan = iscsi_user_scan; 1858 priv->t.user_scan = iscsi_user_scan;
1780 priv->t.create_work_queue = 1; 1859 priv->t.create_work_queue = 1;
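
Switching from PID-tracked netlink_unicast()/netlink_broadcast() to group multicast removes the daemon_pid bookkeeping entirely: receivers bind to ISCSI_NL_GRP_ISCSID or ISCSI_NL_GRP_UIP and the kernel fans messages out to every member. The send side reduces to a one-liner, as in the new iscsi_multicast_skb():

	/* pid 0 marks a kernel-originated message; 'group' selects the receivers */
	return nlmsg_multicast(nls, skb, 0, group, gfp);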
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 50988cbf7b2..d606452297c 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -163,12 +163,10 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
163 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); 163 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
164 164
165 while (!blk_queue_plugged(q)) { 165 while (!blk_queue_plugged(q)) {
166 req = elv_next_request(q); 166 req = blk_fetch_request(q);
167 if (!req) 167 if (!req)
168 break; 168 break;
169 169
170 blkdev_dequeue_request(req);
171
172 spin_unlock_irq(q->queue_lock); 170 spin_unlock_irq(q->queue_lock);
173 171
174 handler = to_sas_internal(shost->transportt)->f->smp_handler; 172 handler = to_sas_internal(shost->transportt)->f->smp_handler;
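
blk_fetch_request() is a helper that fuses the old peek/dequeue pair; the sas_smp_request() loop above shows the caller-side simplification. The equivalence, roughly:

	/* old: two steps under queue_lock */
	req = blk_peek_request(q);		/* was elv_next_request(q) */
	if (req)
		blk_start_request(req);		/* was blkdev_dequeue_request(req) */

	/* new: one step */
	req = blk_fetch_request(q);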
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 84044233b63..878b17a9af3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -384,9 +384,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
384 struct scsi_device *sdp = q->queuedata; 384 struct scsi_device *sdp = q->queuedata;
385 struct gendisk *disk = rq->rq_disk; 385 struct gendisk *disk = rq->rq_disk;
386 struct scsi_disk *sdkp; 386 struct scsi_disk *sdkp;
387 sector_t block = rq->sector; 387 sector_t block = blk_rq_pos(rq);
388 sector_t threshold; 388 sector_t threshold;
389 unsigned int this_count = rq->nr_sectors; 389 unsigned int this_count = blk_rq_sectors(rq);
390 int ret, host_dif; 390 int ret, host_dif;
391 391
392 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 392 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -413,10 +413,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
413 this_count)); 413 this_count));
414 414
415 if (!sdp || !scsi_device_online(sdp) || 415 if (!sdp || !scsi_device_online(sdp) ||
416 block + rq->nr_sectors > get_capacity(disk)) { 416 block + blk_rq_sectors(rq) > get_capacity(disk)) {
417 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 417 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
418 "Finishing %ld sectors\n", 418 "Finishing %u sectors\n",
419 rq->nr_sectors)); 419 blk_rq_sectors(rq)));
420 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 420 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
421 "Retry with 0x%p\n", SCpnt)); 421 "Retry with 0x%p\n", SCpnt));
422 goto out; 422 goto out;
@@ -463,7 +463,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
463 * for this. 463 * for this.
464 */ 464 */
465 if (sdp->sector_size == 1024) { 465 if (sdp->sector_size == 1024) {
466 if ((block & 1) || (rq->nr_sectors & 1)) { 466 if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
467 scmd_printk(KERN_ERR, SCpnt, 467 scmd_printk(KERN_ERR, SCpnt,
468 "Bad block number requested\n"); 468 "Bad block number requested\n");
469 goto out; 469 goto out;
@@ -473,7 +473,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
473 } 473 }
474 } 474 }
475 if (sdp->sector_size == 2048) { 475 if (sdp->sector_size == 2048) {
476 if ((block & 3) || (rq->nr_sectors & 3)) { 476 if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
477 scmd_printk(KERN_ERR, SCpnt, 477 scmd_printk(KERN_ERR, SCpnt,
478 "Bad block number requested\n"); 478 "Bad block number requested\n");
479 goto out; 479 goto out;
@@ -483,7 +483,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
483 } 483 }
484 } 484 }
485 if (sdp->sector_size == 4096) { 485 if (sdp->sector_size == 4096) {
486 if ((block & 7) || (rq->nr_sectors & 7)) { 486 if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
487 scmd_printk(KERN_ERR, SCpnt, 487 scmd_printk(KERN_ERR, SCpnt,
488 "Bad block number requested\n"); 488 "Bad block number requested\n");
489 goto out; 489 goto out;
@@ -512,10 +512,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
512 } 512 }
513 513
514 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 514 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
515 "%s %d/%ld 512 byte blocks.\n", 515 "%s %d/%u 512 byte blocks.\n",
516 (rq_data_dir(rq) == WRITE) ? 516 (rq_data_dir(rq) == WRITE) ?
517 "writing" : "reading", this_count, 517 "writing" : "reading", this_count,
518 rq->nr_sectors)); 518 blk_rq_sectors(rq)));
519 519
520 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */ 520 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
521 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); 521 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
@@ -971,8 +971,8 @@ static struct block_device_operations sd_fops = {
971 971
972static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) 972static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
973{ 973{
974 u64 start_lba = scmd->request->sector; 974 u64 start_lba = blk_rq_pos(scmd->request);
975 u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512); 975 u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
976 u64 bad_lba; 976 u64 bad_lba;
977 int info_valid; 977 int info_valid;
978 978
@@ -1510,7 +1510,7 @@ got_data:
1510 */ 1510 */
1511 sector_size = 512; 1511 sector_size = 512;
1512 } 1512 }
1513 blk_queue_hardsect_size(sdp->request_queue, sector_size); 1513 blk_queue_logical_block_size(sdp->request_queue, sector_size);
1514 1514
1515 { 1515 {
1516 char cap_str_2[10], cap_str_10[10]; 1516 char cap_str_2[10], cap_str_10[10];
@@ -1902,24 +1902,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
1902 index = sdkp->index; 1902 index = sdkp->index;
1903 dev = &sdp->sdev_gendev; 1903 dev = &sdp->sdev_gendev;
1904 1904
1905 if (!sdp->request_queue->rq_timeout) {
1906 if (sdp->type != TYPE_MOD)
1907 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
1908 else
1909 blk_queue_rq_timeout(sdp->request_queue,
1910 SD_MOD_TIMEOUT);
1911 }
1912
1913 device_initialize(&sdkp->dev);
1914 sdkp->dev.parent = &sdp->sdev_gendev;
1915 sdkp->dev.class = &sd_disk_class;
1916 dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
1917
1918 if (device_add(&sdkp->dev))
1919 goto out_free_index;
1920
1921 get_device(&sdp->sdev_gendev);
1922
1923 if (index < SD_MAX_DISKS) { 1905 if (index < SD_MAX_DISKS) {
1924 gd->major = sd_major((index & 0xf0) >> 4); 1906 gd->major = sd_major((index & 0xf0) >> 4);
1925 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 1907 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
@@ -1954,11 +1936,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
1954 1936
1955 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 1937 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
1956 sdp->removable ? "removable " : ""); 1938 sdp->removable ? "removable " : "");
1957
1958 return;
1959
1960 out_free_index:
1961 ida_remove(&sd_index_ida, index);
1962} 1939}
1963 1940
1964/** 1941/**
@@ -2026,6 +2003,24 @@ static int sd_probe(struct device *dev)
2026 sdkp->openers = 0; 2003 sdkp->openers = 0;
2027 sdkp->previous_state = 1; 2004 sdkp->previous_state = 1;
2028 2005
2006 if (!sdp->request_queue->rq_timeout) {
2007 if (sdp->type != TYPE_MOD)
2008 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
2009 else
2010 blk_queue_rq_timeout(sdp->request_queue,
2011 SD_MOD_TIMEOUT);
2012 }
2013
2014 device_initialize(&sdkp->dev);
2015 sdkp->dev.parent = &sdp->sdev_gendev;
2016 sdkp->dev.class = &sd_disk_class;
2017 dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
2018
2019 if (device_add(&sdkp->dev))
2020 goto out_free_index;
2021
2022 get_device(&sdp->sdev_gendev);
2023
2029 async_schedule(sd_probe_async, sdkp); 2024 async_schedule(sd_probe_async, sdkp);
2030 2025
2031 return 0; 2026 return 0;
@@ -2055,8 +2050,10 @@ static int sd_probe(struct device *dev)
2055 **/ 2050 **/
2056static int sd_remove(struct device *dev) 2051static int sd_remove(struct device *dev)
2057{ 2052{
2058 struct scsi_disk *sdkp = dev_get_drvdata(dev); 2053 struct scsi_disk *sdkp;
2059 2054
2055 async_synchronize_full();
2056 sdkp = dev_get_drvdata(dev);
2060 device_del(&sdkp->dev); 2057 device_del(&sdkp->dev);
2061 del_gendisk(sdkp->disk); 2058 del_gendisk(sdkp->disk);
2062 sd_shutdown(dev); 2059 sd_shutdown(dev);
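
Moving device_add() back into the synchronous part of sd_probe() and adding async_synchronize_full() to sd_remove() closes a race between asynchronous probing and removal: remove now waits for every outstanding async probe before tearing anything down. The shape of the pattern (illustrative names, not sd.c's):

	#include <linux/async.h>

	static void my_probe_async(void *data, async_cookie_t cookie)
	{
		/* slow part of probe; runs on the async thread pool */
	}

	static int my_probe(struct device *dev)
	{
		/* registration that must not race removal stays synchronous */
		async_schedule(my_probe_async, dev);
		return 0;
	}

	static int my_remove(struct device *dev)
	{
		async_synchronize_full();	/* wait for all async probes */
		/* ... safe to tear down ... */
		return 0;
	}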
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 184dff49279..82f14a9482d 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -507,7 +507,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
507 sector_sz = scmd->device->sector_size; 507 sector_sz = scmd->device->sector_size;
508 sectors = good_bytes / sector_sz; 508 sectors = good_bytes / sector_sz;
509 509
510 phys = scmd->request->sector & 0xffffffff; 510 phys = blk_rq_pos(scmd->request) & 0xffffffff;
511 if (sector_sz == 4096) 511 if (sector_sz == 4096)
512 phys >>= 3; 512 phys >>= 3;
513 513
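
The & 0xffffffff / >> 3 pair in sd_dif_complete() works because DIF reference tags count device sectors: blk_rq_pos() is in 512-byte units, so on a 4096-byte-sector disk each device sector spans eight of them and the position is divided by 8.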
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index e1716f14cd4..8201387b4da 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -289,8 +289,8 @@ sg_open(struct inode *inode, struct file *filp)
289 if (list_empty(&sdp->sfds)) { /* no existing opens on this device */ 289 if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
290 sdp->sgdebug = 0; 290 sdp->sgdebug = 0;
291 q = sdp->device->request_queue; 291 q = sdp->device->request_queue;
292 sdp->sg_tablesize = min(q->max_hw_segments, 292 sdp->sg_tablesize = min(queue_max_hw_segments(q),
293 q->max_phys_segments); 293 queue_max_phys_segments(q));
294 } 294 }
295 if ((sfp = sg_add_sfp(sdp, dev))) 295 if ((sfp = sg_add_sfp(sdp, dev)))
296 filp->private_data = sfp; 296 filp->private_data = sfp;
@@ -909,7 +909,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
909 if (val < 0) 909 if (val < 0)
910 return -EINVAL; 910 return -EINVAL;
911 val = min_t(int, val, 911 val = min_t(int, val,
912 sdp->device->request_queue->max_sectors * 512); 912 queue_max_sectors(sdp->device->request_queue) * 512);
913 if (val != sfp->reserve.bufflen) { 913 if (val != sfp->reserve.bufflen) {
914 if (sg_res_in_use(sfp) || sfp->mmap_called) 914 if (sg_res_in_use(sfp) || sfp->mmap_called)
915 return -EBUSY; 915 return -EBUSY;
@@ -919,7 +919,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
919 return 0; 919 return 0;
920 case SG_GET_RESERVED_SIZE: 920 case SG_GET_RESERVED_SIZE:
921 val = min_t(int, sfp->reserve.bufflen, 921 val = min_t(int, sfp->reserve.bufflen,
922 sdp->device->request_queue->max_sectors * 512); 922 queue_max_sectors(sdp->device->request_queue) * 512);
923 return put_user(val, ip); 923 return put_user(val, ip);
924 case SG_SET_COMMAND_Q: 924 case SG_SET_COMMAND_Q:
925 result = get_user(val, ip); 925 result = get_user(val, ip);
@@ -1059,12 +1059,13 @@ sg_ioctl(struct inode *inode, struct file *filp,
1059 return -ENODEV; 1059 return -ENODEV;
1060 return scsi_ioctl(sdp->device, cmd_in, p); 1060 return scsi_ioctl(sdp->device, cmd_in, p);
1061 case BLKSECTGET: 1061 case BLKSECTGET:
1062 return put_user(sdp->device->request_queue->max_sectors * 512, 1062 return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
1063 ip); 1063 ip);
1064 case BLKTRACESETUP: 1064 case BLKTRACESETUP:
1065 return blk_trace_setup(sdp->device->request_queue, 1065 return blk_trace_setup(sdp->device->request_queue,
1066 sdp->disk->disk_name, 1066 sdp->disk->disk_name,
1067 MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1067 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
1068 NULL,
1068 (char *)arg); 1069 (char *)arg);
1069 case BLKTRACESTART: 1070 case BLKTRACESTART:
1070 return blk_trace_startstop(sdp->device->request_queue, 1); 1071 return blk_trace_startstop(sdp->device->request_queue, 1);
@@ -1260,7 +1261,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
1260 1261
1261 sense = rq->sense; 1262 sense = rq->sense;
1262 result = rq->errors; 1263 result = rq->errors;
1263 resid = rq->data_len; 1264 resid = rq->resid_len;
1264 1265
1265 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", 1266 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1266 sdp->disk->disk_name, srp->header.pack_id, result)); 1267 sdp->disk->disk_name, srp->header.pack_id, result));
@@ -1377,7 +1378,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1377 sdp->device = scsidp; 1378 sdp->device = scsidp;
1378 INIT_LIST_HEAD(&sdp->sfds); 1379 INIT_LIST_HEAD(&sdp->sfds);
1379 init_waitqueue_head(&sdp->o_excl_wait); 1380 init_waitqueue_head(&sdp->o_excl_wait);
1380 sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments); 1381 sdp->sg_tablesize = min(queue_max_hw_segments(q),
1382 queue_max_phys_segments(q));
1381 sdp->index = k; 1383 sdp->index = k;
1382 kref_init(&sdp->d_ref); 1384 kref_init(&sdp->d_ref);
1383 1385
@@ -2055,7 +2057,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
2055 sg_big_buff = def_reserved_size; 2057 sg_big_buff = def_reserved_size;
2056 2058
2057 bufflen = min_t(int, sg_big_buff, 2059 bufflen = min_t(int, sg_big_buff,
2058 sdp->device->request_queue->max_sectors * 512); 2060 queue_max_sectors(sdp->device->request_queue) * 512);
2059 sg_build_reserve(sfp, bufflen); 2061 sg_build_reserve(sfp, bufflen);
2060 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n", 2062 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2061 sfp->reserve.bufflen, sfp->reserve.k_use_sg)); 2063 sfp->reserve.bufflen, sfp->reserve.k_use_sg));
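The sg changes above follow the series-wide pattern of replacing direct pokes at request/queue internals (rq->sector, rq->data_len, q->max_sectors, q->max_*_segments) with block-layer accessors (blk_rq_pos(), rq->resid_len, queue_max_sectors(), queue_max_hw_segments(), queue_max_phys_segments()), so the underlying fields can later migrate into the queue's limits structure without touching every driver again. A minimal sketch of the idiom, assuming the helpers are thin inline wrappers (the exact field layout is the block layer's business, not the driver's):

	/* sketch: what a driver used to write ... */
	bufflen = min_t(int, sg_big_buff,
			sdp->device->request_queue->max_sectors * 512);

	/* ... and the accessor form it is converted to */
	bufflen = min_t(int, sg_big_buff,
			queue_max_sectors(sdp->device->request_queue) * 512);

Note also the extra NULL argument in the BLKTRACESETUP case above; it matches an updated blk_trace_setup() signature introduced elsewhere in this series.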
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0e1a0f2d2ad..cd350dfc121 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -292,7 +292,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
292 if (cd->device->sector_size == 2048) 292 if (cd->device->sector_size == 2048)
293 error_sector <<= 2; 293 error_sector <<= 2;
294 error_sector &= ~(block_sectors - 1); 294 error_sector &= ~(block_sectors - 1);
295 good_bytes = (error_sector - SCpnt->request->sector) << 9; 295 good_bytes = (error_sector -
296 blk_rq_pos(SCpnt->request)) << 9;
296 if (good_bytes < 0 || good_bytes >= this_count) 297 if (good_bytes < 0 || good_bytes >= this_count)
297 good_bytes = 0; 298 good_bytes = 0;
298 /* 299 /*
@@ -349,8 +350,8 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
349 cd->disk->disk_name, block)); 350 cd->disk->disk_name, block));
350 351
351 if (!cd->device || !scsi_device_online(cd->device)) { 352 if (!cd->device || !scsi_device_online(cd->device)) {
352 SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", 353 SCSI_LOG_HLQUEUE(2, printk("Finishing %u sectors\n",
353 rq->nr_sectors)); 354 blk_rq_sectors(rq)));
354 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); 355 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
355 goto out; 356 goto out;
356 } 357 }
@@ -413,7 +414,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
413 /* 414 /*
414 * request doesn't start on hw block boundary, add scatter pads 415 * request doesn't start on hw block boundary, add scatter pads
415 */ 416 */
416 if (((unsigned int)rq->sector % (s_size >> 9)) || 417 if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||
417 (scsi_bufflen(SCpnt) % s_size)) { 418 (scsi_bufflen(SCpnt) % s_size)) {
418 scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n"); 419 scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n");
419 goto out; 420 goto out;
@@ -422,14 +423,14 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
422 this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9); 423 this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9);
423 424
424 425
425 SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n", 426 SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%u 512 byte blocks.\n",
426 cd->cdi.name, 427 cd->cdi.name,
427 (rq_data_dir(rq) == WRITE) ? 428 (rq_data_dir(rq) == WRITE) ?
428 "writing" : "reading", 429 "writing" : "reading",
429 this_count, rq->nr_sectors)); 430 this_count, blk_rq_sectors(rq)));
430 431
431 SCpnt->cmnd[1] = 0; 432 SCpnt->cmnd[1] = 0;
432 block = (unsigned int)rq->sector / (s_size >> 9); 433 block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
433 434
434 if (this_count > 0xffff) { 435 if (this_count > 0xffff) {
435 this_count = 0xffff; 436 this_count = 0xffff;
@@ -726,7 +727,7 @@ static void get_sectorsize(struct scsi_cd *cd)
726 } 727 }
727 728
728 queue = cd->device->request_queue; 729 queue = cd->device->request_queue;
729 blk_queue_hardsect_size(queue, sector_size); 730 blk_queue_logical_block_size(queue, sector_size);
730 731
731 return; 732 return;
732} 733}
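The last sr hunk is part of the rename of blk_queue_hardsect_size() to blk_queue_logical_block_size(): with drives growing distinct logical and physical sector sizes, "hardsect" became ambiguous, and the new name states that this is the logical block size used for addressing. A rough sketch of what the renamed setter does (assuming the helper of this era; details may differ):

	/* rough sketch of the renamed helper */
	void blk_queue_logical_block_size(struct request_queue *q,
					  unsigned short size)
	{
		q->limits.logical_block_size = size;
		/* the real helper also keeps physical_block_size and
		 * io_min from falling below the logical size */
	}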
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index eb24efea8f1..b33d04250bb 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -463,7 +463,7 @@ static void st_scsi_execute_end(struct request *req, int uptodate)
463 struct scsi_tape *STp = SRpnt->stp; 463 struct scsi_tape *STp = SRpnt->stp;
464 464
465 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors; 465 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
466 STp->buffer->cmdstat.residual = req->data_len; 466 STp->buffer->cmdstat.residual = req->resid_len;
467 467
468 if (SRpnt->waiting) 468 if (SRpnt->waiting)
469 complete(SRpnt->waiting); 469 complete(SRpnt->waiting);
@@ -2964,7 +2964,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2964 !(STp->use_pf & PF_TESTED)) { 2964 !(STp->use_pf & PF_TESTED)) {
2965 /* Try the other possible state of Page Format if not 2965 /* Try the other possible state of Page Format if not
2966 already tried */ 2966 already tried */
2967 STp->use_pf = !STp->use_pf | PF_TESTED; 2967 STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED;
2968 st_release_request(SRpnt); 2968 st_release_request(SRpnt);
2969 SRpnt = NULL; 2969 SRpnt = NULL;
2970 return st_int_ioctl(STp, cmd_in, arg); 2970 return st_int_ioctl(STp, cmd_in, arg);
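The use_pf hunk above replaces a logical NOT with an explicit XOR. USE_PF and PF_TESTED are single-bit flags sharing one word; `!STp->use_pf` only toggled the right bit because USE_PF happens to live in bit 0, whereas `(STp->use_pf ^ USE_PF)` toggles the named flag wherever it sits and leaves the rest of the word intact. A standalone demo of the difference (flag values chosen for illustration, not the driver's actual defines):

	#include <stdio.h>

	#define USE_PF    4	/* hypothetical: flag NOT in bit 0 */
	#define PF_TESTED 2

	int main(void)
	{
		unsigned int use_pf = 0;	/* PF currently off, untested */

		/* old form: sets bit 0, which is not USE_PF here */
		printf("buggy: %u\n", !use_pf | PF_TESTED);		/* 3 */
		/* new form: sets the actual USE_PF bit */
		printf("fixed: %u\n", (use_pf ^ USE_PF) | PF_TESTED);	/* 6 */
		return 0;
	}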
@@ -3983,8 +3983,8 @@ static int st_probe(struct device *dev)
3983 return -ENODEV; 3983 return -ENODEV;
3984 } 3984 }
3985 3985
3986 i = min(SDp->request_queue->max_hw_segments, 3986 i = min(queue_max_hw_segments(SDp->request_queue),
3987 SDp->request_queue->max_phys_segments); 3987 queue_max_phys_segments(SDp->request_queue));
3988 if (st_max_sg_segs < i) 3988 if (st_max_sg_segs < i)
3989 i = st_max_sg_segs; 3989 i = st_max_sg_segs;
3990 buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i); 3990 buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 583966ec826..45374d66d26 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -737,11 +737,14 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
737 struct sym_hcb *np = sym_get_hcb(sdev->host); 737 struct sym_hcb *np = sym_get_hcb(sdev->host);
738 struct sym_tcb *tp = &np->target[sdev->id]; 738 struct sym_tcb *tp = &np->target[sdev->id];
739 struct sym_lcb *lp; 739 struct sym_lcb *lp;
740 unsigned long flags;
741 int error;
740 742
741 if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN) 743 if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN)
742 return -ENXIO; 744 return -ENXIO;
743 745
744 tp->starget = sdev->sdev_target; 746 spin_lock_irqsave(np->s.host->host_lock, flags);
747
745 /* 748 /*
746 * Fail the device init if the device is flagged NOSCAN at BOOT in 749 * Fail the device init if the device is flagged NOSCAN at BOOT in
747 * the NVRAM. This may speed up boot and maintain coherency with 750 * the NVRAM. This may speed up boot and maintain coherency with
@@ -753,26 +756,37 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
753 756
754 if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) { 757 if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) {
755 tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; 758 tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
756 starget_printk(KERN_INFO, tp->starget, 759 starget_printk(KERN_INFO, sdev->sdev_target,
757 "Scan at boot disabled in NVRAM\n"); 760 "Scan at boot disabled in NVRAM\n");
758 return -ENXIO; 761 error = -ENXIO;
762 goto out;
759 } 763 }
760 764
761 if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) { 765 if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) {
762 if (sdev->lun != 0) 766 if (sdev->lun != 0) {
763 return -ENXIO; 767 error = -ENXIO;
764 starget_printk(KERN_INFO, tp->starget, 768 goto out;
769 }
770 starget_printk(KERN_INFO, sdev->sdev_target,
765 "Multiple LUNs disabled in NVRAM\n"); 771 "Multiple LUNs disabled in NVRAM\n");
766 } 772 }
767 773
768 lp = sym_alloc_lcb(np, sdev->id, sdev->lun); 774 lp = sym_alloc_lcb(np, sdev->id, sdev->lun);
769 if (!lp) 775 if (!lp) {
770 return -ENOMEM; 776 error = -ENOMEM;
777 goto out;
778 }
779 if (tp->nlcb == 1)
780 tp->starget = sdev->sdev_target;
771 781
772 spi_min_period(tp->starget) = tp->usr_period; 782 spi_min_period(tp->starget) = tp->usr_period;
773 spi_max_width(tp->starget) = tp->usr_width; 783 spi_max_width(tp->starget) = tp->usr_width;
774 784
775 return 0; 785 error = 0;
786out:
787 spin_unlock_irqrestore(np->s.host->host_lock, flags);
788
789 return error;
776} 790}
777 791
778/* 792/*
@@ -819,12 +833,34 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev)
819static void sym53c8xx_slave_destroy(struct scsi_device *sdev) 833static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
820{ 834{
821 struct sym_hcb *np = sym_get_hcb(sdev->host); 835 struct sym_hcb *np = sym_get_hcb(sdev->host);
822 struct sym_lcb *lp = sym_lp(&np->target[sdev->id], sdev->lun); 836 struct sym_tcb *tp = &np->target[sdev->id];
837 struct sym_lcb *lp = sym_lp(tp, sdev->lun);
838 unsigned long flags;
839
840 spin_lock_irqsave(np->s.host->host_lock, flags);
841
842 if (lp->busy_itlq || lp->busy_itl) {
843 /*
844 * This really shouldn't happen, but we can't return an error
845 * so let's try to stop all ongoing I/O.
846 */
847 starget_printk(KERN_WARNING, tp->starget,
848 "Removing busy LCB (%d)\n", sdev->lun);
849 sym_reset_scsi_bus(np, 1);
850 }
823 851
824 if (lp->itlq_tbl) 852 if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) {
825 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK * 4, "ITLQ_TBL"); 853 /*
826 kfree(lp->cb_tags); 854 * It was the last unit for this target.
827 sym_mfree_dma(lp, sizeof(*lp), "LCB"); 855 */
856 tp->head.sval = 0;
857 tp->head.wval = np->rv_scntl3;
858 tp->head.uval = 0;
859 tp->tgoal.check_nego = 1;
860 tp->starget = NULL;
861 }
862
863 spin_unlock_irqrestore(np->s.host->host_lock, flags);
828} 864}
829 865
830/* 866/*
@@ -890,6 +926,8 @@ static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
890 if (!((uc->target >> t) & 1)) 926 if (!((uc->target >> t) & 1))
891 continue; 927 continue;
892 tp = &np->target[t]; 928 tp = &np->target[t];
929 if (!tp->nlcb)
930 continue;
893 931
894 switch (uc->cmd) { 932 switch (uc->cmd) {
895 933
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index ffa70d1ed18..69ad4945c93 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -1896,6 +1896,15 @@ void sym_start_up(struct Scsi_Host *shost, int reason)
1896 tp->head.sval = 0; 1896 tp->head.sval = 0;
1897 tp->head.wval = np->rv_scntl3; 1897 tp->head.wval = np->rv_scntl3;
1898 tp->head.uval = 0; 1898 tp->head.uval = 0;
1899 if (tp->lun0p)
1900 tp->lun0p->to_clear = 0;
1901 if (tp->lunmp) {
1902 int ln;
1903
1904 for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++)
1905 if (tp->lunmp[ln])
1906 tp->lunmp[ln]->to_clear = 0;
1907 }
1899 } 1908 }
1900 1909
1901 /* 1910 /*
@@ -4988,7 +4997,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
4988 */ 4997 */
4989 if (ln && !tp->lunmp) { 4998 if (ln && !tp->lunmp) {
4990 tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *), 4999 tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *),
4991 GFP_KERNEL); 5000 GFP_ATOMIC);
4992 if (!tp->lunmp) 5001 if (!tp->lunmp)
4993 goto fail; 5002 goto fail;
4994 } 5003 }
@@ -5008,6 +5017,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
5008 tp->lun0p = lp; 5017 tp->lun0p = lp;
5009 tp->head.lun0_sa = cpu_to_scr(vtobus(lp)); 5018 tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
5010 } 5019 }
5020 tp->nlcb++;
5011 5021
5012 /* 5022 /*
5013 * Let the itl task point to error handling. 5023 * Let the itl task point to error handling.
@@ -5085,6 +5095,43 @@ fail:
5085} 5095}
5086 5096
5087/* 5097/*
5098 * LUN control block deallocation. Returns the number of valid remaining LCBs
5099 * for the target.
5100 */
5101int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln)
5102{
5103 struct sym_tcb *tp = &np->target[tn];
5104 struct sym_lcb *lp = sym_lp(tp, ln);
5105
5106 tp->nlcb--;
5107
5108 if (ln) {
5109 if (!tp->nlcb) {
5110 kfree(tp->lunmp);
5111 sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
5112 tp->lunmp = NULL;
5113 tp->luntbl = NULL;
5114 tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl));
5115 } else {
5116 tp->luntbl[ln] = cpu_to_scr(vtobus(&np->badlun_sa));
5117 tp->lunmp[ln] = NULL;
5118 }
5119 } else {
5120 tp->lun0p = NULL;
5121 tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa));
5122 }
5123
5124 if (lp->itlq_tbl) {
5125 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
5126 kfree(lp->cb_tags);
5127 }
5128
5129 sym_mfree_dma(lp, sizeof(*lp), "LCB");
5130
5131 return tp->nlcb;
5132}
5133
5134/*
5088 * Queue a SCSI IO to the controller. 5135 * Queue a SCSI IO to the controller.
5089 */ 5136 */
5090int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) 5137int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
index 9ebc8706b6b..053e63c8682 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.h
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -401,6 +401,7 @@ struct sym_tcb {
401 * An array of bus addresses is used on reselection. 401 * An array of bus addresses is used on reselection.
402 */ 402 */
403 u32 *luntbl; /* LCBs bus address table */ 403 u32 *luntbl; /* LCBs bus address table */
404 int nlcb; /* Number of valid LCBs (including LUN #0) */
404 405
405 /* 406 /*
406 * LUN table used by the C code. 407 * LUN table used by the C code.
@@ -1065,6 +1066,7 @@ int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int
1065struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order); 1066struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order);
1066void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp); 1067void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp);
1067struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln); 1068struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln);
1069int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln);
1068int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp); 1070int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
1069int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out); 1071int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out);
1070int sym_reset_scsi_target(struct sym_hcb *np, int target); 1072int sym_reset_scsi_target(struct sym_hcb *np, int target);
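Taken together, the sym53c8xx hunks make LUN control blocks a managed resource: sym_alloc_lcb() now bumps a per-target tp->nlcb count, the new sym_free_lcb() undoes one allocation and reports how many remain, and slave_alloc()/slave_destroy() run the pair under the host lock — which is also why the lunmp allocation switched from GFP_KERNEL to GFP_ATOMIC (sleeping allocations are not allowed with a spinlock held). A condensed sketch of the pairing as used in the glue code above:

	/* slave_alloc side */
	spin_lock_irqsave(np->s.host->host_lock, flags);
	lp = sym_alloc_lcb(np, sdev->id, sdev->lun);	/* tp->nlcb++, GFP_ATOMIC inside */
	spin_unlock_irqrestore(np->s.host->host_lock, flags);

	/* slave_destroy side */
	spin_lock_irqsave(np->s.host->host_lock, flags);
	if (sym_free_lcb(np, sdev->id, sdev->lun) == 0)
		tp->starget = NULL;	/* last LUN on this target is gone */
	spin_unlock_irqrestore(np->s.host->host_lock, flags);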
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 601e95141cb..54023d41fd1 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1306,7 +1306,7 @@ static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scs
1306 if (linked_comm && SCpnt->device->queue_depth > 2 1306 if (linked_comm && SCpnt->device->queue_depth > 2
1307 && TLDEV(SCpnt->device->type)) { 1307 && TLDEV(SCpnt->device->type)) {
1308 HD(j)->cp_stat[i] = READY; 1308 HD(j)->cp_stat[i] = READY;
1309 flush_dev(SCpnt->device, SCpnt->request->sector, j, FALSE); 1309 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE);
1310 return 0; 1310 return 0;
1311 } 1311 }
1312 1312
@@ -1610,11 +1610,13 @@ static int reorder(unsigned int j, unsigned long cursec,
1610 1610
1611 if (!(cpp->xdir == DTD_IN)) input_only = FALSE; 1611 if (!(cpp->xdir == DTD_IN)) input_only = FALSE;
1612 1612
1613 if (SCpnt->request->sector < minsec) minsec = SCpnt->request->sector; 1613 if (blk_rq_pos(SCpnt->request) < minsec)
1614 if (SCpnt->request->sector > maxsec) maxsec = SCpnt->request->sector; 1614 minsec = blk_rq_pos(SCpnt->request);
1615 if (blk_rq_pos(SCpnt->request) > maxsec)
1616 maxsec = blk_rq_pos(SCpnt->request);
1615 1617
1616 sl[n] = SCpnt->request->sector; 1618 sl[n] = blk_rq_pos(SCpnt->request);
1617 ioseek += SCpnt->request->nr_sectors; 1619 ioseek += blk_rq_sectors(SCpnt->request);
1618 1620
1619 if (!n) continue; 1621 if (!n) continue;
1620 1622
@@ -1642,7 +1644,7 @@ static int reorder(unsigned int j, unsigned long cursec,
1642 1644
1643 if (!input_only) for (n = 0; n < n_ready; n++) { 1645 if (!input_only) for (n = 0; n < n_ready; n++) {
1644 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1646 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1645 ll[n] = SCpnt->request->nr_sectors; pl[n] = SCpnt->serial_number; 1647 ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number;
1646 1648
1647 if (!n) continue; 1649 if (!n) continue;
1648 1650
@@ -1666,12 +1668,12 @@ static int reorder(unsigned int j, unsigned long cursec,
1666 if (link_statistics && (overlap || !(flushcount % link_statistics))) 1668 if (link_statistics && (overlap || !(flushcount % link_statistics)))
1667 for (n = 0; n < n_ready; n++) { 1669 for (n = 0; n < n_ready; n++) {
1668 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1670 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1669 printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %ld"\ 1671 printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\
1670 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 1672 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
1671 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target, 1673 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
1672 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready, 1674 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready,
1673 SCpnt->request->sector, SCpnt->request->nr_sectors, cursec, 1675 blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
1674 YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only), 1676 cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
1675 YESNO(overlap), cpp->xdir); 1677 YESNO(overlap), cpp->xdir);
1676 } 1678 }
1677#endif 1679#endif
@@ -1799,7 +1801,7 @@ static irqreturn_t ihdlr(unsigned int j)
1799 1801
1800 if (linked_comm && SCpnt->device->queue_depth > 2 1802 if (linked_comm && SCpnt->device->queue_depth > 2
1801 && TLDEV(SCpnt->device->type)) 1803 && TLDEV(SCpnt->device->type))
1802 flush_dev(SCpnt->device, SCpnt->request->sector, j, TRUE); 1804 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE);
1803 1805
1804 tstatus = status_byte(spp->target_status); 1806 tstatus = status_byte(spp->target_status);
1805 1807
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index b4b39811b44..fb867a9f55e 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -137,6 +137,7 @@ struct uart_8250_port {
137 unsigned char mcr; 137 unsigned char mcr;
138 unsigned char mcr_mask; /* mask of user bits */ 138 unsigned char mcr_mask; /* mask of user bits */
139 unsigned char mcr_force; /* mask of forced bits */ 139 unsigned char mcr_force; /* mask of forced bits */
140 unsigned char cur_iotype; /* Running I/O type */
140 141
141 /* 142 /*
142 * Some bits in registers are cleared on a read, so they must 143 * Some bits in registers are cleared on a read, so they must
@@ -286,6 +287,13 @@ static const struct serial8250_config uart_config[] = {
286 .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, 287 .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
287 .flags = UART_CAP_FIFO, 288 .flags = UART_CAP_FIFO,
288 }, 289 },
290 [PORT_AR7] = {
291 .name = "AR7",
292 .fifo_size = 16,
293 .tx_loadsz = 16,
294 .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
295 .flags = UART_CAP_FIFO | UART_CAP_AFE,
296 },
289}; 297};
290 298
291#if defined (CONFIG_SERIAL_8250_AU1X00) 299#if defined (CONFIG_SERIAL_8250_AU1X00)
@@ -471,6 +479,7 @@ static void io_serial_out(struct uart_port *p, int offset, int value)
471 479
472static void set_io_from_upio(struct uart_port *p) 480static void set_io_from_upio(struct uart_port *p)
473{ 481{
482 struct uart_8250_port *up = (struct uart_8250_port *)p;
474 switch (p->iotype) { 483 switch (p->iotype) {
475 case UPIO_HUB6: 484 case UPIO_HUB6:
476 p->serial_in = hub6_serial_in; 485 p->serial_in = hub6_serial_in;
@@ -509,6 +518,8 @@ static void set_io_from_upio(struct uart_port *p)
509 p->serial_out = io_serial_out; 518 p->serial_out = io_serial_out;
510 break; 519 break;
511 } 520 }
521 /* Remember loaded iotype */
522 up->cur_iotype = p->iotype;
512} 523}
513 524
514static void 525static void
@@ -1937,6 +1948,9 @@ static int serial8250_startup(struct uart_port *port)
1937 up->capabilities = uart_config[up->port.type].flags; 1948 up->capabilities = uart_config[up->port.type].flags;
1938 up->mcr = 0; 1949 up->mcr = 0;
1939 1950
1951 if (up->port.iotype != up->cur_iotype)
1952 set_io_from_upio(port);
1953
1940 if (up->port.type == PORT_16C950) { 1954 if (up->port.type == PORT_16C950) {
1941 /* Wake up and initialize UART */ 1955 /* Wake up and initialize UART */
1942 up->acr = 0; 1956 up->acr = 0;
@@ -2563,6 +2577,9 @@ static void serial8250_config_port(struct uart_port *port, int flags)
2563 if (ret < 0) 2577 if (ret < 0)
2564 probeflags &= ~PROBE_RSA; 2578 probeflags &= ~PROBE_RSA;
2565 2579
2580 if (up->port.iotype != up->cur_iotype)
2581 set_io_from_upio(port);
2582
2566 if (flags & UART_CONFIG_TYPE) 2583 if (flags & UART_CONFIG_TYPE)
2567 autoconfig(up, probeflags); 2584 autoconfig(up, probeflags);
2568 if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ) 2585 if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
@@ -2671,6 +2688,11 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
2671{ 2688{
2672 int i; 2689 int i;
2673 2690
2691 for (i = 0; i < nr_uarts; i++) {
2692 struct uart_8250_port *up = &serial8250_ports[i];
2693 up->cur_iotype = 0xFF;
2694 }
2695
2674 serial8250_isa_init_ports(); 2696 serial8250_isa_init_ports();
2675 2697
2676 for (i = 0; i < nr_uarts; i++) { 2698 for (i = 0; i < nr_uarts; i++) {
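The 8250 changes cache which I/O accessor set was last installed (up->cur_iotype) so that startup and config_port re-run set_io_from_upio() only when the port's iotype has actually changed; serial8250_register_ports() seeds cur_iotype with 0xFF, which matches no UPIO_* value, so the first comparison always forces an install. The idiom, in brief (sketch; 0xFF is assumed to be an impossible iotype):

	up->cur_iotype = 0xFF;			/* at registration: force first load */
	...
	if (up->port.iotype != up->cur_iotype)
		set_io_from_upio(port);		/* installs serial_in/serial_out and
						 * records cur_iotype = iotype */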
diff --git a/drivers/serial/8250_gsc.c b/drivers/serial/8250_gsc.c
index 418b4fe9a0a..33149d982e8 100644
--- a/drivers/serial/8250_gsc.c
+++ b/drivers/serial/8250_gsc.c
@@ -39,9 +39,9 @@ static int __init serial_init_chip(struct parisc_device *dev)
39 */ 39 */
40 if (parisc_parent(dev)->id.hw_type != HPHW_IOA) 40 if (parisc_parent(dev)->id.hw_type != HPHW_IOA)
41 printk(KERN_INFO 41 printk(KERN_INFO
42 "Serial: device 0x%lx not configured.\n" 42 "Serial: device 0x%llx not configured.\n"
43 "Enable support for Wax, Lasi, Asp or Dino.\n", 43 "Enable support for Wax, Lasi, Asp or Dino.\n",
44 dev->hpa.start); 44 (unsigned long long)dev->hpa.start);
45 return -ENODEV; 45 return -ENODEV;
46 } 46 }
47 47
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 938bc1b6c3f..e371a9c1534 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -2776,6 +2776,9 @@ static struct pci_device_id serial_pci_tbl[] = {
2776 { PCI_VENDOR_ID_OXSEMI, 0x950a, 2776 { PCI_VENDOR_ID_OXSEMI, 0x950a,
2777 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2777 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2778 pbn_b0_2_1130000 }, 2778 pbn_b0_2_1130000 },
2779 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_C950,
2780 PCI_VENDOR_ID_OXSEMI, PCI_SUBDEVICE_ID_OXSEMI_C950, 0, 0,
2781 pbn_b0_1_921600 },
2779 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954, 2782 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954,
2780 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2783 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2781 pbn_b0_4_115200 }, 2784 pbn_b0_4_115200 },
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 343e3a35b6a..1132c5cae7a 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -833,6 +833,7 @@ config SERIAL_IMX
833 bool "IMX serial port support" 833 bool "IMX serial port support"
834 depends on ARM && (ARCH_IMX || ARCH_MXC) 834 depends on ARM && (ARCH_IMX || ARCH_MXC)
835 select SERIAL_CORE 835 select SERIAL_CORE
836 select RATIONAL
836 help 837 help
837 If you have a machine based on a Motorola IMX CPU you 838 If you have a machine based on a Motorola IMX CPU you
838 can enable its onboard serial port by enabling this option. 839 can enable its onboard serial port by enabling this option.
@@ -860,7 +861,7 @@ config SERIAL_UARTLITE
860 Say Y here if you want to use the Xilinx uartlite serial controller. 861 Say Y here if you want to use the Xilinx uartlite serial controller.
861 862
862 To compile this driver as a module, choose M here: the 863 To compile this driver as a module, choose M here: the
863 module will be called uartlite.ko. 864 module will be called uartlite.
864 865
865config SERIAL_UARTLITE_CONSOLE 866config SERIAL_UARTLITE_CONSOLE
866 bool "Support for console on Xilinx uartlite serial port" 867 bool "Support for console on Xilinx uartlite serial port"
@@ -1433,4 +1434,11 @@ config SPORT_BAUD_RATE
1433 default 19200 if (SERIAL_SPORT_BAUD_RATE_19200) 1434 default 19200 if (SERIAL_SPORT_BAUD_RATE_19200)
1434 default 9600 if (SERIAL_SPORT_BAUD_RATE_9600) 1435 default 9600 if (SERIAL_SPORT_BAUD_RATE_9600)
1435 1436
1437config SERIAL_TIMBERDALE
1438 tristate "Support for timberdale UART"
1439 depends on MFD_TIMBERDALE
1440 select SERIAL_CORE
1441 ---help---
1442 Add support for UART controller on timberdale.
1443
1436endmenu 1444endmenu
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index d438eb2a73d..45a8658f54d 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -77,3 +77,4 @@ obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o
77obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o 77obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
78obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o 78obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
79obj-$(CONFIG_SERIAL_QE) += ucc_uart.o 79obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
80obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index e3a5ad5ef1d..58a4879c7e4 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -665,7 +665,7 @@ static struct uart_driver amba_reg = {
665 .cons = AMBA_CONSOLE, 665 .cons = AMBA_CONSOLE,
666}; 666};
667 667
668static int pl010_probe(struct amba_device *dev, void *id) 668static int pl010_probe(struct amba_device *dev, struct amba_id *id)
669{ 669{
670 struct uart_amba_port *uap; 670 struct uart_amba_port *uap;
671 void __iomem *base; 671 void __iomem *base;
@@ -686,7 +686,7 @@ static int pl010_probe(struct amba_device *dev, void *id)
686 goto out; 686 goto out;
687 } 687 }
688 688
689 base = ioremap(dev->res.start, PAGE_SIZE); 689 base = ioremap(dev->res.start, resource_size(&dev->res));
690 if (!base) { 690 if (!base) {
691 ret = -ENOMEM; 691 ret = -ENOMEM;
692 goto free; 692 goto free;
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index 8b2b9700f3e..bf82e28770a 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -70,6 +70,23 @@ struct uart_amba_port {
70 struct clk *clk; 70 struct clk *clk;
71 unsigned int im; /* interrupt mask */ 71 unsigned int im; /* interrupt mask */
72 unsigned int old_status; 72 unsigned int old_status;
73 unsigned int ifls; /* vendor-specific */
74};
75
76/* There is by now at least one vendor with differing details, so handle it */
77struct vendor_data {
78 unsigned int ifls;
79 unsigned int fifosize;
80};
81
82static struct vendor_data vendor_arm = {
83 .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
84 .fifosize = 16,
85};
86
87static struct vendor_data vendor_st = {
88 .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
89 .fifosize = 64,
73}; 90};
74 91
75static void pl011_stop_tx(struct uart_port *port) 92static void pl011_stop_tx(struct uart_port *port)
@@ -360,8 +377,7 @@ static int pl011_startup(struct uart_port *port)
360 if (retval) 377 if (retval)
361 goto clk_dis; 378 goto clk_dis;
362 379
363 writew(UART011_IFLS_RX4_8|UART011_IFLS_TX4_8, 380 writew(uap->ifls, uap->port.membase + UART011_IFLS);
364 uap->port.membase + UART011_IFLS);
365 381
366 /* 382 /*
367 * Provoke TX FIFO interrupt into asserting. 383 * Provoke TX FIFO interrupt into asserting.
@@ -729,9 +745,10 @@ static struct uart_driver amba_reg = {
729 .cons = AMBA_CONSOLE, 745 .cons = AMBA_CONSOLE,
730}; 746};
731 747
732static int pl011_probe(struct amba_device *dev, void *id) 748static int pl011_probe(struct amba_device *dev, struct amba_id *id)
733{ 749{
734 struct uart_amba_port *uap; 750 struct uart_amba_port *uap;
751 struct vendor_data *vendor = id->data;
735 void __iomem *base; 752 void __iomem *base;
736 int i, ret; 753 int i, ret;
737 754
@@ -750,7 +767,7 @@ static int pl011_probe(struct amba_device *dev, void *id)
750 goto out; 767 goto out;
751 } 768 }
752 769
753 base = ioremap(dev->res.start, PAGE_SIZE); 770 base = ioremap(dev->res.start, resource_size(&dev->res));
754 if (!base) { 771 if (!base) {
755 ret = -ENOMEM; 772 ret = -ENOMEM;
756 goto free; 773 goto free;
@@ -762,12 +779,13 @@ static int pl011_probe(struct amba_device *dev, void *id)
762 goto unmap; 779 goto unmap;
763 } 780 }
764 781
782 uap->ifls = vendor->ifls;
765 uap->port.dev = &dev->dev; 783 uap->port.dev = &dev->dev;
766 uap->port.mapbase = dev->res.start; 784 uap->port.mapbase = dev->res.start;
767 uap->port.membase = base; 785 uap->port.membase = base;
768 uap->port.iotype = UPIO_MEM; 786 uap->port.iotype = UPIO_MEM;
769 uap->port.irq = dev->irq[0]; 787 uap->port.irq = dev->irq[0];
770 uap->port.fifosize = 16; 788 uap->port.fifosize = vendor->fifosize;
771 uap->port.ops = &amba_pl011_pops; 789 uap->port.ops = &amba_pl011_pops;
772 uap->port.flags = UPF_BOOT_AUTOCONF; 790 uap->port.flags = UPF_BOOT_AUTOCONF;
773 uap->port.line = i; 791 uap->port.line = i;
@@ -812,6 +830,12 @@ static struct amba_id pl011_ids[] __initdata = {
812 { 830 {
813 .id = 0x00041011, 831 .id = 0x00041011,
814 .mask = 0x000fffff, 832 .mask = 0x000fffff,
833 .data = &vendor_arm,
834 },
835 {
836 .id = 0x00380802,
837 .mask = 0x00ffffff,
838 .data = &vendor_st,
815 }, 839 },
816 { 0, 0 }, 840 { 0, 0 },
817}; 841};
@@ -845,7 +869,11 @@ static void __exit pl011_exit(void)
845 uart_unregister_driver(&amba_reg); 869 uart_unregister_driver(&amba_reg);
846} 870}
847 871
848module_init(pl011_init); 872/*
873 * While this can be a module, if built in it's most likely the console,
874 * so let's leave module_exit but move module_init to an earlier place.
875 */
876arch_initcall(pl011_init);
849module_exit(pl011_exit); 877module_exit(pl011_exit);
850 878
851MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd"); 879MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
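pl011_probe() now receives the matched struct amba_id and pulls per-vendor parameters from id->data, so the ST variant (peripheral ID 0x00380802 above) can carry a 64-entry FIFO and its own IFLS watermark encoding while the ARM part keeps the 16-byte/RX4_8 defaults, with no run-time vendor checks in the hot paths. A sketch of the table-driven quirk lookup as wired above:

	static struct vendor_data vendor_st = {
		.ifls     = UART011_IFLS_RX_HALF | UART011_IFLS_TX_HALF,
		.fifosize = 64,
	};

	static int pl011_probe(struct amba_device *dev, struct amba_id *id)
	{
		struct vendor_data *vendor = id->data;	/* &vendor_arm or &vendor_st */

		uap->ifls          = vendor->ifls;
		uap->port.fifosize = vendor->fifosize;
		...
	}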
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index d86123e0339..e2f6b1bfac9 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -330,6 +330,11 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
330 /* Clear TFI bit */ 330 /* Clear TFI bit */
331 UART_PUT_LSR(uart, TFI); 331 UART_PUT_LSR(uart, TFI);
332#endif 332#endif
333 /* Anomaly notes:
334 * 05000215 - we always clear ETBEI within the last UART TX
335 * interrupt to end a string. It is always set
336 * when starting a new TX.
337 */
333 UART_CLEAR_IER(uart, ETBEI); 338 UART_CLEAR_IER(uart, ETBEI);
334 return; 339 return;
335 } 340 }
@@ -415,6 +420,7 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
415 set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail)); 420 set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail));
416 set_dma_x_count(uart->tx_dma_channel, uart->tx_count); 421 set_dma_x_count(uart->tx_dma_channel, uart->tx_count);
417 set_dma_x_modify(uart->tx_dma_channel, 1); 422 set_dma_x_modify(uart->tx_dma_channel, 1);
423 SSYNC();
418 enable_dma(uart->tx_dma_channel); 424 enable_dma(uart->tx_dma_channel);
419 425
420 UART_SET_IER(uart, ETBEI); 426 UART_SET_IER(uart, ETBEI);
@@ -473,27 +479,41 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
473void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) 479void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
474{ 480{
475 int x_pos, pos; 481 int x_pos, pos;
476 unsigned long flags;
477
478 spin_lock_irqsave(&uart->port.lock, flags);
479 482
483 dma_disable_irq(uart->rx_dma_channel);
484 spin_lock_bh(&uart->port.lock);
485
486 /* A 2D DMA RX buffer ring is used. Because curr_y_count and
487 * curr_x_count can't be read as one atomic operation,
488 * curr_y_count should be read before curr_x_count. When
489 * curr_x_count is read, curr_y_count may already point at the
490 * next buffer line, while the position calculated here still
491 * refers to the old line. Such a stale position can be smaller
492 * than the current buffer tail, and garbage would be received
493 * if it were not rejected.
494 */
480 uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel); 495 uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
481 x_pos = get_dma_curr_xcount(uart->rx_dma_channel); 496 x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
482 uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows; 497 uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
483 if (uart->rx_dma_nrows == DMA_RX_YCOUNT) 498 if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
484 uart->rx_dma_nrows = 0; 499 uart->rx_dma_nrows = 0;
485 x_pos = DMA_RX_XCOUNT - x_pos; 500 x_pos = DMA_RX_XCOUNT - x_pos;
486 if (x_pos == DMA_RX_XCOUNT) 501 if (x_pos == DMA_RX_XCOUNT)
487 x_pos = 0; 502 x_pos = 0;
488 503
489 pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos; 504 pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos;
490 if (pos != uart->rx_dma_buf.tail) { 505 /* Ignore received data if the new position is in the same
506 * line as the current buffer tail and is smaller.
507 */
508 if (pos > uart->rx_dma_buf.tail ||
509 uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
491 uart->rx_dma_buf.head = pos; 510 uart->rx_dma_buf.head = pos;
492 bfin_serial_dma_rx_chars(uart); 511 bfin_serial_dma_rx_chars(uart);
493 uart->rx_dma_buf.tail = uart->rx_dma_buf.head; 512 uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
494 } 513 }
495 514
496 spin_unlock_irqrestore(&uart->port.lock, flags); 515 spin_unlock_bh(&uart->port.lock);
516 dma_enable_irq(uart->rx_dma_channel);
497 517
498 mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES); 518 mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
499} 519}
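The comment block above is the key to this hunk: the 2D RX DMA fills a rows-by-columns ring, curr_y_count counts rows still to go and curr_x_count columns still to go in the current row, and the two cannot be latched atomically, so a stale pair can compute a head position behind the tail. A standalone model of the position arithmetic (constants and names are illustrative):

	#include <stdio.h>

	#define DMA_RX_XCOUNT 512	/* columns per row (assumed) */
	#define DMA_RX_YCOUNT 8		/* rows in the ring (assumed) */

	static int rx_head(int curr_y, int curr_x)
	{
		int rows = DMA_RX_YCOUNT - curr_y;	/* rows already filled */
		int cols = DMA_RX_XCOUNT - curr_x;	/* bytes in current row */

		if (rows == DMA_RX_YCOUNT || curr_x == 0)
			rows = 0;			/* wrapped to ring start */
		if (cols == DMA_RX_XCOUNT)
			cols = 0;

		return rows * DMA_RX_XCOUNT + cols;
	}

	int main(void)
	{
		/* 2 rows complete, 100 columns left in row 3 */
		printf("%d\n", rx_head(6, 100));	/* 2*512 + 412 = 1436 */
		return 0;
	}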
@@ -514,6 +534,11 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
514 if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) { 534 if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
515 disable_dma(uart->tx_dma_channel); 535 disable_dma(uart->tx_dma_channel);
516 clear_dma_irqstat(uart->tx_dma_channel); 536 clear_dma_irqstat(uart->tx_dma_channel);
537 /* Anomaly notes:
538 * 05000215 - we always clear ETBEI within the last UART TX
539 * interrupt to end a string. It is always set
540 * when starting a new TX.
541 */
517 UART_CLEAR_IER(uart, ETBEI); 542 UART_CLEAR_IER(uart, ETBEI);
518 xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1); 543 xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
519 uart->port.icount.tx += uart->tx_count; 544 uart->port.icount.tx += uart->tx_count;
@@ -532,11 +557,26 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
532{ 557{
533 struct bfin_serial_port *uart = dev_id; 558 struct bfin_serial_port *uart = dev_id;
534 unsigned short irqstat; 559 unsigned short irqstat;
560 int x_pos, pos;
535 561
536 spin_lock(&uart->port.lock); 562 spin_lock(&uart->port.lock);
537 irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); 563 irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
538 clear_dma_irqstat(uart->rx_dma_channel); 564 clear_dma_irqstat(uart->rx_dma_channel);
539 bfin_serial_dma_rx_chars(uart); 565
566 uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
567 x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
568 uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
569 if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
570 uart->rx_dma_nrows = 0;
571
572 pos = uart->rx_dma_nrows * DMA_RX_XCOUNT;
573 if (pos > uart->rx_dma_buf.tail ||
574 uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
575 uart->rx_dma_buf.head = pos;
576 bfin_serial_dma_rx_chars(uart);
577 uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
578 }
579
540 spin_unlock(&uart->port.lock); 580 spin_unlock(&uart->port.lock);
541 581
542 return IRQ_HANDLED; 582 return IRQ_HANDLED;
@@ -789,8 +829,16 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
789 __func__); 829 __func__);
790 } 830 }
791 831
792 if (termios->c_cflag & CSTOPB) 832 /* Anomaly notes:
793 lcr |= STB; 833 * 05000231 - STOP bit is always set to 1 whatever the user is set.
834 */
835 if (termios->c_cflag & CSTOPB) {
836 if (ANOMALY_05000231)
837 printk(KERN_WARNING "STOP bits other than 1 is not "
838 "supported in case of anomaly 05000231.\n");
839 else
840 lcr |= STB;
841 }
794 if (termios->c_cflag & PARENB) 842 if (termios->c_cflag & PARENB)
795 lcr |= PEN; 843 lcr |= PEN;
796 if (!(termios->c_cflag & PARODD)) 844 if (!(termios->c_cflag & PARODD))
@@ -940,6 +988,10 @@ static void bfin_serial_reset_irda(struct uart_port *port)
940} 988}
941 989
942#ifdef CONFIG_CONSOLE_POLL 990#ifdef CONFIG_CONSOLE_POLL
991/* Anomaly notes:
992 * 05000099 - Because we only use THRE in poll_put and DR in poll_get,
993 * losing other bits of UART_LSR is not a problem here.
994 */
943static void bfin_serial_poll_put_char(struct uart_port *port, unsigned char chr) 995static void bfin_serial_poll_put_char(struct uart_port *port, unsigned char chr)
944{ 996{
945 struct bfin_serial_port *uart = (struct bfin_serial_port *)port; 997 struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
@@ -1245,12 +1297,17 @@ static __init void early_serial_write(struct console *con, const char *s,
1245 } 1297 }
1246} 1298}
1247 1299
1300/*
1301 * This should have a .setup or .early_setup in it, but then things get called
1302 * without the command line options, and the baud rate gets messed up - so
1303 * don't let the common infrastructure play with things. (see calls to setup
1304 * & earlysetup in ./kernel/printk.c:register_console())
1305 */
1248static struct __initdata console bfin_early_serial_console = { 1306static struct __initdata console bfin_early_serial_console = {
1249 .name = "early_BFuart", 1307 .name = "early_BFuart",
1250 .write = early_serial_write, 1308 .write = early_serial_write,
1251 .device = uart_console_device, 1309 .device = uart_console_device,
1252 .flags = CON_PRINTBUFFER, 1310 .flags = CON_PRINTBUFFER,
1253 .setup = bfin_serial_console_setup,
1254 .index = -1, 1311 .index = -1,
1255 .data = &bfin_serial_reg, 1312 .data = &bfin_serial_reg,
1256}; 1313};
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index 529c0ff7952..34b4ae0fe76 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -101,15 +101,16 @@ static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
101{ 101{
102 pr_debug("%s value:%x\n", __func__, value); 102 pr_debug("%s value:%x\n", __func__, value);
103 /* Place a Start and Stop bit */ 103 /* Place a Start and Stop bit */
104 __asm__ volatile ( 104 __asm__ __volatile__ (
105 "R2 = b#01111111100;\n\t" 105 "R2 = b#01111111100;"
106 "R3 = b#10000000001;\n\t" 106 "R3 = b#10000000001;"
107 "%0 <<= 2;\n\t" 107 "%0 <<= 2;"
108 "%0 = %0 & R2;\n\t" 108 "%0 = %0 & R2;"
109 "%0 = %0 | R3;\n\t" 109 "%0 = %0 | R3;"
110 :"=r"(value) 110 : "=d"(value)
111 :"0"(value) 111 : "d"(value)
112 :"R2", "R3"); 112 : "ASTAT", "R2", "R3"
113 );
113 pr_debug("%s value:%x\n", __func__, value); 114 pr_debug("%s value:%x\n", __func__, value);
114 115
115 SPORT_PUT_TX(up, value); 116 SPORT_PUT_TX(up, value);
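tx_one_byte() does the UART framing in software because the SPORT just shifts raw bits: the data byte is moved up into bits 9..2 and two framing bits are OR'd in at positions 10 and 0, giving an 11-bit frame (TCR2 is set to 10 later in this file, i.e. an 11-bit word if SLEN is length-minus-one, as is usual for the SPORT). A C rendering of what the three mask instructions compute:

	/* C equivalent of the framing asm above (sketch) */
	static unsigned int frame_byte(unsigned int byte)
	{
		unsigned int v = (byte << 2) & 0x3fc;	/* b#01111111100: data in bits 9..2 */
		return v | 0x401;			/* b#10000000001: framing bits 10 and 0 */
	}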
@@ -118,27 +119,30 @@ static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
118static inline unsigned int rx_one_byte(struct sport_uart_port *up) 119static inline unsigned int rx_one_byte(struct sport_uart_port *up)
119{ 120{
120 unsigned int value, extract; 121 unsigned int value, extract;
122 u32 tmp_mask1, tmp_mask2, tmp_shift, tmp;
121 123
122 value = SPORT_GET_RX32(up); 124 value = SPORT_GET_RX32(up);
123 pr_debug("%s value:%x\n", __func__, value); 125 pr_debug("%s value:%x\n", __func__, value);
124 126
125 /* Extract 8 bits data */ 127 /* Extract 8 bits data */
126 __asm__ volatile ( 128 __asm__ __volatile__ (
127 "R5 = 0;\n\t" 129 "%[extr] = 0;"
128 "P0 = 8;\n\t" 130 "%[mask1] = 0x1801(Z);"
129 "R1 = 0x1801(Z);\n\t" 131 "%[mask2] = 0x0300(Z);"
130 "R3 = 0x0300(Z);\n\t" 132 "%[shift] = 0;"
131 "R4 = 0;\n\t" 133 "LSETUP(.Lloop_s, .Lloop_e) LC0 = %[lc];"
132 "LSETUP(loop_s, loop_e) LC0 = P0;\nloop_s:\t" 134 ".Lloop_s:"
133 "R2 = extract(%1, R1.L)(Z);\n\t" 135 "%[tmp] = extract(%[val], %[mask1].L)(Z);"
134 "R2 <<= R4;\n\t" 136 "%[tmp] <<= %[shift];"
135 "R5 = R5 | R2;\n\t" 137 "%[extr] = %[extr] | %[tmp];"
136 "R1 = R1 - R3;\nloop_e:\t" 138 "%[mask1] = %[mask1] - %[mask2];"
137 "R4 += 1;\n\t" 139 ".Lloop_e:"
138 "%0 = R5;\n\t" 140 "%[shift] += 1;"
139 :"=r"(extract) 141 : [val]"=d"(value), [extr]"=d"(extract), [shift]"=d"(tmp_shift), [tmp]"=d"(tmp),
140 :"r"(value) 142 [mask1]"=d"(tmp_mask1), [mask2]"=d"(tmp_mask2)
141 :"P0", "R1", "R2","R3","R4", "R5"); 143 : "d"(value), [lc]"a"(8)
144 : "ASTAT", "LB0", "LC0", "LT0"
145 );
142 146
143 pr_debug(" extract:%x\n", extract); 147 pr_debug(" extract:%x\n", extract);
144 return extract; 148 return extract;
@@ -149,7 +153,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int sclk, int baud_rate)
149 int tclkdiv, tfsdiv, rclkdiv; 153 int tclkdiv, tfsdiv, rclkdiv;
150 154
151 /* Set TCR1 and TCR2 */ 155 /* Set TCR1 and TCR2 */
152 SPORT_PUT_TCR1(up, (LTFS | ITFS | TFSR | TLSBIT | ITCLK)); 156 SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK));
153 SPORT_PUT_TCR2(up, 10); 157 SPORT_PUT_TCR2(up, 10);
154 pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up)); 158 pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
155 159
@@ -419,7 +423,7 @@ static void sport_shutdown(struct uart_port *port)
419} 423}
420 424
421static void sport_set_termios(struct uart_port *port, 425static void sport_set_termios(struct uart_port *port,
422 struct termios *termios, struct termios *old) 426 struct ktermios *termios, struct ktermios *old)
423{ 427{
424 pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag); 428 pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag);
425 uart_update_timeout(port, CS8 ,port->uartclk); 429 uart_update_timeout(port, CS8 ,port->uartclk);
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 6579e2be1dd..9f2891c2c4a 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -137,7 +137,12 @@ static LIST_HEAD(icom_adapter_head);
137static spinlock_t icom_lock; 137static spinlock_t icom_lock;
138 138
139#ifdef ICOM_TRACE 139#ifdef ICOM_TRACE
140static inline void trace(struct icom_port *, char *, unsigned long) {}; 140static inline void trace(struct icom_port *icom_port, char *trace_pt,
141 unsigned long trace_data)
142{
143 dev_info(&icom_port->adapter->pci_dev->dev, ":%d:%s - %lx\n",
144 icom_port->port, trace_pt, trace_data);
145}
141#else 146#else
142static inline void trace(struct icom_port *icom_port, char *trace_pt, unsigned long trace_data) {}; 147static inline void trace(struct icom_port *icom_port, char *trace_pt, unsigned long trace_data) {};
143#endif 148#endif
@@ -408,7 +413,7 @@ static void load_code(struct icom_port *icom_port)
408 release_firmware(fw); 413 release_firmware(fw);
409 414
410 /* Set Hardware level */ 415 /* Set Hardware level */
411 if ((icom_port->adapter->version | ADAPTER_V2) == ADAPTER_V2) 416 if (icom_port->adapter->version == ADAPTER_V2)
412 writeb(V2_HARDWARE, &(icom_port->dram->misc_flags)); 417 writeb(V2_HARDWARE, &(icom_port->dram->misc_flags));
413 418
414 /* Start the processor in Adapter */ 419 /* Start the processor in Adapter */
@@ -861,7 +866,7 @@ static irqreturn_t icom_interrupt(int irq, void *dev_id)
861 /* find icom_port for this interrupt */ 866 /* find icom_port for this interrupt */
862 icom_adapter = (struct icom_adapter *) dev_id; 867 icom_adapter = (struct icom_adapter *) dev_id;
863 868
864 if ((icom_adapter->version | ADAPTER_V2) == ADAPTER_V2) { 869 if (icom_adapter->version == ADAPTER_V2) {
865 int_reg = icom_adapter->base_addr + 0x8024; 870 int_reg = icom_adapter->base_addr + 0x8024;
866 871
867 adapter_interrupts = readl(int_reg); 872 adapter_interrupts = readl(int_reg);
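Both icom hunks fix the same test: `(version | ADAPTER_V2) == ADAPTER_V2` is true whenever version's set bits are a subset of ADAPTER_V2's — including version == 0 — not only when version actually equals ADAPTER_V2, which is what the plain equality expresses. A two-line demo (the constant's value is illustrative):

	#include <stdio.h>

	#define ADAPTER_V2 2	/* illustrative value */

	int main(void)
	{
		int version = 0;	/* unknown/zeroed hardware version */

		printf("%d\n", (version | ADAPTER_V2) == ADAPTER_V2);	/* 1: wrongly matches */
		printf("%d\n", version == ADAPTER_V2);			/* 0: correct */
		return 0;
	}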
@@ -1472,8 +1477,8 @@ static void icom_remove_adapter(struct icom_adapter *icom_adapter)
1472 1477
1473 free_irq(icom_adapter->pci_dev->irq, (void *) icom_adapter); 1478 free_irq(icom_adapter->pci_dev->irq, (void *) icom_adapter);
1474 iounmap(icom_adapter->base_addr); 1479 iounmap(icom_adapter->base_addr);
1475 icom_free_adapter(icom_adapter);
1476 pci_release_regions(icom_adapter->pci_dev); 1480 pci_release_regions(icom_adapter->pci_dev);
1481 icom_free_adapter(icom_adapter);
1477} 1482}
1478 1483
1479static void icom_kref_release(struct kref *kref) 1484static void icom_kref_release(struct kref *kref)
@@ -1647,15 +1652,6 @@ static void __exit icom_exit(void)
1647module_init(icom_init); 1652module_init(icom_init);
1648module_exit(icom_exit); 1653module_exit(icom_exit);
1649 1654
1650#ifdef ICOM_TRACE
1651static inline void trace(struct icom_port *icom_port, char *trace_pt,
1652 unsigned long trace_data)
1653{
1654 dev_info(&icom_port->adapter->pci_dev->dev, ":%d:%s - %lx\n",
1655 icom_port->port, trace_pt, trace_data);
1656}
1657#endif
1658
1659MODULE_AUTHOR("Michael Anderson <mjanders@us.ibm.com>"); 1655MODULE_AUTHOR("Michael Anderson <mjanders@us.ibm.com>");
1660MODULE_DESCRIPTION("IBM iSeries Serial IOA driver"); 1656MODULE_DESCRIPTION("IBM iSeries Serial IOA driver");
1661MODULE_SUPPORTED_DEVICE 1657MODULE_SUPPORTED_DEVICE
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 9f460b175c5..285b414f305 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -8,6 +8,9 @@
8 * Author: Sascha Hauer <sascha@saschahauer.de> 8 * Author: Sascha Hauer <sascha@saschahauer.de>
9 * Copyright (C) 2004 Pengutronix 9 * Copyright (C) 2004 Pengutronix
10 * 10 *
11 * Copyright (C) 2009 emlix GmbH
12 * Author: Fabian Godehardt (added IrDA support for iMX)
13 *
11 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 15 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or 16 * the Free Software Foundation; either version 2 of the License, or
@@ -41,6 +44,8 @@
41#include <linux/serial_core.h> 44#include <linux/serial_core.h>
42#include <linux/serial.h> 45#include <linux/serial.h>
43#include <linux/clk.h> 46#include <linux/clk.h>
47#include <linux/delay.h>
48#include <linux/rational.h>
44 49
45#include <asm/io.h> 50#include <asm/io.h>
46#include <asm/irq.h> 51#include <asm/irq.h>
@@ -66,7 +71,7 @@
66#define ONEMS 0xb0 /* One Millisecond register */ 71#define ONEMS 0xb0 /* One Millisecond register */
67#define UTS 0xb4 /* UART Test Register */ 72#define UTS 0xb4 /* UART Test Register */
68#endif 73#endif
69#if defined(CONFIG_ARCH_IMX) || defined(CONFIG_ARCH_MX1) 74#ifdef CONFIG_ARCH_MX1
70#define BIPR1 0xb0 /* Incremental Preset Register 1 */ 75#define BIPR1 0xb0 /* Incremental Preset Register 1 */
71#define BIPR2 0xb4 /* Incremental Preset Register 2 */ 76#define BIPR2 0xb4 /* Incremental Preset Register 2 */
72#define BIPR3 0xb8 /* Incremental Preset Register 3 */ 77#define BIPR3 0xb8 /* Incremental Preset Register 3 */
@@ -96,7 +101,7 @@
96#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */ 101#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */
97#define UCR1_SNDBRK (1<<4) /* Send break */ 102#define UCR1_SNDBRK (1<<4) /* Send break */
98#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */ 103#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */
99#if defined(CONFIG_ARCH_IMX) || defined(CONFIG_ARCH_MX1) 104#ifdef CONFIG_ARCH_MX1
100#define UCR1_UARTCLKEN (1<<2) /* UART clock enabled */ 105#define UCR1_UARTCLKEN (1<<2) /* UART clock enabled */
101#endif 106#endif
102#if defined CONFIG_ARCH_MX3 || defined CONFIG_ARCH_MX2 107#if defined CONFIG_ARCH_MX3 || defined CONFIG_ARCH_MX2
@@ -127,7 +132,7 @@
127#define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */ 132#define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */
128#define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */ 133#define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */
129#define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */ 134#define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */
130#ifdef CONFIG_ARCH_IMX 135#ifdef CONFIG_ARCH_MX1
131#define UCR3_REF25 (1<<3) /* Ref freq 25 MHz, only on mx1 */ 136#define UCR3_REF25 (1<<3) /* Ref freq 25 MHz, only on mx1 */
132#define UCR3_REF30 (1<<2) /* Ref Freq 30 MHz, only on mx1 */ 137#define UCR3_REF30 (1<<2) /* Ref Freq 30 MHz, only on mx1 */
133#endif 138#endif
@@ -148,6 +153,7 @@
148#define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */ 153#define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */
149#define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */ 154#define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */
150#define UFCR_RFDIV (7<<7) /* Reference freq divider mask */ 155#define UFCR_RFDIV (7<<7) /* Reference freq divider mask */
156#define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7)
151#define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */ 157#define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */
152#define USR1_PARITYERR (1<<15) /* Parity error interrupt flag */ 158#define USR1_PARITYERR (1<<15) /* Parity error interrupt flag */
153#define USR1_RTSS (1<<14) /* RTS pin status */ 159#define USR1_RTSS (1<<14) /* RTS pin status */
@@ -180,13 +186,6 @@
180#define UTS_SOFTRST (1<<0) /* Software reset */ 186#define UTS_SOFTRST (1<<0) /* Software reset */
181 187
182/* We've been assigned a range on the "Low-density serial ports" major */ 188/* We've been assigned a range on the "Low-density serial ports" major */
183#ifdef CONFIG_ARCH_IMX
184#define SERIAL_IMX_MAJOR 204
185#define MINOR_START 41
186#define DEV_NAME "ttySMX"
187#define MAX_INTERNAL_IRQ IMX_IRQS
188#endif
189
190#ifdef CONFIG_ARCH_MXC 189#ifdef CONFIG_ARCH_MXC
191#define SERIAL_IMX_MAJOR 207 190#define SERIAL_IMX_MAJOR 207
192#define MINOR_START 16 191#define MINOR_START 16
@@ -211,10 +210,20 @@ struct imx_port {
211 struct timer_list timer; 210 struct timer_list timer;
212 unsigned int old_status; 211 unsigned int old_status;
213 int txirq,rxirq,rtsirq; 212 int txirq,rxirq,rtsirq;
214 int have_rtscts:1; 213 unsigned int have_rtscts:1;
214 unsigned int use_irda:1;
215 unsigned int irda_inv_rx:1;
216 unsigned int irda_inv_tx:1;
217 unsigned short trcv_delay; /* transceiver delay */
215 struct clk *clk; 218 struct clk *clk;
216}; 219};
217 220
221#ifdef CONFIG_IRDA
222#define USE_IRDA(sport) ((sport)->use_irda)
223#else
224#define USE_IRDA(sport) (0)
225#endif
226
218/* 227/*
219 * Handle any change of modem status signal since we were last called. 228 * Handle any change of modem status signal since we were last called.
220 */ 229 */
@@ -268,6 +277,48 @@ static void imx_stop_tx(struct uart_port *port)
268 struct imx_port *sport = (struct imx_port *)port; 277 struct imx_port *sport = (struct imx_port *)port;
269 unsigned long temp; 278 unsigned long temp;
270 279
280 if (USE_IRDA(sport)) {
281 /* half duplex - wait for end of transmission */
282 int n = 256;
283 while ((--n > 0) &&
284 !(readl(sport->port.membase + USR2) & USR2_TXDC)) {
285 udelay(5);
286 barrier();
287 }
288 /*
289 * irda transceiver - wait a bit more to avoid
290 * cutoff, hardware dependent
291 */
292 udelay(sport->trcv_delay);
293
294 /*
295 * half duplex - reactivate receive mode,
296 * flush receive pipe echo crap
297 */
298 if (readl(sport->port.membase + USR2) & USR2_TXDC) {
299 temp = readl(sport->port.membase + UCR1);
300 temp &= ~(UCR1_TXMPTYEN | UCR1_TRDYEN);
301 writel(temp, sport->port.membase + UCR1);
302
303 temp = readl(sport->port.membase + UCR4);
304 temp &= ~(UCR4_TCEN);
305 writel(temp, sport->port.membase + UCR4);
306
307 while (readl(sport->port.membase + URXD0) &
308 URXD_CHARRDY)
309 barrier();
310
311 temp = readl(sport->port.membase + UCR1);
312 temp |= UCR1_RRDYEN;
313 writel(temp, sport->port.membase + UCR1);
314
315 temp = readl(sport->port.membase + UCR4);
316 temp |= UCR4_DREN;
317 writel(temp, sport->port.membase + UCR4);
318 }
319 return;
320 }
321
271 temp = readl(sport->port.membase + UCR1); 322 temp = readl(sport->port.membase + UCR1);
272 writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1); 323 writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1);
273} 324}
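The IrDA path in imx_stop_tx() is a half-duplex turnaround: poll USR2_TXDC with a bounded loop until the transmitter is really done, wait trcv_delay microseconds for the external transceiver to settle, then mask the TX interrupts, drain any echo out of the RX FIFO, and re-arm receive. The bounded poll is the part worth isolating (a sketch, using the register helpers shown above):

	/* sketch: bounded wait for transmit-complete, ~1.3 ms worst case */
	static int imx_wait_txdc(struct imx_port *sport)
	{
		int n = 256;

		while (--n > 0 &&
		       !(readl(sport->port.membase + USR2) & USR2_TXDC))
			udelay(5);

		return n > 0;	/* nonzero if the FIFO drained in time */
	}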
@@ -302,13 +353,15 @@ static inline void imx_transmit_buffer(struct imx_port *sport)
302 /* send xmit->buf[xmit->tail] 353 /* send xmit->buf[xmit->tail]
303 * out the port here */ 354 * out the port here */
304 writel(xmit->buf[xmit->tail], sport->port.membase + URTX0); 355 writel(xmit->buf[xmit->tail], sport->port.membase + URTX0);
305 xmit->tail = (xmit->tail + 1) & 356 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
306 (UART_XMIT_SIZE - 1);
307 sport->port.icount.tx++; 357 sport->port.icount.tx++;
308 if (uart_circ_empty(xmit)) 358 if (uart_circ_empty(xmit))
309 break; 359 break;
310 } 360 }
311 361
362 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
363 uart_write_wakeup(&sport->port);
364
312 if (uart_circ_empty(xmit)) 365 if (uart_circ_empty(xmit))
313 imx_stop_tx(&sport->port); 366 imx_stop_tx(&sport->port);
314} 367}
@@ -321,9 +374,30 @@ static void imx_start_tx(struct uart_port *port)
321 struct imx_port *sport = (struct imx_port *)port; 374 struct imx_port *sport = (struct imx_port *)port;
322 unsigned long temp; 375 unsigned long temp;
323 376
377 if (USE_IRDA(sport)) {
378 /* half duplex in IrDA mode; have to disable receive mode */
379 temp = readl(sport->port.membase + UCR4);
380 temp &= ~(UCR4_DREN);
381 writel(temp, sport->port.membase + UCR4);
382
383 temp = readl(sport->port.membase + UCR1);
384 temp &= ~(UCR1_RRDYEN);
385 writel(temp, sport->port.membase + UCR1);
386 }
387
324 temp = readl(sport->port.membase + UCR1); 388 temp = readl(sport->port.membase + UCR1);
325 writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1); 389 writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1);
326 390
391 if (USE_IRDA(sport)) {
392 temp = readl(sport->port.membase + UCR1);
393 temp |= UCR1_TRDYEN;
394 writel(temp, sport->port.membase + UCR1);
395
396 temp = readl(sport->port.membase + UCR4);
397 temp |= UCR4_TCEN;
398 writel(temp, sport->port.membase + UCR4);
399 }
400
327 if (readl(sport->port.membase + UTS) & UTS_TXEMPTY) 401 if (readl(sport->port.membase + UTS) & UTS_TXEMPTY)
328 imx_transmit_buffer(sport); 402 imx_transmit_buffer(sport);
329} 403}
@@ -395,8 +469,7 @@ static irqreturn_t imx_rxint(int irq, void *dev_id)
395 continue; 469 continue;
396 } 470 }
397 471
398 if (uart_handle_sysrq_char 472 if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
399 (&sport->port, (unsigned char)rx))
400 continue; 473 continue;
401 474
402 if (rx & (URXD_PRERR | URXD_OVRRUN | URXD_FRMERR) ) { 475 if (rx & (URXD_PRERR | URXD_OVRRUN | URXD_FRMERR) ) {
@@ -471,26 +544,26 @@ static unsigned int imx_tx_empty(struct uart_port *port)
471 */ 544 */
472static unsigned int imx_get_mctrl(struct uart_port *port) 545static unsigned int imx_get_mctrl(struct uart_port *port)
473{ 546{
474 struct imx_port *sport = (struct imx_port *)port; 547 struct imx_port *sport = (struct imx_port *)port;
475 unsigned int tmp = TIOCM_DSR | TIOCM_CAR; 548 unsigned int tmp = TIOCM_DSR | TIOCM_CAR;
476 549
477 if (readl(sport->port.membase + USR1) & USR1_RTSS) 550 if (readl(sport->port.membase + USR1) & USR1_RTSS)
478 tmp |= TIOCM_CTS; 551 tmp |= TIOCM_CTS;
479 552
480 if (readl(sport->port.membase + UCR2) & UCR2_CTS) 553 if (readl(sport->port.membase + UCR2) & UCR2_CTS)
481 tmp |= TIOCM_RTS; 554 tmp |= TIOCM_RTS;
482 555
483 return tmp; 556 return tmp;
484} 557}
485 558
486static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl) 559static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl)
487{ 560{
488 struct imx_port *sport = (struct imx_port *)port; 561 struct imx_port *sport = (struct imx_port *)port;
489 unsigned long temp; 562 unsigned long temp;
490 563
491 temp = readl(sport->port.membase + UCR2) & ~UCR2_CTS; 564 temp = readl(sport->port.membase + UCR2) & ~UCR2_CTS;
492 565
493 if (mctrl & TIOCM_RTS) 566 if (mctrl & TIOCM_RTS)
494 temp |= UCR2_CTS; 567 temp |= UCR2_CTS;
495 568
496 writel(temp, sport->port.membase + UCR2); 569 writel(temp, sport->port.membase + UCR2);
@@ -534,12 +607,7 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
534 if(!ufcr_rfdiv) 607 if(!ufcr_rfdiv)
535 ufcr_rfdiv = 1; 608 ufcr_rfdiv = 1;
536 609
537 if(ufcr_rfdiv >= 7) 610 val |= UFCR_RFDIV_REG(ufcr_rfdiv);
538 ufcr_rfdiv = 6;
539 else
540 ufcr_rfdiv = 6 - ufcr_rfdiv;
541
542 val |= UFCR_RFDIV & (ufcr_rfdiv << 7);
543 611
544 writel(val, sport->port.membase + UFCR); 612 writel(val, sport->port.membase + UFCR);
545 613
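
The rewrite above folds the open-coded divider encoding into a UFCR_RFDIV_REG() helper. A hypothetical mirror of that macro as a plain function, reproducing exactly the logic the hunk deletes (the real macro lives in the i.MX UART register definitions):

#include <stdio.h>

static unsigned int ufcr_rfdiv_reg(unsigned int rfdiv)
{
	if (rfdiv >= 7)
		rfdiv = 6;		/* 6 in RFDIV means divide by 7 */
	else
		rfdiv = 6 - rfdiv;	/* e.g. divide-by-1 -> field 5 */
	return rfdiv << 7;		/* RFDIV occupies UFCR bits 9..7 */
}

int main(void)
{
	for (unsigned int n = 1; n <= 7; n++)
		printf("divide-by-%u -> %#x\n", n, ufcr_rfdiv_reg(n));
	return 0;
}
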
@@ -558,8 +626,24 @@ static int imx_startup(struct uart_port *port)
558 * requesting IRQs 626 * requesting IRQs
559 */ 627 */
560 temp = readl(sport->port.membase + UCR4); 628 temp = readl(sport->port.membase + UCR4);
629
630 if (USE_IRDA(sport))
631 temp |= UCR4_IRSC;
632
561 writel(temp & ~UCR4_DREN, sport->port.membase + UCR4); 633 writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
562 634
635 if (USE_IRDA(sport)) {
636 /* reset fifo's and state machines */
637 int i = 100;
638 temp = readl(sport->port.membase + UCR2);
639 temp &= ~UCR2_SRST;
640 writel(temp, sport->port.membase + UCR2);
641 while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) &&
642 (--i > 0)) {
643 udelay(1);
644 }
645 }
646
563 /* 647 /*
564 * Allocate the IRQ(s) i.MX1 has three interrupts whereas later 648 * Allocate the IRQ(s) i.MX1 has three interrupts whereas later
565 * chips only have one interrupt. 649 * chips only have one interrupt.
@@ -575,12 +659,16 @@ static int imx_startup(struct uart_port *port)
575 if (retval) 659 if (retval)
576 goto error_out2; 660 goto error_out2;
577 661
578 retval = request_irq(sport->rtsirq, imx_rtsint, 662 /* do not use RTS IRQ on IrDA */
579 (sport->rtsirq < MAX_INTERNAL_IRQ) ? 0 : 663 if (!USE_IRDA(sport)) {
580 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, 664 retval = request_irq(sport->rtsirq, imx_rtsint,
581 DRIVER_NAME, sport); 665 (sport->rtsirq < MAX_INTERNAL_IRQ) ? 0 :
582 if (retval) 666 IRQF_TRIGGER_FALLING |
583 goto error_out3; 667 IRQF_TRIGGER_RISING,
668 DRIVER_NAME, sport);
669 if (retval)
670 goto error_out3;
671 }
584 } else { 672 } else {
585 retval = request_irq(sport->port.irq, imx_int, 0, 673 retval = request_irq(sport->port.irq, imx_int, 0,
586 DRIVER_NAME, sport); 674 DRIVER_NAME, sport);
@@ -597,18 +685,49 @@ static int imx_startup(struct uart_port *port)
597 685
598 temp = readl(sport->port.membase + UCR1); 686 temp = readl(sport->port.membase + UCR1);
599 temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN; 687 temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
688
689 if (USE_IRDA(sport)) {
690 temp |= UCR1_IREN;
691 temp &= ~(UCR1_RTSDEN);
692 }
693
600 writel(temp, sport->port.membase + UCR1); 694 writel(temp, sport->port.membase + UCR1);
601 695
602 temp = readl(sport->port.membase + UCR2); 696 temp = readl(sport->port.membase + UCR2);
603 temp |= (UCR2_RXEN | UCR2_TXEN); 697 temp |= (UCR2_RXEN | UCR2_TXEN);
604 writel(temp, sport->port.membase + UCR2); 698 writel(temp, sport->port.membase + UCR2);
605 699
700 if (USE_IRDA(sport)) {
701 /* clear RX-FIFO */
702 int i = 64;
703 while ((--i > 0) &&
704 (readl(sport->port.membase + URXD0) & URXD_CHARRDY)) {
705 barrier();
706 }
707 }
708
606#if defined CONFIG_ARCH_MX2 || defined CONFIG_ARCH_MX3 709#if defined CONFIG_ARCH_MX2 || defined CONFIG_ARCH_MX3
607 temp = readl(sport->port.membase + UCR3); 710 temp = readl(sport->port.membase + UCR3);
608 temp |= UCR3_RXDMUXSEL; 711 temp |= UCR3_RXDMUXSEL;
609 writel(temp, sport->port.membase + UCR3); 712 writel(temp, sport->port.membase + UCR3);
610#endif 713#endif
611 714
715 if (USE_IRDA(sport)) {
716 temp = readl(sport->port.membase + UCR4);
717 if (sport->irda_inv_rx)
718 temp |= UCR4_INVR;
719 else
720 temp &= ~(UCR4_INVR);
721 writel(temp | UCR4_DREN, sport->port.membase + UCR4);
722
723 temp = readl(sport->port.membase + UCR3);
724 if (sport->irda_inv_tx)
725 temp |= UCR3_INVT;
726 else
727 temp &= ~(UCR3_INVT);
728 writel(temp, sport->port.membase + UCR3);
729 }
730
612 /* 731 /*
613 * Enable modem status interrupts 732 * Enable modem status interrupts
614 */ 733 */
@@ -616,6 +735,16 @@ static int imx_startup(struct uart_port *port)
616 imx_enable_ms(&sport->port); 735 imx_enable_ms(&sport->port);
617 spin_unlock_irqrestore(&sport->port.lock,flags); 736 spin_unlock_irqrestore(&sport->port.lock,flags);
618 737
738 if (USE_IRDA(sport)) {
739 struct imxuart_platform_data *pdata;
740 pdata = sport->port.dev->platform_data;
741 sport->irda_inv_rx = pdata->irda_inv_rx;
742 sport->irda_inv_tx = pdata->irda_inv_tx;
743 sport->trcv_delay = pdata->transceiver_delay;
744 if (pdata->irda_enable)
745 pdata->irda_enable(1);
746 }
747
619 return 0; 748 return 0;
620 749
621error_out3: 750error_out3:
@@ -633,6 +762,17 @@ static void imx_shutdown(struct uart_port *port)
633 struct imx_port *sport = (struct imx_port *)port; 762 struct imx_port *sport = (struct imx_port *)port;
634 unsigned long temp; 763 unsigned long temp;
635 764
765 temp = readl(sport->port.membase + UCR2);
766 temp &= ~(UCR2_TXEN);
767 writel(temp, sport->port.membase + UCR2);
768
769 if (USE_IRDA(sport)) {
770 struct imxuart_platform_data *pdata;
771 pdata = sport->port.dev->platform_data;
772 if (pdata->irda_enable)
773 pdata->irda_enable(0);
774 }
775
636 /* 776 /*
637 * Stop our timer. 777 * Stop our timer.
638 */ 778 */
@@ -642,7 +782,8 @@ static void imx_shutdown(struct uart_port *port)
642 * Free the interrupts 782 * Free the interrupts
643 */ 783 */
644 if (sport->txirq > 0) { 784 if (sport->txirq > 0) {
645 free_irq(sport->rtsirq, sport); 785 if (!USE_IRDA(sport))
786 free_irq(sport->rtsirq, sport);
646 free_irq(sport->txirq, sport); 787 free_irq(sport->txirq, sport);
647 free_irq(sport->rxirq, sport); 788 free_irq(sport->rxirq, sport);
648 } else 789 } else
@@ -654,6 +795,9 @@ static void imx_shutdown(struct uart_port *port)
654 795
655 temp = readl(sport->port.membase + UCR1); 796 temp = readl(sport->port.membase + UCR1);
656 temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN); 797 temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
798 if (USE_IRDA(sport))
799 temp &= ~(UCR1_IREN);
800
657 writel(temp, sport->port.membase + UCR1); 801 writel(temp, sport->port.membase + UCR1);
658} 802}
659 803
@@ -665,7 +809,9 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
665 unsigned long flags; 809 unsigned long flags;
666 unsigned int ucr2, old_ucr1, old_txrxen, baud, quot; 810 unsigned int ucr2, old_ucr1, old_txrxen, baud, quot;
667 unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8; 811 unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
668 unsigned int div, num, denom, ufcr; 812 unsigned int div, ufcr;
813 unsigned long num, denom;
814 uint64_t tdiv64;
669 815
670 /* 816 /*
671 * If we don't support modem control lines, don't allow 817 * If we don't support modem control lines, don't allow
@@ -761,38 +907,39 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
761 sport->port.membase + UCR2); 907 sport->port.membase + UCR2);
762 old_txrxen &= (UCR2_TXEN | UCR2_RXEN); 908 old_txrxen &= (UCR2_TXEN | UCR2_RXEN);
763 909
764 div = sport->port.uartclk / (baud * 16); 910 if (USE_IRDA(sport)) {
765 if (div > 7) 911 /*
766 div = 7; 912 * use maximum available submodule frequency to
767 if (!div) 913 * avoid missing short pulses due to low sampling rate
914 */
768 div = 1; 915 div = 1;
769 916 } else {
770 num = baud; 917 div = sport->port.uartclk / (baud * 16);
771 denom = port->uartclk / div / 16; 918 if (div > 7)
772 919 div = 7;
773 /* shift num and denom right until they fit into 16 bits */ 920 if (!div)
774 while (num > 0x10000 || denom > 0x10000) { 921 div = 1;
775 num >>= 1;
776 denom >>= 1;
777 } 922 }
778 if (num > 0)
779 num -= 1;
780 if (denom > 0)
781 denom -= 1;
782 923
783 writel(num, sport->port.membase + UBIR); 924 rational_best_approximation(16 * div * baud, sport->port.uartclk,
784 writel(denom, sport->port.membase + UBMR); 925 1 << 16, 1 << 16, &num, &denom);
785 926
786 if (div == 7) 927 tdiv64 = sport->port.uartclk;
787 div = 6; /* 6 in RFDIV means divide by 7 */ 928 tdiv64 *= num;
788 else 929 do_div(tdiv64, denom * 16 * div);
789 div = 6 - div; 930 tty_encode_baud_rate(sport->port.info->port.tty,
931 (speed_t)tdiv64, (speed_t)tdiv64);
932
933 num -= 1;
934 denom -= 1;
790 935
791 ufcr = readl(sport->port.membase + UFCR); 936 ufcr = readl(sport->port.membase + UFCR);
792 ufcr = (ufcr & (~UFCR_RFDIV)) | 937 ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
793 (div << 7);
794 writel(ufcr, sport->port.membase + UFCR); 938 writel(ufcr, sport->port.membase + UFCR);
795 939
940 writel(num, sport->port.membase + UBIR);
941 writel(denom, sport->port.membase + UBMR);
942
796#ifdef ONEMS 943#ifdef ONEMS
797 writel(sport->port.uartclk / div / 1000, sport->port.membase + ONEMS); 944 writel(sport->port.uartclk / div / 1000, sport->port.membase + ONEMS);
798#endif 945#endif
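
The new baud path delegates divisor selection to rational_best_approximation() from lib/rational.c, constraining numerator and denominator to the 16-bit UBIR/UBMR registers, then back-computes the rate actually programmed so tty_encode_baud_rate() can report it honestly. A userspace sketch of the arithmetic with illustrative inputs (16 MHz uartclk, 38400 baud, div 1, for which the best approximation of 614400/16000000 is exactly 24/625):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t uartclk = 16000000, div = 1;
	uint64_t num = 24, denom = 625;	/* as picked by rational_best_approximation() */

	/* effective rate: uartclk * num / (denom * 16 * div) */
	uint64_t baud_eff = uartclk * num / (denom * 16 * div);

	/* the registers are programmed with the values minus one */
	printf("UBIR=%llu UBMR=%llu effective baud=%llu\n",
	       (unsigned long long)(num - 1),
	       (unsigned long long)(denom - 1),
	       (unsigned long long)baud_eff);	/* 23 624 38400 */
	return 0;
}
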
@@ -1031,6 +1178,8 @@ imx_console_setup(struct console *co, char *options)
1031 if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports)) 1178 if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports))
1032 co->index = 0; 1179 co->index = 0;
1033 sport = imx_ports[co->index]; 1180 sport = imx_ports[co->index];
 1181	if (sport == NULL)
1182 return -ENODEV;
1034 1183
1035 if (options) 1184 if (options)
1036 uart_parse_options(options, &baud, &parity, &bits, &flow); 1185 uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -1070,22 +1219,22 @@ static struct uart_driver imx_reg = {
1070 1219
1071static int serial_imx_suspend(struct platform_device *dev, pm_message_t state) 1220static int serial_imx_suspend(struct platform_device *dev, pm_message_t state)
1072{ 1221{
1073 struct imx_port *sport = platform_get_drvdata(dev); 1222 struct imx_port *sport = platform_get_drvdata(dev);
1074 1223
1075 if (sport) 1224 if (sport)
1076 uart_suspend_port(&imx_reg, &sport->port); 1225 uart_suspend_port(&imx_reg, &sport->port);
1077 1226
1078 return 0; 1227 return 0;
1079} 1228}
1080 1229
1081static int serial_imx_resume(struct platform_device *dev) 1230static int serial_imx_resume(struct platform_device *dev)
1082{ 1231{
1083 struct imx_port *sport = platform_get_drvdata(dev); 1232 struct imx_port *sport = platform_get_drvdata(dev);
1084 1233
1085 if (sport) 1234 if (sport)
1086 uart_resume_port(&imx_reg, &sport->port); 1235 uart_resume_port(&imx_reg, &sport->port);
1087 1236
1088 return 0; 1237 return 0;
1089} 1238}
1090 1239
1091static int serial_imx_probe(struct platform_device *pdev) 1240static int serial_imx_probe(struct platform_device *pdev)
@@ -1141,19 +1290,29 @@ static int serial_imx_probe(struct platform_device *pdev)
1141 imx_ports[pdev->id] = sport; 1290 imx_ports[pdev->id] = sport;
1142 1291
1143 pdata = pdev->dev.platform_data; 1292 pdata = pdev->dev.platform_data;
1144 if(pdata && (pdata->flags & IMXUART_HAVE_RTSCTS)) 1293 if (pdata && (pdata->flags & IMXUART_HAVE_RTSCTS))
1145 sport->have_rtscts = 1; 1294 sport->have_rtscts = 1;
1146 1295
1296#ifdef CONFIG_IRDA
1297 if (pdata && (pdata->flags & IMXUART_IRDA))
1298 sport->use_irda = 1;
1299#endif
1300
1147 if (pdata->init) { 1301 if (pdata->init) {
1148 ret = pdata->init(pdev); 1302 ret = pdata->init(pdev);
1149 if (ret) 1303 if (ret)
1150 goto clkput; 1304 goto clkput;
1151 } 1305 }
1152 1306
1153 uart_add_one_port(&imx_reg, &sport->port); 1307 ret = uart_add_one_port(&imx_reg, &sport->port);
1308 if (ret)
1309 goto deinit;
1154 platform_set_drvdata(pdev, &sport->port); 1310 platform_set_drvdata(pdev, &sport->port);
1155 1311
1156 return 0; 1312 return 0;
1313deinit:
1314 if (pdata->exit)
1315 pdata->exit(pdev);
1157clkput: 1316clkput:
1158 clk_put(sport->clk); 1317 clk_put(sport->clk);
1159 clk_disable(sport->clk); 1318 clk_disable(sport->clk);
@@ -1191,13 +1350,13 @@ static int serial_imx_remove(struct platform_device *pdev)
1191} 1350}
1192 1351
1193static struct platform_driver serial_imx_driver = { 1352static struct platform_driver serial_imx_driver = {
1194 .probe = serial_imx_probe, 1353 .probe = serial_imx_probe,
1195 .remove = serial_imx_remove, 1354 .remove = serial_imx_remove,
1196 1355
1197 .suspend = serial_imx_suspend, 1356 .suspend = serial_imx_suspend,
1198 .resume = serial_imx_resume, 1357 .resume = serial_imx_resume,
1199 .driver = { 1358 .driver = {
1200 .name = "imx-uart", 1359 .name = "imx-uart",
1201 .owner = THIS_MODULE, 1360 .owner = THIS_MODULE,
1202 }, 1361 },
1203}; 1362};
diff --git a/drivers/serial/jsm/jsm.h b/drivers/serial/jsm/jsm.h
index c0a3e2734e2..4e5f3bde046 100644
--- a/drivers/serial/jsm/jsm.h
+++ b/drivers/serial/jsm/jsm.h
@@ -61,6 +61,7 @@ enum {
61 if ((DBG_##nlevel & jsm_debug)) \ 61 if ((DBG_##nlevel & jsm_debug)) \
62 dev_printk(KERN_##klevel, pdev->dev, fmt, ## args) 62 dev_printk(KERN_##klevel, pdev->dev, fmt, ## args)
63 63
64#define MAXLINES 256
64#define MAXPORTS 8 65#define MAXPORTS 8
65#define MAX_STOPS_SENT 5 66#define MAX_STOPS_SENT 5
66 67
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index 31496dc0a0d..107ce2e187b 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -33,6 +33,8 @@
33 33
34#include "jsm.h" 34#include "jsm.h"
35 35
36static DECLARE_BITMAP(linemap, MAXLINES);
37
36static void jsm_carrier(struct jsm_channel *ch); 38static void jsm_carrier(struct jsm_channel *ch);
37 39
38static inline int jsm_get_mstat(struct jsm_channel *ch) 40static inline int jsm_get_mstat(struct jsm_channel *ch)
@@ -433,6 +435,7 @@ int __devinit jsm_tty_init(struct jsm_board *brd)
433int __devinit jsm_uart_port_init(struct jsm_board *brd) 435int __devinit jsm_uart_port_init(struct jsm_board *brd)
434{ 436{
435 int i; 437 int i;
438 unsigned int line;
436 struct jsm_channel *ch; 439 struct jsm_channel *ch;
437 440
438 if (!brd) 441 if (!brd)
@@ -459,9 +462,15 @@ int __devinit jsm_uart_port_init(struct jsm_board *brd)
459 brd->channels[i]->uart_port.membase = brd->re_map_membase; 462 brd->channels[i]->uart_port.membase = brd->re_map_membase;
460 brd->channels[i]->uart_port.fifosize = 16; 463 brd->channels[i]->uart_port.fifosize = 16;
461 brd->channels[i]->uart_port.ops = &jsm_ops; 464 brd->channels[i]->uart_port.ops = &jsm_ops;
462 brd->channels[i]->uart_port.line = brd->channels[i]->ch_portnum + brd->boardnum * 2; 465 line = find_first_zero_bit(linemap, MAXLINES);
466 if (line >= MAXLINES) {
 467			printk(KERN_INFO "jsm: linemap is full, add device failed\n");
468 continue;
469 } else
470 set_bit((int)line, linemap);
471 brd->channels[i]->uart_port.line = line;
463 if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port)) 472 if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port))
464 printk(KERN_INFO "Added device failed\n"); 473 printk(KERN_INFO "jsm: add device failed\n");
465 else 474 else
466 printk(KERN_INFO "Added device \n"); 475 printk(KERN_INFO "Added device \n");
467 } 476 }
@@ -494,6 +503,7 @@ int jsm_remove_uart_port(struct jsm_board *brd)
494 503
495 ch = brd->channels[i]; 504 ch = brd->channels[i];
496 505
506 clear_bit((int)(ch->uart_port.line), linemap);
497 uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port); 507 uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
498 } 508 }
499 509
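
The jsm changes swap the fixed ch_portnum + boardnum * 2 line numbering, which can collide or leave gaps as boards come and go, for a driver-wide bitmap that always hands out the lowest free line and releases it on removal. A userspace model of the pattern (the kernel version uses DECLARE_BITMAP with find_first_zero_bit/set_bit/clear_bit; a byte array stands in here):

#include <stdio.h>

#define MAXLINES 256
static unsigned char linemap[MAXLINES];	/* 0 = free, 1 = in use */

static int alloc_line(void)
{
	for (int i = 0; i < MAXLINES; i++)
		if (!linemap[i]) {
			linemap[i] = 1;
			return i;
		}
	return -1;	/* map full: the driver skips the port */
}

static void free_line(int line)
{
	linemap[line] = 0;
}

int main(void)
{
	int a = alloc_line(), b = alloc_line();
	free_line(a);
	printf("%d %d %d\n", a, b, alloc_line());	/* 0 1 0: holes are reused */
	return 0;
}
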
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 7f72f8ceaa6..b3feb6198d5 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -988,7 +988,7 @@ mpc52xx_console_setup(struct console *co, char *options)
988 pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n", 988 pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n",
989 co, co->index, options); 989 co, co->index, options);
990 990
991 if ((co->index < 0) || (co->index > MPC52xx_PSC_MAXNUM)) { 991 if ((co->index < 0) || (co->index >= MPC52xx_PSC_MAXNUM)) {
992 pr_debug("PSC%x out of range\n", co->index); 992 pr_debug("PSC%x out of range\n", co->index);
993 return -EINVAL; 993 return -EINVAL;
994 } 994 }
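
The mpc52xx one-liner is an off-by-one fix: with MPC52xx_PSC_MAXNUM ports, valid console indices run from 0 to MAXNUM - 1, so the old '>' comparison wrongly accepted co->index == MAXNUM. A tiny illustration (the MAXNUM value here is assumed for the demo):

#include <stdio.h>

#define MPC52xx_PSC_MAXNUM 3	/* hypothetical port count */

static int index_ok(int idx)
{
	return idx >= 0 && idx < MPC52xx_PSC_MAXNUM;	/* the fixed test */
}

int main(void)
{
	/* 0..2 are valid; 3 used to slip through the old '>' test */
	for (int i = -1; i <= MPC52xx_PSC_MAXNUM; i++)
		printf("index %d: %s\n", i, index_ok(i) ? "ok" : "rejected");
	return 0;
}
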
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index dbf5357a77b..a4cf1079b31 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -47,12 +47,17 @@
47#include <linux/clk.h> 47#include <linux/clk.h>
48#include <linux/ctype.h> 48#include <linux/ctype.h>
49#include <linux/err.h> 49#include <linux/err.h>
50#include <linux/list.h>
50 51
51#ifdef CONFIG_SUPERH 52#ifdef CONFIG_SUPERH
52#include <asm/clock.h> 53#include <asm/clock.h>
53#include <asm/sh_bios.h> 54#include <asm/sh_bios.h>
54#endif 55#endif
55 56
57#ifdef CONFIG_H8300
58#include <asm/gpio.h>
59#endif
60
56#include "sh-sci.h" 61#include "sh-sci.h"
57 62
58struct sci_port { 63struct sci_port {
@@ -75,14 +80,22 @@ struct sci_port {
75 int break_flag; 80 int break_flag;
76 81
77#ifdef CONFIG_HAVE_CLK 82#ifdef CONFIG_HAVE_CLK
78 /* Port clock */ 83 /* Interface clock */
79 struct clk *clk; 84 struct clk *iclk;
85 /* Data clock */
86 struct clk *dclk;
80#endif 87#endif
88 struct list_head node;
81}; 89};
82 90
83#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE 91struct sh_sci_priv {
84static struct sci_port *serial_console_port; 92 spinlock_t lock;
93 struct list_head ports;
94
95#ifdef CONFIG_HAVE_CLK
96 struct notifier_block clk_nb;
85#endif 97#endif
98};
86 99
87/* Function prototypes */ 100/* Function prototypes */
88static void sci_stop_tx(struct uart_port *port); 101static void sci_stop_tx(struct uart_port *port);
@@ -138,9 +151,8 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c)
138 status = sci_in(port, SCxSR); 151 status = sci_in(port, SCxSR);
139 } while (!(status & SCxSR_TDxE(port))); 152 } while (!(status & SCxSR_TDxE(port)));
140 153
141 sci_in(port, SCxSR); /* Dummy read */
142 sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
143 sci_out(port, SCxTDR, c); 154 sci_out(port, SCxTDR, c);
155 sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
144} 156}
145#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ 157#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
146 158
@@ -159,12 +171,12 @@ static void h8300_sci_config(struct uart_port *port, unsigned int ctrl)
159 *mstpcrl &= ~mask; 171 *mstpcrl &= ~mask;
160} 172}
161 173
162static inline void h8300_sci_enable(struct uart_port *port) 174static void h8300_sci_enable(struct uart_port *port)
163{ 175{
164 h8300_sci_config(port, sci_enable); 176 h8300_sci_config(port, sci_enable);
165} 177}
166 178
167static inline void h8300_sci_disable(struct uart_port *port) 179static void h8300_sci_disable(struct uart_port *port)
168{ 180{
169 h8300_sci_config(port, sci_disable); 181 h8300_sci_config(port, sci_disable);
170} 182}
@@ -611,7 +623,7 @@ static inline int sci_handle_breaks(struct uart_port *port)
611 int copied = 0; 623 int copied = 0;
612 unsigned short status = sci_in(port, SCxSR); 624 unsigned short status = sci_in(port, SCxSR);
613 struct tty_struct *tty = port->info->port.tty; 625 struct tty_struct *tty = port->info->port.tty;
614 struct sci_port *s = &sci_ports[port->line]; 626 struct sci_port *s = to_sci_port(port);
615 627
616 if (uart_handle_break(port)) 628 if (uart_handle_break(port))
617 return 0; 629 return 0;
@@ -726,19 +738,43 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
726static int sci_notifier(struct notifier_block *self, 738static int sci_notifier(struct notifier_block *self,
727 unsigned long phase, void *p) 739 unsigned long phase, void *p)
728{ 740{
729 int i; 741 struct sh_sci_priv *priv = container_of(self,
742 struct sh_sci_priv, clk_nb);
743 struct sci_port *sci_port;
744 unsigned long flags;
730 745
731 if ((phase == CPUFREQ_POSTCHANGE) || 746 if ((phase == CPUFREQ_POSTCHANGE) ||
732 (phase == CPUFREQ_RESUMECHANGE)) 747 (phase == CPUFREQ_RESUMECHANGE)) {
733 for (i = 0; i < SCI_NPORTS; i++) { 748 spin_lock_irqsave(&priv->lock, flags);
734 struct sci_port *s = &sci_ports[i]; 749 list_for_each_entry(sci_port, &priv->ports, node)
735 s->port.uartclk = clk_get_rate(s->clk); 750 sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
736 } 751
752 spin_unlock_irqrestore(&priv->lock, flags);
753 }
737 754
738 return NOTIFY_OK; 755 return NOTIFY_OK;
739} 756}
740 757
741static struct notifier_block sci_nb = { &sci_notifier, NULL, 0 }; 758static void sci_clk_enable(struct uart_port *port)
759{
760 struct sci_port *sci_port = to_sci_port(port);
761
762 clk_enable(sci_port->dclk);
763 sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
764
765 if (sci_port->iclk)
766 clk_enable(sci_port->iclk);
767}
768
769static void sci_clk_disable(struct uart_port *port)
770{
771 struct sci_port *sci_port = to_sci_port(port);
772
773 if (sci_port->iclk)
774 clk_disable(sci_port->iclk);
775
776 clk_disable(sci_port->dclk);
777}
742#endif 778#endif
743 779
744static int sci_request_irq(struct sci_port *port) 780static int sci_request_irq(struct sci_port *port)
@@ -865,15 +901,11 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
865 901
866static int sci_startup(struct uart_port *port) 902static int sci_startup(struct uart_port *port)
867{ 903{
868 struct sci_port *s = &sci_ports[port->line]; 904 struct sci_port *s = to_sci_port(port);
869 905
870 if (s->enable) 906 if (s->enable)
871 s->enable(port); 907 s->enable(port);
872 908
873#ifdef CONFIG_HAVE_CLK
874 s->clk = clk_get(NULL, "module_clk");
875#endif
876
877 sci_request_irq(s); 909 sci_request_irq(s);
878 sci_start_tx(port); 910 sci_start_tx(port);
879 sci_start_rx(port, 1); 911 sci_start_rx(port, 1);
@@ -883,7 +915,7 @@ static int sci_startup(struct uart_port *port)
883 915
884static void sci_shutdown(struct uart_port *port) 916static void sci_shutdown(struct uart_port *port)
885{ 917{
886 struct sci_port *s = &sci_ports[port->line]; 918 struct sci_port *s = to_sci_port(port);
887 919
888 sci_stop_rx(port); 920 sci_stop_rx(port);
889 sci_stop_tx(port); 921 sci_stop_tx(port);
@@ -891,11 +923,6 @@ static void sci_shutdown(struct uart_port *port)
891 923
892 if (s->disable) 924 if (s->disable)
893 s->disable(port); 925 s->disable(port);
894
895#ifdef CONFIG_HAVE_CLK
896 clk_put(s->clk);
897 s->clk = NULL;
898#endif
899} 926}
900 927
901static void sci_set_termios(struct uart_port *port, struct ktermios *termios, 928static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
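
Throughout these sh-sci hunks, &sci_ports[port->line] gives way to to_sci_port(port), which recovers the containing sci_port from its embedded uart_port and therefore also works for ports that never came from the static array. A plausible sketch of that helper with minimal stand-in types (the real definition lives in the sh-sci sources):

#include <stddef.h>
#include <stdio.h>

struct uart_port { int line; };
struct sci_port  { struct uart_port port; int type; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct sci_port *to_sci_port(struct uart_port *p)
{
	return container_of(p, struct sci_port, port);
}

int main(void)
{
	struct sci_port s = { .port = { .line = 2 }, .type = 42 };

	/* recover the wrapper from a pointer to the embedded member */
	printf("type=%d\n", to_sci_port(&s.port)->type);	/* 42 */
	return 0;
}
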
@@ -980,25 +1007,31 @@ static int sci_request_port(struct uart_port *port)
980 1007
981static void sci_config_port(struct uart_port *port, int flags) 1008static void sci_config_port(struct uart_port *port, int flags)
982{ 1009{
983 struct sci_port *s = &sci_ports[port->line]; 1010 struct sci_port *s = to_sci_port(port);
984 1011
985 port->type = s->type; 1012 port->type = s->type;
986 1013
987 if (port->flags & UPF_IOREMAP && !port->membase) { 1014 if (port->membase)
988#if defined(CONFIG_SUPERH64) 1015 return;
989 port->mapbase = onchip_remap(SCIF_ADDR_SH5, 1024, "SCIF"); 1016
990 port->membase = (void __iomem *)port->mapbase; 1017 if (port->flags & UPF_IOREMAP) {
991#else
992 port->membase = ioremap_nocache(port->mapbase, 0x40); 1018 port->membase = ioremap_nocache(port->mapbase, 0x40);
993#endif
994 1019
 995 dev_err(port->dev, "can't remap port#%d\n", port->line); 1020		if (!port->membase)
1021 dev_err(port->dev, "can't remap port#%d\n", port->line);
1022 } else {
1023 /*
1024 * For the simple (and majority of) cases where we don't
1025 * need to do any remapping, just cast the cookie
1026 * directly.
1027 */
1028 port->membase = (void __iomem *)port->mapbase;
996 } 1029 }
997} 1030}
998 1031
999static int sci_verify_port(struct uart_port *port, struct serial_struct *ser) 1032static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
1000{ 1033{
1001 struct sci_port *s = &sci_ports[port->line]; 1034 struct sci_port *s = to_sci_port(port);
1002 1035
1003 if (ser->irq != s->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs) 1036 if (ser->irq != s->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
1004 return -EINVAL; 1037 return -EINVAL;
@@ -1032,63 +1065,60 @@ static struct uart_ops sci_uart_ops = {
1032#endif 1065#endif
1033}; 1066};
1034 1067
1035static void __init sci_init_ports(void) 1068static void __devinit sci_init_single(struct platform_device *dev,
1069 struct sci_port *sci_port,
1070 unsigned int index,
1071 struct plat_sci_port *p)
1036{ 1072{
1037 static int first = 1; 1073 sci_port->port.ops = &sci_uart_ops;
1038 int i; 1074 sci_port->port.iotype = UPIO_MEM;
1039 1075 sci_port->port.line = index;
1040 if (!first) 1076 sci_port->port.fifosize = 1;
1041 return;
1042
1043 first = 0;
1044
1045 for (i = 0; i < SCI_NPORTS; i++) {
1046 sci_ports[i].port.ops = &sci_uart_ops;
1047 sci_ports[i].port.iotype = UPIO_MEM;
1048 sci_ports[i].port.line = i;
1049 sci_ports[i].port.fifosize = 1;
1050 1077
1051#if defined(__H8300H__) || defined(__H8300S__) 1078#if defined(__H8300H__) || defined(__H8300S__)
1052#ifdef __H8300S__ 1079#ifdef __H8300S__
1053 sci_ports[i].enable = h8300_sci_enable; 1080 sci_port->enable = h8300_sci_enable;
1054 sci_ports[i].disable = h8300_sci_disable; 1081 sci_port->disable = h8300_sci_disable;
1055#endif 1082#endif
1056 sci_ports[i].port.uartclk = CONFIG_CPU_CLOCK; 1083 sci_port->port.uartclk = CONFIG_CPU_CLOCK;
1057#elif defined(CONFIG_HAVE_CLK) 1084#elif defined(CONFIG_HAVE_CLK)
1058 /* 1085 sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
1059 * XXX: We should use a proper SCI/SCIF clock 1086 sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
1060 */ 1087 sci_port->enable = sci_clk_enable;
1061 { 1088 sci_port->disable = sci_clk_disable;
1062 struct clk *clk = clk_get(NULL, "module_clk");
1063 sci_ports[i].port.uartclk = clk_get_rate(clk);
1064 clk_put(clk);
1065 }
1066#else 1089#else
1067#error "Need a valid uartclk" 1090#error "Need a valid uartclk"
1068#endif 1091#endif
1069 1092
1070 sci_ports[i].break_timer.data = (unsigned long)&sci_ports[i]; 1093 sci_port->break_timer.data = (unsigned long)sci_port;
1071 sci_ports[i].break_timer.function = sci_break_timer; 1094 sci_port->break_timer.function = sci_break_timer;
1095 init_timer(&sci_port->break_timer);
1072 1096
1073 init_timer(&sci_ports[i].break_timer); 1097 sci_port->port.mapbase = p->mapbase;
1074 } 1098 sci_port->port.membase = p->membase;
1075}
1076
1077int __init early_sci_setup(struct uart_port *port)
1078{
1079 if (unlikely(port->line > SCI_NPORTS))
1080 return -ENODEV;
1081 1099
1082 sci_init_ports(); 1100 sci_port->port.irq = p->irqs[SCIx_TXI_IRQ];
1101 sci_port->port.flags = p->flags;
1102 sci_port->port.dev = &dev->dev;
1103 sci_port->type = sci_port->port.type = p->type;
1083 1104
1084 sci_ports[port->line].port.membase = port->membase; 1105 memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
1085 sci_ports[port->line].port.mapbase = port->mapbase;
1086 sci_ports[port->line].port.type = port->type;
1087 1106
1088 return 0;
1089} 1107}
1090 1108
1091#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE 1109#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
1110static struct tty_driver *serial_console_device(struct console *co, int *index)
1111{
1112 struct uart_driver *p = &sci_uart_driver;
1113 *index = co->index;
1114 return p->tty_driver;
1115}
1116
1117static void serial_console_putchar(struct uart_port *port, int ch)
1118{
1119 sci_poll_put_char(port, ch);
1120}
1121
1092/* 1122/*
1093 * Print a string to the serial port trying not to disturb 1123 * Print a string to the serial port trying not to disturb
1094 * any possible real use of the port... 1124 * any possible real use of the port...
@@ -1096,25 +1126,27 @@ int __init early_sci_setup(struct uart_port *port)
1096static void serial_console_write(struct console *co, const char *s, 1126static void serial_console_write(struct console *co, const char *s,
1097 unsigned count) 1127 unsigned count)
1098{ 1128{
1099 struct uart_port *port = &serial_console_port->port; 1129 struct uart_port *port = co->data;
1130 struct sci_port *sci_port = to_sci_port(port);
1100 unsigned short bits; 1131 unsigned short bits;
1101 int i;
1102 1132
1103 for (i = 0; i < count; i++) { 1133 if (sci_port->enable)
1104 if (*s == 10) 1134 sci_port->enable(port);
1105 sci_poll_put_char(port, '\r');
1106 1135
1107 sci_poll_put_char(port, *s++); 1136 uart_console_write(port, s, count, serial_console_putchar);
1108 }
1109 1137
1110 /* wait until fifo is empty and last bit has been transmitted */ 1138 /* wait until fifo is empty and last bit has been transmitted */
1111 bits = SCxSR_TDxE(port) | SCxSR_TEND(port); 1139 bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
1112 while ((sci_in(port, SCxSR) & bits) != bits) 1140 while ((sci_in(port, SCxSR) & bits) != bits)
1113 cpu_relax(); 1141 cpu_relax();
1142
 1143	if (sci_port->disable)
1144 sci_port->disable(port);
1114} 1145}
1115 1146
1116static int __init serial_console_setup(struct console *co, char *options) 1147static int __init serial_console_setup(struct console *co, char *options)
1117{ 1148{
1149 struct sci_port *sci_port;
1118 struct uart_port *port; 1150 struct uart_port *port;
1119 int baud = 115200; 1151 int baud = 115200;
1120 int bits = 8; 1152 int bits = 8;
@@ -1130,8 +1162,9 @@ static int __init serial_console_setup(struct console *co, char *options)
1130 if (co->index >= SCI_NPORTS) 1162 if (co->index >= SCI_NPORTS)
1131 co->index = 0; 1163 co->index = 0;
1132 1164
1133 serial_console_port = &sci_ports[co->index]; 1165 sci_port = &sci_ports[co->index];
1134 port = &serial_console_port->port; 1166 port = &sci_port->port;
1167 co->data = port;
1135 1168
1136 /* 1169 /*
1137 * Also need to check port->type, we don't actually have any 1170 * Also need to check port->type, we don't actually have any
@@ -1141,21 +1174,11 @@ static int __init serial_console_setup(struct console *co, char *options)
1141 */ 1174 */
1142 if (!port->type) 1175 if (!port->type)
1143 return -ENODEV; 1176 return -ENODEV;
1144 if (!port->membase || !port->mapbase)
1145 return -ENODEV;
1146
1147 port->type = serial_console_port->type;
1148
1149#ifdef CONFIG_HAVE_CLK
1150 if (!serial_console_port->clk)
1151 serial_console_port->clk = clk_get(NULL, "module_clk");
1152#endif
1153 1177
1154 if (port->flags & UPF_IOREMAP) 1178 sci_config_port(port, 0);
1155 sci_config_port(port, 0);
1156 1179
1157 if (serial_console_port->enable) 1180 if (sci_port->enable)
1158 serial_console_port->enable(port); 1181 sci_port->enable(port);
1159 1182
1160 if (options) 1183 if (options)
1161 uart_parse_options(options, &baud, &parity, &bits, &flow); 1184 uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -1166,22 +1189,21 @@ static int __init serial_console_setup(struct console *co, char *options)
1166 if (ret == 0) 1189 if (ret == 0)
1167 sci_stop_rx(port); 1190 sci_stop_rx(port);
1168#endif 1191#endif
1192 /* TODO: disable clock */
1169 return ret; 1193 return ret;
1170} 1194}
1171 1195
1172static struct console serial_console = { 1196static struct console serial_console = {
1173 .name = "ttySC", 1197 .name = "ttySC",
1174 .device = uart_console_device, 1198 .device = serial_console_device,
1175 .write = serial_console_write, 1199 .write = serial_console_write,
1176 .setup = serial_console_setup, 1200 .setup = serial_console_setup,
1177 .flags = CON_PRINTBUFFER, 1201 .flags = CON_PRINTBUFFER,
1178 .index = -1, 1202 .index = -1,
1179 .data = &sci_uart_driver,
1180}; 1203};
1181 1204
1182static int __init sci_console_init(void) 1205static int __init sci_console_init(void)
1183{ 1206{
1184 sci_init_ports();
1185 register_console(&serial_console); 1207 register_console(&serial_console);
1186 return 0; 1208 return 0;
1187} 1209}
@@ -1207,6 +1229,61 @@ static struct uart_driver sci_uart_driver = {
1207 .cons = SCI_CONSOLE, 1229 .cons = SCI_CONSOLE,
1208}; 1230};
1209 1231
1232
1233static int sci_remove(struct platform_device *dev)
1234{
1235 struct sh_sci_priv *priv = platform_get_drvdata(dev);
1236 struct sci_port *p;
1237 unsigned long flags;
1238
1239#ifdef CONFIG_HAVE_CLK
1240 cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
1241#endif
1242
1243 spin_lock_irqsave(&priv->lock, flags);
1244 list_for_each_entry(p, &priv->ports, node)
1245 uart_remove_one_port(&sci_uart_driver, &p->port);
1246
1247 spin_unlock_irqrestore(&priv->lock, flags);
1248
1249 kfree(priv);
1250 return 0;
1251}
1252
1253static int __devinit sci_probe_single(struct platform_device *dev,
1254 unsigned int index,
1255 struct plat_sci_port *p,
1256 struct sci_port *sciport)
1257{
1258 struct sh_sci_priv *priv = platform_get_drvdata(dev);
1259 unsigned long flags;
1260 int ret;
1261
1262 /* Sanity check */
1263 if (unlikely(index >= SCI_NPORTS)) {
1264 dev_notice(&dev->dev, "Attempting to register port "
1265 "%d when only %d are available.\n",
1266 index+1, SCI_NPORTS);
1267 dev_notice(&dev->dev, "Consider bumping "
1268 "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
1269 return 0;
1270 }
1271
1272 sci_init_single(dev, sciport, index, p);
1273
1274 ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
1275 if (ret)
1276 return ret;
1277
1278 INIT_LIST_HEAD(&sciport->node);
1279
1280 spin_lock_irqsave(&priv->lock, flags);
1281 list_add(&sciport->node, &priv->ports);
1282 spin_unlock_irqrestore(&priv->lock, flags);
1283
1284 return 0;
1285}
1286
1210/* 1287/*
1211 * Register a set of serial devices attached to a platform device. The 1288 * Register a set of serial devices attached to a platform device. The
1212 * list is terminated with a zero flags entry, which means we expect 1289 * list is terminated with a zero flags entry, which means we expect
@@ -1216,57 +1293,34 @@ static struct uart_driver sci_uart_driver = {
1216static int __devinit sci_probe(struct platform_device *dev) 1293static int __devinit sci_probe(struct platform_device *dev)
1217{ 1294{
1218 struct plat_sci_port *p = dev->dev.platform_data; 1295 struct plat_sci_port *p = dev->dev.platform_data;
1296 struct sh_sci_priv *priv;
1219 int i, ret = -EINVAL; 1297 int i, ret = -EINVAL;
1220 1298
1221 for (i = 0; p && p->flags != 0; p++, i++) { 1299 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1222 struct sci_port *sciport = &sci_ports[i]; 1300 if (!priv)
1301 return -ENOMEM;
1223 1302
1224 /* Sanity check */ 1303 INIT_LIST_HEAD(&priv->ports);
1225 if (unlikely(i == SCI_NPORTS)) { 1304 spin_lock_init(&priv->lock);
1226 dev_notice(&dev->dev, "Attempting to register port " 1305 platform_set_drvdata(dev, priv);
1227 "%d when only %d are available.\n",
1228 i+1, SCI_NPORTS);
1229 dev_notice(&dev->dev, "Consider bumping "
1230 "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
1231 break;
1232 }
1233 1306
1234 sciport->port.mapbase = p->mapbase; 1307#ifdef CONFIG_HAVE_CLK
1308 priv->clk_nb.notifier_call = sci_notifier;
1309 cpufreq_register_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
1310#endif
1235 1311
1236 if (p->mapbase && !p->membase) { 1312 if (dev->id != -1) {
1237 if (p->flags & UPF_IOREMAP) { 1313 ret = sci_probe_single(dev, dev->id, p, &sci_ports[dev->id]);
1238 p->membase = ioremap_nocache(p->mapbase, 0x40); 1314 if (ret)
1239 if (IS_ERR(p->membase)) { 1315 goto err_unreg;
1240 ret = PTR_ERR(p->membase); 1316 } else {
1241 goto err_unreg; 1317 for (i = 0; p && p->flags != 0; p++, i++) {
1242 } 1318 ret = sci_probe_single(dev, i, p, &sci_ports[i]);
1243 } else { 1319 if (ret)
1244 /* 1320 goto err_unreg;
1245 * For the simple (and majority of) cases
1246 * where we don't need to do any remapping,
1247 * just cast the cookie directly.
1248 */
1249 p->membase = (void __iomem *)p->mapbase;
1250 }
1251 } 1321 }
1252
1253 sciport->port.membase = p->membase;
1254
1255 sciport->port.irq = p->irqs[SCIx_TXI_IRQ];
1256 sciport->port.flags = p->flags;
1257 sciport->port.dev = &dev->dev;
1258
1259 sciport->type = sciport->port.type = p->type;
1260
1261 memcpy(&sciport->irqs, &p->irqs, sizeof(p->irqs));
1262
1263 uart_add_one_port(&sci_uart_driver, &sciport->port);
1264 } 1322 }
1265 1323
1266#ifdef CONFIG_HAVE_CLK
1267 cpufreq_register_notifier(&sci_nb, CPUFREQ_TRANSITION_NOTIFIER);
1268#endif
1269
1270#ifdef CONFIG_SH_STANDARD_BIOS 1324#ifdef CONFIG_SH_STANDARD_BIOS
1271 sh_bios_gdb_detach(); 1325 sh_bios_gdb_detach();
1272#endif 1326#endif
@@ -1274,50 +1328,36 @@ static int __devinit sci_probe(struct platform_device *dev)
1274 return 0; 1328 return 0;
1275 1329
1276err_unreg: 1330err_unreg:
1277 for (i = i - 1; i >= 0; i--) 1331 sci_remove(dev);
1278 uart_remove_one_port(&sci_uart_driver, &sci_ports[i].port);
1279
1280 return ret; 1332 return ret;
1281} 1333}
1282 1334
1283static int __devexit sci_remove(struct platform_device *dev)
1284{
1285 int i;
1286
1287#ifdef CONFIG_HAVE_CLK
1288 cpufreq_unregister_notifier(&sci_nb, CPUFREQ_TRANSITION_NOTIFIER);
1289#endif
1290
1291 for (i = 0; i < SCI_NPORTS; i++)
1292 uart_remove_one_port(&sci_uart_driver, &sci_ports[i].port);
1293
1294 return 0;
1295}
1296
1297static int sci_suspend(struct platform_device *dev, pm_message_t state) 1335static int sci_suspend(struct platform_device *dev, pm_message_t state)
1298{ 1336{
1299 int i; 1337 struct sh_sci_priv *priv = platform_get_drvdata(dev);
1338 struct sci_port *p;
1339 unsigned long flags;
1300 1340
1301 for (i = 0; i < SCI_NPORTS; i++) { 1341 spin_lock_irqsave(&priv->lock, flags);
1302 struct sci_port *p = &sci_ports[i]; 1342 list_for_each_entry(p, &priv->ports, node)
1343 uart_suspend_port(&sci_uart_driver, &p->port);
1303 1344
1304 if (p->type != PORT_UNKNOWN && p->port.dev == &dev->dev) 1345 spin_unlock_irqrestore(&priv->lock, flags);
1305 uart_suspend_port(&sci_uart_driver, &p->port);
1306 }
1307 1346
1308 return 0; 1347 return 0;
1309} 1348}
1310 1349
1311static int sci_resume(struct platform_device *dev) 1350static int sci_resume(struct platform_device *dev)
1312{ 1351{
1313 int i; 1352 struct sh_sci_priv *priv = platform_get_drvdata(dev);
1353 struct sci_port *p;
1354 unsigned long flags;
1314 1355
1315 for (i = 0; i < SCI_NPORTS; i++) { 1356 spin_lock_irqsave(&priv->lock, flags);
1316 struct sci_port *p = &sci_ports[i]; 1357 list_for_each_entry(p, &priv->ports, node)
1358 uart_resume_port(&sci_uart_driver, &p->port);
1317 1359
1318 if (p->type != PORT_UNKNOWN && p->port.dev == &dev->dev) 1360 spin_unlock_irqrestore(&priv->lock, flags);
1319 uart_resume_port(&sci_uart_driver, &p->port);
1320 }
1321 1361
1322 return 0; 1362 return 0;
1323} 1363}
@@ -1339,8 +1379,6 @@ static int __init sci_init(void)
1339 1379
1340 printk(banner); 1380 printk(banner);
1341 1381
1342 sci_init_ports();
1343
1344 ret = uart_register_driver(&sci_uart_driver); 1382 ret = uart_register_driver(&sci_uart_driver);
1345 if (likely(ret == 0)) { 1383 if (likely(ret == 0)) {
1346 ret = platform_driver_register(&sci_driver); 1384 ret = platform_driver_register(&sci_driver);
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index d0aa82d7fce..38072c15b84 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -91,6 +91,9 @@
91# define SCSPTR5 0xa4050128 91# define SCSPTR5 0xa4050128
92# define SCIF_ORER 0x0001 /* overrun error bit */ 92# define SCIF_ORER 0x0001 /* overrun error bit */
93# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ 93# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
94#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
95# define SCIF_ORER 0x0001 /* overrun error bit */
96# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
94#elif defined(CONFIG_CPU_SUBTYPE_SH4_202) 97#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
95# define SCSPTR2 0xffe80020 /* 16 bit SCIF */ 98# define SCSPTR2 0xffe80020 /* 16 bit SCIF */
96# define SCIF_ORER 0x0001 /* overrun error bit */ 99# define SCIF_ORER 0x0001 /* overrun error bit */
@@ -314,7 +317,18 @@
314 } \ 317 } \
315 } 318 }
316 319
317#define CPU_SCIF_FNS(name, scif_offset, scif_size) \ 320#ifdef CONFIG_H8300
321/* h8300 don't have SCIF */
322#define CPU_SCIF_FNS(name) \
323 static inline unsigned int sci_##name##_in(struct uart_port *port) \
324 { \
325 return 0; \
326 } \
327 static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
328 { \
329 }
330#else
331#define CPU_SCIF_FNS(name, scif_offset, scif_size) \
318 static inline unsigned int sci_##name##_in(struct uart_port *port) \ 332 static inline unsigned int sci_##name##_in(struct uart_port *port) \
319 { \ 333 { \
320 SCI_IN(scif_size, scif_offset); \ 334 SCI_IN(scif_size, scif_offset); \
@@ -323,6 +337,7 @@
323 { \ 337 { \
324 SCI_OUT(scif_size, scif_offset, value); \ 338 SCI_OUT(scif_size, scif_offset, value); \
325 } 339 }
340#endif
326 341
327#define CPU_SCI_FNS(name, sci_offset, sci_size) \ 342#define CPU_SCI_FNS(name, sci_offset, sci_size) \
328 static inline unsigned int sci_##name##_in(struct uart_port* port) \ 343 static inline unsigned int sci_##name##_in(struct uart_port* port) \
@@ -360,8 +375,10 @@
360 sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \ 375 sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
361 h8_sci_offset, h8_sci_size) \ 376 h8_sci_offset, h8_sci_size) \
362 CPU_SCI_FNS(name, h8_sci_offset, h8_sci_size) 377 CPU_SCI_FNS(name, h8_sci_offset, h8_sci_size)
363#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) 378#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
364#elif defined(CONFIG_CPU_SUBTYPE_SH7723) 379 CPU_SCIF_FNS(name)
380#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
381 defined(CONFIG_CPU_SUBTYPE_SH7724)
365 #define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) \ 382 #define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) \
366 CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) 383 CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size)
367 #define SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) \ 384 #define SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) \
@@ -390,7 +407,8 @@ SCIF_FNS(SCFDR, 0x1c, 16)
390SCIF_FNS(SCxTDR, 0x20, 8) 407SCIF_FNS(SCxTDR, 0x20, 8)
391SCIF_FNS(SCxRDR, 0x24, 8) 408SCIF_FNS(SCxRDR, 0x24, 8)
392SCIF_FNS(SCLSR, 0x24, 16) 409SCIF_FNS(SCLSR, 0x24, 16)
393#elif defined(CONFIG_CPU_SUBTYPE_SH7723) 410#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
411 defined(CONFIG_CPU_SUBTYPE_SH7724)
394SCIx_FNS(SCSMR, 0x00, 16, 0x00, 16) 412SCIx_FNS(SCSMR, 0x00, 16, 0x00, 16)
395SCIx_FNS(SCBRR, 0x04, 8, 0x04, 8) 413SCIx_FNS(SCBRR, 0x04, 8, 0x04, 8)
396SCIx_FNS(SCSCR, 0x08, 16, 0x08, 16) 414SCIx_FNS(SCSCR, 0x08, 16, 0x08, 16)
@@ -604,10 +622,21 @@ static inline int sci_rxd_in(struct uart_port *port)
604 return ctrl_inb(SCSPTR5) & 0x0008 ? 1 : 0; /* SCIF5 */ 622 return ctrl_inb(SCSPTR5) & 0x0008 ? 1 : 0; /* SCIF5 */
605 return 1; 623 return 1;
606} 624}
625#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
626# define SCFSR 0x0010
627# define SCASSR 0x0014
628static inline int sci_rxd_in(struct uart_port *port)
629{
630 if (port->type == PORT_SCIF)
631 return ctrl_inw((port->mapbase + SCFSR)) & SCIF_BRK ? 1 : 0;
632 if (port->type == PORT_SCIFA)
633 return ctrl_inw((port->mapbase + SCASSR)) & SCIF_BRK ? 1 : 0;
634 return 1;
635}
607#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103) 636#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
608static inline int sci_rxd_in(struct uart_port *port) 637static inline int sci_rxd_in(struct uart_port *port)
609{ 638{
610 return sci_in(port, SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */ 639 return sci_in(port, SCSPTR)&0x0001 ? 1 : 0; /* SCIF */
611} 640}
612#elif defined(__H8300H__) || defined(__H8300S__) 641#elif defined(__H8300H__) || defined(__H8300S__)
613static inline int sci_rxd_in(struct uart_port *port) 642static inline int sci_rxd_in(struct uart_port *port)
@@ -757,7 +786,8 @@ static inline int sci_rxd_in(struct uart_port *port)
757 defined(CONFIG_CPU_SUBTYPE_SH7720) || \ 786 defined(CONFIG_CPU_SUBTYPE_SH7720) || \
758 defined(CONFIG_CPU_SUBTYPE_SH7721) 787 defined(CONFIG_CPU_SUBTYPE_SH7721)
759#define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(32*bps)-1) 788#define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(32*bps)-1)
760#elif defined(CONFIG_CPU_SUBTYPE_SH7723) 789#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
790 defined(CONFIG_CPU_SUBTYPE_SH7724)
761static inline int scbrr_calc(struct uart_port *port, int bps, int clk) 791static inline int scbrr_calc(struct uart_port *port, int bps, int clk)
762{ 792{
763 if (port->type == PORT_SCIF) 793 if (port->type == PORT_SCIF)
diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
new file mode 100644
index 00000000000..ac9e5d5f742
--- /dev/null
+++ b/drivers/serial/timbuart.c
@@ -0,0 +1,526 @@
1/*
2 * timbuart.c timberdale FPGA UART driver
3 * Copyright (c) 2009 Intel Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/* Supports:
20 * Timberdale FPGA UART
21 */
22
23#include <linux/pci.h>
24#include <linux/interrupt.h>
25#include <linux/serial_core.h>
26#include <linux/kernel.h>
27#include <linux/platform_device.h>
28#include <linux/ioport.h>
29
30#include "timbuart.h"
31
32struct timbuart_port {
33 struct uart_port port;
34 struct tasklet_struct tasklet;
35 int usedma;
36 u8 last_ier;
37 struct platform_device *dev;
38};
39
40static int baudrates[] = {9600, 19200, 38400, 57600, 115200, 230400, 460800,
41 921600, 1843200, 3250000};
42
43static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier);
44
45static irqreturn_t timbuart_handleinterrupt(int irq, void *devid);
46
47static void timbuart_stop_rx(struct uart_port *port)
48{
49 /* spin lock held by upper layer, disable all RX interrupts */
50 u8 ier = ioread8(port->membase + TIMBUART_IER) & ~RXFLAGS;
51 iowrite8(ier, port->membase + TIMBUART_IER);
52}
53
54static void timbuart_stop_tx(struct uart_port *port)
55{
56 /* spinlock held by upper layer, disable TX interrupt */
57 u8 ier = ioread8(port->membase + TIMBUART_IER) & ~TXBAE;
58 iowrite8(ier, port->membase + TIMBUART_IER);
59}
60
61static void timbuart_start_tx(struct uart_port *port)
62{
63 struct timbuart_port *uart =
64 container_of(port, struct timbuart_port, port);
65
66 /* do not transfer anything here -> fire off the tasklet */
67 tasklet_schedule(&uart->tasklet);
68}
69
70static void timbuart_flush_buffer(struct uart_port *port)
71{
72 u8 ctl = ioread8(port->membase + TIMBUART_CTRL) | TIMBUART_CTRL_FLSHTX;
73
74 iowrite8(ctl, port->membase + TIMBUART_CTRL);
75 iowrite8(TXBF, port->membase + TIMBUART_ISR);
76}
77
78static void timbuart_rx_chars(struct uart_port *port)
79{
80 struct tty_struct *tty = port->info->port.tty;
81
82 while (ioread8(port->membase + TIMBUART_ISR) & RXDP) {
83 u8 ch = ioread8(port->membase + TIMBUART_RXFIFO);
84 port->icount.rx++;
85 tty_insert_flip_char(tty, ch, TTY_NORMAL);
86 }
87
88 spin_unlock(&port->lock);
89 tty_flip_buffer_push(port->info->port.tty);
90 spin_lock(&port->lock);
91
92 dev_dbg(port->dev, "%s - total read %d bytes\n",
93 __func__, port->icount.rx);
94}
95
96static void timbuart_tx_chars(struct uart_port *port)
97{
98 struct circ_buf *xmit = &port->info->xmit;
99
100 while (!(ioread8(port->membase + TIMBUART_ISR) & TXBF) &&
101 !uart_circ_empty(xmit)) {
102 iowrite8(xmit->buf[xmit->tail],
103 port->membase + TIMBUART_TXFIFO);
104 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
105 port->icount.tx++;
106 }
107
108 dev_dbg(port->dev,
109 "%s - total written %d bytes, CTL: %x, RTS: %x, baud: %x\n",
110 __func__,
111 port->icount.tx,
112 ioread8(port->membase + TIMBUART_CTRL),
113 port->mctrl & TIOCM_RTS,
114 ioread8(port->membase + TIMBUART_BAUDRATE));
115}
116
117static void timbuart_handle_tx_port(struct uart_port *port, u8 isr, u8 *ier)
118{
119 struct timbuart_port *uart =
120 container_of(port, struct timbuart_port, port);
121 struct circ_buf *xmit = &port->info->xmit;
122
123 if (uart_circ_empty(xmit) || uart_tx_stopped(port))
124 return;
125
126 if (port->x_char)
127 return;
128
129 if (isr & TXFLAGS) {
130 timbuart_tx_chars(port);
131 /* clear all TX interrupts */
132 iowrite8(TXFLAGS, port->membase + TIMBUART_ISR);
133
134 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
135 uart_write_wakeup(port);
136 } else
137 /* Re-enable any tx interrupt */
138 *ier |= uart->last_ier & TXFLAGS;
139
 140	/* Keep the almost-empty interrupt armed while characters remain
 141	 * in the transmit buffer: when it fires we wake the upper layer,
 142	 * giving the bytes already queued some time to go out before
 143	 * more are pushed in.
 144	 */
145 if (!uart_circ_empty(xmit))
146 *ier |= TXBAE;
147
148 dev_dbg(port->dev, "%s - leaving\n", __func__);
149}
150
151void timbuart_handle_rx_port(struct uart_port *port, u8 isr, u8 *ier)
152{
153 if (isr & RXFLAGS) {
154 /* Some RX status is set */
155 if (isr & RXBF) {
156 u8 ctl = ioread8(port->membase + TIMBUART_CTRL) |
157 TIMBUART_CTRL_FLSHRX;
158 iowrite8(ctl, port->membase + TIMBUART_CTRL);
159 port->icount.overrun++;
160 } else if (isr & (RXDP))
161 timbuart_rx_chars(port);
162
163 /* ack all RX interrupts */
164 iowrite8(RXFLAGS, port->membase + TIMBUART_ISR);
165 }
166
167 /* always have the RX interrupts enabled */
168 *ier |= RXBAF | RXBF | RXTT;
169
170 dev_dbg(port->dev, "%s - leaving\n", __func__);
171}
172
173void timbuart_tasklet(unsigned long arg)
174{
175 struct timbuart_port *uart = (struct timbuart_port *)arg;
176 u8 isr, ier = 0;
177
178 spin_lock(&uart->port.lock);
179
180 isr = ioread8(uart->port.membase + TIMBUART_ISR);
181 dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
182
183 if (!uart->usedma)
184 timbuart_handle_tx_port(&uart->port, isr, &ier);
185
186 timbuart_mctrl_check(&uart->port, isr, &ier);
187
188 if (!uart->usedma)
189 timbuart_handle_rx_port(&uart->port, isr, &ier);
190
191 iowrite8(ier, uart->port.membase + TIMBUART_IER);
192
193 spin_unlock(&uart->port.lock);
194 dev_dbg(uart->port.dev, "%s leaving\n", __func__);
195}
196
197static unsigned int timbuart_tx_empty(struct uart_port *port)
198{
199 u8 isr = ioread8(port->membase + TIMBUART_ISR);
200
201 return (isr & TXBAE) ? TIOCSER_TEMT : 0;
202}
203
204static unsigned int timbuart_get_mctrl(struct uart_port *port)
205{
206 u8 cts = ioread8(port->membase + TIMBUART_CTRL);
207 dev_dbg(port->dev, "%s - cts %x\n", __func__, cts);
208
209 if (cts & TIMBUART_CTRL_CTS)
210 return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
211 else
212 return TIOCM_DSR | TIOCM_CAR;
213}
214
215static void timbuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
216{
217 dev_dbg(port->dev, "%s - %x\n", __func__, mctrl);
218
219 if (mctrl & TIOCM_RTS)
220 iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
221 else
 222		iowrite8(0, port->membase + TIMBUART_CTRL); /* drop RTS */
223}
224
225static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier)
226{
227 unsigned int cts;
228
229 if (isr & CTS_DELTA) {
230 /* ack */
231 iowrite8(CTS_DELTA, port->membase + TIMBUART_ISR);
232 cts = timbuart_get_mctrl(port);
233 uart_handle_cts_change(port, cts & TIOCM_CTS);
234 wake_up_interruptible(&port->info->delta_msr_wait);
235 }
236
237 *ier |= CTS_DELTA;
238}
239
240static void timbuart_enable_ms(struct uart_port *port)
241{
242 /* N/A */
243}
244
245static void timbuart_break_ctl(struct uart_port *port, int ctl)
246{
247 /* N/A */
248}
249
250static int timbuart_startup(struct uart_port *port)
251{
252 struct timbuart_port *uart =
253 container_of(port, struct timbuart_port, port);
254
255 dev_dbg(port->dev, "%s\n", __func__);
256
257 iowrite8(TIMBUART_CTRL_FLSHRX, port->membase + TIMBUART_CTRL);
258 iowrite8(0xff, port->membase + TIMBUART_ISR);
259 /* Enable all but TX interrupts */
260 iowrite8(RXBAF | RXBF | RXTT | CTS_DELTA,
261 port->membase + TIMBUART_IER);
262
263 return request_irq(port->irq, timbuart_handleinterrupt, IRQF_SHARED,
264 "timb-uart", uart);
265}
266
267static void timbuart_shutdown(struct uart_port *port)
268{
269 struct timbuart_port *uart =
270 container_of(port, struct timbuart_port, port);
271 dev_dbg(port->dev, "%s\n", __func__);
272 free_irq(port->irq, uart);
273 iowrite8(0, port->membase + TIMBUART_IER);
274}
275
276static int get_bindex(int baud)
277{
278 int i;
279
280 for (i = 0; i < ARRAY_SIZE(baudrates); i++)
281 if (baud <= baudrates[i])
282 return i;
283
284 return -1;
285}
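
get_bindex() rounds a requested rate up to the nearest supported table entry; it is the index, not the rate itself, that set_termios() writes to the BAUDRATE register, with a negative result clamped to index 0. A small userspace harness over the same table:

#include <stdio.h>

static int baudrates[] = {9600, 19200, 38400, 57600, 115200, 230400,
			  460800, 921600, 1843200, 3250000};

static int get_bindex(int baud)
{
	for (unsigned int i = 0; i < sizeof(baudrates) / sizeof(baudrates[0]); i++)
		if (baud <= baudrates[i])
			return i;
	return -1;
}

int main(void)
{
	printf("%d %d %d\n",
	       get_bindex(9600),	/* 0 */
	       get_bindex(100000),	/* 4: rounded up to 115200 */
	       get_bindex(4000000));	/* -1: caller clamps to index 0 */
	return 0;
}
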
286
287static void timbuart_set_termios(struct uart_port *port,
288 struct ktermios *termios,
289 struct ktermios *old)
290{
291 unsigned int baud;
292 short bindex;
293 unsigned long flags;
294
295 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
296 bindex = get_bindex(baud);
297 dev_dbg(port->dev, "%s - bindex %d\n", __func__, bindex);
298
299 if (bindex < 0)
300 bindex = 0;
301 baud = baudrates[bindex];
302
303 /* The serial layer calls into this once with old = NULL when setting
304 up initially */
305 if (old)
306 tty_termios_copy_hw(termios, old);
307 tty_termios_encode_baud_rate(termios, baud, baud);
308
309 spin_lock_irqsave(&port->lock, flags);
310 iowrite8((u8)bindex, port->membase + TIMBUART_BAUDRATE);
311 uart_update_timeout(port, termios->c_cflag, baud);
312 spin_unlock_irqrestore(&port->lock, flags);
313}
314
315static const char *timbuart_type(struct uart_port *port)
316{
 317	return port->type == PORT_TIMBUART ? "timbuart" : NULL;
318}
319
320/* We do not request/release mappings of the registers here,
 321 * currently it's done in the probe function.
322 */
323static void timbuart_release_port(struct uart_port *port)
324{
325 struct platform_device *pdev = to_platform_device(port->dev);
326 int size =
327 resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
328
329 if (port->flags & UPF_IOREMAP) {
330 iounmap(port->membase);
331 port->membase = NULL;
332 }
333
334 release_mem_region(port->mapbase, size);
335}
336
337static int timbuart_request_port(struct uart_port *port)
338{
339 struct platform_device *pdev = to_platform_device(port->dev);
340 int size =
341 resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
342
343 if (!request_mem_region(port->mapbase, size, "timb-uart"))
344 return -EBUSY;
345
346 if (port->flags & UPF_IOREMAP) {
347 port->membase = ioremap(port->mapbase, size);
348 if (port->membase == NULL) {
349 release_mem_region(port->mapbase, size);
350 return -ENOMEM;
351 }
352 }
353
354 return 0;
355}
356
357static irqreturn_t timbuart_handleinterrupt(int irq, void *devid)
358{
359 struct timbuart_port *uart = (struct timbuart_port *)devid;
360
361 if (ioread8(uart->port.membase + TIMBUART_IPR)) {
362 uart->last_ier = ioread8(uart->port.membase + TIMBUART_IER);
363
364 /* disable interrupts, the tasklet enables them again */
365 iowrite8(0, uart->port.membase + TIMBUART_IER);
366
367 /* fire off bottom half */
368 tasklet_schedule(&uart->tasklet);
369
370 return IRQ_HANDLED;
371 } else
372 return IRQ_NONE;
373}
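
timbuart_handleinterrupt() is a classic top half: it snapshots IER into last_ier, masks every source, and defers the real work to the tasklet, which services the port and re-arms only the interrupts it still wants. A userspace model of that hand-off (ioread8/iowrite8 and tasklet_schedule are the kernel primitives being imitated):

#include <stdint.h>
#include <stdio.h>

static uint8_t ier = 0x1f, last_ier;	/* hypothetical enabled sources */

static void tasklet_body(void)
{
	uint8_t new_ier = 0;

	/* ... service RX/TX, deciding which sources to keep ... */
	new_ier |= last_ier;	/* here: restore everything the IRQ masked */
	ier = new_ier;		/* models iowrite8(ier, ...IER) */
}

static void hard_irq(void)
{
	last_ier = ier;		/* remember what was armed */
	ier = 0;		/* mask all: no re-entry until the tasklet runs */
	tasklet_body();		/* models the deferred tasklet_schedule() */
}

int main(void)
{
	hard_irq();
	printf("IER after bottom half: %#x\n", ier);	/* 0x1f restored */
	return 0;
}
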
374
375/*
376 * Configure/autoconfigure the port.
377 */
378static void timbuart_config_port(struct uart_port *port, int flags)
379{
380 if (flags & UART_CONFIG_TYPE) {
381 port->type = PORT_TIMBUART;
382 timbuart_request_port(port);
383 }
384}
385
386static int timbuart_verify_port(struct uart_port *port,
387 struct serial_struct *ser)
388{
389 /* we don't want the core code to modify any port params */
390 return -EINVAL;
391}
392
393static struct uart_ops timbuart_ops = {
394 .tx_empty = timbuart_tx_empty,
395 .set_mctrl = timbuart_set_mctrl,
396 .get_mctrl = timbuart_get_mctrl,
397 .stop_tx = timbuart_stop_tx,
398 .start_tx = timbuart_start_tx,
399 .flush_buffer = timbuart_flush_buffer,
400 .stop_rx = timbuart_stop_rx,
401 .enable_ms = timbuart_enable_ms,
402 .break_ctl = timbuart_break_ctl,
403 .startup = timbuart_startup,
404 .shutdown = timbuart_shutdown,
405 .set_termios = timbuart_set_termios,
406 .type = timbuart_type,
407 .release_port = timbuart_release_port,
408 .request_port = timbuart_request_port,
409 .config_port = timbuart_config_port,
410 .verify_port = timbuart_verify_port
411};
412
413static struct uart_driver timbuart_driver = {
414 .owner = THIS_MODULE,
415 .driver_name = "timberdale_uart",
416 .dev_name = "ttyTU",
417 .major = TIMBUART_MAJOR,
418 .minor = TIMBUART_MINOR,
419 .nr = 1
420};
421
422static int timbuart_probe(struct platform_device *dev)
423{
424 int err;
425 struct timbuart_port *uart;
426 struct resource *iomem;
427
428 dev_dbg(&dev->dev, "%s\n", __func__);
429
430 uart = kzalloc(sizeof(*uart), GFP_KERNEL);
431 if (!uart) {
432		err = -ENOMEM;
433 goto err_mem;
434 }
435
436 uart->usedma = 0;
437
438 uart->port.uartclk = 3250000 * 16;
439 uart->port.fifosize = TIMBUART_FIFO_SIZE;
440 uart->port.regshift = 2;
441 uart->port.iotype = UPIO_MEM;
442 uart->port.ops = &timbuart_ops;
443 uart->port.irq = 0;
444 uart->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
445 uart->port.line = 0;
446 uart->port.dev = &dev->dev;
447
448 iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
449 if (!iomem) {
450 err = -ENOMEM;
451 goto err_register;
452 }
453 uart->port.mapbase = iomem->start;
454 uart->port.membase = NULL;
455
456 uart->port.irq = platform_get_irq(dev, 0);
457 if (uart->port.irq < 0) {
458 err = -EINVAL;
459 goto err_register;
460 }
461
462 tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart);
463
464 err = uart_register_driver(&timbuart_driver);
465 if (err)
466 goto err_register;
467
468 err = uart_add_one_port(&timbuart_driver, &uart->port);
469 if (err)
470 goto err_add_port;
471
472 platform_set_drvdata(dev, uart);
473
474 return 0;
475
476err_add_port:
477 uart_unregister_driver(&timbuart_driver);
478err_register:
479 kfree(uart);
480err_mem:
481 printk(KERN_ERR "timberdale: Failed to register Timberdale UART: %d\n",
482 err);
483
484 return err;
485}
486
487static int timbuart_remove(struct platform_device *dev)
488{
489 struct timbuart_port *uart = platform_get_drvdata(dev);
490
491 tasklet_kill(&uart->tasklet);
492 uart_remove_one_port(&timbuart_driver, &uart->port);
493 uart_unregister_driver(&timbuart_driver);
494 kfree(uart);
495
496 return 0;
497}
498
499static struct platform_driver timbuart_platform_driver = {
500 .driver = {
501 .name = "timb-uart",
502 .owner = THIS_MODULE,
503 },
504 .probe = timbuart_probe,
505 .remove = timbuart_remove,
506};
507
508/*--------------------------------------------------------------------------*/
509
510static int __init timbuart_init(void)
511{
512 return platform_driver_register(&timbuart_platform_driver);
513}
514
515static void __exit timbuart_exit(void)
516{
517 platform_driver_unregister(&timbuart_platform_driver);
518}
519
520module_init(timbuart_init);
521module_exit(timbuart_exit);
522
523MODULE_DESCRIPTION("Timberdale UART driver");
524MODULE_LICENSE("GPL v2");
525MODULE_ALIAS("platform:timb-uart");
526
diff --git a/drivers/serial/timbuart.h b/drivers/serial/timbuart.h
new file mode 100644
index 00000000000..7e566766bc4
--- /dev/null
+++ b/drivers/serial/timbuart.h
@@ -0,0 +1,58 @@
1/*
2 * timbuart.h timberdale FPGA UART driver
3 * Copyright (c) 2009 Intel Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/* Supports:
20 * Timberdale FPGA UART
21 */
22
23#ifndef _TIMBUART_H
24#define _TIMBUART_H
25
26#define TIMBUART_FIFO_SIZE 2048
27
28#define TIMBUART_RXFIFO 0x08
29#define TIMBUART_TXFIFO 0x0c
30#define TIMBUART_IER 0x10
31#define TIMBUART_IPR 0x14
32#define TIMBUART_ISR 0x18
33#define TIMBUART_CTRL 0x1c
34#define TIMBUART_BAUDRATE 0x20
35
36#define TIMBUART_CTRL_RTS 0x01
37#define TIMBUART_CTRL_CTS 0x02
38#define TIMBUART_CTRL_FLSHTX 0x40
39#define TIMBUART_CTRL_FLSHRX 0x80
40
41#define TXBF 0x01
42#define TXBAE 0x02
43#define CTS_DELTA 0x04
44#define RXDP 0x08
45#define RXBAF 0x10
46#define RXBF 0x20
47#define RXTT 0x40
48#define RXBNAE 0x80
49#define TXBE 0x100
50
51#define RXFLAGS (RXDP | RXBAF | RXBF | RXTT | RXBNAE)
52#define TXFLAGS (TXBF | TXBAE)
53
54#define TIMBUART_MAJOR 204
55#define TIMBUART_MINOR 192
56
57#endif /* _TIMBUART_H */
58
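The RXFLAGS/TXFLAGS groups above bundle the per-condition status bits. A hedged usage sketch, not part of this patch (since TXBE is 0x100 the flags do not fit in a byte, so a 32-bit ISR read is assumed here):

	/* Illustrative only: split a raw ISR value into the RX and TX
	 * interrupt groups defined in timbuart.h. */
	static inline u32 example_isr_rx_work(void __iomem *membase)
	{
		return ioread32(membase + TIMBUART_ISR) & RXFLAGS;
	}

	static inline u32 example_isr_tx_work(void __iomem *membase)
	{
		return ioread32(membase + TIMBUART_ISR) & TXFLAGS;
	}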
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index 12d13d99b6f..d687a9b93d0 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -24,6 +24,7 @@
24#include <linux/sh_intc.h> 24#include <linux/sh_intc.h>
25#include <linux/sysdev.h> 25#include <linux/sysdev.h>
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/topology.h>
27 28
28#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \ 29#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
29 ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \ 30 ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -770,11 +771,19 @@ void __init register_intc_controller(struct intc_desc *desc)
770 /* register the vectors one by one */ 771 /* register the vectors one by one */
771 for (i = 0; i < desc->nr_vectors; i++) { 772 for (i = 0; i < desc->nr_vectors; i++) {
772 struct intc_vect *vect = desc->vectors + i; 773 struct intc_vect *vect = desc->vectors + i;
774 unsigned int irq = evt2irq(vect->vect);
775 struct irq_desc *irq_desc;
773 776
774 if (!vect->enum_id) 777 if (!vect->enum_id)
775 continue; 778 continue;
776 779
777 intc_register_irq(desc, d, vect->enum_id, evt2irq(vect->vect)); 780 irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
781 if (unlikely(!irq_desc)) {
782 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
783 continue;
784 }
785
786 intc_register_irq(desc, d, vect->enum_id, irq);
778 } 787 }
779} 788}
780 789
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 83a185d5296..e8aae227b5e 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -118,7 +118,7 @@ config SPI_GPIO
118 118
119config SPI_IMX 119config SPI_IMX
120 tristate "Freescale iMX SPI controller" 120 tristate "Freescale iMX SPI controller"
121 depends on ARCH_IMX && EXPERIMENTAL 121 depends on ARCH_MX1 && EXPERIMENTAL
122 help 122 help
123 This enables using the Freescale iMX SPI controller in master 123 This enables using the Freescale iMX SPI controller in master
124 mode. 124 mode.
@@ -171,6 +171,15 @@ config SPI_ORION
171 help 171 help
172 This enables using the SPI master controller on the Orion chips. 172 This enables using the SPI master controller on the Orion chips.
173 173
174config SPI_PL022
175 tristate "ARM AMBA PL022 SSP controller (EXPERIMENTAL)"
176 depends on ARM_AMBA && EXPERIMENTAL
177 default y if MACH_U300
178 help
179 This selects the ARM(R) AMBA(R) PrimeCell PL022 SSP
180 controller. If you have an embedded system with an AMBA(R)
181 bus and a PL022 controller, say Y or M here.
182
174config SPI_PXA2XX 183config SPI_PXA2XX
175 tristate "PXA2xx SSP SPI master" 184 tristate "PXA2xx SSP SPI master"
176 depends on ARCH_PXA && EXPERIMENTAL 185 depends on ARCH_PXA && EXPERIMENTAL
@@ -212,7 +221,7 @@ config SPI_TXX9
212 221
213config SPI_XILINX 222config SPI_XILINX
214 tristate "Xilinx SPI controller" 223 tristate "Xilinx SPI controller"
215 depends on XILINX_VIRTEX && EXPERIMENTAL 224 depends on (XILINX_VIRTEX || MICROBLAZE) && EXPERIMENTAL
216 select SPI_BITBANG 225 select SPI_BITBANG
217 help 226 help
218 This exposes the SPI controller IP from the Xilinx EDK. 227 This exposes the SPI controller IP from the Xilinx EDK.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 5d0451936d8..ecfadb18048 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
23obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o 23obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
24obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o 24obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
25obj-$(CONFIG_SPI_ORION) += orion_spi.o 25obj-$(CONFIG_SPI_ORION) += orion_spi.o
26obj-$(CONFIG_SPI_PL022) += amba-pl022.o
26obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o 27obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
27obj-$(CONFIG_SPI_MPC83xx) += spi_mpc83xx.o 28obj-$(CONFIG_SPI_MPC83xx) += spi_mpc83xx.o
28obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o 29obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
new file mode 100644
index 00000000000..da76797ce8b
--- /dev/null
+++ b/drivers/spi/amba-pl022.c
@@ -0,0 +1,1866 @@
1/*
2 * drivers/spi/amba-pl022.c
3 *
4 * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
5 *
6 * Copyright (C) 2008-2009 ST-Ericsson AB
7 * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
8 *
9 * Author: Linus Walleij <linus.walleij@stericsson.com>
10 *
11 * Initial version inspired by:
12 * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
13 * Initial adoption to PL022 by:
14 * Sachin Verma <sachin.verma@st.com>
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 */
26
27/*
28 * TODO:
29 * - add timeout on polled transfers
30 * - add generic DMA framework support
31 */
32
33#include <linux/init.h>
34#include <linux/module.h>
35#include <linux/device.h>
36#include <linux/ioport.h>
37#include <linux/errno.h>
38#include <linux/interrupt.h>
39#include <linux/spi/spi.h>
40#include <linux/workqueue.h>
42#include <linux/delay.h>
43#include <linux/clk.h>
44#include <linux/err.h>
45#include <linux/amba/bus.h>
46#include <linux/amba/pl022.h>
47#include <linux/io.h>
49
50/*
51 * This macro is used to define some register default values.
52 * reg is masked with mask, then OR:ed with an (again masked)
53 * val shifted sb steps to the left.
54 */
55#define SSP_WRITE_BITS(reg, val, mask, sb) \
56 ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
57
58/*
59 * This macro is also used to define some default values.
60 * It will just shift val by sb steps to the left and mask
61 * the result with mask.
62 */
63#define GEN_MASK_BITS(val, mask, sb) \
64 (((val)<<(sb)) & (mask))
65
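A worked expansion of the two helpers with plain numbers (illustrative only; SSP_CR0_MASK_FRF is 0x3UL << 21, defined further down):

	static void example_write_bits(void)
	{
		u32 reg = 0xFFFFFFFFUL;

		/* Insert the 2-bit field value 0x2 into bits 22:21. */
		SSP_WRITE_BITS(reg, 0x2, SSP_CR0_MASK_FRF, 21);
		/* reg == 0xFFDFFFFF: bits 22:21 now hold binary 10 and
		 * every other bit is untouched.  On its own,
		 * GEN_MASK_BITS(0x2, SSP_CR0_MASK_FRF, 21) == 0x00400000. */
	}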
66#define DRIVE_TX 0
67#define DO_NOT_DRIVE_TX 1
68
69#define DO_NOT_QUEUE_DMA 0
70#define QUEUE_DMA 1
71
72#define RX_TRANSFER 1
73#define TX_TRANSFER 2
74
75/*
76 * Macros to access SSP Registers with their offsets
77 */
78#define SSP_CR0(r) (r + 0x000)
79#define SSP_CR1(r) (r + 0x004)
80#define SSP_DR(r) (r + 0x008)
81#define SSP_SR(r) (r + 0x00C)
82#define SSP_CPSR(r) (r + 0x010)
83#define SSP_IMSC(r) (r + 0x014)
84#define SSP_RIS(r) (r + 0x018)
85#define SSP_MIS(r) (r + 0x01C)
86#define SSP_ICR(r) (r + 0x020)
87#define SSP_DMACR(r) (r + 0x024)
88#define SSP_ITCR(r) (r + 0x080)
89#define SSP_ITIP(r) (r + 0x084)
90#define SSP_ITOP(r) (r + 0x088)
91#define SSP_TDR(r) (r + 0x08C)
92
93#define SSP_PID0(r) (r + 0xFE0)
94#define SSP_PID1(r) (r + 0xFE4)
95#define SSP_PID2(r) (r + 0xFE8)
96#define SSP_PID3(r) (r + 0xFEC)
97
98#define SSP_CID0(r) (r + 0xFF0)
99#define SSP_CID1(r) (r + 0xFF4)
100#define SSP_CID2(r) (r + 0xFF8)
101#define SSP_CID3(r) (r + 0xFFC)
102
103/*
104 * SSP Control Register 0 - SSP_CR0
105 */
106#define SSP_CR0_MASK_DSS (0x1FUL << 0)
107#define SSP_CR0_MASK_HALFDUP (0x1UL << 5)
108#define SSP_CR0_MASK_SPO (0x1UL << 6)
109#define SSP_CR0_MASK_SPH (0x1UL << 7)
110#define SSP_CR0_MASK_SCR (0xFFUL << 8)
111#define SSP_CR0_MASK_CSS (0x1FUL << 16)
112#define SSP_CR0_MASK_FRF (0x3UL << 21)
113
114/*
115 * SSP Control Register 1 - SSP_CR1
116 */
117#define SSP_CR1_MASK_LBM (0x1UL << 0)
118#define SSP_CR1_MASK_SSE (0x1UL << 1)
119#define SSP_CR1_MASK_MS (0x1UL << 2)
120#define SSP_CR1_MASK_SOD (0x1UL << 3)
121#define SSP_CR1_MASK_RENDN (0x1UL << 4)
122#define SSP_CR1_MASK_TENDN (0x1UL << 5)
123#define SSP_CR1_MASK_MWAIT (0x1UL << 6)
124#define SSP_CR1_MASK_RXIFLSEL (0x7UL << 7)
125#define SSP_CR1_MASK_TXIFLSEL (0x7UL << 10)
126
127/*
128 * SSP Data Register - SSP_DR
129 */
130#define SSP_DR_MASK_DATA 0xFFFFFFFF
131
132/*
133 * SSP Status Register - SSP_SR
134 */
135#define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */
136#define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */
137#define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */
138#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */
139#define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */
140
141/*
142 * SSP Clock Prescale Register - SSP_CPSR
143 */
144#define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0)
145
146/*
147 * SSP Interrupt Mask Set/Clear Register - SSP_IMSC
148 */
149#define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */
150#define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */
151#define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */
152#define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */
153
154/*
155 * SSP Raw Interrupt Status Register - SSP_RIS
156 */
157/* Receive Overrun Raw Interrupt status */
158#define SSP_RIS_MASK_RORRIS (0x1UL << 0)
159/* Receive Timeout Raw Interrupt status */
160#define SSP_RIS_MASK_RTRIS (0x1UL << 1)
161/* Receive FIFO Raw Interrupt status */
162#define SSP_RIS_MASK_RXRIS (0x1UL << 2)
163/* Transmit FIFO Raw Interrupt status */
164#define SSP_RIS_MASK_TXRIS (0x1UL << 3)
165
166/*
167 * SSP Masked Interrupt Status Register - SSP_MIS
168 */
169/* Receive Overrun Masked Interrupt status */
170#define SSP_MIS_MASK_RORMIS (0x1UL << 0)
171/* Receive Timeout Masked Interrupt status */
172#define SSP_MIS_MASK_RTMIS (0x1UL << 1)
173/* Receive FIFO Masked Interrupt status */
174#define SSP_MIS_MASK_RXMIS (0x1UL << 2)
175/* Transmit FIFO Masked Interrupt status */
176#define SSP_MIS_MASK_TXMIS (0x1UL << 3)
177
178/*
179 * SSP Interrupt Clear Register - SSP_ICR
180 */
181/* Receive Overrun Raw Clear Interrupt bit */
182#define SSP_ICR_MASK_RORIC (0x1UL << 0)
183/* Receive Timeout Clear Interrupt bit */
184#define SSP_ICR_MASK_RTIC (0x1UL << 1)
185
186/*
187 * SSP DMA Control Register - SSP_DMACR
188 */
189/* Receive DMA Enable bit */
190#define SSP_DMACR_MASK_RXDMAE (0x1UL << 0)
191/* Transmit DMA Enable bit */
192#define SSP_DMACR_MASK_TXDMAE (0x1UL << 1)
193
194/*
195 * SSP Integration Test control Register - SSP_ITCR
196 */
197#define SSP_ITCR_MASK_ITEN (0x1UL << 0)
198#define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1)
199
200/*
201 * SSP Integration Test Input Register - SSP_ITIP
202 */
203#define ITIP_MASK_SSPRXD (0x1UL << 0)
204#define ITIP_MASK_SSPFSSIN (0x1UL << 1)
205#define ITIP_MASK_SSPCLKIN (0x1UL << 2)
206#define ITIP_MASK_RXDMAC (0x1UL << 3)
207#define ITIP_MASK_TXDMAC (0x1UL << 4)
208#define ITIP_MASK_SSPTXDIN (0x1UL << 5)
209
210/*
211 * SSP Integration Test output Register - SSP_ITOP
212 */
213#define ITOP_MASK_SSPTXD (0x1UL << 0)
214#define ITOP_MASK_SSPFSSOUT (0x1UL << 1)
215#define ITOP_MASK_SSPCLKOUT (0x1UL << 2)
216#define ITOP_MASK_SSPOEn (0x1UL << 3)
217#define ITOP_MASK_SSPCTLOEn (0x1UL << 4)
218#define ITOP_MASK_RORINTR (0x1UL << 5)
219#define ITOP_MASK_RTINTR (0x1UL << 6)
220#define ITOP_MASK_RXINTR (0x1UL << 7)
221#define ITOP_MASK_TXINTR (0x1UL << 8)
222#define ITOP_MASK_INTR (0x1UL << 9)
223#define ITOP_MASK_RXDMABREQ (0x1UL << 10)
224#define ITOP_MASK_RXDMASREQ (0x1UL << 11)
225#define ITOP_MASK_TXDMABREQ (0x1UL << 12)
226#define ITOP_MASK_TXDMASREQ (0x1UL << 13)
227
228/*
229 * SSP Test Data Register - SSP_TDR
230 */
231#define TDR_MASK_TESTDATA (0xFFFFFFFF)
232
233/*
234 * Message State
235 * we use the spi_message.state (void *) pointer to
236 * hold a single state value, that's why all this
237 * (void *) casting is done here.
238 */
239#define STATE_START ((void *) 0)
240#define STATE_RUNNING ((void *) 1)
241#define STATE_DONE ((void *) 2)
242#define STATE_ERROR ((void *) -1)
243
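Because the tokens above are plain pointer values, state tests throughout the driver reduce to pointer equality; an illustrative helper (not in the patch):

	/* msg->state carries one of the STATE_* tokens, so checking it
	 * is a simple pointer comparison. */
	static inline int example_msg_done(struct spi_message *msg)
	{
		return msg->state == STATE_DONE;
	}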
244/*
245 * Queue State
246 */
247#define QUEUE_RUNNING (0)
248#define QUEUE_STOPPED (1)
249/*
250 * SSP State - Whether Enabled or Disabled
251 */
252#define SSP_DISABLED (0)
253#define SSP_ENABLED (1)
254
255/*
256 * SSP DMA State - Whether DMA Enabled or Disabled
257 */
258#define SSP_DMA_DISABLED (0)
259#define SSP_DMA_ENABLED (1)
260
261/*
262 * SSP Clock Defaults
263 */
264#define NMDK_SSP_DEFAULT_CLKRATE 0x2
265#define NMDK_SSP_DEFAULT_PRESCALE 0x40
266
267/*
268 * SSP Clock Parameter ranges
269 */
270#define CPSDVR_MIN 0x02
271#define CPSDVR_MAX 0xFE
272#define SCR_MIN 0x00
273#define SCR_MAX 0xFF
274
275/*
276 * SSP Interrupt related Macros
277 */
278#define DEFAULT_SSP_REG_IMSC 0x0UL
279#define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
280#define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC)
281
282#define CLEAR_ALL_INTERRUPTS 0x3
283
284
285/*
286 * The type of reading going on on this chip
287 */
288enum ssp_reading {
289 READING_NULL,
290 READING_U8,
291 READING_U16,
292 READING_U32
293};
294
295/**
296 * The type of writing going on on this chip
297 */
298enum ssp_writing {
299 WRITING_NULL,
300 WRITING_U8,
301 WRITING_U16,
302 WRITING_U32
303};
304
305/**
306 * struct vendor_data - vendor-specific config parameters
307 * for PL022 derivatives
308 * @fifodepth: depth of FIFOs (both)
309 * @max_bpw: maximum number of bits per word
310 * @unidir: supports unidirectional transfers
311 */
312struct vendor_data {
313 int fifodepth;
314 int max_bpw;
315 bool unidir;
316};
317
318/**
319 * struct pl022 - This is the private SSP driver data structure
320 * @adev: AMBA device model hookup
321 * @phybase: The physical memory where the SSP device resides
322 * @virtbase: The virtual memory where the SSP is mapped
323 * @master: SPI framework hookup
324 * @master_info: controller-specific data from machine setup
325 * @regs: SSP controller register's virtual address
326 * @pump_messages: Work struct for scheduling work to the workqueue
327 * @queue_lock: spinlock to synchronise access to driver data
328 * @workqueue: a workqueue on which any spi_message request is queued
329 * @busy: workqueue is busy
330 * @run: workqueue is running
331 * @pump_transfers: Tasklet used in Interrupt Transfer mode
332 * @cur_msg: Pointer to current spi_message being processed
333 * @cur_transfer: Pointer to current spi_transfer
334 * @cur_chip: pointer to current client's chip (assigned from controller_state)
335 * @tx: current position in TX buffer to be read
336 * @tx_end: end position in TX buffer to be read
337 * @rx: current position in RX buffer to be written
338 * @rx_end: end position in RX buffer to be written
339 * @readingtype: the type of read currently going on
340 * @writingtype: the type of write currently going on
341 */
342struct pl022 {
343 struct amba_device *adev;
344 struct vendor_data *vendor;
345 resource_size_t phybase;
346 void __iomem *virtbase;
347 struct clk *clk;
348 struct spi_master *master;
349 struct pl022_ssp_controller *master_info;
350 /* Driver message queue */
351 struct workqueue_struct *workqueue;
352 struct work_struct pump_messages;
353 spinlock_t queue_lock;
354 struct list_head queue;
355 int busy;
356 int run;
357 /* Message transfer pump */
358 struct tasklet_struct pump_transfers;
359 struct spi_message *cur_msg;
360 struct spi_transfer *cur_transfer;
361 struct chip_data *cur_chip;
362 void *tx;
363 void *tx_end;
364 void *rx;
365 void *rx_end;
366 enum ssp_reading read;
367 enum ssp_writing write;
368};
369
370/**
371 * struct chip_data - To maintain runtime state of SSP for each client chip
372 * @cr0: Value of control register CR0 of SSP
373 * @cr1: Value of control register CR1 of SSP
374 * @dmacr: Value of DMA control Register of SSP
375 * @cpsr: Value of Clock prescale register
376 * @n_bytes: how many bytes (a power of 2) are required for the client's data width
377 * @enable_dma: Whether to enable DMA or not
378 * @write: the write width (enum ssp_writing) used in xfers for this chip
379 * @read: the read width (enum ssp_reading) used in xfers for this chip
380 * @cs_control: chip select callback provided by chip
381 * @xfer_type: polling/interrupt/DMA
382 *
383 * Runtime state of the SSP controller, maintained per chip;
384 * it is set up according to the current message being served
385 */
386struct chip_data {
387 u16 cr0;
388 u16 cr1;
389 u16 dmacr;
390 u16 cpsr;
391 u8 n_bytes;
392 u8 enable_dma:1;
393 enum ssp_reading read;
394 enum ssp_writing write;
395 void (*cs_control) (u32 command);
396 int xfer_type;
397};
398
399/**
400 * null_cs_control - Dummy chip select function
401 * @command: select/deselect the chip
402 *
403 * If no chip select function is provided by the client, this is used as a
404 * dummy chip select
405 */
406static void null_cs_control(u32 command)
407{
408 pr_debug("pl022: dummy chip select control, CS=0x%x\n", command);
409}
410
411/**
412 * giveback - current spi_message is over, schedule next message and call
413 * callback of this message. Assumes that caller already
414 * set message->status; dma and pio irqs are blocked
415 * @pl022: SSP driver private data structure
416 */
417static void giveback(struct pl022 *pl022)
418{
419 struct spi_transfer *last_transfer;
420 unsigned long flags;
421 struct spi_message *msg;
422 void (*curr_cs_control) (u32 command);
423
424 /*
425 * This local reference to the chip select function
426 * is needed because we set curr_chip to NULL
427	 * as a step toward terminating the message.
428 */
429 curr_cs_control = pl022->cur_chip->cs_control;
430 spin_lock_irqsave(&pl022->queue_lock, flags);
431 msg = pl022->cur_msg;
432 pl022->cur_msg = NULL;
433 pl022->cur_transfer = NULL;
434 pl022->cur_chip = NULL;
435 queue_work(pl022->workqueue, &pl022->pump_messages);
436 spin_unlock_irqrestore(&pl022->queue_lock, flags);
437
438 last_transfer = list_entry(msg->transfers.prev,
439 struct spi_transfer,
440 transfer_list);
441
442 /* Delay if requested before any change in chip select */
443 if (last_transfer->delay_usecs)
444 /*
445 * FIXME: This runs in interrupt context.
446 * Is this really smart?
447 */
448 udelay(last_transfer->delay_usecs);
449
450 /*
451 * Drop chip select UNLESS cs_change is true or we are returning
452 * a message with an error, or next message is for another chip
453 */
454 if (!last_transfer->cs_change)
455 curr_cs_control(SSP_CHIP_DESELECT);
456 else {
457 struct spi_message *next_msg;
458
459 /* Holding of cs was hinted, but we need to make sure
460 * the next message is for the same chip. Don't waste
461 * time with the following tests unless this was hinted.
462 *
463 * We cannot postpone this until pump_messages, because
464 * after calling msg->complete (below) the driver that
465 * sent the current message could be unloaded, which
466 * could invalidate the cs_control() callback...
467 */
468
469 /* get a pointer to the next message, if any */
470 spin_lock_irqsave(&pl022->queue_lock, flags);
471 if (list_empty(&pl022->queue))
472 next_msg = NULL;
473 else
474 next_msg = list_entry(pl022->queue.next,
475 struct spi_message, queue);
476 spin_unlock_irqrestore(&pl022->queue_lock, flags);
477
478 /* see if the next and current messages point
479 * to the same chip
480 */
481 if (next_msg && next_msg->spi != msg->spi)
482 next_msg = NULL;
483 if (!next_msg || msg->state == STATE_ERROR)
484 curr_cs_control(SSP_CHIP_DESELECT);
485 }
486 msg->state = NULL;
487 if (msg->complete)
488 msg->complete(msg->context);
489 /* This message is completed, so let's turn off the clock! */
490 clk_disable(pl022->clk);
491}
492
493/**
494 * flush - flush the FIFO to reach a clean state
495 * @pl022: SSP driver private data structure
496 */
497static int flush(struct pl022 *pl022)
498{
499 unsigned long limit = loops_per_jiffy << 1;
500
501 dev_dbg(&pl022->adev->dev, "flush\n");
502 do {
503 while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
504 readw(SSP_DR(pl022->virtbase));
505 } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);
506 return limit;
507}
508
509/**
510 * restore_state - Load configuration of current chip
511 * @pl022: SSP driver private data structure
512 */
513static void restore_state(struct pl022 *pl022)
514{
515 struct chip_data *chip = pl022->cur_chip;
516
517 writew(chip->cr0, SSP_CR0(pl022->virtbase));
518 writew(chip->cr1, SSP_CR1(pl022->virtbase));
519 writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
520 writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
521 writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
522 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
523}
524
525/**
526 * load_ssp_default_config - Load default configuration for SSP
527 * @pl022: SSP driver private data structure
528 */
529
530/*
531 * Default SSP Register Values
532 */
533#define DEFAULT_SSP_REG_CR0 ( \
534 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
535 GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \
536 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
537 GEN_MASK_BITS(SSP_CLK_FALLING_EDGE, SSP_CR0_MASK_SPH, 7) | \
538 GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
539 GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \
540 GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \
541)
542
543#define DEFAULT_SSP_REG_CR1 ( \
544 GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
545 GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
546 GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
547 GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
548 GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN, 4) | \
549 GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN, 5) | \
550 GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT, 6) |\
551 GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL, 7) | \
552 GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL, 10) \
553)
554
555#define DEFAULT_SSP_REG_CPSR ( \
556 GEN_MASK_BITS(NMDK_SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
557)
558
559#define DEFAULT_SSP_REG_DMACR (\
560 GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
561 GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
562)
563
564
565static void load_ssp_default_config(struct pl022 *pl022)
566{
567 writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
568 writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
569 writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
570 writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
571 writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
572 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
573}
574
575/**
576 * This will write to TX and read from RX according to the parameters
577 * set in pl022.
578 */
579static void readwriter(struct pl022 *pl022)
580{
581
582 /*
583	 * The FIFO depth differs between PrimeCell variants.
584	 * I believe filling in too much in the FIFO might cause
585	 * errors in 8-bit wide transfers on ARM variants (just 8 words
586	 * FIFO, meaning only 8x8 = 64 bits in FIFO) at least.
587 *
588 * FIXME: currently we have no logic to account for this.
589 * perhaps there is even something broken in HW regarding
590 * 8bit transfers (it doesn't fail on 16bit) so this needs
591 * more investigation...
592 */
593 dev_dbg(&pl022->adev->dev,
594 "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
595 __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);
596
597 /* Read as much as you can */
598 while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
599 && (pl022->rx < pl022->rx_end)) {
600 switch (pl022->read) {
601 case READING_NULL:
602 readw(SSP_DR(pl022->virtbase));
603 break;
604 case READING_U8:
605 *(u8 *) (pl022->rx) =
606 readw(SSP_DR(pl022->virtbase)) & 0xFFU;
607 break;
608 case READING_U16:
609 *(u16 *) (pl022->rx) =
610 (u16) readw(SSP_DR(pl022->virtbase));
611 break;
612 case READING_U32:
613 *(u32 *) (pl022->rx) =
614 readl(SSP_DR(pl022->virtbase));
615 break;
616 }
617 pl022->rx += (pl022->cur_chip->n_bytes);
618 }
619 /*
620 * Write as much as you can, while keeping an eye on the RX FIFO!
621 */
622 while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF)
623 && (pl022->tx < pl022->tx_end)) {
624 switch (pl022->write) {
625 case WRITING_NULL:
626 writew(0x0, SSP_DR(pl022->virtbase));
627 break;
628 case WRITING_U8:
629 writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
630 break;
631 case WRITING_U16:
632 writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
633 break;
634 case WRITING_U32:
635 writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
636 break;
637 }
638 pl022->tx += (pl022->cur_chip->n_bytes);
639 /*
640 * This inner reader takes care of things appearing in the RX
641 * FIFO as we're transmitting. This will happen a lot since the
642 * clock starts running when you put things into the TX FIFO,
643		 * and then things are continuously clocked into the RX FIFO.
644 */
645 while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
646 && (pl022->rx < pl022->rx_end)) {
647 switch (pl022->read) {
648 case READING_NULL:
649 readw(SSP_DR(pl022->virtbase));
650 break;
651 case READING_U8:
652 *(u8 *) (pl022->rx) =
653 readw(SSP_DR(pl022->virtbase)) & 0xFFU;
654 break;
655 case READING_U16:
656 *(u16 *) (pl022->rx) =
657 (u16) readw(SSP_DR(pl022->virtbase));
658 break;
659 case READING_U32:
660 *(u32 *) (pl022->rx) =
661 readl(SSP_DR(pl022->virtbase));
662 break;
663 }
664 pl022->rx += (pl022->cur_chip->n_bytes);
665 }
666 }
667 /*
668 * When we exit here the TX FIFO should be full and the RX FIFO
669 * should be empty
670 */
671}
672
673
674/**
675 * next_transfer - Move to the Next transfer in the current spi message
676 * @pl022: SSP driver private data structure
677 *
678 * This function moves through the linked list of spi transfers in the
679 * current spi message and returns the state of the current spi
680 * message, i.e. whether its last transfer is done (STATE_DONE) or
681 * the next transfer is ready (STATE_RUNNING)
682 */
683static void *next_transfer(struct pl022 *pl022)
684{
685 struct spi_message *msg = pl022->cur_msg;
686 struct spi_transfer *trans = pl022->cur_transfer;
687
688 /* Move to next transfer */
689 if (trans->transfer_list.next != &msg->transfers) {
690 pl022->cur_transfer =
691 list_entry(trans->transfer_list.next,
692 struct spi_transfer, transfer_list);
693 return STATE_RUNNING;
694 }
695 return STATE_DONE;
696}
697/**
698 * pl022_interrupt_handler - Interrupt handler for SSP controller
699 *
700 * This function handles interrupts generated for an interrupt based transfer.
701 * If a receive overrun (ROR) interrupt is present, we disable the SSP, flag
702 * the current message's state as STATE_ERROR and schedule the tasklet
703 * pump_transfers, which does the postprocessing of the current message by
704 * calling giveback(). Otherwise it drains the RX FIFO while data is
705 * available and fills the TX FIFO while it is not full. Once the transfer
706 * completes, we move to the next transfer and schedule the tasklet.
707 */
708static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
709{
710 struct pl022 *pl022 = dev_id;
711 struct spi_message *msg = pl022->cur_msg;
712 u16 irq_status = 0;
713 u16 flag = 0;
714
715 if (unlikely(!msg)) {
716 dev_err(&pl022->adev->dev,
717 "bad message state in interrupt handler");
718 /* Never fail */
719 return IRQ_HANDLED;
720 }
721
722 /* Read the Interrupt Status Register */
723 irq_status = readw(SSP_MIS(pl022->virtbase));
724
725 if (unlikely(!irq_status))
726 return IRQ_NONE;
727
728 /* This handles the error code interrupts */
729 if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
730 /*
731 * Overrun interrupt - bail out since our Data has been
732 * corrupted
733 */
734 dev_err(&pl022->adev->dev,
735 "FIFO overrun\n");
736 if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
737 dev_err(&pl022->adev->dev,
738 "RXFIFO is full\n");
739 if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF)
740 dev_err(&pl022->adev->dev,
741 "TXFIFO is full\n");
742
743 /*
744 * Disable and clear interrupts, disable SSP,
745 * mark message with bad status so it can be
746 * retried.
747 */
748 writew(DISABLE_ALL_INTERRUPTS,
749 SSP_IMSC(pl022->virtbase));
750 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
751 writew((readw(SSP_CR1(pl022->virtbase)) &
752 (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
753 msg->state = STATE_ERROR;
754
755 /* Schedule message queue handler */
756 tasklet_schedule(&pl022->pump_transfers);
757 return IRQ_HANDLED;
758 }
759
760 readwriter(pl022);
761
762 if ((pl022->tx == pl022->tx_end) && (flag == 0)) {
763 flag = 1;
764 /* Disable Transmit interrupt */
765 writew(readw(SSP_IMSC(pl022->virtbase)) &
766 (~SSP_IMSC_MASK_TXIM),
767 SSP_IMSC(pl022->virtbase));
768 }
769
770 /*
771 * Since all transactions must write as much as shall be read,
772 * we can conclude the entire transaction once RX is complete.
773 * At this point, all TX will always be finished.
774 */
775 if (pl022->rx >= pl022->rx_end) {
776 writew(DISABLE_ALL_INTERRUPTS,
777 SSP_IMSC(pl022->virtbase));
778 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
779 if (unlikely(pl022->rx > pl022->rx_end)) {
780 dev_warn(&pl022->adev->dev, "read %u surplus "
781 "bytes (did you request an odd "
782 "number of bytes on a 16bit bus?)\n",
783 (u32) (pl022->rx - pl022->rx_end));
784 }
785		/* Update total bytes transferred */
786 msg->actual_length += pl022->cur_transfer->len;
787 if (pl022->cur_transfer->cs_change)
788 pl022->cur_chip->
789 cs_control(SSP_CHIP_DESELECT);
790 /* Move to next transfer */
791 msg->state = next_transfer(pl022);
792 tasklet_schedule(&pl022->pump_transfers);
793 return IRQ_HANDLED;
794 }
795
796 return IRQ_HANDLED;
797}
798
799/**
800 * This sets up the pointers to memory for the next transfer to
801 * send out on the SPI bus.
802 */
803static int set_up_next_transfer(struct pl022 *pl022,
804 struct spi_transfer *transfer)
805{
806 int residue;
807
808 /* Sanity check the message for this bus width */
809 residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
810 if (unlikely(residue != 0)) {
811 dev_err(&pl022->adev->dev,
812 "message of %u bytes to transmit but the current "
813 "chip bus has a data width of %u bytes!\n",
814 pl022->cur_transfer->len,
815 pl022->cur_chip->n_bytes);
816 dev_err(&pl022->adev->dev, "skipping this message\n");
817 return -EIO;
818 }
819 pl022->tx = (void *)transfer->tx_buf;
820 pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
821 pl022->rx = (void *)transfer->rx_buf;
822 pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
823 pl022->write =
824 pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
825 pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
826 return 0;
827}
828
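A quick worked instance of the residue check above, with illustrative numbers:

	/*
	 * A 5-byte transfer on a chip using 16-bit words (n_bytes == 2)
	 * leaves 5 % 2 == 1, so set_up_next_transfer() returns -EIO and
	 * the message is skipped; a 6-byte transfer passes cleanly.
	 */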
829/**
830 * pump_transfers - Tasklet function which schedules next interrupt transfer
831 * when running in interrupt transfer mode.
832 * @data: SSP driver private data structure
833 *
834 */
835static void pump_transfers(unsigned long data)
836{
837 struct pl022 *pl022 = (struct pl022 *) data;
838 struct spi_message *message = NULL;
839 struct spi_transfer *transfer = NULL;
840 struct spi_transfer *previous = NULL;
841
842 /* Get current state information */
843 message = pl022->cur_msg;
844 transfer = pl022->cur_transfer;
845
846 /* Handle for abort */
847 if (message->state == STATE_ERROR) {
848 message->status = -EIO;
849 giveback(pl022);
850 return;
851 }
852
853 /* Handle end of message */
854 if (message->state == STATE_DONE) {
855 message->status = 0;
856 giveback(pl022);
857 return;
858 }
859
860 /* Delay if requested at end of transfer before CS change */
861 if (message->state == STATE_RUNNING) {
862 previous = list_entry(transfer->transfer_list.prev,
863 struct spi_transfer,
864 transfer_list);
865 if (previous->delay_usecs)
866 /*
867 * FIXME: This runs in interrupt context.
868 * Is this really smart?
869 */
870 udelay(previous->delay_usecs);
871
872 /* Drop chip select only if cs_change is requested */
873 if (previous->cs_change)
874 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
875 } else {
876 /* STATE_START */
877 message->state = STATE_RUNNING;
878 }
879
880 if (set_up_next_transfer(pl022, transfer)) {
881 message->state = STATE_ERROR;
882 message->status = -EIO;
883 giveback(pl022);
884 return;
885 }
886 /* Flush the FIFOs and let's go! */
887 flush(pl022);
888 writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
889}
890
891/**
892 * NOT IMPLEMENTED
893 * configure_dma - It configures the DMA pipes for DMA transfers
894 * @data: SSP driver's private data structure
895 *
896 */
897static int configure_dma(void *data)
898{
899 struct pl022 *pl022 = data;
900 dev_dbg(&pl022->adev->dev, "configure DMA\n");
901 return -ENOTSUPP;
902}
903
904/**
905 * do_dma_transfer - handles transfer of the current message
906 * if it is a DMA xfer.
907 * NOT FULLY IMPLEMENTED
908 * @data: SSP driver's private data structure
909 */
910static void do_dma_transfer(void *data)
911{
912 struct pl022 *pl022 = data;
913
914 if (configure_dma(data)) {
915 dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n");
916 goto err_config_dma;
917 }
918
919	/* TODO: Implement DMA setup of pipes here */
920
921 /* Enable target chip, set up transfer */
922 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
923 if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
924 /* Error path */
925 pl022->cur_msg->state = STATE_ERROR;
926 pl022->cur_msg->status = -EIO;
927 giveback(pl022);
928 return;
929 }
930 /* Enable SSP */
931 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
932 SSP_CR1(pl022->virtbase));
933
934 /* TODO: Enable the DMA transfer here */
935 return;
936
937 err_config_dma:
938 pl022->cur_msg->state = STATE_ERROR;
939 pl022->cur_msg->status = -EIO;
940 giveback(pl022);
941 return;
942}
943
944static void do_interrupt_transfer(void *data)
945{
946 struct pl022 *pl022 = data;
947
948 /* Enable target chip */
949 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
950 if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
951 /* Error path */
952 pl022->cur_msg->state = STATE_ERROR;
953 pl022->cur_msg->status = -EIO;
954 giveback(pl022);
955 return;
956 }
957 /* Enable SSP, turn on interrupts */
958 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
959 SSP_CR1(pl022->virtbase));
960 writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
961}
962
963static void do_polling_transfer(void *data)
964{
965 struct pl022 *pl022 = data;
966 struct spi_message *message = NULL;
967 struct spi_transfer *transfer = NULL;
968 struct spi_transfer *previous = NULL;
969 struct chip_data *chip;
970
971 chip = pl022->cur_chip;
972 message = pl022->cur_msg;
973
974 while (message->state != STATE_DONE) {
975 /* Handle for abort */
976 if (message->state == STATE_ERROR)
977 break;
978 transfer = pl022->cur_transfer;
979
980 /* Delay if requested at end of transfer */
981 if (message->state == STATE_RUNNING) {
982 previous =
983 list_entry(transfer->transfer_list.prev,
984 struct spi_transfer, transfer_list);
985 if (previous->delay_usecs)
986 udelay(previous->delay_usecs);
987 if (previous->cs_change)
988 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
989 } else {
990 /* STATE_START */
991 message->state = STATE_RUNNING;
992 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
993 }
994
995 /* Configuration Changing Per Transfer */
996 if (set_up_next_transfer(pl022, transfer)) {
997 /* Error path */
998 message->state = STATE_ERROR;
999 break;
1000 }
1001 /* Flush FIFOs and enable SSP */
1002 flush(pl022);
1003 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
1004 SSP_CR1(pl022->virtbase));
1005
1006 dev_dbg(&pl022->adev->dev, "POLLING TRANSFER ONGOING ... \n");
1007		/* FIXME: insert a timeout so we don't hang here indefinitely */
1008 while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end)
1009 readwriter(pl022);
1010
1011		/* Update total bytes transferred */
1012 message->actual_length += pl022->cur_transfer->len;
1013 if (pl022->cur_transfer->cs_change)
1014 pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
1015 /* Move to next transfer */
1016 message->state = next_transfer(pl022);
1017 }
1018
1019 /* Handle end of message */
1020 if (message->state == STATE_DONE)
1021 message->status = 0;
1022 else
1023 message->status = -EIO;
1024
1025 giveback(pl022);
1026 return;
1027}
1028
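The FIXME inside do_polling_transfer() above flags the unbounded busy-wait; one hedged way to bound it (a jiffies-based sketch, not part of the patch) would be:

	/* Illustrative replacement for the unbounded loop above: give up
	 * after roughly one second instead of spinning forever. */
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
		if (time_after(jiffies, timeout))
			break;	/* the caller would then flag -ETIMEDOUT */
		readwriter(pl022);
	}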
1029/**
1030 * pump_messages - Workqueue function which processes spi message queue
1031 * @data: pointer to private data of SSP driver
1032 *
1033 * This function checks if there is any spi message in the queue that
1034 * needs processing and delegates control to the appropriate function,
1035 * do_polling_transfer()/do_interrupt_transfer()/do_dma_transfer(),
1036 * based on the kind of transfer
1037 *
1038 */
1039static void pump_messages(struct work_struct *work)
1040{
1041 struct pl022 *pl022 =
1042 container_of(work, struct pl022, pump_messages);
1043 unsigned long flags;
1044
1045 /* Lock queue and check for queue work */
1046 spin_lock_irqsave(&pl022->queue_lock, flags);
1047 if (list_empty(&pl022->queue) || pl022->run == QUEUE_STOPPED) {
1048 pl022->busy = 0;
1049 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1050 return;
1051 }
1052 /* Make sure we are not already running a message */
1053 if (pl022->cur_msg) {
1054 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1055 return;
1056 }
1057 /* Extract head of queue */
1058 pl022->cur_msg =
1059 list_entry(pl022->queue.next, struct spi_message, queue);
1060
1061 list_del_init(&pl022->cur_msg->queue);
1062 pl022->busy = 1;
1063 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1064
1065 /* Initial message state */
1066 pl022->cur_msg->state = STATE_START;
1067 pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next,
1068 struct spi_transfer,
1069 transfer_list);
1070
1071 /* Setup the SPI using the per chip configuration */
1072 pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
1073 /*
1074 * We enable the clock here, then the clock will be disabled when
1075 * giveback() is called in each method (poll/interrupt/DMA)
1076 */
1077 clk_enable(pl022->clk);
1078 restore_state(pl022);
1079 flush(pl022);
1080
1081 if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
1082 do_polling_transfer(pl022);
1083 else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER)
1084 do_interrupt_transfer(pl022);
1085 else
1086 do_dma_transfer(pl022);
1087}
1088
1089
1090static int __init init_queue(struct pl022 *pl022)
1091{
1092 INIT_LIST_HEAD(&pl022->queue);
1093 spin_lock_init(&pl022->queue_lock);
1094
1095 pl022->run = QUEUE_STOPPED;
1096 pl022->busy = 0;
1097
1098 tasklet_init(&pl022->pump_transfers,
1099 pump_transfers, (unsigned long)pl022);
1100
1101 INIT_WORK(&pl022->pump_messages, pump_messages);
1102 pl022->workqueue = create_singlethread_workqueue(
1103 dev_name(pl022->master->dev.parent));
1104 if (pl022->workqueue == NULL)
1105 return -EBUSY;
1106
1107 return 0;
1108}
1109
1110
1111static int start_queue(struct pl022 *pl022)
1112{
1113 unsigned long flags;
1114
1115 spin_lock_irqsave(&pl022->queue_lock, flags);
1116
1117 if (pl022->run == QUEUE_RUNNING || pl022->busy) {
1118 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1119 return -EBUSY;
1120 }
1121
1122 pl022->run = QUEUE_RUNNING;
1123 pl022->cur_msg = NULL;
1124 pl022->cur_transfer = NULL;
1125 pl022->cur_chip = NULL;
1126 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1127
1128 queue_work(pl022->workqueue, &pl022->pump_messages);
1129
1130 return 0;
1131}
1132
1133
1134static int stop_queue(struct pl022 *pl022)
1135{
1136 unsigned long flags;
1137 unsigned limit = 500;
1138 int status = 0;
1139
1140 spin_lock_irqsave(&pl022->queue_lock, flags);
1141
1142 /* This is a bit lame, but is optimized for the common execution path.
1143 * A wait_queue on the pl022->busy could be used, but then the common
1144 * execution path (pump_messages) would be required to call wake_up or
1145 * friends on every SPI message. Do this instead */
1146 pl022->run = QUEUE_STOPPED;
1147 while (!list_empty(&pl022->queue) && pl022->busy && limit--) {
1148 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1149 msleep(10);
1150 spin_lock_irqsave(&pl022->queue_lock, flags);
1151 }
1152
1153 if (!list_empty(&pl022->queue) || pl022->busy)
1154 status = -EBUSY;
1155
1156 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1157
1158 return status;
1159}
1160
1161static int destroy_queue(struct pl022 *pl022)
1162{
1163 int status;
1164
1165 status = stop_queue(pl022);
1166 /* we are unloading the module or failing to load (only two calls
1167 * to this routine), and neither call can handle a return value.
1168 * However, destroy_workqueue calls flush_workqueue, and that will
1169 * block until all work is done. If the reason that stop_queue
1170 * timed out is that the work will never finish, then it does no
1171 * good to call destroy_workqueue, so return anyway. */
1172 if (status != 0)
1173 return status;
1174
1175 destroy_workqueue(pl022->workqueue);
1176
1177 return 0;
1178}
1179
1180static int verify_controller_parameters(struct pl022 *pl022,
1181 struct pl022_config_chip *chip_info)
1182{
1183 if ((chip_info->lbm != LOOPBACK_ENABLED)
1184 && (chip_info->lbm != LOOPBACK_DISABLED)) {
1185 dev_err(chip_info->dev,
1186 "loopback Mode is configured incorrectly\n");
1187 return -EINVAL;
1188 }
1189 if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
1190 || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
1191 dev_err(chip_info->dev,
1192 "interface is configured incorrectly\n");
1193 return -EINVAL;
1194 }
1195 if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
1196 (!pl022->vendor->unidir)) {
1197 dev_err(chip_info->dev,
1198 "unidirectional mode not supported in this "
1199 "hardware version\n");
1200 return -EINVAL;
1201 }
1202 if ((chip_info->hierarchy != SSP_MASTER)
1203 && (chip_info->hierarchy != SSP_SLAVE)) {
1204 dev_err(chip_info->dev,
1205 "hierarchy is configured incorrectly\n");
1206 return -EINVAL;
1207 }
1208 if (((chip_info->clk_freq).cpsdvsr < CPSDVR_MIN)
1209 || ((chip_info->clk_freq).cpsdvsr > CPSDVR_MAX)) {
1210 dev_err(chip_info->dev,
1211 "cpsdvsr is configured incorrectly\n");
1212 return -EINVAL;
1213 }
1214 if ((chip_info->endian_rx != SSP_RX_MSB)
1215 && (chip_info->endian_rx != SSP_RX_LSB)) {
1216 dev_err(chip_info->dev,
1217			"RX FIFO endianness is configured incorrectly\n");
1218 return -EINVAL;
1219 }
1220 if ((chip_info->endian_tx != SSP_TX_MSB)
1221 && (chip_info->endian_tx != SSP_TX_LSB)) {
1222 dev_err(chip_info->dev,
1223			"TX FIFO endianness is configured incorrectly\n");
1224 return -EINVAL;
1225 }
1226 if ((chip_info->data_size < SSP_DATA_BITS_4)
1227 || (chip_info->data_size > SSP_DATA_BITS_32)) {
1228 dev_err(chip_info->dev,
1229 "DATA Size is configured incorrectly\n");
1230 return -EINVAL;
1231 }
1232 if ((chip_info->com_mode != INTERRUPT_TRANSFER)
1233 && (chip_info->com_mode != DMA_TRANSFER)
1234 && (chip_info->com_mode != POLLING_TRANSFER)) {
1235 dev_err(chip_info->dev,
1236 "Communication mode is configured incorrectly\n");
1237 return -EINVAL;
1238 }
1239 if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM)
1240 || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) {
1241 dev_err(chip_info->dev,
1242 "RX FIFO Trigger Level is configured incorrectly\n");
1243 return -EINVAL;
1244 }
1245 if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC)
1246 || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) {
1247 dev_err(chip_info->dev,
1248 "TX FIFO Trigger Level is configured incorrectly\n");
1249 return -EINVAL;
1250 }
1251 if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) {
1252 if ((chip_info->clk_phase != SSP_CLK_RISING_EDGE)
1253 && (chip_info->clk_phase != SSP_CLK_FALLING_EDGE)) {
1254 dev_err(chip_info->dev,
1255 "Clock Phase is configured incorrectly\n");
1256 return -EINVAL;
1257 }
1258 if ((chip_info->clk_pol != SSP_CLK_POL_IDLE_LOW)
1259 && (chip_info->clk_pol != SSP_CLK_POL_IDLE_HIGH)) {
1260 dev_err(chip_info->dev,
1261 "Clock Polarity is configured incorrectly\n");
1262 return -EINVAL;
1263 }
1264 }
1265 if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
1266 if ((chip_info->ctrl_len < SSP_BITS_4)
1267 || (chip_info->ctrl_len > SSP_BITS_32)) {
1268 dev_err(chip_info->dev,
1269 "CTRL LEN is configured incorrectly\n");
1270 return -EINVAL;
1271 }
1272 if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
1273 && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
1274 dev_err(chip_info->dev,
1275 "Wait State is configured incorrectly\n");
1276 return -EINVAL;
1277 }
1278 if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
1279 && (chip_info->duplex !=
1280 SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
1281 dev_err(chip_info->dev,
1282 "DUPLEX is configured incorrectly\n");
1283 return -EINVAL;
1284 }
1285 }
1286 if (chip_info->cs_control == NULL) {
1287 dev_warn(chip_info->dev,
1288 "Chip Select Function is NULL for this chip\n");
1289 chip_info->cs_control = null_cs_control;
1290 }
1291 return 0;
1292}
1293
1294/**
1295 * pl022_transfer - transfer function registered to SPI master framework
1296 * @spi: spi device which is requesting transfer
1297 * @msg: spi message to be handled; it is queued on the driver queue
1298 *
1299 * This function is registered to the SPI framework for this SPI master
1300 * controller. It queues the spi_message on the driver's queue if
1301 * the queue is not stopped, then returns.
1302 */
1303static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
1304{
1305 struct pl022 *pl022 = spi_master_get_devdata(spi->master);
1306 unsigned long flags;
1307
1308 spin_lock_irqsave(&pl022->queue_lock, flags);
1309
1310 if (pl022->run == QUEUE_STOPPED) {
1311 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1312 return -ESHUTDOWN;
1313 }
1314 msg->actual_length = 0;
1315 msg->status = -EINPROGRESS;
1316 msg->state = STATE_START;
1317
1318 list_add_tail(&msg->queue, &pl022->queue);
1319 if (pl022->run == QUEUE_RUNNING && !pl022->busy)
1320 queue_work(pl022->workqueue, &pl022->pump_messages);
1321
1322 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1323 return 0;
1324}
1325
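For context, a client protocol driver normally reaches pl022_transfer() through the generic spi_async() path rather than calling it directly; a hedged client-side sketch (function name and buffer handling assumed):

	static int example_client_send(struct spi_device *spi,
				       const void *buf, size_t len)
	{
		struct spi_transfer t = {
			.tx_buf = buf,
			.len    = len,
		};
		struct spi_message m;

		spi_message_init(&m);
		spi_message_add_tail(&t, &m);
		/* ends up in pl022_transfer() via master->transfer */
		return spi_async(spi, &m);
	}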
1326static int calculate_effective_freq(struct pl022 *pl022,
1327 int freq,
1328 struct ssp_clock_params *clk_freq)
1329{
1330	/* Let's calculate the frequency parameters */
1331 u16 cpsdvsr = 2;
1332 u16 scr = 0;
1333 bool freq_found = false;
1334 u32 rate;
1335 u32 max_tclk;
1336 u32 min_tclk;
1337
1338 rate = clk_get_rate(pl022->clk);
1339	/* cpsdvsr = 2 & scr = 0 */
1340 max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN)));
1341 /* cpsdvsr = 254 & scr = 255 */
1342 min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX)));
1343
1344 if ((freq <= max_tclk) && (freq >= min_tclk)) {
1345 while (cpsdvsr <= CPSDVR_MAX && !freq_found) {
1346 while (scr <= SCR_MAX && !freq_found) {
1347 if ((rate /
1348 (cpsdvsr * (1 + scr))) > freq)
1349 scr += 1;
1350 else {
1351 /*
1352					 * This bool is set true once an
1353					 * effective frequency at or just
1354					 * above the target has been found
1355 */
1356 freq_found = true;
1357 if ((rate /
1358 (cpsdvsr * (1 + scr))) != freq) {
1359 if (scr == SCR_MIN) {
1360 cpsdvsr -= 2;
1361 scr = SCR_MAX;
1362 } else
1363 scr -= 1;
1364 }
1365 }
1366 }
1367 if (!freq_found) {
1368 cpsdvsr += 2;
1369 scr = SCR_MIN;
1370 }
1371 }
1372 if (cpsdvsr != 0) {
1373 dev_dbg(&pl022->adev->dev,
1374 "SSP Effective Frequency is %u\n",
1375 (rate / (cpsdvsr * (1 + scr))));
1376 clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF);
1377 clk_freq->scr = (u8) (scr & 0xFF);
1378 dev_dbg(&pl022->adev->dev,
1379 "SSP cpsdvsr = %d, scr = %d\n",
1380 clk_freq->cpsdvsr, clk_freq->scr);
1381 }
1382 } else {
1383 dev_err(&pl022->adev->dev,
1384 "controller data is incorrect: out of range frequency");
1385 return -EINVAL;
1386 }
1387 return 0;
1388}
1389
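To make the search above concrete (illustrative numbers, not from the patch): the effective rate is rate / (cpsdvsr * (1 + scr)), so for rate == 24 MHz and a requested 1 MHz the loop settles on cpsdvsr == 2 and scr == 11:

	/* 24000000 / (2 * (1 + 11)) == 1000000, an exact match, so no
	 * step-back adjustment of scr is needed in this case. */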
1390/**
1391 * NOT IMPLEMENTED
1392 * process_dma_info - Processes the DMA info provided by client drivers
1393 * @chip_info: chip info provided by client device
1394 * @chip: Runtime state maintained by the SSP controller for each spi device
1395 *
1396 * This function processes and stores the DMA config provided by the client
1397 * driver into the runtime state maintained by the SSP controller driver
1398 */
1399static int process_dma_info(struct pl022_config_chip *chip_info,
1400 struct chip_data *chip)
1401{
1402 dev_err(chip_info->dev,
1403 "cannot process DMA info, DMA not implemented!\n");
1404 return -ENOTSUPP;
1405}
1406
1407/**
1408 * pl022_setup - setup function registered to SPI master framework
1409 * @spi: spi device which is requesting setup
1410 *
1411 * This function is registered to the SPI framework for this SPI master
1412 * controller. If this is the first time setup is called for this device,
1413 * this function will initialize the runtime state for this chip and save
1414 * it in the device structure. Otherwise it will update the runtime info
1415 * with the new chip info. Nothing is actually written to the
1416 * controller hardware here; that is not done until the actual transfer
1417 * commences.
1418 */
1419
1420/* FIXME: JUST GUESSING the spi->mode bits understood by this driver */
1421#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
1422 | SPI_LSB_FIRST | SPI_LOOP)
1423
1424static int pl022_setup(struct spi_device *spi)
1425{
1426 struct pl022_config_chip *chip_info;
1427 struct chip_data *chip;
1428 int status = 0;
1429 struct pl022 *pl022 = spi_master_get_devdata(spi->master);
1430
1431 if (spi->mode & ~MODEBITS) {
1432 dev_dbg(&spi->dev, "unsupported mode bits %x\n",
1433 spi->mode & ~MODEBITS);
1434 return -EINVAL;
1435 }
1436
1437 if (!spi->max_speed_hz)
1438 return -EINVAL;
1439
1440 /* Get controller_state if one is supplied */
1441 chip = spi_get_ctldata(spi);
1442
1443 if (chip == NULL) {
1444 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
1445 if (!chip) {
1446 dev_err(&spi->dev,
1447 "cannot allocate controller state\n");
1448 return -ENOMEM;
1449 }
1450 dev_dbg(&spi->dev,
1451 "allocated memory for controller's runtime state\n");
1452 }
1453
1454 /* Get controller data if one is supplied */
1455 chip_info = spi->controller_data;
1456
1457 if (chip_info == NULL) {
1458		/* spi_board_info.controller_data was not supplied */
1459 dev_dbg(&spi->dev,
1460 "using default controller_data settings\n");
1461
1462 chip_info =
1463 kzalloc(sizeof(struct pl022_config_chip), GFP_KERNEL);
1464
1465 if (!chip_info) {
1466 dev_err(&spi->dev,
1467 "cannot allocate controller data\n");
1468 status = -ENOMEM;
1469 goto err_first_setup;
1470 }
1471
1472 dev_dbg(&spi->dev, "allocated memory for controller data\n");
1473
1474 /* Pointer back to the SPI device */
1475 chip_info->dev = &spi->dev;
1476 /*
1477 * Set controller data default values:
1478 * Polling is supported by default
1479 */
1480 chip_info->lbm = LOOPBACK_DISABLED;
1481 chip_info->com_mode = POLLING_TRANSFER;
1482 chip_info->iface = SSP_INTERFACE_MOTOROLA_SPI;
1483 chip_info->hierarchy = SSP_SLAVE;
1484 chip_info->slave_tx_disable = DO_NOT_DRIVE_TX;
1485 chip_info->endian_tx = SSP_TX_LSB;
1486 chip_info->endian_rx = SSP_RX_LSB;
1487 chip_info->data_size = SSP_DATA_BITS_12;
1488 chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM;
1489 chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC;
1490 chip_info->clk_phase = SSP_CLK_FALLING_EDGE;
1491 chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW;
1492 chip_info->ctrl_len = SSP_BITS_8;
1493 chip_info->wait_state = SSP_MWIRE_WAIT_ZERO;
1494 chip_info->duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX;
1495 chip_info->cs_control = null_cs_control;
1496 } else {
1497 dev_dbg(&spi->dev,
1498 "using user supplied controller_data settings\n");
1499 }
1500
1501 /*
1502 * We can override with custom divisors, else we use the board
1503 * frequency setting
1504 */
1505 if ((0 == chip_info->clk_freq.cpsdvsr)
1506 && (0 == chip_info->clk_freq.scr)) {
1507 status = calculate_effective_freq(pl022,
1508 spi->max_speed_hz,
1509 &chip_info->clk_freq);
1510 if (status < 0)
1511 goto err_config_params;
1512 } else {
1513 if ((chip_info->clk_freq.cpsdvsr % 2) != 0)
1514 chip_info->clk_freq.cpsdvsr =
1515 chip_info->clk_freq.cpsdvsr - 1;
1516 }
1517 status = verify_controller_parameters(pl022, chip_info);
1518 if (status) {
1519 dev_err(&spi->dev, "controller data is incorrect");
1520 goto err_config_params;
1521 }
1522 /* Now set controller state based on controller data */
1523 chip->xfer_type = chip_info->com_mode;
1524 chip->cs_control = chip_info->cs_control;
1525
1526 if (chip_info->data_size <= 8) {
1527		dev_dbg(&spi->dev, "1 <= n <= 8 bits per word\n");
1528 chip->n_bytes = 1;
1529 chip->read = READING_U8;
1530 chip->write = WRITING_U8;
1531 } else if (chip_info->data_size <= 16) {
1532 dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
1533 chip->n_bytes = 2;
1534 chip->read = READING_U16;
1535 chip->write = WRITING_U16;
1536 } else {
1537 if (pl022->vendor->max_bpw >= 32) {
1538 dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
1539 chip->n_bytes = 4;
1540 chip->read = READING_U32;
1541 chip->write = WRITING_U32;
1542 } else {
1543 dev_err(&spi->dev,
1544 "illegal data size for this controller!\n");
1545 dev_err(&spi->dev,
1546 "a standard pl022 can only handle "
1547 "1 <= n <= 16 bit words\n");
1548 status = -EOPNOTSUPP;
goto err_config_params;
1549 }
1550 }
1551
1552 /* Now initialize all register settings required for this chip */
1553 chip->cr0 = 0;
1554 chip->cr1 = 0;
1555 chip->dmacr = 0;
1556 chip->cpsr = 0;
1557 if ((chip_info->com_mode == DMA_TRANSFER)
1558 && ((pl022->master_info)->enable_dma)) {
1559 chip->enable_dma = 1;
1560 dev_dbg(&spi->dev, "DMA mode set in controller state\n");
1561 status = process_dma_info(chip_info, chip);
1562 if (status < 0)
1563 goto err_config_params;
1564 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
1565 SSP_DMACR_MASK_RXDMAE, 0);
1566 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
1567 SSP_DMACR_MASK_TXDMAE, 1);
1568 } else {
1569 chip->enable_dma = 0;
1570 dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
1571 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
1572 SSP_DMACR_MASK_RXDMAE, 0);
1573 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
1574 SSP_DMACR_MASK_TXDMAE, 1);
1575 }
1576
1577 chip->cpsr = chip_info->clk_freq.cpsdvsr;
1578
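/*
 * SSP_WRITE_BITS(reg, val, mask, sb), defined earlier in this file,
 * clears the masked field in the shadow register and ORs in the new
 * value shifted up to start bit 'sb'. The shadow cr0/cr1/dmacr/cpsr
 * values built below are only programmed into the block once a
 * transfer is actually started.
 */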
1579 SSP_WRITE_BITS(chip->cr0, chip_info->data_size, SSP_CR0_MASK_DSS, 0);
1580 SSP_WRITE_BITS(chip->cr0, chip_info->duplex, SSP_CR0_MASK_HALFDUP, 5);
1581 SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6);
1582 SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7);
1583 SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8);
1584 SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, SSP_CR0_MASK_CSS, 16);
1585 SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF, 21);
1586 SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0);
1587 SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
1588 SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
1589 SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3);
1590 SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, SSP_CR1_MASK_RENDN, 4);
1591 SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, SSP_CR1_MASK_TENDN, 5);
1592 SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, SSP_CR1_MASK_MWAIT, 6);
1593 SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, SSP_CR1_MASK_RXIFLSEL, 7);
1594 SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, SSP_CR1_MASK_TXIFLSEL, 10);
1595
1596 /* Save controller_state */
1597 spi_set_ctldata(spi, chip);
1598 return status;
1599 err_config_params:
1600 err_first_setup:
1601 kfree(chip);
1602 return status;
1603}
1604
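/*
 * Illustrative sketch only (hence the #if 0): the per-chip settings
 * consumed by pl022_setup() above would normally be supplied by a
 * board file through spi_board_info.controller_data. The field names
 * are those of struct pl022_config_chip as used above; SSP_MASTER and
 * INTERRUPT_TRANSFER are assumed counterparts of the SSP_SLAVE and
 * POLLING_TRANSFER defaults, and the device name, numbers and
 * chip-select handling are made up for the example.
 */
#if 0
static void board_cs_control(u32 command)
{
	/* drive the chip-select line here (select/deselect) */
}

static struct pl022_config_chip board_chip_info = {
	.lbm = LOOPBACK_DISABLED,
	.com_mode = INTERRUPT_TRANSFER,
	.iface = SSP_INTERFACE_MOTOROLA_SPI,
	.hierarchy = SSP_MASTER,
	.slave_tx_disable = DO_NOT_DRIVE_TX,
	.endian_tx = SSP_TX_LSB,
	.endian_rx = SSP_RX_LSB,
	.data_size = SSP_DATA_BITS_8,
	.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
	.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
	.clk_phase = SSP_CLK_FALLING_EDGE,
	.clk_pol = SSP_CLK_POL_IDLE_LOW,
	.ctrl_len = SSP_BITS_8,
	.wait_state = SSP_MWIRE_WAIT_ZERO,
	.duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
	/* clk_freq left zeroed: divisors are computed from max_speed_hz */
	.cs_control = board_cs_control,
};

/* registered from the board's init code via spi_register_board_info() */
static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias = "some-spi-chip",
		.controller_data = &board_chip_info,
		.max_speed_hz = 1000000,
		.bus_num = 0,
		.chip_select = 0,
	},
};
#endif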
1605/**
1606 * pl022_cleanup - cleanup function registered to SPI master framework
1607 * @spi: spi device which is requesting cleanup
1608 *
1609 * This function is registered to the SPI framework for this SPI master
1610 * controller. It will free the runtime state of the chip.
1611 */
1612static void pl022_cleanup(struct spi_device *spi)
1613{
1614 struct chip_data *chip = spi_get_ctldata(spi);
1615
1616 spi_set_ctldata(spi, NULL);
1617 kfree(chip);
1618}
1619
1620
1621static int __init
1622pl022_probe(struct amba_device *adev, struct amba_id *id)
1623{
1624 struct device *dev = &adev->dev;
1625 struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
1626 struct spi_master *master;
1627 struct pl022 *pl022 = NULL; /*Data for this driver */
1628 int status = 0;
1629
1630 dev_info(&adev->dev,
1631 "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
1632 if (platform_info == NULL) {
1633 dev_err(&adev->dev, "probe - no platform data supplied\n");
1634 status = -ENODEV;
1635 goto err_no_pdata;
1636 }
1637
1638 /* Allocate master with space for data */
1639 master = spi_alloc_master(dev, sizeof(struct pl022));
1640 if (master == NULL) {
1641 dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
1642 status = -ENOMEM;
1643 goto err_no_master;
1644 }
1645
1646 pl022 = spi_master_get_devdata(master);
1647 pl022->master = master;
1648 pl022->master_info = platform_info;
1649 pl022->adev = adev;
1650 pl022->vendor = id->data;
1651
1652 /*
1653 * Bus number which has been assigned to this SSP controller
1654 * on this board
1655 */
1656 master->bus_num = platform_info->bus_id;
1657 master->num_chipselect = platform_info->num_chipselect;
1658 master->cleanup = pl022_cleanup;
1659 master->setup = pl022_setup;
1660 master->transfer = pl022_transfer;
1661
1662 dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);
1663
1664 status = amba_request_regions(adev, NULL);
1665 if (status)
1666 goto err_no_ioregion;
1667
1668 pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
1669 if (pl022->virtbase == NULL) {
1670 status = -ENOMEM;
1671 goto err_no_ioremap;
1672 }
1673 printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
1674 adev->res.start, pl022->virtbase);
1675
1676 pl022->clk = clk_get(&adev->dev, NULL);
1677 if (IS_ERR(pl022->clk)) {
1678 status = PTR_ERR(pl022->clk);
1679 dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
1680 goto err_no_clk;
1681 }
1682
1683 /* Briefly enable the clock to disable the SSP and load a known default configuration */
1684 clk_enable(pl022->clk);
1685 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
1686 SSP_CR1(pl022->virtbase));
1687 load_ssp_default_config(pl022);
1688 clk_disable(pl022->clk);
1689
1690 status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
1691 pl022);
1692 if (status < 0) {
1693 dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
1694 goto err_no_irq;
1695 }
1696 /* Initialize and start queue */
1697 status = init_queue(pl022);
1698 if (status != 0) {
1699 dev_err(&adev->dev, "probe - problem initializing queue\n");
1700 goto err_init_queue;
1701 }
1702 status = start_queue(pl022);
1703 if (status != 0) {
1704 dev_err(&adev->dev, "probe - problem starting queue\n");
1705 goto err_start_queue;
1706 }
1707 /* Register with the SPI framework */
1708 amba_set_drvdata(adev, pl022);
1709 status = spi_register_master(master);
1710 if (status != 0) {
1711 dev_err(&adev->dev,
1712 "probe - problem registering spi master\n");
1713 goto err_spi_register;
1714 }
1715 dev_dbg(dev, "probe succeeded\n");
1716 return 0;
1717
1718 err_spi_register:
1719 err_start_queue:
1720 err_init_queue:
1721 destroy_queue(pl022);
1722 free_irq(adev->irq[0], pl022);
1723 err_no_irq:
1724 clk_put(pl022->clk);
1725 err_no_clk:
1726 iounmap(pl022->virtbase);
1727 err_no_ioremap:
1728 amba_release_regions(adev);
1729 err_no_ioregion:
1730 spi_master_put(master);
1731 err_no_master:
1732 err_no_pdata:
1733 return status;
1734}
1735
1736static int __exit
1737pl022_remove(struct amba_device *adev)
1738{
1739 struct pl022 *pl022 = amba_get_drvdata(adev);
1740 int status = 0;
1741 if (!pl022)
1742 return 0;
1743
1744 /* Remove the queue */
1745 status = destroy_queue(pl022);
1746 if (status != 0) {
1747 dev_err(&adev->dev,
1748 "queue remove failed (%d)\n", status);
1749 return status;
1750 }
1751 load_ssp_default_config(pl022);
1752 free_irq(adev->irq[0], pl022);
1753 clk_disable(pl022->clk);
1754 clk_put(pl022->clk);
1755 iounmap(pl022->virtbase);
1756 amba_release_regions(adev);
1757 tasklet_disable(&pl022->pump_transfers);
1758 spi_unregister_master(pl022->master);
1759 spi_master_put(pl022->master);
1760 amba_set_drvdata(adev, NULL);
1761 dev_dbg(&adev->dev, "remove succeeded\n");
1762 return 0;
1763}
1764
1765#ifdef CONFIG_PM
1766static int pl022_suspend(struct amba_device *adev, pm_message_t state)
1767{
1768 struct pl022 *pl022 = amba_get_drvdata(adev);
1769 int status = 0;
1770
1771 status = stop_queue(pl022);
1772 if (status) {
1773 dev_warn(&adev->dev, "suspend cannot stop queue\n");
1774 return status;
1775 }
1776
1777 clk_enable(pl022->clk);
1778 load_ssp_default_config(pl022);
1779 clk_disable(pl022->clk);
1780 dev_dbg(&adev->dev, "suspended\n");
1781 return 0;
1782}
1783
1784static int pl022_resume(struct amba_device *adev)
1785{
1786 struct pl022 *pl022 = amba_get_drvdata(adev);
1787 int status = 0;
1788
1789 /* Start the queue running */
1790 status = start_queue(pl022);
1791 if (status)
1792 dev_err(&adev->dev, "problem starting queue (%d)\n", status);
1793 else
1794 dev_dbg(&adev->dev, "resumed\n");
1795
1796 return status;
1797}
1798#else
1799#define pl022_suspend NULL
1800#define pl022_resume NULL
1801#endif /* CONFIG_PM */
1802
1803static struct vendor_data vendor_arm = {
1804 .fifodepth = 8,
1805 .max_bpw = 16,
1806 .unidir = false,
1807};
1808
1809
1810static struct vendor_data vendor_st = {
1811 .fifodepth = 32,
1812 .max_bpw = 32,
1813 .unidir = false,
1814};
1815
1816static struct amba_id pl022_ids[] = {
1817 {
1818 /*
1819 * ARM PL022 variant, this has a 16-bit wide
1820 * and 8 locations deep TX/RX FIFO
1821 */
1822 .id = 0x00041022,
1823 .mask = 0x000fffff,
1824 .data = &vendor_arm,
1825 },
1826 {
1827 /*
1828 * ST Micro derivative, this has a 32-bit wide
1829 * and 32 locations deep TX/RX FIFO
1830 */
1831 .id = 0x00108022,
1832 .mask = 0xffffffff,
1833 .data = &vendor_st,
1834 },
1835 { 0, 0 },
1836};
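/*
 * The AMBA bus matches an entry when (periphid & mask) == id, so the
 * 0x000fffff mask on the ARM entry deliberately ignores the top
 * revision/configuration bits, while the ST derivative is matched on
 * the full 32-bit id.
 */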
1837
1838static struct amba_driver pl022_driver = {
1839 .drv = {
1840 .name = "ssp-pl022",
1841 },
1842 .id_table = pl022_ids,
1843 .probe = pl022_probe,
1844 .remove = __exit_p(pl022_remove),
1845 .suspend = pl022_suspend,
1846 .resume = pl022_resume,
1847};
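/*
 * Note: __exit_p() above resolves to pl022_remove only for modular
 * builds; for built-in drivers the exit section is discarded and the
 * remove hook becomes NULL.
 */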
1848
1849
1850static int __init pl022_init(void)
1851{
1852 return amba_driver_register(&pl022_driver);
1853}
1854
1855module_init(pl022_init);
1856
1857static void __exit pl022_exit(void)
1858{
1859 amba_driver_unregister(&pl022_driver);
1860}
1861
1862module_exit(pl022_exit);
1863
1864MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
1865MODULE_DESCRIPTION("PL022 SSP Controller Driver");
1866MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c
index f2447a5476b..bbf9371cd28 100644
--- a/drivers/spi/spi_s3c24xx_gpio.c
+++ b/drivers/spi/spi_s3c24xx_gpio.c
@@ -17,6 +17,7 @@
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <linux/workqueue.h> 18#include <linux/workqueue.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/gpio.h>
20 21
21#include <linux/spi/spi.h> 22#include <linux/spi/spi.h>
22#include <linux/spi/spi_bitbang.h> 23#include <linux/spi/spi_bitbang.h>
diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c
index 7dc3a6b4139..a0e0d246b59 100644
--- a/drivers/ssb/embedded.c
+++ b/drivers/ssb/embedded.c
@@ -29,6 +29,7 @@ int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks)
29 } 29 }
30 return -ENODEV; 30 return -ENODEV;
31} 31}
32EXPORT_SYMBOL(ssb_watchdog_timer_set);
32 33
33u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask) 34u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask)
34{ 35{
diff --git a/drivers/staging/go7007/go7007.txt b/drivers/staging/go7007/go7007.txt
index 9f6772bc68c..1c2907c1dc8 100644
--- a/drivers/staging/go7007/go7007.txt
+++ b/drivers/staging/go7007/go7007.txt
@@ -2,7 +2,7 @@ This is a driver for the WIS GO7007SB multi-format video encoder.
2 2
3Pete Eberlein <pete@sensoray.com> 3Pete Eberlein <pete@sensoray.com>
4 4
5The driver was orignally released under the GPL and is currently hosted at: 5The driver was originally released under the GPL and is currently hosted at:
6http://nikosapi.org/wiki/index.php/WIS_Go7007_Linux_driver 6http://nikosapi.org/wiki/index.php/WIS_Go7007_Linux_driver
7The go7007 firmware can be acquired from the package on the site above. 7The go7007 firmware can be acquired from the package on the site above.
8 8
@@ -24,7 +24,7 @@ These should be used instead of the non-standard GO7007 ioctls described
24below. 24below.
25 25
26 26
27The README files from the orignal package appear below: 27The README files from the original package appears below:
28 28
29--------------------------------------------------------------------------- 29---------------------------------------------------------------------------
30 WIS GO7007SB Public Linux Driver 30 WIS GO7007SB Public Linux Driver
diff --git a/drivers/staging/panel/lcd-panel-cgram.txt b/drivers/staging/panel/lcd-panel-cgram.txt
index f9ceef4322a..7f82c905763 100644
--- a/drivers/staging/panel/lcd-panel-cgram.txt
+++ b/drivers/staging/panel/lcd-panel-cgram.txt
@@ -3,7 +3,7 @@ characters 0 to 7. The escape code to define a new character is
3'\e[LG' followed by one digit from 0 to 7, representing the character 3'\e[LG' followed by one digit from 0 to 7, representing the character
4number, and up to 8 couples of hex digits terminated by a semi-colon 4number, and up to 8 couples of hex digits terminated by a semi-colon
5(';'). Each couple of digits represents a line, with 1-bits for each 5(';'). Each couple of digits represents a line, with 1-bits for each
6illuminated pixel with LSB on the right. Lines are numberred from the 6illuminated pixel with LSB on the right. Lines are numbered from the
7top of the character to the bottom. On a 5x7 matrix, only the 5 lower 7top of the character to the bottom. On a 5x7 matrix, only the 5 lower
8bits of the 7 first bytes are used for each character. If the string 8bits of the 7 first bytes are used for each character. If the string
9is incomplete, only complete lines will be redefined. Here are some 9is incomplete, only complete lines will be redefined. Here are some
diff --git a/drivers/staging/rt2860/common/mlme.c b/drivers/staging/rt2860/common/mlme.c
index c00f9ab9c46..2edf2999f5c 100644
--- a/drivers/staging/rt2860/common/mlme.c
+++ b/drivers/staging/rt2860/common/mlme.c
@@ -5664,7 +5664,7 @@ VOID AsicUpdateProtect(
5664#if 0 5664#if 0
5665 MacReg |= (pAd->CommonCfg.RtsThreshold << 8); 5665 MacReg |= (pAd->CommonCfg.RtsThreshold << 8);
5666#else 5666#else
5667 // If the user want disable RtsThreshold and enbale Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096 5667 // If the user want disable RtsThreshold and enable Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096
5668 if (( 5668 if ((
5669#ifdef DOT11_N_SUPPORT 5669#ifdef DOT11_N_SUPPORT
5670 (pAd->CommonCfg.BACapability.field.AmsduEnable) || 5670 (pAd->CommonCfg.BACapability.field.AmsduEnable) ||
diff --git a/drivers/staging/rt2870/common/mlme.c b/drivers/staging/rt2870/common/mlme.c
index 8a82cee8bf2..a26bc033337 100644
--- a/drivers/staging/rt2870/common/mlme.c
+++ b/drivers/staging/rt2870/common/mlme.c
@@ -5561,7 +5561,7 @@ VOID AsicUpdateProtect(
5561#if 0 5561#if 0
5562 MacReg |= (pAd->CommonCfg.RtsThreshold << 8); 5562 MacReg |= (pAd->CommonCfg.RtsThreshold << 8);
5563#else 5563#else
5564 // If the user want disable RtsThreshold and enbale Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096 5564 // If the user want disable RtsThreshold and enable Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096
5565 if (( 5565 if ((
5566#ifdef DOT11_N_SUPPORT 5566#ifdef DOT11_N_SUPPORT
5567 (pAd->CommonCfg.BACapability.field.AmsduEnable) || 5567 (pAd->CommonCfg.BACapability.field.AmsduEnable) ||
diff --git a/drivers/staging/rt3070/common/mlme.c b/drivers/staging/rt3070/common/mlme.c
index 0ffbfa36699..0189bab013c 100644
--- a/drivers/staging/rt3070/common/mlme.c
+++ b/drivers/staging/rt3070/common/mlme.c
@@ -5575,7 +5575,7 @@ VOID AsicUpdateProtect(
5575 RTMP_IO_READ32(pAd, TX_RTS_CFG, &MacReg); 5575 RTMP_IO_READ32(pAd, TX_RTS_CFG, &MacReg);
5576 MacReg &= 0xFF0000FF; 5576 MacReg &= 0xFF0000FF;
5577 5577
5578 // If the user want disable RtsThreshold and enbale Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096 5578 // If the user want disable RtsThreshold and enable Amsdu/Ralink-Aggregation, set the RtsThreshold as 4096
5579 if (( 5579 if ((
5580#ifdef DOT11_N_SUPPORT 5580#ifdef DOT11_N_SUPPORT
5581 (pAd->CommonCfg.BACapability.field.AmsduEnable) || 5581 (pAd->CommonCfg.BACapability.field.AmsduEnable) ||
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 888198c9a10..824e65bdc43 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -2424,7 +2424,7 @@ int hfa384x_drvr_ramdl_write(hfa384x_t *hw, u32 daddr, void *buf, u32 len)
2424* 0 success 2424* 0 success
2425* >0 f/w reported error - f/w status code 2425* >0 f/w reported error - f/w status code
2426* <0 driver reported error 2426* <0 driver reported error
2427* -ETIMEOUT timout waiting for the cmd regs to become 2427* -ETIMEDOUT timout waiting for the cmd regs to become
2428* available, or waiting for the control reg 2428* available, or waiting for the control reg
2429* to indicate the Aux port is enabled. 2429* to indicate the Aux port is enabled.
2430* -ENODATA the buffer does NOT contain a valid PDA. 2430* -ENODATA the buffer does NOT contain a valid PDA.
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index c6c816b7ecb..5eee3f82be5 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -22,6 +22,7 @@ config USB_ARCH_HAS_HCD
22 default y if PCMCIA && !M32R # sl811_cs 22 default y if PCMCIA && !M32R # sl811_cs
23 default y if ARM # SL-811 23 default y if ARM # SL-811
24 default y if SUPERH # r8a66597-hcd 24 default y if SUPERH # r8a66597-hcd
25 default y if MICROBLAZE
25 default PCI 26 default PCI
26 27
27# many non-PCI SOC chips embed OHCI 28# many non-PCI SOC chips embed OHCI
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 0716cdb44cd..0a3dc5ece63 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_USB_MON) += mon/
11obj-$(CONFIG_PCI) += host/ 11obj-$(CONFIG_PCI) += host/
12obj-$(CONFIG_USB_EHCI_HCD) += host/ 12obj-$(CONFIG_USB_EHCI_HCD) += host/
13obj-$(CONFIG_USB_ISP116X_HCD) += host/ 13obj-$(CONFIG_USB_ISP116X_HCD) += host/
14obj-$(CONFIG_USB_ISP1760_HCD) += host/
15obj-$(CONFIG_USB_OHCI_HCD) += host/ 14obj-$(CONFIG_USB_OHCI_HCD) += host/
16obj-$(CONFIG_USB_UHCI_HCD) += host/ 15obj-$(CONFIG_USB_UHCI_HCD) += host/
17obj-$(CONFIG_USB_FHCI_HCD) += host/ 16obj-$(CONFIG_USB_FHCI_HCD) += host/
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0a69c0977e3..ddeb6919253 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -16,7 +16,8 @@
16 * v0.9 - thorough cleaning, URBification, almost a rewrite 16 * v0.9 - thorough cleaning, URBification, almost a rewrite
17 * v0.10 - some more cleanups 17 * v0.10 - some more cleanups
18 * v0.11 - fixed flow control, read error doesn't stop reads 18 * v0.11 - fixed flow control, read error doesn't stop reads
19 * v0.12 - added TIOCM ioctls, added break handling, made struct acm kmalloced 19 * v0.12 - added TIOCM ioctls, added break handling, made struct acm
20 * kmalloced
20 * v0.13 - added termios, added hangup 21 * v0.13 - added termios, added hangup
21 * v0.14 - sized down struct acm 22 * v0.14 - sized down struct acm
22 * v0.15 - fixed flow control again - characters could be lost 23 * v0.15 - fixed flow control again - characters could be lost
@@ -62,7 +63,7 @@
62#include <linux/tty_flip.h> 63#include <linux/tty_flip.h>
63#include <linux/module.h> 64#include <linux/module.h>
64#include <linux/mutex.h> 65#include <linux/mutex.h>
65#include <asm/uaccess.h> 66#include <linux/uaccess.h>
66#include <linux/usb.h> 67#include <linux/usb.h>
67#include <linux/usb/cdc.h> 68#include <linux/usb/cdc.h>
68#include <asm/byteorder.h> 69#include <asm/byteorder.h>
@@ -87,7 +88,10 @@ static struct acm *acm_table[ACM_TTY_MINORS];
87 88
88static DEFINE_MUTEX(open_mutex); 89static DEFINE_MUTEX(open_mutex);
89 90
90#define ACM_READY(acm) (acm && acm->dev && acm->used) 91#define ACM_READY(acm) (acm && acm->dev && acm->port.count)
92
93static const struct tty_port_operations acm_port_ops = {
94};
91 95
92#ifdef VERBOSE_DEBUG 96#ifdef VERBOSE_DEBUG
93#define verbose 1 97#define verbose 1
@@ -99,13 +103,15 @@ static DEFINE_MUTEX(open_mutex);
99 * Functions for ACM control messages. 103 * Functions for ACM control messages.
100 */ 104 */
101 105
102static int acm_ctrl_msg(struct acm *acm, int request, int value, void *buf, int len) 106static int acm_ctrl_msg(struct acm *acm, int request, int value,
107 void *buf, int len)
103{ 108{
104 int retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0), 109 int retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
105 request, USB_RT_ACM, value, 110 request, USB_RT_ACM, value,
106 acm->control->altsetting[0].desc.bInterfaceNumber, 111 acm->control->altsetting[0].desc.bInterfaceNumber,
107 buf, len, 5000); 112 buf, len, 5000);
108 dbg("acm_control_msg: rq: 0x%02x val: %#x len: %#x result: %d", request, value, len, retval); 113 dbg("acm_control_msg: rq: 0x%02x val: %#x len: %#x result: %d",
114 request, value, len, retval);
109 return retval < 0 ? retval : 0; 115 return retval < 0 ? retval : 0;
110} 116}
111 117
@@ -150,9 +156,8 @@ static int acm_wb_is_avail(struct acm *acm)
150 156
151 n = ACM_NW; 157 n = ACM_NW;
152 spin_lock_irqsave(&acm->write_lock, flags); 158 spin_lock_irqsave(&acm->write_lock, flags);
153 for (i = 0; i < ACM_NW; i++) { 159 for (i = 0; i < ACM_NW; i++)
154 n -= acm->wb[i].use; 160 n -= acm->wb[i].use;
155 }
156 spin_unlock_irqrestore(&acm->write_lock, flags); 161 spin_unlock_irqrestore(&acm->write_lock, flags);
157 return n; 162 return n;
158} 163}
@@ -183,7 +188,8 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
183 wb->urb->transfer_buffer_length = wb->len; 188 wb->urb->transfer_buffer_length = wb->len;
184 wb->urb->dev = acm->dev; 189 wb->urb->dev = acm->dev;
185 190
186 if ((rc = usb_submit_urb(wb->urb, GFP_ATOMIC)) < 0) { 191 rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
192 if (rc < 0) {
187 dbg("usb_submit_urb(write bulk) failed: %d", rc); 193 dbg("usb_submit_urb(write bulk) failed: %d", rc);
188 acm_write_done(acm, wb); 194 acm_write_done(acm, wb);
189 } 195 }
@@ -262,6 +268,7 @@ static void acm_ctrl_irq(struct urb *urb)
262{ 268{
263 struct acm *acm = urb->context; 269 struct acm *acm = urb->context;
264 struct usb_cdc_notification *dr = urb->transfer_buffer; 270 struct usb_cdc_notification *dr = urb->transfer_buffer;
271 struct tty_struct *tty;
265 unsigned char *data; 272 unsigned char *data;
266 int newctrl; 273 int newctrl;
267 int retval; 274 int retval;
@@ -287,40 +294,45 @@ static void acm_ctrl_irq(struct urb *urb)
287 294
288 data = (unsigned char *)(dr + 1); 295 data = (unsigned char *)(dr + 1);
289 switch (dr->bNotificationType) { 296 switch (dr->bNotificationType) {
297 case USB_CDC_NOTIFY_NETWORK_CONNECTION:
298 dbg("%s network", dr->wValue ?
299 "connected to" : "disconnected from");
300 break;
290 301
291 case USB_CDC_NOTIFY_NETWORK_CONNECTION: 302 case USB_CDC_NOTIFY_SERIAL_STATE:
292 303 tty = tty_port_tty_get(&acm->port);
293 dbg("%s network", dr->wValue ? "connected to" : "disconnected from"); 304 newctrl = get_unaligned_le16(data);
294 break;
295
296 case USB_CDC_NOTIFY_SERIAL_STATE:
297
298 newctrl = get_unaligned_le16(data);
299 305
300 if (acm->tty && !acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) { 306 if (tty) {
307 if (!acm->clocal &&
308 (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
301 dbg("calling hangup"); 309 dbg("calling hangup");
302 tty_hangup(acm->tty); 310 tty_hangup(tty);
303 } 311 }
312 tty_kref_put(tty);
313 }
304 314
305 acm->ctrlin = newctrl; 315 acm->ctrlin = newctrl;
306
307 dbg("input control lines: dcd%c dsr%c break%c ring%c framing%c parity%c overrun%c",
308 acm->ctrlin & ACM_CTRL_DCD ? '+' : '-', acm->ctrlin & ACM_CTRL_DSR ? '+' : '-',
309 acm->ctrlin & ACM_CTRL_BRK ? '+' : '-', acm->ctrlin & ACM_CTRL_RI ? '+' : '-',
310 acm->ctrlin & ACM_CTRL_FRAMING ? '+' : '-', acm->ctrlin & ACM_CTRL_PARITY ? '+' : '-',
311 acm->ctrlin & ACM_CTRL_OVERRUN ? '+' : '-');
312 316
317 dbg("input control lines: dcd%c dsr%c break%c ring%c framing%c parity%c overrun%c",
318 acm->ctrlin & ACM_CTRL_DCD ? '+' : '-',
319 acm->ctrlin & ACM_CTRL_DSR ? '+' : '-',
320 acm->ctrlin & ACM_CTRL_BRK ? '+' : '-',
321 acm->ctrlin & ACM_CTRL_RI ? '+' : '-',
322 acm->ctrlin & ACM_CTRL_FRAMING ? '+' : '-',
323 acm->ctrlin & ACM_CTRL_PARITY ? '+' : '-',
324 acm->ctrlin & ACM_CTRL_OVERRUN ? '+' : '-');
313 break; 325 break;
314 326
315 default: 327 default:
316 dbg("unknown notification %d received: index %d len %d data0 %d data1 %d", 328 dbg("unknown notification %d received: index %d len %d data0 %d data1 %d",
317 dr->bNotificationType, dr->wIndex, 329 dr->bNotificationType, dr->wIndex,
318 dr->wLength, data[0], data[1]); 330 dr->wLength, data[0], data[1]);
319 break; 331 break;
320 } 332 }
321exit: 333exit:
322 usb_mark_last_busy(acm->dev); 334 usb_mark_last_busy(acm->dev);
323 retval = usb_submit_urb (urb, GFP_ATOMIC); 335 retval = usb_submit_urb(urb, GFP_ATOMIC);
324 if (retval) 336 if (retval)
325 dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with " 337 dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with "
326 "result %d", __func__, retval); 338 "result %d", __func__, retval);
@@ -371,15 +383,14 @@ static void acm_rx_tasklet(unsigned long _acm)
371{ 383{
372 struct acm *acm = (void *)_acm; 384 struct acm *acm = (void *)_acm;
373 struct acm_rb *buf; 385 struct acm_rb *buf;
374 struct tty_struct *tty = acm->tty; 386 struct tty_struct *tty;
375 struct acm_ru *rcv; 387 struct acm_ru *rcv;
376 unsigned long flags; 388 unsigned long flags;
377 unsigned char throttled; 389 unsigned char throttled;
378 390
379 dbg("Entering acm_rx_tasklet"); 391 dbg("Entering acm_rx_tasklet");
380 392
381 if (!ACM_READY(acm)) 393 if (!ACM_READY(acm)) {
382 {
383 dbg("acm_rx_tasklet: ACM not ready"); 394 dbg("acm_rx_tasklet: ACM not ready");
384 return; 395 return;
385 } 396 }
@@ -387,12 +398,13 @@ static void acm_rx_tasklet(unsigned long _acm)
387 spin_lock_irqsave(&acm->throttle_lock, flags); 398 spin_lock_irqsave(&acm->throttle_lock, flags);
388 throttled = acm->throttle; 399 throttled = acm->throttle;
389 spin_unlock_irqrestore(&acm->throttle_lock, flags); 400 spin_unlock_irqrestore(&acm->throttle_lock, flags);
390 if (throttled) 401 if (throttled) {
391 {
392 dbg("acm_rx_tasklet: throttled"); 402 dbg("acm_rx_tasklet: throttled");
393 return; 403 return;
394 } 404 }
395 405
406 tty = tty_port_tty_get(&acm->port);
407
396next_buffer: 408next_buffer:
397 spin_lock_irqsave(&acm->read_lock, flags); 409 spin_lock_irqsave(&acm->read_lock, flags);
398 if (list_empty(&acm->filled_read_bufs)) { 410 if (list_empty(&acm->filled_read_bufs)) {
@@ -406,20 +418,22 @@ next_buffer:
406 418
407 dbg("acm_rx_tasklet: procesing buf 0x%p, size = %d", buf, buf->size); 419 dbg("acm_rx_tasklet: procesing buf 0x%p, size = %d", buf, buf->size);
408 420
409 tty_buffer_request_room(tty, buf->size); 421 if (tty) {
410 spin_lock_irqsave(&acm->throttle_lock, flags); 422 spin_lock_irqsave(&acm->throttle_lock, flags);
411 throttled = acm->throttle; 423 throttled = acm->throttle;
412 spin_unlock_irqrestore(&acm->throttle_lock, flags); 424 spin_unlock_irqrestore(&acm->throttle_lock, flags);
413 if (!throttled) 425 if (!throttled) {
414 tty_insert_flip_string(tty, buf->base, buf->size); 426 tty_buffer_request_room(tty, buf->size);
415 tty_flip_buffer_push(tty); 427 tty_insert_flip_string(tty, buf->base, buf->size);
416 428 tty_flip_buffer_push(tty);
417 if (throttled) { 429 } else {
418 dbg("Throttling noticed"); 430 tty_kref_put(tty);
419 spin_lock_irqsave(&acm->read_lock, flags); 431 dbg("Throttling noticed");
420 list_add(&buf->list, &acm->filled_read_bufs); 432 spin_lock_irqsave(&acm->read_lock, flags);
421 spin_unlock_irqrestore(&acm->read_lock, flags); 433 list_add(&buf->list, &acm->filled_read_bufs);
422 return; 434 spin_unlock_irqrestore(&acm->read_lock, flags);
435 return;
436 }
423 } 437 }
424 438
425 spin_lock_irqsave(&acm->read_lock, flags); 439 spin_lock_irqsave(&acm->read_lock, flags);
@@ -428,6 +442,8 @@ next_buffer:
428 goto next_buffer; 442 goto next_buffer;
429 443
430urbs: 444urbs:
445 tty_kref_put(tty);
446
431 while (!list_empty(&acm->spare_read_bufs)) { 447 while (!list_empty(&acm->spare_read_bufs)) {
432 spin_lock_irqsave(&acm->read_lock, flags); 448 spin_lock_irqsave(&acm->read_lock, flags);
433 if (list_empty(&acm->spare_read_urbs)) { 449 if (list_empty(&acm->spare_read_urbs)) {
@@ -454,10 +470,11 @@ urbs:
454 rcv->urb->transfer_dma = buf->dma; 470 rcv->urb->transfer_dma = buf->dma;
455 rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 471 rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
456 472
457 /* This shouldn't kill the driver as unsuccessful URBs are returned to the 473 /* This shouldn't kill the driver as unsuccessful URBs are
458 free-urbs-pool and resubmited ASAP */ 474 returned to the free-urbs-pool and resubmited ASAP */
459 spin_lock_irqsave(&acm->read_lock, flags); 475 spin_lock_irqsave(&acm->read_lock, flags);
460 if (acm->susp_count || usb_submit_urb(rcv->urb, GFP_ATOMIC) < 0) { 476 if (acm->susp_count ||
477 usb_submit_urb(rcv->urb, GFP_ATOMIC) < 0) {
461 list_add(&buf->list, &acm->spare_read_bufs); 478 list_add(&buf->list, &acm->spare_read_bufs);
462 list_add(&rcv->list, &acm->spare_read_urbs); 479 list_add(&rcv->list, &acm->spare_read_urbs);
463 acm->processing = 0; 480 acm->processing = 0;
@@ -499,11 +516,14 @@ static void acm_write_bulk(struct urb *urb)
499static void acm_softint(struct work_struct *work) 516static void acm_softint(struct work_struct *work)
500{ 517{
501 struct acm *acm = container_of(work, struct acm, work); 518 struct acm *acm = container_of(work, struct acm, work);
519 struct tty_struct *tty;
502 520
503 dev_vdbg(&acm->data->dev, "tx work\n"); 521 dev_vdbg(&acm->data->dev, "tx work\n");
504 if (!ACM_READY(acm)) 522 if (!ACM_READY(acm))
505 return; 523 return;
506 tty_wakeup(acm->tty); 524 tty = tty_port_tty_get(&acm->port);
525 tty_wakeup(tty);
526 tty_kref_put(tty);
507} 527}
508 528
509static void acm_waker(struct work_struct *waker) 529static void acm_waker(struct work_struct *waker)
@@ -543,8 +563,9 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
543 rv = 0; 563 rv = 0;
544 564
545 set_bit(TTY_NO_WRITE_SPLIT, &tty->flags); 565 set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
566
546 tty->driver_data = acm; 567 tty->driver_data = acm;
547 acm->tty = tty; 568 tty_port_tty_set(&acm->port, tty);
548 569
549 if (usb_autopm_get_interface(acm->control) < 0) 570 if (usb_autopm_get_interface(acm->control) < 0)
550 goto early_bail; 571 goto early_bail;
@@ -552,11 +573,10 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
552 acm->control->needs_remote_wakeup = 1; 573 acm->control->needs_remote_wakeup = 1;
553 574
554 mutex_lock(&acm->mutex); 575 mutex_lock(&acm->mutex);
555 if (acm->used++) { 576 if (acm->port.count++) {
556 usb_autopm_put_interface(acm->control); 577 usb_autopm_put_interface(acm->control);
557 goto done; 578 goto done;
558 } 579 }
559
560 580
561 acm->ctrlurb->dev = acm->dev; 581 acm->ctrlurb->dev = acm->dev;
562 if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) { 582 if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
@@ -567,22 +587,22 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
567 if (0 > acm_set_control(acm, acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS) && 587 if (0 > acm_set_control(acm, acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS) &&
568 (acm->ctrl_caps & USB_CDC_CAP_LINE)) 588 (acm->ctrl_caps & USB_CDC_CAP_LINE))
569 goto full_bailout; 589 goto full_bailout;
590
570 usb_autopm_put_interface(acm->control); 591 usb_autopm_put_interface(acm->control);
571 592
572 INIT_LIST_HEAD(&acm->spare_read_urbs); 593 INIT_LIST_HEAD(&acm->spare_read_urbs);
573 INIT_LIST_HEAD(&acm->spare_read_bufs); 594 INIT_LIST_HEAD(&acm->spare_read_bufs);
574 INIT_LIST_HEAD(&acm->filled_read_bufs); 595 INIT_LIST_HEAD(&acm->filled_read_bufs);
575 for (i = 0; i < acm->rx_buflimit; i++) { 596
597 for (i = 0; i < acm->rx_buflimit; i++)
576 list_add(&(acm->ru[i].list), &acm->spare_read_urbs); 598 list_add(&(acm->ru[i].list), &acm->spare_read_urbs);
577 } 599 for (i = 0; i < acm->rx_buflimit; i++)
578 for (i = 0; i < acm->rx_buflimit; i++) {
579 list_add(&(acm->rb[i].list), &acm->spare_read_bufs); 600 list_add(&(acm->rb[i].list), &acm->spare_read_bufs);
580 }
581 601
582 acm->throttle = 0; 602 acm->throttle = 0;
583 603
584 tasklet_schedule(&acm->urb_task); 604 tasklet_schedule(&acm->urb_task);
585 605 rv = tty_port_block_til_ready(&acm->port, tty, filp);
586done: 606done:
587 mutex_unlock(&acm->mutex); 607 mutex_unlock(&acm->mutex);
588err_out: 608err_out:
@@ -593,16 +613,17 @@ full_bailout:
593 usb_kill_urb(acm->ctrlurb); 613 usb_kill_urb(acm->ctrlurb);
594bail_out: 614bail_out:
595 usb_autopm_put_interface(acm->control); 615 usb_autopm_put_interface(acm->control);
596 acm->used--; 616 acm->port.count--;
597 mutex_unlock(&acm->mutex); 617 mutex_unlock(&acm->mutex);
598early_bail: 618early_bail:
599 mutex_unlock(&open_mutex); 619 mutex_unlock(&open_mutex);
620 tty_port_tty_set(&acm->port, NULL);
600 return -EIO; 621 return -EIO;
601} 622}
602 623
603static void acm_tty_unregister(struct acm *acm) 624static void acm_tty_unregister(struct acm *acm)
604{ 625{
605 int i,nr; 626 int i, nr;
606 627
607 nr = acm->rx_buflimit; 628 nr = acm->rx_buflimit;
608 tty_unregister_device(acm_tty_driver, acm->minor); 629 tty_unregister_device(acm_tty_driver, acm->minor);
@@ -619,41 +640,56 @@ static void acm_tty_unregister(struct acm *acm)
619 640
620static int acm_tty_chars_in_buffer(struct tty_struct *tty); 641static int acm_tty_chars_in_buffer(struct tty_struct *tty);
621 642
643static void acm_port_down(struct acm *acm, int drain)
644{
645 int i, nr = acm->rx_buflimit;
646 mutex_lock(&open_mutex);
647 if (acm->dev) {
648 usb_autopm_get_interface(acm->control);
649 acm_set_control(acm, acm->ctrlout = 0);
650 /* try letting the last writes drain naturally */
651 if (drain) {
652 wait_event_interruptible_timeout(acm->drain_wait,
653 (ACM_NW == acm_wb_is_avail(acm)) || !acm->dev,
654 ACM_CLOSE_TIMEOUT * HZ);
655 }
656 usb_kill_urb(acm->ctrlurb);
657 for (i = 0; i < ACM_NW; i++)
658 usb_kill_urb(acm->wb[i].urb);
659 for (i = 0; i < nr; i++)
660 usb_kill_urb(acm->ru[i].urb);
661 acm->control->needs_remote_wakeup = 0;
662 usb_autopm_put_interface(acm->control);
663 }
664 mutex_unlock(&open_mutex);
665}
666
667static void acm_tty_hangup(struct tty_struct *tty)
668{
669 struct acm *acm = tty->driver_data;
670 tty_port_hangup(&acm->port);
671 acm_port_down(acm, 0);
672}
673
622static void acm_tty_close(struct tty_struct *tty, struct file *filp) 674static void acm_tty_close(struct tty_struct *tty, struct file *filp)
623{ 675{
624 struct acm *acm = tty->driver_data; 676 struct acm *acm = tty->driver_data;
625 int i,nr;
626 677
627 if (!acm || !acm->used) 678 /* Perform the closing process and see if we need to do the hardware
679 shutdown */
680 if (tty_port_close_start(&acm->port, tty, filp) == 0)
628 return; 681 return;
629 682 acm_port_down(acm, 0);
630 nr = acm->rx_buflimit; 683 tty_port_close_end(&acm->port, tty);
631 mutex_lock(&open_mutex); 684 mutex_lock(&open_mutex);
632 if (!--acm->used) { 685 tty_port_tty_set(&acm->port, NULL);
633 if (acm->dev) { 686 if (!acm->dev)
634 usb_autopm_get_interface(acm->control); 687 acm_tty_unregister(acm);
635 acm_set_control(acm, acm->ctrlout = 0);
636
637 /* try letting the last writes drain naturally */
638 wait_event_interruptible_timeout(acm->drain_wait,
639 (ACM_NW == acm_wb_is_avail(acm))
640 || !acm->dev,
641 ACM_CLOSE_TIMEOUT * HZ);
642
643 usb_kill_urb(acm->ctrlurb);
644 for (i = 0; i < ACM_NW; i++)
645 usb_kill_urb(acm->wb[i].urb);
646 for (i = 0; i < nr; i++)
647 usb_kill_urb(acm->ru[i].urb);
648 acm->control->needs_remote_wakeup = 0;
649 usb_autopm_put_interface(acm->control);
650 } else
651 acm_tty_unregister(acm);
652 }
653 mutex_unlock(&open_mutex); 688 mutex_unlock(&open_mutex);
654} 689}
655 690
656static int acm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) 691static int acm_tty_write(struct tty_struct *tty,
692 const unsigned char *buf, int count)
657{ 693{
658 struct acm *acm = tty->driver_data; 694 struct acm *acm = tty->driver_data;
659 int stat; 695 int stat;
@@ -669,7 +705,8 @@ static int acm_tty_write(struct tty_struct *tty, const unsigned char *buf, int c
669 return 0; 705 return 0;
670 706
671 spin_lock_irqsave(&acm->write_lock, flags); 707 spin_lock_irqsave(&acm->write_lock, flags);
672 if ((wbn = acm_wb_alloc(acm)) < 0) { 708 wbn = acm_wb_alloc(acm);
709 if (wbn < 0) {
673 spin_unlock_irqrestore(&acm->write_lock, flags); 710 spin_unlock_irqrestore(&acm->write_lock, flags);
674 return 0; 711 return 0;
675 } 712 }
@@ -681,7 +718,8 @@ static int acm_tty_write(struct tty_struct *tty, const unsigned char *buf, int c
681 wb->len = count; 718 wb->len = count;
682 spin_unlock_irqrestore(&acm->write_lock, flags); 719 spin_unlock_irqrestore(&acm->write_lock, flags);
683 720
684 if ((stat = acm_write_start(acm, wbn)) < 0) 721 stat = acm_write_start(acm, wbn);
722 if (stat < 0)
685 return stat; 723 return stat;
686 return count; 724 return count;
687} 725}
@@ -767,8 +805,10 @@ static int acm_tty_tiocmset(struct tty_struct *tty, struct file *file,
767 return -EINVAL; 805 return -EINVAL;
768 806
769 newctrl = acm->ctrlout; 807 newctrl = acm->ctrlout;
770 set = (set & TIOCM_DTR ? ACM_CTRL_DTR : 0) | (set & TIOCM_RTS ? ACM_CTRL_RTS : 0); 808 set = (set & TIOCM_DTR ? ACM_CTRL_DTR : 0) |
771 clear = (clear & TIOCM_DTR ? ACM_CTRL_DTR : 0) | (clear & TIOCM_RTS ? ACM_CTRL_RTS : 0); 809 (set & TIOCM_RTS ? ACM_CTRL_RTS : 0);
810 clear = (clear & TIOCM_DTR ? ACM_CTRL_DTR : 0) |
811 (clear & TIOCM_RTS ? ACM_CTRL_RTS : 0);
772 812
773 newctrl = (newctrl & ~clear) | set; 813 newctrl = (newctrl & ~clear) | set;
774 814
@@ -777,7 +817,8 @@ static int acm_tty_tiocmset(struct tty_struct *tty, struct file *file,
777 return acm_set_control(acm, acm->ctrlout = newctrl); 817 return acm_set_control(acm, acm->ctrlout = newctrl);
778} 818}
779 819
780static int acm_tty_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg) 820static int acm_tty_ioctl(struct tty_struct *tty, struct file *file,
821 unsigned int cmd, unsigned long arg)
781{ 822{
782 struct acm *acm = tty->driver_data; 823 struct acm *acm = tty->driver_data;
783 824
@@ -799,7 +840,8 @@ static const __u8 acm_tty_size[] = {
799 5, 6, 7, 8 840 5, 6, 7, 8
800}; 841};
801 842
802static void acm_tty_set_termios(struct tty_struct *tty, struct ktermios *termios_old) 843static void acm_tty_set_termios(struct tty_struct *tty,
844 struct ktermios *termios_old)
803{ 845{
804 struct acm *acm = tty->driver_data; 846 struct acm *acm = tty->driver_data;
805 struct ktermios *termios = tty->termios; 847 struct ktermios *termios = tty->termios;
@@ -809,19 +851,23 @@ static void acm_tty_set_termios(struct tty_struct *tty, struct ktermios *termios
809 if (!ACM_READY(acm)) 851 if (!ACM_READY(acm))
810 return; 852 return;
811 853
854 /* FIXME: Needs to support the tty_baud interface */
855 /* FIXME: Broken on sparc */
812 newline.dwDTERate = cpu_to_le32p(acm_tty_speed + 856 newline.dwDTERate = cpu_to_le32p(acm_tty_speed +
813 (termios->c_cflag & CBAUD & ~CBAUDEX) + (termios->c_cflag & CBAUDEX ? 15 : 0)); 857 (termios->c_cflag & CBAUD & ~CBAUDEX) + (termios->c_cflag & CBAUDEX ? 15 : 0));
814 newline.bCharFormat = termios->c_cflag & CSTOPB ? 2 : 0; 858 newline.bCharFormat = termios->c_cflag & CSTOPB ? 2 : 0;
815 newline.bParityType = termios->c_cflag & PARENB ? 859 newline.bParityType = termios->c_cflag & PARENB ?
816 (termios->c_cflag & PARODD ? 1 : 2) + (termios->c_cflag & CMSPAR ? 2 : 0) : 0; 860 (termios->c_cflag & PARODD ? 1 : 2) +
861 (termios->c_cflag & CMSPAR ? 2 : 0) : 0;
817 newline.bDataBits = acm_tty_size[(termios->c_cflag & CSIZE) >> 4]; 862 newline.bDataBits = acm_tty_size[(termios->c_cflag & CSIZE) >> 4];
818 863 /* FIXME: Needs to clear unsupported bits in the termios */
819 acm->clocal = ((termios->c_cflag & CLOCAL) != 0); 864 acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
820 865
821 if (!newline.dwDTERate) { 866 if (!newline.dwDTERate) {
822 newline.dwDTERate = acm->line.dwDTERate; 867 newline.dwDTERate = acm->line.dwDTERate;
823 newctrl &= ~ACM_CTRL_DTR; 868 newctrl &= ~ACM_CTRL_DTR;
824 } else newctrl |= ACM_CTRL_DTR; 869 } else
870 newctrl |= ACM_CTRL_DTR;
825 871
826 if (newctrl != acm->ctrlout) 872 if (newctrl != acm->ctrlout)
827 acm_set_control(acm, acm->ctrlout = newctrl); 873 acm_set_control(acm, acm->ctrlout = newctrl);
@@ -846,9 +892,8 @@ static void acm_write_buffers_free(struct acm *acm)
846 struct acm_wb *wb; 892 struct acm_wb *wb;
847 struct usb_device *usb_dev = interface_to_usbdev(acm->control); 893 struct usb_device *usb_dev = interface_to_usbdev(acm->control);
848 894
849 for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) { 895 for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++)
850 usb_buffer_free(usb_dev, acm->writesize, wb->buf, wb->dmah); 896 usb_buffer_free(usb_dev, acm->writesize, wb->buf, wb->dmah);
851 }
852} 897}
853 898
854static void acm_read_buffers_free(struct acm *acm) 899static void acm_read_buffers_free(struct acm *acm)
@@ -857,7 +902,8 @@ static void acm_read_buffers_free(struct acm *acm)
857 int i, n = acm->rx_buflimit; 902 int i, n = acm->rx_buflimit;
858 903
859 for (i = 0; i < n; i++) 904 for (i = 0; i < n; i++)
860 usb_buffer_free(usb_dev, acm->readsize, acm->rb[i].base, acm->rb[i].dma); 905 usb_buffer_free(usb_dev, acm->readsize,
906 acm->rb[i].base, acm->rb[i].dma);
861} 907}
862 908
863/* Little helper: write buffers allocate */ 909/* Little helper: write buffers allocate */
@@ -882,8 +928,8 @@ static int acm_write_buffers_alloc(struct acm *acm)
882 return 0; 928 return 0;
883} 929}
884 930
885static int acm_probe (struct usb_interface *intf, 931static int acm_probe(struct usb_interface *intf,
886 const struct usb_device_id *id) 932 const struct usb_device_id *id)
887{ 933{
888 struct usb_cdc_union_desc *union_header = NULL; 934 struct usb_cdc_union_desc *union_header = NULL;
889 struct usb_cdc_country_functional_desc *cfd = NULL; 935 struct usb_cdc_country_functional_desc *cfd = NULL;
@@ -897,7 +943,7 @@ static int acm_probe (struct usb_interface *intf,
897 struct usb_device *usb_dev = interface_to_usbdev(intf); 943 struct usb_device *usb_dev = interface_to_usbdev(intf);
898 struct acm *acm; 944 struct acm *acm;
899 int minor; 945 int minor;
900 int ctrlsize,readsize; 946 int ctrlsize, readsize;
901 u8 *buf; 947 u8 *buf;
902 u8 ac_management_function = 0; 948 u8 ac_management_function = 0;
903 u8 call_management_function = 0; 949 u8 call_management_function = 0;
@@ -917,7 +963,7 @@ static int acm_probe (struct usb_interface *intf,
917 control_interface = usb_ifnum_to_if(usb_dev, 0); 963 control_interface = usb_ifnum_to_if(usb_dev, 0);
918 goto skip_normal_probe; 964 goto skip_normal_probe;
919 } 965 }
920 966
921 /* normal probing*/ 967 /* normal probing*/
922 if (!buffer) { 968 if (!buffer) {
923 dev_err(&intf->dev, "Weird descriptor references\n"); 969 dev_err(&intf->dev, "Weird descriptor references\n");
@@ -925,8 +971,10 @@ static int acm_probe (struct usb_interface *intf,
925 } 971 }
926 972
927 if (!buflen) { 973 if (!buflen) {
928 if (intf->cur_altsetting->endpoint->extralen && intf->cur_altsetting->endpoint->extra) { 974 if (intf->cur_altsetting->endpoint->extralen &&
929 dev_dbg(&intf->dev,"Seeking extra descriptors on endpoint\n"); 975 intf->cur_altsetting->endpoint->extra) {
976 dev_dbg(&intf->dev,
977 "Seeking extra descriptors on endpoint\n");
930 buflen = intf->cur_altsetting->endpoint->extralen; 978 buflen = intf->cur_altsetting->endpoint->extralen;
931 buffer = intf->cur_altsetting->endpoint->extra; 979 buffer = intf->cur_altsetting->endpoint->extra;
932 } else { 980 } else {
@@ -937,47 +985,43 @@ static int acm_probe (struct usb_interface *intf,
937 } 985 }
938 986
939 while (buflen > 0) { 987 while (buflen > 0) {
940 if (buffer [1] != USB_DT_CS_INTERFACE) { 988 if (buffer[1] != USB_DT_CS_INTERFACE) {
941 dev_err(&intf->dev, "skipping garbage\n"); 989 dev_err(&intf->dev, "skipping garbage\n");
942 goto next_desc; 990 goto next_desc;
943 } 991 }
944 992
945 switch (buffer [2]) { 993 switch (buffer[2]) {
946 case USB_CDC_UNION_TYPE: /* we've found it */ 994 case USB_CDC_UNION_TYPE: /* we've found it */
947 if (union_header) { 995 if (union_header) {
948 dev_err(&intf->dev, "More than one " 996 dev_err(&intf->dev, "More than one "
949 "union descriptor, " 997 "union descriptor, skipping ...\n");
950 "skipping ...\n"); 998 goto next_desc;
951 goto next_desc;
952 }
953 union_header = (struct usb_cdc_union_desc *)
954 buffer;
955 break;
956 case USB_CDC_COUNTRY_TYPE: /* export through sysfs*/
957 cfd = (struct usb_cdc_country_functional_desc *)buffer;
958 break;
959 case USB_CDC_HEADER_TYPE: /* maybe check version */
960 break; /* for now we ignore it */
961 case USB_CDC_ACM_TYPE:
962 ac_management_function = buffer[3];
963 break;
964 case USB_CDC_CALL_MANAGEMENT_TYPE:
965 call_management_function = buffer[3];
966 call_interface_num = buffer[4];
967 if ((call_management_function & 3) != 3)
968 dev_err(&intf->dev, "This device "
969 "cannot do calls on its own. "
970 "It is no modem.\n");
971 break;
972 default:
973 /* there are LOTS more CDC descriptors that
974 * could legitimately be found here.
975 */
976 dev_dbg(&intf->dev, "Ignoring descriptor: "
977 "type %02x, length %d\n",
978 buffer[2], buffer[0]);
979 break;
980 } 999 }
1000 union_header = (struct usb_cdc_union_desc *)buffer;
1001 break;
1002 case USB_CDC_COUNTRY_TYPE: /* export through sysfs*/
1003 cfd = (struct usb_cdc_country_functional_desc *)buffer;
1004 break;
1005 case USB_CDC_HEADER_TYPE: /* maybe check version */
1006 break; /* for now we ignore it */
1007 case USB_CDC_ACM_TYPE:
1008 ac_management_function = buffer[3];
1009 break;
1010 case USB_CDC_CALL_MANAGEMENT_TYPE:
1011 call_management_function = buffer[3];
1012 call_interface_num = buffer[4];
1013 if ((call_management_function & 3) != 3)
1014 dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n");
1015 break;
1016 default:
1017 /* there are LOTS more CDC descriptors that
1018 * could legitimately be found here.
1019 */
1020 dev_dbg(&intf->dev, "Ignoring descriptor: "
1021 "type %02x, length %d\n",
1022 buffer[2], buffer[0]);
1023 break;
1024 }
981next_desc: 1025next_desc:
982 buflen -= buffer[0]; 1026 buflen -= buffer[0];
983 buffer += buffer[0]; 1027 buffer += buffer[0];
@@ -985,33 +1029,36 @@ next_desc:
985 1029
986 if (!union_header) { 1030 if (!union_header) {
987 if (call_interface_num > 0) { 1031 if (call_interface_num > 0) {
988 dev_dbg(&intf->dev,"No union descriptor, using call management descriptor\n"); 1032 dev_dbg(&intf->dev, "No union descriptor, using call management descriptor\n");
989 data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num)); 1033 data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num));
990 control_interface = intf; 1034 control_interface = intf;
991 } else { 1035 } else {
992 dev_dbg(&intf->dev,"No union descriptor, giving up\n"); 1036 dev_dbg(&intf->dev,
1037 "No union descriptor, giving up\n");
993 return -ENODEV; 1038 return -ENODEV;
994 } 1039 }
995 } else { 1040 } else {
996 control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0); 1041 control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
997 data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0)); 1042 data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0));
998 if (!control_interface || !data_interface) { 1043 if (!control_interface || !data_interface) {
999 dev_dbg(&intf->dev,"no interfaces\n"); 1044 dev_dbg(&intf->dev, "no interfaces\n");
1000 return -ENODEV; 1045 return -ENODEV;
1001 } 1046 }
1002 } 1047 }
1003 1048
1004 if (data_interface_num != call_interface_num) 1049 if (data_interface_num != call_interface_num)
1005 dev_dbg(&intf->dev,"Separate call control interface. That is not fully supported.\n"); 1050 dev_dbg(&intf->dev, "Separate call control interface. That is not fully supported.\n");
1006 1051
1007skip_normal_probe: 1052skip_normal_probe:
1008 1053
1009 /*workaround for switched interfaces */ 1054 /*workaround for switched interfaces */
1010 if (data_interface->cur_altsetting->desc.bInterfaceClass != CDC_DATA_INTERFACE_TYPE) { 1055 if (data_interface->cur_altsetting->desc.bInterfaceClass
1011 if (control_interface->cur_altsetting->desc.bInterfaceClass == CDC_DATA_INTERFACE_TYPE) { 1056 != CDC_DATA_INTERFACE_TYPE) {
1057 if (control_interface->cur_altsetting->desc.bInterfaceClass
1058 == CDC_DATA_INTERFACE_TYPE) {
1012 struct usb_interface *t; 1059 struct usb_interface *t;
1013 dev_dbg(&intf->dev,"Your device has switched interfaces.\n"); 1060 dev_dbg(&intf->dev,
1014 1061 "Your device has switched interfaces.\n");
1015 t = control_interface; 1062 t = control_interface;
1016 control_interface = data_interface; 1063 control_interface = data_interface;
1017 data_interface = t; 1064 data_interface = t;
@@ -1023,9 +1070,9 @@ skip_normal_probe:
1023 /* Accept probe requests only for the control interface */ 1070 /* Accept probe requests only for the control interface */
1024 if (intf != control_interface) 1071 if (intf != control_interface)
1025 return -ENODEV; 1072 return -ENODEV;
1026 1073
1027 if (usb_interface_claimed(data_interface)) { /* valid in this context */ 1074 if (usb_interface_claimed(data_interface)) { /* valid in this context */
1028 dev_dbg(&intf->dev,"The data interface isn't available\n"); 1075 dev_dbg(&intf->dev, "The data interface isn't available\n");
1029 return -EBUSY; 1076 return -EBUSY;
1030 } 1077 }
1031 1078
@@ -1042,8 +1089,8 @@ skip_normal_probe:
1042 if (!usb_endpoint_dir_in(epread)) { 1089 if (!usb_endpoint_dir_in(epread)) {
1043 /* descriptors are swapped */ 1090 /* descriptors are swapped */
1044 struct usb_endpoint_descriptor *t; 1091 struct usb_endpoint_descriptor *t;
1045 dev_dbg(&intf->dev,"The data interface has switched endpoints\n"); 1092 dev_dbg(&intf->dev,
1046 1093 "The data interface has switched endpoints\n");
1047 t = epread; 1094 t = epread;
1048 epread = epwrite; 1095 epread = epwrite;
1049 epwrite = t; 1096 epwrite = t;
@@ -1056,13 +1103,15 @@ skip_normal_probe:
1056 return -ENODEV; 1103 return -ENODEV;
1057 } 1104 }
1058 1105
1059 if (!(acm = kzalloc(sizeof(struct acm), GFP_KERNEL))) { 1106 acm = kzalloc(sizeof(struct acm), GFP_KERNEL);
1107 if (acm == NULL) {
1060 dev_dbg(&intf->dev, "out of memory (acm kzalloc)\n"); 1108 dev_dbg(&intf->dev, "out of memory (acm kzalloc)\n");
1061 goto alloc_fail; 1109 goto alloc_fail;
1062 } 1110 }
1063 1111
1064 ctrlsize = le16_to_cpu(epctrl->wMaxPacketSize); 1112 ctrlsize = le16_to_cpu(epctrl->wMaxPacketSize);
1065 readsize = le16_to_cpu(epread->wMaxPacketSize)* ( quirks == SINGLE_RX_URB ? 1 : 2); 1113 readsize = le16_to_cpu(epread->wMaxPacketSize) *
1114 (quirks == SINGLE_RX_URB ? 1 : 2);
1066 acm->writesize = le16_to_cpu(epwrite->wMaxPacketSize) * 20; 1115 acm->writesize = le16_to_cpu(epwrite->wMaxPacketSize) * 20;
1067 acm->control = control_interface; 1116 acm->control = control_interface;
1068 acm->data = data_interface; 1117 acm->data = data_interface;
@@ -1082,6 +1131,8 @@ skip_normal_probe:
1082 spin_lock_init(&acm->read_lock); 1131 spin_lock_init(&acm->read_lock);
1083 mutex_init(&acm->mutex); 1132 mutex_init(&acm->mutex);
1084 acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); 1133 acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
1134 tty_port_init(&acm->port);
1135 acm->port.ops = &acm_port_ops;
1085 1136
1086 buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma); 1137 buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
1087 if (!buf) { 1138 if (!buf) {
@@ -1103,8 +1154,10 @@ skip_normal_probe:
1103 for (i = 0; i < num_rx_buf; i++) { 1154 for (i = 0; i < num_rx_buf; i++) {
1104 struct acm_ru *rcv = &(acm->ru[i]); 1155 struct acm_ru *rcv = &(acm->ru[i]);
1105 1156
1106 if (!(rcv->urb = usb_alloc_urb(0, GFP_KERNEL))) { 1157 rcv->urb = usb_alloc_urb(0, GFP_KERNEL);
1107 dev_dbg(&intf->dev, "out of memory (read urbs usb_alloc_urb)\n"); 1158 if (rcv->urb == NULL) {
1159 dev_dbg(&intf->dev,
1160 "out of memory (read urbs usb_alloc_urb)\n");
1108 goto alloc_fail7; 1161 goto alloc_fail7;
1109 } 1162 }
1110 1163
@@ -1117,26 +1170,29 @@ skip_normal_probe:
1117 rb->base = usb_buffer_alloc(acm->dev, readsize, 1170 rb->base = usb_buffer_alloc(acm->dev, readsize,
1118 GFP_KERNEL, &rb->dma); 1171 GFP_KERNEL, &rb->dma);
1119 if (!rb->base) { 1172 if (!rb->base) {
1120 dev_dbg(&intf->dev, "out of memory (read bufs usb_buffer_alloc)\n"); 1173 dev_dbg(&intf->dev,
1174 "out of memory (read bufs usb_buffer_alloc)\n");
1121 goto alloc_fail7; 1175 goto alloc_fail7;
1122 } 1176 }
1123 } 1177 }
1124 for(i = 0; i < ACM_NW; i++) 1178 for (i = 0; i < ACM_NW; i++) {
1125 {
1126 struct acm_wb *snd = &(acm->wb[i]); 1179 struct acm_wb *snd = &(acm->wb[i]);
1127 1180
1128 if (!(snd->urb = usb_alloc_urb(0, GFP_KERNEL))) { 1181 snd->urb = usb_alloc_urb(0, GFP_KERNEL);
1129 dev_dbg(&intf->dev, "out of memory (write urbs usb_alloc_urb)"); 1182 if (snd->urb == NULL) {
1183 dev_dbg(&intf->dev,
1184 "out of memory (write urbs usb_alloc_urb)");
1130 goto alloc_fail7; 1185 goto alloc_fail7;
1131 } 1186 }
1132 1187
1133 usb_fill_bulk_urb(snd->urb, usb_dev, usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress), 1188 usb_fill_bulk_urb(snd->urb, usb_dev,
1134 NULL, acm->writesize, acm_write_bulk, snd); 1189 usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
1190 NULL, acm->writesize, acm_write_bulk, snd);
1135 snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 1191 snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1136 snd->instance = acm; 1192 snd->instance = acm;
1137 } 1193 }
1138 1194
1139 usb_set_intfdata (intf, acm); 1195 usb_set_intfdata(intf, acm);
1140 1196
1141 i = device_create_file(&intf->dev, &dev_attr_bmCapabilities); 1197 i = device_create_file(&intf->dev, &dev_attr_bmCapabilities);
1142 if (i < 0) 1198 if (i < 0)
@@ -1147,7 +1203,8 @@ skip_normal_probe:
1147 if (!acm->country_codes) 1203 if (!acm->country_codes)
1148 goto skip_countries; 1204 goto skip_countries;
1149 acm->country_code_size = cfd->bLength - 4; 1205 acm->country_code_size = cfd->bLength - 4;
1150 memcpy(acm->country_codes, (u8 *)&cfd->wCountyCode0, cfd->bLength - 4); 1206 memcpy(acm->country_codes, (u8 *)&cfd->wCountyCode0,
1207 cfd->bLength - 4);
1151 acm->country_rel_date = cfd->iCountryCodeRelDate; 1208 acm->country_rel_date = cfd->iCountryCodeRelDate;
1152 1209
1153 i = device_create_file(&intf->dev, &dev_attr_wCountryCodes); 1210 i = device_create_file(&intf->dev, &dev_attr_wCountryCodes);
@@ -1156,7 +1213,8 @@ skip_normal_probe:
1156 goto skip_countries; 1213 goto skip_countries;
1157 } 1214 }
1158 1215
1159 i = device_create_file(&intf->dev, &dev_attr_iCountryCodeRelDate); 1216 i = device_create_file(&intf->dev,
1217 &dev_attr_iCountryCodeRelDate);
1160 if (i < 0) { 1218 if (i < 0) {
1161 kfree(acm->country_codes); 1219 kfree(acm->country_codes);
1162 goto skip_countries; 1220 goto skip_countries;
@@ -1164,8 +1222,10 @@ skip_normal_probe:
1164 } 1222 }
1165 1223
1166skip_countries: 1224skip_countries:
1167 usb_fill_int_urb(acm->ctrlurb, usb_dev, usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress), 1225 usb_fill_int_urb(acm->ctrlurb, usb_dev,
1168 acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm, epctrl->bInterval); 1226 usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress),
1227 acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm,
1228 epctrl->bInterval);
1169 acm->ctrlurb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 1229 acm->ctrlurb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1170 acm->ctrlurb->transfer_dma = acm->ctrl_dma; 1230 acm->ctrlurb->transfer_dma = acm->ctrl_dma;
1171 1231
@@ -1212,7 +1272,7 @@ static void stop_data_traffic(struct acm *acm)
1212 tasklet_disable(&acm->urb_task); 1272 tasklet_disable(&acm->urb_task);
1213 1273
1214 usb_kill_urb(acm->ctrlurb); 1274 usb_kill_urb(acm->ctrlurb);
1215 for(i = 0; i < ACM_NW; i++) 1275 for (i = 0; i < ACM_NW; i++)
1216 usb_kill_urb(acm->wb[i].urb); 1276 usb_kill_urb(acm->wb[i].urb);
1217 for (i = 0; i < acm->rx_buflimit; i++) 1277 for (i = 0; i < acm->rx_buflimit; i++)
1218 usb_kill_urb(acm->ru[i].urb); 1278 usb_kill_urb(acm->ru[i].urb);
@@ -1227,13 +1287,14 @@ static void acm_disconnect(struct usb_interface *intf)
1227{ 1287{
1228 struct acm *acm = usb_get_intfdata(intf); 1288 struct acm *acm = usb_get_intfdata(intf);
1229 struct usb_device *usb_dev = interface_to_usbdev(intf); 1289 struct usb_device *usb_dev = interface_to_usbdev(intf);
1290 struct tty_struct *tty;
1230 1291
1231 /* sibling interface is already cleaning up */ 1292 /* sibling interface is already cleaning up */
1232 if (!acm) 1293 if (!acm)
1233 return; 1294 return;
1234 1295
1235 mutex_lock(&open_mutex); 1296 mutex_lock(&open_mutex);
1236 if (acm->country_codes){ 1297 if (acm->country_codes) {
1237 device_remove_file(&acm->control->dev, 1298 device_remove_file(&acm->control->dev,
1238 &dev_attr_wCountryCodes); 1299 &dev_attr_wCountryCodes);
1239 device_remove_file(&acm->control->dev, 1300 device_remove_file(&acm->control->dev,
@@ -1247,22 +1308,25 @@ static void acm_disconnect(struct usb_interface *intf)
1247 stop_data_traffic(acm); 1308 stop_data_traffic(acm);
1248 1309
1249 acm_write_buffers_free(acm); 1310 acm_write_buffers_free(acm);
1250 usb_buffer_free(usb_dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma); 1311 usb_buffer_free(usb_dev, acm->ctrlsize, acm->ctrl_buffer,
1312 acm->ctrl_dma);
1251 acm_read_buffers_free(acm); 1313 acm_read_buffers_free(acm);
1252 1314
1253 usb_driver_release_interface(&acm_driver, intf == acm->control ? 1315 usb_driver_release_interface(&acm_driver, intf == acm->control ?
1254 acm->data : acm->control); 1316 acm->data : acm->control);
1255 1317
1256 if (!acm->used) { 1318 if (acm->port.count == 0) {
1257 acm_tty_unregister(acm); 1319 acm_tty_unregister(acm);
1258 mutex_unlock(&open_mutex); 1320 mutex_unlock(&open_mutex);
1259 return; 1321 return;
1260 } 1322 }
1261 1323
1262 mutex_unlock(&open_mutex); 1324 mutex_unlock(&open_mutex);
1263 1325 tty = tty_port_tty_get(&acm->port);
1264 if (acm->tty) 1326 if (tty) {
1265 tty_hangup(acm->tty); 1327 tty_hangup(tty);
1328 tty_kref_put(tty);
1329 }
1266} 1330}
1267 1331
1268#ifdef CONFIG_PM 1332#ifdef CONFIG_PM
@@ -1297,7 +1361,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
1297 */ 1361 */
1298 mutex_lock(&acm->mutex); 1362 mutex_lock(&acm->mutex);
1299 1363
1300 if (acm->used) 1364 if (acm->port.count)
1301 stop_data_traffic(acm); 1365 stop_data_traffic(acm);
1302 1366
1303 mutex_unlock(&acm->mutex); 1367 mutex_unlock(&acm->mutex);
@@ -1319,7 +1383,7 @@ static int acm_resume(struct usb_interface *intf)
1319 return 0; 1383 return 0;
1320 1384
1321 mutex_lock(&acm->mutex); 1385 mutex_lock(&acm->mutex);
1322 if (acm->used) { 1386 if (acm->port.count) {
1323 rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO); 1387 rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
1324 if (rv < 0) 1388 if (rv < 0)
1325 goto err_out; 1389 goto err_out;
@@ -1375,6 +1439,9 @@ static struct usb_device_id acm_ids[] = {
1375 { USB_DEVICE(0x0572, 0x1324), /* Conexant USB MODEM RD02-D400 */ 1439 { USB_DEVICE(0x0572, 0x1324), /* Conexant USB MODEM RD02-D400 */
1376 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1440 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1377 }, 1441 },
1442 { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
1443 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1444 },
1378 { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */ 1445 { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
1379 }, 1446 },
1380 { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */ 1447 { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
@@ -1395,7 +1462,7 @@ static struct usb_device_id acm_ids[] = {
1395 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1462 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
1396 USB_CDC_ACM_PROTO_AT_GSM) }, 1463 USB_CDC_ACM_PROTO_AT_GSM) },
1397 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1464 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
1398 USB_CDC_ACM_PROTO_AT_3G ) }, 1465 USB_CDC_ACM_PROTO_AT_3G) },
1399 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1466 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
1400 USB_CDC_ACM_PROTO_AT_CDMA) }, 1467 USB_CDC_ACM_PROTO_AT_CDMA) },
1401 1468
@@ -1403,7 +1470,7 @@ static struct usb_device_id acm_ids[] = {
1403 { } 1470 { }
1404}; 1471};
1405 1472
1406MODULE_DEVICE_TABLE (usb, acm_ids); 1473MODULE_DEVICE_TABLE(usb, acm_ids);
1407 1474
1408static struct usb_driver acm_driver = { 1475static struct usb_driver acm_driver = {
1409 .name = "cdc_acm", 1476 .name = "cdc_acm",
@@ -1426,6 +1493,7 @@ static struct usb_driver acm_driver = {
1426static const struct tty_operations acm_ops = { 1493static const struct tty_operations acm_ops = {
1427 .open = acm_tty_open, 1494 .open = acm_tty_open,
1428 .close = acm_tty_close, 1495 .close = acm_tty_close,
1496 .hangup = acm_tty_hangup,
1429 .write = acm_tty_write, 1497 .write = acm_tty_write,
1430 .write_room = acm_tty_write_room, 1498 .write_room = acm_tty_write_room,
1431 .ioctl = acm_tty_ioctl, 1499 .ioctl = acm_tty_ioctl,
@@ -1457,7 +1525,8 @@ static int __init acm_init(void)
1457 acm_tty_driver->subtype = SERIAL_TYPE_NORMAL, 1525 acm_tty_driver->subtype = SERIAL_TYPE_NORMAL,
1458 acm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; 1526 acm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
1459 acm_tty_driver->init_termios = tty_std_termios; 1527 acm_tty_driver->init_termios = tty_std_termios;
1460 acm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; 1528 acm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD |
1529 HUPCL | CLOCAL;
1461 tty_set_operations(acm_tty_driver, &acm_ops); 1530 tty_set_operations(acm_tty_driver, &acm_ops);
1462 1531
1463 retval = tty_register_driver(acm_tty_driver); 1532 retval = tty_register_driver(acm_tty_driver);
@@ -1489,7 +1558,7 @@ static void __exit acm_exit(void)
1489module_init(acm_init); 1558module_init(acm_init);
1490module_exit(acm_exit); 1559module_exit(acm_exit);
1491 1560
1492MODULE_AUTHOR( DRIVER_AUTHOR ); 1561MODULE_AUTHOR(DRIVER_AUTHOR);
1493MODULE_DESCRIPTION( DRIVER_DESC ); 1562MODULE_DESCRIPTION(DRIVER_DESC);
1494MODULE_LICENSE("GPL"); 1563MODULE_LICENSE("GPL");
1495MODULE_ALIAS_CHARDEV_MAJOR(ACM_TTY_MAJOR); 1564MODULE_ALIAS_CHARDEV_MAJOR(ACM_TTY_MAJOR);
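The cdc-acm hunks above are part of a conversion from the driver's hand-rolled tty pointer and "used" open count to the generic struct tty_port (see the header change that follows). A minimal sketch of the refcounted hangup pattern the reworked disconnect path relies on -- an illustration of the tty_port API, not code from this patch:

	#include <linux/tty.h>

	/* tty_port_tty_get() returns a counted reference to the port's
	 * tty, or NULL if none is attached, so the tty cannot be freed
	 * between the NULL check and tty_hangup(). The reference is
	 * dropped with tty_kref_put(). */
	static void example_hangup(struct tty_port *port)
	{
		struct tty_struct *tty = tty_port_tty_get(port);

		if (tty) {
			tty_hangup(tty);
			tty_kref_put(tty);
		}
	}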
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 1f95e7aa1b6..4c3856420ad 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -89,8 +89,8 @@ struct acm {
89 struct usb_device *dev; /* the corresponding usb device */ 89 struct usb_device *dev; /* the corresponding usb device */
90 struct usb_interface *control; /* control interface */ 90 struct usb_interface *control; /* control interface */
91 struct usb_interface *data; /* data interface */ 91 struct usb_interface *data; /* data interface */
92 struct tty_struct *tty; /* the corresponding tty */ 92 struct tty_port port; /* our tty port data */
93 struct urb *ctrlurb; /* urbs */ 93 struct urb *ctrlurb; /* urbs */
94 u8 *ctrl_buffer; /* buffers of urbs */ 94 u8 *ctrl_buffer; /* buffers of urbs */
95 dma_addr_t ctrl_dma; /* dma handles of buffers */ 95 dma_addr_t ctrl_dma; /* dma handles of buffers */
96 u8 *country_codes; /* country codes from device */ 96 u8 *country_codes; /* country codes from device */
@@ -120,7 +120,6 @@ struct acm {
120 unsigned int ctrlout; /* output control lines (DTR, RTS) */ 120 unsigned int ctrlout; /* output control lines (DTR, RTS) */
121 unsigned int writesize; /* max packet size for the output bulk endpoint */ 121 unsigned int writesize; /* max packet size for the output bulk endpoint */
122 unsigned int readsize,ctrlsize; /* buffer sizes for freeing */ 122 unsigned int readsize,ctrlsize; /* buffer sizes for freeing */
123 unsigned int used; /* someone has this acm's device open */
124 unsigned int minor; /* acm minor number */ 123 unsigned int minor; /* acm minor number */
125 unsigned char throttle; /* throttled by tty layer */ 124 unsigned char throttle; /* throttled by tty layer */
126 unsigned char clocal; /* termios CLOCAL */ 125 unsigned char clocal; /* termios CLOCAL */
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index dff5760a37f..ffe75e83787 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -39,6 +39,7 @@
39#include <linux/parser.h> 39#include <linux/parser.h>
40#include <linux/notifier.h> 40#include <linux/notifier.h>
41#include <linux/seq_file.h> 41#include <linux/seq_file.h>
42#include <linux/smp_lock.h>
42#include <asm/byteorder.h> 43#include <asm/byteorder.h>
43#include "usb.h" 44#include "usb.h"
44#include "hcd.h" 45#include "hcd.h"
@@ -265,9 +266,13 @@ static int remount(struct super_block *sb, int *flags, char *data)
265 return -EINVAL; 266 return -EINVAL;
266 } 267 }
267 268
269 lock_kernel();
270
268 if (usbfs_mount && usbfs_mount->mnt_sb) 271 if (usbfs_mount && usbfs_mount->mnt_sb)
269 update_sb(usbfs_mount->mnt_sb); 272 update_sb(usbfs_mount->mnt_sb);
270 273
274 unlock_kernel();
275
271 return 0; 276 return 0;
272} 277}
273 278
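The usbfs remount change brackets the superblock update with the big kernel lock. A hedged sketch of the shape (update_sb is the static helper in inode.c; lock_kernel()/unlock_kernel() are the BKL primitives from <linux/smp_lock.h>, long since removed from mainline, so this only illustrates the serialization being restored):

	#include <linux/smp_lock.h>

	static int example_remount(struct super_block *sb)
	{
		lock_kernel();
		if (sb)
			update_sb(sb);	/* helper assumed from inode.c */
		unlock_kernel();
		return 0;
	}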
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 563d5727544..05c913cc365 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -794,7 +794,8 @@ usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
794 if (ep->desc) { 794 if (ep->desc) {
795 list_add_tail(&req->queue, &ep->queue); 795 list_add_tail(&req->queue, &ep->queue);
796 796
797 if (ep->is_in || (ep_is_control(ep) 797 if ((!ep_is_control(ep) && ep->is_in) ||
798 (ep_is_control(ep)
798 && (ep->state == DATA_STAGE_IN 799 && (ep->state == DATA_STAGE_IN
799 || ep->state == STATUS_STAGE_IN))) 800 || ep->state == STATUS_STAGE_IN)))
800 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY); 801 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
@@ -1940,7 +1941,7 @@ static int __init usba_udc_probe(struct platform_device *pdev)
1940 usba_writel(udc, CTRL, USBA_DISABLE_MASK); 1941 usba_writel(udc, CTRL, USBA_DISABLE_MASK);
1941 clk_disable(pclk); 1942 clk_disable(pclk);
1942 1943
1943 usba_ep = kmalloc(sizeof(struct usba_ep) * pdata->num_ep, 1944 usba_ep = kzalloc(sizeof(struct usba_ep) * pdata->num_ep,
1944 GFP_KERNEL); 1945 GFP_KERNEL);
1945 if (!usba_ep) 1946 if (!usba_ep)
1946 goto err_alloc_ep; 1947 goto err_alloc_ep;
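The atmel_usba_udc fix swaps kmalloc() for kzalloc() so the endpoint array starts zeroed instead of holding heap garbage. As a hedged aside, kcalloc() is the equivalent spelling when the size is a count times an element size, and it additionally checks the multiplication for overflow:

	#include <linux/slab.h>

	/* usba_ep and pdata->num_ep follow the names in the driver above */
	usba_ep = kcalloc(pdata->num_ep, sizeof(struct usba_ep), GFP_KERNEL);
	if (!usba_ep)
		goto err_alloc_ep;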
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index cd07ea3f0c6..15438469f21 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1658,6 +1658,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1658 u32 reg_base, or_reg, skip_reg; 1658 u32 reg_base, or_reg, skip_reg;
1659 unsigned long flags; 1659 unsigned long flags;
1660 struct ptd ptd; 1660 struct ptd ptd;
1661 packet_enqueue *pe;
1661 1662
1662 switch (usb_pipetype(urb->pipe)) { 1663 switch (usb_pipetype(urb->pipe)) {
1663 case PIPE_ISOCHRONOUS: 1664 case PIPE_ISOCHRONOUS:
@@ -1669,6 +1670,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1669 reg_base = INT_REGS_OFFSET; 1670 reg_base = INT_REGS_OFFSET;
1670 or_reg = HC_INT_IRQ_MASK_OR_REG; 1671 or_reg = HC_INT_IRQ_MASK_OR_REG;
1671 skip_reg = HC_INT_PTD_SKIPMAP_REG; 1672 skip_reg = HC_INT_PTD_SKIPMAP_REG;
1673 pe = enqueue_an_INT_packet;
1672 break; 1674 break;
1673 1675
1674 default: 1676 default:
@@ -1676,6 +1678,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1676 reg_base = ATL_REGS_OFFSET; 1678 reg_base = ATL_REGS_OFFSET;
1677 or_reg = HC_ATL_IRQ_MASK_OR_REG; 1679 or_reg = HC_ATL_IRQ_MASK_OR_REG;
1678 skip_reg = HC_ATL_PTD_SKIPMAP_REG; 1680 skip_reg = HC_ATL_PTD_SKIPMAP_REG;
1681 pe = enqueue_an_ATL_packet;
1679 break; 1682 break;
1680 } 1683 }
1681 1684
@@ -1687,6 +1690,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1687 u32 skip_map; 1690 u32 skip_map;
1688 u32 or_map; 1691 u32 or_map;
1689 struct isp1760_qtd *qtd; 1692 struct isp1760_qtd *qtd;
1693 struct isp1760_qh *qh = ints->qh;
1690 1694
1691 skip_map = isp1760_readl(hcd->regs + skip_reg); 1695 skip_map = isp1760_readl(hcd->regs + skip_reg);
1692 skip_map |= 1 << i; 1696 skip_map |= 1 << i;
@@ -1699,8 +1703,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1699 priv_write_copy(priv, (u32 *)&ptd, hcd->regs + reg_base 1703 priv_write_copy(priv, (u32 *)&ptd, hcd->regs + reg_base
1700 + i * sizeof(ptd), sizeof(ptd)); 1704 + i * sizeof(ptd), sizeof(ptd));
1701 qtd = ints->qtd; 1705 qtd = ints->qtd;
1702 1706 qtd = clean_up_qtdlist(qtd);
1703 clean_up_qtdlist(qtd);
1704 1707
1705 free_mem(priv, ints->payload); 1708 free_mem(priv, ints->payload);
1706 1709
@@ -1711,7 +1714,24 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1711 ints->payload = 0; 1714 ints->payload = 0;
1712 1715
1713 isp1760_urb_done(priv, urb, status); 1716 isp1760_urb_done(priv, urb, status);
1717 if (qtd)
1718 pe(hcd, qh, qtd);
1714 break; 1719 break;
1720
1721 } else if (ints->qtd) {
1722 struct isp1760_qtd *qtd, *prev_qtd = ints->qtd;
1723
1724 for (qtd = ints->qtd->hw_next; qtd; qtd = qtd->hw_next) {
1725 if (qtd->urb == urb) {
1726 prev_qtd->hw_next = clean_up_qtdlist(qtd);
1727 isp1760_urb_done(priv, urb, status);
1728 break;
1729 }
1730 prev_qtd = qtd;
1731 }
1732 /* we found the urb before the end of the list */
1733 if (qtd)
1734 break;
1715 } 1735 }
1716 ints++; 1736 ints++;
1717 } 1737 }
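The isp1760 dequeue fix does two things: it records, per pipe type, which enqueue helper applies, and after cleaning the cancelled urb's qtds it feeds the surviving chain back to the controller, so cancelling one urb no longer strands the endpoint's remaining transfers. A condensed sketch using the driver's own names (packet_enqueue is the function-pointer typedef in isp1760-hcd.c):

	packet_enqueue *pe;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_INTERRUPT:
		pe = enqueue_an_INT_packet;
		break;
	default:
		pe = enqueue_an_ATL_packet;
		break;
	}

	/* ... after the cancelled urb is torn down ... */
	qtd = clean_up_qtdlist(qtd);	/* first surviving qtd, if any */
	if (qtd)
		pe(hcd, qh, qtd);	/* requeue the remainder */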
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c
index 7cf74f8c2db..b0dbf4157d2 100644
--- a/drivers/usb/host/ohci-ep93xx.c
+++ b/drivers/usb/host/ohci-ep93xx.c
@@ -47,7 +47,7 @@ static int usb_hcd_ep93xx_probe(const struct hc_driver *driver,
47 struct usb_hcd *hcd; 47 struct usb_hcd *hcd;
48 48
49 if (pdev->resource[1].flags != IORESOURCE_IRQ) { 49 if (pdev->resource[1].flags != IORESOURCE_IRQ) {
50 pr_debug("resource[1] is not IORESOURCE_IRQ"); 50 dbg("resource[1] is not IORESOURCE_IRQ");
51 return -ENOMEM; 51 return -ENOMEM;
52 } 52 }
53 53
@@ -65,12 +65,18 @@ static int usb_hcd_ep93xx_probe(const struct hc_driver *driver,
65 65
66 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); 66 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
67 if (hcd->regs == NULL) { 67 if (hcd->regs == NULL) {
68 pr_debug("ioremap failed"); 68 dbg("ioremap failed");
69 retval = -ENOMEM; 69 retval = -ENOMEM;
70 goto err2; 70 goto err2;
71 } 71 }
72 72
73 usb_host_clock = clk_get(&pdev->dev, "usb_host"); 73 usb_host_clock = clk_get(&pdev->dev, NULL);
74 if (IS_ERR(usb_host_clock)) {
75 dbg("clk_get failed");
76 retval = PTR_ERR(usb_host_clock);
77 goto err3;
78 }
79
74 ep93xx_start_hc(&pdev->dev); 80 ep93xx_start_hc(&pdev->dev);
75 81
76 ohci_hcd_init(hcd_to_ohci(hcd)); 82 ohci_hcd_init(hcd_to_ohci(hcd));
@@ -80,6 +86,7 @@ static int usb_hcd_ep93xx_probe(const struct hc_driver *driver,
80 return retval; 86 return retval;
81 87
82 ep93xx_stop_hc(&pdev->dev); 88 ep93xx_stop_hc(&pdev->dev);
89err3:
83 iounmap(hcd->regs); 90 iounmap(hcd->regs);
84err2: 91err2:
85 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 92 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
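The ohci-ep93xx change adds the checking clk_get() requires: on failure it returns an ERR_PTR-encoded value, never NULL, so the result must be screened with IS_ERR() and decoded with PTR_ERR() before use, with the new err3 label unwinding the ioremap on the way out. A self-contained sketch of the pattern:

	#include <linux/clk.h>
	#include <linux/err.h>

	static int example_get_clock(struct device *dev, struct clk **out)
	{
		struct clk *clk = clk_get(dev, NULL);

		if (IS_ERR(clk))
			return PTR_ERR(clk);	/* e.g. -ENOENT */
		*out = clk;
		return 0;
	}

Passing NULL as the connection id, as the hunk now does, lets the clock lookup match on the device alone rather than on the "usb_host" string.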
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index b7eacad4d48..2bfd6dd85b5 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -93,8 +93,7 @@ static int belkin_sa_startup(struct usb_serial *serial);
93static void belkin_sa_shutdown(struct usb_serial *serial); 93static void belkin_sa_shutdown(struct usb_serial *serial);
94static int belkin_sa_open(struct tty_struct *tty, 94static int belkin_sa_open(struct tty_struct *tty,
95 struct usb_serial_port *port, struct file *filp); 95 struct usb_serial_port *port, struct file *filp);
96static void belkin_sa_close(struct tty_struct *tty, 96static void belkin_sa_close(struct usb_serial_port *port);
97 struct usb_serial_port *port, struct file *filp);
98static void belkin_sa_read_int_callback(struct urb *urb); 97static void belkin_sa_read_int_callback(struct urb *urb);
99static void belkin_sa_set_termios(struct tty_struct *tty, 98static void belkin_sa_set_termios(struct tty_struct *tty,
100 struct usb_serial_port *port, struct ktermios * old); 99 struct usb_serial_port *port, struct ktermios * old);
@@ -244,8 +243,7 @@ exit:
244} /* belkin_sa_open */ 243} /* belkin_sa_open */
245 244
246 245
247static void belkin_sa_close(struct tty_struct *tty, 246static void belkin_sa_close(struct usb_serial_port *port)
248 struct usb_serial_port *port, struct file *filp)
249{ 247{
250 dbg("%s port %d", __func__, port->number); 248 dbg("%s port %d", __func__, port->number);
251 249
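belkin_sa is the first of several drivers in this series converted to the new usb-serial close() prototype. A port may be closed after its tty is already gone (hangup, disconnect), so close() now receives only the port and must not reach for a tty or file pointer; tty-side policy such as HUPCL handling moves to the dtr_rts() hook seen in the drivers below. A hedged sketch of the shape a converted close takes:

	/* example only: stop I/O without touching any tty state */
	static void example_close(struct usb_serial_port *port)
	{
		usb_kill_urb(port->write_urb);
		usb_kill_urb(port->read_urb);
		usb_kill_urb(port->interrupt_in_urb);
	}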
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index ab4cc277aa6..2830766f5b3 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -262,32 +262,40 @@ error: kfree(priv);
262 return r; 262 return r;
263} 263}
264 264
265static void ch341_close(struct tty_struct *tty, struct usb_serial_port *port, 265static int ch341_carrier_raised(struct usb_serial_port *port)
266 struct file *filp) 266{
267 struct ch341_private *priv = usb_get_serial_port_data(port);
268 if (priv->line_status & CH341_BIT_DCD)
269 return 1;
270 return 0;
271}
272
273static void ch341_dtr_rts(struct usb_serial_port *port, int on)
267{ 274{
268 struct ch341_private *priv = usb_get_serial_port_data(port); 275 struct ch341_private *priv = usb_get_serial_port_data(port);
269 unsigned long flags; 276 unsigned long flags;
270 unsigned int c_cflag;
271 277
272 dbg("%s - port %d", __func__, port->number); 278 dbg("%s - port %d", __func__, port->number);
279 /* drop DTR and RTS */
280 spin_lock_irqsave(&priv->lock, flags);
281 if (on)
282 priv->line_control |= CH341_BIT_RTS | CH341_BIT_DTR;
283 else
284 priv->line_control &= ~(CH341_BIT_RTS | CH341_BIT_DTR);
285 spin_unlock_irqrestore(&priv->lock, flags);
286 ch341_set_handshake(port->serial->dev, priv->line_control);
287 wake_up_interruptible(&priv->delta_msr_wait);
288}
289
290static void ch341_close(struct usb_serial_port *port)
291{
292 dbg("%s - port %d", __func__, port->number);
273 293
274 /* shutdown our urbs */ 294 /* shutdown our urbs */
275 dbg("%s - shutting down urbs", __func__); 295 dbg("%s - shutting down urbs", __func__);
276 usb_kill_urb(port->write_urb); 296 usb_kill_urb(port->write_urb);
277 usb_kill_urb(port->read_urb); 297 usb_kill_urb(port->read_urb);
278 usb_kill_urb(port->interrupt_in_urb); 298 usb_kill_urb(port->interrupt_in_urb);
279
280 if (tty) {
281 c_cflag = tty->termios->c_cflag;
282 if (c_cflag & HUPCL) {
283 /* drop DTR and RTS */
284 spin_lock_irqsave(&priv->lock, flags);
285 priv->line_control = 0;
286 spin_unlock_irqrestore(&priv->lock, flags);
287 ch341_set_handshake(port->serial->dev, 0);
288 }
289 }
290 wake_up_interruptible(&priv->delta_msr_wait);
291} 299}
292 300
293 301
@@ -302,7 +310,6 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port,
302 dbg("ch341_open()"); 310 dbg("ch341_open()");
303 311
304 priv->baud_rate = DEFAULT_BAUD_RATE; 312 priv->baud_rate = DEFAULT_BAUD_RATE;
305 priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
306 313
307 r = ch341_configure(serial->dev, priv); 314 r = ch341_configure(serial->dev, priv);
308 if (r) 315 if (r)
@@ -322,7 +329,7 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port,
322 if (r) { 329 if (r) {
323 dev_err(&port->dev, "%s - failed submitting interrupt urb," 330 dev_err(&port->dev, "%s - failed submitting interrupt urb,"
324 " error %d\n", __func__, r); 331 " error %d\n", __func__, r);
325 ch341_close(tty, port, NULL); 332 ch341_close(port);
326 return -EPROTO; 333 return -EPROTO;
327 } 334 }
328 335
@@ -343,9 +350,6 @@ static void ch341_set_termios(struct tty_struct *tty,
343 350
344 dbg("ch341_set_termios()"); 351 dbg("ch341_set_termios()");
345 352
346 if (!tty || !tty->termios)
347 return;
348
349 baud_rate = tty_get_baud_rate(tty); 353 baud_rate = tty_get_baud_rate(tty);
350 354
351 priv->baud_rate = baud_rate; 355 priv->baud_rate = baud_rate;
@@ -568,6 +572,8 @@ static struct usb_serial_driver ch341_device = {
568 .usb_driver = &ch341_driver, 572 .usb_driver = &ch341_driver,
569 .num_ports = 1, 573 .num_ports = 1,
570 .open = ch341_open, 574 .open = ch341_open,
575 .dtr_rts = ch341_dtr_rts,
576 .carrier_raised = ch341_carrier_raised,
571 .close = ch341_close, 577 .close = ch341_close,
572 .ioctl = ch341_ioctl, 578 .ioctl = ch341_ioctl,
573 .set_termios = ch341_set_termios, 579 .set_termios = ch341_set_termios,
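The ch341 conversion shows where the deleted close()-time logic went: the core now calls dtr_rts(port, 1) on open and dtr_rts(port, 0) on final close when HUPCL is set, and consults carrier_raised() while a blocking open waits for carrier, so the driver only toggles or reports its lines. A minimal sketch of that contract (example_set_lines, example_read_msr and the LINE_* masks are hypothetical stand-ins for a driver's own line-control requests):

	static void example_dtr_rts(struct usb_serial_port *port, int on)
	{
		if (on)
			example_set_lines(port, LINE_DTR | LINE_RTS);
		else
			example_set_lines(port, 0);
	}

	static int example_carrier_raised(struct usb_serial_port *port)
	{
		return (example_read_msr(port) & LINE_DCD) ? 1 : 0;
	}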
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 19e24045b13..247b61bfb7f 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -169,7 +169,9 @@ static int usb_console_setup(struct console *co, char *options)
169 kfree(tty); 169 kfree(tty);
170 } 170 }
171 } 171 }
172 172 /* So we know not to kill the hardware on a hangup on this
173 port. We have also bumped the use count by one so it won't go
174 idle */
173 port->console = 1; 175 port->console = 1;
174 retval = 0; 176 retval = 0;
175 177
@@ -182,7 +184,7 @@ free_tty:
182 kfree(tty); 184 kfree(tty);
183reset_open_count: 185reset_open_count:
184 port->port.count = 0; 186 port->port.count = 0;
185goto out; 187 goto out;
186} 188}
187 189
188static void usb_console_write(struct console *co, 190static void usb_console_write(struct console *co,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index e8d5133ce9c..16a154d3b2f 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Silicon Laboratories CP2101/CP2102 USB to RS232 serial adaptor driver 2 * Silicon Laboratories CP210x USB to RS232 serial adaptor driver
3 * 3 *
4 * Copyright (C) 2005 Craig Shelley (craig@microtron.org.uk) 4 * Copyright (C) 2005 Craig Shelley (craig@microtron.org.uk)
5 * 5 *
@@ -27,44 +27,46 @@
27/* 27/*
28 * Version Information 28 * Version Information
29 */ 29 */
30#define DRIVER_VERSION "v0.08" 30#define DRIVER_VERSION "v0.09"
31#define DRIVER_DESC "Silicon Labs CP2101/CP2102 RS232 serial adaptor driver" 31#define DRIVER_DESC "Silicon Labs CP210x RS232 serial adaptor driver"
32 32
33/* 33/*
34 * Function Prototypes 34 * Function Prototypes
35 */ 35 */
36static int cp2101_open(struct tty_struct *, struct usb_serial_port *, 36static int cp210x_open(struct tty_struct *, struct usb_serial_port *,
37 struct file *); 37 struct file *);
38static void cp2101_cleanup(struct usb_serial_port *); 38static void cp210x_cleanup(struct usb_serial_port *);
39static void cp2101_close(struct tty_struct *, struct usb_serial_port *, 39static void cp210x_close(struct usb_serial_port *);
40 struct file*); 40static void cp210x_get_termios(struct tty_struct *,
41static void cp2101_get_termios(struct tty_struct *,
42 struct usb_serial_port *port); 41 struct usb_serial_port *port);
43static void cp2101_get_termios_port(struct usb_serial_port *port, 42static void cp210x_get_termios_port(struct usb_serial_port *port,
44 unsigned int *cflagp, unsigned int *baudp); 43 unsigned int *cflagp, unsigned int *baudp);
45static void cp2101_set_termios(struct tty_struct *, struct usb_serial_port *, 44static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
46 struct ktermios*); 45 struct ktermios*);
47static int cp2101_tiocmget(struct tty_struct *, struct file *); 46static int cp210x_tiocmget(struct tty_struct *, struct file *);
48static int cp2101_tiocmset(struct tty_struct *, struct file *, 47static int cp210x_tiocmset(struct tty_struct *, struct file *,
49 unsigned int, unsigned int); 48 unsigned int, unsigned int);
50static int cp2101_tiocmset_port(struct usb_serial_port *port, struct file *, 49static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *,
51 unsigned int, unsigned int); 50 unsigned int, unsigned int);
52static void cp2101_break_ctl(struct tty_struct *, int); 51static void cp210x_break_ctl(struct tty_struct *, int);
53static int cp2101_startup(struct usb_serial *); 52static int cp210x_startup(struct usb_serial *);
54static void cp2101_shutdown(struct usb_serial *); 53static void cp210x_shutdown(struct usb_serial *);
55 54
56static int debug; 55static int debug;
57 56
58static struct usb_device_id id_table [] = { 57static struct usb_device_id id_table [] = {
59 { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ 58 { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
60 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ 59 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
60 { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
61 { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ 61 { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
62 { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
62 { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ 63 { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
63 { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ 64 { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
64 { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ 65 { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
65 { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */ 66 { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */
66 { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ 67 { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */
67 { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ 68 { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
69 { USB_DEVICE(0x10C4, 0x0F91) }, /* Vstabi */
68 { USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */ 70 { USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */
69 { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */ 71 { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */
70 { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */ 72 { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
@@ -85,10 +87,12 @@ static struct usb_device_id id_table [] = {
85 { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */ 87 { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
86 { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */ 88 { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
87 { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */ 89 { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
90 { USB_DEVICE(0x10C4, 0x81F2) }, /* C1007 HF band RFID controller */
88 { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */ 91 { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
 89 { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Commander 2 */ 92 { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Commander 2 */
 90 { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */ 93 { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
91 { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ 94 { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */
95 { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
92 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ 96 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
93 { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ 97 { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
94 { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ 98 { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
@@ -99,7 +103,9 @@ static struct usb_device_id id_table [] = {
99 { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */ 103 { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
100 { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */ 104 { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
101 { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */ 105 { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
106 { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
102 { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ 107 { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
108 { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
103 { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ 109 { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
104 { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ 110 { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
105 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 111 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
@@ -108,53 +114,70 @@ static struct usb_device_id id_table [] = {
108 114
109MODULE_DEVICE_TABLE(usb, id_table); 115MODULE_DEVICE_TABLE(usb, id_table);
110 116
111static struct usb_driver cp2101_driver = { 117static struct usb_driver cp210x_driver = {
112 .name = "cp2101", 118 .name = "cp210x",
113 .probe = usb_serial_probe, 119 .probe = usb_serial_probe,
114 .disconnect = usb_serial_disconnect, 120 .disconnect = usb_serial_disconnect,
115 .id_table = id_table, 121 .id_table = id_table,
116 .no_dynamic_id = 1, 122 .no_dynamic_id = 1,
117}; 123};
118 124
119static struct usb_serial_driver cp2101_device = { 125static struct usb_serial_driver cp210x_device = {
120 .driver = { 126 .driver = {
121 .owner = THIS_MODULE, 127 .owner = THIS_MODULE,
122 .name = "cp2101", 128 .name = "cp210x",
123 }, 129 },
124 .usb_driver = &cp2101_driver, 130 .usb_driver = &cp210x_driver,
125 .id_table = id_table, 131 .id_table = id_table,
126 .num_ports = 1, 132 .num_ports = 1,
127 .open = cp2101_open, 133 .open = cp210x_open,
128 .close = cp2101_close, 134 .close = cp210x_close,
129 .break_ctl = cp2101_break_ctl, 135 .break_ctl = cp210x_break_ctl,
130 .set_termios = cp2101_set_termios, 136 .set_termios = cp210x_set_termios,
131 .tiocmget = cp2101_tiocmget, 137 .tiocmget = cp210x_tiocmget,
132 .tiocmset = cp2101_tiocmset, 138 .tiocmset = cp210x_tiocmset,
133 .attach = cp2101_startup, 139 .attach = cp210x_startup,
134 .shutdown = cp2101_shutdown, 140 .shutdown = cp210x_shutdown,
135}; 141};
136 142
137/* Config request types */ 143/* Config request types */
138#define REQTYPE_HOST_TO_DEVICE 0x41 144#define REQTYPE_HOST_TO_DEVICE 0x41
139#define REQTYPE_DEVICE_TO_HOST 0xc1 145#define REQTYPE_DEVICE_TO_HOST 0xc1
140 146
141/* Config SET requests. To GET, add 1 to the request number */ 147/* Config request codes */
142#define CP2101_UART 0x00 /* Enable / Disable */ 148#define CP210X_IFC_ENABLE 0x00
143#define CP2101_BAUDRATE 0x01 /* (BAUD_RATE_GEN_FREQ / baudrate) */ 149#define CP210X_SET_BAUDDIV 0x01
144#define CP2101_BITS 0x03 /* 0x(0)(databits)(parity)(stopbits) */ 150#define CP210X_GET_BAUDDIV 0x02
145#define CP2101_BREAK 0x05 /* On / Off */ 151#define CP210X_SET_LINE_CTL 0x03
146#define CP2101_CONTROL 0x07 /* Flow control line states */ 152#define CP210X_GET_LINE_CTL 0x04
147#define CP2101_MODEMCTL 0x13 /* Modem controls */ 153#define CP210X_SET_BREAK 0x05
148#define CP2101_CONFIG_6 0x19 /* 6 bytes of config data ??? */ 154#define CP210X_IMM_CHAR 0x06
149 155#define CP210X_SET_MHS 0x07
150/* CP2101_UART */ 156#define CP210X_GET_MDMSTS 0x08
157#define CP210X_SET_XON 0x09
158#define CP210X_SET_XOFF 0x0A
159#define CP210X_SET_EVENTMASK 0x0B
160#define CP210X_GET_EVENTMASK 0x0C
161#define CP210X_SET_CHAR 0x0D
162#define CP210X_GET_CHARS 0x0E
163#define CP210X_GET_PROPS 0x0F
164#define CP210X_GET_COMM_STATUS 0x10
165#define CP210X_RESET 0x11
166#define CP210X_PURGE 0x12
167#define CP210X_SET_FLOW 0x13
168#define CP210X_GET_FLOW 0x14
169#define CP210X_EMBED_EVENTS 0x15
170#define CP210X_GET_EVENTSTATE 0x16
171#define CP210X_SET_CHARS 0x19
172
173/* CP210X_IFC_ENABLE */
151#define UART_ENABLE 0x0001 174#define UART_ENABLE 0x0001
152#define UART_DISABLE 0x0000 175#define UART_DISABLE 0x0000
153 176
154/* CP2101_BAUDRATE */ 177/* CP210X_(SET|GET)_BAUDDIV */
155#define BAUD_RATE_GEN_FREQ 0x384000 178#define BAUD_RATE_GEN_FREQ 0x384000
156 179
157/* CP2101_BITS */ 180/* CP210X_(SET|GET)_LINE_CTL */
158#define BITS_DATA_MASK 0X0f00 181#define BITS_DATA_MASK 0X0f00
159#define BITS_DATA_5 0X0500 182#define BITS_DATA_5 0X0500
160#define BITS_DATA_6 0X0600 183#define BITS_DATA_6 0X0600
@@ -174,11 +197,11 @@ static struct usb_serial_driver cp2101_device = {
174#define BITS_STOP_1_5 0x0001 197#define BITS_STOP_1_5 0x0001
175#define BITS_STOP_2 0x0002 198#define BITS_STOP_2 0x0002
176 199
177/* CP2101_BREAK */ 200/* CP210X_SET_BREAK */
178#define BREAK_ON 0x0000 201#define BREAK_ON 0x0000
179#define BREAK_OFF 0x0001 202#define BREAK_OFF 0x0001
180 203
181/* CP2101_CONTROL */ 204/* CP210X_(SET_MHS|GET_MDMSTS) */
182#define CONTROL_DTR 0x0001 205#define CONTROL_DTR 0x0001
183#define CONTROL_RTS 0x0002 206#define CONTROL_RTS 0x0002
184#define CONTROL_CTS 0x0010 207#define CONTROL_CTS 0x0010
@@ -189,13 +212,13 @@ static struct usb_serial_driver cp2101_device = {
189#define CONTROL_WRITE_RTS 0x0200 212#define CONTROL_WRITE_RTS 0x0200
190 213
191/* 214/*
192 * cp2101_get_config 215 * cp210x_get_config
193 * Reads from the CP2101 configuration registers 216 * Reads from the CP210x configuration registers
194 * 'size' is specified in bytes. 217 * 'size' is specified in bytes.
195 * 'data' is a pointer to a pre-allocated array of integers large 218 * 'data' is a pointer to a pre-allocated array of integers large
196 * enough to hold 'size' bytes (with 4 bytes to each integer) 219 * enough to hold 'size' bytes (with 4 bytes to each integer)
197 */ 220 */
198static int cp2101_get_config(struct usb_serial_port *port, u8 request, 221static int cp210x_get_config(struct usb_serial_port *port, u8 request,
199 unsigned int *data, int size) 222 unsigned int *data, int size)
200{ 223{
201 struct usb_serial *serial = port->serial; 224 struct usb_serial *serial = port->serial;
@@ -211,9 +234,6 @@ static int cp2101_get_config(struct usb_serial_port *port, u8 request,
211 return -ENOMEM; 234 return -ENOMEM;
212 } 235 }
213 236
214 /* For get requests, the request number must be incremented */
215 request++;
216
217 /* Issue the request, attempting to read 'size' bytes */ 237 /* Issue the request, attempting to read 'size' bytes */
218 result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 238 result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
219 request, REQTYPE_DEVICE_TO_HOST, 0x0000, 239 request, REQTYPE_DEVICE_TO_HOST, 0x0000,
@@ -236,12 +256,12 @@ static int cp2101_get_config(struct usb_serial_port *port, u8 request,
236} 256}
237 257
238/* 258/*
239 * cp2101_set_config 259 * cp210x_set_config
240 * Writes to the CP2101 configuration registers 260 * Writes to the CP210x configuration registers
241 * Values less than 16 bits wide are sent directly 261 * Values less than 16 bits wide are sent directly
242 * 'size' is specified in bytes. 262 * 'size' is specified in bytes.
243 */ 263 */
244static int cp2101_set_config(struct usb_serial_port *port, u8 request, 264static int cp210x_set_config(struct usb_serial_port *port, u8 request,
245 unsigned int *data, int size) 265 unsigned int *data, int size)
246{ 266{
247 struct usb_serial *serial = port->serial; 267 struct usb_serial *serial = port->serial;
@@ -292,21 +312,21 @@ static int cp2101_set_config(struct usb_serial_port *port, u8 request,
292} 312}
293 313
294/* 314/*
295 * cp2101_set_config_single 315 * cp210x_set_config_single
296 * Convenience function for calling cp2101_set_config on single data values 316 * Convenience function for calling cp210x_set_config on single data values
297 * without requiring an integer pointer 317 * without requiring an integer pointer
298 */ 318 */
299static inline int cp2101_set_config_single(struct usb_serial_port *port, 319static inline int cp210x_set_config_single(struct usb_serial_port *port,
300 u8 request, unsigned int data) 320 u8 request, unsigned int data)
301{ 321{
302 return cp2101_set_config(port, request, &data, 2); 322 return cp210x_set_config(port, request, &data, 2);
303} 323}
304 324
305/* 325/*
306 * cp2101_quantise_baudrate 326 * cp210x_quantise_baudrate
307 * Quantises the baud rate as per AN205 Table 1 327 * Quantises the baud rate as per AN205 Table 1
308 */ 328 */
309static unsigned int cp2101_quantise_baudrate(unsigned int baud) { 329static unsigned int cp210x_quantise_baudrate(unsigned int baud) {
310 if (baud <= 56) baud = 0; 330 if (baud <= 56) baud = 0;
311 else if (baud <= 300) baud = 300; 331 else if (baud <= 300) baud = 300;
312 else if (baud <= 600) baud = 600; 332 else if (baud <= 600) baud = 600;
@@ -343,7 +363,7 @@ static unsigned int cp2101_quantise_baudrate(unsigned int baud) {
343 return baud; 363 return baud;
344} 364}
345 365
346static int cp2101_open(struct tty_struct *tty, struct usb_serial_port *port, 366static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port,
347 struct file *filp) 367 struct file *filp)
348{ 368{
349 struct usb_serial *serial = port->serial; 369 struct usb_serial *serial = port->serial;
@@ -351,7 +371,7 @@ static int cp2101_open(struct tty_struct *tty, struct usb_serial_port *port,
351 371
352 dbg("%s - port %d", __func__, port->number); 372 dbg("%s - port %d", __func__, port->number);
353 373
354 if (cp2101_set_config_single(port, CP2101_UART, UART_ENABLE)) { 374 if (cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_ENABLE)) {
355 dev_err(&port->dev, "%s - Unable to enable UART\n", 375 dev_err(&port->dev, "%s - Unable to enable UART\n",
356 __func__); 376 __func__);
357 return -EPROTO; 377 return -EPROTO;
@@ -373,17 +393,17 @@ static int cp2101_open(struct tty_struct *tty, struct usb_serial_port *port,
373 } 393 }
374 394
375 /* Configure the termios structure */ 395 /* Configure the termios structure */
376 cp2101_get_termios(tty, port); 396 cp210x_get_termios(tty, port);
377 397
378 /* Set the DTR and RTS pins low */ 398 /* Set the DTR and RTS pins low */
379 cp2101_tiocmset_port(tty ? (struct usb_serial_port *) tty->driver_data 399 cp210x_tiocmset_port(tty ? (struct usb_serial_port *) tty->driver_data
380 : port, 400 : port,
381 NULL, TIOCM_DTR | TIOCM_RTS, 0); 401 NULL, TIOCM_DTR | TIOCM_RTS, 0);
382 402
383 return 0; 403 return 0;
384} 404}
385 405
386static void cp2101_cleanup(struct usb_serial_port *port) 406static void cp210x_cleanup(struct usb_serial_port *port)
387{ 407{
388 struct usb_serial *serial = port->serial; 408 struct usb_serial *serial = port->serial;
389 409
@@ -398,8 +418,7 @@ static void cp2101_cleanup(struct usb_serial_port *port)
398 } 418 }
399} 419}
400 420
401static void cp2101_close(struct tty_struct *tty, struct usb_serial_port *port, 421static void cp210x_close(struct usb_serial_port *port)
402 struct file *filp)
403{ 422{
404 dbg("%s - port %d", __func__, port->number); 423 dbg("%s - port %d", __func__, port->number);
405 424
@@ -410,23 +429,23 @@ static void cp2101_close(struct tty_struct *tty, struct usb_serial_port *port,
410 429
411 mutex_lock(&port->serial->disc_mutex); 430 mutex_lock(&port->serial->disc_mutex);
412 if (!port->serial->disconnected) 431 if (!port->serial->disconnected)
413 cp2101_set_config_single(port, CP2101_UART, UART_DISABLE); 432 cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_DISABLE);
414 mutex_unlock(&port->serial->disc_mutex); 433 mutex_unlock(&port->serial->disc_mutex);
415} 434}
416 435
417/* 436/*
418 * cp2101_get_termios 437 * cp210x_get_termios
419 * Reads the baud rate, data bits, parity, stop bits and flow control mode 438 * Reads the baud rate, data bits, parity, stop bits and flow control mode
420 * from the device, corrects any unsupported values, and configures the 439 * from the device, corrects any unsupported values, and configures the
421 * termios structure to reflect the state of the device 440 * termios structure to reflect the state of the device
422 */ 441 */
423static void cp2101_get_termios(struct tty_struct *tty, 442static void cp210x_get_termios(struct tty_struct *tty,
424 struct usb_serial_port *port) 443 struct usb_serial_port *port)
425{ 444{
426 unsigned int baud; 445 unsigned int baud;
427 446
428 if (tty) { 447 if (tty) {
429 cp2101_get_termios_port(tty->driver_data, 448 cp210x_get_termios_port(tty->driver_data,
430 &tty->termios->c_cflag, &baud); 449 &tty->termios->c_cflag, &baud);
431 tty_encode_baud_rate(tty, baud, baud); 450 tty_encode_baud_rate(tty, baud, baud);
432 } 451 }
@@ -434,15 +453,15 @@ static void cp2101_get_termios(struct tty_struct *tty,
434 else { 453 else {
435 unsigned int cflag; 454 unsigned int cflag;
436 cflag = 0; 455 cflag = 0;
437 cp2101_get_termios_port(port, &cflag, &baud); 456 cp210x_get_termios_port(port, &cflag, &baud);
438 } 457 }
439} 458}
440 459
441/* 460/*
442 * cp2101_get_termios_port 461 * cp210x_get_termios_port
443 * This is the heart of cp2101_get_termios which always uses a &usb_serial_port. 462 * This is the heart of cp210x_get_termios which always uses a &usb_serial_port.
444 */ 463 */
445static void cp2101_get_termios_port(struct usb_serial_port *port, 464static void cp210x_get_termios_port(struct usb_serial_port *port,
446 unsigned int *cflagp, unsigned int *baudp) 465 unsigned int *cflagp, unsigned int *baudp)
447{ 466{
448 unsigned int cflag, modem_ctl[4]; 467 unsigned int cflag, modem_ctl[4];
@@ -451,17 +470,17 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
451 470
452 dbg("%s - port %d", __func__, port->number); 471 dbg("%s - port %d", __func__, port->number);
453 472
454 cp2101_get_config(port, CP2101_BAUDRATE, &baud, 2); 473 cp210x_get_config(port, CP210X_GET_BAUDDIV, &baud, 2);
455 /* Convert to baudrate */ 474 /* Convert to baudrate */
456 if (baud) 475 if (baud)
457 baud = cp2101_quantise_baudrate((BAUD_RATE_GEN_FREQ + baud/2)/ baud); 476 baud = cp210x_quantise_baudrate((BAUD_RATE_GEN_FREQ + baud/2)/ baud);
458 477
459 dbg("%s - baud rate = %d", __func__, baud); 478 dbg("%s - baud rate = %d", __func__, baud);
460 *baudp = baud; 479 *baudp = baud;
461 480
462 cflag = *cflagp; 481 cflag = *cflagp;
463 482
464 cp2101_get_config(port, CP2101_BITS, &bits, 2); 483 cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
465 cflag &= ~CSIZE; 484 cflag &= ~CSIZE;
466 switch (bits & BITS_DATA_MASK) { 485 switch (bits & BITS_DATA_MASK) {
467 case BITS_DATA_5: 486 case BITS_DATA_5:
@@ -486,14 +505,14 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
486 cflag |= CS8; 505 cflag |= CS8;
487 bits &= ~BITS_DATA_MASK; 506 bits &= ~BITS_DATA_MASK;
488 bits |= BITS_DATA_8; 507 bits |= BITS_DATA_8;
489 cp2101_set_config(port, CP2101_BITS, &bits, 2); 508 cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
490 break; 509 break;
491 default: 510 default:
492 dbg("%s - Unknown number of data bits, using 8", __func__); 511 dbg("%s - Unknown number of data bits, using 8", __func__);
493 cflag |= CS8; 512 cflag |= CS8;
494 bits &= ~BITS_DATA_MASK; 513 bits &= ~BITS_DATA_MASK;
495 bits |= BITS_DATA_8; 514 bits |= BITS_DATA_8;
496 cp2101_set_config(port, CP2101_BITS, &bits, 2); 515 cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
497 break; 516 break;
498 } 517 }
499 518
@@ -516,20 +535,20 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
516 __func__); 535 __func__);
517 cflag &= ~PARENB; 536 cflag &= ~PARENB;
518 bits &= ~BITS_PARITY_MASK; 537 bits &= ~BITS_PARITY_MASK;
519 cp2101_set_config(port, CP2101_BITS, &bits, 2); 538 cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
520 break; 539 break;
521 case BITS_PARITY_SPACE: 540 case BITS_PARITY_SPACE:
522 dbg("%s - parity = SPACE (not supported, disabling parity)", 541 dbg("%s - parity = SPACE (not supported, disabling parity)",
523 __func__); 542 __func__);
524 cflag &= ~PARENB; 543 cflag &= ~PARENB;
525 bits &= ~BITS_PARITY_MASK; 544 bits &= ~BITS_PARITY_MASK;
526 cp2101_set_config(port, CP2101_BITS, &bits, 2); 545 cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
527 break; 546 break;
528 default: 547 default:
529 dbg("%s - Unknown parity mode, disabling parity", __func__); 548 dbg("%s - Unknown parity mode, disabling parity", __func__);
530 cflag &= ~PARENB; 549 cflag &= ~PARENB;
531 bits &= ~BITS_PARITY_MASK; 550 bits &= ~BITS_PARITY_MASK;
532 cp2101_set_config(port, CP2101_BITS, &bits, 2); 551 cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
533 break; 552 break;
534 } 553 }
535 554
@@ -542,7 +561,7 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
542 dbg("%s - stop bits = 1.5 (not supported, using 1 stop bit)", 561 dbg("%s - stop bits = 1.5 (not supported, using 1 stop bit)",
543 __func__); 562 __func__);
544 bits &= ~BITS_STOP_MASK; 563 bits &= ~BITS_STOP_MASK;
545 cp2101_set_config(port, CP2101_BITS, &bits, 2); 564 cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
546 break; 565 break;
547 case BITS_STOP_2: 566 case BITS_STOP_2:
548 dbg("%s - stop bits = 2", __func__); 567 dbg("%s - stop bits = 2", __func__);
@@ -552,11 +571,11 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
552 dbg("%s - Unknown number of stop bits, using 1 stop bit", 571 dbg("%s - Unknown number of stop bits, using 1 stop bit",
553 __func__); 572 __func__);
554 bits &= ~BITS_STOP_MASK; 573 bits &= ~BITS_STOP_MASK;
555 cp2101_set_config(port, CP2101_BITS, &bits, 2); 574 cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
556 break; 575 break;
557 } 576 }
558 577
559 cp2101_get_config(port, CP2101_MODEMCTL, modem_ctl, 16); 578 cp210x_get_config(port, CP210X_GET_FLOW, modem_ctl, 16);
560 if (modem_ctl[0] & 0x0008) { 579 if (modem_ctl[0] & 0x0008) {
561 dbg("%s - flow control = CRTSCTS", __func__); 580 dbg("%s - flow control = CRTSCTS", __func__);
562 cflag |= CRTSCTS; 581 cflag |= CRTSCTS;
@@ -568,7 +587,7 @@ static void cp2101_get_termios_port(struct usb_serial_port *port,
568 *cflagp = cflag; 587 *cflagp = cflag;
569} 588}
570 589
571static void cp2101_set_termios(struct tty_struct *tty, 590static void cp210x_set_termios(struct tty_struct *tty,
572 struct usb_serial_port *port, struct ktermios *old_termios) 591 struct usb_serial_port *port, struct ktermios *old_termios)
573{ 592{
574 unsigned int cflag, old_cflag; 593 unsigned int cflag, old_cflag;
@@ -583,13 +602,13 @@ static void cp2101_set_termios(struct tty_struct *tty,
583 tty->termios->c_cflag &= ~CMSPAR; 602 tty->termios->c_cflag &= ~CMSPAR;
584 cflag = tty->termios->c_cflag; 603 cflag = tty->termios->c_cflag;
585 old_cflag = old_termios->c_cflag; 604 old_cflag = old_termios->c_cflag;
586 baud = cp2101_quantise_baudrate(tty_get_baud_rate(tty)); 605 baud = cp210x_quantise_baudrate(tty_get_baud_rate(tty));
587 606
588 /* If the baud rate is to be updated*/ 607 /* If the baud rate is to be updated*/
589 if (baud != tty_termios_baud_rate(old_termios) && baud != 0) { 608 if (baud != tty_termios_baud_rate(old_termios) && baud != 0) {
590 dbg("%s - Setting baud rate to %d baud", __func__, 609 dbg("%s - Setting baud rate to %d baud", __func__,
591 baud); 610 baud);
592 if (cp2101_set_config_single(port, CP2101_BAUDRATE, 611 if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV,
593 ((BAUD_RATE_GEN_FREQ + baud/2) / baud))) { 612 ((BAUD_RATE_GEN_FREQ + baud/2) / baud))) {
594 dbg("Baud rate requested not supported by device\n"); 613 dbg("Baud rate requested not supported by device\n");
595 baud = tty_termios_baud_rate(old_termios); 614 baud = tty_termios_baud_rate(old_termios);
@@ -600,7 +619,7 @@ static void cp2101_set_termios(struct tty_struct *tty,
600 619
601 /* If the number of data bits is to be updated */ 620 /* If the number of data bits is to be updated */
602 if ((cflag & CSIZE) != (old_cflag & CSIZE)) { 621 if ((cflag & CSIZE) != (old_cflag & CSIZE)) {
603 cp2101_get_config(port, CP2101_BITS, &bits, 2); 622 cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
604 bits &= ~BITS_DATA_MASK; 623 bits &= ~BITS_DATA_MASK;
605 switch (cflag & CSIZE) { 624 switch (cflag & CSIZE) {
606 case CS5: 625 case CS5:
@@ -624,19 +643,19 @@ static void cp2101_set_termios(struct tty_struct *tty,
624 dbg("%s - data bits = 9", __func__); 643 dbg("%s - data bits = 9", __func__);
625 break;*/ 644 break;*/
626 default: 645 default:
627 dbg("cp2101 driver does not " 646 dbg("cp210x driver does not "
628 "support the number of bits requested," 647 "support the number of bits requested,"
629 " using 8 bit mode\n"); 648 " using 8 bit mode\n");
630 bits |= BITS_DATA_8; 649 bits |= BITS_DATA_8;
631 break; 650 break;
632 } 651 }
633 if (cp2101_set_config(port, CP2101_BITS, &bits, 2)) 652 if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2))
634 dbg("Number of data bits requested " 653 dbg("Number of data bits requested "
635 "not supported by device\n"); 654 "not supported by device\n");
636 } 655 }
637 656
638 if ((cflag & (PARENB|PARODD)) != (old_cflag & (PARENB|PARODD))) { 657 if ((cflag & (PARENB|PARODD)) != (old_cflag & (PARENB|PARODD))) {
639 cp2101_get_config(port, CP2101_BITS, &bits, 2); 658 cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
640 bits &= ~BITS_PARITY_MASK; 659 bits &= ~BITS_PARITY_MASK;
641 if (cflag & PARENB) { 660 if (cflag & PARENB) {
642 if (cflag & PARODD) { 661 if (cflag & PARODD) {
@@ -647,13 +666,13 @@ static void cp2101_set_termios(struct tty_struct *tty,
647 dbg("%s - parity = EVEN", __func__); 666 dbg("%s - parity = EVEN", __func__);
648 } 667 }
649 } 668 }
650 if (cp2101_set_config(port, CP2101_BITS, &bits, 2)) 669 if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2))
651 dbg("Parity mode not supported " 670 dbg("Parity mode not supported "
652 "by device\n"); 671 "by device\n");
653 } 672 }
654 673
655 if ((cflag & CSTOPB) != (old_cflag & CSTOPB)) { 674 if ((cflag & CSTOPB) != (old_cflag & CSTOPB)) {
656 cp2101_get_config(port, CP2101_BITS, &bits, 2); 675 cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
657 bits &= ~BITS_STOP_MASK; 676 bits &= ~BITS_STOP_MASK;
658 if (cflag & CSTOPB) { 677 if (cflag & CSTOPB) {
659 bits |= BITS_STOP_2; 678 bits |= BITS_STOP_2;
@@ -662,13 +681,13 @@ static void cp2101_set_termios(struct tty_struct *tty,
662 bits |= BITS_STOP_1; 681 bits |= BITS_STOP_1;
663 dbg("%s - stop bits = 1", __func__); 682 dbg("%s - stop bits = 1", __func__);
664 } 683 }
665 if (cp2101_set_config(port, CP2101_BITS, &bits, 2)) 684 if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2))
666 dbg("Number of stop bits requested " 685 dbg("Number of stop bits requested "
667 "not supported by device\n"); 686 "not supported by device\n");
668 } 687 }
669 688
670 if ((cflag & CRTSCTS) != (old_cflag & CRTSCTS)) { 689 if ((cflag & CRTSCTS) != (old_cflag & CRTSCTS)) {
671 cp2101_get_config(port, CP2101_MODEMCTL, modem_ctl, 16); 690 cp210x_get_config(port, CP210X_GET_FLOW, modem_ctl, 16);
672 dbg("%s - read modem controls = 0x%.4x 0x%.4x 0x%.4x 0x%.4x", 691 dbg("%s - read modem controls = 0x%.4x 0x%.4x 0x%.4x 0x%.4x",
673 __func__, modem_ctl[0], modem_ctl[1], 692 __func__, modem_ctl[0], modem_ctl[1],
674 modem_ctl[2], modem_ctl[3]); 693 modem_ctl[2], modem_ctl[3]);
@@ -688,19 +707,19 @@ static void cp2101_set_termios(struct tty_struct *tty,
688 dbg("%s - write modem controls = 0x%.4x 0x%.4x 0x%.4x 0x%.4x", 707 dbg("%s - write modem controls = 0x%.4x 0x%.4x 0x%.4x 0x%.4x",
689 __func__, modem_ctl[0], modem_ctl[1], 708 __func__, modem_ctl[0], modem_ctl[1],
690 modem_ctl[2], modem_ctl[3]); 709 modem_ctl[2], modem_ctl[3]);
691 cp2101_set_config(port, CP2101_MODEMCTL, modem_ctl, 16); 710 cp210x_set_config(port, CP210X_SET_FLOW, modem_ctl, 16);
692 } 711 }
693 712
694} 713}
695 714
696static int cp2101_tiocmset (struct tty_struct *tty, struct file *file, 715static int cp210x_tiocmset (struct tty_struct *tty, struct file *file,
697 unsigned int set, unsigned int clear) 716 unsigned int set, unsigned int clear)
698{ 717{
699 struct usb_serial_port *port = tty->driver_data; 718 struct usb_serial_port *port = tty->driver_data;
700 return cp2101_tiocmset_port(port, file, set, clear); 719 return cp210x_tiocmset_port(port, file, set, clear);
701} 720}
702 721
703static int cp2101_tiocmset_port(struct usb_serial_port *port, struct file *file, 722static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *file,
704 unsigned int set, unsigned int clear) 723 unsigned int set, unsigned int clear)
705{ 724{
706 unsigned int control = 0; 725 unsigned int control = 0;
@@ -726,10 +745,10 @@ static int cp2101_tiocmset_port(struct usb_serial_port *port, struct file *file,
726 745
727 dbg("%s - control = 0x%.4x", __func__, control); 746 dbg("%s - control = 0x%.4x", __func__, control);
728 747
729 return cp2101_set_config(port, CP2101_CONTROL, &control, 2); 748 return cp210x_set_config(port, CP210X_SET_MHS, &control, 2);
730} 749}
731 750
732static int cp2101_tiocmget (struct tty_struct *tty, struct file *file) 751static int cp210x_tiocmget (struct tty_struct *tty, struct file *file)
733{ 752{
734 struct usb_serial_port *port = tty->driver_data; 753 struct usb_serial_port *port = tty->driver_data;
735 unsigned int control; 754 unsigned int control;
@@ -737,7 +756,7 @@ static int cp2101_tiocmget (struct tty_struct *tty, struct file *file)
737 756
738 dbg("%s - port %d", __func__, port->number); 757 dbg("%s - port %d", __func__, port->number);
739 758
740 cp2101_get_config(port, CP2101_CONTROL, &control, 1); 759 cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
741 760
742 result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0) 761 result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
743 |((control & CONTROL_RTS) ? TIOCM_RTS : 0) 762 |((control & CONTROL_RTS) ? TIOCM_RTS : 0)
@@ -751,7 +770,7 @@ static int cp2101_tiocmget (struct tty_struct *tty, struct file *file)
751 return result; 770 return result;
752} 771}
753 772
754static void cp2101_break_ctl (struct tty_struct *tty, int break_state) 773static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
755{ 774{
756 struct usb_serial_port *port = tty->driver_data; 775 struct usb_serial_port *port = tty->driver_data;
757 unsigned int state; 776 unsigned int state;
@@ -763,17 +782,17 @@ static void cp2101_break_ctl (struct tty_struct *tty, int break_state)
763 state = BREAK_ON; 782 state = BREAK_ON;
764 dbg("%s - turning break %s", __func__, 783 dbg("%s - turning break %s", __func__,
765 state == BREAK_OFF ? "off" : "on"); 784 state == BREAK_OFF ? "off" : "on");
766 cp2101_set_config(port, CP2101_BREAK, &state, 2); 785 cp210x_set_config(port, CP210X_SET_BREAK, &state, 2);
767} 786}
768 787
769static int cp2101_startup(struct usb_serial *serial) 788static int cp210x_startup(struct usb_serial *serial)
770{ 789{
771 /* CP2101 buffers behave strangely unless device is reset */ 790 /* cp210x buffers behave strangely unless device is reset */
772 usb_reset_device(serial->dev); 791 usb_reset_device(serial->dev);
773 return 0; 792 return 0;
774} 793}
775 794
776static void cp2101_shutdown(struct usb_serial *serial) 795static void cp210x_shutdown(struct usb_serial *serial)
777{ 796{
778 int i; 797 int i;
779 798
@@ -781,21 +800,21 @@ static void cp2101_shutdown(struct usb_serial *serial)
781 800
782 /* Stop reads and writes on all ports */ 801 /* Stop reads and writes on all ports */
783 for (i = 0; i < serial->num_ports; ++i) 802 for (i = 0; i < serial->num_ports; ++i)
784 cp2101_cleanup(serial->port[i]); 803 cp210x_cleanup(serial->port[i]);
785} 804}
786 805
787static int __init cp2101_init(void) 806static int __init cp210x_init(void)
788{ 807{
789 int retval; 808 int retval;
790 809
791 retval = usb_serial_register(&cp2101_device); 810 retval = usb_serial_register(&cp210x_device);
792 if (retval) 811 if (retval)
793 return retval; /* Failed to register */ 812 return retval; /* Failed to register */
794 813
795 retval = usb_register(&cp2101_driver); 814 retval = usb_register(&cp210x_driver);
796 if (retval) { 815 if (retval) {
797 /* Failed to register */ 816 /* Failed to register */
798 usb_serial_deregister(&cp2101_device); 817 usb_serial_deregister(&cp210x_device);
799 return retval; 818 return retval;
800 } 819 }
801 820
@@ -805,14 +824,14 @@ static int __init cp2101_init(void)
805 return 0; 824 return 0;
806} 825}
807 826
808static void __exit cp2101_exit(void) 827static void __exit cp210x_exit(void)
809{ 828{
810 usb_deregister(&cp2101_driver); 829 usb_deregister(&cp210x_driver);
811 usb_serial_deregister(&cp2101_device); 830 usb_serial_deregister(&cp210x_device);
812} 831}
813 832
814module_init(cp2101_init); 833module_init(cp210x_init);
815module_exit(cp2101_exit); 834module_exit(cp210x_exit);
816 835
817MODULE_DESCRIPTION(DRIVER_DESC); 836MODULE_DESCRIPTION(DRIVER_DESC);
818MODULE_VERSION(DRIVER_VERSION); 837MODULE_VERSION(DRIVER_VERSION);
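Beyond the cp2101 -> cp210x rename, the functional change in this file is retiring the implicit "a GET is the SET request plus one" rule (the deleted request++ in the get-config path). The coincidence held for the registers the driver actually read -- BAUDDIV (0x01/0x02), LINE_CTL (0x03/0x04), MHS/MDMSTS (0x07/0x08), FLOW (0x13/0x14) -- but not across the command set: SET_BREAK (0x05) is followed by IMM_CHAR (0x06), which is not a GET at all. With the table spelled out, each read names its code explicitly; a hedged sketch of a line-control read (index 0 and the 300 ms timeout assumed, matching the single-port case above):

	result = usb_control_msg(serial->dev,
			usb_rcvctrlpipe(serial->dev, 0),
			CP210X_GET_LINE_CTL, REQTYPE_DEVICE_TO_HOST,
			0x0000, 0, buf, 2, 300);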
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index dd501bb63ed..933ba913e66 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -61,8 +61,7 @@ static int cyberjack_startup(struct usb_serial *serial);
61static void cyberjack_shutdown(struct usb_serial *serial); 61static void cyberjack_shutdown(struct usb_serial *serial);
62static int cyberjack_open(struct tty_struct *tty, 62static int cyberjack_open(struct tty_struct *tty,
63 struct usb_serial_port *port, struct file *filp); 63 struct usb_serial_port *port, struct file *filp);
64static void cyberjack_close(struct tty_struct *tty, 64static void cyberjack_close(struct usb_serial_port *port);
65 struct usb_serial_port *port, struct file *filp);
66static int cyberjack_write(struct tty_struct *tty, 65static int cyberjack_write(struct tty_struct *tty,
67 struct usb_serial_port *port, const unsigned char *buf, int count); 66 struct usb_serial_port *port, const unsigned char *buf, int count);
68static int cyberjack_write_room(struct tty_struct *tty); 67static int cyberjack_write_room(struct tty_struct *tty);
@@ -185,8 +184,7 @@ static int cyberjack_open(struct tty_struct *tty,
185 return result; 184 return result;
186} 185}
187 186
188static void cyberjack_close(struct tty_struct *tty, 187static void cyberjack_close(struct usb_serial_port *port)
189 struct usb_serial_port *port, struct file *filp)
190{ 188{
191 dbg("%s - port %d", __func__, port->number); 189 dbg("%s - port %d", __func__, port->number);
192 190
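cyberjack is the simplest case of the close() conversion running through this series: the tty and filp arguments drop out, leaving teardown keyed only on the port. A hedged template of the new callback shape (foo_* is a placeholder, not a driver in this patch):

#include <linux/usb/serial.h>

/* Old shape: foo_close(struct tty_struct *tty,
 *                      struct usb_serial_port *port, struct file *filp)
 * New shape used throughout this series: */
static void foo_close(struct usb_serial_port *port)
{
	/* Only port teardown belongs here; DTR/RTS and drain handling
	 * move to the dtr_rts() callback and the tty_port core. */
	usb_kill_urb(port->write_urb);
	usb_kill_urb(port->read_urb);
}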
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index e568710b263..669f9384853 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -174,8 +174,8 @@ static int cypress_ca42v2_startup(struct usb_serial *serial);
174static void cypress_shutdown(struct usb_serial *serial); 174static void cypress_shutdown(struct usb_serial *serial);
175static int cypress_open(struct tty_struct *tty, 175static int cypress_open(struct tty_struct *tty,
176 struct usb_serial_port *port, struct file *filp); 176 struct usb_serial_port *port, struct file *filp);
177static void cypress_close(struct tty_struct *tty, 177static void cypress_close(struct usb_serial_port *port);
178 struct usb_serial_port *port, struct file *filp); 178static void cypress_dtr_rts(struct usb_serial_port *port, int on);
179static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port, 179static int cypress_write(struct tty_struct *tty, struct usb_serial_port *port,
180 const unsigned char *buf, int count); 180 const unsigned char *buf, int count);
181static void cypress_send(struct usb_serial_port *port); 181static void cypress_send(struct usb_serial_port *port);
@@ -218,6 +218,7 @@ static struct usb_serial_driver cypress_earthmate_device = {
218 .shutdown = cypress_shutdown, 218 .shutdown = cypress_shutdown,
219 .open = cypress_open, 219 .open = cypress_open,
220 .close = cypress_close, 220 .close = cypress_close,
221 .dtr_rts = cypress_dtr_rts,
221 .write = cypress_write, 222 .write = cypress_write,
222 .write_room = cypress_write_room, 223 .write_room = cypress_write_room,
223 .ioctl = cypress_ioctl, 224 .ioctl = cypress_ioctl,
@@ -244,6 +245,7 @@ static struct usb_serial_driver cypress_hidcom_device = {
244 .shutdown = cypress_shutdown, 245 .shutdown = cypress_shutdown,
245 .open = cypress_open, 246 .open = cypress_open,
246 .close = cypress_close, 247 .close = cypress_close,
248 .dtr_rts = cypress_dtr_rts,
247 .write = cypress_write, 249 .write = cypress_write,
248 .write_room = cypress_write_room, 250 .write_room = cypress_write_room,
249 .ioctl = cypress_ioctl, 251 .ioctl = cypress_ioctl,
@@ -270,6 +272,7 @@ static struct usb_serial_driver cypress_ca42v2_device = {
270 .shutdown = cypress_shutdown, 272 .shutdown = cypress_shutdown,
271 .open = cypress_open, 273 .open = cypress_open,
272 .close = cypress_close, 274 .close = cypress_close,
275 .dtr_rts = cypress_dtr_rts,
273 .write = cypress_write, 276 .write = cypress_write,
274 .write_room = cypress_write_room, 277 .write_room = cypress_write_room,
275 .ioctl = cypress_ioctl, 278 .ioctl = cypress_ioctl,
@@ -656,11 +659,7 @@ static int cypress_open(struct tty_struct *tty,
656 priv->rx_flags = 0; 659 priv->rx_flags = 0;
657 spin_unlock_irqrestore(&priv->lock, flags); 660 spin_unlock_irqrestore(&priv->lock, flags);
658 661
659 /* raise both lines and set termios */ 662 /* Set termios */
660 spin_lock_irqsave(&priv->lock, flags);
661 priv->line_control = CONTROL_DTR | CONTROL_RTS;
662 priv->cmd_ctrl = 1;
663 spin_unlock_irqrestore(&priv->lock, flags);
664 result = cypress_write(tty, port, NULL, 0); 663 result = cypress_write(tty, port, NULL, 0);
665 664
666 if (result) { 665 if (result) {
@@ -694,76 +693,42 @@ static int cypress_open(struct tty_struct *tty,
694 __func__, result); 693 __func__, result);
695 cypress_set_dead(port); 694 cypress_set_dead(port);
696 } 695 }
697 696 port->port.drain_delay = 256;
698 return result; 697 return result;
699} /* cypress_open */ 698} /* cypress_open */
700 699
700static void cypress_dtr_rts(struct usb_serial_port *port, int on)
701{
702 struct cypress_private *priv = usb_get_serial_port_data(port);
703	/* raise or drop DTR and RTS according to "on" */
705 spin_lock_irq(&priv->lock);
706 if (on == 0)
707 priv->line_control = 0;
708 else
709 priv->line_control = CONTROL_DTR | CONTROL_RTS;
710 priv->cmd_ctrl = 1;
711 spin_unlock_irq(&priv->lock);
712 cypress_write(NULL, port, NULL, 0);
713}
701 714
702static void cypress_close(struct tty_struct *tty, 715static void cypress_close(struct usb_serial_port *port)
703 struct usb_serial_port *port, struct file *filp)
704{ 716{
705 struct cypress_private *priv = usb_get_serial_port_data(port); 717 struct cypress_private *priv = usb_get_serial_port_data(port);
706 unsigned int c_cflag;
707 int bps;
708 long timeout;
709 wait_queue_t wait;
710 718
711 dbg("%s - port %d", __func__, port->number); 719 dbg("%s - port %d", __func__, port->number);
712 720
713 /* wait for data to drain from buffer */
714 spin_lock_irq(&priv->lock);
715 timeout = CYPRESS_CLOSING_WAIT;
716 init_waitqueue_entry(&wait, current);
717 add_wait_queue(&tty->write_wait, &wait);
718 for (;;) {
719 set_current_state(TASK_INTERRUPTIBLE);
720 if (cypress_buf_data_avail(priv->buf) == 0
721 || timeout == 0 || signal_pending(current)
722 /* without mutex, allowed due to harmless failure mode */
723 || port->serial->disconnected)
724 break;
725 spin_unlock_irq(&priv->lock);
726 timeout = schedule_timeout(timeout);
727 spin_lock_irq(&priv->lock);
728 }
729 set_current_state(TASK_RUNNING);
730 remove_wait_queue(&tty->write_wait, &wait);
731 /* clear out any remaining data in the buffer */
732 cypress_buf_clear(priv->buf);
733 spin_unlock_irq(&priv->lock);
734
735 /* writing is potentially harmful, lock must be taken */ 721 /* writing is potentially harmful, lock must be taken */
736 mutex_lock(&port->serial->disc_mutex); 722 mutex_lock(&port->serial->disc_mutex);
737 if (port->serial->disconnected) { 723 if (port->serial->disconnected) {
738 mutex_unlock(&port->serial->disc_mutex); 724 mutex_unlock(&port->serial->disc_mutex);
739 return; 725 return;
740 } 726 }
741 /* wait for characters to drain from device */ 727 cypress_buf_clear(priv->buf);
742 if (tty) {
743 bps = tty_get_baud_rate(tty);
744 if (bps > 1200)
745 timeout = max((HZ * 2560) / bps, HZ / 10);
746 else
747 timeout = 2 * HZ;
748 schedule_timeout_interruptible(timeout);
749 }
750
751 dbg("%s - stopping urbs", __func__); 728 dbg("%s - stopping urbs", __func__);
752 usb_kill_urb(port->interrupt_in_urb); 729 usb_kill_urb(port->interrupt_in_urb);
753 usb_kill_urb(port->interrupt_out_urb); 730 usb_kill_urb(port->interrupt_out_urb);
754 731
755 if (tty) {
756 c_cflag = tty->termios->c_cflag;
757 if (c_cflag & HUPCL) {
758 /* drop dtr and rts */
759 priv = usb_get_serial_port_data(port);
760 spin_lock_irq(&priv->lock);
761 priv->line_control = 0;
762 priv->cmd_ctrl = 1;
763 spin_unlock_irq(&priv->lock);
764 cypress_write(tty, port, NULL, 0);
765 }
766 }
767 732
768 if (stats) 733 if (stats)
769 dev_info(&port->dev, "Statistics: %d Bytes In | %d Bytes Out | %d Commands Issued\n", 734 dev_info(&port->dev, "Statistics: %d Bytes In | %d Bytes Out | %d Commands Issued\n",
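cypress_dtr_rts() above is the template most conversions in this series follow: update the cached line-control bits under the private lock according to on, then push a control write to the device. A condensed sketch of the same logic (using the cypress_private fields, minus the duplicated priv lookup):

static void example_dtr_rts(struct usb_serial_port *port, int on)
{
	struct cypress_private *priv = usb_get_serial_port_data(port);

	spin_lock_irq(&priv->lock);
	priv->line_control = on ? (CONTROL_DTR | CONTROL_RTS) : 0;
	priv->cmd_ctrl = 1;			/* next write carries line state */
	spin_unlock_irq(&priv->lock);
	cypress_write(NULL, port, NULL, 0);	/* zero-length write = control update */
}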
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 38ba4ea8b6b..30f5140eff0 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -422,7 +422,6 @@ struct digi_port {
422 int dp_throttled; 422 int dp_throttled;
423 int dp_throttle_restart; 423 int dp_throttle_restart;
424 wait_queue_head_t dp_flush_wait; 424 wait_queue_head_t dp_flush_wait;
425 int dp_in_close; /* close in progress */
426 wait_queue_head_t dp_close_wait; /* wait queue for close */ 425 wait_queue_head_t dp_close_wait; /* wait queue for close */
427 struct work_struct dp_wakeup_work; 426 struct work_struct dp_wakeup_work;
428 struct usb_serial_port *dp_port; 427 struct usb_serial_port *dp_port;
@@ -456,8 +455,9 @@ static int digi_write_room(struct tty_struct *tty);
456static int digi_chars_in_buffer(struct tty_struct *tty); 455static int digi_chars_in_buffer(struct tty_struct *tty);
457static int digi_open(struct tty_struct *tty, struct usb_serial_port *port, 456static int digi_open(struct tty_struct *tty, struct usb_serial_port *port,
458 struct file *filp); 457 struct file *filp);
459static void digi_close(struct tty_struct *tty, struct usb_serial_port *port, 458static void digi_close(struct usb_serial_port *port);
460 struct file *filp); 459static int digi_carrier_raised(struct usb_serial_port *port);
460static void digi_dtr_rts(struct usb_serial_port *port, int on);
461static int digi_startup_device(struct usb_serial *serial); 461static int digi_startup_device(struct usb_serial *serial);
462static int digi_startup(struct usb_serial *serial); 462static int digi_startup(struct usb_serial *serial);
463static void digi_shutdown(struct usb_serial *serial); 463static void digi_shutdown(struct usb_serial *serial);
@@ -510,6 +510,8 @@ static struct usb_serial_driver digi_acceleport_2_device = {
510 .num_ports = 3, 510 .num_ports = 3,
511 .open = digi_open, 511 .open = digi_open,
512 .close = digi_close, 512 .close = digi_close,
513 .dtr_rts = digi_dtr_rts,
514 .carrier_raised = digi_carrier_raised,
513 .write = digi_write, 515 .write = digi_write,
514 .write_room = digi_write_room, 516 .write_room = digi_write_room,
515 .write_bulk_callback = digi_write_bulk_callback, 517 .write_bulk_callback = digi_write_bulk_callback,
@@ -1328,6 +1330,19 @@ static int digi_chars_in_buffer(struct tty_struct *tty)
1328 1330
1329} 1331}
1330 1332
1333static void digi_dtr_rts(struct usb_serial_port *port, int on)
1334{
1335 /* Adjust DTR and RTS */
1336 digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1);
1337}
1338
1339static int digi_carrier_raised(struct usb_serial_port *port)
1340{
1341 struct digi_port *priv = usb_get_serial_port_data(port);
1342 if (priv->dp_modem_signals & TIOCM_CD)
1343 return 1;
1344 return 0;
1345}
1331 1346
1332static int digi_open(struct tty_struct *tty, struct usb_serial_port *port, 1347static int digi_open(struct tty_struct *tty, struct usb_serial_port *port,
1333 struct file *filp) 1348 struct file *filp)
@@ -1336,7 +1351,6 @@ static int digi_open(struct tty_struct *tty, struct usb_serial_port *port,
1336 unsigned char buf[32]; 1351 unsigned char buf[32];
1337 struct digi_port *priv = usb_get_serial_port_data(port); 1352 struct digi_port *priv = usb_get_serial_port_data(port);
1338 struct ktermios not_termios; 1353 struct ktermios not_termios;
1339 unsigned long flags = 0;
1340 1354
1341 dbg("digi_open: TOP: port=%d, open_count=%d", 1355 dbg("digi_open: TOP: port=%d, open_count=%d",
1342 priv->dp_port_num, port->port.count); 1356 priv->dp_port_num, port->port.count);
@@ -1345,26 +1359,6 @@ static int digi_open(struct tty_struct *tty, struct usb_serial_port *port,
1345 if (digi_startup_device(port->serial) != 0) 1359 if (digi_startup_device(port->serial) != 0)
1346 return -ENXIO; 1360 return -ENXIO;
1347 1361
1348 spin_lock_irqsave(&priv->dp_port_lock, flags);
1349
1350 /* don't wait on a close in progress for non-blocking opens */
1351 if (priv->dp_in_close && (filp->f_flags&(O_NDELAY|O_NONBLOCK)) == 0) {
1352 spin_unlock_irqrestore(&priv->dp_port_lock, flags);
1353 return -EAGAIN;
1354 }
1355
1356 /* wait for a close in progress to finish */
1357 while (priv->dp_in_close) {
1358 cond_wait_interruptible_timeout_irqrestore(
1359 &priv->dp_close_wait, DIGI_RETRY_TIMEOUT,
1360 &priv->dp_port_lock, flags);
1361 if (signal_pending(current))
1362 return -EINTR;
1363 spin_lock_irqsave(&priv->dp_port_lock, flags);
1364 }
1365
1366 spin_unlock_irqrestore(&priv->dp_port_lock, flags);
1367
1368 /* read modem signals automatically whenever they change */ 1362 /* read modem signals automatically whenever they change */
1369 buf[0] = DIGI_CMD_READ_INPUT_SIGNALS; 1363 buf[0] = DIGI_CMD_READ_INPUT_SIGNALS;
1370 buf[1] = priv->dp_port_num; 1364 buf[1] = priv->dp_port_num;
@@ -1387,16 +1381,11 @@ static int digi_open(struct tty_struct *tty, struct usb_serial_port *port,
1387 not_termios.c_iflag = ~tty->termios->c_iflag; 1381 not_termios.c_iflag = ~tty->termios->c_iflag;
1388 digi_set_termios(tty, port, &not_termios); 1382 digi_set_termios(tty, port, &not_termios);
1389 } 1383 }
1390
1391 /* set DTR and RTS */
1392 digi_set_modem_signals(port, TIOCM_DTR|TIOCM_RTS, 1);
1393
1394 return 0; 1384 return 0;
1395} 1385}
1396 1386
1397 1387
1398static void digi_close(struct tty_struct *tty, struct usb_serial_port *port, 1388static void digi_close(struct usb_serial_port *port)
1399 struct file *filp)
1400{ 1389{
1401 DEFINE_WAIT(wait); 1390 DEFINE_WAIT(wait);
1402 int ret; 1391 int ret;
@@ -1411,28 +1400,9 @@ static void digi_close(struct tty_struct *tty, struct usb_serial_port *port,
1411 if (port->serial->disconnected) 1400 if (port->serial->disconnected)
1412 goto exit; 1401 goto exit;
1413 1402
1414 /* do cleanup only after final close on this port */
1415 spin_lock_irq(&priv->dp_port_lock);
1416 priv->dp_in_close = 1;
1417 spin_unlock_irq(&priv->dp_port_lock);
1418
1419 /* tell line discipline to process only XON/XOFF */
1420 tty->closing = 1;
1421
1422 /* wait for output to drain */
1423 if ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) == 0)
1424 tty_wait_until_sent(tty, DIGI_CLOSE_TIMEOUT);
1425
1426 /* flush driver and line discipline buffers */
1427 tty_driver_flush_buffer(tty);
1428 tty_ldisc_flush(tty);
1429
1430 if (port->serial->dev) { 1403 if (port->serial->dev) {
1431	/* wait for transmit idle */	1404	/* FIXME: Transmit idle belongs in the wait_until_sent path */
1432 if ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) == 0) 1405 digi_transmit_idle(port, DIGI_CLOSE_TIMEOUT);
1433 digi_transmit_idle(port, DIGI_CLOSE_TIMEOUT);
1434 /* drop DTR and RTS */
1435 digi_set_modem_signals(port, 0, 0);
1436 1406
1437 /* disable input flow control */ 1407 /* disable input flow control */
1438 buf[0] = DIGI_CMD_SET_INPUT_FLOW_CONTROL; 1408 buf[0] = DIGI_CMD_SET_INPUT_FLOW_CONTROL;
@@ -1477,11 +1447,9 @@ static void digi_close(struct tty_struct *tty, struct usb_serial_port *port,
1477 /* shutdown any outstanding bulk writes */ 1447 /* shutdown any outstanding bulk writes */
1478 usb_kill_urb(port->write_urb); 1448 usb_kill_urb(port->write_urb);
1479 } 1449 }
1480 tty->closing = 0;
1481exit: 1450exit:
1482 spin_lock_irq(&priv->dp_port_lock); 1451 spin_lock_irq(&priv->dp_port_lock);
1483 priv->dp_write_urb_in_use = 0; 1452 priv->dp_write_urb_in_use = 0;
1484 priv->dp_in_close = 0;
1485 wake_up_interruptible(&priv->dp_close_wait); 1453 wake_up_interruptible(&priv->dp_close_wait);
1486 spin_unlock_irq(&priv->dp_port_lock); 1454 spin_unlock_irq(&priv->dp_port_lock);
1487 mutex_unlock(&port->serial->disc_mutex); 1455 mutex_unlock(&port->serial->disc_mutex);
@@ -1560,7 +1528,6 @@ static int digi_startup(struct usb_serial *serial)
1560 priv->dp_throttled = 0; 1528 priv->dp_throttled = 0;
1561 priv->dp_throttle_restart = 0; 1529 priv->dp_throttle_restart = 0;
1562 init_waitqueue_head(&priv->dp_flush_wait); 1530 init_waitqueue_head(&priv->dp_flush_wait);
1563 priv->dp_in_close = 0;
1564 init_waitqueue_head(&priv->dp_close_wait); 1531 init_waitqueue_head(&priv->dp_close_wait);
1565 INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock); 1532 INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
1566 priv->dp_port = serial->port[i]; 1533 priv->dp_port = serial->port[i];
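digi_dtr_rts() encodes both modem signals as on * (TIOCM_DTR|TIOCM_RTS): multiplying the 0/1 flag by the mask yields either no bits or both, with no branch. A standalone user-space check of that identity, using the TIOCM_* values from the Linux termios headers:

#include <assert.h>
#include <stdio.h>

#define TIOCM_DTR 0x002
#define TIOCM_RTS 0x004

int main(void)
{
	for (int on = 0; on <= 1; on++) {
		unsigned int sig = on * (TIOCM_DTR | TIOCM_RTS);
		/* on=0 -> 0x000, on=1 -> 0x006 */
		assert(sig == (on ? (TIOCM_DTR | TIOCM_RTS) : 0));
		printf("on=%d -> signals=0x%03x\n", on, sig);
	}
	return 0;
}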
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index c709ec474a8..2b141ccb0cd 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -81,8 +81,7 @@ static int debug;
81/* function prototypes for an empeg-car player */ 81/* function prototypes for an empeg-car player */
82static int empeg_open(struct tty_struct *tty, struct usb_serial_port *port, 82static int empeg_open(struct tty_struct *tty, struct usb_serial_port *port,
83 struct file *filp); 83 struct file *filp);
84static void empeg_close(struct tty_struct *tty, struct usb_serial_port *port, 84static void empeg_close(struct usb_serial_port *port);
85 struct file *filp);
86static int empeg_write(struct tty_struct *tty, struct usb_serial_port *port, 85static int empeg_write(struct tty_struct *tty, struct usb_serial_port *port,
87 const unsigned char *buf, 86 const unsigned char *buf,
88 int count); 87 int count);
@@ -181,8 +180,7 @@ static int empeg_open(struct tty_struct *tty, struct usb_serial_port *port,
181} 180}
182 181
183 182
184static void empeg_close(struct tty_struct *tty, struct usb_serial_port *port, 183static void empeg_close(struct usb_serial_port *port)
185 struct file *filp)
186{ 184{
187 dbg("%s - port %d", __func__, port->number); 185 dbg("%s - port %d", __func__, port->number);
188 186
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 0ab8474b00c..683304d6061 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -89,6 +89,7 @@ struct ftdi_private {
89 int force_rtscts; /* if non-zero, force RTS-CTS to always 89 int force_rtscts; /* if non-zero, force RTS-CTS to always
90 be enabled */ 90 be enabled */
91 91
92 unsigned int latency; /* latency setting in use */
92 spinlock_t tx_lock; /* spinlock for transmit state */ 93 spinlock_t tx_lock; /* spinlock for transmit state */
93 unsigned long tx_bytes; 94 unsigned long tx_bytes;
94 unsigned long tx_outstanding_bytes; 95 unsigned long tx_outstanding_bytes;
@@ -719,8 +720,8 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port);
719static int ftdi_sio_port_remove(struct usb_serial_port *port); 720static int ftdi_sio_port_remove(struct usb_serial_port *port);
720static int ftdi_open(struct tty_struct *tty, 721static int ftdi_open(struct tty_struct *tty,
721 struct usb_serial_port *port, struct file *filp); 722 struct usb_serial_port *port, struct file *filp);
722static void ftdi_close(struct tty_struct *tty, 723static void ftdi_close(struct usb_serial_port *port);
723 struct usb_serial_port *port, struct file *filp); 724static void ftdi_dtr_rts(struct usb_serial_port *port, int on);
724static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port, 725static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
725 const unsigned char *buf, int count); 726 const unsigned char *buf, int count);
726static int ftdi_write_room(struct tty_struct *tty); 727static int ftdi_write_room(struct tty_struct *tty);
@@ -758,6 +759,7 @@ static struct usb_serial_driver ftdi_sio_device = {
758 .port_remove = ftdi_sio_port_remove, 759 .port_remove = ftdi_sio_port_remove,
759 .open = ftdi_open, 760 .open = ftdi_open,
760 .close = ftdi_close, 761 .close = ftdi_close,
762 .dtr_rts = ftdi_dtr_rts,
761 .throttle = ftdi_throttle, 763 .throttle = ftdi_throttle,
762 .unthrottle = ftdi_unthrottle, 764 .unthrottle = ftdi_unthrottle,
763 .write = ftdi_write, 765 .write = ftdi_write,
@@ -1037,7 +1039,54 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
1037 return rv; 1039 return rv;
1038} 1040}
1039 1041
1042static int write_latency_timer(struct usb_serial_port *port)
1043{
1044 struct ftdi_private *priv = usb_get_serial_port_data(port);
1045 struct usb_device *udev = port->serial->dev;
1046 char buf[1];
1047 int rv = 0;
1048 int l = priv->latency;
1049
1050 if (priv->flags & ASYNC_LOW_LATENCY)
1051 l = 1;
1052
1053 dbg("%s: setting latency timer = %i", __func__, l);
1054
1055 rv = usb_control_msg(udev,
1056 usb_sndctrlpipe(udev, 0),
1057 FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
1058 FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
1059 l, priv->interface,
1060 buf, 0, WDR_TIMEOUT);
1061
1062 if (rv < 0)
1063 dev_err(&port->dev, "Unable to write latency timer: %i\n", rv);
1064 return rv;
1065}
1066
1067static int read_latency_timer(struct usb_serial_port *port)
1068{
1069 struct ftdi_private *priv = usb_get_serial_port_data(port);
1070 struct usb_device *udev = port->serial->dev;
1071 unsigned short latency = 0;
1072 int rv = 0;
1073
1074
1075 dbg("%s", __func__);
1040 1076
1077 rv = usb_control_msg(udev,
1078 usb_rcvctrlpipe(udev, 0),
1079 FTDI_SIO_GET_LATENCY_TIMER_REQUEST,
1080 FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE,
1081 0, priv->interface,
1082 (char *) &latency, 1, WDR_TIMEOUT);
1083
1084 if (rv < 0) {
1085 dev_err(&port->dev, "Unable to read latency timer: %i\n", rv);
1086 return -EIO;
1087 }
1088 return latency;
1089}
1041 1090
1042static int get_serial_info(struct usb_serial_port *port, 1091static int get_serial_info(struct usb_serial_port *port,
1043 struct serial_struct __user *retinfo) 1092 struct serial_struct __user *retinfo)
@@ -1097,6 +1146,7 @@ static int set_serial_info(struct tty_struct *tty,
1097 priv->custom_divisor = new_serial.custom_divisor; 1146 priv->custom_divisor = new_serial.custom_divisor;
1098 1147
1099 tty->low_latency = (priv->flags & ASYNC_LOW_LATENCY) ? 1 : 0; 1148 tty->low_latency = (priv->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
1149 write_latency_timer(port);
1100 1150
1101check_and_exit: 1151check_and_exit:
1102 if ((old_priv.flags & ASYNC_SPD_MASK) != 1152 if ((old_priv.flags & ASYNC_SPD_MASK) !=
@@ -1192,27 +1242,13 @@ static ssize_t show_latency_timer(struct device *dev,
1192{ 1242{
1193 struct usb_serial_port *port = to_usb_serial_port(dev); 1243 struct usb_serial_port *port = to_usb_serial_port(dev);
1194 struct ftdi_private *priv = usb_get_serial_port_data(port); 1244 struct ftdi_private *priv = usb_get_serial_port_data(port);
1195 struct usb_device *udev = port->serial->dev; 1245 if (priv->flags & ASYNC_LOW_LATENCY)
1196 unsigned short latency = 0; 1246 return sprintf(buf, "1\n");
1197 int rv = 0; 1247 else
1198 1248 return sprintf(buf, "%i\n", priv->latency);
1199
1200 dbg("%s", __func__);
1201
1202 rv = usb_control_msg(udev,
1203 usb_rcvctrlpipe(udev, 0),
1204 FTDI_SIO_GET_LATENCY_TIMER_REQUEST,
1205 FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE,
1206 0, priv->interface,
1207 (char *) &latency, 1, WDR_TIMEOUT);
1208
1209 if (rv < 0) {
1210 dev_err(dev, "Unable to read latency timer: %i\n", rv);
1211 return -EIO;
1212 }
1213 return sprintf(buf, "%i\n", latency);
1214} 1249}
1215 1250
1251
1216/* Write a new value of the latency timer, in units of milliseconds. */ 1252/* Write a new value of the latency timer, in units of milliseconds. */
1217static ssize_t store_latency_timer(struct device *dev, 1253static ssize_t store_latency_timer(struct device *dev,
1218 struct device_attribute *attr, const char *valbuf, 1254 struct device_attribute *attr, const char *valbuf,
@@ -1220,25 +1256,13 @@ static ssize_t store_latency_timer(struct device *dev,
1220{ 1256{
1221 struct usb_serial_port *port = to_usb_serial_port(dev); 1257 struct usb_serial_port *port = to_usb_serial_port(dev);
1222 struct ftdi_private *priv = usb_get_serial_port_data(port); 1258 struct ftdi_private *priv = usb_get_serial_port_data(port);
1223 struct usb_device *udev = port->serial->dev;
1224 char buf[1];
1225 int v = simple_strtoul(valbuf, NULL, 10); 1259 int v = simple_strtoul(valbuf, NULL, 10);
1226 int rv = 0; 1260 int rv = 0;
1227 1261
1228 dbg("%s: setting latency timer = %i", __func__, v); 1262 priv->latency = v;
1229 1263 rv = write_latency_timer(port);
1230 rv = usb_control_msg(udev, 1264 if (rv < 0)
1231 usb_sndctrlpipe(udev, 0),
1232 FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
1233 FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
1234 v, priv->interface,
1235 buf, 0, WDR_TIMEOUT);
1236
1237 if (rv < 0) {
1238 dev_err(dev, "Unable to write latency timer: %i\n", rv);
1239 return -EIO; 1265 return -EIO;
1240 }
1241
1242 return count; 1266 return count;
1243} 1267}
1244 1268
@@ -1392,6 +1416,7 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
1392 usb_set_serial_port_data(port, priv); 1416 usb_set_serial_port_data(port, priv);
1393 1417
1394 ftdi_determine_type(port); 1418 ftdi_determine_type(port);
1419 read_latency_timer(port);
1395 create_sysfs_attrs(port); 1420 create_sysfs_attrs(port);
1396 return 0; 1421 return 0;
1397} 1422}
@@ -1487,14 +1512,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
1487 1512
1488 remove_sysfs_attrs(port); 1513 remove_sysfs_attrs(port);
1489 1514
1490 /* all open ports are closed at this point 1515 kref_put(&priv->kref, ftdi_sio_priv_release);
1491 * (by usbserial.c:__serial_close, which calls ftdi_close)
1492 */
1493
1494 if (priv) {
1495 usb_set_serial_port_data(port, NULL);
1496 kref_put(&priv->kref, ftdi_sio_priv_release);
1497 }
1498 1516
1499 return 0; 1517 return 0;
1500} 1518}
@@ -1521,6 +1539,8 @@ static int ftdi_open(struct tty_struct *tty,
1521 if (tty) 1539 if (tty)
1522 tty->low_latency = (priv->flags & ASYNC_LOW_LATENCY) ? 1 : 0; 1540 tty->low_latency = (priv->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
1523 1541
1542 write_latency_timer(port);
1543
1524 /* No error checking for this (will get errors later anyway) */ 1544 /* No error checking for this (will get errors later anyway) */
1525 /* See ftdi_sio.h for description of what is reset */ 1545 /* See ftdi_sio.h for description of what is reset */
1526 usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 1546 usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
@@ -1536,11 +1556,6 @@ static int ftdi_open(struct tty_struct *tty,
1536 if (tty) 1556 if (tty)
1537 ftdi_set_termios(tty, port, tty->termios); 1557 ftdi_set_termios(tty, port, tty->termios);
1538 1558
1539 /* FIXME: Flow control might be enabled, so it should be checked -
1540 we have no control of defaults! */
1541 /* Turn on RTS and DTR since we are not flow controlling by default */
1542 set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
1543
1544 /* Not throttled */ 1559 /* Not throttled */
1545 spin_lock_irqsave(&priv->rx_lock, flags); 1560 spin_lock_irqsave(&priv->rx_lock, flags);
1546 priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED); 1561 priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
@@ -1565,6 +1580,30 @@ static int ftdi_open(struct tty_struct *tty,
1565} /* ftdi_open */ 1580} /* ftdi_open */
1566 1581
1567 1582
1583static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
1584{
1585 struct ftdi_private *priv = usb_get_serial_port_data(port);
1586 char buf[1];
1587
1588 mutex_lock(&port->serial->disc_mutex);
1589 if (!port->serial->disconnected) {
1590 /* Disable flow control */
1591 if (!on && usb_control_msg(port->serial->dev,
1592 usb_sndctrlpipe(port->serial->dev, 0),
1593 FTDI_SIO_SET_FLOW_CTRL_REQUEST,
1594 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
1595 0, priv->interface, buf, 0,
1596 WDR_TIMEOUT) < 0) {
1597 dev_err(&port->dev, "error from flowcontrol urb\n");
1598 }
1599 /* drop RTS and DTR */
1600 if (on)
1601 set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
1602 else
1603 clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
1604 }
1605 mutex_unlock(&port->serial->disc_mutex);
1606}
1568 1607
1569/* 1608/*
1570 * usbserial:__serial_close only calls ftdi_close if the port is open	1609 * usbserial:__serial_close only calls ftdi_close if the port is open
@@ -1574,31 +1613,12 @@ static int ftdi_open(struct tty_struct *tty,
1574 * 1613 *
1575 */ 1614 */
1576 1615
1577static void ftdi_close(struct tty_struct *tty, 1616static void ftdi_close(struct usb_serial_port *port)
1578 struct usb_serial_port *port, struct file *filp)
1579{ /* ftdi_close */ 1617{ /* ftdi_close */
1580 unsigned int c_cflag = tty->termios->c_cflag;
1581 struct ftdi_private *priv = usb_get_serial_port_data(port); 1618 struct ftdi_private *priv = usb_get_serial_port_data(port);
1582 char buf[1];
1583 1619
1584 dbg("%s", __func__); 1620 dbg("%s", __func__);
1585 1621
1586 mutex_lock(&port->serial->disc_mutex);
1587 if (c_cflag & HUPCL && !port->serial->disconnected) {
1588 /* Disable flow control */
1589 if (usb_control_msg(port->serial->dev,
1590 usb_sndctrlpipe(port->serial->dev, 0),
1591 FTDI_SIO_SET_FLOW_CTRL_REQUEST,
1592 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
1593 0, priv->interface, buf, 0,
1594 WDR_TIMEOUT) < 0) {
1595 dev_err(&port->dev, "error from flowcontrol urb\n");
1596 }
1597
1598 /* drop RTS and DTR */
1599 clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
1600 } /* Note change no line if hupcl is off */
1601 mutex_unlock(&port->serial->disc_mutex);
1602 1622
1603 /* cancel any scheduled reading */ 1623 /* cancel any scheduled reading */
1604 cancel_delayed_work_sync(&priv->rx_work); 1624 cancel_delayed_work_sync(&priv->rx_work);
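The ftdi_sio hunks fold two duplicated usb_control_msg() sequences into write_latency_timer()/read_latency_timer() and cache the current value in priv->latency, so the sysfs read path no longer queries the device. Roughly, the resulting show/store pair reads as below (a consolidation of the hunks above, error paths trimmed):

/* Sketch of the post-patch sysfs pair; not a verbatim copy. */
static ssize_t show_latency_timer(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct usb_serial_port *port = to_usb_serial_port(dev);
	struct ftdi_private *priv = usb_get_serial_port_data(port);

	/* ASYNC_LOW_LATENCY forces a 1 ms timer regardless of the cache. */
	if (priv->flags & ASYNC_LOW_LATENCY)
		return sprintf(buf, "1\n");
	return sprintf(buf, "%i\n", priv->latency);
}

static ssize_t store_latency_timer(struct device *dev,
				   struct device_attribute *attr,
				   const char *valbuf, size_t count)
{
	struct usb_serial_port *port = to_usb_serial_port(dev);
	struct ftdi_private *priv = usb_get_serial_port_data(port);

	priv->latency = simple_strtoul(valbuf, NULL, 10);
	if (write_latency_timer(port) < 0)	/* pushes priv->latency out */
		return -EIO;
	return count;
}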
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 586d30ff450..ee25a3fe3b0 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -993,8 +993,7 @@ static int garmin_open(struct tty_struct *tty,
993} 993}
994 994
995 995
996static void garmin_close(struct tty_struct *tty, 996static void garmin_close(struct usb_serial_port *port)
997 struct usb_serial_port *port, struct file *filp)
998{ 997{
999 struct usb_serial *serial = port->serial; 998 struct usb_serial *serial = port->serial;
1000 struct garmin_data *garmin_data_p = usb_get_serial_port_data(port); 999 struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 4cec9906ccf..be82ea95672 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -184,8 +184,7 @@ int usb_serial_generic_resume(struct usb_serial *serial)
184} 184}
185EXPORT_SYMBOL_GPL(usb_serial_generic_resume); 185EXPORT_SYMBOL_GPL(usb_serial_generic_resume);
186 186
187void usb_serial_generic_close(struct tty_struct *tty, 187void usb_serial_generic_close(struct usb_serial_port *port)
188 struct usb_serial_port *port, struct file *filp)
189{ 188{
190 dbg("%s - port %d", __func__, port->number); 189 dbg("%s - port %d", __func__, port->number);
191 generic_cleanup(port); 190 generic_cleanup(port);
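usb_serial_generic_close() adopts the same one-argument prototype, so drivers with no special teardown can keep delegating to it. A minimal driver-table sketch under that assumption (names hypothetical):

static struct usb_serial_driver simple_device = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = "simple",		/* hypothetical */
	},
	.id_table  = id_table,			/* assumed defined elsewhere */
	.num_ports = 1,
	.close     = usb_serial_generic_close,	/* now void (*)(struct usb_serial_port *) */
};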
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index fb4a73d090f..53ef5996e33 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -207,8 +207,7 @@ static void edge_bulk_out_cmd_callback(struct urb *urb);
207/* function prototypes for the usbserial callbacks */ 207/* function prototypes for the usbserial callbacks */
208static int edge_open(struct tty_struct *tty, struct usb_serial_port *port, 208static int edge_open(struct tty_struct *tty, struct usb_serial_port *port,
209 struct file *filp); 209 struct file *filp);
210static void edge_close(struct tty_struct *tty, struct usb_serial_port *port, 210static void edge_close(struct usb_serial_port *port);
211 struct file *filp);
212static int edge_write(struct tty_struct *tty, struct usb_serial_port *port, 211static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
213 const unsigned char *buf, int count); 212 const unsigned char *buf, int count);
214static int edge_write_room(struct tty_struct *tty); 213static int edge_write_room(struct tty_struct *tty);
@@ -965,7 +964,7 @@ static int edge_open(struct tty_struct *tty,
965 964
966 if (!edge_port->txfifo.fifo) { 965 if (!edge_port->txfifo.fifo) {
967 dbg("%s - no memory", __func__); 966 dbg("%s - no memory", __func__);
968 edge_close(tty, port, filp); 967 edge_close(port);
969 return -ENOMEM; 968 return -ENOMEM;
970 } 969 }
971 970
@@ -975,7 +974,7 @@ static int edge_open(struct tty_struct *tty,
975 974
976 if (!edge_port->write_urb) { 975 if (!edge_port->write_urb) {
977 dbg("%s - no memory", __func__); 976 dbg("%s - no memory", __func__);
978 edge_close(tty, port, filp); 977 edge_close(port);
979 return -ENOMEM; 978 return -ENOMEM;
980 } 979 }
981 980
@@ -1099,8 +1098,7 @@ static void block_until_tx_empty(struct edgeport_port *edge_port)
1099 * edge_close 1098 * edge_close
1100 * this function is called by the tty driver when a port is closed 1099 * this function is called by the tty driver when a port is closed
1101 *****************************************************************************/ 1100 *****************************************************************************/
1102static void edge_close(struct tty_struct *tty, 1101static void edge_close(struct usb_serial_port *port)
1103 struct usb_serial_port *port, struct file *filp)
1104{ 1102{
1105 struct edgeport_serial *edge_serial; 1103 struct edgeport_serial *edge_serial;
1106 struct edgeport_port *edge_port; 1104 struct edgeport_port *edge_port;
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 513b25e044c..db964db42d3 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -102,7 +102,7 @@ struct edgeport_port {
102 __u8 shadow_mcr; 102 __u8 shadow_mcr;
103 __u8 shadow_lsr; 103 __u8 shadow_lsr;
104 __u8 lsr_mask; 104 __u8 lsr_mask;
105 __u32 ump_read_timeout; /* Number of miliseconds the UMP will 105 __u32 ump_read_timeout; /* Number of milliseconds the UMP will
106 wait without data before completing 106 wait without data before completing
107 a read short */ 107 a read short */
108 int baud_rate; 108 int baud_rate;
@@ -2009,8 +2009,7 @@ release_es_lock:
2009 return status; 2009 return status;
2010} 2010}
2011 2011
2012static void edge_close(struct tty_struct *tty, 2012static void edge_close(struct usb_serial_port *port)
2013 struct usb_serial_port *port, struct file *filp)
2014{ 2013{
2015 struct edgeport_serial *edge_serial; 2014 struct edgeport_serial *edge_serial;
2016 struct edgeport_port *edge_port; 2015 struct edgeport_port *edge_port;
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index cd62825a9ac..c610a99fa47 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -76,8 +76,7 @@ static int initial_wait;
76/* Function prototypes for an ipaq */ 76/* Function prototypes for an ipaq */
77static int ipaq_open(struct tty_struct *tty, 77static int ipaq_open(struct tty_struct *tty,
78 struct usb_serial_port *port, struct file *filp); 78 struct usb_serial_port *port, struct file *filp);
79static void ipaq_close(struct tty_struct *tty, 79static void ipaq_close(struct usb_serial_port *port);
80 struct usb_serial_port *port, struct file *filp);
81static int ipaq_calc_num_ports(struct usb_serial *serial); 80static int ipaq_calc_num_ports(struct usb_serial *serial);
82static int ipaq_startup(struct usb_serial *serial); 81static int ipaq_startup(struct usb_serial *serial);
83static void ipaq_shutdown(struct usb_serial *serial); 82static void ipaq_shutdown(struct usb_serial *serial);
@@ -714,8 +713,7 @@ error:
714} 713}
715 714
716 715
717static void ipaq_close(struct tty_struct *tty, 716static void ipaq_close(struct usb_serial_port *port)
718 struct usb_serial_port *port, struct file *filp)
719{ 717{
720 struct ipaq_private *priv = usb_get_serial_port_data(port); 718 struct ipaq_private *priv = usb_get_serial_port_data(port);
721 719
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index da2a2b46644..29ad038b9c8 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -302,23 +302,17 @@ static int ipw_open(struct tty_struct *tty,
302 return 0; 302 return 0;
303} 303}
304 304
305static void ipw_close(struct tty_struct *tty, 305static void ipw_dtr_rts(struct usb_serial_port *port, int on)
306 struct usb_serial_port *port, struct file *filp)
307{ 306{
308 struct usb_device *dev = port->serial->dev; 307 struct usb_device *dev = port->serial->dev;
309 int result; 308 int result;
310 309
311 if (tty_hung_up_p(filp)) {
312 dbg("%s: tty_hung_up_p ...", __func__);
313 return;
314 }
315
316 /*--1: drop the dtr */ 310 /*--1: drop the dtr */
317 dbg("%s:dropping dtr", __func__); 311 dbg("%s:dropping dtr", __func__);
318 result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 312 result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
319 IPW_SIO_SET_PIN, 313 IPW_SIO_SET_PIN,
320 USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT, 314 USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
321 IPW_PIN_CLRDTR, 315 on ? IPW_PIN_SETDTR : IPW_PIN_CLRDTR,
322 0, 316 0,
323 NULL, 317 NULL,
324 0, 318 0,
@@ -332,7 +326,7 @@ static void ipw_close(struct tty_struct *tty,
332 result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 326 result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
333 IPW_SIO_SET_PIN, USB_TYPE_VENDOR | 327 IPW_SIO_SET_PIN, USB_TYPE_VENDOR |
334 USB_RECIP_INTERFACE | USB_DIR_OUT, 328 USB_RECIP_INTERFACE | USB_DIR_OUT,
335 IPW_PIN_CLRRTS, 329 on ? IPW_PIN_SETRTS : IPW_PIN_CLRRTS,
336 0, 330 0,
337 NULL, 331 NULL,
338 0, 332 0,
@@ -340,7 +334,12 @@ static void ipw_close(struct tty_struct *tty,
340 if (result < 0) 334 if (result < 0)
341 dev_err(&port->dev, 335 dev_err(&port->dev,
342 "dropping rts failed (error = %d)\n", result); 336 "dropping rts failed (error = %d)\n", result);
337}
343 338
339static void ipw_close(struct usb_serial_port *port)
340{
341 struct usb_device *dev = port->serial->dev;
342 int result;
344 343
345 /*--3: purge */ 344 /*--3: purge */
346 dbg("%s:sending purge", __func__); 345 dbg("%s:sending purge", __func__);
@@ -461,6 +460,7 @@ static struct usb_serial_driver ipw_device = {
461 .num_ports = 1, 460 .num_ports = 1,
462 .open = ipw_open, 461 .open = ipw_open,
463 .close = ipw_close, 462 .close = ipw_close,
463 .dtr_rts = ipw_dtr_rts,
464 .port_probe = ipw_probe, 464 .port_probe = ipw_probe,
465 .port_remove = ipw_disconnect, 465 .port_remove = ipw_disconnect,
466 .write = ipw_write, 466 .write = ipw_write,
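ipw's two vendor requests differ only in which pin value they send, and the conversion makes the on ? SET : CLR choice explicit at each call. One possible further factoring, sketched here rather than taken from the patch (timeout value arbitrary for the sketch), is a single pin helper:

static int ipw_set_pin(struct usb_serial_port *port, int on,
		       u16 set_val, u16 clr_val)
{
	struct usb_device *dev = port->serial->dev;

	/* Same control transfer as the patch, with the pin selected once. */
	return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			IPW_SIO_SET_PIN,
			USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT,
			on ? set_val : clr_val,
			0, NULL, 0, 100 /* ms, arbitrary here */);
}

/* ipw_dtr_rts() would then reduce to:
 *	ipw_set_pin(port, on, IPW_PIN_SETDTR, IPW_PIN_CLRDTR);
 *	ipw_set_pin(port, on, IPW_PIN_SETRTS, IPW_PIN_CLRRTS);
 */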
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 4e2cda93da5..66009b6b763 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -88,8 +88,7 @@ static int xbof = -1;
88static int ir_startup (struct usb_serial *serial); 88static int ir_startup (struct usb_serial *serial);
89static int ir_open(struct tty_struct *tty, struct usb_serial_port *port, 89static int ir_open(struct tty_struct *tty, struct usb_serial_port *port,
90 struct file *filep); 90 struct file *filep);
91static void ir_close(struct tty_struct *tty, struct usb_serial_port *port, 91static void ir_close(struct usb_serial_port *port);
92 struct file *filep);
93static int ir_write(struct tty_struct *tty, struct usb_serial_port *port, 92static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
94 const unsigned char *buf, int count); 93 const unsigned char *buf, int count);
95static void ir_write_bulk_callback (struct urb *urb); 94static void ir_write_bulk_callback (struct urb *urb);
@@ -346,8 +345,7 @@ static int ir_open(struct tty_struct *tty,
346 return result; 345 return result;
347} 346}
348 347
349static void ir_close(struct tty_struct *tty, 348static void ir_close(struct usb_serial_port *port)
350 struct usb_serial_port *port, struct file * filp)
351{ 349{
352 dbg("%s - port %d", __func__, port->number); 350 dbg("%s - port %d", __func__, port->number);
353 351
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 4473d442b2a..76a3cc327bb 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -40,7 +40,7 @@ static int debug;
40/* 40/*
41 * Version Information 41 * Version Information
42 */ 42 */
43#define DRIVER_VERSION "v0.5" 43#define DRIVER_VERSION "v0.10"
44#define DRIVER_DESC "Infinity USB Unlimited Phoenix driver" 44#define DRIVER_DESC "Infinity USB Unlimited Phoenix driver"
45 45
46static struct usb_device_id id_table[] = { 46static struct usb_device_id id_table[] = {
@@ -70,7 +70,6 @@ static void read_rxcmd_callback(struct urb *urb);
70struct iuu_private { 70struct iuu_private {
71 spinlock_t lock; /* store irq state */ 71 spinlock_t lock; /* store irq state */
72 wait_queue_head_t delta_msr_wait; 72 wait_queue_head_t delta_msr_wait;
73 u8 line_control;
74 u8 line_status; 73 u8 line_status;
75 u8 termios_initialized; 74 u8 termios_initialized;
76 int tiostatus; /* store IUART SIGNAL for tiocmget call */ 75 int tiostatus; /* store IUART SIGNAL for tiocmget call */
@@ -651,32 +650,33 @@ static int iuu_bulk_write(struct usb_serial_port *port)
651 unsigned long flags; 650 unsigned long flags;
652 int result; 651 int result;
653 int i; 652 int i;
653 int buf_len;
654 char *buf_ptr = port->write_urb->transfer_buffer; 654 char *buf_ptr = port->write_urb->transfer_buffer;
655 dbg("%s - enter", __func__); 655 dbg("%s - enter", __func__);
656 656
657 spin_lock_irqsave(&priv->lock, flags);
657 *buf_ptr++ = IUU_UART_ESC; 658 *buf_ptr++ = IUU_UART_ESC;
658 *buf_ptr++ = IUU_UART_TX; 659 *buf_ptr++ = IUU_UART_TX;
659 *buf_ptr++ = priv->writelen; 660 *buf_ptr++ = priv->writelen;
660 661
661 memcpy(buf_ptr, priv->writebuf, 662 memcpy(buf_ptr, priv->writebuf, priv->writelen);
662 priv->writelen); 663 buf_len = priv->writelen;
664 priv->writelen = 0;
665 spin_unlock_irqrestore(&priv->lock, flags);
663 if (debug == 1) { 666 if (debug == 1) {
664 for (i = 0; i < priv->writelen; i++) 667 for (i = 0; i < buf_len; i++)
665 sprintf(priv->dbgbuf + i*2 , 668 sprintf(priv->dbgbuf + i*2 ,
666 "%02X", priv->writebuf[i]); 669 "%02X", priv->writebuf[i]);
667 priv->dbgbuf[priv->writelen+i*2] = 0; 670 priv->dbgbuf[buf_len+i*2] = 0;
668 dbg("%s - writing %i chars : %s", __func__, 671 dbg("%s - writing %i chars : %s", __func__,
669 priv->writelen, priv->dbgbuf); 672 buf_len, priv->dbgbuf);
670 } 673 }
671 usb_fill_bulk_urb(port->write_urb, port->serial->dev, 674 usb_fill_bulk_urb(port->write_urb, port->serial->dev,
672 usb_sndbulkpipe(port->serial->dev, 675 usb_sndbulkpipe(port->serial->dev,
673 port->bulk_out_endpointAddress), 676 port->bulk_out_endpointAddress),
674 port->write_urb->transfer_buffer, priv->writelen + 3, 677 port->write_urb->transfer_buffer, buf_len + 3,
675 iuu_rxcmd, port); 678 iuu_rxcmd, port);
676 result = usb_submit_urb(port->write_urb, GFP_ATOMIC); 679 result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
677 spin_lock_irqsave(&priv->lock, flags);
678 priv->writelen = 0;
679 spin_unlock_irqrestore(&priv->lock, flags);
680 usb_serial_port_softint(port); 680 usb_serial_port_softint(port);
681 return result; 681 return result;
682} 682}
@@ -770,14 +770,10 @@ static int iuu_uart_write(struct tty_struct *tty, struct usb_serial_port *port,
770 return -ENOMEM; 770 return -ENOMEM;
771 771
772 spin_lock_irqsave(&priv->lock, flags); 772 spin_lock_irqsave(&priv->lock, flags);
773 if (priv->writelen > 0) { 773
774 /* buffer already filled but not commited */
775 spin_unlock_irqrestore(&priv->lock, flags);
776 return 0;
777 }
778 /* fill the buffer */ 774 /* fill the buffer */
779 memcpy(priv->writebuf, buf, count); 775 memcpy(priv->writebuf + priv->writelen, buf, count);
780 priv->writelen = count; 776 priv->writelen += count;
781 spin_unlock_irqrestore(&priv->lock, flags); 777 spin_unlock_irqrestore(&priv->lock, flags);
782 778
783 return count; 779 return count;
@@ -819,7 +815,7 @@ static int iuu_uart_on(struct usb_serial_port *port)
819 buf[0] = IUU_UART_ENABLE; 815 buf[0] = IUU_UART_ENABLE;
820 buf[1] = (u8) ((IUU_BAUD_9600 >> 8) & 0x00FF); 816 buf[1] = (u8) ((IUU_BAUD_9600 >> 8) & 0x00FF);
821 buf[2] = (u8) (0x00FF & IUU_BAUD_9600); 817 buf[2] = (u8) (0x00FF & IUU_BAUD_9600);
822 buf[3] = (u8) (0x0F0 & IUU_TWO_STOP_BITS) | (0x07 & IUU_PARITY_EVEN); 818 buf[3] = (u8) (0x0F0 & IUU_ONE_STOP_BIT) | (0x07 & IUU_PARITY_EVEN);
823 819
824 status = bulk_immediate(port, buf, 4); 820 status = bulk_immediate(port, buf, 4);
825 if (status != IUU_OPERATION_OK) { 821 if (status != IUU_OPERATION_OK) {
@@ -946,19 +942,59 @@ static int iuu_uart_baud(struct usb_serial_port *port, u32 baud,
946 return status; 942 return status;
947} 943}
948 944
949static int set_control_lines(struct usb_device *dev, u8 value) 945static void iuu_set_termios(struct tty_struct *tty,
946 struct usb_serial_port *port, struct ktermios *old_termios)
950{ 947{
951 return 0; 948 const u32 supported_mask = CMSPAR|PARENB|PARODD;
949
950 unsigned int cflag = tty->termios->c_cflag;
951 int status;
952 u32 actual;
953 u32 parity;
954 int csize = CS7;
955 int baud = 9600; /* Fixed for the moment */
956 u32 newval = cflag & supported_mask;
957
958 /* compute the parity parameter */
959 parity = 0;
960 if (cflag & CMSPAR) { /* Using mark space */
961 if (cflag & PARODD)
962 parity |= IUU_PARITY_SPACE;
963 else
964 parity |= IUU_PARITY_MARK;
965 } else if (!(cflag & PARENB)) {
966 parity |= IUU_PARITY_NONE;
967 csize = CS8;
968 } else if (cflag & PARODD)
969 parity |= IUU_PARITY_ODD;
970 else
971 parity |= IUU_PARITY_EVEN;
972
973 parity |= (cflag & CSTOPB ? IUU_TWO_STOP_BITS : IUU_ONE_STOP_BIT);
974
975 /* set it */
976 status = iuu_uart_baud(port,
977 (clockmode == 2) ? 16457 : 9600 * boost / 100,
978 &actual, parity);
979
980	/* set the termios value to the real one, so the user knows what has
981	 * changed. We support few fields, so it is easiest to copy the old hw
982	 * settings back over and then adjust them
983 */
984 if (old_termios)
985 tty_termios_copy_hw(tty->termios, old_termios);
986 if (status != 0) /* Set failed - return old bits */
987 return;
988 /* Re-encode speed, parity and csize */
989 tty_encode_baud_rate(tty, baud, baud);
990 tty->termios->c_cflag &= ~(supported_mask|CSIZE);
991 tty->termios->c_cflag |= newval | csize;
952} 992}
953 993
954static void iuu_close(struct tty_struct *tty, 994static void iuu_close(struct usb_serial_port *port)
955 struct usb_serial_port *port, struct file *filp)
956{ 995{
957 /* iuu_led (port,255,0,0,0); */ 996 /* iuu_led (port,255,0,0,0); */
958 struct usb_serial *serial; 997 struct usb_serial *serial;
959 struct iuu_private *priv = usb_get_serial_port_data(port);
960 unsigned long flags;
961 unsigned int c_cflag;
962 998
963 serial = port->serial; 999 serial = port->serial;
964 if (!serial) 1000 if (!serial)
@@ -968,17 +1004,6 @@ static void iuu_close(struct tty_struct *tty,
968 1004
969 iuu_uart_off(port); 1005 iuu_uart_off(port);
970 if (serial->dev) { 1006 if (serial->dev) {
971 if (tty) {
972 c_cflag = tty->termios->c_cflag;
973 if (c_cflag & HUPCL) {
974 /* drop DTR and RTS */
975 priv = usb_get_serial_port_data(port);
976 spin_lock_irqsave(&priv->lock, flags);
977 priv->line_control = 0;
978 spin_unlock_irqrestore(&priv->lock, flags);
979 set_control_lines(port->serial->dev, 0);
980 }
981 }
982 /* free writebuf */ 1007 /* free writebuf */
983 /* shutdown our urbs */ 1008 /* shutdown our urbs */
984 dbg("%s - shutting down urbs", __func__); 1009 dbg("%s - shutting down urbs", __func__);
@@ -1154,7 +1179,7 @@ static int iuu_open(struct tty_struct *tty,
1154 if (result) { 1179 if (result) {
1155 dev_err(&port->dev, "%s - failed submitting read urb," 1180 dev_err(&port->dev, "%s - failed submitting read urb,"
1156 " error %d\n", __func__, result); 1181 " error %d\n", __func__, result);
1157 iuu_close(tty, port, NULL); 1182 iuu_close(port);
1158 return -EPROTO; 1183 return -EPROTO;
1159 } else { 1184 } else {
1160 dbg("%s - rxcmd OK", __func__); 1185 dbg("%s - rxcmd OK", __func__);
@@ -1175,6 +1200,7 @@ static struct usb_serial_driver iuu_device = {
1175 .read_bulk_callback = iuu_uart_read_callback, 1200 .read_bulk_callback = iuu_uart_read_callback,
1176 .tiocmget = iuu_tiocmget, 1201 .tiocmget = iuu_tiocmget,
1177 .tiocmset = iuu_tiocmset, 1202 .tiocmset = iuu_tiocmset,
1203 .set_termios = iuu_set_termios,
1178 .attach = iuu_startup, 1204 .attach = iuu_startup,
1179 .shutdown = iuu_shutdown, 1205 .shutdown = iuu_shutdown,
1180}; 1206};
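The iuu_uart_write()/iuu_bulk_write() hunks change the buffering policy: instead of returning 0 while a previous buffer is still pending, writes now append under the lock, and the sender snapshots the length before resetting it. A user-space model of that data flow (the driver does both sides under priv->lock; a single thread suffices to show the logic):

#include <stdio.h>
#include <string.h>

/* Model of the post-patch buffering: writers append, the sender
 * snapshots and resets the length. In the driver both sides run
 * under priv->lock; here the lock points are left as comments. */
static char writebuf[256];
static int writelen;

static int uart_write(const char *buf, int count)
{
	/* lock */
	memcpy(writebuf + writelen, buf, count);	/* append, don't overwrite */
	writelen += count;
	/* unlock */
	return count;
}

static int bulk_write(void)
{
	/* lock */
	int buf_len = writelen;		/* snapshot before resetting */
	writelen = 0;
	/* unlock, then submit buf_len bytes */
	return buf_len;
}

int main(void)
{
	uart_write("abc", 3);
	uart_write("de", 2);	/* pre-patch, this returned 0 while a buffer was pending */
	printf("sent %d bytes\n", bulk_write());	/* 5 */
	return 0;
}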
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 00daa8f7759..f1195a98f31 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1298,8 +1298,16 @@ static inline void stop_urb(struct urb *urb)
1298 usb_kill_urb(urb); 1298 usb_kill_urb(urb);
1299} 1299}
1300 1300
1301static void keyspan_close(struct tty_struct *tty, 1301static void keyspan_dtr_rts(struct usb_serial_port *port, int on)
1302 struct usb_serial_port *port, struct file *filp) 1302{
1303 struct keyspan_port_private *p_priv = usb_get_serial_port_data(port);
1304
1305 p_priv->rts_state = on;
1306 p_priv->dtr_state = on;
1307 keyspan_send_setup(port, 0);
1308}
1309
1310static void keyspan_close(struct usb_serial_port *port)
1303{ 1311{
1304 int i; 1312 int i;
1305 struct usb_serial *serial = port->serial; 1313 struct usb_serial *serial = port->serial;
@@ -1336,7 +1344,6 @@ static void keyspan_close(struct tty_struct *tty,
1336 stop_urb(p_priv->out_urbs[i]); 1344 stop_urb(p_priv->out_urbs[i]);
1337 } 1345 }
1338 } 1346 }
1339 tty_port_tty_set(&port->port, NULL);
1340} 1347}
1341 1348
1342/* download the firmware to a pre-renumeration device */ 1349/* download the firmware to a pre-renumeration device */
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 38b4582e073..0d4569b6076 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -38,9 +38,8 @@
38static int keyspan_open (struct tty_struct *tty, 38static int keyspan_open (struct tty_struct *tty,
39 struct usb_serial_port *port, 39 struct usb_serial_port *port,
40 struct file *filp); 40 struct file *filp);
41static void keyspan_close (struct tty_struct *tty, 41static void keyspan_close (struct usb_serial_port *port);
42 struct usb_serial_port *port, 42static void keyspan_dtr_rts (struct usb_serial_port *port, int on);
43 struct file *filp);
44static int keyspan_startup (struct usb_serial *serial); 43static int keyspan_startup (struct usb_serial *serial);
45static void keyspan_shutdown (struct usb_serial *serial); 44static void keyspan_shutdown (struct usb_serial *serial);
46static int keyspan_write_room (struct tty_struct *tty); 45static int keyspan_write_room (struct tty_struct *tty);
@@ -562,6 +561,7 @@ static struct usb_serial_driver keyspan_1port_device = {
562 .num_ports = 1, 561 .num_ports = 1,
563 .open = keyspan_open, 562 .open = keyspan_open,
564 .close = keyspan_close, 563 .close = keyspan_close,
564 .dtr_rts = keyspan_dtr_rts,
565 .write = keyspan_write, 565 .write = keyspan_write,
566 .write_room = keyspan_write_room, 566 .write_room = keyspan_write_room,
567 .set_termios = keyspan_set_termios, 567 .set_termios = keyspan_set_termios,
@@ -582,6 +582,7 @@ static struct usb_serial_driver keyspan_2port_device = {
582 .num_ports = 2, 582 .num_ports = 2,
583 .open = keyspan_open, 583 .open = keyspan_open,
584 .close = keyspan_close, 584 .close = keyspan_close,
585 .dtr_rts = keyspan_dtr_rts,
585 .write = keyspan_write, 586 .write = keyspan_write,
586 .write_room = keyspan_write_room, 587 .write_room = keyspan_write_room,
587 .set_termios = keyspan_set_termios, 588 .set_termios = keyspan_set_termios,
@@ -602,6 +603,7 @@ static struct usb_serial_driver keyspan_4port_device = {
602 .num_ports = 4, 603 .num_ports = 4,
603 .open = keyspan_open, 604 .open = keyspan_open,
604 .close = keyspan_close, 605 .close = keyspan_close,
606 .dtr_rts = keyspan_dtr_rts,
605 .write = keyspan_write, 607 .write = keyspan_write,
606 .write_room = keyspan_write_room, 608 .write_room = keyspan_write_room,
607 .set_termios = keyspan_set_termios, 609 .set_termios = keyspan_set_termios,
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index bf1ae247da6..ab769dbea1b 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -651,6 +651,35 @@ static int keyspan_pda_chars_in_buffer(struct tty_struct *tty)
651} 651}
652 652
653 653
654static void keyspan_pda_dtr_rts(struct usb_serial_port *port, int on)
655{
656 struct usb_serial *serial = port->serial;
657
658 if (serial->dev) {
659 if (on)
660			keyspan_pda_set_modem_info(serial, (1<<7) | (1<<2));
661 else
662 keyspan_pda_set_modem_info(serial, 0);
663 }
664}
665
666static int keyspan_pda_carrier_raised(struct usb_serial_port *port)
667{
668 struct usb_serial *serial = port->serial;
669 unsigned char modembits;
670
671 /* If we can read the modem status and the DCD is low then
672 carrier is not raised yet */
673 if (keyspan_pda_get_modem_info(serial, &modembits) >= 0) {
674		if (!(modembits & (1<<6)))
675 return 0;
676 }
677 /* Carrier raised, or we failed (eg disconnected) so
678 progress accordingly */
679 return 1;
680}
681
682
654static int keyspan_pda_open(struct tty_struct *tty, 683static int keyspan_pda_open(struct tty_struct *tty,
655 struct usb_serial_port *port, struct file *filp) 684 struct usb_serial_port *port, struct file *filp)
656{ 685{
@@ -682,13 +711,6 @@ static int keyspan_pda_open(struct tty_struct *tty,
682 priv->tx_room = room; 711 priv->tx_room = room;
683 priv->tx_throttled = room ? 0 : 1; 712 priv->tx_throttled = room ? 0 : 1;
684 713
685 /* the normal serial device seems to always turn on DTR and RTS here,
686 so do the same */
687 if (tty && (tty->termios->c_cflag & CBAUD))
688 keyspan_pda_set_modem_info(serial, (1<<7) | (1<<2));
689 else
690 keyspan_pda_set_modem_info(serial, 0);
691
692 /*Start reading from the device*/ 714 /*Start reading from the device*/
693 port->interrupt_in_urb->dev = serial->dev; 715 port->interrupt_in_urb->dev = serial->dev;
694 rc = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); 716 rc = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
@@ -700,19 +722,11 @@ static int keyspan_pda_open(struct tty_struct *tty,
700error: 722error:
701 return rc; 723 return rc;
702} 724}
703 725static void keyspan_pda_close(struct usb_serial_port *port)
704
705static void keyspan_pda_close(struct tty_struct *tty,
706 struct usb_serial_port *port, struct file *filp)
707{ 726{
708 struct usb_serial *serial = port->serial; 727 struct usb_serial *serial = port->serial;
709 728
710 if (serial->dev) { 729 if (serial->dev) {
711 /* the normal serial device seems to always shut
712 off DTR and RTS now */
713 if (tty->termios->c_cflag & HUPCL)
714 keyspan_pda_set_modem_info(serial, 0);
715
716 /* shutdown our bulk reads and writes */ 730 /* shutdown our bulk reads and writes */
717 usb_kill_urb(port->write_urb); 731 usb_kill_urb(port->write_urb);
718 usb_kill_urb(port->interrupt_in_urb); 732 usb_kill_urb(port->interrupt_in_urb);
@@ -839,6 +853,8 @@ static struct usb_serial_driver keyspan_pda_device = {
839 .usb_driver = &keyspan_pda_driver, 853 .usb_driver = &keyspan_pda_driver,
840 .id_table = id_table_std, 854 .id_table = id_table_std,
841 .num_ports = 1, 855 .num_ports = 1,
856 .dtr_rts = keyspan_pda_dtr_rts,
857 .carrier_raised = keyspan_pda_carrier_raised,
842 .open = keyspan_pda_open, 858 .open = keyspan_pda_open,
843 .close = keyspan_pda_close, 859 .close = keyspan_pda_close,
844 .write = keyspan_pda_write, 860 .write = keyspan_pda_write,
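keyspan_pda_carrier_raised() tests DCD in the modem-status byte; the mask must be 1<<6 (bit 6), since the lookalike 1>>6 evaluates to 0 and would make the carrier check pass unconditionally. A standalone check of both masks:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned char modembits = 0x00;		/* DCD (bit 6) low */

	assert((1 >> 6) == 0);			/* wrong mask: & 0 never detects DCD */
	assert(!(modembits & (1 << 6)));	/* correct: carrier not raised */

	modembits = 1 << 6;			/* DCD high */
	assert(modembits & (1 << 6));		/* carrier raised */
	printf("DCD mask checks pass\n");
	return 0;
}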
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index fcd9082f3e7..fa817c66b3e 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -76,8 +76,7 @@ static int klsi_105_startup(struct usb_serial *serial);
76static void klsi_105_shutdown(struct usb_serial *serial); 76static void klsi_105_shutdown(struct usb_serial *serial);
77static int klsi_105_open(struct tty_struct *tty, 77static int klsi_105_open(struct tty_struct *tty,
78 struct usb_serial_port *port, struct file *filp); 78 struct usb_serial_port *port, struct file *filp);
79static void klsi_105_close(struct tty_struct *tty, 79static void klsi_105_close(struct usb_serial_port *port);
80 struct usb_serial_port *port, struct file *filp);
81static int klsi_105_write(struct tty_struct *tty, 80static int klsi_105_write(struct tty_struct *tty,
82 struct usb_serial_port *port, const unsigned char *buf, int count); 81 struct usb_serial_port *port, const unsigned char *buf, int count);
83static void klsi_105_write_bulk_callback(struct urb *urb); 82static void klsi_105_write_bulk_callback(struct urb *urb);
@@ -447,8 +446,7 @@ exit:
447} /* klsi_105_open */ 446} /* klsi_105_open */
448 447
449 448
450static void klsi_105_close(struct tty_struct *tty, 449static void klsi_105_close(struct usb_serial_port *port)
451 struct usb_serial_port *port, struct file *filp)
452{ 450{
453 struct klsi_105_private *priv = usb_get_serial_port_data(port); 451 struct klsi_105_private *priv = usb_get_serial_port_data(port);
454 int rc; 452 int rc;
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index c148544953b..6b570498287 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -72,8 +72,7 @@ static int kobil_startup(struct usb_serial *serial);
 static void kobil_shutdown(struct usb_serial *serial);
 static int  kobil_open(struct tty_struct *tty,
            struct usb_serial_port *port, struct file *filp);
-static void kobil_close(struct tty_struct *tty, struct usb_serial_port *port,
-           struct file *filp);
+static void kobil_close(struct usb_serial_port *port);
 static int  kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
            const unsigned char *buf, int count);
 static int  kobil_write_room(struct tty_struct *tty);
@@ -209,7 +208,7 @@ static void kobil_shutdown(struct usb_serial *serial)
 
     for (i = 0; i < serial->num_ports; ++i) {
         while (serial->port[i]->port.count > 0)
-            kobil_close(NULL, serial->port[i], NULL);
+            kobil_close(serial->port[i]);
         kfree(usb_get_serial_port_data(serial->port[i]));
         usb_set_serial_port_data(serial->port[i], NULL);
     }
@@ -346,11 +345,11 @@ static int kobil_open(struct tty_struct *tty,
 }
 
 
-static void kobil_close(struct tty_struct *tty,
-           struct usb_serial_port *port, struct file *filp)
+static void kobil_close(struct usb_serial_port *port)
 {
     dbg("%s - port %d", __func__, port->number);
 
+    /* FIXME: Add rts/dtr methods */
     if (port->write_urb) {
         usb_kill_urb(port->write_urb);
         usb_free_urb(port->write_urb);
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 82930a7d509..873795548fc 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -95,8 +95,8 @@ static int mct_u232_startup(struct usb_serial *serial);
 static void mct_u232_shutdown(struct usb_serial *serial);
 static int  mct_u232_open(struct tty_struct *tty,
            struct usb_serial_port *port, struct file *filp);
-static void mct_u232_close(struct tty_struct *tty,
-           struct usb_serial_port *port, struct file *filp);
+static void mct_u232_close(struct usb_serial_port *port);
+static void mct_u232_dtr_rts(struct usb_serial_port *port, int on);
 static void mct_u232_read_int_callback(struct urb *urb);
 static void mct_u232_set_termios(struct tty_struct *tty,
            struct usb_serial_port *port, struct ktermios *old);
@@ -140,6 +140,7 @@ static struct usb_serial_driver mct_u232_device = {
     .num_ports =         1,
     .open =              mct_u232_open,
     .close =             mct_u232_close,
+    .dtr_rts =           mct_u232_dtr_rts,
     .throttle =          mct_u232_throttle,
     .unthrottle =        mct_u232_unthrottle,
     .read_int_callback = mct_u232_read_int_callback,
@@ -496,29 +497,29 @@ error:
     return retval;
 } /* mct_u232_open */
 
-
-static void mct_u232_close(struct tty_struct *tty,
-           struct usb_serial_port *port, struct file *filp)
+static void mct_u232_dtr_rts(struct usb_serial_port *port, int on)
 {
-    unsigned int c_cflag;
     unsigned int control_state;
     struct mct_u232_private *priv = usb_get_serial_port_data(port);
-    dbg("%s port %d", __func__, port->number);
 
-    if (tty) {
-        c_cflag = tty->termios->c_cflag;
-        mutex_lock(&port->serial->disc_mutex);
-        if (c_cflag & HUPCL && !port->serial->disconnected) {
-            /* drop DTR and RTS */
-            spin_lock_irq(&priv->lock);
+    mutex_lock(&port->serial->disc_mutex);
+    if (!port->serial->disconnected) {
+        /* drop DTR and RTS */
+        spin_lock_irq(&priv->lock);
+        if (on)
+            priv->control_state |= TIOCM_DTR | TIOCM_RTS;
+        else
            priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
        control_state = priv->control_state;
        spin_unlock_irq(&priv->lock);
        mct_u232_set_modem_ctrl(port->serial, control_state);
-        }
-        mutex_unlock(&port->serial->disc_mutex);
     }
+    mutex_unlock(&port->serial->disc_mutex);
+}
 
+static void mct_u232_close(struct usb_serial_port *port)
+{
+    dbg("%s port %d", __func__, port->number);
 
     if (port->serial->dev) {
         /* shutdown our urbs */
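The mct_u232 conversion above shows the locking shape most dtr_rts methods in
this diff use: serial->disc_mutex pins the device against disconnect, the
driver spinlock guards the shadowed line state, and the (possibly sleeping)
USB request goes out only after the spinlock is dropped. A generic sketch of
the same shape; the bar_* names, the private struct and bar_set_lines() are
assumptions, not code from this patch:

struct bar_private {
    spinlock_t lock;
    u8 line_control;        /* shadow of the DTR/RTS state */
};
#define BAR_DTR 0x01
#define BAR_RTS 0x02

static void bar_dtr_rts(struct usb_serial_port *port, int on)
{
    struct bar_private *priv = usb_get_serial_port_data(port);
    u8 lines;

    mutex_lock(&port->serial->disc_mutex);  /* device can't vanish */
    if (!port->serial->disconnected) {
        spin_lock_irq(&priv->lock);         /* update shadow state */
        if (on)
            priv->line_control |= BAR_DTR | BAR_RTS;
        else
            priv->line_control &= ~(BAR_DTR | BAR_RTS);
        lines = priv->line_control;
        spin_unlock_irq(&priv->lock);
        bar_set_lines(port->serial->dev, lines); /* may sleep */
    }
    mutex_unlock(&port->serial->disc_mutex);
}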
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 24e3b5d4b4d..9e1a013ee7f 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -533,8 +533,7 @@ static int mos7720_chars_in_buffer(struct tty_struct *tty)
     return chars;
 }
 
-static void mos7720_close(struct tty_struct *tty,
-           struct usb_serial_port *port, struct file *filp)
+static void mos7720_close(struct usb_serial_port *port)
 {
     struct usb_serial *serial;
     struct moschip_port *mos7720_port;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 84fb1dcd30d..10b78a37214 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1135,54 +1135,12 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)
 
 }
 
-/************************************************************************
- *
- * mos7840_block_until_tx_empty
- *
- * This function will block the close until one of the following:
- * 1. TX count are 0
- * 2. The mos7840 has stopped
- * 3. A timeout of 3 seconds without activity has expired
- *
- ************************************************************************/
-static void mos7840_block_until_tx_empty(struct tty_struct *tty,
-                    struct moschip_port *mos7840_port)
-{
-    int timeout = HZ / 10;
-    int wait = 30;
-    int count;
-
-    while (1) {
-
-        count = mos7840_chars_in_buffer(tty);
-
-        /* Check for Buffer status */
-        if (count <= 0)
-            return;
-
-        /* Block the thread for a while */
-        interruptible_sleep_on_timeout(&mos7840_port->wait_chase,
-                        timeout);
-
-        /* No activity.. count down section */
-        wait--;
-        if (wait == 0) {
-            dbg("%s - TIMEOUT", __func__);
-            return;
-        } else {
-            /* Reset timeout value back to seconds */
-            wait = 30;
-        }
-    }
-}
-
 /*****************************************************************************
  * mos7840_close
  *    this function is called by the tty driver when a port is closed
  *****************************************************************************/
 
-static void mos7840_close(struct tty_struct *tty,
-           struct usb_serial_port *port, struct file *filp)
+static void mos7840_close(struct usb_serial_port *port)
 {
     struct usb_serial *serial;
     struct moschip_port *mos7840_port;
@@ -1223,10 +1181,6 @@ static void mos7840_close(struct tty_struct *tty,
         }
     }
 
-    if (serial->dev)
-        /* flush and block until tx is empty */
-        mos7840_block_until_tx_empty(tty, mos7840_port);
-
     /* While closing port, shutdown all bulk read, write  *
      * and interrupt read if they exists                  */
     if (serial->dev) {
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index bcdcbb82270..f5f3751a888 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -98,8 +98,7 @@ static int navman_open(struct tty_struct *tty,
     return result;
 }
 
-static void navman_close(struct tty_struct *tty,
-           struct usb_serial_port *port, struct file *filp)
+static void navman_close(struct usb_serial_port *port)
 {
     dbg("%s - port %d", __func__, port->number);
 
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index df653971272..1104617334f 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -66,8 +66,7 @@ static int debug;
 /* function prototypes */
 static int  omninet_open(struct tty_struct *tty, struct usb_serial_port *port,
            struct file *filp);
-static void omninet_close(struct tty_struct *tty, struct usb_serial_port *port,
-           struct file *filp);
+static void omninet_close(struct usb_serial_port *port);
 static void omninet_read_bulk_callback(struct urb *urb);
 static void omninet_write_bulk_callback(struct urb *urb);
 static int  omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
@@ -189,8 +188,7 @@ static int omninet_open(struct tty_struct *tty,
     return result;
 }
 
-static void omninet_close(struct tty_struct *tty,
-           struct usb_serial_port *port, struct file *filp)
+static void omninet_close(struct usb_serial_port *port)
 {
     dbg("%s - port %d", __func__, port->number);
     usb_kill_urb(port->read_urb);
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index b500ad10b75..c20480aa975 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -173,8 +173,7 @@ static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port,
     return result;
 }
 
-static void opticon_close(struct tty_struct *tty, struct usb_serial_port *port,
-             struct file *filp)
+static void opticon_close(struct usb_serial_port *port)
 {
     struct opticon_private *priv = usb_get_serial_data(port->serial);
 
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7817b82889c..a16d69fadba 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -45,8 +45,9 @@
 /* Function prototypes */
 static int  option_open(struct tty_struct *tty, struct usb_serial_port *port,
            struct file *filp);
-static void option_close(struct tty_struct *tty, struct usb_serial_port *port,
-           struct file *filp);
+static void option_close(struct usb_serial_port *port);
+static void option_dtr_rts(struct usb_serial_port *port, int on);
+
 static int  option_startup(struct usb_serial *serial);
 static void option_shutdown(struct usb_serial *serial);
 static int  option_write_room(struct tty_struct *tty);
@@ -61,7 +62,7 @@ static void option_set_termios(struct tty_struct *tty,
 static int  option_tiocmget(struct tty_struct *tty, struct file *file);
 static int  option_tiocmset(struct tty_struct *tty, struct file *file,
            unsigned int set, unsigned int clear);
-static int  option_send_setup(struct tty_struct *tty, struct usb_serial_port *port);
+static int  option_send_setup(struct usb_serial_port *port);
 static int  option_suspend(struct usb_serial *serial, pm_message_t message);
 static int  option_resume(struct usb_serial *serial);
 
@@ -551,6 +552,7 @@ static struct usb_serial_driver option_1port_device = {
     .num_ports         = 1,
     .open              = option_open,
     .close             = option_close,
+    .dtr_rts           = option_dtr_rts,
     .write             = option_write,
     .write_room        = option_write_room,
     .chars_in_buffer   = option_chars_in_buffer,
@@ -630,7 +632,7 @@ static void option_set_termios(struct tty_struct *tty,
     dbg("%s", __func__);
     /* Doesn't support option setting */
     tty_termios_copy_hw(tty->termios, old_termios);
-    option_send_setup(tty, port);
+    option_send_setup(port);
 }
 
 static int option_tiocmget(struct tty_struct *tty, struct file *file)
@@ -669,7 +671,7 @@ static int option_tiocmset(struct tty_struct *tty, struct file *file,
         portdata->rts_state = 0;
     if (clear & TIOCM_DTR)
         portdata->dtr_state = 0;
-    return option_send_setup(tty, port);
+    return option_send_setup(port);
 }
 
 /* Write */
@@ -897,10 +899,6 @@ static int option_open(struct tty_struct *tty,
 
     dbg("%s", __func__);
 
-    /* Set some sane defaults */
-    portdata->rts_state = 1;
-    portdata->dtr_state = 1;
-
     /* Reset low level data toggle and start reading from endpoints */
     for (i = 0; i < N_IN_URB; i++) {
         urb = portdata->in_urbs[i];
@@ -936,37 +934,43 @@ static int option_open(struct tty_struct *tty,
                     usb_pipeout(urb->pipe), 0); */
     }
 
-    option_send_setup(tty, port);
+    option_send_setup(port);
 
     return 0;
 }
 
-static void option_close(struct tty_struct *tty,
-           struct usb_serial_port *port, struct file *filp)
+static void option_dtr_rts(struct usb_serial_port *port, int on)
 {
-    int i;
     struct usb_serial *serial = port->serial;
     struct option_port_private *portdata;
 
     dbg("%s", __func__);
     portdata = usb_get_serial_port_data(port);
+    mutex_lock(&serial->disc_mutex);
+    portdata->rts_state = on;
+    portdata->dtr_state = on;
+    if (serial->dev)
+        option_send_setup(port);
+    mutex_unlock(&serial->disc_mutex);
+}
 
-    portdata->rts_state = 0;
-    portdata->dtr_state = 0;
 
-    if (serial->dev) {
-        mutex_lock(&serial->disc_mutex);
-        if (!serial->disconnected)
-            option_send_setup(tty, port);
-        mutex_unlock(&serial->disc_mutex);
+static void option_close(struct usb_serial_port *port)
+{
+    int i;
+    struct usb_serial *serial = port->serial;
+    struct option_port_private *portdata;
+
+    dbg("%s", __func__);
+    portdata = usb_get_serial_port_data(port);
 
+    if (serial->dev) {
         /* Stop reading/writing urbs */
         for (i = 0; i < N_IN_URB; i++)
             usb_kill_urb(portdata->in_urbs[i]);
         for (i = 0; i < N_OUT_URB; i++)
            usb_kill_urb(portdata->out_urbs[i]);
     }
-    tty_port_tty_set(&port->port, NULL);
 }
 
 /* Helper functions used by option_setup_urbs */
@@ -1032,28 +1036,24 @@ static void option_setup_urbs(struct usb_serial *serial)
  * This is exactly the same as SET_CONTROL_LINE_STATE from the PSTN
  * CDC.
 */
-static int option_send_setup(struct tty_struct *tty,
-                struct usb_serial_port *port)
+static int option_send_setup(struct usb_serial_port *port)
 {
     struct usb_serial *serial = port->serial;
     struct option_port_private *portdata;
     int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
+    int val = 0;
     dbg("%s", __func__);
 
     portdata = usb_get_serial_port_data(port);
 
-    if (tty) {
-        int val = 0;
-        if (portdata->dtr_state)
-            val |= 0x01;
-        if (portdata->rts_state)
-            val |= 0x02;
+    if (portdata->dtr_state)
+        val |= 0x01;
+    if (portdata->rts_state)
+        val |= 0x02;
 
-        return usb_control_msg(serial->dev,
-            usb_rcvctrlpipe(serial->dev, 0),
-            0x22, 0x21, val, ifNum, NULL, 0, USB_CTRL_SET_TIMEOUT);
-    }
-    return 0;
+    return usb_control_msg(serial->dev,
+        usb_rcvctrlpipe(serial->dev, 0),
+        0x22, 0x21, val, ifNum, NULL, 0, USB_CTRL_SET_TIMEOUT);
 }
 
 static int option_startup(struct usb_serial *serial)
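The comment above option_send_setup() names the wire protocol: this is the
CDC/PSTN SET_CONTROL_LINE_STATE request, bmRequestType 0x21 (class request to
an interface), bRequest 0x22, with DTR in bit 0 and RTS in bit 1 of wValue. An
illustrative standalone helper, not part of the driver (it mirrors the
driver's use of usb_rcvctrlpipe for this zero-length request):

static int send_line_state(struct usb_serial *serial, u16 ifnum,
               int dtr, int rts)
{
    u16 val = (dtr ? 0x01 : 0) | (rts ? 0x02 : 0);

    return usb_control_msg(serial->dev,
            usb_rcvctrlpipe(serial->dev, 0),
            0x22,       /* SET_CONTROL_LINE_STATE */
            0x21,       /* class request, interface recipient */
            val, ifnum, NULL, 0, USB_CTRL_SET_TIMEOUT);
}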
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index ba551f00f16..7de54781fe6 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -143,8 +143,7 @@ struct oti6858_control_pkt {
 /* function prototypes */
 static int oti6858_open(struct tty_struct *tty,
            struct usb_serial_port *port, struct file *filp);
-static void oti6858_close(struct tty_struct *tty,
-           struct usb_serial_port *port, struct file *filp);
+static void oti6858_close(struct usb_serial_port *port);
 static void oti6858_set_termios(struct tty_struct *tty,
            struct usb_serial_port *port, struct ktermios *old);
 static int oti6858_ioctl(struct tty_struct *tty, struct file *file,
@@ -622,67 +621,30 @@ static int oti6858_open(struct tty_struct *tty,
     if (result != 0) {
         dev_err(&port->dev, "%s(): usb_submit_urb() failed"
                " with error %d\n", __func__, result);
-        oti6858_close(tty, port, NULL);
+        oti6858_close(port);
         return -EPROTO;
     }
 
     /* setup termios */
     if (tty)
         oti6858_set_termios(tty, port, &tmp_termios);
-
+    port->port.drain_delay = 256;   /* FIXME: check the FIFO length */
     return 0;
 }
 
-static void oti6858_close(struct tty_struct *tty,
-           struct usb_serial_port *port, struct file *filp)
+static void oti6858_close(struct usb_serial_port *port)
 {
     struct oti6858_private *priv = usb_get_serial_port_data(port);
     unsigned long flags;
-    long timeout;
-    wait_queue_t wait;
 
     dbg("%s(port = %d)", __func__, port->number);
 
-    /* wait for data to drain from the buffer */
     spin_lock_irqsave(&priv->lock, flags);
-    timeout = 30 * HZ;  /* PL2303_CLOSING_WAIT */
-    init_waitqueue_entry(&wait, current);
-    add_wait_queue(&tty->write_wait, &wait);
-    dbg("%s(): entering wait loop", __func__);
-    for (;;) {
-        set_current_state(TASK_INTERRUPTIBLE);
-        if (oti6858_buf_data_avail(priv->buf) == 0
-                || timeout == 0 || signal_pending(current)
-                || port->serial->disconnected)
-            break;
-        spin_unlock_irqrestore(&priv->lock, flags);
-        timeout = schedule_timeout(timeout);
-        spin_lock_irqsave(&priv->lock, flags);
-    }
-    set_current_state(TASK_RUNNING);
-    remove_wait_queue(&tty->write_wait, &wait);
-    dbg("%s(): after wait loop", __func__);
-
     /* clear out any remaining data in the buffer */
     oti6858_buf_clear(priv->buf);
     spin_unlock_irqrestore(&priv->lock, flags);
 
-    /* wait for characters to drain from the device */
-    /* (this is long enough for the entire 256 byte */
-    /* pl2303 hardware buffer to drain with no flow */
-    /* control for data rates of 1200 bps or more, */
-    /* for lower rates we should really know how much */
-    /* data is in the buffer to compute a delay */
-    /* that is not unnecessarily long) */
-    /* FIXME
-    bps = tty_get_baud_rate(tty);
-    if (bps > 1200)
-        timeout = max((HZ*2560)/bps,HZ/10);
-    else
-    */
-    timeout = 2*HZ;
-    schedule_timeout_interruptible(timeout);
-    dbg("%s(): after schedule_timeout_interruptible()", __func__);
+    dbg("%s(): after buf_clear()", __func__);
 
     /* cancel scheduled setup */
     cancel_delayed_work(&priv->delayed_setup_work);
@@ -694,15 +656,6 @@ static void oti6858_close(struct tty_struct *tty,
     usb_kill_urb(port->write_urb);
     usb_kill_urb(port->read_urb);
     usb_kill_urb(port->interrupt_in_urb);
-
-    /*
-    if (tty && (tty->termios->c_cflag) & HUPCL) {
-        // drop DTR and RTS
-        spin_lock_irqsave(&priv->lock, flags);
-        priv->pending_setup.control &= ~CONTROL_MASK;
-        spin_unlock_irqrestore(&priv->lock, flags);
-    }
-    */
 }
 
 static int oti6858_tiocmset(struct tty_struct *tty, struct file *file,
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 751a533a434..e02dc3d643c 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -652,69 +652,41 @@ static void pl2303_set_termios(struct tty_struct *tty,
     kfree(buf);
 }
 
-static void pl2303_close(struct tty_struct *tty,
-           struct usb_serial_port *port, struct file *filp)
+static void pl2303_dtr_rts(struct usb_serial_port *port, int on)
+{
+    struct pl2303_private *priv = usb_get_serial_port_data(port);
+    unsigned long flags;
+    u8 control;
+
+    spin_lock_irqsave(&priv->lock, flags);
+    /* Change DTR and RTS */
+    if (on)
+        priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
+    else
+        priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
+    control = priv->line_control;
+    spin_unlock_irqrestore(&priv->lock, flags);
+    set_control_lines(port->serial->dev, control);
+}
+
+static void pl2303_close(struct usb_serial_port *port)
 {
     struct pl2303_private *priv = usb_get_serial_port_data(port);
     unsigned long flags;
-    unsigned int c_cflag;
-    int bps;
-    long timeout;
-    wait_queue_t wait;
 
     dbg("%s - port %d", __func__, port->number);
 
-    /* wait for data to drain from the buffer */
     spin_lock_irqsave(&priv->lock, flags);
-    timeout = PL2303_CLOSING_WAIT;
-    init_waitqueue_entry(&wait, current);
-    add_wait_queue(&tty->write_wait, &wait);
-    for (;;) {
-        set_current_state(TASK_INTERRUPTIBLE);
-        if (pl2303_buf_data_avail(priv->buf) == 0 ||
-            timeout == 0 || signal_pending(current) ||
-            port->serial->disconnected)
-            break;
-        spin_unlock_irqrestore(&priv->lock, flags);
-        timeout = schedule_timeout(timeout);
-        spin_lock_irqsave(&priv->lock, flags);
-    }
-    set_current_state(TASK_RUNNING);
-    remove_wait_queue(&tty->write_wait, &wait);
     /* clear out any remaining data in the buffer */
     pl2303_buf_clear(priv->buf);
     spin_unlock_irqrestore(&priv->lock, flags);
 
-    /* wait for characters to drain from the device */
-    /* (this is long enough for the entire 256 byte */
-    /* pl2303 hardware buffer to drain with no flow */
-    /* control for data rates of 1200 bps or more, */
-    /* for lower rates we should really know how much */
-    /* data is in the buffer to compute a delay */
-    /* that is not unnecessarily long) */
-    bps = tty_get_baud_rate(tty);
-    if (bps > 1200)
-        timeout = max((HZ*2560)/bps, HZ/10);
-    else
-        timeout = 2*HZ;
-    schedule_timeout_interruptible(timeout);
-
     /* shutdown our urbs */
     dbg("%s - shutting down urbs", __func__);
     usb_kill_urb(port->write_urb);
     usb_kill_urb(port->read_urb);
     usb_kill_urb(port->interrupt_in_urb);
-
-    if (tty) {
-        c_cflag = tty->termios->c_cflag;
-        if (c_cflag & HUPCL) {
-            /* drop DTR and RTS */
-            spin_lock_irqsave(&priv->lock, flags);
-            priv->line_control = 0;
-            spin_unlock_irqrestore(&priv->lock, flags);
-            set_control_lines(port->serial->dev, 0);
-        }
-    }
 }
 
 static int pl2303_open(struct tty_struct *tty,
@@ -748,7 +720,7 @@ static int pl2303_open(struct tty_struct *tty,
     if (result) {
         dev_err(&port->dev, "%s - failed submitting read urb,"
            " error %d\n", __func__, result);
-        pl2303_close(tty, port, NULL);
+        pl2303_close(port);
         return -EPROTO;
     }
 
@@ -758,9 +730,10 @@ static int pl2303_open(struct tty_struct *tty,
     if (result) {
         dev_err(&port->dev, "%s - failed submitting interrupt urb,"
            " error %d\n", __func__, result);
-        pl2303_close(tty, port, NULL);
+        pl2303_close(port);
         return -EPROTO;
     }
+    port->port.drain_delay = 256;
     return 0;
 }
 
@@ -821,6 +794,14 @@ static int pl2303_tiocmget(struct tty_struct *tty, struct file *file)
     return result;
 }
 
+static int pl2303_carrier_raised(struct usb_serial_port *port)
+{
+    struct pl2303_private *priv = usb_get_serial_port_data(port);
+    if (priv->line_status & UART_DCD)
+        return 1;
+    return 0;
+}
+
 static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
 {
     struct pl2303_private *priv = usb_get_serial_port_data(port);
@@ -1125,6 +1106,8 @@ static struct usb_serial_driver pl2303_device = {
     .num_ports =        1,
     .open =             pl2303_open,
     .close =            pl2303_close,
+    .dtr_rts =          pl2303_dtr_rts,
+    .carrier_raised =   pl2303_carrier_raised,
     .write =            pl2303_write,
     .ioctl =            pl2303_ioctl,
     .break_ctl =        pl2303_break_ctl,
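With ->carrier_raised wired up, a blocking open on a pl2303 port can now sleep
in the tty core until the device reports DCD (the UART_DCD bit in line_status,
fed by the interrupt urb). The userspace-visible effect, sketched; the device
path is only an example:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
    /* Sleeps until carrier unless CLOCAL is set on the port or
     * O_NONBLOCK is passed; with O_NONBLOCK it returns at once. */
    int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

    if (fd >= 0)
        close(fd);
    return 0;
}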
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 913225c6161..17ac34f4d66 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -26,12 +26,10 @@
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/usb/serial.h>
-#include <linux/usb/ch9.h>
 
 #define SWIMS_USB_REQUEST_SetPower  0x00
 #define SWIMS_USB_REQUEST_SetNmea   0x07
 
-/* per port private data */
 #define N_IN_URB    4
 #define N_OUT_URB   4
 #define IN_BUFLEN   4096
@@ -39,6 +37,12 @@
 static int debug;
 static int nmea;
 
+/* Used in interface blacklisting */
+struct sierra_iface_info {
+    const u32 infolen;  /* number of interface numbers on blacklist */
+    const u8  *ifaceinfo;   /* pointer to the array holding the numbers */
+};
+
 static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
 {
     int result;
@@ -85,6 +89,23 @@ static int sierra_calc_num_ports(struct usb_serial *serial)
     return result;
 }
 
+static int is_blacklisted(const u8 ifnum,
+                const struct sierra_iface_info *blacklist)
+{
+    const u8 *info;
+    int i;
+
+    if (blacklist) {
+        info = blacklist->ifaceinfo;
+
+        for (i = 0; i < blacklist->infolen; i++) {
+            if (info[i] == ifnum)
+                return 1;
+        }
+    }
+    return 0;
+}
+
 static int sierra_calc_interface(struct usb_serial *serial)
 {
     int interface;
@@ -153,9 +174,25 @@ static int sierra_probe(struct usb_serial *serial,
      */
     usb_set_serial_data(serial, (void *)num_ports);
 
+    /* ifnum could have changed - by calling usb_set_interface */
+    ifnum = sierra_calc_interface(serial);
+
+    if (is_blacklisted(ifnum,
+                (struct sierra_iface_info *)id->driver_info)) {
+        dev_dbg(&serial->dev->dev,
+            "Ignoring blacklisted interface #%d\n", ifnum);
+        return -ENODEV;
+    }
+
     return result;
 }
 
+static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 };
+static const struct sierra_iface_info direct_ip_interface_blacklist = {
+    .infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces),
+    .ifaceinfo = direct_ip_non_serial_ifaces,
+};
+
 static struct usb_device_id id_table [] = {
     { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
     { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */
@@ -188,9 +225,11 @@ static struct usb_device_id id_table [] = {
     { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */
     { USB_DEVICE(0x1199, 0x683A) }, /* Sierra Wireless MC8785 */
     { USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */
-    { USB_DEVICE(0x1199, 0x683C) }, /* Sierra Wireless MC8790 */
-    { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8790 */
-    { USB_DEVICE(0x1199, 0x683E) }, /* Sierra Wireless MC8790 */
+    /* Sierra Wireless MC8790, MC8791, MC8792 Composite */
+    { USB_DEVICE(0x1199, 0x683C) },
+    { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8791 Composite */
+    /* Sierra Wireless MC8790, MC8791, MC8792 */
+    { USB_DEVICE(0x1199, 0x683E) },
     { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */
     { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */
     { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */
@@ -211,6 +250,10 @@ static struct usb_device_id id_table [] = {
     { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
     { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
 
+    { USB_DEVICE(0x1199, 0x68A3),   /* Sierra Wireless Direct IP modems */
+      .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+    },
+
     { }
 };
 MODULE_DEVICE_TABLE(usb, id_table);
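sierra_probe() consults a sierra_iface_info hung off usb_device_id.driver_info
so a composite "Direct IP" device can have its non-serial interfaces refused
with -ENODEV while the serial ones bind normally. Extending the table for
another device would look like this sketch; the product id and interface
numbers are made up:

static const u8 example_non_serial_ifaces[] = { 3, 4 };
static const struct sierra_iface_info example_blacklist = {
    .infolen   = ARRAY_SIZE(example_non_serial_ifaces),
    .ifaceinfo = example_non_serial_ifaces,
};

/* ... and the matching id_table[] entry: */
{ USB_DEVICE(0x1199, 0xFFFF),       /* hypothetical device */
  .driver_info = (kernel_ulong_t)&example_blacklist },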
@@ -229,7 +272,6 @@ struct sierra_port_private {
 
     /* Input endpoints and buffers for this port */
     struct urb *in_urbs[N_IN_URB];
-    char *in_buffer[N_IN_URB];
 
     /* Settings for the port */
     int rts_state;  /* Handshaking pins (outputs) */
@@ -240,57 +282,50 @@ struct sierra_port_private {
     int ri_state;
 };
 
-static int sierra_send_setup(struct tty_struct *tty,
-                        struct usb_serial_port *port)
+static int sierra_send_setup(struct usb_serial_port *port)
 {
     struct usb_serial *serial = port->serial;
     struct sierra_port_private *portdata;
     __u16 interface = 0;
+    int val = 0;
 
     dev_dbg(&port->dev, "%s", __func__);
 
     portdata = usb_get_serial_port_data(port);
 
-    if (tty) {
-        int val = 0;
-        if (portdata->dtr_state)
-            val |= 0x01;
-        if (portdata->rts_state)
-            val |= 0x02;
-
-        /* If composite device then properly report interface */
-        if (serial->num_ports == 1) {
-            interface = sierra_calc_interface(serial);
-
-            /* Control message is sent only to interfaces with
-             * interrupt_in endpoints
-             */
-            if (port->interrupt_in_urb) {
-                /* send control message */
-                return usb_control_msg(serial->dev,
-                    usb_rcvctrlpipe(serial->dev, 0),
-                    0x22, 0x21, val, interface,
-                    NULL, 0, USB_CTRL_SET_TIMEOUT);
-            }
-        }
-
-        /* Otherwise the need to do non-composite mapping */
-        else {
-            if (port->bulk_out_endpointAddress == 2)
-                interface = 0;
-            else if (port->bulk_out_endpointAddress == 4)
-                interface = 1;
-            else if (port->bulk_out_endpointAddress == 5)
-                interface = 2;
+    if (portdata->dtr_state)
+        val |= 0x01;
+    if (portdata->rts_state)
+        val |= 0x02;
 
+    /* If composite device then properly report interface */
+    if (serial->num_ports == 1) {
+        interface = sierra_calc_interface(serial);
+        /* Control message is sent only to interfaces with
+         * interrupt_in endpoints
+         */
+        if (port->interrupt_in_urb) {
+            /* send control message */
             return usb_control_msg(serial->dev,
                 usb_rcvctrlpipe(serial->dev, 0),
                 0x22, 0x21, val, interface,
                 NULL, 0, USB_CTRL_SET_TIMEOUT);
-
         }
     }
 
+    /* Otherwise the need to do non-composite mapping */
+    else {
+        if (port->bulk_out_endpointAddress == 2)
+            interface = 0;
+        else if (port->bulk_out_endpointAddress == 4)
+            interface = 1;
+        else if (port->bulk_out_endpointAddress == 5)
+            interface = 2;
+        return usb_control_msg(serial->dev,
+            usb_rcvctrlpipe(serial->dev, 0),
+            0x22, 0x21, val, interface,
+            NULL, 0, USB_CTRL_SET_TIMEOUT);
+    }
     return 0;
 }
 
@@ -299,7 +334,7 @@ static void sierra_set_termios(struct tty_struct *tty,
 {
     dev_dbg(&port->dev, "%s", __func__);
     tty_termios_copy_hw(tty->termios, old_termios);
-    sierra_send_setup(tty, port);
+    sierra_send_setup(port);
 }
 
 static int sierra_tiocmget(struct tty_struct *tty, struct file *file)
@@ -338,7 +373,18 @@ static int sierra_tiocmset(struct tty_struct *tty, struct file *file,
         portdata->rts_state = 0;
     if (clear & TIOCM_DTR)
         portdata->dtr_state = 0;
-    return sierra_send_setup(tty, port);
+    return sierra_send_setup(port);
+}
+
+static void sierra_release_urb(struct urb *urb)
+{
+    struct usb_serial_port *port;
+    if (urb) {
+        port = urb->context;
+        dev_dbg(&port->dev, "%s: %p\n", __func__, urb);
+        kfree(urb->transfer_buffer);
+        usb_free_urb(urb);
+    }
 }
 
 static void sierra_outdat_callback(struct urb *urb)
@@ -465,7 +511,7 @@ static void sierra_indat_callback(struct urb *urb)
                " received", __func__);
 
     /* Resubmit urb so we continue receiving */
-    if (port->port.count && status != -ESHUTDOWN) {
+    if (port->port.count && status != -ESHUTDOWN && status != -EPERM) {
         err = usb_submit_urb(urb, GFP_ATOMIC);
         if (err)
             dev_err(&port->dev, "resubmit read urb failed."
@@ -557,67 +603,99 @@ static int sierra_write_room(struct tty_struct *tty)
     return 2048;
 }
 
-static int sierra_open(struct tty_struct *tty,
-            struct usb_serial_port *port, struct file *filp)
+static void sierra_stop_rx_urbs(struct usb_serial_port *port)
 {
-    struct sierra_port_private *portdata;
-    struct usb_serial *serial = port->serial;
     int i;
-    struct urb *urb;
-    int result;
+    struct sierra_port_private *portdata = usb_get_serial_port_data(port);
 
-    portdata = usb_get_serial_port_data(port);
+    for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++)
+        usb_kill_urb(portdata->in_urbs[i]);
 
-    dev_dbg(&port->dev, "%s", __func__);
+    usb_kill_urb(port->interrupt_in_urb);
+}
 
-    /* Set some sane defaults */
-    portdata->rts_state = 1;
-    portdata->dtr_state = 1;
+static int sierra_submit_rx_urbs(struct usb_serial_port *port, gfp_t mem_flags)
+{
+    int ok_cnt;
+    int err = -EINVAL;
+    int i;
+    struct urb *urb;
+    struct sierra_port_private *portdata = usb_get_serial_port_data(port);
 
-    /* Reset low level data toggle and start reading from endpoints */
-    for (i = 0; i < N_IN_URB; i++) {
+    ok_cnt = 0;
+    for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) {
         urb = portdata->in_urbs[i];
         if (!urb)
             continue;
-        if (urb->dev != serial->dev) {
-            dev_dbg(&port->dev, "%s: dev %p != %p",
-                 __func__, urb->dev, serial->dev);
-            continue;
+        err = usb_submit_urb(urb, mem_flags);
+        if (err) {
+            dev_err(&port->dev, "%s: submit urb failed: %d\n",
+                __func__, err);
+        } else {
+            ok_cnt++;
         }
+    }
 
-        /*
-         * make sure endpoint data toggle is synchronized with the
-         * device
-         */
-        usb_clear_halt(urb->dev, urb->pipe);
-
-        result = usb_submit_urb(urb, GFP_KERNEL);
-        if (result) {
-            dev_err(&port->dev, "submit urb %d failed (%d) %d\n",
-                i, result, urb->transfer_buffer_length);
+    if (ok_cnt && port->interrupt_in_urb) {
+        err = usb_submit_urb(port->interrupt_in_urb, mem_flags);
+        if (err) {
+            dev_err(&port->dev, "%s: submit intr urb failed: %d\n",
+                __func__, err);
         }
     }
 
-    sierra_send_setup(tty, port);
+    if (ok_cnt > 0) /* at least one rx urb submitted */
+        return 0;
+    else
+        return err;
+}
+
+static struct urb *sierra_setup_urb(struct usb_serial *serial, int endpoint,
+                    int dir, void *ctx, int len,
+                    gfp_t mem_flags,
+                    usb_complete_t callback)
+{
+    struct urb *urb;
+    u8 *buf;
+
+    if (endpoint == -1)
+        return NULL;
 
-    /* start up the interrupt endpoint if we have one */
-    if (port->interrupt_in_urb) {
-        result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
-        if (result)
-            dev_err(&port->dev, "submit irq_in urb failed %d\n",
-                result);
+    urb = usb_alloc_urb(0, mem_flags);
+    if (urb == NULL) {
+        dev_dbg(&serial->dev->dev, "%s: alloc for endpoint %d failed\n",
            __func__, endpoint);
+        return NULL;
     }
-    return 0;
+
+    buf = kmalloc(len, mem_flags);
+    if (buf) {
+        /* Fill URB using supplied data */
+        usb_fill_bulk_urb(urb, serial->dev,
+            usb_sndbulkpipe(serial->dev, endpoint) | dir,
+            buf, len, callback, ctx);
+
+        /* debug */
+        dev_dbg(&serial->dev->dev, "%s %c u : %p  d:%p\n", __func__,
+                dir == USB_DIR_IN ? 'i' : 'o', urb, buf);
+    } else {
+        dev_dbg(&serial->dev->dev, "%s %c u:%p  d:%p\n", __func__,
+                dir == USB_DIR_IN ? 'i' : 'o', urb, buf);
+
+        sierra_release_urb(urb);
+        urb = NULL;
+    }
+
+    return urb;
 }
 
-static void sierra_close(struct tty_struct *tty,
-            struct usb_serial_port *port, struct file *filp)
+static void sierra_close(struct usb_serial_port *port)
 {
     int i;
     struct usb_serial *serial = port->serial;
     struct sierra_port_private *portdata;
 
-    dev_dbg(&port->dev, "%s", __func__);
+    dev_dbg(&port->dev, "%s\n", __func__);
     portdata = usb_get_serial_port_data(port);
 
     portdata->rts_state = 0;
@@ -626,25 +704,83 @@ static void sierra_close(struct tty_struct *tty,
     if (serial->dev) {
         mutex_lock(&serial->disc_mutex);
         if (!serial->disconnected)
-            sierra_send_setup(tty, port);
+            sierra_send_setup(port);
         mutex_unlock(&serial->disc_mutex);
 
-        /* Stop reading/writing urbs */
-        for (i = 0; i < N_IN_URB; i++)
-            usb_kill_urb(portdata->in_urbs[i]);
+        /* Stop reading urbs */
+        sierra_stop_rx_urbs(port);
+        /* .. and release them */
+        for (i = 0; i < N_IN_URB; i++) {
+            sierra_release_urb(portdata->in_urbs[i]);
+            portdata->in_urbs[i] = NULL;
+        }
     }
+}
 
-    usb_kill_urb(port->interrupt_in_urb);
-    tty_port_tty_set(&port->port, NULL);
+static int sierra_open(struct tty_struct *tty,
+            struct usb_serial_port *port, struct file *filp)
+{
+    struct sierra_port_private *portdata;
+    struct usb_serial *serial = port->serial;
+    int i;
+    int err;
+    int endpoint;
+    struct urb *urb;
+
+    portdata = usb_get_serial_port_data(port);
+
+    dev_dbg(&port->dev, "%s", __func__);
+
+    /* Set some sane defaults */
+    portdata->rts_state = 1;
+    portdata->dtr_state = 1;
+
+
+    endpoint = port->bulk_in_endpointAddress;
+    for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) {
+        urb = sierra_setup_urb(serial, endpoint, USB_DIR_IN, port,
+                    IN_BUFLEN, GFP_KERNEL,
+                    sierra_indat_callback);
+        portdata->in_urbs[i] = urb;
+    }
+    /* clear halt condition */
+    usb_clear_halt(serial->dev,
+            usb_sndbulkpipe(serial->dev, endpoint) | USB_DIR_IN);
+
+    err = sierra_submit_rx_urbs(port, GFP_KERNEL);
+    if (err) {
+        /* get rid of everything as in close */
+        sierra_close(port);
+        return err;
+    }
+    sierra_send_setup(port);
+
+    return 0;
+}
+
+
+static void sierra_dtr_rts(struct usb_serial_port *port, int on)
+{
+    struct usb_serial *serial = port->serial;
+    struct sierra_port_private *portdata;
+
+    portdata = usb_get_serial_port_data(port);
+    portdata->rts_state = on;
+    portdata->dtr_state = on;
+
+    if (serial->dev) {
+        mutex_lock(&serial->disc_mutex);
+        if (!serial->disconnected)
+            sierra_send_setup(port);
+        mutex_unlock(&serial->disc_mutex);
+    }
 }
 
 static int sierra_startup(struct usb_serial *serial)
 {
     struct usb_serial_port *port;
     struct sierra_port_private *portdata;
-    struct urb *urb;
     int i;
-    int j;
 
     dev_dbg(&serial->dev->dev, "%s", __func__);
 
@@ -666,34 +802,8 @@ static int sierra_startup(struct usb_serial *serial)
             return -ENOMEM;
         }
         spin_lock_init(&portdata->lock);
-        for (j = 0; j < N_IN_URB; j++) {
-            portdata->in_buffer[j] = kmalloc(IN_BUFLEN, GFP_KERNEL);
-            if (!portdata->in_buffer[j]) {
-                for (--j; j >= 0; j--)
-                    kfree(portdata->in_buffer[j]);
-                kfree(portdata);
-                return -ENOMEM;
-            }
-        }
-
+        /* Set the port private data pointer */
         usb_set_serial_port_data(port, portdata);
-
-        /* initialize the in urbs */
-        for (j = 0; j < N_IN_URB; ++j) {
-            urb = usb_alloc_urb(0, GFP_KERNEL);
-            if (urb == NULL) {
-                dev_dbg(&port->dev, "%s: alloc for in "
-                    "port failed.", __func__);
-                continue;
-            }
-            /* Fill URB using supplied data. */
-            usb_fill_bulk_urb(urb, serial->dev,
-                      usb_rcvbulkpipe(serial->dev,
-                        port->bulk_in_endpointAddress),
-                      portdata->in_buffer[j], IN_BUFLEN,
-                      sierra_indat_callback, port);
-            portdata->in_urbs[j] = urb;
-        }
     }
 
     return 0;
@@ -701,7 +811,7 @@ static int sierra_startup(struct usb_serial *serial)
 
 static void sierra_shutdown(struct usb_serial *serial)
 {
-    int i, j;
+    int i;
     struct usb_serial_port *port;
     struct sierra_port_private *portdata;
 
@@ -714,12 +824,6 @@ static void sierra_shutdown(struct usb_serial *serial)
         portdata = usb_get_serial_port_data(port);
         if (!portdata)
            continue;
-
-        for (j = 0; j < N_IN_URB; j++) {
-            usb_kill_urb(portdata->in_urbs[j]);
-            usb_free_urb(portdata->in_urbs[j]);
-            kfree(portdata->in_buffer[j]);
-        }
         kfree(portdata);
         usb_set_serial_port_data(port, NULL);
     }
@@ -737,6 +841,7 @@ static struct usb_serial_driver sierra_device = {
     .probe         = sierra_probe,
    .open           = sierra_open,
     .close         = sierra_close,
+    .dtr_rts       = sierra_dtr_rts,
     .write         = sierra_write,
     .write_room    = sierra_write_room,
     .set_termios   = sierra_set_termios,
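sierra now builds its receive urbs per open (sierra_setup_urb), cancels them
in close (sierra_stop_rx_urbs) and frees buffer and urb together
(sierra_release_urb), instead of allocating everything for the device lifetime
in sierra_startup(). A minimal sketch of that pairing; illustrative, not the
driver's exact code:

static struct urb *rx_urb_alloc(struct usb_device *udev, int ep, int len,
                usb_complete_t cb, void *ctx, gfp_t flags)
{
    struct urb *urb = usb_alloc_urb(0, flags);
    u8 *buf = urb ? kmalloc(len, flags) : NULL;

    if (!buf) {             /* undo a partial allocation */
        usb_free_urb(urb);  /* NULL-safe */
        return NULL;
    }
    usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep),
              buf, len, cb, ctx);
    return urb;
}

static void rx_urb_free(struct urb *urb)
{
    if (urb) {
        usb_kill_urb(urb);  /* always cancel before freeing */
        kfree(urb->transfer_buffer);
        usb_free_urb(urb);
    }
}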
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 5e7528cc81a..8f7ed8f1399 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -446,66 +446,47 @@ static void spcp8x5_set_workMode(struct usb_device *dev, u16 value,
            "RTSCTS usb_control_msg(enable flowctrl) = %d\n", ret);
 }
 
+static int spcp8x5_carrier_raised(struct usb_serial_port *port)
+{
+    struct spcp8x5_private *priv = usb_get_serial_port_data(port);
+    if (priv->line_status & MSR_STATUS_LINE_DCD)
+        return 1;
+    return 0;
+}
+
+static void spcp8x5_dtr_rts(struct usb_serial_port *port, int on)
+{
+    struct spcp8x5_private *priv = usb_get_serial_port_data(port);
+    unsigned long flags;
+    u8 control;
+
+    spin_lock_irqsave(&priv->lock, flags);
+    if (on)
+        priv->line_control = MCR_CONTROL_LINE_DTR
+                        | MCR_CONTROL_LINE_RTS;
+    else
+        priv->line_control &= ~ (MCR_CONTROL_LINE_DTR
+                        | MCR_CONTROL_LINE_RTS);
+    control = priv->line_control;
+    spin_unlock_irqrestore(&priv->lock, flags);
+    spcp8x5_set_ctrlLine(port->serial->dev, control , priv->type);
+}
+
 /* close the serial port. We should wait for data sending to device 1st and
  * then kill all urb. */
-static void spcp8x5_close(struct tty_struct *tty,
-           struct usb_serial_port *port, struct file *filp)
+static void spcp8x5_close(struct usb_serial_port *port)
 {
     struct spcp8x5_private *priv = usb_get_serial_port_data(port);
     unsigned long flags;
-    unsigned int c_cflag;
-    int bps;
-    long timeout;
-    wait_queue_t wait;
     int result;
 
     dbg("%s - port %d", __func__, port->number);
 
-    /* wait for data to drain from the buffer */
     spin_lock_irqsave(&priv->lock, flags);
-    timeout = SPCP8x5_CLOSING_WAIT;
-    init_waitqueue_entry(&wait, current);
-    add_wait_queue(&tty->write_wait, &wait);
-    for (;;) {
-        set_current_state(TASK_INTERRUPTIBLE);
-        if (ringbuf_avail_data(priv->buf) == 0 ||
-            timeout == 0 || signal_pending(current))
-            break;
-        spin_unlock_irqrestore(&priv->lock, flags);
-        timeout = schedule_timeout(timeout);
-        spin_lock_irqsave(&priv->lock, flags);
-    }
-    set_current_state(TASK_RUNNING);
-    remove_wait_queue(&tty->write_wait, &wait);
-
     /* clear out any remaining data in the buffer */
     clear_ringbuf(priv->buf);
     spin_unlock_irqrestore(&priv->lock, flags);
 
-    /* wait for characters to drain from the device (this is long enough
-     * for the entire all byte spcp8x5 hardware buffer to drain with no
-     * flow control for data rates of 1200 bps or more, for lower rates we
-     * should really know how much data is in the buffer to compute a delay
-     * that is not unnecessarily long) */
-    bps = tty_get_baud_rate(tty);
-    if (bps > 1200)
-        timeout = max((HZ*2560) / bps, HZ/10);
-    else
-        timeout = 2*HZ;
-    set_current_state(TASK_INTERRUPTIBLE);
-    schedule_timeout(timeout);
-
-    /* clear control lines */
-    if (tty) {
-        c_cflag = tty->termios->c_cflag;
-        if (c_cflag & HUPCL) {
-            spin_lock_irqsave(&priv->lock, flags);
-            priv->line_control = 0;
-            spin_unlock_irqrestore(&priv->lock, flags);
-            spcp8x5_set_ctrlLine(port->serial->dev, 0 , priv->type);
-        }
-    }
-
     /* kill urb */
     if (port->write_urb != NULL) {
         result = usb_unlink_urb(port->write_urb);
@@ -665,13 +646,6 @@ static int spcp8x5_open(struct tty_struct *tty,
     if (ret)
         return ret;
 
-    spin_lock_irqsave(&priv->lock, flags);
-    if (tty && (tty->termios->c_cflag & CBAUD))
-        priv->line_control = MCR_DTR | MCR_RTS;
-    else
-        priv->line_control = 0;
-    spin_unlock_irqrestore(&priv->lock, flags);
-
     spcp8x5_set_ctrlLine(serial->dev, priv->line_control , priv->type);
 
     /* Setup termios */
@@ -691,9 +665,10 @@ static int spcp8x5_open(struct tty_struct *tty,
     port->read_urb->dev = serial->dev;
     ret = usb_submit_urb(port->read_urb, GFP_KERNEL);
     if (ret) {
-        spcp8x5_close(tty, port, NULL);
+        spcp8x5_close(port);
         return -EPROTO;
     }
+    port->port.drain_delay = 256;
     return 0;
 }
 
@@ -1033,6 +1008,8 @@ static struct usb_serial_driver spcp8x5_device = {
     .num_ports      = 1,
     .open           = spcp8x5_open,
     .close          = spcp8x5_close,
+    .dtr_rts        = spcp8x5_dtr_rts,
+    .carrier_raised = spcp8x5_carrier_raised,
     .write          = spcp8x5_write,
     .set_termios    = spcp8x5_set_termios,
     .ioctl          = spcp8x5_ioctl,
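The CBAUD test removed from spcp8x5_open() is not lost: raising the lines on a
non-B0 open is now the tty core's job. Roughly, simplified from the tty_port
helpers:

static void raise_lines_on_open(struct tty_port *tport, struct tty_struct *tty)
{
    /* B0 means "hang up", so leave DTR/RTS down in that case */
    if (tty->termios->c_cflag & CBAUD)
        tty_port_raise_dtr_rts(tport);  /* ends in ->dtr_rts(port, 1) */
}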
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 69879e43794..8b07ebc6bae 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -152,8 +152,7 @@ static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port,
     return result;
 }
 
-static void symbol_close(struct tty_struct *tty, struct usb_serial_port *port,
-             struct file *filp)
+static void symbol_close(struct usb_serial_port *port)
 {
     struct symbol_private *priv = usb_get_serial_data(port->serial);
 
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 0a64bac306e..42cb04c403b 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -100,8 +100,7 @@ static int ti_startup(struct usb_serial *serial);
 static void ti_shutdown(struct usb_serial *serial);
 static int ti_open(struct tty_struct *tty, struct usb_serial_port *port,
        struct file *file);
-static void ti_close(struct tty_struct *tty, struct usb_serial_port *port,
-       struct file *file);
+static void ti_close(struct usb_serial_port *port);
 static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
        const unsigned char *data, int count);
 static int ti_write_room(struct tty_struct *tty);
@@ -647,8 +646,7 @@ release_lock:
 }
 
 
-static void ti_close(struct tty_struct *tty, struct usb_serial_port *port,
-       struct file *file)
+static void ti_close(struct usb_serial_port *port)
 {
     struct ti_device *tdev;
     struct ti_port *tport;
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 0a566eea49c..1967a7edc10 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -238,9 +238,11 @@ static int serial_open (struct tty_struct *tty, struct file *filp)
238 goto bailout_interface_put; 238 goto bailout_interface_put;
239 mutex_unlock(&serial->disc_mutex); 239 mutex_unlock(&serial->disc_mutex);
240 } 240 }
241
242 mutex_unlock(&port->mutex); 241 mutex_unlock(&port->mutex);
243 return 0; 242 /* Now do the correct tty layer semantics */
243 retval = tty_port_block_til_ready(&port->port, tty, filp);
244 if (retval == 0)
245 return 0;
244 246
245bailout_interface_put: 247bailout_interface_put:
246 usb_autopm_put_interface(serial->interface); 248 usb_autopm_put_interface(serial->interface);
@@ -259,64 +261,89 @@ bailout_serial_put:
259 return retval; 261 return retval;
260} 262}
261 263
262static void serial_close(struct tty_struct *tty, struct file *filp) 264/**
265 * serial_do_down - shut down hardware
266 * @port: port to shut down
267 *
268 * Shut down a USB port unless it is the console. We never shut down the
269 * console hardware as it will always be in use.
270 *
271 * Don't free any resources at this point
272 */
273static void serial_do_down(struct usb_serial_port *port)
263{ 274{
264 struct usb_serial_port *port = tty->driver_data; 275 struct usb_serial_driver *drv = port->serial->type;
265 struct usb_serial *serial; 276 struct usb_serial *serial;
266 struct module *owner; 277 struct module *owner;
267 int count;
268 278
269 if (!port) 279 /* The console is magical, do not hang up the console hardware
280 or there will be tears */
281 if (port->console)
270 return; 282 return;
271 283
272 dbg("%s - port %d", __func__, port->number);
273
274 mutex_lock(&port->mutex); 284 mutex_lock(&port->mutex);
275 serial = port->serial; 285 serial = port->serial;
276 owner = serial->type->driver.owner; 286 owner = serial->type->driver.owner;
277 287
278 if (port->port.count == 0) { 288 if (drv->close)
279 mutex_unlock(&port->mutex); 289 drv->close(port);
280 return;
281 }
282
283 if (port->port.count == 1)
284 /* only call the device specific close if this
285 * port is being closed by the last owner. Ensure we do
286 * this before we drop the port count. The call is protected
287 * by the port mutex
288 */
289 serial->type->close(tty, port, filp);
290
291 if (port->port.count == (port->console ? 2 : 1)) {
292 struct tty_struct *tty = tty_port_tty_get(&port->port);
293 if (tty) {
294 /* We must do this before we drop the port count to
295 zero. */
296 if (tty->driver_data)
297 tty->driver_data = NULL;
298 tty_port_tty_set(&port->port, NULL);
299 tty_kref_put(tty);
300 }
301 }
302 290
303 --port->port.count;
304 count = port->port.count;
305 mutex_unlock(&port->mutex); 291 mutex_unlock(&port->mutex);
306 put_device(&port->dev); 292}
293
294/**
295 * serial_do_free - free resources post close/hangup
296 * @port: port to free up
297 *
298 * Do the resource freeing and refcount dropping for the port. We must
299 * be careful about ordering and we must avoid freeing up the console.
300 */
307 301
302static void serial_do_free(struct usb_serial_port *port)
303{
304 struct usb_serial *serial;
305 struct module *owner;
306
307 /* The console is magical, do not hang up the console hardware
308 or there will be tears */
309 if (port->console)
310 return;
311
312 serial = port->serial;
313 owner = serial->type->driver.owner;
314 put_device(&port->dev);
308 /* Mustn't dereference port any more */ 315 /* Mustn't dereference port any more */
309 if (count == 0) { 316 mutex_lock(&serial->disc_mutex);
310 mutex_lock(&serial->disc_mutex); 317 if (!serial->disconnected)
311 if (!serial->disconnected) 318 usb_autopm_put_interface(serial->interface);
312 usb_autopm_put_interface(serial->interface); 319 mutex_unlock(&serial->disc_mutex);
313 mutex_unlock(&serial->disc_mutex);
314 }
315 usb_serial_put(serial); 320 usb_serial_put(serial);
316
317 /* Mustn't dereference serial any more */ 321 /* Mustn't dereference serial any more */
318 if (count == 0) 322 module_put(owner);
319 module_put(owner); 323}
324
325static void serial_close(struct tty_struct *tty, struct file *filp)
326{
327 struct usb_serial_port *port = tty->driver_data;
328
329 dbg("%s - port %d", __func__, port->number);
330
331
332 if (tty_port_close_start(&port->port, tty, filp) == 0)
333 return;
334
335 serial_do_down(port);
336 tty_port_close_end(&port->port, tty);
337 tty_port_tty_set(&port->port, NULL);
338 serial_do_free(port);
339}
340
341static void serial_hangup(struct tty_struct *tty)
342{
343 struct usb_serial_port *port = tty->driver_data;
344 serial_do_down(port);
345 tty_port_hangup(&port->port);
346 serial_do_free(port);
320} 347}
321 348
322static int serial_write(struct tty_struct *tty, const unsigned char *buf, 349static int serial_write(struct tty_struct *tty, const unsigned char *buf,
@@ -648,6 +675,29 @@ static struct usb_serial_driver *search_serial_device(
648 return NULL; 675 return NULL;
649} 676}
650 677
678static int serial_carrier_raised(struct tty_port *port)
679{
680 struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
681 struct usb_serial_driver *drv = p->serial->type;
682 if (drv->carrier_raised)
683 return drv->carrier_raised(p);
684 /* No carrier control - don't block */
685 return 1;
686}
687
688static void serial_dtr_rts(struct tty_port *port, int on)
689{
690 struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
691 struct usb_serial_driver *drv = p->serial->type;
692 if (drv->dtr_rts)
693 drv->dtr_rts(p, on);
694}
695
696static const struct tty_port_operations serial_port_ops = {
697 .carrier_raised = serial_carrier_raised,
698 .dtr_rts = serial_dtr_rts,
699};
700
651int usb_serial_probe(struct usb_interface *interface, 701int usb_serial_probe(struct usb_interface *interface,
652 const struct usb_device_id *id) 702 const struct usb_device_id *id)
653{ 703{
@@ -841,6 +891,7 @@ int usb_serial_probe(struct usb_interface *interface,
841 if (!port) 891 if (!port)
842 goto probe_error; 892 goto probe_error;
843 tty_port_init(&port->port); 893 tty_port_init(&port->port);
894 port->port.ops = &serial_port_ops;
844 port->serial = serial; 895 port->serial = serial;
845 spin_lock_init(&port->lock); 896 spin_lock_init(&port->lock);
846 mutex_init(&port->mutex); 897 mutex_init(&port->mutex);
@@ -974,6 +1025,7 @@ int usb_serial_probe(struct usb_interface *interface,
974 if (retval > 0) { 1025 if (retval > 0) {
975 /* quietly accept this device, but don't bind to a 1026 /* quietly accept this device, but don't bind to a
976 serial port as it's about to disappear */ 1027 serial port as it's about to disappear */
1028 serial->num_ports = 0;
977 goto exit; 1029 goto exit;
978 } 1030 }
979 } 1031 }
@@ -1070,6 +1122,9 @@ void usb_serial_disconnect(struct usb_interface *interface)
1070 if (port) { 1122 if (port) {
1071 struct tty_struct *tty = tty_port_tty_get(&port->port); 1123 struct tty_struct *tty = tty_port_tty_get(&port->port);
1072 if (tty) { 1124 if (tty) {
1125 /* The hangup will occur asynchronously but
1126 the object refcounts will sort out all the
1127 cleanup */
1073 tty_hangup(tty); 1128 tty_hangup(tty);
1074 tty_kref_put(tty); 1129 tty_kref_put(tty);
1075 } 1130 }
@@ -1134,6 +1189,7 @@ static const struct tty_operations serial_ops = {
1134 .open = serial_open, 1189 .open = serial_open,
1135 .close = serial_close, 1190 .close = serial_close,
1136 .write = serial_write, 1191 .write = serial_write,
1192 .hangup = serial_hangup,
1137 .write_room = serial_write_room, 1193 .write_room = serial_write_room,
1138 .ioctl = serial_ioctl, 1194 .ioctl = serial_ioctl,
1139 .set_termios = serial_set_termios, 1195 .set_termios = serial_set_termios,
@@ -1146,6 +1202,7 @@ static const struct tty_operations serial_ops = {
1146 .proc_fops = &serial_proc_fops, 1202 .proc_fops = &serial_proc_fops,
1147}; 1203};
1148 1204
1205
1149struct tty_driver *usb_serial_tty_driver; 1206struct tty_driver *usb_serial_tty_driver;
1150 1207
1151static int __init usb_serial_init(void) 1208static int __init usb_serial_init(void)
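
Taken together, these usb-serial.c hunks move open/close policy out of individual drivers and into the tty_port core: blocking opens go through tty_port_block_til_ready(), close is bracketed by tty_port_close_start()/tty_port_close_end(), and DTR/RTS plus carrier sensing become tty_port_operations that the core invokes at the right moments. Drivers shrink to hardware-only callbacks with the signatures seen above. A minimal sketch of those shapes, using a hypothetical "example" driver (the names are illustrative, not from this patch):

#include <linux/usb/serial.h>

/* close() no longer receives the tty or filp; the core has already
 * done the tty-layer bookkeeping by the time this runs. */
static void example_close(struct usb_serial_port *port)
{
        usb_kill_urb(port->read_urb);   /* hardware shutdown only */
}

/* Called by the tty_port core to raise or drop the modem-control
 * lines on open/close; replaces open-coded DTR/RTS work in open(). */
static void example_dtr_rts(struct usb_serial_port *port, int on)
{
        /* send the device-specific line-control request here */
}

/* Non-zero means carrier (DCD) is up; tty_port_block_til_ready()
 * consults this for blocking opens.  Returning 1 unconditionally,
 * like the core's fallback above, means "never block on carrier". */
static int example_carrier_raised(struct usb_serial_port *port)
{
        return 1;
}

static struct usb_serial_driver example_device = {
        .close          = example_close,
        .dtr_rts        = example_dtr_rts,
        .carrier_raised = example_carrier_raised,
        /* .open, .write, ... unchanged */
};
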
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 5ac414bda71..b15f1c0e1d4 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -38,8 +38,7 @@
38/* function prototypes for a handspring visor */ 38/* function prototypes for a handspring visor */
39static int visor_open(struct tty_struct *tty, struct usb_serial_port *port, 39static int visor_open(struct tty_struct *tty, struct usb_serial_port *port,
40 struct file *filp); 40 struct file *filp);
41static void visor_close(struct tty_struct *tty, struct usb_serial_port *port, 41static void visor_close(struct usb_serial_port *port);
42 struct file *filp);
43static int visor_write(struct tty_struct *tty, struct usb_serial_port *port, 42static int visor_write(struct tty_struct *tty, struct usb_serial_port *port,
44 const unsigned char *buf, int count); 43 const unsigned char *buf, int count);
45static int visor_write_room(struct tty_struct *tty); 44static int visor_write_room(struct tty_struct *tty);
@@ -324,8 +323,7 @@ exit:
324} 323}
325 324
326 325
327static void visor_close(struct tty_struct *tty, 326static void visor_close(struct usb_serial_port *port)
328 struct usb_serial_port *port, struct file *filp)
329{ 327{
330 struct visor_private *priv = usb_get_serial_port_data(port); 328 struct visor_private *priv = usb_get_serial_port_data(port);
331 unsigned char *transfer_buffer; 329 unsigned char *transfer_buffer;
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 5335d3211c0..7c7295d09f3 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -147,8 +147,7 @@ static int whiteheat_attach(struct usb_serial *serial);
147static void whiteheat_shutdown(struct usb_serial *serial); 147static void whiteheat_shutdown(struct usb_serial *serial);
148static int whiteheat_open(struct tty_struct *tty, 148static int whiteheat_open(struct tty_struct *tty,
149 struct usb_serial_port *port, struct file *filp); 149 struct usb_serial_port *port, struct file *filp);
150static void whiteheat_close(struct tty_struct *tty, 150static void whiteheat_close(struct usb_serial_port *port);
151 struct usb_serial_port *port, struct file *filp);
152static int whiteheat_write(struct tty_struct *tty, 151static int whiteheat_write(struct tty_struct *tty,
153 struct usb_serial_port *port, 152 struct usb_serial_port *port,
154 const unsigned char *buf, int count); 153 const unsigned char *buf, int count);
@@ -712,8 +711,7 @@ exit:
712} 711}
713 712
714 713
715static void whiteheat_close(struct tty_struct *tty, 714static void whiteheat_close(struct usb_serial_port *port)
716 struct usb_serial_port *port, struct file *filp)
717{ 715{
718 struct whiteheat_private *info = usb_get_serial_port_data(port); 716 struct whiteheat_private *info = usb_get_serial_port_data(port);
719 struct whiteheat_urb_wrap *wrap; 717 struct whiteheat_urb_wrap *wrap;
@@ -723,31 +721,7 @@ static void whiteheat_close(struct tty_struct *tty,
723 721
724 dbg("%s - port %d", __func__, port->number); 722 dbg("%s - port %d", __func__, port->number);
725 723
726 mutex_lock(&port->serial->disc_mutex);
727 /* filp is NULL when called from usb_serial_disconnect */
728 if ((filp && (tty_hung_up_p(filp))) || port->serial->disconnected) {
729 mutex_unlock(&port->serial->disc_mutex);
730 return;
731 }
732 mutex_unlock(&port->serial->disc_mutex);
733
734 tty->closing = 1;
735
736/*
737 * Not currently in use; tty_wait_until_sent() calls
738 * serial_chars_in_buffer() which deadlocks on the second semaphore
739 * acquisition. This should be fixed at some point. Greg's been
740 * notified.
741 if ((filp->f_flags & (O_NDELAY | O_NONBLOCK)) == 0) {
742 tty_wait_until_sent(tty, CLOSING_DELAY);
743 }
744*/
745
746 tty_driver_flush_buffer(tty);
747 tty_ldisc_flush(tty);
748
749 firm_report_tx_done(port); 724 firm_report_tx_done(port);
750
751 firm_close(port); 725 firm_close(port);
752 726
753 /* shutdown our bulk reads and writes */ 727 /* shutdown our bulk reads and writes */
@@ -775,10 +749,7 @@ static void whiteheat_close(struct tty_struct *tty,
775 } 749 }
776 spin_unlock_irq(&info->lock); 750 spin_unlock_irq(&info->lock);
777 mutex_unlock(&info->deathwarrant); 751 mutex_unlock(&info->deathwarrant);
778
779 stop_command_port(port->serial); 752 stop_command_port(port->serial);
780
781 tty->closing = 0;
782} 753}
783 754
784 755
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 4ca3b586064..cfa26d56ce6 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -132,7 +132,7 @@ static int slave_configure(struct scsi_device *sdev)
132 132
133 if (us->fflags & US_FL_MAX_SECTORS_MIN) 133 if (us->fflags & US_FL_MAX_SECTORS_MIN)
134 max_sectors = PAGE_CACHE_SIZE >> 9; 134 max_sectors = PAGE_CACHE_SIZE >> 9;
135 if (sdev->request_queue->max_sectors > max_sectors) 135 if (queue_max_sectors(sdev->request_queue) > max_sectors)
136 blk_queue_max_sectors(sdev->request_queue, 136 blk_queue_max_sectors(sdev->request_queue,
137 max_sectors); 137 max_sectors);
138 } else if (sdev->type == TYPE_TAPE) { 138 } else if (sdev->type == TYPE_TAPE) {
@@ -483,7 +483,7 @@ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *att
483{ 483{
484 struct scsi_device *sdev = to_scsi_device(dev); 484 struct scsi_device *sdev = to_scsi_device(dev);
485 485
486 return sprintf(buf, "%u\n", sdev->request_queue->max_sectors); 486 return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue));
487} 487}
488 488
489/* Input routine for the sysfs max_sectors file */ 489/* Input routine for the sysfs max_sectors file */
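
The scsiglue.c hunks are part of a tree-wide move from dereferencing struct request_queue fields to accessor helpers: queue_max_sectors() reads the same limit the code previously reached into directly, insulating callers from block-layer layout changes. A condensed sketch of the read-then-clamp pairing used here (same two calls as the hunk, context trimmed):

#include <linux/blkdev.h>

static void clamp_max_sectors(struct request_queue *q, unsigned int max)
{
        /* accessor instead of q->max_sectors */
        if (queue_max_sectors(q) > max)
                blk_queue_max_sectors(q, max);
}
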
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 0048f1185a6..2b5a691064b 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -397,7 +397,7 @@ config FB_SA1100
397 397
398config FB_IMX 398config FB_IMX
399 tristate "Motorola i.MX LCD support" 399 tristate "Motorola i.MX LCD support"
400 depends on FB && (ARCH_IMX || ARCH_MX2) 400 depends on FB && (ARCH_MX1 || ARCH_MX2)
401 select FB_CFB_FILLRECT 401 select FB_CFB_FILLRECT
402 select FB_CFB_COPYAREA 402 select FB_CFB_COPYAREA
403 select FB_CFB_IMAGEBLIT 403 select FB_CFB_IMAGEBLIT
@@ -1759,6 +1759,16 @@ config FB_68328
1759 Say Y here if you want to support the built-in frame buffer of 1759 Say Y here if you want to support the built-in frame buffer of
1760 the Motorola 68328 CPU family. 1760 the Motorola 68328 CPU family.
1761 1761
1762config FB_PXA168
1763 tristate "PXA168/910 LCD framebuffer support"
1764 depends on FB && (CPU_PXA168 || CPU_PXA910)
1765 select FB_CFB_FILLRECT
1766 select FB_CFB_COPYAREA
1767 select FB_CFB_IMAGEBLIT
1768 ---help---
1769 Frame buffer driver for the built-in LCD controller in the Marvell
1770 MMP processor.
1771
1762config FB_PXA 1772config FB_PXA
1763 tristate "PXA LCD framebuffer support" 1773 tristate "PXA LCD framebuffer support"
1764 depends on FB && ARCH_PXA 1774 depends on FB && ARCH_PXA
@@ -1996,7 +2006,7 @@ config FB_PS3_DEFAULT_SIZE_M
1996 2006
1997config FB_XILINX 2007config FB_XILINX
1998 tristate "Xilinx frame buffer support" 2008 tristate "Xilinx frame buffer support"
1999 depends on FB && XILINX_VIRTEX 2009 depends on FB && (XILINX_VIRTEX || MICROBLAZE)
2000 select FB_CFB_FILLRECT 2010 select FB_CFB_FILLRECT
2001 select FB_CFB_COPYAREA 2011 select FB_CFB_COPYAREA
2002 select FB_CFB_IMAGEBLIT 2012 select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index d8d0be5151e..01a819f4737 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -97,6 +97,7 @@ obj-$(CONFIG_FB_GBE) += gbefb.o
97obj-$(CONFIG_FB_CIRRUS) += cirrusfb.o 97obj-$(CONFIG_FB_CIRRUS) += cirrusfb.o
98obj-$(CONFIG_FB_ASILIANT) += asiliantfb.o 98obj-$(CONFIG_FB_ASILIANT) += asiliantfb.o
99obj-$(CONFIG_FB_PXA) += pxafb.o 99obj-$(CONFIG_FB_PXA) += pxafb.o
100obj-$(CONFIG_FB_PXA168) += pxa168fb.o
100obj-$(CONFIG_FB_W100) += w100fb.o 101obj-$(CONFIG_FB_W100) += w100fb.o
101obj-$(CONFIG_FB_TMIO) += tmiofb.o 102obj-$(CONFIG_FB_TMIO) += tmiofb.o
102obj-$(CONFIG_FB_AU1100) += au1100fb.o 103obj-$(CONFIG_FB_AU1100) += au1100fb.o
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 61050ab1412..fb8163d181a 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -351,7 +351,7 @@ static int clcdfb_register(struct clcd_fb *fb)
351 } 351 }
352 352
353 fb->fb.fix.mmio_start = fb->dev->res.start; 353 fb->fb.fix.mmio_start = fb->dev->res.start;
354 fb->fb.fix.mmio_len = 4096; 354 fb->fb.fix.mmio_len = resource_size(&fb->dev->res);
355 355
356 fb->regs = ioremap(fb->fb.fix.mmio_start, fb->fb.fix.mmio_len); 356 fb->regs = ioremap(fb->fb.fix.mmio_start, fb->fb.fix.mmio_len);
357 if (!fb->regs) { 357 if (!fb->regs) {
@@ -437,7 +437,7 @@ static int clcdfb_register(struct clcd_fb *fb)
437 return ret; 437 return ret;
438} 438}
439 439
440static int clcdfb_probe(struct amba_device *dev, void *id) 440static int clcdfb_probe(struct amba_device *dev, struct amba_id *id)
441{ 441{
442 struct clcd_board *board = dev->dev.platform_data; 442 struct clcd_board *board = dev->dev.platform_data;
443 struct clcd_fb *fb; 443 struct clcd_fb *fb;
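
The mmio_len change relies on resource_size(), which avoids the classic off-by-one when sizing a resource whose start and end addresses are both inclusive. Its definition in <linux/ioport.h> is effectively:

static inline resource_size_t resource_size(const struct resource *res)
{
        return res->end - res->start + 1;       /* end is inclusive */
}
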
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 9a577a800db..2fb63f6ea2f 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -29,14 +29,8 @@
29 29
30/* configurable parameters */ 30/* configurable parameters */
31#define ATMEL_LCDC_CVAL_DEFAULT 0xc8 31#define ATMEL_LCDC_CVAL_DEFAULT 0xc8
32#define ATMEL_LCDC_DMA_BURST_LEN 8 32#define ATMEL_LCDC_DMA_BURST_LEN 8 /* words */
33 33#define ATMEL_LCDC_FIFO_SIZE 512 /* words */
34#if defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91CAP9) || \
35 defined(CONFIG_ARCH_AT91SAM9RL)
36#define ATMEL_LCDC_FIFO_SIZE 2048
37#else
38#define ATMEL_LCDC_FIFO_SIZE 512
39#endif
40 34
41#if defined(CONFIG_ARCH_AT91) 35#if defined(CONFIG_ARCH_AT91)
42#define ATMEL_LCDFB_FBINFO_DEFAULT (FBINFO_DEFAULT \ 36#define ATMEL_LCDFB_FBINFO_DEFAULT (FBINFO_DEFAULT \
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 35e8eb02b9e..e4e4d433b00 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -354,7 +354,7 @@ static int default_crt_on __devinitdata = 0;
354static int default_lcd_on __devinitdata = 1; 354static int default_lcd_on __devinitdata = 1;
355 355
356#ifdef CONFIG_MTRR 356#ifdef CONFIG_MTRR
357static int mtrr = 1; 357static bool mtrr = true;
358#endif 358#endif
359 359
360#ifdef CONFIG_PMAC_BACKLIGHT 360#ifdef CONFIG_PMAC_BACKLIGHT
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 38e86b84dce..59d7d5ec17a 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -180,7 +180,7 @@ static inline void vga_set_mem_top(struct vc_data *c)
180} 180}
181 181
182#ifdef CONFIG_VGACON_SOFT_SCROLLBACK 182#ifdef CONFIG_VGACON_SOFT_SCROLLBACK
183#include <linux/bootmem.h> 183#include <linux/slab.h>
184/* software scrollback */ 184/* software scrollback */
185static void *vgacon_scrollback; 185static void *vgacon_scrollback;
186static int vgacon_scrollback_tail; 186static int vgacon_scrollback_tail;
@@ -210,8 +210,7 @@ static void vgacon_scrollback_init(int pitch)
210 */ 210 */
211static void __init_refok vgacon_scrollback_startup(void) 211static void __init_refok vgacon_scrollback_startup(void)
212{ 212{
213 vgacon_scrollback = alloc_bootmem(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE 213 vgacon_scrollback = kcalloc(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, GFP_NOWAIT);
214 * 1024);
215 vgacon_scrollback_init(vga_video_num_columns * 2); 214 vgacon_scrollback_init(vga_video_num_columns * 2);
216} 215}
217 216
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index 83c5cefc266..da7c01b39be 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -1736,10 +1736,8 @@ static int __init cyber2000fb_init(void)
1736 1736
1737#ifdef CONFIG_ARCH_SHARK 1737#ifdef CONFIG_ARCH_SHARK
1738 err = cyberpro_vl_probe(); 1738 err = cyberpro_vl_probe();
1739 if (!err) { 1739 if (!err)
1740 ret = 0; 1740 ret = 0;
1741 __module_get(THIS_MODULE);
1742 }
1743#endif 1741#endif
1744#ifdef CONFIG_PCI 1742#ifdef CONFIG_PCI
1745 err = pci_register_driver(&cyberpro_driver); 1743 err = pci_register_driver(&cyberpro_driver);
@@ -1749,14 +1747,15 @@ static int __init cyber2000fb_init(void)
1749 1747
1750 return ret ? err : 0; 1748 return ret ? err : 0;
1751} 1749}
1750module_init(cyber2000fb_init);
1752 1751
1752#ifndef CONFIG_ARCH_SHARK
1753static void __exit cyberpro_exit(void) 1753static void __exit cyberpro_exit(void)
1754{ 1754{
1755 pci_unregister_driver(&cyberpro_driver); 1755 pci_unregister_driver(&cyberpro_driver);
1756} 1756}
1757
1758module_init(cyber2000fb_init);
1759module_exit(cyberpro_exit); 1757module_exit(cyberpro_exit);
1758#endif
1760 1759
1761MODULE_AUTHOR("Russell King"); 1760MODULE_AUTHOR("Russell King");
1762MODULE_DESCRIPTION("CyberPro 2000, 2010 and 5000 framebuffer driver"); 1761MODULE_DESCRIPTION("CyberPro 2000, 2010 and 5000 framebuffer driver");
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index e6467cf9f19..020db7fc915 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -335,9 +335,9 @@ static int __init hitfb_probe(struct platform_device *dev)
335 if (fb_get_options("hitfb", NULL)) 335 if (fb_get_options("hitfb", NULL))
336 return -ENODEV; 336 return -ENODEV;
337 337
338 hitfb_fix.mmio_start = CONFIG_HD64461_IOBASE+0x1000; 338 hitfb_fix.mmio_start = HD64461_IO_OFFSET(0x1000);
339 hitfb_fix.mmio_len = 0x1000; 339 hitfb_fix.mmio_len = 0x1000;
340 hitfb_fix.smem_start = CONFIG_HD64461_IOBASE + 0x02000000; 340 hitfb_fix.smem_start = HD64461_IO_OFFSET(0x02000000);
341 hitfb_fix.smem_len = 512 * 1024; 341 hitfb_fix.smem_len = 512 * 1024;
342 342
343 lcdclor = fb_readw(HD64461_LCDCLOR); 343 lcdclor = fb_readw(HD64461_LCDCLOR);
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index 9894de1c9b9..b7af5256e88 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -706,7 +706,7 @@ static void mx3fb_dma_done(void *arg)
706 dev_dbg(mx3fb->dev, "irq %d callback\n", ichannel->eof_irq); 706 dev_dbg(mx3fb->dev, "irq %d callback\n", ichannel->eof_irq);
707 707
708 /* We only need one interrupt, it will be re-enabled as needed */ 708 /* We only need one interrupt, it will be re-enabled as needed */
709 disable_irq(ichannel->eof_irq); 709 disable_irq_nosync(ichannel->eof_irq);
710 710
711 complete(&mx3_fbi->flip_cmpl); 711 complete(&mx3_fbi->flip_cmpl);
712} 712}
@@ -1366,7 +1366,7 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
1366 1366
1367 mx3fb_blank(FB_BLANK_UNBLANK, fbi); 1367 mx3fb_blank(FB_BLANK_UNBLANK, fbi);
1368 1368
1369 dev_info(dev, "mx3fb: fb registered, using mode %s\n", fb_mode); 1369 dev_info(dev, "registered, using mode %s\n", fb_mode);
1370 1370
1371 ret = register_framebuffer(fbi); 1371 ret = register_framebuffer(fbi);
1372 if (ret < 0) 1372 if (ret < 0)
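
mx3fb_dma_done() runs in interrupt context as a DMA completion callback, and disable_irq() synchronizes, i.e. waits for any handler running on that line to finish, so calling it from the handler's own path can deadlock. disable_irq_nosync() merely masks the line and returns. A minimal sketch of the safe pattern (hypothetical handler name):

#include <linux/interrupt.h>

static irqreturn_t eof_irq_handler(int irq, void *arg)
{
        /* We only want one interrupt: mask the line without waiting
         * for running handlers (we *are* the handler). */
        disable_irq_nosync(irq);
        return IRQ_HANDLED;
}
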
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index dfb72f5e4c9..148cbcc3960 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -880,20 +880,22 @@ static irqreturn_t omap_dispc_irq_handler(int irq, void *dev)
880 880
881static int get_dss_clocks(void) 881static int get_dss_clocks(void)
882{ 882{
883 if (IS_ERR((dispc.dss_ick = clk_get(dispc.fbdev->dev, "dss_ick")))) { 883 dispc.dss_ick = clk_get(dispc.fbdev->dev, "ick");
884 dev_err(dispc.fbdev->dev, "can't get dss_ick\n"); 884 if (IS_ERR(dispc.dss_ick)) {
885 dev_err(dispc.fbdev->dev, "can't get ick\n");
885 return PTR_ERR(dispc.dss_ick); 886 return PTR_ERR(dispc.dss_ick);
886 } 887 }
887 888
888 if (IS_ERR((dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck")))) { 889 dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck");
890 if (IS_ERR(dispc.dss1_fck)) {
889 dev_err(dispc.fbdev->dev, "can't get dss1_fck\n"); 891 dev_err(dispc.fbdev->dev, "can't get dss1_fck\n");
890 clk_put(dispc.dss_ick); 892 clk_put(dispc.dss_ick);
891 return PTR_ERR(dispc.dss1_fck); 893 return PTR_ERR(dispc.dss1_fck);
892 } 894 }
893 895
894 if (IS_ERR((dispc.dss_54m_fck = 896 dispc.dss_54m_fck = clk_get(dispc.fbdev->dev, "tv_fck");
895 clk_get(dispc.fbdev->dev, "dss_54m_fck")))) { 897 if (IS_ERR(dispc.dss_54m_fck)) {
896 dev_err(dispc.fbdev->dev, "can't get dss_54m_fck\n"); 898 dev_err(dispc.fbdev->dev, "can't get tv_fck\n");
897 clk_put(dispc.dss_ick); 899 clk_put(dispc.dss_ick);
898 clk_put(dispc.dss1_fck); 900 clk_put(dispc.dss1_fck);
899 return PTR_ERR(dispc.dss_54m_fck); 901 return PTR_ERR(dispc.dss_54m_fck);
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c
index 8aa6e47202b..5d4f34887a2 100644
--- a/drivers/video/omap/hwa742.c
+++ b/drivers/video/omap/hwa742.c
@@ -133,8 +133,7 @@ struct {
133 struct lcd_ctrl_extif *extif; 133 struct lcd_ctrl_extif *extif;
134 struct lcd_ctrl *int_ctrl; 134 struct lcd_ctrl *int_ctrl;
135 135
136 void (*power_up)(struct device *dev); 136 struct clk *sys_ck;
137 void (*power_down)(struct device *dev);
138} hwa742; 137} hwa742;
139 138
140struct lcd_ctrl hwa742_ctrl; 139struct lcd_ctrl hwa742_ctrl;
@@ -915,14 +914,13 @@ static void hwa742_suspend(void)
915 hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED); 914 hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
916 /* Enable sleep mode */ 915 /* Enable sleep mode */
917 hwa742_write_reg(HWA742_POWER_SAVE, 1 << 1); 916 hwa742_write_reg(HWA742_POWER_SAVE, 1 << 1);
918 if (hwa742.power_down != NULL) 917 clk_disable(hwa742.sys_ck);
919 hwa742.power_down(hwa742.fbdev->dev);
920} 918}
921 919
922static void hwa742_resume(void) 920static void hwa742_resume(void)
923{ 921{
924 if (hwa742.power_up != NULL) 922 clk_enable(hwa742.sys_ck);
925 hwa742.power_up(hwa742.fbdev->dev); 923
926 /* Disable sleep mode */ 924 /* Disable sleep mode */
927 hwa742_write_reg(HWA742_POWER_SAVE, 0); 925 hwa742_write_reg(HWA742_POWER_SAVE, 0);
928 while (1) { 926 while (1) {
@@ -955,14 +953,13 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
955 omapfb_conf = fbdev->dev->platform_data; 953 omapfb_conf = fbdev->dev->platform_data;
956 ctrl_conf = omapfb_conf->ctrl_platform_data; 954 ctrl_conf = omapfb_conf->ctrl_platform_data;
957 955
958 if (ctrl_conf == NULL || ctrl_conf->get_clock_rate == NULL) { 956 if (ctrl_conf == NULL) {
959 dev_err(fbdev->dev, "HWA742: missing platform data\n"); 957 dev_err(fbdev->dev, "HWA742: missing platform data\n");
960 r = -ENOENT; 958 r = -ENOENT;
961 goto err1; 959 goto err1;
962 } 960 }
963 961
964 hwa742.power_down = ctrl_conf->power_down; 962 hwa742.sys_ck = clk_get(NULL, "hwa_sys_ck");
965 hwa742.power_up = ctrl_conf->power_up;
966 963
967 spin_lock_init(&hwa742.req_lock); 964 spin_lock_init(&hwa742.req_lock);
968 965
@@ -972,12 +969,11 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
972 if ((r = hwa742.extif->init(fbdev)) < 0) 969 if ((r = hwa742.extif->init(fbdev)) < 0)
973 goto err2; 970 goto err2;
974 971
975 ext_clk = ctrl_conf->get_clock_rate(fbdev->dev); 972 ext_clk = clk_get_rate(hwa742.sys_ck);
976 if ((r = calc_extif_timings(ext_clk, &extif_mem_div)) < 0) 973 if ((r = calc_extif_timings(ext_clk, &extif_mem_div)) < 0)
977 goto err3; 974 goto err3;
978 hwa742.extif->set_timings(&hwa742.reg_timings); 975 hwa742.extif->set_timings(&hwa742.reg_timings);
979 if (hwa742.power_up != NULL) 976 clk_enable(hwa742.sys_ck);
980 hwa742.power_up(fbdev->dev);
981 977
982 calc_hwa742_clk_rates(ext_clk, &sys_clk, &pix_clk); 978 calc_hwa742_clk_rates(ext_clk, &sys_clk, &pix_clk);
983 if ((r = calc_extif_timings(sys_clk, &extif_mem_div)) < 0) 979 if ((r = calc_extif_timings(sys_clk, &extif_mem_div)) < 0)
@@ -1040,8 +1036,7 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
1040 1036
1041 return 0; 1037 return 0;
1042err4: 1038err4:
1043 if (hwa742.power_down != NULL) 1039 clk_disable(hwa742.sys_ck);
1044 hwa742.power_down(fbdev->dev);
1045err3: 1040err3:
1046 hwa742.extif->cleanup(); 1041 hwa742.extif->cleanup();
1047err2: 1042err2:
@@ -1055,8 +1050,7 @@ static void hwa742_cleanup(void)
1055 hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED); 1050 hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
1056 hwa742.extif->cleanup(); 1051 hwa742.extif->cleanup();
1057 hwa742.int_ctrl->cleanup(); 1052 hwa742.int_ctrl->cleanup();
1058 if (hwa742.power_down != NULL) 1053 clk_disable(hwa742.sys_ck);
1059 hwa742.power_down(hwa742.fbdev->dev);
1060} 1054}
1061 1055
1062struct lcd_ctrl hwa742_ctrl = { 1056struct lcd_ctrl hwa742_ctrl = {
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index a13c8dcad2a..9332d6ca645 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -83,12 +83,14 @@ static inline u32 rfbi_read_reg(int idx)
83 83
84static int rfbi_get_clocks(void) 84static int rfbi_get_clocks(void)
85{ 85{
86 if (IS_ERR((rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "dss_ick")))) { 86 rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "ick");
87 dev_err(rfbi.fbdev->dev, "can't get dss_ick\n"); 87 if (IS_ERR(rfbi.dss_ick)) {
88 dev_err(rfbi.fbdev->dev, "can't get ick\n");
88 return PTR_ERR(rfbi.dss_ick); 89 return PTR_ERR(rfbi.dss_ick);
89 } 90 }
90 91
91 if (IS_ERR((rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck")))) { 92 rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck");
93 if (IS_ERR(rfbi.dss1_fck)) {
92 dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n"); 94 dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n");
93 clk_put(rfbi.dss_ick); 95 clk_put(rfbi.dss_ick);
94 return PTR_ERR(rfbi.dss1_fck); 96 return PTR_ERR(rfbi.dss1_fck);
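
The dispc.c, hwa742.c and rfbi.c hunks share one migration: board-specific callbacks (power_up/power_down, get_clock_rate) and assignment-inside-IS_ERR() clock lookups give way to plain clk API calls with explicit unwinding. A condensed sketch of the lifecycle, reusing the "hwa_sys_ck" con_id from the hwa742 hunk (error unwinding trimmed):

#include <linux/clk.h>
#include <linux/err.h>

static struct clk *sys_ck;

static int ctrl_init(void)
{
        unsigned long ext_clk;

        sys_ck = clk_get(NULL, "hwa_sys_ck");
        if (IS_ERR(sys_ck))
                return PTR_ERR(sys_ck);

        ext_clk = clk_get_rate(sys_ck); /* was ctrl_conf->get_clock_rate() */
        clk_enable(sys_ck);             /* was ctrl_conf->power_up() */
        return 0;
}

static void ctrl_suspend(void)
{
        clk_disable(sys_ck);            /* was ctrl_conf->power_down() */
}
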
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
new file mode 100644
index 00000000000..84d8327e47d
--- /dev/null
+++ b/drivers/video/pxa168fb.c
@@ -0,0 +1,803 @@
1/*
2 * linux/drivers/video/pxa168fb.c -- Marvell PXA168 LCD Controller
3 *
4 * Copyright (C) 2008 Marvell International Ltd.
5 * All rights reserved.
6 *
7 * 2009-02-16 adapted from original version for PXA168/910
8 * Jun Nie <njun@marvell.com>
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file COPYING in the main directory of this archive for
12 * more details.
13 */
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/sched.h>
18#include <linux/string.h>
19#include <linux/interrupt.h>
20#include <linux/slab.h>
21#include <linux/fb.h>
22#include <linux/delay.h>
23#include <linux/init.h>
24#include <linux/ioport.h>
25#include <linux/platform_device.h>
26#include <linux/dma-mapping.h>
27#include <linux/clk.h>
28#include <linux/err.h>
29#include <linux/uaccess.h>
30#include <video/pxa168fb.h>
31
32#include "pxa168fb.h"
33
34#define DEFAULT_REFRESH 60 /* Hz */
35
36static int determine_best_pix_fmt(struct fb_var_screeninfo *var)
37{
38 /*
39 * Pseudocolor mode?
40 */
41 if (var->bits_per_pixel == 8)
42 return PIX_FMT_PSEUDOCOLOR;
43
44 /*
45 * Check for 565/1555.
46 */
47 if (var->bits_per_pixel == 16 && var->red.length <= 5 &&
48 var->green.length <= 6 && var->blue.length <= 5) {
49 if (var->transp.length == 0) {
50 if (var->red.offset >= var->blue.offset)
51 return PIX_FMT_RGB565;
52 else
53 return PIX_FMT_BGR565;
54 }
55
56 if (var->transp.length == 1 && var->green.length <= 5) {
57 if (var->red.offset >= var->blue.offset)
58 return PIX_FMT_RGB1555;
59 else
60 return PIX_FMT_BGR1555;
61 }
62
63 /* fall through */
64 }
65
66 /*
67 * Check for 888/A888.
68 */
69 if (var->bits_per_pixel <= 32 && var->red.length <= 8 &&
70 var->green.length <= 8 && var->blue.length <= 8) {
71 if (var->bits_per_pixel == 24 && var->transp.length == 0) {
72 if (var->red.offset >= var->blue.offset)
73 return PIX_FMT_RGB888PACK;
74 else
75 return PIX_FMT_BGR888PACK;
76 }
77
78 if (var->bits_per_pixel == 32 && var->transp.length == 8) {
79 if (var->red.offset >= var->blue.offset)
80 return PIX_FMT_RGBA888;
81 else
82 return PIX_FMT_BGRA888;
83 } else {
84 if (var->red.offset >= var->blue.offset)
85 return PIX_FMT_RGB888UNPACK;
86 else
87 return PIX_FMT_BGR888UNPACK;
88 }
89
90 /* fall through */
91 }
92
93 return -EINVAL;
94}
95
96static void set_pix_fmt(struct fb_var_screeninfo *var, int pix_fmt)
97{
98 switch (pix_fmt) {
99 case PIX_FMT_RGB565:
100 var->bits_per_pixel = 16;
101 var->red.offset = 11; var->red.length = 5;
102 var->green.offset = 5; var->green.length = 6;
103 var->blue.offset = 0; var->blue.length = 5;
104 var->transp.offset = 0; var->transp.length = 0;
105 break;
106 case PIX_FMT_BGR565:
107 var->bits_per_pixel = 16;
108 var->red.offset = 0; var->red.length = 5;
109 var->green.offset = 5; var->green.length = 6;
110 var->blue.offset = 11; var->blue.length = 5;
111 var->transp.offset = 0; var->transp.length = 0;
112 break;
113 case PIX_FMT_RGB1555:
114 var->bits_per_pixel = 16;
115 var->red.offset = 10; var->red.length = 5;
116 var->green.offset = 5; var->green.length = 5;
117 var->blue.offset = 0; var->blue.length = 5;
118 var->transp.offset = 15; var->transp.length = 1;
119 break;
120 case PIX_FMT_BGR1555:
121 var->bits_per_pixel = 16;
122 var->red.offset = 0; var->red.length = 5;
123 var->green.offset = 5; var->green.length = 5;
124 var->blue.offset = 10; var->blue.length = 5;
125 var->transp.offset = 15; var->transp.length = 1;
126 break;
127 case PIX_FMT_RGB888PACK:
128 var->bits_per_pixel = 24;
129 var->red.offset = 16; var->red.length = 8;
130 var->green.offset = 8; var->green.length = 8;
131 var->blue.offset = 0; var->blue.length = 8;
132 var->transp.offset = 0; var->transp.length = 0;
133 break;
134 case PIX_FMT_BGR888PACK:
135 var->bits_per_pixel = 24;
136 var->red.offset = 0; var->red.length = 8;
137 var->green.offset = 8; var->green.length = 8;
138 var->blue.offset = 16; var->blue.length = 8;
139 var->transp.offset = 0; var->transp.length = 0;
140 break;
141 case PIX_FMT_RGBA888:
142 var->bits_per_pixel = 32;
143 var->red.offset = 16; var->red.length = 8;
144 var->green.offset = 8; var->green.length = 8;
145 var->blue.offset = 0; var->blue.length = 8;
146 var->transp.offset = 24; var->transp.length = 8;
147 break;
148 case PIX_FMT_BGRA888:
149 var->bits_per_pixel = 32;
150 var->red.offset = 0; var->red.length = 8;
151 var->green.offset = 8; var->green.length = 8;
152 var->blue.offset = 16; var->blue.length = 8;
153 var->transp.offset = 24; var->transp.length = 8;
154 break;
155 case PIX_FMT_PSEUDOCOLOR:
156 var->bits_per_pixel = 8;
157 var->red.offset = 0; var->red.length = 8;
158 var->green.offset = 0; var->green.length = 8;
159 var->blue.offset = 0; var->blue.length = 8;
160 var->transp.offset = 0; var->transp.length = 0;
161 break;
162 }
163}
164
165static void set_mode(struct pxa168fb_info *fbi, struct fb_var_screeninfo *var,
166 struct fb_videomode *mode, int pix_fmt, int ystretch)
167{
168 struct fb_info *info = fbi->info;
169
170 set_pix_fmt(var, pix_fmt);
171
172 var->xres = mode->xres;
173 var->yres = mode->yres;
174 var->xres_virtual = max(var->xres, var->xres_virtual);
175 if (ystretch)
176 var->yres_virtual = info->fix.smem_len /
177 (var->xres_virtual * (var->bits_per_pixel >> 3));
178 else
179 var->yres_virtual = max(var->yres, var->yres_virtual);
180 var->grayscale = 0;
181 var->accel_flags = FB_ACCEL_NONE;
182 var->pixclock = mode->pixclock;
183 var->left_margin = mode->left_margin;
184 var->right_margin = mode->right_margin;
185 var->upper_margin = mode->upper_margin;
186 var->lower_margin = mode->lower_margin;
187 var->hsync_len = mode->hsync_len;
188 var->vsync_len = mode->vsync_len;
189 var->sync = mode->sync;
190 var->vmode = FB_VMODE_NONINTERLACED;
191 var->rotate = FB_ROTATE_UR;
192}
193
194static int pxa168fb_check_var(struct fb_var_screeninfo *var,
195 struct fb_info *info)
196{
197 struct pxa168fb_info *fbi = info->par;
198 int pix_fmt;
199
200 /*
201 * Determine which pixel format we're going to use.
202 */
203 pix_fmt = determine_best_pix_fmt(var);
204 if (pix_fmt < 0)
205 return pix_fmt;
206 set_pix_fmt(var, pix_fmt);
207 fbi->pix_fmt = pix_fmt;
208
209 /*
210 * Basic geometry sanity checks.
211 */
212 if (var->xoffset + var->xres > var->xres_virtual)
213 return -EINVAL;
214 if (var->yoffset + var->yres > var->yres_virtual)
215 return -EINVAL;
216 if (var->xres + var->right_margin +
217 var->hsync_len + var->left_margin > 2048)
218 return -EINVAL;
219 if (var->yres + var->lower_margin +
220 var->vsync_len + var->upper_margin > 2048)
221 return -EINVAL;
222
223 /*
224 * Check size of framebuffer.
225 */
226 if (var->xres_virtual * var->yres_virtual *
227 (var->bits_per_pixel >> 3) > info->fix.smem_len)
228 return -EINVAL;
229
230 return 0;
231}
232
233/*
234 * The hardware clock divider has an integer and a fractional
235 * stage:
236 *
237 * clk2 = clk_in / integer_divider
238 * clk_out = clk2 * (1 - (fractional_divider >> 12))
239 *
240 * Calculate integer and fractional divider for given clk_in
241 * and clk_out.
242 */
243static void set_clock_divider(struct pxa168fb_info *fbi,
244 const struct fb_videomode *m)
245{
246 int divider_int;
247 int needed_pixclk;
248 u64 div_result;
249 u32 x = 0;
250
251 /*
252 * Notice: The field pixclock is used by linux fb
253 * is in pixel second. E.g. struct fb_videomode &
254 * struct fb_var_screeninfo
255 */
256
257 /*
258 * Check input values.
259 */
260 if (!m || !m->pixclock || !m->refresh) {
261 dev_err(fbi->dev, "Input refresh or pixclock is wrong.\n");
262 return;
263 }
264
265 /*
266 * Using PLL/AXI clock.
267 */
268 x = 0x80000000;
269
270 /*
271 * Calc divider according to refresh rate.
272 */
273 div_result = 1000000000000ll;
274 do_div(div_result, m->pixclock);
275 needed_pixclk = (u32)div_result;
276
277 divider_int = clk_get_rate(fbi->clk) / needed_pixclk;
278
279 /* check whether divisor is too small. */
280 if (divider_int < 2) {
281 dev_warn(fbi->dev, "Warning: clock source is too slow. "
282 "Try a smaller resolution\n");
283 divider_int = 2;
284 }
285
286 /*
287 * Set setting to reg.
288 */
289 x |= divider_int;
290 writel(x, fbi->reg_base + LCD_CFG_SCLK_DIV);
291}
292
293static void set_dma_control0(struct pxa168fb_info *fbi)
294{
295 u32 x;
296
297 /*
298 * Set bit to enable graphics DMA.
299 */
300 x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0);
301 x |= fbi->active ? 0x00000100 : 0;
302 fbi->active = 0;
303
304 /*
305 * If we are in a pseudo-color mode, we need to enable
306 * palette lookup.
307 */
308 if (fbi->pix_fmt == PIX_FMT_PSEUDOCOLOR)
309 x |= 0x10000000;
310
311 /*
312 * Configure hardware pixel format.
313 */
314 x &= ~(0xF << 16);
315 x |= (fbi->pix_fmt >> 1) << 16;
316
317 /*
318 * Check red and blue pixel swap.
319 * 1. source data swap
320 * 2. panel output data swap
321 */
322 x &= ~(1 << 12);
323 x |= ((fbi->pix_fmt & 1) ^ (fbi->panel_rbswap)) << 12;
324
325 writel(x, fbi->reg_base + LCD_SPU_DMA_CTRL0);
326}
327
328static void set_dma_control1(struct pxa168fb_info *fbi, int sync)
329{
330 u32 x;
331
332 /*
333 * Configure default bits: vsync triggers DMA, gated clock
334 * enable, power save enable, configure alpha registers to
335 * display 100% graphics, and set pixel command.
336 */
337 x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL1);
338 x |= 0x2032ff81;
339
340 /*
341 * We trigger DMA on the falling edge of vsync if vsync is
342 * active low, or on the rising edge if vsync is active high.
343 */
344 if (!(sync & FB_SYNC_VERT_HIGH_ACT))
345 x |= 0x08000000;
346
347 writel(x, fbi->reg_base + LCD_SPU_DMA_CTRL1);
348}
349
350static void set_graphics_start(struct fb_info *info, int xoffset, int yoffset)
351{
352 struct pxa168fb_info *fbi = info->par;
353 struct fb_var_screeninfo *var = &info->var;
354 int pixel_offset;
355 unsigned long addr;
356
357 pixel_offset = (yoffset * var->xres_virtual) + xoffset;
358
359 addr = fbi->fb_start_dma + (pixel_offset * (var->bits_per_pixel >> 3));
360 writel(addr, fbi->reg_base + LCD_CFG_GRA_START_ADDR0);
361}
362
363static void set_dumb_panel_control(struct fb_info *info)
364{
365 struct pxa168fb_info *fbi = info->par;
366 struct pxa168fb_mach_info *mi = fbi->dev->platform_data;
367 u32 x;
368
369 /*
370 * Preserve enable flag.
371 */
372 x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL) & 0x00000001;
373
374 x |= (fbi->is_blanked ? 0x7 : mi->dumb_mode) << 28;
375 x |= mi->gpio_output_data << 20;
376 x |= mi->gpio_output_mask << 12;
377 x |= mi->panel_rgb_reverse_lanes ? 0x00000080 : 0;
378 x |= mi->invert_composite_blank ? 0x00000040 : 0;
379 x |= (info->var.sync & FB_SYNC_COMP_HIGH_ACT) ? 0x00000020 : 0;
380 x |= mi->invert_pix_val_ena ? 0x00000010 : 0;
381 x |= (info->var.sync & FB_SYNC_VERT_HIGH_ACT) ? 0 : 0x00000008;
382 x |= (info->var.sync & FB_SYNC_HOR_HIGH_ACT) ? 0 : 0x00000004;
383 x |= mi->invert_pixclock ? 0x00000002 : 0;
384
385 writel(x, fbi->reg_base + LCD_SPU_DUMB_CTRL);
386}
387
388static void set_dumb_screen_dimensions(struct fb_info *info)
389{
390 struct pxa168fb_info *fbi = info->par;
391 struct fb_var_screeninfo *v = &info->var;
392 int x;
393 int y;
394
395 x = v->xres + v->right_margin + v->hsync_len + v->left_margin;
396 y = v->yres + v->lower_margin + v->vsync_len + v->upper_margin;
397
398 writel((y << 16) | x, fbi->reg_base + LCD_SPUT_V_H_TOTAL);
399}
400
401static int pxa168fb_set_par(struct fb_info *info)
402{
403 struct pxa168fb_info *fbi = info->par;
404 struct fb_var_screeninfo *var = &info->var;
405 struct fb_videomode mode;
406 u32 x;
407 struct pxa168fb_mach_info *mi;
408
409 mi = fbi->dev->platform_data;
410
411 /*
412 * Set additional mode info.
413 */
414 if (fbi->pix_fmt == PIX_FMT_PSEUDOCOLOR)
415 info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
416 else
417 info->fix.visual = FB_VISUAL_TRUECOLOR;
418 info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8;
419 info->fix.ypanstep = var->yres;
420
421 /*
422 * Disable panel output while we setup the display.
423 */
424 x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL);
425 writel(x & ~1, fbi->reg_base + LCD_SPU_DUMB_CTRL);
426
427 /*
428 * Configure global panel parameters.
429 */
430 writel((var->yres << 16) | var->xres,
431 fbi->reg_base + LCD_SPU_V_H_ACTIVE);
432
433 /*
434 * convert var to video mode
435 */
436 fb_var_to_videomode(&mode, &info->var);
437
438 /* Calculate clock divisor. */
439 set_clock_divider(fbi, &mode);
440
441 /* Configure dma ctrl regs. */
442 set_dma_control0(fbi);
443 set_dma_control1(fbi, info->var.sync);
444
445 /*
446 * Configure graphics DMA parameters.
447 */
448 x = readl(fbi->reg_base + LCD_CFG_GRA_PITCH);
449 x = (x & ~0xFFFF) | ((var->xres_virtual * var->bits_per_pixel) >> 3);
450 writel(x, fbi->reg_base + LCD_CFG_GRA_PITCH);
451 writel((var->yres << 16) | var->xres,
452 fbi->reg_base + LCD_SPU_GRA_HPXL_VLN);
453 writel((var->yres << 16) | var->xres,
454 fbi->reg_base + LCD_SPU_GZM_HPXL_VLN);
455
456 /*
457 * Configure dumb panel ctrl regs & timings.
458 */
459 set_dumb_panel_control(info);
460 set_dumb_screen_dimensions(info);
461
462 writel((var->left_margin << 16) | var->right_margin,
463 fbi->reg_base + LCD_SPU_H_PORCH);
464 writel((var->upper_margin << 16) | var->lower_margin,
465 fbi->reg_base + LCD_SPU_V_PORCH);
466
467 /*
468 * Re-enable panel output.
469 */
470 x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL);
471 writel(x | 1, fbi->reg_base + LCD_SPU_DUMB_CTRL);
472
473 return 0;
474}
475
476static unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf)
477{
478 return ((chan & 0xffff) >> (16 - bf->length)) << bf->offset;
479}
480
481static u32 to_rgb(u16 red, u16 green, u16 blue)
482{
483 red >>= 8;
484 green >>= 8;
485 blue >>= 8;
486
487 return (red << 16) | (green << 8) | blue;
488}
489
490static int
491pxa168fb_setcolreg(unsigned int regno, unsigned int red, unsigned int green,
492 unsigned int blue, unsigned int trans, struct fb_info *info)
493{
494 struct pxa168fb_info *fbi = info->par;
495 u32 val;
496
497 if (info->var.grayscale)
498 red = green = blue = (19595 * red + 38470 * green +
499 7471 * blue) >> 16;
500
501 if (info->fix.visual == FB_VISUAL_TRUECOLOR && regno < 16) {
502 val = chan_to_field(red, &info->var.red);
503 val |= chan_to_field(green, &info->var.green);
504 val |= chan_to_field(blue , &info->var.blue);
505 fbi->pseudo_palette[regno] = val;
506 }
507
508 if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR && regno < 256) {
509 val = to_rgb(red, green, blue);
510 writel(val, fbi->reg_base + LCD_SPU_SRAM_WRDAT);
511 writel(0x8300 | regno, fbi->reg_base + LCD_SPU_SRAM_CTRL);
512 }
513
514 return 0;
515}
516
517static int pxa168fb_blank(int blank, struct fb_info *info)
518{
519 struct pxa168fb_info *fbi = info->par;
520
521 fbi->is_blanked = (blank == FB_BLANK_UNBLANK) ? 0 : 1;
522 set_dumb_panel_control(info);
523
524 return 0;
525}
526
527static int pxa168fb_pan_display(struct fb_var_screeninfo *var,
528 struct fb_info *info)
529{
530 set_graphics_start(info, var->xoffset, var->yoffset);
531
532 return 0;
533}
534
535static irqreturn_t pxa168fb_handle_irq(int irq, void *dev_id)
536{
537 struct pxa168fb_info *fbi = dev_id;
538 u32 isr = readl(fbi->reg_base + SPU_IRQ_ISR);
539
540 if ((isr & GRA_FRAME_IRQ0_ENA_MASK)) {
541
542 writel(isr & (~GRA_FRAME_IRQ0_ENA_MASK),
543 fbi->reg_base + SPU_IRQ_ISR);
544
545 return IRQ_HANDLED;
546 }
547 return IRQ_NONE;
548}
549
550static struct fb_ops pxa168fb_ops = {
551 .owner = THIS_MODULE,
552 .fb_check_var = pxa168fb_check_var,
553 .fb_set_par = pxa168fb_set_par,
554 .fb_setcolreg = pxa168fb_setcolreg,
555 .fb_blank = pxa168fb_blank,
556 .fb_pan_display = pxa168fb_pan_display,
557 .fb_fillrect = cfb_fillrect,
558 .fb_copyarea = cfb_copyarea,
559 .fb_imageblit = cfb_imageblit,
560};
561
562static int __init pxa168fb_init_mode(struct fb_info *info,
563 struct pxa168fb_mach_info *mi)
564{
565 struct pxa168fb_info *fbi = info->par;
566 struct fb_var_screeninfo *var = &info->var;
567 int ret = 0;
568 u32 total_w, total_h, refresh;
569 u64 div_result;
570 const struct fb_videomode *m;
571
572 /*
573 * Set default value
574 */
575 refresh = DEFAULT_REFRESH;
576
577 /* try to find best video mode. */
578 m = fb_find_best_mode(&info->var, &info->modelist);
579 if (m)
580 fb_videomode_to_var(&info->var, m);
581
582 /* Init settings. */
583 var->xres_virtual = var->xres;
584 var->yres_virtual = info->fix.smem_len /
585 (var->xres_virtual * (var->bits_per_pixel >> 3));
586 dev_dbg(fbi->dev, "pxa168fb: find best mode: res = %dx%d\n",
587 var->xres, var->yres);
588
589 /* correct pixclock. */
590 total_w = var->xres + var->left_margin + var->right_margin +
591 var->hsync_len;
592 total_h = var->yres + var->upper_margin + var->lower_margin +
593 var->vsync_len;
594
595 div_result = 1000000000000ll;
596 do_div(div_result, total_w * total_h * refresh);
597 var->pixclock = (u32)div_result;
598
599 return ret;
600}
601
602static int __init pxa168fb_probe(struct platform_device *pdev)
603{
604 struct pxa168fb_mach_info *mi;
605 struct fb_info *info = 0;
606 struct pxa168fb_info *fbi = 0;
607 struct resource *res;
608 struct clk *clk;
609 int irq, ret;
610
611 mi = pdev->dev.platform_data;
612 if (mi == NULL) {
613 dev_err(&pdev->dev, "no platform data defined\n");
614 return -EINVAL;
615 }
616
617 clk = clk_get(&pdev->dev, "LCDCLK");
618 if (IS_ERR(clk)) {
619 dev_err(&pdev->dev, "unable to get LCDCLK");
620 return PTR_ERR(clk);
621 }
622
623 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
624 if (res == NULL) {
625 dev_err(&pdev->dev, "no IO memory defined\n");
626 return -ENOENT;
627 }
628
629 irq = platform_get_irq(pdev, 0);
630 if (irq < 0) {
631 dev_err(&pdev->dev, "no IRQ defined\n");
632 return -ENOENT;
633 }
634
635 info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev);
636 if (info == NULL) {
637 clk_put(clk);
638 return -ENOMEM;
639 }
640
641 /* Initialize private data */
642 fbi = info->par;
643 fbi->info = info;
644 fbi->clk = clk;
645 fbi->dev = info->dev = &pdev->dev;
646 fbi->panel_rbswap = mi->panel_rbswap;
647 fbi->is_blanked = 0;
648 fbi->active = mi->active;
649
650 /*
651 * Initialise static fb parameters.
652 */
653 info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK |
654 FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
655 info->node = -1;
656 strlcpy(info->fix.id, mi->id, 16);
657 info->fix.type = FB_TYPE_PACKED_PIXELS;
658 info->fix.type_aux = 0;
659 info->fix.xpanstep = 0;
660 info->fix.ypanstep = 0;
661 info->fix.ywrapstep = 0;
662 info->fix.mmio_start = res->start;
663 info->fix.mmio_len = res->end - res->start + 1;
664 info->fix.accel = FB_ACCEL_NONE;
665 info->fbops = &pxa168fb_ops;
666 info->pseudo_palette = fbi->pseudo_palette;
667
668 /*
669 * Map LCD controller registers.
670 */
671 fbi->reg_base = ioremap_nocache(res->start, resource_size(res));
672 if (fbi->reg_base == NULL) {
673 ret = -ENOMEM;
674 goto failed;
675 }
676
677 /*
678 * Allocate framebuffer memory.
679 */
680 info->fix.smem_len = PAGE_ALIGN(DEFAULT_FB_SIZE);
681
682 info->screen_base = dma_alloc_writecombine(fbi->dev, info->fix.smem_len,
683 &fbi->fb_start_dma, GFP_KERNEL);
684 if (info->screen_base == NULL) {
685 ret = -ENOMEM;
686 goto failed;
687 }
688
689 info->fix.smem_start = (unsigned long)fbi->fb_start_dma;
690
691 /*
692 * Set video mode according to platform data.
693 */
694 set_mode(fbi, &info->var, mi->modes, mi->pix_fmt, 1);
695
696 fb_videomode_to_modelist(mi->modes, mi->num_modes, &info->modelist);
697
698 /*
699 * init video mode data.
700 */
701 pxa168fb_init_mode(info, mi);
702
703 ret = pxa168fb_check_var(&info->var, info);
704 if (ret)
705 goto failed_free_fbmem;
706
707 /*
708 * Fill in sane defaults.
709 */
710 ret = pxa168fb_check_var(&info->var, info);
711 if (ret)
712 goto failed;
713
714 /*
715 * enable controller clock
716 */
717 clk_enable(fbi->clk);
718
719 pxa168fb_set_par(info);
720
721 /*
722 * Configure default register values.
723 */
724 writel(0, fbi->reg_base + LCD_SPU_BLANKCOLOR);
725 writel(mi->io_pin_allocation_mode, fbi->reg_base + SPU_IOPAD_CONTROL);
726 writel(0, fbi->reg_base + LCD_CFG_GRA_START_ADDR1);
727 writel(0, fbi->reg_base + LCD_SPU_GRA_OVSA_HPXL_VLN);
728 writel(0, fbi->reg_base + LCD_SPU_SRAM_PARA0);
729 writel(CFG_CSB_256x32(0x1)|CFG_CSB_256x24(0x1)|CFG_CSB_256x8(0x1),
730 fbi->reg_base + LCD_SPU_SRAM_PARA1);
731
732 /*
733 * Allocate color map.
734 */
735 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
736 ret = -ENOMEM;
737 goto failed_free_clk;
738 }
739
740 /*
741 * Register irq handler.
742 */
743 ret = request_irq(irq, pxa168fb_handle_irq, IRQF_SHARED,
744 info->fix.id, fbi);
745 if (ret < 0) {
746 dev_err(&pdev->dev, "unable to request IRQ\n");
747 ret = -ENXIO;
748 goto failed_free_cmap;
749 }
750
751 /*
752 * Enable GFX interrupt
753 */
754 writel(GRA_FRAME_IRQ0_ENA(0x1), fbi->reg_base + SPU_IRQ_ENA);
755
756 /*
757 * Register framebuffer.
758 */
759 ret = register_framebuffer(info);
760 if (ret < 0) {
761 dev_err(&pdev->dev, "Failed to register pxa168-fb: %d\n", ret);
762 ret = -ENXIO;
763 goto failed_free_irq;
764 }
765
766 platform_set_drvdata(pdev, fbi);
767 return 0;
768
769failed_free_irq:
770 free_irq(irq, fbi);
771failed_free_cmap:
772 fb_dealloc_cmap(&info->cmap);
773failed_free_clk:
774 clk_disable(fbi->clk);
775failed_free_fbmem:
776 dma_free_coherent(fbi->dev, info->fix.smem_len,
777 info->screen_base, fbi->fb_start_dma);
778failed:
779 kfree(info);
780 clk_put(clk);
781
782 dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret);
783 return ret;
784}
785
786static struct platform_driver pxa168fb_driver = {
787 .driver = {
788 .name = "pxa168-fb",
789 .owner = THIS_MODULE,
790 },
791 .probe = pxa168fb_probe,
792};
793
794static int __devinit pxa168fb_init(void)
795{
796 return platform_driver_register(&pxa168fb_driver);
797}
798module_init(pxa168fb_init);
799
800MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com> "
801 "Green Wan <gwan@marvell.com>");
802MODULE_DESCRIPTION("Framebuffer driver for PXA168/910");
803MODULE_LICENSE("GPL");
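
The divider programming in set_clock_divider() and the pixclock derivation in pxa168fb_init_mode() are two halves of one computation. A worked example with hypothetical numbers (the 312 MHz PLL/AXI source rate is illustrative, not a value from this patch):

/*
 * Panel with total timing 1056 x 525 at the 60 Hz default refresh:
 *
 *   pixclock      = 10^12 / (1056 * 525 * 60)  ~= 30062 ps
 *   needed_pixclk = 10^12 / 30062              ~= 33.26 MHz
 *   divider_int   = 312000000 / 33264586        = 9 (integer division)
 *
 * Bit 31 selects the PLL/AXI clock source, so LCD_CFG_SCLK_DIV is
 * written as 0x80000000 | 9 = 0x80000009.  Only the integer stage is
 * programmed; the fractional stage described in the comment stays at
 * zero.
 */
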
diff --git a/drivers/video/pxa168fb.h b/drivers/video/pxa168fb.h
new file mode 100644
index 00000000000..eee09279c52
--- /dev/null
+++ b/drivers/video/pxa168fb.h
@@ -0,0 +1,558 @@
1#ifndef __PXA168FB_H__
2#define __PXA168FB_H__
3
4/* ------------< LCD register >------------ */
5/* Video Frame 0&1 start address registers */
6#define LCD_SPU_DMA_START_ADDR_Y0 0x00C0
7#define LCD_SPU_DMA_START_ADDR_U0 0x00C4
8#define LCD_SPU_DMA_START_ADDR_V0 0x00C8
9#define LCD_CFG_DMA_START_ADDR_0 0x00CC /* Cmd address */
10#define LCD_SPU_DMA_START_ADDR_Y1 0x00D0
11#define LCD_SPU_DMA_START_ADDR_U1 0x00D4
12#define LCD_SPU_DMA_START_ADDR_V1 0x00D8
13#define LCD_CFG_DMA_START_ADDR_1 0x00DC /* Cmd address */
14
15/* YC & UV Pitch */
16#define LCD_SPU_DMA_PITCH_YC 0x00E0
17#define SPU_DMA_PITCH_C(c) ((c) << 16)
18#define SPU_DMA_PITCH_Y(y) (y)
19#define LCD_SPU_DMA_PITCH_UV 0x00E4
20#define SPU_DMA_PITCH_V(v) ((v) << 16)
21#define SPU_DMA_PITCH_U(u) (u)
22
23/* Video Starting Point on Screen Register */
24#define LCD_SPUT_DMA_OVSA_HPXL_VLN 0x00E8
25#define CFG_DMA_OVSA_VLN(y) ((y) << 16) /* 0~0xfff */
26#define CFG_DMA_OVSA_HPXL(x) (x) /* 0~0xfff */
27
28/* Video Size Register */
29#define LCD_SPU_DMA_HPXL_VLN 0x00EC
30#define CFG_DMA_VLN(y) ((y) << 16)
31#define CFG_DMA_HPXL(x) (x)
32
33/* Video Size After zooming Register */
34#define LCD_SPU_DZM_HPXL_VLN 0x00F0
35#define CFG_DZM_VLN(y) ((y) << 16)
36#define CFG_DZM_HPXL(x) (x)
37
38/* Graphic Frame 0&1 Starting Address Register */
39#define LCD_CFG_GRA_START_ADDR0 0x00F4
40#define LCD_CFG_GRA_START_ADDR1 0x00F8
41
42/* Graphic Frame Pitch */
43#define LCD_CFG_GRA_PITCH 0x00FC
44
45/* Graphic Starting Point on Screen Register */
46#define LCD_SPU_GRA_OVSA_HPXL_VLN 0x0100
47#define CFG_GRA_OVSA_VLN(y) ((y) << 16)
48#define CFG_GRA_OVSA_HPXL(x) (x)
49
50/* Graphic Size Register */
51#define LCD_SPU_GRA_HPXL_VLN 0x0104
52#define CFG_GRA_VLN(y) ((y) << 16)
53#define CFG_GRA_HPXL(x) (x)
54
55/* Graphic Size after Zooming Register */
56#define LCD_SPU_GZM_HPXL_VLN 0x0108
57#define CFG_GZM_VLN(y) ((y) << 16)
58#define CFG_GZM_HPXL(x) (x)
59
60/* HW Cursor Starting Point on Screen Register */
61#define LCD_SPU_HWC_OVSA_HPXL_VLN 0x010C
62#define CFG_HWC_OVSA_VLN(y) ((y) << 16)
63#define CFG_HWC_OVSA_HPXL(x) (x)
64
65/* HW Cursor Size */
66#define LCD_SPU_HWC_HPXL_VLN 0x0110
67#define CFG_HWC_VLN(y) ((y) << 16)
68#define CFG_HWC_HPXL(x) (x)
69
70/* Total Screen Size Register */
71#define LCD_SPUT_V_H_TOTAL 0x0114
72#define CFG_V_TOTAL(y) ((y) << 16)
73#define CFG_H_TOTAL(x) (x)
74
75/* Total Screen Active Size Register */
76#define LCD_SPU_V_H_ACTIVE 0x0118
77#define CFG_V_ACTIVE(y) ((y) << 16)
78#define CFG_H_ACTIVE(x) (x)
79
80/* Screen H&V Porch Register */
81#define LCD_SPU_H_PORCH 0x011C
82#define CFG_H_BACK_PORCH(b) ((b) << 16)
83#define CFG_H_FRONT_PORCH(f) (f)
84#define LCD_SPU_V_PORCH 0x0120
85#define CFG_V_BACK_PORCH(b) ((b) << 16)
86#define CFG_V_FRONT_PORCH(f) (f)
87
88/* Screen Blank Color Register */
89#define LCD_SPU_BLANKCOLOR 0x0124
90#define CFG_BLANKCOLOR_MASK 0x00FFFFFF
91#define CFG_BLANKCOLOR_R_MASK 0x000000FF
92#define CFG_BLANKCOLOR_G_MASK 0x0000FF00
93#define CFG_BLANKCOLOR_B_MASK 0x00FF0000
94
95/* HW Cursor Color 1&2 Register */
96#define LCD_SPU_ALPHA_COLOR1 0x0128
97#define CFG_HWC_COLOR1 0x00FFFFFF
98#define CFG_HWC_COLOR1_R(red) ((red) << 16)
99#define CFG_HWC_COLOR1_G(green) ((green) << 8)
100#define CFG_HWC_COLOR1_B(blue) (blue)
101#define CFG_HWC_COLOR1_R_MASK 0x000000FF
102#define CFG_HWC_COLOR1_G_MASK 0x0000FF00
103#define CFG_HWC_COLOR1_B_MASK 0x00FF0000
104#define LCD_SPU_ALPHA_COLOR2 0x012C
105#define CFG_HWC_COLOR2 0x00FFFFFF
106#define CFG_HWC_COLOR2_R_MASK 0x000000FF
107#define CFG_HWC_COLOR2_G_MASK 0x0000FF00
108#define CFG_HWC_COLOR2_B_MASK 0x00FF0000
109
110/* Video YUV Color Key Control */
111#define LCD_SPU_COLORKEY_Y 0x0130
112#define CFG_CKEY_Y2(y2) ((y2) << 24)
113#define CFG_CKEY_Y2_MASK 0xFF000000
114#define CFG_CKEY_Y1(y1) ((y1) << 16)
115#define CFG_CKEY_Y1_MASK 0x00FF0000
116#define CFG_CKEY_Y(y) ((y) << 8)
117#define CFG_CKEY_Y_MASK 0x0000FF00
118#define CFG_ALPHA_Y(y) (y)
119#define CFG_ALPHA_Y_MASK 0x000000FF
120#define LCD_SPU_COLORKEY_U 0x0134
121#define CFG_CKEY_U2(u2) ((u2) << 24)
122#define CFG_CKEY_U2_MASK 0xFF000000
123#define CFG_CKEY_U1(u1) ((u1) << 16)
124#define CFG_CKEY_U1_MASK 0x00FF0000
125#define CFG_CKEY_U(u) ((u) << 8)
126#define CFG_CKEY_U_MASK 0x0000FF00
127#define CFG_ALPHA_U(u) (u)
128#define CFG_ALPHA_U_MASK 0x000000FF
129#define LCD_SPU_COLORKEY_V 0x0138
130#define CFG_CKEY_V2(v2) ((v2) << 24)
131#define CFG_CKEY_V2_MASK 0xFF000000
132#define CFG_CKEY_V1(v1) ((v1) << 16)
133#define CFG_CKEY_V1_MASK 0x00FF0000
134#define CFG_CKEY_V(v) ((v) << 8)
135#define CFG_CKEY_V_MASK 0x0000FF00
136#define CFG_ALPHA_V(v) (v)
137#define CFG_ALPHA_V_MASK 0x000000FF
138
139/* SPI Read Data Register */
140#define LCD_SPU_SPI_RXDATA 0x0140
141
142/* Smart Panel Read Data Register */
143#define LCD_SPU_ISA_RSDATA 0x0144
144#define ISA_RXDATA_16BIT_1_DATA_MASK 0x000000FF
145#define ISA_RXDATA_16BIT_2_DATA_MASK 0x0000FF00
146#define ISA_RXDATA_16BIT_3_DATA_MASK 0x00FF0000
147#define ISA_RXDATA_16BIT_4_DATA_MASK 0xFF000000
148#define ISA_RXDATA_32BIT_1_DATA_MASK 0x00FFFFFF
149
150/* HWC SRAM Read Data Register */
151#define LCD_SPU_HWC_RDDAT 0x0158
152
153/* Gamma Table SRAM Read Data Register */
154#define LCD_SPU_GAMMA_RDDAT 0x015c
155#define CFG_GAMMA_RDDAT_MASK 0x000000FF
156
157/* Palette Table SRAM Read Data Register */
158#define LCD_SPU_PALETTE_RDDAT 0x0160
159#define CFG_PALETTE_RDDAT_MASK 0x00FFFFFF
160
161/* I/O Pads Input Read Only Register */
162#define LCD_SPU_IOPAD_IN 0x0178
163#define CFG_IOPAD_IN_MASK 0x0FFFFFFF
164
165/* Reserved Read Only Registers */
166#define LCD_CFG_RDREG5F 0x017C
167#define IRE_FRAME_CNT_MASK 0x000000C0
168#define IPE_FRAME_CNT_MASK 0x00000030
169#define GRA_FRAME_CNT_MASK 0x0000000C /* Graphic */
170#define DMA_FRAME_CNT_MASK 0x00000003 /* Video */
171
172/* SPI Control Register. */
173#define LCD_SPU_SPI_CTRL 0x0180
174#define CFG_SCLKCNT(div) ((div) << 24) /* 0xFF~0x2 */
175#define CFG_SCLKCNT_MASK 0xFF000000
176#define CFG_RXBITS(rx) ((rx) << 16) /* 0x1F~0x1 */
177#define CFG_RXBITS_MASK 0x00FF0000
178#define CFG_TXBITS(tx) ((tx) << 8) /* 0x1F~0x1 */
179#define CFG_TXBITS_MASK 0x0000FF00
180#define CFG_CLKINV(clk) ((clk) << 7)
181#define CFG_CLKINV_MASK 0x00000080
182#define CFG_KEEPXFER(transfer) ((transfer) << 6)
183#define CFG_KEEPXFER_MASK 0x00000040
184#define CFG_RXBITSTO0(rx) ((rx) << 5)
185#define CFG_RXBITSTO0_MASK 0x00000020
186#define CFG_TXBITSTO0(tx) ((tx) << 4)
187#define CFG_TXBITSTO0_MASK 0x00000010
188#define CFG_SPI_ENA(spi) ((spi) << 3)
189#define CFG_SPI_ENA_MASK 0x00000008
190#define CFG_SPI_SEL(spi) ((spi) << 2)
191#define CFG_SPI_SEL_MASK 0x00000004
192#define CFG_SPI_3W4WB(wire) ((wire) << 1)
193#define CFG_SPI_3W4WB_MASK 0x00000002
194#define CFG_SPI_START(start) (start)
195#define CFG_SPI_START_MASK 0x00000001
196
197/* SPI Tx Data Register */
198#define LCD_SPU_SPI_TXDATA 0x0184
199
200/*
201 1. Smart Panel 8-bit Bus Control Register.
202 2. AHB Slave Path Data Port Register
203*/
204#define LCD_SPU_SMPN_CTRL 0x0188
205
206/* DMA Control 0 Register */
207#define LCD_SPU_DMA_CTRL0 0x0190
208#define CFG_NOBLENDING(nb) ((nb) << 31)
209#define CFG_NOBLENDING_MASK 0x80000000
210#define CFG_GAMMA_ENA(gn) ((gn) << 30)
211#define CFG_GAMMA_ENA_MASK 0x40000000
212#define CFG_CBSH_ENA(cn) ((cn) << 29)
213#define CFG_CBSH_ENA_MASK 0x20000000
214#define CFG_PALETTE_ENA(pn) ((pn) << 28)
215#define CFG_PALETTE_ENA_MASK 0x10000000
216#define CFG_ARBFAST_ENA(an) ((an) << 27)
217#define CFG_ARBFAST_ENA_MASK 0x08000000
218#define CFG_HWC_1BITMOD(mode) ((mode) << 26)
219#define CFG_HWC_1BITMOD_MASK 0x04000000
220#define CFG_HWC_1BITENA(mn) ((mn) << 25)
221#define CFG_HWC_1BITENA_MASK 0x02000000
222#define CFG_HWC_ENA(cn) ((cn) << 24)
223#define CFG_HWC_ENA_MASK 0x01000000
224#define CFG_DMAFORMAT(dmaformat) ((dmaformat) << 20)
225#define CFG_DMAFORMAT_MASK 0x00F00000
226#define CFG_GRAFORMAT(graformat) ((graformat) << 16)
227#define CFG_GRAFORMAT_MASK 0x000F0000
228/* for graphic part */
229#define CFG_GRA_FTOGGLE(toggle) ((toggle) << 15)
230#define CFG_GRA_FTOGGLE_MASK 0x00008000
231#define CFG_GRA_HSMOOTH(smooth) ((smooth) << 14)
232#define CFG_GRA_HSMOOTH_MASK 0x00004000
233#define CFG_GRA_TSTMODE(test) ((test) << 13)
234#define CFG_GRA_TSTMODE_MASK 0x00002000
235#define CFG_GRA_SWAPRB(swap) ((swap) << 12)
236#define CFG_GRA_SWAPRB_MASK 0x00001000
237#define CFG_GRA_SWAPUV(swap) ((swap) << 11)
238#define CFG_GRA_SWAPUV_MASK 0x00000800
239#define CFG_GRA_SWAPYU(swap) ((swap) << 10)
240#define CFG_GRA_SWAPYU_MASK 0x00000400
241#define CFG_YUV2RGB_GRA(cvrt) ((cvrt) << 9)
242#define CFG_YUV2RGB_GRA_MASK 0x00000200
243#define CFG_GRA_ENA(gra) ((gra) << 8)
244#define CFG_GRA_ENA_MASK 0x00000100
245/* for video part */
246#define CFG_DMA_FTOGGLE(toggle) ((toggle) << 7)
247#define CFG_DMA_FTOGGLE_MASK 0x00000080
248#define CFG_DMA_HSMOOTH(smooth) ((smooth) << 6)
249#define CFG_DMA_HSMOOTH_MASK 0x00000040
250#define CFG_DMA_TSTMODE(test) ((test) << 5)
251#define CFG_DMA_TSTMODE_MASK 0x00000020
252#define CFG_DMA_SWAPRB(swap) ((swap) << 4)
253#define CFG_DMA_SWAPRB_MASK 0x00000010
254#define CFG_DMA_SWAPUV(swap) ((swap) << 3)
255#define CFG_DMA_SWAPUV_MASK 0x00000008
256#define CFG_DMA_SWAPYU(swap) ((swap) << 2)
257#define CFG_DMA_SWAPYU_MASK 0x00000004
258#define CFG_DMA_SWAP_MASK 0x0000001C
259#define CFG_YUV2RGB_DMA(cvrt) ((cvrt) << 1)
260#define CFG_YUV2RGB_DMA_MASK 0x00000002
261#define CFG_DMA_ENA(video) (video)
262#define CFG_DMA_ENA_MASK 0x00000001
263
264/* DMA Control 1 Register */
265#define LCD_SPU_DMA_CTRL1 0x0194
266#define CFG_FRAME_TRIG(trig) ((trig) << 31)
267#define CFG_FRAME_TRIG_MASK 0x80000000
268#define CFG_VSYNC_TRIG(trig) ((trig) << 28)
269#define CFG_VSYNC_TRIG_MASK 0x70000000
270#define CFG_VSYNC_INV(inv) ((inv) << 27)
271#define CFG_VSYNC_INV_MASK 0x08000000
272#define CFG_COLOR_KEY_MODE(cmode) ((cmode) << 24)
273#define CFG_COLOR_KEY_MASK 0x07000000
274#define CFG_CARRY(carry) ((carry) << 23)
275#define CFG_CARRY_MASK 0x00800000
276#define CFG_LNBUF_ENA(lnbuf) ((lnbuf) << 22)
277#define CFG_LNBUF_ENA_MASK 0x00400000
278#define CFG_GATED_ENA(gated) ((gated) << 21)
279#define CFG_GATED_ENA_MASK 0x00200000
280#define CFG_PWRDN_ENA(power) ((power) << 20)
281#define CFG_PWRDN_ENA_MASK 0x00100000
282#define CFG_DSCALE(dscale) ((dscale) << 18)
283#define CFG_DSCALE_MASK 0x000C0000
284#define CFG_ALPHA_MODE(amode) ((amode) << 16)
285#define CFG_ALPHA_MODE_MASK 0x00030000
286#define CFG_ALPHA(alpha) ((alpha) << 8)
287#define CFG_ALPHA_MASK 0x0000FF00
288#define CFG_PXLCMD(pxlcmd) (pxlcmd)
289#define CFG_PXLCMD_MASK 0x000000FF
290
291/* SRAM Control Register */
292#define LCD_SPU_SRAM_CTRL 0x0198
293#define CFG_SRAM_INIT_WR_RD(mode) ((mode) << 14)
294#define CFG_SRAM_INIT_WR_RD_MASK 0x0000C000
295#define CFG_SRAM_ADDR_LCDID(id) ((id) << 8)
296#define CFG_SRAM_ADDR_LCDID_MASK 0x00000F00
297#define CFG_SRAM_ADDR(addr) (addr)
298#define CFG_SRAM_ADDR_MASK 0x000000FF
299
300/* SRAM Write Data Register */
301#define LCD_SPU_SRAM_WRDAT 0x019C
302
303/* SRAM RTC/WTC Control Register */
304#define LCD_SPU_SRAM_PARA0 0x01A0
305
306/* SRAM Power Down Control Register */
307#define LCD_SPU_SRAM_PARA1 0x01A4
308#define CFG_CSB_256x32(hwc) ((hwc) << 15) /* HWC */
309#define CFG_CSB_256x32_MASK 0x00008000
310#define CFG_CSB_256x24(palette) ((palette) << 14) /* Palette */
311#define CFG_CSB_256x24_MASK 0x00004000
312#define CFG_CSB_256x8(gamma) ((gamma) << 13) /* Gamma */
313#define CFG_CSB_256x8_MASK 0x00002000
314#define CFG_PDWN256x32(pdwn) ((pdwn) << 7) /* HWC */
315#define CFG_PDWN256x32_MASK 0x00000080
316#define CFG_PDWN256x24(pdwn) ((pdwn) << 6) /* Palette */
317#define CFG_PDWN256x24_MASK 0x00000040
318#define CFG_PDWN256x8(pdwn) ((pdwn) << 5) /* Gamma */
319#define CFG_PDWN256x8_MASK 0x00000020
320#define CFG_PDWN32x32(pdwn) ((pdwn) << 3)
321#define CFG_PDWN32x32_MASK 0x00000008
322#define CFG_PDWN16x66(pdwn) ((pdwn) << 2)
323#define CFG_PDWN16x66_MASK 0x00000004
324#define CFG_PDWN32x66(pdwn) ((pdwn) << 1)
325#define CFG_PDWN32x66_MASK 0x00000002
326#define CFG_PDWN64x66(pdwn) (pdwn)
327#define CFG_PDWN64x66_MASK 0x00000001
328
329/* Smart or Dumb Panel Clock Divider */
330#define LCD_CFG_SCLK_DIV 0x01A8
331#define SCLK_SOURCE_SELECT(src) ((src) << 31)
332#define SCLK_SOURCE_SELECT_MASK 0x80000000
333#define CLK_FRACDIV(frac) ((frac) << 16)
334#define CLK_FRACDIV_MASK 0x0FFF0000
335#define CLK_INT_DIV(div) (div)
336#define CLK_INT_DIV_MASK 0x0000FFFF
337
338/* Video Contrast Register */
339#define LCD_SPU_CONTRAST 0x01AC
340#define CFG_BRIGHTNESS(bright) ((bright) << 16)
341#define CFG_BRIGHTNESS_MASK 0xFFFF0000
342#define CFG_CONTRAST(contrast) (contrast)
343#define CFG_CONTRAST_MASK 0x0000FFFF
344
345/* Video Saturation Register */
346#define LCD_SPU_SATURATION 0x01B0
347#define CFG_C_MULTS(mult) ((mult) << 16)
348#define CFG_C_MULTS_MASK 0xFFFF0000
349#define CFG_SATURATION(sat) (sat)
350#define CFG_SATURATION_MASK 0x0000FFFF
351
352/* Video Hue Adjust Register */
353#define LCD_SPU_CBSH_HUE 0x01B4
354#define CFG_SIN0(sin0) ((sin0) << 16)
355#define CFG_SIN0_MASK 0xFFFF0000
356#define CFG_COS0(cos0)			(cos0)
357#define CFG_COS0_MASK 0x0000FFFF
358
359/* Dumb LCD Panel Control Register */
360#define LCD_SPU_DUMB_CTRL 0x01B8
361#define CFG_DUMBMODE(mode) ((mode) << 28)
362#define CFG_DUMBMODE_MASK 0xF0000000
363#define CFG_LCDGPIO_O(data) ((data) << 20)
364#define CFG_LCDGPIO_O_MASK 0x0FF00000
365#define CFG_LCDGPIO_ENA(gpio) ((gpio) << 12)
366#define CFG_LCDGPIO_ENA_MASK 0x000FF000
367#define CFG_BIAS_OUT(bias) ((bias) << 8)
368#define CFG_BIAS_OUT_MASK 0x00000100
369#define CFG_REVERSE_RGB(rRGB) ((rRGB) << 7)
370#define CFG_REVERSE_RGB_MASK 0x00000080
371#define CFG_INV_COMPBLANK(blank) ((blank) << 6)
372#define CFG_INV_COMPBLANK_MASK 0x00000040
373#define CFG_INV_COMPSYNC(sync) ((sync) << 5)
374#define CFG_INV_COMPSYNC_MASK 0x00000020
375#define CFG_INV_HENA(hena) ((hena) << 4)
376#define CFG_INV_HENA_MASK 0x00000010
377#define CFG_INV_VSYNC(vsync) ((vsync) << 3)
378#define CFG_INV_VSYNC_MASK 0x00000008
379#define CFG_INV_HSYNC(hsync) ((hsync) << 2)
380#define CFG_INV_HSYNC_MASK 0x00000004
381#define CFG_INV_PCLK(pclk) ((pclk) << 1)
382#define CFG_INV_PCLK_MASK 0x00000002
383#define CFG_DUMB_ENA(dumb) (dumb)
384#define CFG_DUMB_ENA_MASK 0x00000001
385
386/* LCD I/O Pads Control Register */
387#define SPU_IOPAD_CONTROL 0x01BC
388#define CFG_GRA_VM_ENA(vm) ((vm) << 15) /* gfx */
389#define CFG_GRA_VM_ENA_MASK 0x00008000
390#define CFG_DMA_VM_ENA(vm) ((vm) << 13) /* video */
391#define CFG_DMA_VM_ENA_MASK 0x00002000
392#define CFG_CMD_VM_ENA(vm)		((vm) << 11)
393#define CFG_CMD_VM_ENA_MASK 0x00000800
394#define CFG_CSC(csc) ((csc) << 8) /* csc */
395#define CFG_CSC_MASK 0x00000300
396#define CFG_AXICTRL(axi) ((axi) << 4)
397#define CFG_AXICTRL_MASK 0x000000F0
398#define CFG_IOPADMODE(iopad) (iopad)
399#define CFG_IOPADMODE_MASK 0x0000000F
400
401/* LCD Interrupt Control Register */
402#define SPU_IRQ_ENA 0x01C0
403#define DMA_FRAME_IRQ0_ENA(irq) ((irq) << 31)
404#define DMA_FRAME_IRQ0_ENA_MASK 0x80000000
405#define DMA_FRAME_IRQ1_ENA(irq) ((irq) << 30)
406#define DMA_FRAME_IRQ1_ENA_MASK 0x40000000
407#define DMA_FF_UNDERFLOW_ENA(ff) ((ff) << 29)
408#define DMA_FF_UNDERFLOW_ENA_MASK 0x20000000
409#define GRA_FRAME_IRQ0_ENA(irq) ((irq) << 27)
410#define GRA_FRAME_IRQ0_ENA_MASK 0x08000000
411#define GRA_FRAME_IRQ1_ENA(irq) ((irq) << 26)
412#define GRA_FRAME_IRQ1_ENA_MASK 0x04000000
413#define GRA_FF_UNDERFLOW_ENA(ff) ((ff) << 25)
414#define GRA_FF_UNDERFLOW_ENA_MASK 0x02000000
415#define VSYNC_IRQ_ENA(vsync_irq) ((vsync_irq) << 23)
416#define VSYNC_IRQ_ENA_MASK 0x00800000
417#define DUMB_FRAMEDONE_ENA(fdone) ((fdone) << 22)
418#define DUMB_FRAMEDONE_ENA_MASK 0x00400000
419#define TWC_FRAMEDONE_ENA(fdone) ((fdone) << 21)
420#define TWC_FRAMEDONE_ENA_MASK 0x00200000
421#define HWC_FRAMEDONE_ENA(fdone) ((fdone) << 20)
422#define HWC_FRAMEDONE_ENA_MASK 0x00100000
423#define SLV_IRQ_ENA(irq) ((irq) << 19)
424#define SLV_IRQ_ENA_MASK 0x00080000
425#define SPI_IRQ_ENA(irq) ((irq) << 18)
426#define SPI_IRQ_ENA_MASK 0x00040000
427#define PWRDN_IRQ_ENA(irq) ((irq) << 17)
428#define PWRDN_IRQ_ENA_MASK 0x00020000
429#define ERR_IRQ_ENA(irq) ((irq) << 16)
430#define ERR_IRQ_ENA_MASK 0x00010000
431#define CLEAN_SPU_IRQ_ISR(irq) (irq)
432#define CLEAN_SPU_IRQ_ISR_MASK 0x0000FFFF
433
434/* LCD Interrupt Status Register */
435#define SPU_IRQ_ISR 0x01C4
436#define DMA_FRAME_IRQ0(irq) ((irq) << 31)
437#define DMA_FRAME_IRQ0_MASK 0x80000000
438#define DMA_FRAME_IRQ1(irq) ((irq) << 30)
439#define DMA_FRAME_IRQ1_MASK 0x40000000
440#define DMA_FF_UNDERFLOW(ff) ((ff) << 29)
441#define DMA_FF_UNDERFLOW_MASK 0x20000000
442#define GRA_FRAME_IRQ0(irq) ((irq) << 27)
443#define GRA_FRAME_IRQ0_MASK 0x08000000
444#define GRA_FRAME_IRQ1(irq) ((irq) << 26)
445#define GRA_FRAME_IRQ1_MASK 0x04000000
446#define GRA_FF_UNDERFLOW(ff) ((ff) << 25)
447#define GRA_FF_UNDERFLOW_MASK 0x02000000
448#define VSYNC_IRQ(vsync_irq) ((vsync_irq) << 23)
449#define VSYNC_IRQ_MASK 0x00800000
450#define DUMB_FRAMEDONE(fdone) ((fdone) << 22)
451#define DUMB_FRAMEDONE_MASK 0x00400000
452#define TWC_FRAMEDONE(fdone) ((fdone) << 21)
453#define TWC_FRAMEDONE_MASK 0x00200000
454#define HWC_FRAMEDONE(fdone) ((fdone) << 20)
455#define HWC_FRAMEDONE_MASK 0x00100000
456#define SLV_IRQ(irq) ((irq) << 19)
457#define SLV_IRQ_MASK 0x00080000
458#define SPI_IRQ(irq) ((irq) << 18)
459#define SPI_IRQ_MASK 0x00040000
460#define PWRDN_IRQ(irq) ((irq) << 17)
461#define PWRDN_IRQ_MASK 0x00020000
462#define ERR_IRQ(irq) ((irq) << 16)
463#define ERR_IRQ_MASK 0x00010000
464/* read-only */
465#define DMA_FRAME_IRQ0_LEVEL_MASK 0x00008000
466#define DMA_FRAME_IRQ1_LEVEL_MASK 0x00004000
467#define DMA_FRAME_CNT_ISR_MASK 0x00003000
468#define GRA_FRAME_IRQ0_LEVEL_MASK 0x00000800
469#define GRA_FRAME_IRQ1_LEVEL_MASK 0x00000400
470#define GRA_FRAME_CNT_ISR_MASK 0x00000300
471#define VSYNC_IRQ_LEVEL_MASK 0x00000080
472#define DUMB_FRAMEDONE_LEVEL_MASK 0x00000040
473#define TWC_FRAMEDONE_LEVEL_MASK 0x00000020
474#define HWC_FRAMEDONE_LEVEL_MASK 0x00000010
475#define SLV_FF_EMPTY_MASK 0x00000008
476#define DMA_FF_ALLEMPTY_MASK 0x00000004
477#define GRA_FF_ALLEMPTY_MASK 0x00000002
478#define PWRDN_IRQ_LEVEL_MASK 0x00000001
479
480
481/*
482 * Video memory color formats for the DMA Control 0 register,
483 * DMA0 bits [23:20]
484 */
485#define VMODE_RGB565 0x0
486#define VMODE_RGB1555 0x1
487#define VMODE_RGB888PACKED 0x2
488#define VMODE_RGB888UNPACKED 0x3
489#define VMODE_RGBA888 0x4
490#define VMODE_YUV422PACKED 0x5
491#define VMODE_YUV422PLANAR 0x6
492#define VMODE_YUV420PLANAR 0x7
493#define VMODE_SMPNCMD 0x8
494#define VMODE_PALETTE4BIT 0x9
495#define VMODE_PALETTE8BIT 0xa
496#define VMODE_RESERVED 0xb
497
498/*
499 * Graphic memory color formats for the DMA Control 0 register,
500 * DMA0 bits [19:16]
501 */
502#define GMODE_RGB565 0x0
503#define GMODE_RGB1555 0x1
504#define GMODE_RGB888PACKED 0x2
505#define GMODE_RGB888UNPACKED 0x3
506#define GMODE_RGBA888 0x4
507#define GMODE_YUV422PACKED 0x5
508#define GMODE_YUV422PLANAR 0x6
509#define GMODE_YUV420PLANAR 0x7
510#define GMODE_SMPNCMD 0x8
511#define GMODE_PALETTE4BIT 0x9
512#define GMODE_PALETTE8BIT 0xa
513#define GMODE_RESERVED 0xb
514
515/*
516 * Bit locations for the DMA Control 1 register
517 */
518#define DMA1_FRAME_TRIG 31 /* bit location */
519#define DMA1_VSYNC_MODE 28
520#define DMA1_VSYNC_INV 27
521#define DMA1_CKEY 24
522#define DMA1_CARRY 23
523#define DMA1_LNBUF_ENA 22
524#define DMA1_GATED_ENA 21
525#define DMA1_PWRDN_ENA 20
526#define DMA1_DSCALE 18
527#define DMA1_ALPHA_MODE 16
528#define DMA1_ALPHA		8
529#define DMA1_PXLCMD		0
530
531/*
532 * Dumb panel output mode values for the
533 * Dumb Panel Control register, bits [31:28]
534 */
535#define DUMB16_RGB565_0 0x0
536#define DUMB16_RGB565_1 0x1
537#define DUMB18_RGB666_0 0x2
538#define DUMB18_RGB666_1 0x3
539#define DUMB12_RGB444_0 0x4
540#define DUMB12_RGB444_1 0x5
541#define DUMB24_RGB888_0 0x6
542#define DUMB_BLANK 0x7
543
544/*
545 * I/O pin allocation mode values for the
546 * LCD I/O Pads Control register, bits [3:0]
547 */
548#define IOPAD_DUMB24 0x0
549#define IOPAD_DUMB18SPI 0x1
550#define IOPAD_DUMB18GPIO 0x2
551#define IOPAD_DUMB16SPI 0x3
552#define IOPAD_DUMB16GPIO 0x4
553#define IOPAD_DUMB12 0x5
554#define IOPAD_SMART18SPI 0x6
555#define IOPAD_SMART16SPI 0x7
556#define IOPAD_SMART8BOTH 0x8
557
558#endif /* __PXA168FB_H__ */
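
The field macros above are meant to be OR-ed together into a single 32-bit register write. A minimal illustrative sketch, assuming an ioremap'd register base held in a hypothetical fbi->reg_base and a 480x272 panel (none of this is in the patch itself):

	/* hypothetical: program the graphic layer size and enable RGB565 */
	writel(CFG_GRA_VLN(272) | CFG_GRA_HPXL(480),
	       fbi->reg_base + LCD_SPU_GRA_HPXL_VLN);
	writel(CFG_V_ACTIVE(272) | CFG_H_ACTIVE(480),
	       fbi->reg_base + LCD_SPU_V_H_ACTIVE);
	writel(CFG_GRAFORMAT(GMODE_RGB565) | CFG_GRA_ENA(1),
	       fbi->reg_base + LCD_SPU_DMA_CTRL0);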
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 5e9c6302433..d3a568e6b16 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -947,7 +947,8 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
947 int win; 947 int win;
948 948
949 for (win = 0; win <= S3C_FB_MAX_WIN; win++) 949 for (win = 0; win <= S3C_FB_MAX_WIN; win++)
950 s3c_fb_release_win(sfb, sfb->windows[win]); 950 if (sfb->windows[win])
951 s3c_fb_release_win(sfb, sfb->windows[win]);
951 952
952 iounmap(sfb->regs); 953 iounmap(sfb->regs);
953 954
@@ -985,11 +986,20 @@ static int s3c_fb_suspend(struct platform_device *pdev, pm_message_t state)
985static int s3c_fb_resume(struct platform_device *pdev) 986static int s3c_fb_resume(struct platform_device *pdev)
986{ 987{
987 struct s3c_fb *sfb = platform_get_drvdata(pdev); 988 struct s3c_fb *sfb = platform_get_drvdata(pdev);
989 struct s3c_fb_platdata *pd = sfb->pdata;
988 struct s3c_fb_win *win; 990 struct s3c_fb_win *win;
989 int win_no; 991 int win_no;
990 992
991 clk_enable(sfb->bus_clk); 993 clk_enable(sfb->bus_clk);
992 994
995 /* setup registers */
996 writel(pd->vidcon1, sfb->regs + VIDCON1);
997
998 /* zero all windows before we do anything */
999 for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++)
1000 s3c_fb_clear_win(sfb, win_no);
1001
1002 /* restore framebuffers */
993 for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) { 1003 for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) {
994 win = sfb->windows[win_no]; 1004 win = sfb->windows[win_no];
995 if (!win) 1005 if (!win)
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 92ea0ab44ce..f10d2fbeda0 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -47,6 +47,7 @@ struct sh_mobile_lcdc_priv {
47#endif 47#endif
48 unsigned long lddckr; 48 unsigned long lddckr;
49 struct sh_mobile_lcdc_chan ch[2]; 49 struct sh_mobile_lcdc_chan ch[2];
50 int started;
50}; 51};
51 52
52/* shared registers */ 53/* shared registers */
@@ -451,6 +452,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
451 452
452 /* start the lcdc */ 453 /* start the lcdc */
453 sh_mobile_lcdc_start_stop(priv, 1); 454 sh_mobile_lcdc_start_stop(priv, 1);
455 priv->started = 1;
454 456
455 /* tell the board code to enable the panel */ 457 /* tell the board code to enable the panel */
456 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { 458 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
@@ -493,7 +495,10 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
493 } 495 }
494 496
495 /* stop the lcdc */ 497 /* stop the lcdc */
496 sh_mobile_lcdc_start_stop(priv, 0); 498 if (priv->started) {
499 sh_mobile_lcdc_start_stop(priv, 0);
500 priv->started = 0;
501 }
497 502
498 /* stop clocks */ 503 /* stop clocks */
499 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) 504 for (k = 0; k < ARRAY_SIZE(priv->ch); k++)
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 421770b5e6a..ca5b4643a40 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -45,7 +45,7 @@ static struct fb_fix_screeninfo uvesafb_fix __devinitdata = {
45static int mtrr __devinitdata = 3; /* enable mtrr by default */ 45static int mtrr __devinitdata = 3; /* enable mtrr by default */
46static int blank = 1; /* enable blanking by default */ 46static int blank = 1; /* enable blanking by default */
47static int ypan = 1; /* 0: scroll, 1: ypan, 2: ywrap */ 47static int ypan = 1; /* 0: scroll, 1: ypan, 2: ywrap */
48static int pmi_setpal __devinitdata = 1; /* use PMI for palette changes */ 48static bool pmi_setpal __devinitdata = true; /* use PMI for palette changes */
49static int nocrtc __devinitdata; /* ignore CRTC settings */ 49static int nocrtc __devinitdata; /* ignore CRTC settings */
50static int noedid __devinitdata; /* don't try DDC transfers */ 50static int noedid __devinitdata; /* don't try DDC transfers */
51static int vram_remap __devinitdata; /* set amt. of memory to be used */ 51static int vram_remap __devinitdata; /* set amt. of memory to be used */
@@ -2002,11 +2002,7 @@ static void __devexit uvesafb_exit(void)
2002 2002
2003module_exit(uvesafb_exit); 2003module_exit(uvesafb_exit);
2004 2004
2005static int param_get_scroll(char *buffer, struct kernel_param *kp) 2005#define param_get_scroll NULL
2006{
2007 return 0;
2008}
2009
2010static int param_set_scroll(const char *val, struct kernel_param *kp) 2006static int param_set_scroll(const char *val, struct kernel_param *kp)
2011{ 2007{
2012 ypan = 0; 2008 ypan = 0;
@@ -2017,6 +2013,8 @@ static int param_set_scroll(const char *val, struct kernel_param *kp)
2017 ypan = 1; 2013 ypan = 1;
2018 else if (!strcmp(val, "ywrap")) 2014 else if (!strcmp(val, "ywrap"))
2019 ypan = 2; 2015 ypan = 2;
2016 else
2017 return -EINVAL;
2020 2018
2021 return 0; 2019 return 0;
2022} 2020}
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 018c070a357..3a43ebf83a4 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -31,21 +31,37 @@ static ssize_t modalias_show(struct device *_d,
31 return sprintf(buf, "virtio:d%08Xv%08X\n", 31 return sprintf(buf, "virtio:d%08Xv%08X\n",
32 dev->id.device, dev->id.vendor); 32 dev->id.device, dev->id.vendor);
33} 33}
34static ssize_t features_show(struct device *_d,
35 struct device_attribute *attr, char *buf)
36{
37 struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
38 unsigned int i;
39 ssize_t len = 0;
40
41 /* We actually represent this as a bitstring, as it could be
42 * arbitrary length in future. */
43 for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++)
44 len += sprintf(buf+len, "%c",
45 test_bit(i, dev->features) ? '1' : '0');
46 len += sprintf(buf+len, "\n");
47 return len;
48}
34static struct device_attribute virtio_dev_attrs[] = { 49static struct device_attribute virtio_dev_attrs[] = {
35 __ATTR_RO(device), 50 __ATTR_RO(device),
36 __ATTR_RO(vendor), 51 __ATTR_RO(vendor),
37 __ATTR_RO(status), 52 __ATTR_RO(status),
38 __ATTR_RO(modalias), 53 __ATTR_RO(modalias),
54 __ATTR_RO(features),
39 __ATTR_NULL 55 __ATTR_NULL
40}; 56};
41 57
42static inline int virtio_id_match(const struct virtio_device *dev, 58static inline int virtio_id_match(const struct virtio_device *dev,
43 const struct virtio_device_id *id) 59 const struct virtio_device_id *id)
44{ 60{
45 if (id->device != dev->id.device) 61 if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID)
46 return 0; 62 return 0;
47 63
48 return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor != dev->id.vendor; 64 return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor;
49} 65}
50 66
51/* This looks through all the IDs a driver claims to support. If any of them 67/* This looks through all the IDs a driver claims to support. If any of them
@@ -118,13 +134,14 @@ static int virtio_dev_probe(struct device *_d)
118 if (device_features & (1 << i)) 134 if (device_features & (1 << i))
119 set_bit(i, dev->features); 135 set_bit(i, dev->features);
120 136
137 dev->config->finalize_features(dev);
138
121 err = drv->probe(dev); 139 err = drv->probe(dev);
122 if (err) 140 if (err)
123 add_status(dev, VIRTIO_CONFIG_S_FAILED); 141 add_status(dev, VIRTIO_CONFIG_S_FAILED);
124 else { 142 else
125 dev->config->finalize_features(dev);
126 add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); 143 add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
127 } 144
128 return err; 145 return err;
129} 146}
130 147
@@ -185,6 +202,8 @@ int register_virtio_device(struct virtio_device *dev)
185 /* Acknowledge that we've seen the device. */ 202 /* Acknowledge that we've seen the device. */
186 add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); 203 add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
187 204
205 INIT_LIST_HEAD(&dev->vqs);
206
188 /* device_register() causes the bus infrastructure to look for a 207 /* device_register() causes the bus infrastructure to look for a
189 * matching driver. */ 208 * matching driver. */
190 err = device_register(&dev->dev); 209 err = device_register(&dev->dev);
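
From a driver's point of view, the find_vqs/del_vqs conversion collapses per-queue setup into one call. A hedged sketch of the calling convention, mirroring the balloon conversion below (my_rx_done, my_tx_done, and the queue names are hypothetical; the signature matches the ops introduced in this series):

	static void my_rx_done(struct virtqueue *vq);	/* hypothetical */
	static void my_tx_done(struct virtqueue *vq);	/* hypothetical */

	static int my_probe(struct virtio_device *vdev)
	{
		struct virtqueue *vqs[2];
		vq_callback_t *callbacks[] = { my_rx_done, my_tx_done };
		const char *names[] = { "rx", "tx" };
		int err;

		/* one call finds, and wires interrupts up for, every queue */
		err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
		if (err)
			return err;

		/* ... and on any later failure one call undoes it all:
		 * vdev->config->del_vqs(vdev); */
		return 0;
	}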
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 9c76a061a04..26b27826479 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -204,6 +204,9 @@ static int balloon(void *_vballoon)
204static int virtballoon_probe(struct virtio_device *vdev) 204static int virtballoon_probe(struct virtio_device *vdev)
205{ 205{
206 struct virtio_balloon *vb; 206 struct virtio_balloon *vb;
207 struct virtqueue *vqs[2];
208 vq_callback_t *callbacks[] = { balloon_ack, balloon_ack };
209 const char *names[] = { "inflate", "deflate" };
207 int err; 210 int err;
208 211
209 vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); 212 vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
@@ -218,22 +221,17 @@ static int virtballoon_probe(struct virtio_device *vdev)
218 vb->vdev = vdev; 221 vb->vdev = vdev;
219 222
220 /* We expect two virtqueues. */ 223 /* We expect two virtqueues. */
221 vb->inflate_vq = vdev->config->find_vq(vdev, 0, balloon_ack); 224 err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
222 if (IS_ERR(vb->inflate_vq)) { 225 if (err)
223 err = PTR_ERR(vb->inflate_vq);
224 goto out_free_vb; 226 goto out_free_vb;
225 }
226 227
227 vb->deflate_vq = vdev->config->find_vq(vdev, 1, balloon_ack); 228 vb->inflate_vq = vqs[0];
228 if (IS_ERR(vb->deflate_vq)) { 229 vb->deflate_vq = vqs[1];
229 err = PTR_ERR(vb->deflate_vq);
230 goto out_del_inflate_vq;
231 }
232 230
233 vb->thread = kthread_run(balloon, vb, "vballoon"); 231 vb->thread = kthread_run(balloon, vb, "vballoon");
234 if (IS_ERR(vb->thread)) { 232 if (IS_ERR(vb->thread)) {
235 err = PTR_ERR(vb->thread); 233 err = PTR_ERR(vb->thread);
236 goto out_del_deflate_vq; 234 goto out_del_vqs;
237 } 235 }
238 236
239 vb->tell_host_first 237 vb->tell_host_first
@@ -241,10 +239,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
241 239
242 return 0; 240 return 0;
243 241
244out_del_deflate_vq: 242out_del_vqs:
245 vdev->config->del_vq(vb->deflate_vq); 243 vdev->config->del_vqs(vdev);
246out_del_inflate_vq:
247 vdev->config->del_vq(vb->inflate_vq);
248out_free_vb: 244out_free_vb:
249 kfree(vb); 245 kfree(vb);
250out: 246out:
@@ -264,8 +260,7 @@ static void virtballoon_remove(struct virtio_device *vdev)
264 /* Now we reset the device so we can clean up the queues. */ 260 /* Now we reset the device so we can clean up the queues. */
265 vdev->config->reset(vdev); 261 vdev->config->reset(vdev);
266 262
267 vdev->config->del_vq(vb->deflate_vq); 263 vdev->config->del_vqs(vdev);
268 vdev->config->del_vq(vb->inflate_vq);
269 kfree(vb); 264 kfree(vb);
270} 265}
271 266
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 330aacbdec1..193c8f0e5cc 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -42,6 +42,26 @@ struct virtio_pci_device
42 /* a list of queues so we can dispatch IRQs */ 42 /* a list of queues so we can dispatch IRQs */
43 spinlock_t lock; 43 spinlock_t lock;
44 struct list_head virtqueues; 44 struct list_head virtqueues;
45
46 /* MSI-X support */
47 int msix_enabled;
48 int intx_enabled;
49 struct msix_entry *msix_entries;
50 /* Name strings for interrupts. This size should be enough,
51 * and I'm too lazy to allocate each name separately. */
52 char (*msix_names)[256];
53 /* Number of available vectors */
54 unsigned msix_vectors;
55 /* Vectors allocated */
56 unsigned msix_used_vectors;
57};
58
59/* Constants for MSI-X */
60/* Use first vector for configuration changes, second and the rest for
61 * virtqueues. Thus, we need at least 2 vectors for MSI-X. */
62enum {
63 VP_MSIX_CONFIG_VECTOR = 0,
64 VP_MSIX_VQ_VECTOR = 1,
45}; 65};
46 66
47struct virtio_pci_vq_info 67struct virtio_pci_vq_info
@@ -60,6 +80,9 @@ struct virtio_pci_vq_info
60 80
61 /* the list node for the virtqueues list */ 81 /* the list node for the virtqueues list */
62 struct list_head node; 82 struct list_head node;
83
84 /* MSI-X vector (or none) */
85 unsigned vector;
63}; 86};
64 87
65/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */ 88/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
@@ -109,7 +132,8 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
109 void *buf, unsigned len) 132 void *buf, unsigned len)
110{ 133{
111 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 134 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
112 void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset; 135 void __iomem *ioaddr = vp_dev->ioaddr +
136 VIRTIO_PCI_CONFIG(vp_dev) + offset;
113 u8 *ptr = buf; 137 u8 *ptr = buf;
114 int i; 138 int i;
115 139
@@ -123,7 +147,8 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
123 const void *buf, unsigned len) 147 const void *buf, unsigned len)
124{ 148{
125 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 149 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
126 void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset; 150 void __iomem *ioaddr = vp_dev->ioaddr +
151 VIRTIO_PCI_CONFIG(vp_dev) + offset;
127 const u8 *ptr = buf; 152 const u8 *ptr = buf;
128 int i; 153 int i;
129 154
@@ -164,6 +189,37 @@ static void vp_notify(struct virtqueue *vq)
164 iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); 189 iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
165} 190}
166 191
192/* Handle a configuration change: Tell driver if it wants to know. */
193static irqreturn_t vp_config_changed(int irq, void *opaque)
194{
195 struct virtio_pci_device *vp_dev = opaque;
196 struct virtio_driver *drv;
197 drv = container_of(vp_dev->vdev.dev.driver,
198 struct virtio_driver, driver);
199
200 if (drv && drv->config_changed)
201 drv->config_changed(&vp_dev->vdev);
202 return IRQ_HANDLED;
203}
204
205/* Notify all virtqueues on an interrupt. */
206static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
207{
208 struct virtio_pci_device *vp_dev = opaque;
209 struct virtio_pci_vq_info *info;
210 irqreturn_t ret = IRQ_NONE;
211 unsigned long flags;
212
213 spin_lock_irqsave(&vp_dev->lock, flags);
214 list_for_each_entry(info, &vp_dev->virtqueues, node) {
215 if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
216 ret = IRQ_HANDLED;
217 }
218 spin_unlock_irqrestore(&vp_dev->lock, flags);
219
220 return ret;
221}
222
167/* A small wrapper to also acknowledge the interrupt when it's handled. 223/* A small wrapper to also acknowledge the interrupt when it's handled.
168 * I really need an EIO hook for the vring so I can ack the interrupt once we 224 * I really need an EIO hook for the vring so I can ack the interrupt once we
169 * know that we'll be handling the IRQ but before we invoke the callback since 225 * know that we'll be handling the IRQ but before we invoke the callback since
@@ -173,9 +229,6 @@ static void vp_notify(struct virtqueue *vq)
173static irqreturn_t vp_interrupt(int irq, void *opaque) 229static irqreturn_t vp_interrupt(int irq, void *opaque)
174{ 230{
175 struct virtio_pci_device *vp_dev = opaque; 231 struct virtio_pci_device *vp_dev = opaque;
176 struct virtio_pci_vq_info *info;
177 irqreturn_t ret = IRQ_NONE;
178 unsigned long flags;
179 u8 isr; 232 u8 isr;
180 233
181 /* reading the ISR has the effect of also clearing it so it's very 234 /* reading the ISR has the effect of also clearing it so it's very
@@ -187,34 +240,137 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
187 return IRQ_NONE; 240 return IRQ_NONE;
188 241
189 /* Configuration change? Tell driver if it wants to know. */ 242 /* Configuration change? Tell driver if it wants to know. */
190 if (isr & VIRTIO_PCI_ISR_CONFIG) { 243 if (isr & VIRTIO_PCI_ISR_CONFIG)
191 struct virtio_driver *drv; 244 vp_config_changed(irq, opaque);
192 drv = container_of(vp_dev->vdev.dev.driver,
193 struct virtio_driver, driver);
194 245
195 if (drv && drv->config_changed) 246 return vp_vring_interrupt(irq, opaque);
196 drv->config_changed(&vp_dev->vdev); 247}
248
249static void vp_free_vectors(struct virtio_device *vdev)
250{
251 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
252 int i;
253
254 if (vp_dev->intx_enabled) {
255 free_irq(vp_dev->pci_dev->irq, vp_dev);
256 vp_dev->intx_enabled = 0;
197 } 257 }
198 258
199 spin_lock_irqsave(&vp_dev->lock, flags); 259 for (i = 0; i < vp_dev->msix_used_vectors; ++i)
200 list_for_each_entry(info, &vp_dev->virtqueues, node) { 260 free_irq(vp_dev->msix_entries[i].vector, vp_dev);
201 if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) 261 vp_dev->msix_used_vectors = 0;
202 ret = IRQ_HANDLED; 262
263 if (vp_dev->msix_enabled) {
264 /* Disable the vector used for configuration */
265 iowrite16(VIRTIO_MSI_NO_VECTOR,
266 vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
267 /* Flush the write out to device */
268 ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
269
270 vp_dev->msix_enabled = 0;
271 pci_disable_msix(vp_dev->pci_dev);
203 } 272 }
204 spin_unlock_irqrestore(&vp_dev->lock, flags); 273}
205 274
206 return ret; 275static int vp_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
276 int *options, int noptions)
277{
278 int i;
279 for (i = 0; i < noptions; ++i)
280 if (!pci_enable_msix(dev, entries, options[i]))
281 return options[i];
282 return -EBUSY;
283}
284
285static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs)
286{
287 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
288 const char *name = dev_name(&vp_dev->vdev.dev);
289 unsigned i, v;
290 int err = -ENOMEM;
291 /* We want at most one vector per queue and one for config changes.
292 * Fall back to separate vectors for config and a shared one for queues.
293 * Finally fall back to regular interrupts. */
294 int options[] = { max_vqs + 1, 2 };
295 int nvectors = max(options[0], options[1]);
296
297 vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
298 GFP_KERNEL);
299 if (!vp_dev->msix_entries)
300 goto error_entries;
301 vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
302 GFP_KERNEL);
303 if (!vp_dev->msix_names)
304 goto error_names;
305
306 for (i = 0; i < nvectors; ++i)
307 vp_dev->msix_entries[i].entry = i;
308
309 err = vp_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries,
310 options, ARRAY_SIZE(options));
311 if (err < 0) {
312 /* Can't allocate enough MSI-X vectors, use regular interrupt */
313 vp_dev->msix_vectors = 0;
314 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
315 IRQF_SHARED, name, vp_dev);
316 if (err)
317 goto error_irq;
318 vp_dev->intx_enabled = 1;
319 } else {
320 vp_dev->msix_vectors = err;
321 vp_dev->msix_enabled = 1;
322
323 /* Set the vector used for configuration */
324 v = vp_dev->msix_used_vectors;
325 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
326 "%s-config", name);
327 err = request_irq(vp_dev->msix_entries[v].vector,
328 vp_config_changed, 0, vp_dev->msix_names[v],
329 vp_dev);
330 if (err)
331 goto error_irq;
332 ++vp_dev->msix_used_vectors;
333
334 iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
335 /* Verify we had enough resources to assign the vector */
336 v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
337 if (v == VIRTIO_MSI_NO_VECTOR) {
338 err = -EBUSY;
339 goto error_irq;
340 }
341 }
342
343 if (vp_dev->msix_vectors && vp_dev->msix_vectors != max_vqs + 1) {
344 /* Shared vector for all VQs */
345 v = vp_dev->msix_used_vectors;
346 snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
347 "%s-virtqueues", name);
348 err = request_irq(vp_dev->msix_entries[v].vector,
349 vp_vring_interrupt, 0, vp_dev->msix_names[v],
350 vp_dev);
351 if (err)
352 goto error_irq;
353 ++vp_dev->msix_used_vectors;
354 }
355 return 0;
356error_irq:
357 vp_free_vectors(vdev);
358 kfree(vp_dev->msix_names);
359error_names:
360 kfree(vp_dev->msix_entries);
361error_entries:
362 return err;
207} 363}
208 364
209/* the config->find_vq() implementation */
210static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, 365static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
211 void (*callback)(struct virtqueue *vq)) 366 void (*callback)(struct virtqueue *vq),
367 const char *name)
212{ 368{
213 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 369 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
214 struct virtio_pci_vq_info *info; 370 struct virtio_pci_vq_info *info;
215 struct virtqueue *vq; 371 struct virtqueue *vq;
216 unsigned long flags, size; 372 unsigned long flags, size;
217 u16 num; 373 u16 num, vector;
218 int err; 374 int err;
219 375
220 /* Select the queue we're interested in */ 376 /* Select the queue we're interested in */
@@ -233,6 +389,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
233 389
234 info->queue_index = index; 390 info->queue_index = index;
235 info->num = num; 391 info->num = num;
392 info->vector = VIRTIO_MSI_NO_VECTOR;
236 393
237 size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); 394 size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
238 info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); 395 info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
@@ -247,7 +404,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
247 404
248 /* create the vring */ 405 /* create the vring */
249 vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, 406 vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN,
250 vdev, info->queue, vp_notify, callback); 407 vdev, info->queue, vp_notify, callback, name);
251 if (!vq) { 408 if (!vq) {
252 err = -ENOMEM; 409 err = -ENOMEM;
253 goto out_activate_queue; 410 goto out_activate_queue;
@@ -256,12 +413,43 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
256 vq->priv = info; 413 vq->priv = info;
257 info->vq = vq; 414 info->vq = vq;
258 415
416 /* allocate per-vq vector if available and necessary */
417 if (callback && vp_dev->msix_used_vectors < vp_dev->msix_vectors) {
418 vector = vp_dev->msix_used_vectors;
419 snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names,
420 "%s-%s", dev_name(&vp_dev->vdev.dev), name);
421 err = request_irq(vp_dev->msix_entries[vector].vector,
422 vring_interrupt, 0,
423 vp_dev->msix_names[vector], vq);
424 if (err)
425 goto out_request_irq;
426 info->vector = vector;
427 ++vp_dev->msix_used_vectors;
428 } else
429 vector = VP_MSIX_VQ_VECTOR;
430
431 if (callback && vp_dev->msix_enabled) {
432 iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
433 vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
434 if (vector == VIRTIO_MSI_NO_VECTOR) {
435 err = -EBUSY;
436 goto out_assign;
437 }
438 }
439
259 spin_lock_irqsave(&vp_dev->lock, flags); 440 spin_lock_irqsave(&vp_dev->lock, flags);
260 list_add(&info->node, &vp_dev->virtqueues); 441 list_add(&info->node, &vp_dev->virtqueues);
261 spin_unlock_irqrestore(&vp_dev->lock, flags); 442 spin_unlock_irqrestore(&vp_dev->lock, flags);
262 443
263 return vq; 444 return vq;
264 445
446out_assign:
447 if (info->vector != VIRTIO_MSI_NO_VECTOR) {
448 free_irq(vp_dev->msix_entries[info->vector].vector, vq);
449 --vp_dev->msix_used_vectors;
450 }
451out_request_irq:
452 vring_del_virtqueue(vq);
265out_activate_queue: 453out_activate_queue:
266 iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); 454 iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
267 free_pages_exact(info->queue, size); 455 free_pages_exact(info->queue, size);
@@ -270,21 +458,27 @@ out_info:
270 return ERR_PTR(err); 458 return ERR_PTR(err);
271} 459}
272 460
273/* the config->del_vq() implementation */
274static void vp_del_vq(struct virtqueue *vq) 461static void vp_del_vq(struct virtqueue *vq)
275{ 462{
276 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); 463 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
277 struct virtio_pci_vq_info *info = vq->priv; 464 struct virtio_pci_vq_info *info = vq->priv;
278 unsigned long flags, size; 465 unsigned long size;
279 466
280 spin_lock_irqsave(&vp_dev->lock, flags); 467 iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
281 list_del(&info->node); 468
282 spin_unlock_irqrestore(&vp_dev->lock, flags); 469 if (info->vector != VIRTIO_MSI_NO_VECTOR)
470 free_irq(vp_dev->msix_entries[info->vector].vector, vq);
471
472 if (vp_dev->msix_enabled) {
473 iowrite16(VIRTIO_MSI_NO_VECTOR,
474 vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
475 /* Flush the write out to device */
476 ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
477 }
283 478
284 vring_del_virtqueue(vq); 479 vring_del_virtqueue(vq);
285 480
286 /* Select and deactivate the queue */ 481 /* Select and deactivate the queue */
287 iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
288 iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); 482 iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
289 483
290 size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); 484 size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
@@ -292,14 +486,57 @@ static void vp_del_vq(struct virtqueue *vq)
292 kfree(info); 486 kfree(info);
293} 487}
294 488
489/* the config->del_vqs() implementation */
490static void vp_del_vqs(struct virtio_device *vdev)
491{
492 struct virtqueue *vq, *n;
493
494 list_for_each_entry_safe(vq, n, &vdev->vqs, list)
495 vp_del_vq(vq);
496
497 vp_free_vectors(vdev);
498}
499
500/* the config->find_vqs() implementation */
501static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
502 struct virtqueue *vqs[],
503 vq_callback_t *callbacks[],
504 const char *names[])
505{
506 int vectors = 0;
507 int i, err;
508
509 /* How many vectors would we like? */
510 for (i = 0; i < nvqs; ++i)
511 if (callbacks[i])
512 ++vectors;
513
514 err = vp_request_vectors(vdev, vectors);
515 if (err)
516 goto error_request;
517
518 for (i = 0; i < nvqs; ++i) {
519 vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i]);
520 if (IS_ERR(vqs[i]))
521 goto error_find;
522 }
523 return 0;
524
525error_find:
526 vp_del_vqs(vdev);
527
528error_request:
529 return PTR_ERR(vqs[i]);
530}
531
295static struct virtio_config_ops virtio_pci_config_ops = { 532static struct virtio_config_ops virtio_pci_config_ops = {
296 .get = vp_get, 533 .get = vp_get,
297 .set = vp_set, 534 .set = vp_set,
298 .get_status = vp_get_status, 535 .get_status = vp_get_status,
299 .set_status = vp_set_status, 536 .set_status = vp_set_status,
300 .reset = vp_reset, 537 .reset = vp_reset,
301 .find_vq = vp_find_vq, 538 .find_vqs = vp_find_vqs,
302 .del_vq = vp_del_vq, 539 .del_vqs = vp_del_vqs,
303 .get_features = vp_get_features, 540 .get_features = vp_get_features,
304 .finalize_features = vp_finalize_features, 541 .finalize_features = vp_finalize_features,
305}; 542};
@@ -310,7 +547,7 @@ static void virtio_pci_release_dev(struct device *_d)
310 struct virtio_pci_device *vp_dev = to_vp_device(dev); 547 struct virtio_pci_device *vp_dev = to_vp_device(dev);
311 struct pci_dev *pci_dev = vp_dev->pci_dev; 548 struct pci_dev *pci_dev = vp_dev->pci_dev;
312 549
313 free_irq(pci_dev->irq, vp_dev); 550 vp_del_vqs(dev);
314 pci_set_drvdata(pci_dev, NULL); 551 pci_set_drvdata(pci_dev, NULL);
315 pci_iounmap(pci_dev, vp_dev->ioaddr); 552 pci_iounmap(pci_dev, vp_dev->ioaddr);
316 pci_release_regions(pci_dev); 553 pci_release_regions(pci_dev);
@@ -369,21 +606,13 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
369 vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; 606 vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
370 vp_dev->vdev.id.device = pci_dev->subsystem_device; 607 vp_dev->vdev.id.device = pci_dev->subsystem_device;
371 608
372 /* register a handler for the queue with the PCI device's interrupt */
373 err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
374 dev_name(&vp_dev->vdev.dev), vp_dev);
375 if (err)
376 goto out_set_drvdata;
377
378 /* finally register the virtio device */ 609 /* finally register the virtio device */
379 err = register_virtio_device(&vp_dev->vdev); 610 err = register_virtio_device(&vp_dev->vdev);
380 if (err) 611 if (err)
381 goto out_req_irq; 612 goto out_set_drvdata;
382 613
383 return 0; 614 return 0;
384 615
385out_req_irq:
386 free_irq(pci_dev->irq, vp_dev);
387out_set_drvdata: 616out_set_drvdata:
388 pci_set_drvdata(pci_dev, NULL); 617 pci_set_drvdata(pci_dev, NULL);
389 pci_iounmap(pci_dev, vp_dev->ioaddr); 618 pci_iounmap(pci_dev, vp_dev->ioaddr);
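
To make the vector policy concrete, an illustrative walk-through for a device whose three virtqueues all want callbacks; the outcomes follow vp_request_vectors() above, but this summary is not part of the patch:

	/*
	 * vp_find_vqs(vdev, 3, ...) counts three callbacks, so the
	 * options tried are { 4, 2 }:
	 *
	 *   4 vectors granted:  entry 0 -> "<dev>-config",
	 *                       entries 1..3 -> one vector per virtqueue
	 *   2 vectors granted:  entry 0 -> "<dev>-config",
	 *                       entry 1 -> "<dev>-virtqueues", shared by
	 *                       all rings via vp_vring_interrupt()
	 *   MSI-X unavailable:  single shared INTx line -> vp_interrupt()
	 */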
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5c52369ab9b..a882f260651 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -23,21 +23,30 @@
23 23
24#ifdef DEBUG 24#ifdef DEBUG
25/* For development, we want to crash whenever the ring is screwed. */ 25/* For development, we want to crash whenever the ring is screwed. */
26#define BAD_RING(_vq, fmt...) \ 26#define BAD_RING(_vq, fmt, args...) \
27 do { dev_err(&(_vq)->vq.vdev->dev, fmt); BUG(); } while(0) 27 do { \
28 dev_err(&(_vq)->vq.vdev->dev, \
29 "%s:"fmt, (_vq)->vq.name, ##args); \
30 BUG(); \
31 } while (0)
28/* Caller is supposed to guarantee no reentry. */ 32/* Caller is supposed to guarantee no reentry. */
29#define START_USE(_vq) \ 33#define START_USE(_vq) \
30 do { \ 34 do { \
31 if ((_vq)->in_use) \ 35 if ((_vq)->in_use) \
32 panic("in_use = %i\n", (_vq)->in_use); \ 36 panic("%s:in_use = %i\n", \
37 (_vq)->vq.name, (_vq)->in_use); \
33 (_vq)->in_use = __LINE__; \ 38 (_vq)->in_use = __LINE__; \
34 mb(); \ 39 mb(); \
35 } while(0) 40 } while (0)
36#define END_USE(_vq) \ 41#define END_USE(_vq) \
37 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0) 42 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0)
38#else 43#else
39#define BAD_RING(_vq, fmt...) \ 44#define BAD_RING(_vq, fmt, args...) \
40 do { dev_err(&_vq->vq.vdev->dev, fmt); (_vq)->broken = true; } while(0) 45 do { \
46 dev_err(&_vq->vq.vdev->dev, \
47 "%s:"fmt, (_vq)->vq.name, ##args); \
48 (_vq)->broken = true; \
49 } while (0)
41#define START_USE(vq) 50#define START_USE(vq)
42#define END_USE(vq) 51#define END_USE(vq)
43#endif 52#endif
@@ -52,6 +61,9 @@ struct vring_virtqueue
52 /* Other side has made a mess, don't try any more. */ 61 /* Other side has made a mess, don't try any more. */
53 bool broken; 62 bool broken;
54 63
64 /* Host supports indirect buffers */
65 bool indirect;
66
55 /* Number of free buffers */ 67 /* Number of free buffers */
56 unsigned int num_free; 68 unsigned int num_free;
57 /* Head of free buffer list. */ 69 /* Head of free buffer list. */
@@ -76,6 +88,55 @@ struct vring_virtqueue
76 88
77#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) 89#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
78 90
91/* Set up an indirect table of descriptors and add it to the queue. */
92static int vring_add_indirect(struct vring_virtqueue *vq,
93 struct scatterlist sg[],
94 unsigned int out,
95 unsigned int in)
96{
97 struct vring_desc *desc;
98 unsigned head;
99 int i;
100
101 desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC);
102 if (!desc)
103 return vq->vring.num;
104
105 /* Transfer entries from the sg list into the indirect page */
106 for (i = 0; i < out; i++) {
107 desc[i].flags = VRING_DESC_F_NEXT;
108 desc[i].addr = sg_phys(sg);
109 desc[i].len = sg->length;
110 desc[i].next = i+1;
111 sg++;
112 }
113 for (; i < (out + in); i++) {
114 desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
115 desc[i].addr = sg_phys(sg);
116 desc[i].len = sg->length;
117 desc[i].next = i+1;
118 sg++;
119 }
120
121 /* Last one doesn't continue. */
122 desc[i-1].flags &= ~VRING_DESC_F_NEXT;
123 desc[i-1].next = 0;
124
125 /* We're about to use a buffer */
126 vq->num_free--;
127
128 /* Use a single buffer which doesn't continue */
129 head = vq->free_head;
130 vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
131 vq->vring.desc[head].addr = virt_to_phys(desc);
132 vq->vring.desc[head].len = i * sizeof(struct vring_desc);
133
134 /* Update free pointer */
135 vq->free_head = vq->vring.desc[head].next;
136
137 return head;
138}
139
79static int vring_add_buf(struct virtqueue *_vq, 140static int vring_add_buf(struct virtqueue *_vq,
80 struct scatterlist sg[], 141 struct scatterlist sg[],
81 unsigned int out, 142 unsigned int out,
@@ -85,12 +146,21 @@ static int vring_add_buf(struct virtqueue *_vq,
85 struct vring_virtqueue *vq = to_vvq(_vq); 146 struct vring_virtqueue *vq = to_vvq(_vq);
86 unsigned int i, avail, head, uninitialized_var(prev); 147 unsigned int i, avail, head, uninitialized_var(prev);
87 148
149 START_USE(vq);
150
88 BUG_ON(data == NULL); 151 BUG_ON(data == NULL);
152
153 /* If the host supports indirect descriptor tables, and we have multiple
154 * buffers, then go indirect. FIXME: tune this threshold */
155 if (vq->indirect && (out + in) > 1 && vq->num_free) {
156 head = vring_add_indirect(vq, sg, out, in);
157 if (head != vq->vring.num)
158 goto add_head;
159 }
160
89 BUG_ON(out + in > vq->vring.num); 161 BUG_ON(out + in > vq->vring.num);
90 BUG_ON(out + in == 0); 162 BUG_ON(out + in == 0);
91 163
92 START_USE(vq);
93
94 if (vq->num_free < out + in) { 164 if (vq->num_free < out + in) {
95 pr_debug("Can't add buf len %i - avail = %i\n", 165 pr_debug("Can't add buf len %i - avail = %i\n",
96 out + in, vq->num_free); 166 out + in, vq->num_free);
@@ -127,6 +197,7 @@ static int vring_add_buf(struct virtqueue *_vq,
127 /* Update free pointer */ 197 /* Update free pointer */
128 vq->free_head = i; 198 vq->free_head = i;
129 199
200add_head:
130 /* Set token. */ 201 /* Set token. */
131 vq->data[head] = data; 202 vq->data[head] = data;
132 203
@@ -170,6 +241,11 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
170 241
171 /* Put back on free list: find end */ 242 /* Put back on free list: find end */
172 i = head; 243 i = head;
244
245 /* Free the indirect table */
246 if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
247 kfree(phys_to_virt(vq->vring.desc[i].addr));
248
173 while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { 249 while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
174 i = vq->vring.desc[i].next; 250 i = vq->vring.desc[i].next;
175 vq->num_free++; 251 vq->num_free++;
@@ -284,7 +360,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
284 struct virtio_device *vdev, 360 struct virtio_device *vdev,
285 void *pages, 361 void *pages,
286 void (*notify)(struct virtqueue *), 362 void (*notify)(struct virtqueue *),
287 void (*callback)(struct virtqueue *)) 363 void (*callback)(struct virtqueue *),
364 const char *name)
288{ 365{
289 struct vring_virtqueue *vq; 366 struct vring_virtqueue *vq;
290 unsigned int i; 367 unsigned int i;
@@ -303,14 +380,18 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
303 vq->vq.callback = callback; 380 vq->vq.callback = callback;
304 vq->vq.vdev = vdev; 381 vq->vq.vdev = vdev;
305 vq->vq.vq_ops = &vring_vq_ops; 382 vq->vq.vq_ops = &vring_vq_ops;
383 vq->vq.name = name;
306 vq->notify = notify; 384 vq->notify = notify;
307 vq->broken = false; 385 vq->broken = false;
308 vq->last_used_idx = 0; 386 vq->last_used_idx = 0;
309 vq->num_added = 0; 387 vq->num_added = 0;
388 list_add_tail(&vq->vq.list, &vdev->vqs);
310#ifdef DEBUG 389#ifdef DEBUG
311 vq->in_use = false; 390 vq->in_use = false;
312#endif 391#endif
313 392
393 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
394
314 /* No callback? Tell other side not to bother us. */ 395 /* No callback? Tell other side not to bother us. */
315 if (!callback) 396 if (!callback)
316 vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; 397 vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
@@ -327,6 +408,7 @@ EXPORT_SYMBOL_GPL(vring_new_virtqueue);
327 408
328void vring_del_virtqueue(struct virtqueue *vq) 409void vring_del_virtqueue(struct virtqueue *vq)
329{ 410{
411 list_del(&vq->list);
330 kfree(to_vvq(vq)); 412 kfree(to_vvq(vq));
331} 413}
332EXPORT_SYMBOL_GPL(vring_del_virtqueue); 414EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -338,6 +420,8 @@ void vring_transport_features(struct virtio_device *vdev)
338 420
339 for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { 421 for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
340 switch (i) { 422 switch (i) {
423 case VIRTIO_RING_F_INDIRECT_DESC:
424 break;
341 default: 425 default:
342 /* We don't understand this bit. */ 426 /* We don't understand this bit. */
343 clear_bit(i, vdev->features); 427 clear_bit(i, vdev->features);
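
The payoff of indirect descriptors is in ring-slot accounting: a multi-element scatterlist now costs one slot instead of one per element. A hypothetical helper (not in the patch) capturing the rule vring_add_buf() follows after this change:

	/* illustrative only: ring descriptors consumed per request */
	static unsigned int ring_slots_needed(bool indirect,
					      unsigned int out,
					      unsigned int in)
	{
		/* indirect path: the whole sg list lives in one
		 * kmalloc'd table chained off a single descriptor */
		if (indirect && out + in > 1)
			return 1;
		return out + in;
	}

So on a 256-entry ring, a driver queuing 16-element requests can keep 256 of them in flight instead of 16.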
diff --git a/drivers/w1/Kconfig b/drivers/w1/Kconfig
index 9adbb4f9047..fd2c7bd9dfb 100644
--- a/drivers/w1/Kconfig
+++ b/drivers/w1/Kconfig
@@ -8,7 +8,7 @@ menuconfig W1
8 If you want W1 support, you should say Y here. 8 If you want W1 support, you should say Y here.
9 9
10 This W1 support can also be built as a module. If so, the module 10 This W1 support can also be built as a module. If so, the module
11 will be called wire.ko. 11 will be called wire.
12 12
13if W1 13if W1
14 14
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 96d2f8e4c27..3195fb8b7d9 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -12,7 +12,7 @@ config W1_MASTER_MATROX
12 using Matrox's G400 GPIO pins. 12 using Matrox's G400 GPIO pins.
13 13
14 This support is also available as a module. If so, the module 14 This support is also available as a module. If so, the module
15 will be called matrox_w1.ko. 15 will be called matrox_w1.
16 16
17config W1_MASTER_DS2490 17config W1_MASTER_DS2490
18 tristate "DS2490 USB <-> W1 transport layer for 1-wire" 18 tristate "DS2490 USB <-> W1 transport layer for 1-wire"
@@ -22,7 +22,7 @@ config W1_MASTER_DS2490
22 for example DS9490*. 22 for example DS9490*.
23 23
24 This support is also available as a module. If so, the module 24 This support is also available as a module. If so, the module
25 will be called ds2490.ko. 25 will be called ds2490.
26 26
27config W1_MASTER_DS2482 27config W1_MASTER_DS2482
28 tristate "Maxim DS2482 I2C to 1-Wire bridge" 28 tristate "Maxim DS2482 I2C to 1-Wire bridge"
@@ -56,7 +56,7 @@ config W1_MASTER_GPIO
56 GPIO pins. This driver uses the GPIO API to control the wire. 56 GPIO pins. This driver uses the GPIO API to control the wire.
57 57
58 This support is also available as a module. If so, the module 58 This support is also available as a module. If so, the module
59 will be called w1-gpio.ko. 59 will be called w1-gpio.
60 60
61config HDQ_MASTER_OMAP 61config HDQ_MASTER_OMAP
62 tristate "OMAP HDQ driver" 62 tristate "OMAP HDQ driver"
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 5eb8f21da82..b166f2852a6 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -231,14 +231,14 @@ config DAVINCI_WATCHDOG
231 NOTE: once enabled, this timer cannot be disabled. 231 NOTE: once enabled, this timer cannot be disabled.
232 Say N if you are unsure. 232 Say N if you are unsure.
233 233
234config ORION5X_WATCHDOG 234config ORION_WATCHDOG
235 tristate "Orion5x watchdog" 235 tristate "Orion watchdog"
236 depends on ARCH_ORION5X 236 depends on ARCH_ORION5X || ARCH_KIRKWOOD
237 help 237 help
238 Say Y here to include support for the watchdog timer 238 Say Y here to include support for the watchdog timer
239 in the Orion5x ARM SoCs. 239 in the Marvell Orion5x and Kirkwood ARM SoCs.
240 To compile this driver as a module, choose M here: the 240 To compile this driver as a module, choose M here: the
241 module will be called orion5x_wdt. 241 module will be called orion_wdt.
242 242
243# AVR32 Architecture 243# AVR32 Architecture
244 244
@@ -531,7 +531,7 @@ config SBC8360_WDT
531 Board Computer produced by Axiomtek Co., Ltd. (www.axiomtek.com). 531 Board Computer produced by Axiomtek Co., Ltd. (www.axiomtek.com).
532 532
533 To compile this driver as a module, choose M here: the 533 To compile this driver as a module, choose M here: the
534 module will be called sbc8360.ko. 534 module will be called sbc8360.
535 535
536 Most people will say N. 536 Most people will say N.
537 537
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 7f8c56b14f5..c3afa14d5be 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -40,7 +40,7 @@ obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o
40obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o 40obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
41obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o 41obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o
42obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o 42obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o
43obj-$(CONFIG_ORION5X_WATCHDOG) += orion5x_wdt.o 43obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
44 44
45# AVR32 Architecture 45# AVR32 Architecture
46obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o 46obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/iop_wdt.c b/drivers/watchdog/iop_wdt.c
index 96eb2cbe587..0c905967669 100644
--- a/drivers/watchdog/iop_wdt.c
+++ b/drivers/watchdog/iop_wdt.c
@@ -192,7 +192,7 @@ static int iop_wdt_release(struct inode *inode, struct file *file)
192 if (test_bit(WDT_ENABLED, &wdt_status)) 192 if (test_bit(WDT_ENABLED, &wdt_status))
193 state = wdt_disable(); 193 state = wdt_disable();
194 194
195 /* if the timer is not disbaled reload and notify that we are still 195 /* if the timer is not disabled reload and notify that we are still
196 * going down 196 * going down
197 */ 197 */
198 if (state != 0) { 198 if (state != 0) {
diff --git a/drivers/watchdog/orion5x_wdt.c b/drivers/watchdog/orion_wdt.c
index 2cde568e4fb..2d9fb96a9ee 100644
--- a/drivers/watchdog/orion5x_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/watchdog/orion5x_wdt.c 2 * drivers/watchdog/orion_wdt.c
3 * 3 *
4 * Watchdog driver for Orion5x processors 4 * Watchdog driver for Orion/Kirkwood processors
5 * 5 *
6 * Author: Sylver Bruneau <sylver.bruneau@googlemail.com> 6 * Author: Sylver Bruneau <sylver.bruneau@googlemail.com>
7 * 7 *
@@ -23,7 +23,7 @@
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <mach/bridge-regs.h> 25#include <mach/bridge-regs.h>
26#include <plat/orion5x_wdt.h> 26#include <plat/orion_wdt.h>
27 27
28/* 28/*
29 * Watchdog timer block registers. 29 * Watchdog timer block registers.
@@ -43,7 +43,7 @@ static unsigned int wdt_tclk;
43static unsigned long wdt_status; 43static unsigned long wdt_status;
44static spinlock_t wdt_lock; 44static spinlock_t wdt_lock;
45 45
46static void orion5x_wdt_ping(void) 46static void orion_wdt_ping(void)
47{ 47{
48 spin_lock(&wdt_lock); 48 spin_lock(&wdt_lock);
49 49
@@ -53,7 +53,7 @@ static void orion5x_wdt_ping(void)
53 spin_unlock(&wdt_lock); 53 spin_unlock(&wdt_lock);
54} 54}
55 55
56static void orion5x_wdt_enable(void) 56static void orion_wdt_enable(void)
57{ 57{
58 u32 reg; 58 u32 reg;
59 59
@@ -73,23 +73,23 @@ static void orion5x_wdt_enable(void)
73 writel(reg, TIMER_CTRL); 73 writel(reg, TIMER_CTRL);
74 74
75 /* Enable reset on watchdog */ 75 /* Enable reset on watchdog */
76 reg = readl(CPU_RESET_MASK); 76 reg = readl(RSTOUTn_MASK);
77 reg |= WDT_RESET; 77 reg |= WDT_RESET_OUT_EN;
78 writel(reg, CPU_RESET_MASK); 78 writel(reg, RSTOUTn_MASK);
79 79
80 spin_unlock(&wdt_lock); 80 spin_unlock(&wdt_lock);
81} 81}
82 82
83static void orion5x_wdt_disable(void) 83static void orion_wdt_disable(void)
84{ 84{
85 u32 reg; 85 u32 reg;
86 86
87 spin_lock(&wdt_lock); 87 spin_lock(&wdt_lock);
88 88
89 /* Disable reset on watchdog */ 89 /* Disable reset on watchdog */
90 reg = readl(CPU_RESET_MASK); 90 reg = readl(RSTOUTn_MASK);
91 reg &= ~WDT_RESET; 91 reg &= ~WDT_RESET_OUT_EN;
92 writel(reg, CPU_RESET_MASK); 92 writel(reg, RSTOUTn_MASK);
93 93
94 /* Disable watchdog timer */ 94 /* Disable watchdog timer */
95 reg = readl(TIMER_CTRL); 95 reg = readl(TIMER_CTRL);
@@ -99,7 +99,7 @@ static void orion5x_wdt_disable(void)
99 spin_unlock(&wdt_lock); 99 spin_unlock(&wdt_lock);
100} 100}
101 101
102static int orion5x_wdt_get_timeleft(int *time_left) 102static int orion_wdt_get_timeleft(int *time_left)
103{ 103{
104 spin_lock(&wdt_lock); 104 spin_lock(&wdt_lock);
105 *time_left = readl(WDT_VAL) / wdt_tclk; 105 *time_left = readl(WDT_VAL) / wdt_tclk;
@@ -107,16 +107,16 @@ static int orion5x_wdt_get_timeleft(int *time_left)
107 return 0; 107 return 0;
108} 108}
109 109
110static int orion5x_wdt_open(struct inode *inode, struct file *file) 110static int orion_wdt_open(struct inode *inode, struct file *file)
111{ 111{
112 if (test_and_set_bit(WDT_IN_USE, &wdt_status)) 112 if (test_and_set_bit(WDT_IN_USE, &wdt_status))
113 return -EBUSY; 113 return -EBUSY;
114 clear_bit(WDT_OK_TO_CLOSE, &wdt_status); 114 clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
115 orion5x_wdt_enable(); 115 orion_wdt_enable();
116 return nonseekable_open(inode, file); 116 return nonseekable_open(inode, file);
117} 117}
118 118
119static ssize_t orion5x_wdt_write(struct file *file, const char *data, 119static ssize_t orion_wdt_write(struct file *file, const char *data,
120 size_t len, loff_t *ppos) 120 size_t len, loff_t *ppos)
121{ 121{
122 if (len) { 122 if (len) {
@@ -133,18 +133,18 @@ static ssize_t orion5x_wdt_write(struct file *file, const char *data,
133 set_bit(WDT_OK_TO_CLOSE, &wdt_status); 133 set_bit(WDT_OK_TO_CLOSE, &wdt_status);
134 } 134 }
135 } 135 }
136 orion5x_wdt_ping(); 136 orion_wdt_ping();
137 } 137 }
138 return len; 138 return len;
139} 139}
140 140
141static int orion5x_wdt_settimeout(int new_time) 141static int orion_wdt_settimeout(int new_time)
142{ 142{
143 if ((new_time <= 0) || (new_time > wdt_max_duration)) 143 if ((new_time <= 0) || (new_time > wdt_max_duration))
144 return -EINVAL; 144 return -EINVAL;
145 145
146 /* Set new watchdog time to be used when 146 /* Set new watchdog time to be used when
147 * orion5x_wdt_enable() or orion5x_wdt_ping() is called. */ 147 * orion_wdt_enable() or orion_wdt_ping() is called. */
148 heartbeat = new_time; 148 heartbeat = new_time;
149 return 0; 149 return 0;
150} 150}
@@ -152,10 +152,10 @@ static int orion5x_wdt_settimeout(int new_time)
152static const struct watchdog_info ident = { 152static const struct watchdog_info ident = {
153 .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | 153 .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
154 WDIOF_KEEPALIVEPING, 154 WDIOF_KEEPALIVEPING,
155 .identity = "Orion5x Watchdog", 155 .identity = "Orion Watchdog",
156}; 156};
157 157
158static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd, 158static long orion_wdt_ioctl(struct file *file, unsigned int cmd,
159 unsigned long arg) 159 unsigned long arg)
160{ 160{
161 int ret = -ENOTTY; 161 int ret = -ENOTTY;
@@ -173,7 +173,7 @@ static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd,
173 break; 173 break;
174 174
175 case WDIOC_KEEPALIVE: 175 case WDIOC_KEEPALIVE:
176 orion5x_wdt_ping(); 176 orion_wdt_ping();
177 ret = 0; 177 ret = 0;
178 break; 178 break;
179 179
@@ -182,11 +182,11 @@ static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd,
182 if (ret) 182 if (ret)
183 break; 183 break;
184 184
185 if (orion5x_wdt_settimeout(time)) { 185 if (orion_wdt_settimeout(time)) {
186 ret = -EINVAL; 186 ret = -EINVAL;
187 break; 187 break;
188 } 188 }
189 orion5x_wdt_ping(); 189 orion_wdt_ping();
190 /* Fall through */ 190 /* Fall through */
191 191
192 case WDIOC_GETTIMEOUT: 192 case WDIOC_GETTIMEOUT:
@@ -194,7 +194,7 @@ static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd,
194 break; 194 break;
195 195
196 case WDIOC_GETTIMELEFT: 196 case WDIOC_GETTIMELEFT:
197 if (orion5x_wdt_get_timeleft(&time)) { 197 if (orion_wdt_get_timeleft(&time)) {
198 ret = -EINVAL; 198 ret = -EINVAL;
199 break; 199 break;
200 } 200 }
@@ -204,10 +204,10 @@ static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd,
204 return ret; 204 return ret;
205} 205}
206 206
207static int orion5x_wdt_release(struct inode *inode, struct file *file) 207static int orion_wdt_release(struct inode *inode, struct file *file)
208{ 208{
209 if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) 209 if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
210 orion5x_wdt_disable(); 210 orion_wdt_disable();
211 else 211 else
212 printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - " 212 printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - "
213 "timer will not stop\n"); 213 "timer will not stop\n");
@@ -218,98 +218,98 @@ static int orion5x_wdt_release(struct inode *inode, struct file *file)
218} 218}
219 219
220 220
221static const struct file_operations orion5x_wdt_fops = { 221static const struct file_operations orion_wdt_fops = {
222 .owner = THIS_MODULE, 222 .owner = THIS_MODULE,
223 .llseek = no_llseek, 223 .llseek = no_llseek,
224 .write = orion5x_wdt_write, 224 .write = orion_wdt_write,
225 .unlocked_ioctl = orion5x_wdt_ioctl, 225 .unlocked_ioctl = orion_wdt_ioctl,
226 .open = orion5x_wdt_open, 226 .open = orion_wdt_open,
227 .release = orion5x_wdt_release, 227 .release = orion_wdt_release,
228}; 228};
229 229
230static struct miscdevice orion5x_wdt_miscdev = { 230static struct miscdevice orion_wdt_miscdev = {
231 .minor = WATCHDOG_MINOR, 231 .minor = WATCHDOG_MINOR,
232 .name = "watchdog", 232 .name = "watchdog",
233 .fops = &orion5x_wdt_fops, 233 .fops = &orion_wdt_fops,
234}; 234};
235 235
236static int __devinit orion5x_wdt_probe(struct platform_device *pdev) 236static int __devinit orion_wdt_probe(struct platform_device *pdev)
237{ 237{
238 struct orion5x_wdt_platform_data *pdata = pdev->dev.platform_data; 238 struct orion_wdt_platform_data *pdata = pdev->dev.platform_data;
239 int ret; 239 int ret;
240 240
241 if (pdata) { 241 if (pdata) {
242 wdt_tclk = pdata->tclk; 242 wdt_tclk = pdata->tclk;
243 } else { 243 } else {
 244	 printk(KERN_ERR "Orion5x Watchdog misses platform data\n"); 244	 printk(KERN_ERR "Orion Watchdog is missing platform data\n");
245 return -ENODEV; 245 return -ENODEV;
246 } 246 }
247 247
248 if (orion5x_wdt_miscdev.parent) 248 if (orion_wdt_miscdev.parent)
249 return -EBUSY; 249 return -EBUSY;
250 orion5x_wdt_miscdev.parent = &pdev->dev; 250 orion_wdt_miscdev.parent = &pdev->dev;
251 251
252 wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk; 252 wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk;
253 if (orion5x_wdt_settimeout(heartbeat)) 253 if (orion_wdt_settimeout(heartbeat))
254 heartbeat = wdt_max_duration; 254 heartbeat = wdt_max_duration;
255 255
256 ret = misc_register(&orion5x_wdt_miscdev); 256 ret = misc_register(&orion_wdt_miscdev);
257 if (ret) 257 if (ret)
258 return ret; 258 return ret;
259 259
260 printk(KERN_INFO "Orion5x Watchdog Timer: Initial timeout %d sec%s\n", 260 printk(KERN_INFO "Orion Watchdog Timer: Initial timeout %d sec%s\n",
261 heartbeat, nowayout ? ", nowayout" : ""); 261 heartbeat, nowayout ? ", nowayout" : "");
262 return 0; 262 return 0;
263} 263}
264 264
265static int __devexit orion5x_wdt_remove(struct platform_device *pdev) 265static int __devexit orion_wdt_remove(struct platform_device *pdev)
266{ 266{
267 int ret; 267 int ret;
268 268
269 if (test_bit(WDT_IN_USE, &wdt_status)) { 269 if (test_bit(WDT_IN_USE, &wdt_status)) {
270 orion5x_wdt_disable(); 270 orion_wdt_disable();
271 clear_bit(WDT_IN_USE, &wdt_status); 271 clear_bit(WDT_IN_USE, &wdt_status);
272 } 272 }
273 273
274 ret = misc_deregister(&orion5x_wdt_miscdev); 274 ret = misc_deregister(&orion_wdt_miscdev);
275 if (!ret) 275 if (!ret)
276 orion5x_wdt_miscdev.parent = NULL; 276 orion_wdt_miscdev.parent = NULL;
277 277
278 return ret; 278 return ret;
279} 279}
280 280
281static void orion5x_wdt_shutdown(struct platform_device *pdev) 281static void orion_wdt_shutdown(struct platform_device *pdev)
282{ 282{
283 if (test_bit(WDT_IN_USE, &wdt_status)) 283 if (test_bit(WDT_IN_USE, &wdt_status))
284 orion5x_wdt_disable(); 284 orion_wdt_disable();
285} 285}
286 286
287static struct platform_driver orion5x_wdt_driver = { 287static struct platform_driver orion_wdt_driver = {
288 .probe = orion5x_wdt_probe, 288 .probe = orion_wdt_probe,
289 .remove = __devexit_p(orion5x_wdt_remove), 289 .remove = __devexit_p(orion_wdt_remove),
290 .shutdown = orion5x_wdt_shutdown, 290 .shutdown = orion_wdt_shutdown,
291 .driver = { 291 .driver = {
292 .owner = THIS_MODULE, 292 .owner = THIS_MODULE,
293 .name = "orion5x_wdt", 293 .name = "orion_wdt",
294 }, 294 },
295}; 295};
296 296
297static int __init orion5x_wdt_init(void) 297static int __init orion_wdt_init(void)
298{ 298{
299 spin_lock_init(&wdt_lock); 299 spin_lock_init(&wdt_lock);
300 return platform_driver_register(&orion5x_wdt_driver); 300 return platform_driver_register(&orion_wdt_driver);
301} 301}
302 302
303static void __exit orion5x_wdt_exit(void) 303static void __exit orion_wdt_exit(void)
304{ 304{
305 platform_driver_unregister(&orion5x_wdt_driver); 305 platform_driver_unregister(&orion_wdt_driver);
306} 306}
307 307
308module_init(orion5x_wdt_init); 308module_init(orion_wdt_init);
309module_exit(orion5x_wdt_exit); 309module_exit(orion_wdt_exit);
310 310
311MODULE_AUTHOR("Sylver Bruneau <sylver.bruneau@googlemail.com>"); 311MODULE_AUTHOR("Sylver Bruneau <sylver.bruneau@googlemail.com>");
312MODULE_DESCRIPTION("Orion5x Processor Watchdog"); 312MODULE_DESCRIPTION("Orion Processor Watchdog");
313 313
314module_param(heartbeat, int, 0); 314module_param(heartbeat, int, 0);
315MODULE_PARM_DESC(heartbeat, "Initial watchdog heartbeat in seconds"); 315MODULE_PARM_DESC(heartbeat, "Initial watchdog heartbeat in seconds");
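The rename does not change the driver's userspace contract: it still registers the standard watchdog miscdevice, so the usual /dev/watchdog sequence keeps working. A minimal sketch of that sequence, assuming the default /dev/watchdog node and the conventional 'V' magic-close character implied by WDIOF_MAGICCLOSE above:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 30;	/* seconds; clamped by wdt_max_duration */
	int fd = open("/dev/watchdog", O_WRONLY);	/* orion_wdt_open() arms the timer */

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* the WDIOC_SETTIMEOUT case above */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* same effect as orion_wdt_ping() */
	write(fd, "V", 1);	/* magic close: sets WDT_OK_TO_CLOSE */
	close(fd);		/* release disables the timer only after 'V' */
	return 0;
}

Without the 'V' write, orion_wdt_release() leaves the timer running and logs the "Device closed unexpectedly" warning shown above.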
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 8ac9cddac57..cab100acf98 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -18,6 +18,16 @@ config XEN_SCRUB_PAGES
18 secure, but slightly less efficient. 18 secure, but slightly less efficient.
19 If in doubt, say yes. 19 If in doubt, say yes.
20 20
21config XEN_DEV_EVTCHN
22 tristate "Xen /dev/xen/evtchn device"
23 depends on XEN
24 default y
25 help
 26	 The evtchn driver allows a userspace process to trigger event
27 channels and to receive notification of an event channel
28 firing.
29 If in doubt, say yes.
30
21config XENFS 31config XENFS
22 tristate "Xen filesystem" 32 tristate "Xen filesystem"
23 depends on XEN 33 depends on XEN
@@ -41,3 +51,13 @@ config XEN_COMPAT_XENFS
41 a xen platform. 51 a xen platform.
42 If in doubt, say yes. 52 If in doubt, say yes.
43 53
54config XEN_SYS_HYPERVISOR
55 bool "Create xen entries under /sys/hypervisor"
56 depends on XEN && SYSFS
57 select SYS_HYPERVISOR
58 default y
59 help
60 Create entries under /sys/hypervisor describing the Xen
61 hypervisor environment. When running native or in another
62 virtual environment, /sys/hypervisor will still be present,
63 but will have no xen contents. \ No newline at end of file
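For a sense of what this option exposes, here is a hedged userspace sketch reading one of the entries created by the sys-hypervisor.c file added later in this patch; the /sys/hypervisor/type path and its "xen" value follow from type_show() there:

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/hypervisor/type", "r");	/* created by xen_sysfs_type_init() */

	if (!f)
		return 1;	/* CONFIG_XEN_SYS_HYPERVISOR=n, or sysfs unavailable */
	if (fgets(buf, sizeof(buf), f))
		printf("hypervisor: %s", buf);	/* prints "xen" in a Xen guest */
	fclose(f);
	return 0;
}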
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index ff8accc9e10..ec2a39b1e26 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -4,4 +4,6 @@ obj-y += xenbus/
4obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o 4obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
5obj-$(CONFIG_XEN_XENCOMM) += xencomm.o 5obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
6obj-$(CONFIG_XEN_BALLOON) += balloon.o 6obj-$(CONFIG_XEN_BALLOON) += balloon.o
7obj-$(CONFIG_XENFS) += xenfs/ \ No newline at end of file 7obj-$(CONFIG_XEN_DEV_EVTCHN) += evtchn.o
8obj-$(CONFIG_XENFS) += xenfs/
9obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o \ No newline at end of file
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 30963af5dba..891d2e90753 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -151,6 +151,12 @@ static unsigned int evtchn_from_irq(unsigned irq)
151 return info_for_irq(irq)->evtchn; 151 return info_for_irq(irq)->evtchn;
152} 152}
153 153
154unsigned irq_from_evtchn(unsigned int evtchn)
155{
156 return evtchn_to_irq[evtchn];
157}
158EXPORT_SYMBOL_GPL(irq_from_evtchn);
159
154static enum ipi_vector ipi_from_irq(unsigned irq) 160static enum ipi_vector ipi_from_irq(unsigned irq)
155{ 161{
156 struct irq_info *info = info_for_irq(irq); 162 struct irq_info *info = info_for_irq(irq);
@@ -335,7 +341,7 @@ static int find_unbound_irq(void)
335 if (irq == nr_irqs) 341 if (irq == nr_irqs)
336 panic("No available IRQ to bind to: increase nr_irqs!\n"); 342 panic("No available IRQ to bind to: increase nr_irqs!\n");
337 343
338 desc = irq_to_desc_alloc_cpu(irq, 0); 344 desc = irq_to_desc_alloc_node(irq, 0);
339 if (WARN_ON(desc == NULL)) 345 if (WARN_ON(desc == NULL))
340 return -1; 346 return -1;
341 347
@@ -688,13 +694,13 @@ void rebind_evtchn_irq(int evtchn, int irq)
688} 694}
689 695
690/* Rebind an evtchn so that it gets delivered to a specific cpu */ 696/* Rebind an evtchn so that it gets delivered to a specific cpu */
691static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu) 697static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
692{ 698{
693 struct evtchn_bind_vcpu bind_vcpu; 699 struct evtchn_bind_vcpu bind_vcpu;
694 int evtchn = evtchn_from_irq(irq); 700 int evtchn = evtchn_from_irq(irq);
695 701
696 if (!VALID_EVTCHN(evtchn)) 702 if (!VALID_EVTCHN(evtchn))
697 return; 703 return -1;
698 704
699 /* Send future instances of this interrupt to other vcpu. */ 705 /* Send future instances of this interrupt to other vcpu. */
700 bind_vcpu.port = evtchn; 706 bind_vcpu.port = evtchn;
@@ -707,13 +713,15 @@ static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
707 */ 713 */
708 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) 714 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
709 bind_evtchn_to_cpu(evtchn, tcpu); 715 bind_evtchn_to_cpu(evtchn, tcpu);
710}
711 716
717 return 0;
718}
712 719
713static void set_affinity_irq(unsigned irq, const struct cpumask *dest) 720static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
714{ 721{
715 unsigned tcpu = cpumask_first(dest); 722 unsigned tcpu = cpumask_first(dest);
716 rebind_irq_to_cpu(irq, tcpu); 723
724 return rebind_irq_to_cpu(irq, tcpu);
717} 725}
718 726
719int resend_irq_on_evtchn(unsigned int irq) 727int resend_irq_on_evtchn(unsigned int irq)
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
new file mode 100644
index 00000000000..af031950f9b
--- /dev/null
+++ b/drivers/xen/evtchn.c
@@ -0,0 +1,507 @@
1/******************************************************************************
2 * evtchn.c
3 *
4 * Driver for receiving and demuxing event-channel signals.
5 *
6 * Copyright (c) 2004-2005, K A Fraser
7 * Multi-process extensions Copyright (c) 2004, Steven Smith
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version 2
11 * as published by the Free Software Foundation; or, when distributed
12 * separately from the Linux kernel or incorporated into other
13 * software packages, subject to the following license:
14 *
15 * Permission is hereby granted, free of charge, to any person obtaining a copy
16 * of this source file (the "Software"), to deal in the Software without
17 * restriction, including without limitation the rights to use, copy, modify,
18 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
19 * and to permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
21 *
22 * The above copyright notice and this permission notice shall be included in
23 * all copies or substantial portions of the Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
30 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31 * IN THE SOFTWARE.
32 */
33
34#include <linux/module.h>
35#include <linux/kernel.h>
36#include <linux/sched.h>
37#include <linux/slab.h>
38#include <linux/string.h>
39#include <linux/errno.h>
40#include <linux/fs.h>
41#include <linux/errno.h>
42#include <linux/miscdevice.h>
43#include <linux/major.h>
44#include <linux/proc_fs.h>
45#include <linux/stat.h>
46#include <linux/poll.h>
47#include <linux/irq.h>
48#include <linux/init.h>
49#include <linux/gfp.h>
50#include <linux/mutex.h>
51#include <linux/cpu.h>
52#include <xen/events.h>
53#include <xen/evtchn.h>
54#include <asm/xen/hypervisor.h>
55
56struct per_user_data {
57 struct mutex bind_mutex; /* serialize bind/unbind operations */
58
59 /* Notification ring, accessed via /dev/xen/evtchn. */
60#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t))
61#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
62 evtchn_port_t *ring;
63 unsigned int ring_cons, ring_prod, ring_overflow;
64 struct mutex ring_cons_mutex; /* protect against concurrent readers */
65
66 /* Processes wait on this queue when ring is empty. */
67 wait_queue_head_t evtchn_wait;
68 struct fasync_struct *evtchn_async_queue;
69 const char *name;
70};
71
72/* Who's bound to each port? */
73static struct per_user_data *port_user[NR_EVENT_CHANNELS];
74static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */
75
76irqreturn_t evtchn_interrupt(int irq, void *data)
77{
78 unsigned int port = (unsigned long)data;
79 struct per_user_data *u;
80
81 spin_lock(&port_user_lock);
82
83 u = port_user[port];
84
85 disable_irq_nosync(irq);
86
87 if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
88 u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
89 wmb(); /* Ensure ring contents visible */
90 if (u->ring_cons == u->ring_prod++) {
91 wake_up_interruptible(&u->evtchn_wait);
92 kill_fasync(&u->evtchn_async_queue,
93 SIGIO, POLL_IN);
94 }
95 } else {
96 u->ring_overflow = 1;
97 }
98
99 spin_unlock(&port_user_lock);
100
101 return IRQ_HANDLED;
102}
103
104static ssize_t evtchn_read(struct file *file, char __user *buf,
105 size_t count, loff_t *ppos)
106{
107 int rc;
108 unsigned int c, p, bytes1 = 0, bytes2 = 0;
109 struct per_user_data *u = file->private_data;
110
111 /* Whole number of ports. */
112 count &= ~(sizeof(evtchn_port_t)-1);
113
114 if (count == 0)
115 return 0;
116
117 if (count > PAGE_SIZE)
118 count = PAGE_SIZE;
119
120 for (;;) {
121 mutex_lock(&u->ring_cons_mutex);
122
123 rc = -EFBIG;
124 if (u->ring_overflow)
125 goto unlock_out;
126
127 c = u->ring_cons;
128 p = u->ring_prod;
129 if (c != p)
130 break;
131
132 mutex_unlock(&u->ring_cons_mutex);
133
134 if (file->f_flags & O_NONBLOCK)
135 return -EAGAIN;
136
137 rc = wait_event_interruptible(u->evtchn_wait,
138 u->ring_cons != u->ring_prod);
139 if (rc)
140 return rc;
141 }
142
143 /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
144 if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
145 bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
146 sizeof(evtchn_port_t);
147 bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
148 } else {
149 bytes1 = (p - c) * sizeof(evtchn_port_t);
150 bytes2 = 0;
151 }
152
153 /* Truncate chunks according to caller's maximum byte count. */
154 if (bytes1 > count) {
155 bytes1 = count;
156 bytes2 = 0;
157 } else if ((bytes1 + bytes2) > count) {
158 bytes2 = count - bytes1;
159 }
160
161 rc = -EFAULT;
162 rmb(); /* Ensure that we see the port before we copy it. */
163 if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
164 ((bytes2 != 0) &&
165 copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
166 goto unlock_out;
167
168 u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
169 rc = bytes1 + bytes2;
170
171 unlock_out:
172 mutex_unlock(&u->ring_cons_mutex);
173 return rc;
174}
175
176static ssize_t evtchn_write(struct file *file, const char __user *buf,
177 size_t count, loff_t *ppos)
178{
179 int rc, i;
180 evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
181 struct per_user_data *u = file->private_data;
182
183 if (kbuf == NULL)
184 return -ENOMEM;
185
186 /* Whole number of ports. */
187 count &= ~(sizeof(evtchn_port_t)-1);
188
189 rc = 0;
190 if (count == 0)
191 goto out;
192
193 if (count > PAGE_SIZE)
194 count = PAGE_SIZE;
195
196 rc = -EFAULT;
197 if (copy_from_user(kbuf, buf, count) != 0)
198 goto out;
199
200 spin_lock_irq(&port_user_lock);
201 for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
202 if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
203 enable_irq(irq_from_evtchn(kbuf[i]));
204 spin_unlock_irq(&port_user_lock);
205
206 rc = count;
207
208 out:
209 free_page((unsigned long)kbuf);
210 return rc;
211}
212
213static int evtchn_bind_to_user(struct per_user_data *u, int port)
214{
215 int rc = 0;
216
217 /*
218 * Ports are never reused, so every caller should pass in a
219 * unique port.
220 *
221 * (Locking not necessary because we haven't registered the
222 * interrupt handler yet, and our caller has already
223 * serialized bind operations.)
224 */
225 BUG_ON(port_user[port] != NULL);
226 port_user[port] = u;
227
228 rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
229 u->name, (void *)(unsigned long)port);
230 if (rc >= 0)
231 rc = 0;
232
233 return rc;
234}
235
236static void evtchn_unbind_from_user(struct per_user_data *u, int port)
237{
238 int irq = irq_from_evtchn(port);
239
240 unbind_from_irqhandler(irq, (void *)(unsigned long)port);
241
242 /* make sure we unbind the irq handler before clearing the port */
243 barrier();
244
245 port_user[port] = NULL;
246}
247
248static long evtchn_ioctl(struct file *file,
249 unsigned int cmd, unsigned long arg)
250{
251 int rc;
252 struct per_user_data *u = file->private_data;
253 void __user *uarg = (void __user *) arg;
254
255 /* Prevent bind from racing with unbind */
256 mutex_lock(&u->bind_mutex);
257
258 switch (cmd) {
259 case IOCTL_EVTCHN_BIND_VIRQ: {
260 struct ioctl_evtchn_bind_virq bind;
261 struct evtchn_bind_virq bind_virq;
262
263 rc = -EFAULT;
264 if (copy_from_user(&bind, uarg, sizeof(bind)))
265 break;
266
267 bind_virq.virq = bind.virq;
268 bind_virq.vcpu = 0;
269 rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
270 &bind_virq);
271 if (rc != 0)
272 break;
273
274 rc = evtchn_bind_to_user(u, bind_virq.port);
275 if (rc == 0)
276 rc = bind_virq.port;
277 break;
278 }
279
280 case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
281 struct ioctl_evtchn_bind_interdomain bind;
282 struct evtchn_bind_interdomain bind_interdomain;
283
284 rc = -EFAULT;
285 if (copy_from_user(&bind, uarg, sizeof(bind)))
286 break;
287
288 bind_interdomain.remote_dom = bind.remote_domain;
289 bind_interdomain.remote_port = bind.remote_port;
290 rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
291 &bind_interdomain);
292 if (rc != 0)
293 break;
294
295 rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
296 if (rc == 0)
297 rc = bind_interdomain.local_port;
298 break;
299 }
300
301 case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
302 struct ioctl_evtchn_bind_unbound_port bind;
303 struct evtchn_alloc_unbound alloc_unbound;
304
305 rc = -EFAULT;
306 if (copy_from_user(&bind, uarg, sizeof(bind)))
307 break;
308
309 alloc_unbound.dom = DOMID_SELF;
310 alloc_unbound.remote_dom = bind.remote_domain;
311 rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
312 &alloc_unbound);
313 if (rc != 0)
314 break;
315
316 rc = evtchn_bind_to_user(u, alloc_unbound.port);
317 if (rc == 0)
318 rc = alloc_unbound.port;
319 break;
320 }
321
322 case IOCTL_EVTCHN_UNBIND: {
323 struct ioctl_evtchn_unbind unbind;
324
325 rc = -EFAULT;
326 if (copy_from_user(&unbind, uarg, sizeof(unbind)))
327 break;
328
329 rc = -EINVAL;
330 if (unbind.port >= NR_EVENT_CHANNELS)
331 break;
332
333 spin_lock_irq(&port_user_lock);
334
335 rc = -ENOTCONN;
336 if (port_user[unbind.port] != u) {
337 spin_unlock_irq(&port_user_lock);
338 break;
339 }
340
341 evtchn_unbind_from_user(u, unbind.port);
342
343 spin_unlock_irq(&port_user_lock);
344
345 rc = 0;
346 break;
347 }
348
349 case IOCTL_EVTCHN_NOTIFY: {
350 struct ioctl_evtchn_notify notify;
351
352 rc = -EFAULT;
353 if (copy_from_user(&notify, uarg, sizeof(notify)))
354 break;
355
356 if (notify.port >= NR_EVENT_CHANNELS) {
357 rc = -EINVAL;
358 } else if (port_user[notify.port] != u) {
359 rc = -ENOTCONN;
360 } else {
361 notify_remote_via_evtchn(notify.port);
362 rc = 0;
363 }
364 break;
365 }
366
367 case IOCTL_EVTCHN_RESET: {
368 /* Initialise the ring to empty. Clear errors. */
369 mutex_lock(&u->ring_cons_mutex);
370 spin_lock_irq(&port_user_lock);
371 u->ring_cons = u->ring_prod = u->ring_overflow = 0;
372 spin_unlock_irq(&port_user_lock);
373 mutex_unlock(&u->ring_cons_mutex);
374 rc = 0;
375 break;
376 }
377
378 default:
379 rc = -ENOSYS;
380 break;
381 }
382 mutex_unlock(&u->bind_mutex);
383
384 return rc;
385}
386
387static unsigned int evtchn_poll(struct file *file, poll_table *wait)
388{
389 unsigned int mask = POLLOUT | POLLWRNORM;
390 struct per_user_data *u = file->private_data;
391
392 poll_wait(file, &u->evtchn_wait, wait);
393 if (u->ring_cons != u->ring_prod)
394 mask |= POLLIN | POLLRDNORM;
395 if (u->ring_overflow)
396 mask = POLLERR;
397 return mask;
398}
399
400static int evtchn_fasync(int fd, struct file *filp, int on)
401{
402 struct per_user_data *u = filp->private_data;
403 return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
404}
405
406static int evtchn_open(struct inode *inode, struct file *filp)
407{
408 struct per_user_data *u;
409
410 u = kzalloc(sizeof(*u), GFP_KERNEL);
411 if (u == NULL)
412 return -ENOMEM;
413
414 u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
415 if (u->name == NULL) {
416 kfree(u);
417 return -ENOMEM;
418 }
419
420 init_waitqueue_head(&u->evtchn_wait);
421
422 u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
423 if (u->ring == NULL) {
424 kfree(u->name);
425 kfree(u);
426 return -ENOMEM;
427 }
428
429 mutex_init(&u->bind_mutex);
430 mutex_init(&u->ring_cons_mutex);
431
432 filp->private_data = u;
433
434 return 0;
435}
436
437static int evtchn_release(struct inode *inode, struct file *filp)
438{
439 int i;
440 struct per_user_data *u = filp->private_data;
441
442 spin_lock_irq(&port_user_lock);
443
444 free_page((unsigned long)u->ring);
445
446 for (i = 0; i < NR_EVENT_CHANNELS; i++) {
447 if (port_user[i] != u)
448 continue;
449
450 evtchn_unbind_from_user(port_user[i], i);
451 }
452
453 spin_unlock_irq(&port_user_lock);
454
455 kfree(u->name);
456 kfree(u);
457
458 return 0;
459}
460
461static const struct file_operations evtchn_fops = {
462 .owner = THIS_MODULE,
463 .read = evtchn_read,
464 .write = evtchn_write,
465 .unlocked_ioctl = evtchn_ioctl,
466 .poll = evtchn_poll,
467 .fasync = evtchn_fasync,
468 .open = evtchn_open,
469 .release = evtchn_release,
470};
471
472static struct miscdevice evtchn_miscdev = {
473 .minor = MISC_DYNAMIC_MINOR,
474 .name = "evtchn",
475 .fops = &evtchn_fops,
476};
477static int __init evtchn_init(void)
478{
479 int err;
480
481 if (!xen_domain())
482 return -ENODEV;
483
484 spin_lock_init(&port_user_lock);
485 memset(port_user, 0, sizeof(port_user));
486
487 /* Create '/dev/misc/evtchn'. */
488 err = misc_register(&evtchn_miscdev);
489 if (err != 0) {
490 printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
491 return err;
492 }
493
494 printk(KERN_INFO "Event-channel device installed.\n");
495
496 return 0;
497}
498
499static void __exit evtchn_cleanup(void)
500{
501 misc_deregister(&evtchn_miscdev);
502}
503
504module_init(evtchn_init);
505module_exit(evtchn_cleanup);
506
507MODULE_LICENSE("GPL");
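To make the read/write/ioctl protocol above concrete, here is a hedged userspace sketch. It assumes the Xen tools header that defines the IOCTL_EVTCHN_* numbers (xen/sys/evtchn.h in the Xen tree) and a /dev/xen/evtchn node created by udev for the "evtchn" miscdevice. read() returns whole evtchn_port_t values from the notification ring; writing a port back re-enables its IRQ, as evtchn_write() shows.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xen/sys/evtchn.h>	/* IOCTL_EVTCHN_*, struct ioctl_evtchn_* */

int main(void)
{
	struct ioctl_evtchn_bind_unbound_port bind = { .remote_domain = 0 };
	uint32_t port;
	int fd, rc;

	fd = open("/dev/xen/evtchn", O_RDWR);
	if (fd < 0) {
		perror("open /dev/xen/evtchn");
		return 1;
	}

	/* Allocate a fresh port; the ioctl handler returns the port number. */
	rc = ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
	if (rc < 0) {
		perror("IOCTL_EVTCHN_BIND_UNBOUND_PORT");
		return 1;
	}
	printf("bound port %d\n", rc);

	/* Block until the remote end signals, then unmask and re-arm. */
	if (read(fd, &port, sizeof(port)) == sizeof(port)) {
		/* handle the event for 'port' here, then... */
		write(fd, &port, sizeof(port));	/* enable_irq() via evtchn_write() */
	}

	close(fd);	/* evtchn_release() unbinds anything still bound */
	return 0;
}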
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 4b5b84837ee..10d03d7931c 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -43,7 +43,7 @@ static int xen_suspend(void *data)
43 if (err) { 43 if (err) {
44 printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n", 44 printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n",
45 err); 45 err);
46 device_power_up(PMSG_RESUME); 46 dpm_resume_noirq(PMSG_RESUME);
47 return err; 47 return err;
48 } 48 }
49 49
@@ -69,7 +69,7 @@ static int xen_suspend(void *data)
69 } 69 }
70 70
71 sysdev_resume(); 71 sysdev_resume();
72 device_power_up(PMSG_RESUME); 72 dpm_resume_noirq(PMSG_RESUME);
73 73
74 return 0; 74 return 0;
75} 75}
@@ -92,19 +92,18 @@ static void do_suspend(void)
92 } 92 }
93#endif 93#endif
94 94
95 err = device_suspend(PMSG_SUSPEND); 95 err = dpm_suspend_start(PMSG_SUSPEND);
96 if (err) { 96 if (err) {
97 printk(KERN_ERR "xen suspend: device_suspend %d\n", err); 97 printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err);
98 goto out; 98 goto out;
99 } 99 }
100 100
101 printk("suspending xenbus...\n"); 101 printk(KERN_DEBUG "suspending xenstore...\n");
102 /* XXX use normal device tree? */ 102 xs_suspend();
103 xenbus_suspend();
104 103
105 err = device_power_down(PMSG_SUSPEND); 104 err = dpm_suspend_noirq(PMSG_SUSPEND);
106 if (err) { 105 if (err) {
107 printk(KERN_ERR "device_power_down failed: %d\n", err); 106 printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err);
108 goto resume_devices; 107 goto resume_devices;
109 } 108 }
110 109
@@ -116,14 +115,14 @@ static void do_suspend(void)
116 115
117 if (!cancelled) { 116 if (!cancelled) {
118 xen_arch_resume(); 117 xen_arch_resume();
119 xenbus_resume(); 118 xs_resume();
120 } else 119 } else
121 xenbus_suspend_cancel(); 120 xs_suspend_cancel();
122 121
123 device_power_up(PMSG_RESUME); 122 dpm_resume_noirq(PMSG_RESUME);
124 123
125resume_devices: 124resume_devices:
126 device_resume(PMSG_RESUME); 125 dpm_resume_end(PMSG_RESUME);
127 126
128 /* Make sure timer events get retriggered on all CPUs */ 127 /* Make sure timer events get retriggered on all CPUs */
129 clock_was_set(); 128 clock_was_set();
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
new file mode 100644
index 00000000000..88a60e03ccf
--- /dev/null
+++ b/drivers/xen/sys-hypervisor.c
@@ -0,0 +1,445 @@
1/*
2 * copyright (c) 2006 IBM Corporation
3 * Authored by: Mike D. Day <ncmike@us.ibm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/kobject.h>
13
14#include <asm/xen/hypervisor.h>
15#include <asm/xen/hypercall.h>
16
17#include <xen/xenbus.h>
18#include <xen/interface/xen.h>
19#include <xen/interface/version.h>
20
21#define HYPERVISOR_ATTR_RO(_name) \
22static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name)
23
24#define HYPERVISOR_ATTR_RW(_name) \
25static struct hyp_sysfs_attr _name##_attr = \
26 __ATTR(_name, 0644, _name##_show, _name##_store)
27
28struct hyp_sysfs_attr {
29 struct attribute attr;
30 ssize_t (*show)(struct hyp_sysfs_attr *, char *);
31 ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t);
32 void *hyp_attr_data;
33};
34
35static ssize_t type_show(struct hyp_sysfs_attr *attr, char *buffer)
36{
37 return sprintf(buffer, "xen\n");
38}
39
40HYPERVISOR_ATTR_RO(type);
41
42static int __init xen_sysfs_type_init(void)
43{
44 return sysfs_create_file(hypervisor_kobj, &type_attr.attr);
45}
46
47static void xen_sysfs_type_destroy(void)
48{
49 sysfs_remove_file(hypervisor_kobj, &type_attr.attr);
50}
51
52/* xen version attributes */
53static ssize_t major_show(struct hyp_sysfs_attr *attr, char *buffer)
54{
55 int version = HYPERVISOR_xen_version(XENVER_version, NULL);
56 if (version)
57 return sprintf(buffer, "%d\n", version >> 16);
58 return -ENODEV;
59}
60
61HYPERVISOR_ATTR_RO(major);
62
63static ssize_t minor_show(struct hyp_sysfs_attr *attr, char *buffer)
64{
65 int version = HYPERVISOR_xen_version(XENVER_version, NULL);
66 if (version)
67 return sprintf(buffer, "%d\n", version & 0xff);
68 return -ENODEV;
69}
70
71HYPERVISOR_ATTR_RO(minor);
72
73static ssize_t extra_show(struct hyp_sysfs_attr *attr, char *buffer)
74{
75 int ret = -ENOMEM;
76 char *extra;
77
78 extra = kmalloc(XEN_EXTRAVERSION_LEN, GFP_KERNEL);
79 if (extra) {
80 ret = HYPERVISOR_xen_version(XENVER_extraversion, extra);
81 if (!ret)
82 ret = sprintf(buffer, "%s\n", extra);
83 kfree(extra);
84 }
85
86 return ret;
87}
88
89HYPERVISOR_ATTR_RO(extra);
90
91static struct attribute *version_attrs[] = {
92 &major_attr.attr,
93 &minor_attr.attr,
94 &extra_attr.attr,
95 NULL
96};
97
98static struct attribute_group version_group = {
99 .name = "version",
100 .attrs = version_attrs,
101};
102
103static int __init xen_sysfs_version_init(void)
104{
105 return sysfs_create_group(hypervisor_kobj, &version_group);
106}
107
108static void xen_sysfs_version_destroy(void)
109{
110 sysfs_remove_group(hypervisor_kobj, &version_group);
111}
112
113/* UUID */
114
115static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
116{
117 char *vm, *val;
118 int ret;
119 extern int xenstored_ready;
120
121 if (!xenstored_ready)
122 return -EBUSY;
123
124 vm = xenbus_read(XBT_NIL, "vm", "", NULL);
125 if (IS_ERR(vm))
126 return PTR_ERR(vm);
127 val = xenbus_read(XBT_NIL, vm, "uuid", NULL);
128 kfree(vm);
129 if (IS_ERR(val))
130 return PTR_ERR(val);
131 ret = sprintf(buffer, "%s\n", val);
132 kfree(val);
133 return ret;
134}
135
136HYPERVISOR_ATTR_RO(uuid);
137
138static int __init xen_sysfs_uuid_init(void)
139{
140 return sysfs_create_file(hypervisor_kobj, &uuid_attr.attr);
141}
142
143static void xen_sysfs_uuid_destroy(void)
144{
145 sysfs_remove_file(hypervisor_kobj, &uuid_attr.attr);
146}
147
148/* xen compilation attributes */
149
150static ssize_t compiler_show(struct hyp_sysfs_attr *attr, char *buffer)
151{
152 int ret = -ENOMEM;
153 struct xen_compile_info *info;
154
155 info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
156 if (info) {
157 ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
158 if (!ret)
159 ret = sprintf(buffer, "%s\n", info->compiler);
160 kfree(info);
161 }
162
163 return ret;
164}
165
166HYPERVISOR_ATTR_RO(compiler);
167
168static ssize_t compiled_by_show(struct hyp_sysfs_attr *attr, char *buffer)
169{
170 int ret = -ENOMEM;
171 struct xen_compile_info *info;
172
173 info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
174 if (info) {
175 ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
176 if (!ret)
177 ret = sprintf(buffer, "%s\n", info->compile_by);
178 kfree(info);
179 }
180
181 return ret;
182}
183
184HYPERVISOR_ATTR_RO(compiled_by);
185
186static ssize_t compile_date_show(struct hyp_sysfs_attr *attr, char *buffer)
187{
188 int ret = -ENOMEM;
189 struct xen_compile_info *info;
190
191 info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
192 if (info) {
193 ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
194 if (!ret)
195 ret = sprintf(buffer, "%s\n", info->compile_date);
196 kfree(info);
197 }
198
199 return ret;
200}
201
202HYPERVISOR_ATTR_RO(compile_date);
203
204static struct attribute *xen_compile_attrs[] = {
205 &compiler_attr.attr,
206 &compiled_by_attr.attr,
207 &compile_date_attr.attr,
208 NULL
209};
210
211static struct attribute_group xen_compilation_group = {
212 .name = "compilation",
213 .attrs = xen_compile_attrs,
214};
215
 216static int __init xen_compilation_init(void)
217{
218 return sysfs_create_group(hypervisor_kobj, &xen_compilation_group);
219}
220
221static void xen_compilation_destroy(void)
222{
223 sysfs_remove_group(hypervisor_kobj, &xen_compilation_group);
224}
225
226/* xen properties info */
227
228static ssize_t capabilities_show(struct hyp_sysfs_attr *attr, char *buffer)
229{
230 int ret = -ENOMEM;
231 char *caps;
232
233 caps = kmalloc(XEN_CAPABILITIES_INFO_LEN, GFP_KERNEL);
234 if (caps) {
235 ret = HYPERVISOR_xen_version(XENVER_capabilities, caps);
236 if (!ret)
237 ret = sprintf(buffer, "%s\n", caps);
238 kfree(caps);
239 }
240
241 return ret;
242}
243
244HYPERVISOR_ATTR_RO(capabilities);
245
246static ssize_t changeset_show(struct hyp_sysfs_attr *attr, char *buffer)
247{
248 int ret = -ENOMEM;
249 char *cset;
250
251 cset = kmalloc(XEN_CHANGESET_INFO_LEN, GFP_KERNEL);
252 if (cset) {
253 ret = HYPERVISOR_xen_version(XENVER_changeset, cset);
254 if (!ret)
255 ret = sprintf(buffer, "%s\n", cset);
256 kfree(cset);
257 }
258
259 return ret;
260}
261
262HYPERVISOR_ATTR_RO(changeset);
263
264static ssize_t virtual_start_show(struct hyp_sysfs_attr *attr, char *buffer)
265{
266 int ret = -ENOMEM;
267 struct xen_platform_parameters *parms;
268
269 parms = kmalloc(sizeof(struct xen_platform_parameters), GFP_KERNEL);
270 if (parms) {
271 ret = HYPERVISOR_xen_version(XENVER_platform_parameters,
272 parms);
273 if (!ret)
274 ret = sprintf(buffer, "%lx\n", parms->virt_start);
275 kfree(parms);
276 }
277
278 return ret;
279}
280
281HYPERVISOR_ATTR_RO(virtual_start);
282
283static ssize_t pagesize_show(struct hyp_sysfs_attr *attr, char *buffer)
284{
285 int ret;
286
287 ret = HYPERVISOR_xen_version(XENVER_pagesize, NULL);
288 if (ret > 0)
289 ret = sprintf(buffer, "%x\n", ret);
290
291 return ret;
292}
293
294HYPERVISOR_ATTR_RO(pagesize);
295
296static ssize_t xen_feature_show(int index, char *buffer)
297{
298 ssize_t ret;
299 struct xen_feature_info info;
300
301 info.submap_idx = index;
302 ret = HYPERVISOR_xen_version(XENVER_get_features, &info);
303 if (!ret)
304 ret = sprintf(buffer, "%08x", info.submap);
305
306 return ret;
307}
308
309static ssize_t features_show(struct hyp_sysfs_attr *attr, char *buffer)
310{
311 ssize_t len;
312 int i;
313
314 len = 0;
315 for (i = XENFEAT_NR_SUBMAPS-1; i >= 0; i--) {
316 int ret = xen_feature_show(i, buffer + len);
317 if (ret < 0) {
318 if (len == 0)
319 len = ret;
320 break;
321 }
322 len += ret;
323 }
324 if (len > 0)
325 buffer[len++] = '\n';
326
327 return len;
328}
329
330HYPERVISOR_ATTR_RO(features);
331
332static struct attribute *xen_properties_attrs[] = {
333 &capabilities_attr.attr,
334 &changeset_attr.attr,
335 &virtual_start_attr.attr,
336 &pagesize_attr.attr,
337 &features_attr.attr,
338 NULL
339};
340
341static struct attribute_group xen_properties_group = {
342 .name = "properties",
343 .attrs = xen_properties_attrs,
344};
345
346static int __init xen_properties_init(void)
347{
348 return sysfs_create_group(hypervisor_kobj, &xen_properties_group);
349}
350
351static void xen_properties_destroy(void)
352{
353 sysfs_remove_group(hypervisor_kobj, &xen_properties_group);
354}
355
356static int __init hyper_sysfs_init(void)
357{
358 int ret;
359
360 if (!xen_domain())
361 return -ENODEV;
362
363 ret = xen_sysfs_type_init();
364 if (ret)
365 goto out;
366 ret = xen_sysfs_version_init();
367 if (ret)
368 goto version_out;
369 ret = xen_compilation_init();
370 if (ret)
371 goto comp_out;
372 ret = xen_sysfs_uuid_init();
373 if (ret)
374 goto uuid_out;
375 ret = xen_properties_init();
376 if (ret)
377 goto prop_out;
378
379 goto out;
380
381prop_out:
382 xen_sysfs_uuid_destroy();
383uuid_out:
384 xen_compilation_destroy();
385comp_out:
386 xen_sysfs_version_destroy();
387version_out:
388 xen_sysfs_type_destroy();
389out:
390 return ret;
391}
392
393static void __exit hyper_sysfs_exit(void)
394{
395 xen_properties_destroy();
396 xen_compilation_destroy();
397 xen_sysfs_uuid_destroy();
398 xen_sysfs_version_destroy();
399 xen_sysfs_type_destroy();
400
401}
402module_init(hyper_sysfs_init);
403module_exit(hyper_sysfs_exit);
404
405static ssize_t hyp_sysfs_show(struct kobject *kobj,
406 struct attribute *attr,
407 char *buffer)
408{
409 struct hyp_sysfs_attr *hyp_attr;
410 hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
411 if (hyp_attr->show)
412 return hyp_attr->show(hyp_attr, buffer);
413 return 0;
414}
415
416static ssize_t hyp_sysfs_store(struct kobject *kobj,
417 struct attribute *attr,
418 const char *buffer,
419 size_t len)
420{
421 struct hyp_sysfs_attr *hyp_attr;
422 hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
423 if (hyp_attr->store)
424 return hyp_attr->store(hyp_attr, buffer, len);
425 return 0;
426}
427
428static struct sysfs_ops hyp_sysfs_ops = {
429 .show = hyp_sysfs_show,
430 .store = hyp_sysfs_store,
431};
432
433static struct kobj_type hyp_sysfs_kobj_type = {
434 .sysfs_ops = &hyp_sysfs_ops,
435};
436
437static int __init hypervisor_subsys_init(void)
438{
439 if (!xen_domain())
440 return -ENODEV;
441
442 hypervisor_kobj->ktype = &hyp_sysfs_kobj_type;
443 return 0;
444}
445device_initcall(hypervisor_subsys_init);
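The HYPERVISOR_ATTR_RO()/HYPERVISOR_ATTR_RW() macros, together with the container_of() dispatch in hyp_sysfs_show() and hyp_sysfs_store(), make further attributes cheap to add. A hypothetical sketch in the file's own style ("uptime" is illustrative only, not an attribute this patch creates):

/* Hypothetical extra read-only attribute, following the file's pattern. */
static ssize_t uptime_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	/* A real attribute would issue a hypercall here, as the
	 * version and compilation attributes above do. */
	return sprintf(buffer, "unknown\n");
}

HYPERVISOR_ATTR_RO(uptime);	/* defines uptime_attr via __ATTR_RO */

static int __init xen_sysfs_uptime_init(void)
{
	/* hypervisor_kobj is the /sys/hypervisor kobject */
	return sysfs_create_file(hypervisor_kobj, &uptime_attr.attr);
}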
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 773d1cf2328..d42e25d5968 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -71,6 +71,9 @@ static int xenbus_probe_frontend(const char *type, const char *name);
71 71
72static void xenbus_dev_shutdown(struct device *_dev); 72static void xenbus_dev_shutdown(struct device *_dev);
73 73
74static int xenbus_dev_suspend(struct device *dev, pm_message_t state);
75static int xenbus_dev_resume(struct device *dev);
76
74/* If something in array of ids matches this device, return it. */ 77/* If something in array of ids matches this device, return it. */
75static const struct xenbus_device_id * 78static const struct xenbus_device_id *
76match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) 79match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
@@ -188,6 +191,9 @@ static struct xen_bus_type xenbus_frontend = {
188 .remove = xenbus_dev_remove, 191 .remove = xenbus_dev_remove,
189 .shutdown = xenbus_dev_shutdown, 192 .shutdown = xenbus_dev_shutdown,
190 .dev_attrs = xenbus_dev_attrs, 193 .dev_attrs = xenbus_dev_attrs,
194
195 .suspend = xenbus_dev_suspend,
196 .resume = xenbus_dev_resume,
191 }, 197 },
192}; 198};
193 199
@@ -654,6 +660,7 @@ void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
654 660
655 kfree(root); 661 kfree(root);
656} 662}
663EXPORT_SYMBOL_GPL(xenbus_dev_changed);
657 664
658static void frontend_changed(struct xenbus_watch *watch, 665static void frontend_changed(struct xenbus_watch *watch,
659 const char **vec, unsigned int len) 666 const char **vec, unsigned int len)
@@ -669,7 +676,7 @@ static struct xenbus_watch fe_watch = {
669 .callback = frontend_changed, 676 .callback = frontend_changed,
670}; 677};
671 678
672static int suspend_dev(struct device *dev, void *data) 679static int xenbus_dev_suspend(struct device *dev, pm_message_t state)
673{ 680{
674 int err = 0; 681 int err = 0;
675 struct xenbus_driver *drv; 682 struct xenbus_driver *drv;
@@ -682,35 +689,14 @@ static int suspend_dev(struct device *dev, void *data)
682 drv = to_xenbus_driver(dev->driver); 689 drv = to_xenbus_driver(dev->driver);
683 xdev = container_of(dev, struct xenbus_device, dev); 690 xdev = container_of(dev, struct xenbus_device, dev);
684 if (drv->suspend) 691 if (drv->suspend)
685 err = drv->suspend(xdev); 692 err = drv->suspend(xdev, state);
686 if (err) 693 if (err)
687 printk(KERN_WARNING 694 printk(KERN_WARNING
688 "xenbus: suspend %s failed: %i\n", dev_name(dev), err); 695 "xenbus: suspend %s failed: %i\n", dev_name(dev), err);
689 return 0; 696 return 0;
690} 697}
691 698
692static int suspend_cancel_dev(struct device *dev, void *data) 699static int xenbus_dev_resume(struct device *dev)
693{
694 int err = 0;
695 struct xenbus_driver *drv;
696 struct xenbus_device *xdev;
697
698 DPRINTK("");
699
700 if (dev->driver == NULL)
701 return 0;
702 drv = to_xenbus_driver(dev->driver);
703 xdev = container_of(dev, struct xenbus_device, dev);
704 if (drv->suspend_cancel)
705 err = drv->suspend_cancel(xdev);
706 if (err)
707 printk(KERN_WARNING
708 "xenbus: suspend_cancel %s failed: %i\n",
709 dev_name(dev), err);
710 return 0;
711}
712
713static int resume_dev(struct device *dev, void *data)
714{ 700{
715 int err; 701 int err;
716 struct xenbus_driver *drv; 702 struct xenbus_driver *drv;
@@ -755,33 +741,6 @@ static int resume_dev(struct device *dev, void *data)
755 return 0; 741 return 0;
756} 742}
757 743
758void xenbus_suspend(void)
759{
760 DPRINTK("");
761
762 bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
763 xenbus_backend_suspend(suspend_dev);
764 xs_suspend();
765}
766EXPORT_SYMBOL_GPL(xenbus_suspend);
767
768void xenbus_resume(void)
769{
770 xb_init_comms();
771 xs_resume();
772 bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
773 xenbus_backend_resume(resume_dev);
774}
775EXPORT_SYMBOL_GPL(xenbus_resume);
776
777void xenbus_suspend_cancel(void)
778{
779 xs_suspend_cancel();
780 bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
781 xenbus_backend_resume(suspend_cancel_dev);
782}
783EXPORT_SYMBOL_GPL(xenbus_suspend_cancel);
784
785/* A flag to determine if xenstored is 'ready' (i.e. has started) */ 744/* A flag to determine if xenstored is 'ready' (i.e. has started) */
786int xenstored_ready = 0; 745int xenstored_ready = 0;
787 746
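The signature change ripples into every xenbus driver: the .suspend callback now receives the pm_message_t forwarded by xenbus_dev_suspend(). A hedged sketch of what a frontend driver's hook looks like after this patch (the "example" names are hypothetical):

static const struct xenbus_device_id example_ids[] = {
	{ "example" },	/* hypothetical backend type */
	{ "" }
};

static int example_suspend(struct xenbus_device *xdev, pm_message_t state)
{
	/* Quiesce shared rings and grant references before the domain
	 * image is saved; a nonzero return logs the warning above. */
	return 0;
}

static struct xenbus_driver example_driver = {
	.name    = "example",
	.ids     = example_ids,
	.suspend = example_suspend,	/* new two-argument signature */
};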
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index e325eab4724..eab33f1dbdf 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -673,6 +673,8 @@ void xs_resume(void)
673 struct xenbus_watch *watch; 673 struct xenbus_watch *watch;
674 char token[sizeof(watch) * 2 + 1]; 674 char token[sizeof(watch) * 2 + 1];
675 675
676 xb_init_comms();
677
676 mutex_unlock(&xs_state.response_mutex); 678 mutex_unlock(&xs_state.response_mutex);
677 mutex_unlock(&xs_state.request_mutex); 679 mutex_unlock(&xs_state.request_mutex);
678 up_write(&xs_state.transaction_mutex); 680 up_write(&xs_state.transaction_mutex);
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 515741a8e6b..6559e0c752c 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -20,10 +20,27 @@
20MODULE_DESCRIPTION("Xen filesystem"); 20MODULE_DESCRIPTION("Xen filesystem");
21MODULE_LICENSE("GPL"); 21MODULE_LICENSE("GPL");
22 22
23static ssize_t capabilities_read(struct file *file, char __user *buf,
24 size_t size, loff_t *off)
25{
26 char *tmp = "";
27
28 if (xen_initial_domain())
29 tmp = "control_d\n";
30
31 return simple_read_from_buffer(buf, size, off, tmp, strlen(tmp));
32}
33
34static const struct file_operations capabilities_file_ops = {
35 .read = capabilities_read,
36};
37
23static int xenfs_fill_super(struct super_block *sb, void *data, int silent) 38static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
24{ 39{
25 static struct tree_descr xenfs_files[] = { 40 static struct tree_descr xenfs_files[] = {
26 [2] = {"xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR}, 41 [1] = {},
42 { "xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR },
43 { "capabilities", &capabilities_file_ops, S_IRUGO },
27 {""}, 44 {""},
28 }; 45 };
29 46