aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorMauro Carvalho Chehab <mchehab@redhat.com>2011-09-17 09:29:49 -0400
committerMauro Carvalho Chehab <mchehab@redhat.com>2011-09-17 09:29:49 -0400
commit7577911244c437f4a4abac5e4b67b059c06dbe9d (patch)
tree4f0078ddacff226e26b03fa4f6cf185d48633124 /drivers
parent418d93ac0be6d4a410731b80af4e836614ffe73e (diff)
parentb6fd41e29dea9c6753b1843a77e50433e6123bcb (diff)
Merge tag 'v3.1-rc6' into staging/for_v3.2
* tag 'v3.1-rc6': (1902 commits) Linux 3.1-rc6 ioctl: register LTTng ioctl fuse: fix memory leak fuse: fix flock breakage Btrfs: add dummy extent if dst offset excceeds file end in Btrfs: calc file extent num_bytes correctly in file clone btrfs: xattr: fix attribute removal Btrfs: fix wrong nbytes information of the inode Btrfs: fix the file extent gap when doing direct IO Btrfs: fix unclosed transaction handle in btrfs_cont_expand Btrfs: fix misuse of trans block rsv Btrfs: reset to appropriate block rsv after orphan operations Btrfs: skip locking if searching the commit root in csum lookup btrfs: fix warning in iput for bad-inode Btrfs: fix an oops when deleting snapshots [media] vp7045: fix buffer setup [media] nuvoton-cir: simplify raw IR sample handling [media] [Resend] viacam: Don't explode if pci_find_bus() returns NULL [media] v4l2: Fix documentation of the codec device controls [media] gspca - sonixj: Fix the darkness of sensor om6802 in 320x240 ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpica/acglobal.h6
-rw-r--r--drivers/acpi/acpica/aclocal.h1
-rw-r--r--drivers/acpi/acpica/acpredef.h1
-rw-r--r--drivers/acpi/acpica/nspredef.c19
-rw-r--r--drivers/acpi/acpica/nsrepair2.c15
-rw-r--r--drivers/acpi/acpica/tbinstal.c27
-rw-r--r--drivers/acpi/apei/Kconfig11
-rw-r--r--drivers/acpi/apei/apei-base.c35
-rw-r--r--drivers/acpi/apei/apei-internal.h15
-rw-r--r--drivers/acpi/apei/einj.c43
-rw-r--r--drivers/acpi/apei/erst-dbg.c6
-rw-r--r--drivers/acpi/apei/erst.c32
-rw-r--r--drivers/acpi/apei/ghes.c431
-rw-r--r--drivers/acpi/apei/hest.c17
-rw-r--r--drivers/acpi/battery.c86
-rw-r--r--drivers/acpi/bus.c14
-rw-r--r--drivers/acpi/dock.c4
-rw-r--r--drivers/acpi/ec_sys.c2
-rw-r--r--drivers/acpi/fan.c2
-rw-r--r--drivers/acpi/osl.c25
-rw-r--r--drivers/acpi/pci_irq.c58
-rw-r--r--drivers/acpi/pci_root.c3
-rw-r--r--drivers/acpi/processor_thermal.c2
-rw-r--r--drivers/acpi/sbs.c13
-rw-r--r--drivers/acpi/sleep.c16
-rw-r--r--drivers/acpi/sysfs.c4
-rw-r--r--drivers/acpi/thermal.c2
-rw-r--r--drivers/acpi/video.c2
-rw-r--r--drivers/ata/Kconfig9
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/libata-acpi.c4
-rw-r--r--drivers/ata/pata_imx.c253
-rw-r--r--drivers/ata/pata_via.c18
-rw-r--r--drivers/ata/sata_dwc_460ex.c14
-rw-r--r--drivers/ata/sata_sil.c2
-rw-r--r--drivers/base/devres.c1
-rw-r--r--drivers/base/devtmpfs.c4
-rw-r--r--drivers/base/firmware_class.c11
-rw-r--r--drivers/base/platform.c2
-rw-r--r--drivers/base/power/clock_ops.c40
-rw-r--r--drivers/base/power/domain.c33
-rw-r--r--drivers/base/power/runtime.c10
-rw-r--r--drivers/base/regmap/regmap-i2c.c1
-rw-r--r--drivers/base/regmap/regmap-spi.c3
-rw-r--r--drivers/base/regmap/regmap.c7
-rw-r--r--drivers/bcma/main.c12
-rw-r--r--drivers/block/Kconfig17
-rw-r--r--drivers/block/drbd/drbd_nl.c4
-rw-r--r--drivers/block/loop.c297
-rw-r--r--drivers/block/swim3.c1
-rw-r--r--drivers/block/xen-blkfront.c6
-rw-r--r--drivers/bluetooth/ath3k.c1
-rw-r--r--drivers/bluetooth/btusb.c13
-rw-r--r--drivers/cdrom/cdrom.c8
-rw-r--r--drivers/char/Kconfig11
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/msm_smd_pkt.c5
-rw-r--r--drivers/char/ramoops.c8
-rw-r--r--drivers/char/random.c349
-rw-r--r--drivers/char/tile-srom.c481
-rw-r--r--drivers/char/tpm/tpm_tis.c7
-rw-r--r--drivers/clocksource/sh_cmt.c34
-rw-r--r--drivers/connector/cn_proc.c8
-rw-r--r--drivers/cpuidle/cpuidle.c50
-rw-r--r--drivers/cpuidle/cpuidle.h1
-rw-r--r--drivers/cpuidle/driver.c3
-rw-r--r--drivers/cpuidle/governor.c3
-rw-r--r--drivers/dma/TODO1
-rw-r--r--drivers/dma/amba-pl08x.c247
-rw-r--r--drivers/dma/at_hdmac.c4
-rw-r--r--drivers/dma/coh901318.c19
-rw-r--r--drivers/dma/dmaengine.c8
-rw-r--r--drivers/dma/ep93xx_dma.c2
-rw-r--r--drivers/dma/imx-sdma.c4
-rw-r--r--drivers/dma/intel_mid_dma.c2
-rw-r--r--drivers/dma/ioat/dma_v3.c8
-rw-r--r--drivers/dma/ioat/pci.c11
-rw-r--r--drivers/dma/ipu/ipu_idmac.c6
-rw-r--r--drivers/dma/mv_xor.c3
-rw-r--r--drivers/dma/mxs-dma.c13
-rw-r--r--drivers/dma/pch_dma.c127
-rw-r--r--drivers/dma/pl330.c64
-rw-r--r--drivers/dma/shdma.c88
-rw-r--r--drivers/dma/shdma.h4
-rw-r--r--drivers/dma/ste_dma40.c312
-rw-r--r--drivers/dma/ste_dma40_ll.h3
-rw-r--r--drivers/edac/i7core_edac.c2
-rw-r--r--drivers/eisa/pci_eisa.c4
-rw-r--r--drivers/firewire/core-cdev.c24
-rw-r--r--drivers/firewire/core-device.c15
-rw-r--r--drivers/firewire/ohci.c9
-rw-r--r--drivers/firewire/sbp2.c4
-rw-r--r--drivers/firmware/efivars.c243
-rw-r--r--drivers/firmware/google/gsmi.c2
-rw-r--r--drivers/gpio/Kconfig22
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/gpio-ab8500.c2
-rw-r--r--drivers/gpio/gpio-msm-v1.c636
-rw-r--r--drivers/gpio/gpio-msm-v2.c433
-rw-r--r--drivers/gpio/gpio-tps65912.c156
-rw-r--r--drivers/gpu/drm/drm_crtc.c2
-rw-r--r--drivers/gpu/drm/drm_debugfs.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c33
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c1
-rw-r--r--drivers/gpu/drm/drm_irq.c26
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c191
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c9
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h53
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c5
-rw-r--r--drivers/gpu/drm/i915/intel_display.c319
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c118
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h26
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c158
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c90
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c1
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c76
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c12
-rw-r--r--drivers/gpu/drm/radeon/Makefile1
-rw-r--r--drivers/gpu/drm/radeon/atom.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c12
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c30
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/ni.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c130
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c28
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c34
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c23
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c14
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c2
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-apple.c6
-rw-r--r--drivers/hid/hid-core.c3
-rw-r--r--drivers/hid/hid-ids.h7
-rw-r--r--drivers/hid/hid-magicmouse.c66
-rw-r--r--drivers/hid/hid-wacom.c22
-rw-r--r--drivers/hid/hid-wiimote.c277
-rw-r--r--drivers/hid/usbhid/hid-quirks.c2
-rw-r--r--drivers/hwmon/Kconfig127
-rw-r--r--drivers/hwmon/Makefile13
-rw-r--r--drivers/hwmon/coretemp.c177
-rw-r--r--drivers/hwmon/i5k_amb.c42
-rw-r--r--drivers/hwmon/ibmaem.c15
-rw-r--r--drivers/hwmon/lm90.c65
-rw-r--r--drivers/hwmon/lm95241.c31
-rw-r--r--drivers/hwmon/lm95245.c543
-rw-r--r--drivers/hwmon/max16065.c2
-rw-r--r--drivers/hwmon/max1668.c502
-rw-r--r--drivers/hwmon/ntc_thermistor.c452
-rw-r--r--drivers/hwmon/pmbus/Kconfig100
-rw-r--r--drivers/hwmon/pmbus/Makefile13
-rw-r--r--drivers/hwmon/pmbus/adm1275.c (renamed from drivers/hwmon/adm1275.c)66
-rw-r--r--drivers/hwmon/pmbus/lm25066.c352
-rw-r--r--drivers/hwmon/pmbus/max16064.c (renamed from drivers/hwmon/max16064.c)57
-rw-r--r--drivers/hwmon/pmbus/max34440.c (renamed from drivers/hwmon/max34440.c)81
-rw-r--r--drivers/hwmon/pmbus/max8688.c (renamed from drivers/hwmon/max8688.c)69
-rw-r--r--drivers/hwmon/pmbus/pmbus.c (renamed from drivers/hwmon/pmbus.c)37
-rw-r--r--drivers/hwmon/pmbus/pmbus.h (renamed from drivers/hwmon/pmbus.h)49
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c (renamed from drivers/hwmon/pmbus_core.c)371
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c (renamed from drivers/hwmon/ucd9000.c)6
-rw-r--r--drivers/hwmon/pmbus/ucd9200.c (renamed from drivers/hwmon/ucd9200.c)6
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c9
-rw-r--r--drivers/i2c/busses/i2c-omap.c29
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c5
-rw-r--r--drivers/i2c/busses/i2c-tegra.c60
-rw-r--r--drivers/ide/cy82c693.c2
-rw-r--r--drivers/ide/ide_platform.c6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c8
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c10
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c2
-rw-r--r--drivers/input/joystick/analog.c2
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c1
-rw-r--r--drivers/input/keyboard/gpio_keys.c2
-rw-r--r--drivers/input/keyboard/lm8323.c9
-rw-r--r--drivers/input/keyboard/tegra-kbc.c7
-rw-r--r--drivers/input/misc/ad714x-i2c.c81
-rw-r--r--drivers/input/misc/ad714x-spi.c68
-rw-r--r--drivers/input/misc/ad714x.c116
-rw-r--r--drivers/input/misc/ad714x.h35
-rw-r--r--drivers/input/misc/kxtj9.c1
-rw-r--r--drivers/input/misc/mma8450.c8
-rw-r--r--drivers/input/misc/mpu3050.c2
-rw-r--r--drivers/input/mouse/bcm5974.c40
-rw-r--r--drivers/input/mouse/hgpk.c1
-rw-r--r--drivers/input/serio/xilinx_ps2.c2
-rw-r--r--drivers/input/tablet/wacom_sys.c17
-rw-r--r--drivers/input/tablet/wacom_wac.c4
-rw-r--r--drivers/input/touchscreen/ad7879.c4
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c12
-rw-r--r--drivers/input/touchscreen/max11801_ts.c3
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c1
-rw-r--r--drivers/iommu/amd_iommu.c18
-rw-r--r--drivers/leds/leds-ams-delta.c1
-rw-r--r--drivers/leds/leds-bd2802.c5
-rw-r--r--drivers/leds/leds-hp6xx.c1
-rw-r--r--drivers/md/Kconfig5
-rw-r--r--drivers/md/dm-crypt.c62
-rw-r--r--drivers/md/dm-flakey.c270
-rw-r--r--drivers/md/dm-io.c29
-rw-r--r--drivers/md/dm-ioctl.c89
-rw-r--r--drivers/md/dm-kcopyd.c42
-rw-r--r--drivers/md/dm-log-userspace-base.c3
-rw-r--r--drivers/md/dm-log.c32
-rw-r--r--drivers/md/dm-mpath.c147
-rw-r--r--drivers/md/dm-raid.c621
-rw-r--r--drivers/md/dm-snap-persistent.c80
-rw-r--r--drivers/md/dm-snap.c84
-rw-r--r--drivers/md/dm-table.c155
-rw-r--r--drivers/md/dm.c75
-rw-r--r--drivers/md/dm.h2
-rw-r--r--drivers/md/linear.h2
-rw-r--r--drivers/md/md.c28
-rw-r--r--drivers/md/raid1.c14
-rw-r--r--drivers/md/raid10.c47
-rw-r--r--drivers/md/raid5.c2
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.c26
-rw-r--r--drivers/media/rc/nuvoton-cir.c45
-rw-r--r--drivers/media/rc/nuvoton-cir.h1
-rw-r--r--drivers/media/video/pwc/pwc-v4l.c2
-rw-r--r--drivers/media/video/via-camera.c2
-rw-r--r--drivers/message/fusion/mptscsih.c2
-rw-r--r--drivers/mfd/Kconfig53
-rw-r--r--drivers/mfd/Makefile8
-rw-r--r--drivers/mfd/aat2870-core.c535
-rw-r--r--drivers/mfd/ab3550-core.c41
-rw-r--r--drivers/mfd/ab8500-core.c231
-rw-r--r--drivers/mfd/ab8500-debugfs.c41
-rw-r--r--drivers/mfd/jz4740-adc.c90
-rw-r--r--drivers/mfd/lpc_sch.c49
-rw-r--r--drivers/mfd/max8997-irq.c2
-rw-r--r--drivers/mfd/max8998.c2
-rw-r--r--drivers/mfd/omap-usb-host.c4
-rw-r--r--drivers/mfd/stmpe.c2
-rw-r--r--drivers/mfd/stmpe.h1
-rw-r--r--drivers/mfd/tps65910.c13
-rw-r--r--drivers/mfd/tps65911-comparator.c2
-rw-r--r--drivers/mfd/tps65912-core.c177
-rw-r--r--drivers/mfd/tps65912-i2c.c139
-rw-r--r--drivers/mfd/tps65912-irq.c224
-rw-r--r--drivers/mfd/tps65912-spi.c142
-rw-r--r--drivers/mfd/twl-core.c2
-rw-r--r--drivers/mfd/twl4030-madc.c8
-rw-r--r--drivers/mfd/twl6030-pwm.c2
-rw-r--r--drivers/mfd/wm831x-auxadc.c299
-rw-r--r--drivers/mfd/wm831x-core.c259
-rw-r--r--drivers/mfd/wm831x-irq.c77
-rw-r--r--drivers/mfd/wm8350-irq.c18
-rw-r--r--drivers/mfd/wm8994-core.c33
-rw-r--r--drivers/mfd/wm8994-irq.c12
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/ab8500-pwm.c2
-rw-r--r--drivers/misc/cb710/core.c3
-rw-r--r--drivers/misc/fsa9480.c4
-rw-r--r--drivers/misc/pti.c2
-rw-r--r--drivers/misc/ti-st/st_core.c10
-rw-r--r--drivers/misc/ti-st/st_kim.c33
-rw-r--r--drivers/misc/ti-st/st_ll.c19
-rw-r--r--drivers/mmc/card/mmc_test.c58
-rw-r--r--drivers/mmc/core/core.c37
-rw-r--r--drivers/mmc/core/host.c12
-rw-r--r--drivers/mmc/core/host.h8
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/mmc_ops.c2
-rw-r--r--drivers/mmc/core/sd.c81
-rw-r--r--drivers/mmc/host/dw_mmc.c6
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c41
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c3
-rw-r--r--drivers/mmc/host/sdhci-s3c.c6
-rw-r--r--drivers/mmc/host/sdhci.c53
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c4
-rw-r--r--drivers/mmc/host/tmio_mmc.c2
-rw-r--r--drivers/mtd/ubi/debug.h2
-rw-r--r--drivers/net/arm/am79c961a.c3
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c45
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.c2
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c218
-rw-r--r--drivers/net/bnx2x/bnx2x_link.h3
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c23
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h32
-rw-r--r--drivers/net/bonding/bond_main.c18
-rw-r--r--drivers/net/can/sja1000/plx_pci.c4
-rw-r--r--drivers/net/can/slcan.c2
-rw-r--r--drivers/net/can/ti_hecc.c3
-rw-r--r--drivers/net/cassini.c3
-rw-r--r--drivers/net/e1000/e1000_ethtool.c6
-rw-r--r--drivers/net/e1000/e1000_hw.c3
-rw-r--r--drivers/net/e1000e/82571.c6
-rw-r--r--drivers/net/e1000e/e1000.h5
-rw-r--r--drivers/net/e1000e/es2lan.c2
-rw-r--r--drivers/net/e1000e/ethtool.c14
-rw-r--r--drivers/net/e1000e/ich8lan.c72
-rw-r--r--drivers/net/e1000e/lib.c8
-rw-r--r--drivers/net/e1000e/netdev.c91
-rw-r--r--drivers/net/e1000e/phy.c2
-rw-r--r--drivers/net/forcedeth.c3
-rw-r--r--drivers/net/gianfar.c9
-rw-r--r--drivers/net/gianfar_ethtool.c26
-rw-r--r--drivers/net/gianfar_ptp.c9
-rw-r--r--drivers/net/ibmveth.c12
-rw-r--r--drivers/net/igb/e1000_nvm.c1
-rw-r--r--drivers/net/igb/igb_ethtool.c5
-rw-r--r--drivers/net/igb/igb_main.c4
-rw-r--r--drivers/net/igbvf/netdev.c2
-rw-r--r--drivers/net/irda/sh_irda.c2
-rw-r--r--drivers/net/irda/sh_sir.c4
-rw-r--r--drivers/net/irda/smsc-ircc2.c18
-rw-r--r--drivers/net/ixgb/ixgb_ee.c9
-rw-r--r--drivers/net/ixgb/ixgb_hw.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c5
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c7
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c3
-rw-r--r--drivers/net/ixgbe/ixgbe_x540.c1
-rw-r--r--drivers/net/macb.c3
-rw-r--r--drivers/net/mlx4/en_port.c2
-rw-r--r--drivers/net/mlx4/main.c2
-rw-r--r--drivers/net/mlx4/port.c9
-rw-r--r--drivers/net/niu.c4
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c2
-rw-r--r--drivers/net/pcnet32.c2
-rw-r--r--drivers/net/phy/dp83640.c5
-rw-r--r--drivers/net/phy/national.c17
-rw-r--r--drivers/net/r8169.c28
-rw-r--r--drivers/net/rionet.c23
-rw-r--r--drivers/net/sh_eth.c2
-rw-r--r--drivers/net/sis190.c12
-rw-r--r--drivers/net/slip.c2
-rw-r--r--drivers/net/sungem.c3
-rw-r--r--drivers/net/usb/cdc_ncm.c191
-rw-r--r--drivers/net/usb/rtl8150.c1
-rw-r--r--drivers/net/via-velocity.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c34
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c27
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c4
-rw-r--r--drivers/net/wireless/b43/dma.c20
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.c6
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-pci.c39
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c3
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c22
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c31
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c1
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c11
-rw-r--r--drivers/net/wireless/wl1251/acx.c6
-rw-r--r--drivers/net/wireless/wl1251/cmd.c2
-rw-r--r--drivers/net/wireless/wl12xx/acx.c6
-rw-r--r--drivers/net/wireless/wl12xx/main.c1
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c2
-rw-r--r--drivers/net/wireless/wl12xx/testmode.c50
-rw-r--r--drivers/of/address.c2
-rw-r--r--drivers/of/base.c7
-rw-r--r--drivers/of/gpio.c5
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c2
-rw-r--r--drivers/pci/hotplug/pcihp_slot.c47
-rw-r--r--drivers/pci/of.c2
-rw-r--r--drivers/pci/pci.c67
-rw-r--r--drivers/pci/pci.h4
-rw-r--r--drivers/pci/probe.c146
-rw-r--r--drivers/pci/setup-bus.c166
-rw-r--r--drivers/pci/setup-res.c152
-rw-r--r--drivers/pcmcia/pxa2xx_balloon3.c10
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x255.c11
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x270.c11
-rw-r--r--drivers/pcmcia/pxa2xx_colibri.c11
-rw-r--r--drivers/pcmcia/pxa2xx_mainstone.c10
-rw-r--r--drivers/pcmcia/pxa2xx_palmld.c11
-rw-r--r--drivers/pcmcia/pxa2xx_palmtc.c11
-rw-r--r--drivers/pcmcia/pxa2xx_palmtx.c11
-rw-r--r--drivers/pcmcia/pxa2xx_stargate2.c34
-rw-r--r--drivers/pcmcia/pxa2xx_viper.c10
-rw-r--r--drivers/pcmcia/soc_common.c7
-rw-r--r--drivers/platform/x86/Kconfig8
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/acer-wmi.c40
-rw-r--r--drivers/platform/x86/acerhdf.c13
-rw-r--r--drivers/platform/x86/asus-laptop.c9
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c27
-rw-r--r--drivers/platform/x86/asus-wmi.c239
-rw-r--r--drivers/platform/x86/asus-wmi.h7
-rw-r--r--drivers/platform/x86/dell-laptop.c1
-rw-r--r--drivers/platform/x86/dell-wmi.c10
-rw-r--r--drivers/platform/x86/eeepc-wmi.c27
-rw-r--r--drivers/platform/x86/ideapad-laptop.c195
-rw-r--r--drivers/platform/x86/intel_ips.c4
-rw-r--r--drivers/platform/x86/intel_menlow.c2
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c26
-rw-r--r--drivers/platform/x86/intel_rar_register.c4
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c2
-rw-r--r--drivers/platform/x86/msi-laptop.c10
-rw-r--r--drivers/platform/x86/msi-wmi.c1
-rw-r--r--drivers/platform/x86/samsung-laptop.c20
-rw-r--r--drivers/platform/x86/samsung-q10.c196
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c11
-rw-r--r--drivers/power/Kconfig14
-rw-r--r--drivers/power/Makefile2
-rw-r--r--drivers/power/apm_power.c8
-rw-r--r--drivers/power/bq20z75.c103
-rw-r--r--drivers/power/gpio-charger.c2
-rw-r--r--drivers/power/max17042_battery.c175
-rw-r--r--drivers/power/max8903_charger.c16
-rw-r--r--drivers/power/max8997_charger.c207
-rw-r--r--drivers/power/max8998_charger.c219
-rw-r--r--drivers/power/s3c_adc_battery.c3
-rw-r--r--drivers/power/twl4030_charger.c10
-rw-r--r--drivers/power/wm831x_backup.c12
-rw-r--r--drivers/power/wm831x_power.c26
-rw-r--r--drivers/rapidio/rio-scan.c3
-rw-r--r--drivers/regulator/Kconfig13
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/aat2870-regulator.c232
-rw-r--r--drivers/regulator/core.c190
-rw-r--r--drivers/regulator/dummy.c32
-rw-r--r--drivers/regulator/tps65910-regulator.c63
-rw-r--r--drivers/regulator/tps65912-regulator.c800
-rw-r--r--drivers/regulator/twl-regulator.c66
-rw-r--r--drivers/regulator/wm831x-dcdc.c126
-rw-r--r--drivers/regulator/wm831x-ldo.c25
-rw-r--r--drivers/regulator/wm8994-regulator.c4
-rw-r--r--drivers/rtc/interface.c4
-rw-r--r--drivers/rtc/rtc-ep93xx.c16
-rw-r--r--drivers/rtc/rtc-lib.c2
-rw-r--r--drivers/rtc/rtc-omap.c2
-rw-r--r--drivers/rtc/rtc-s3c.c79
-rw-r--r--drivers/rtc/rtc-twl.c60
-rw-r--r--drivers/s390/block/dasd.c9
-rw-r--r--drivers/s390/block/dasd_eckd.c9
-rw-r--r--drivers/s390/block/dasd_ioctl.c10
-rw-r--r--drivers/s390/block/dasd_proc.c4
-rw-r--r--drivers/s390/char/sclp_async.c9
-rw-r--r--drivers/s390/char/sclp_cmd.c6
-rw-r--r--drivers/s390/cio/qdio.h2
-rw-r--r--drivers/s390/cio/qdio_debug.c12
-rw-r--r--drivers/s390/cio/qdio_main.c21
-rw-r--r--drivers/scsi/bfa/bfa.h51
-rw-r--r--drivers/scsi/bfa/bfa_core.c60
-rw-r--r--drivers/scsi/bfa/bfa_defs.h171
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h99
-rw-r--r--drivers/scsi/bfa/bfa_fc.h155
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c736
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h45
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c26
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h1
-rw-r--r--drivers/scsi/bfa/bfa_fcs_fcpim.c37
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c74
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c49
-rw-r--r--drivers/scsi/bfa/bfa_hw_cb.c38
-rw-r--r--drivers/scsi/bfa/bfa_hw_ct.c25
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c569
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h48
-rw-r--r--drivers/scsi/bfa/bfa_modules.h3
-rw-r--r--drivers/scsi/bfa/bfa_svc.c249
-rw-r--r--drivers/scsi/bfa/bfa_svc.h29
-rw-r--r--drivers/scsi/bfa/bfad.c8
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c1082
-rw-r--r--drivers/scsi/bfa/bfad_bsg.h237
-rw-r--r--drivers/scsi/bfa/bfad_drv.h6
-rw-r--r--drivers/scsi/bfa/bfad_im.c26
-rw-r--r--drivers/scsi/bfa/bfad_im.h22
-rw-r--r--drivers/scsi/bfa/bfi.h20
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h107
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_debug.h16
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c434
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c732
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c433
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c194
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c51
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c82
-rw-r--r--drivers/scsi/fcoe/fcoe.c69
-rw-r--r--drivers/scsi/hpsa.c4
-rw-r--r--drivers/scsi/hpsa.h2
-rw-r--r--drivers/scsi/ipr.c12
-rw-r--r--drivers/scsi/libfc/fc_exch.c9
-rw-r--r--drivers/scsi/libfc/fc_fcp.c9
-rw-r--r--drivers/scsi/libfc/fc_lport.c1
-rw-r--r--drivers/scsi/libsas/sas_expander.c3
-rw-r--r--drivers/scsi/lpfc/lpfc.h15
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c161
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c89
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h12
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c1354
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h125
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c105
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c222
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h30
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c90
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c97
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c399
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h29
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c18
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c1
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c4
-rw-r--r--drivers/scsi/mvsas/Kconfig9
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c101
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c508
-rw-r--r--drivers/scsi/mvsas/mv_94xx.h99
-rw-r--r--drivers/scsi/mvsas/mv_chips.h17
-rw-r--r--drivers/scsi/mvsas/mv_defs.h11
-rw-r--r--drivers/scsi/mvsas/mv_init.c187
-rw-r--r--drivers/scsi/mvsas/mv_sas.c422
-rw-r--r--drivers/scsi/mvsas/mv_sas.h105
-rw-r--r--drivers/scsi/pmcraid.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c183
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c441
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c396
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h187
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c371
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c856
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c120
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c663
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c1091
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c160
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c556
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c747
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c275
-rw-r--r--drivers/scsi/qla4xxx/Kconfig2
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/scsi_transport_spi.c24
-rw-r--r--drivers/sh/clk/core.c29
-rw-r--r--drivers/sh/intc/chip.c3
-rw-r--r--drivers/spi/spi-pl022.c11
-rw-r--r--drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c2
-rw-r--r--drivers/staging/brcm80211/brcmsmac/otp.c1
-rw-r--r--drivers/staging/brcm80211/brcmsmac/types.h1
-rw-r--r--drivers/staging/dt3155v4l/dt3155v4l.c1
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c1
-rw-r--r--drivers/staging/gma500/gem_glue.c23
-rw-r--r--drivers/staging/gma500/gem_glue.h2
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi.c3
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi.h3
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dpi.c7
-rw-r--r--drivers/staging/gma500/mdfld_dsi_output.c4
-rw-r--r--drivers/staging/gma500/medfield.h2
-rw-r--r--drivers/staging/gma500/psb_drv.h1
-rw-r--r--drivers/staging/hv/blkvsc_drv.c4
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c8
-rw-r--r--drivers/staging/iio/accel/adis16204_core.c8
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c8
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c8
-rw-r--r--drivers/staging/iio/gyro/adis16260_core.c8
-rw-r--r--drivers/staging/nvec/TODO6
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c1
-rw-r--r--drivers/staging/octeon/ethernet-spi.c1
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c9
-rw-r--r--drivers/staging/rts_pstor/rtsx.c109
-rw-r--r--drivers/staging/rts_pstor/rtsx.h9
-rw-r--r--drivers/staging/solo6x10/core.c1
-rw-r--r--drivers/staging/solo6x10/enc.c1
-rw-r--r--drivers/staging/solo6x10/g723.c1
-rw-r--r--drivers/staging/solo6x10/p2m.c1
-rw-r--r--drivers/staging/solo6x10/solo6x10.h1
-rw-r--r--drivers/staging/speakup/devsynth.c5
-rw-r--r--drivers/staging/tidspbridge/core/dsp-clock.c1
-rw-r--r--drivers/staging/zcache/Makefile2
-rw-r--r--drivers/staging/zcache/tmem.c2
-rw-r--r--drivers/staging/zcache/zcache-main.c (renamed from drivers/staging/zcache/zcache.c)21
-rw-r--r--drivers/target/iscsi/Kconfig1
-rw-r--r--drivers/target/iscsi/iscsi_target.c16
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c16
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c43
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c4
-rw-r--r--drivers/target/target_core_cdb.c57
-rw-r--r--drivers/target/target_core_device.c48
-rw-r--r--drivers/target/target_core_fabric_configfs.c2
-rw-r--r--drivers/target/target_core_pr.c8
-rw-r--r--drivers/target/target_core_rd.c24
-rw-r--r--drivers/target/target_core_tpg.c64
-rw-r--r--drivers/target/target_core_transport.c239
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h5
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c1
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c6
-rw-r--r--drivers/target/tcm_fc/tfc_io.c121
-rw-r--r--drivers/thermal/Kconfig8
-rw-r--r--drivers/thermal/thermal_sys.c142
-rw-r--r--drivers/tty/pty.c17
-rw-r--r--drivers/tty/serial/8250.c8
-rw-r--r--drivers/tty/serial/8250_pci.c11
-rw-r--r--drivers/tty/serial/8250_pnp.c3
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/atmel_serial.c8
-rw-r--r--drivers/tty/serial/imx.c13
-rw-r--r--drivers/tty/serial/max3107-aava.c2
-rw-r--r--drivers/tty/serial/max3107.c2
-rw-r--r--drivers/tty/serial/mrst_max3110.c2
-rw-r--r--drivers/tty/serial/omap-serial.c3
-rw-r--r--drivers/tty/serial/pch_uart.c3
-rw-r--r--drivers/tty/serial/samsung.c8
-rw-r--r--drivers/tty/serial/serial_core.c5
-rw-r--r--drivers/tty/serial/sh-sci.c829
-rw-r--r--drivers/tty/serial/sh-sci.h434
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/tty_io.c3
-rw-r--r--drivers/usb/class/usbtmc.c2
-rw-r--r--drivers/usb/core/config.c11
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/at91_udc.c1
-rw-r--r--drivers/usb/gadget/composite.c6
-rw-r--r--drivers/usb/gadget/f_hid.c7
-rw-r--r--drivers/usb/gadget/f_phonet.c1
-rw-r--r--drivers/usb/gadget/fusb300_udc.c101
-rw-r--r--drivers/usb/gadget/net2272.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c1
-rw-r--r--drivers/usb/host/ehci-hub.c19
-rw-r--r--drivers/usb/host/ehci-mxc.c1
-rw-r--r--drivers/usb/host/ehci-omap.c16
-rw-r--r--drivers/usb/host/ehci-s5p.c1
-rw-r--r--drivers/usb/host/isp1760-hcd.c3
-rw-r--r--drivers/usb/host/pci-quirks.c4
-rw-r--r--drivers/usb/host/xhci-hub.c17
-rw-r--r--drivers/usb/host/xhci-ring.c90
-rw-r--r--drivers/usb/host/xhci.c47
-rw-r--r--drivers/usb/musb/Kconfig3
-rw-r--r--drivers/usb/musb/blackfin.c1
-rw-r--r--drivers/usb/musb/cppi_dma.c26
-rw-r--r--drivers/usb/musb/musb_core.h12
-rw-r--r--drivers/usb/musb/musb_gadget.c9
-rw-r--r--drivers/usb/musb/musb_regs.h6
-rw-r--r--drivers/usb/musb/tusb6010.c1
-rw-r--r--drivers/usb/musb/tusb6010_omap.c3
-rw-r--r--drivers/usb/musb/ux500_dma.c38
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c28
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/option.c116
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h10
-rw-r--r--drivers/video/backlight/Kconfig7
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/aat2870_bl.c246
-rw-r--r--drivers/video/backlight/adp8870_bl.c2
-rw-r--r--drivers/video/backlight/backlight.c2
-rw-r--r--drivers/video/backlight/ep93xx_bl.c2
-rw-r--r--drivers/video/backlight/pwm_bl.c9
-rw-r--r--drivers/video/omap2/displays/panel-taal.c55
-rw-r--r--drivers/video/omap2/dss/Kconfig12
-rw-r--r--drivers/video/omap2/dss/core.c21
-rw-r--r--drivers/video/omap2/dss/dispc.c562
-rw-r--r--drivers/video/omap2/dss/display.c57
-rw-r--r--drivers/video/omap2/dss/dpi.c73
-rw-r--r--drivers/video/omap2/dss/dsi.c296
-rw-r--r--drivers/video/omap2/dss/dss.c583
-rw-r--r--drivers/video/omap2/dss/dss.h54
-rw-r--r--drivers/video/omap2/dss/dss_features.c36
-rw-r--r--drivers/video/omap2/dss/dss_features.h7
-rw-r--r--drivers/video/omap2/dss/hdmi.c162
-rw-r--r--drivers/video/omap2/dss/manager.c351
-rw-r--r--drivers/video/omap2/dss/overlay.c27
-rw-r--r--drivers/video/omap2/dss/rfbi.c114
-rw-r--r--drivers/video/omap2/dss/sdi.c40
-rw-r--r--drivers/video/omap2/dss/venc.c183
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c72
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c166
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c34
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h37
-rw-r--r--drivers/video/savage/savagefb.h2
-rw-r--r--drivers/w1/masters/ds2490.c4
-rw-r--r--drivers/w1/masters/matrox_w1.c4
-rw-r--r--drivers/w1/slaves/w1_ds2408.c2
-rw-r--r--drivers/w1/slaves/w1_smem.c4
-rw-r--r--drivers/w1/slaves/w1_therm.c4
-rw-r--r--drivers/w1/w1.c4
-rw-r--r--drivers/w1/w1.h2
-rw-r--r--drivers/w1/w1_family.c2
-rw-r--r--drivers/w1/w1_family.h2
-rw-r--r--drivers/w1/w1_int.c2
-rw-r--r--drivers/w1/w1_int.h2
-rw-r--r--drivers/w1/w1_io.c2
-rw-r--r--drivers/w1/w1_log.h2
-rw-r--r--drivers/w1/w1_netlink.c2
-rw-r--r--drivers/w1/w1_netlink.h2
-rw-r--r--drivers/watchdog/Kconfig3
-rw-r--r--drivers/watchdog/nv_tco.c8
-rw-r--r--drivers/watchdog/shwdt.c2
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/xen-selfballoon.c1
719 files changed, 32221 insertions, 11565 deletions
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 73863d86f022..76dc02f15574 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -126,6 +126,12 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
126 */ 126 */
127u8 ACPI_INIT_GLOBAL(acpi_gbl_truncate_io_addresses, FALSE); 127u8 ACPI_INIT_GLOBAL(acpi_gbl_truncate_io_addresses, FALSE);
128 128
129/*
130 * Disable runtime checking and repair of values returned by control methods.
131 * Use only if the repair is causing a problem on a particular machine.
132 */
133u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_auto_repair, FALSE);
134
129/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */ 135/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
130 136
131struct acpi_table_fadt acpi_gbl_FADT; 137struct acpi_table_fadt acpi_gbl_FADT;
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index c7f743ca395b..5552125d8340 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -357,6 +357,7 @@ struct acpi_predefined_data {
357 char *pathname; 357 char *pathname;
358 const union acpi_predefined_info *predefined; 358 const union acpi_predefined_info *predefined;
359 union acpi_operand_object *parent_package; 359 union acpi_operand_object *parent_package;
360 struct acpi_namespace_node *node;
360 u32 flags; 361 u32 flags;
361 u8 node_flags; 362 u8 node_flags;
362}; 363};
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 94e73c97cf85..c445cca490ea 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -468,6 +468,7 @@ static const union acpi_predefined_info predefined_names[] =
468 {{"_SWS", 0, ACPI_RTYPE_INTEGER}}, 468 {{"_SWS", 0, ACPI_RTYPE_INTEGER}},
469 {{"_TC1", 0, ACPI_RTYPE_INTEGER}}, 469 {{"_TC1", 0, ACPI_RTYPE_INTEGER}},
470 {{"_TC2", 0, ACPI_RTYPE_INTEGER}}, 470 {{"_TC2", 0, ACPI_RTYPE_INTEGER}},
471 {{"_TDL", 0, ACPI_RTYPE_INTEGER}},
471 {{"_TIP", 1, ACPI_RTYPE_INTEGER}}, 472 {{"_TIP", 1, ACPI_RTYPE_INTEGER}},
472 {{"_TIV", 1, ACPI_RTYPE_INTEGER}}, 473 {{"_TIV", 1, ACPI_RTYPE_INTEGER}},
473 {{"_TMP", 0, ACPI_RTYPE_INTEGER}}, 474 {{"_TMP", 0, ACPI_RTYPE_INTEGER}},
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 9fb03fa8ffde..c845c8089f39 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -193,14 +193,20 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
193 } 193 }
194 194
195 /* 195 /*
196 * 1) We have a return value, but if one wasn't expected, just exit, this is 196 * Return value validation and possible repair.
197 * not a problem. For example, if the "Implicit Return" feature is
198 * enabled, methods will always return a value.
199 * 197 *
200 * 2) If the return value can be of any type, then we cannot perform any 198 * 1) Don't perform return value validation/repair if this feature
201 * validation, exit. 199 * has been disabled via a global option.
200 *
201 * 2) We have a return value, but if one wasn't expected, just exit,
202 * this is not a problem. For example, if the "Implicit Return"
203 * feature is enabled, methods will always return a value.
204 *
205 * 3) If the return value can be of any type, then we cannot perform
206 * any validation, just exit.
202 */ 207 */
203 if ((!predefined->info.expected_btypes) || 208 if (acpi_gbl_disable_auto_repair ||
209 (!predefined->info.expected_btypes) ||
204 (predefined->info.expected_btypes == ACPI_RTYPE_ALL)) { 210 (predefined->info.expected_btypes == ACPI_RTYPE_ALL)) {
205 goto cleanup; 211 goto cleanup;
206 } 212 }
@@ -212,6 +218,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
212 goto cleanup; 218 goto cleanup;
213 } 219 }
214 data->predefined = predefined; 220 data->predefined = predefined;
221 data->node = node;
215 data->node_flags = node->flags; 222 data->node_flags = node->flags;
216 data->pathname = pathname; 223 data->pathname = pathname;
217 224
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 973883babee1..024c4f263f87 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -503,6 +503,21 @@ acpi_ns_repair_TSS(struct acpi_predefined_data *data,
503{ 503{
504 union acpi_operand_object *return_object = *return_object_ptr; 504 union acpi_operand_object *return_object = *return_object_ptr;
505 acpi_status status; 505 acpi_status status;
506 struct acpi_namespace_node *node;
507
508 /*
509 * We can only sort the _TSS return package if there is no _PSS in the
510 * same scope. This is because if _PSS is present, the ACPI specification
511 * dictates that the _TSS Power Dissipation field is to be ignored, and
512 * therefore some BIOSs leave garbage values in the _TSS Power field(s).
513 * In this case, it is best to just return the _TSS package as-is.
514 * (May, 2011)
515 */
516 status =
517 acpi_ns_get_node(data->node, "^_PSS", ACPI_NS_NO_UPSEARCH, &node);
518 if (ACPI_SUCCESS(status)) {
519 return (AE_OK);
520 }
506 521
507 status = acpi_ns_check_sorted_list(data, return_object, 5, 1, 522 status = acpi_ns_check_sorted_list(data, return_object, 5, 1,
508 ACPI_SORT_DESCENDING, 523 ACPI_SORT_DESCENDING,
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 48db0944ce4a..62365f6075dd 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -126,12 +126,29 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
126 } 126 }
127 127
128 /* 128 /*
129 * Originally, we checked the table signature for "SSDT" or "PSDT" here. 129 * Validate the incoming table signature.
130 * Next, we added support for OEMx tables, signature "OEM". 130 *
131 * Valid tables were encountered with a null signature, so we've just 131 * 1) Originally, we checked the table signature for "SSDT" or "PSDT".
132 * given up on validating the signature, since it seems to be a waste 132 * 2) We added support for OEMx tables, signature "OEM".
133 * of code. The original code was removed (05/2008). 133 * 3) Valid tables were encountered with a null signature, so we just
134 * gave up on validating the signature, (05/2008).
135 * 4) We encountered non-AML tables such as the MADT, which caused
136 * interpreter errors and kernel faults. So now, we once again allow
137 * only "SSDT", "OEMx", and now, also a null signature. (05/2011).
134 */ 138 */
139 if ((table_desc->pointer->signature[0] != 0x00) &&
140 (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT))
141 && (ACPI_STRNCMP(table_desc->pointer->signature, "OEM", 3))) {
142 ACPI_ERROR((AE_INFO,
143 "Table has invalid signature [%4.4s] (0x%8.8X), must be SSDT or OEMx",
144 acpi_ut_valid_acpi_name(*(u32 *)table_desc->
145 pointer->
146 signature) ? table_desc->
147 pointer->signature : "????",
148 *(u32 *)table_desc->pointer->signature));
149
150 return_ACPI_STATUS(AE_BAD_SIGNATURE);
151 }
135 152
136 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); 153 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
137 154
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index f739a70b1c70..c34aa51af4ee 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -10,9 +10,11 @@ config ACPI_APEI
10 error injection. 10 error injection.
11 11
12config ACPI_APEI_GHES 12config ACPI_APEI_GHES
13 tristate "APEI Generic Hardware Error Source" 13 bool "APEI Generic Hardware Error Source"
14 depends on ACPI_APEI && X86 14 depends on ACPI_APEI && X86
15 select ACPI_HED 15 select ACPI_HED
16 select LLIST
17 select GENERIC_ALLOCATOR
16 help 18 help
17 Generic Hardware Error Source provides a way to report 19 Generic Hardware Error Source provides a way to report
18 platform hardware errors (such as that from chipset). It 20 platform hardware errors (such as that from chipset). It
@@ -30,6 +32,13 @@ config ACPI_APEI_PCIEAER
30 PCIe AER errors may be reported via APEI firmware first mode. 32 PCIe AER errors may be reported via APEI firmware first mode.
31 Turn on this option to enable the corresponding support. 33 Turn on this option to enable the corresponding support.
32 34
35config ACPI_APEI_MEMORY_FAILURE
36 bool "APEI memory error recovering support"
37 depends on ACPI_APEI && MEMORY_FAILURE
38 help
39 Memory errors may be reported via APEI firmware first mode.
40 Turn on this option to enable the memory recovering support.
41
33config ACPI_APEI_EINJ 42config ACPI_APEI_EINJ
34 tristate "APEI Error INJection (EINJ)" 43 tristate "APEI Error INJection (EINJ)"
35 depends on ACPI_APEI && DEBUG_FS 44 depends on ACPI_APEI && DEBUG_FS
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 4a904a4bf05f..8041248fce9b 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -157,9 +157,10 @@ EXPORT_SYMBOL_GPL(apei_exec_noop);
157 * Interpret the specified action. Go through whole action table, 157 * Interpret the specified action. Go through whole action table,
158 * execute all instructions belong to the action. 158 * execute all instructions belong to the action.
159 */ 159 */
160int apei_exec_run(struct apei_exec_context *ctx, u8 action) 160int __apei_exec_run(struct apei_exec_context *ctx, u8 action,
161 bool optional)
161{ 162{
162 int rc; 163 int rc = -ENOENT;
163 u32 i, ip; 164 u32 i, ip;
164 struct acpi_whea_header *entry; 165 struct acpi_whea_header *entry;
165 apei_exec_ins_func_t run; 166 apei_exec_ins_func_t run;
@@ -198,9 +199,9 @@ rewind:
198 goto rewind; 199 goto rewind;
199 } 200 }
200 201
201 return 0; 202 return !optional && rc < 0 ? rc : 0;
202} 203}
203EXPORT_SYMBOL_GPL(apei_exec_run); 204EXPORT_SYMBOL_GPL(__apei_exec_run);
204 205
205typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx, 206typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
206 struct acpi_whea_header *entry, 207 struct acpi_whea_header *entry,
@@ -603,3 +604,29 @@ struct dentry *apei_get_debugfs_dir(void)
603 return dapei; 604 return dapei;
604} 605}
605EXPORT_SYMBOL_GPL(apei_get_debugfs_dir); 606EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
607
608int apei_osc_setup(void)
609{
610 static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";
611 acpi_handle handle;
612 u32 capbuf[3];
613 struct acpi_osc_context context = {
614 .uuid_str = whea_uuid_str,
615 .rev = 1,
616 .cap.length = sizeof(capbuf),
617 .cap.pointer = capbuf,
618 };
619
620 capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
621 capbuf[OSC_SUPPORT_TYPE] = 0;
622 capbuf[OSC_CONTROL_TYPE] = 0;
623
624 if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
625 || ACPI_FAILURE(acpi_run_osc(handle, &context)))
626 return -EIO;
627 else {
628 kfree(context.ret.pointer);
629 return 0;
630 }
631}
632EXPORT_SYMBOL_GPL(apei_osc_setup);
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index ef0581f2094d..f57050e7a5e7 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -50,7 +50,18 @@ static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx)
50 return ctx->value; 50 return ctx->value;
51} 51}
52 52
53int apei_exec_run(struct apei_exec_context *ctx, u8 action); 53int __apei_exec_run(struct apei_exec_context *ctx, u8 action, bool optional);
54
55static inline int apei_exec_run(struct apei_exec_context *ctx, u8 action)
56{
57 return __apei_exec_run(ctx, action, 0);
58}
59
60/* It is optional whether the firmware provides the action */
61static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 action)
62{
63 return __apei_exec_run(ctx, action, 1);
64}
54 65
55/* Common instruction implementation */ 66/* Common instruction implementation */
56 67
@@ -113,4 +124,6 @@ void apei_estatus_print(const char *pfx,
113 const struct acpi_hest_generic_status *estatus); 124 const struct acpi_hest_generic_status *estatus);
114int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus); 125int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
115int apei_estatus_check(const struct acpi_hest_generic_status *estatus); 126int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
127
128int apei_osc_setup(void);
116#endif 129#endif
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index f74b2ea11f21..589b96c38704 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -46,7 +46,8 @@
46 * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the 46 * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
47 * EINJ table through an unpublished extension. Use with caution as 47 * EINJ table through an unpublished extension. Use with caution as
48 * most will ignore the parameter and make their own choice of address 48 * most will ignore the parameter and make their own choice of address
49 * for error injection. 49 * for error injection. This extension is used only if
50 * param_extension module parameter is specified.
50 */ 51 */
51struct einj_parameter { 52struct einj_parameter {
52 u64 type; 53 u64 type;
@@ -65,6 +66,9 @@ struct einj_parameter {
65 ((struct acpi_whea_header *)((char *)(tab) + \ 66 ((struct acpi_whea_header *)((char *)(tab) + \
66 sizeof(struct acpi_table_einj))) 67 sizeof(struct acpi_table_einj)))
67 68
69static bool param_extension;
70module_param(param_extension, bool, 0);
71
68static struct acpi_table_einj *einj_tab; 72static struct acpi_table_einj *einj_tab;
69 73
70static struct apei_resources einj_resources; 74static struct apei_resources einj_resources;
@@ -285,7 +289,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
285 289
286 einj_exec_ctx_init(&ctx); 290 einj_exec_ctx_init(&ctx);
287 291
288 rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION); 292 rc = apei_exec_run_optional(&ctx, ACPI_EINJ_BEGIN_OPERATION);
289 if (rc) 293 if (rc)
290 return rc; 294 return rc;
291 apei_exec_ctx_set_input(&ctx, type); 295 apei_exec_ctx_set_input(&ctx, type);
@@ -323,7 +327,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
323 rc = __einj_error_trigger(trigger_paddr); 327 rc = __einj_error_trigger(trigger_paddr);
324 if (rc) 328 if (rc)
325 return rc; 329 return rc;
326 rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION); 330 rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
327 331
328 return rc; 332 return rc;
329} 333}
@@ -489,14 +493,6 @@ static int __init einj_init(void)
489 einj_debug_dir, NULL, &error_type_fops); 493 einj_debug_dir, NULL, &error_type_fops);
490 if (!fentry) 494 if (!fentry)
491 goto err_cleanup; 495 goto err_cleanup;
492 fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
493 einj_debug_dir, &error_param1);
494 if (!fentry)
495 goto err_cleanup;
496 fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
497 einj_debug_dir, &error_param2);
498 if (!fentry)
499 goto err_cleanup;
500 fentry = debugfs_create_file("error_inject", S_IWUSR, 496 fentry = debugfs_create_file("error_inject", S_IWUSR,
501 einj_debug_dir, NULL, &error_inject_fops); 497 einj_debug_dir, NULL, &error_inject_fops);
502 if (!fentry) 498 if (!fentry)
@@ -513,12 +509,23 @@ static int __init einj_init(void)
513 rc = apei_exec_pre_map_gars(&ctx); 509 rc = apei_exec_pre_map_gars(&ctx);
514 if (rc) 510 if (rc)
515 goto err_release; 511 goto err_release;
516 param_paddr = einj_get_parameter_address(); 512 if (param_extension) {
517 if (param_paddr) { 513 param_paddr = einj_get_parameter_address();
518 einj_param = ioremap(param_paddr, sizeof(*einj_param)); 514 if (param_paddr) {
519 rc = -ENOMEM; 515 einj_param = ioremap(param_paddr, sizeof(*einj_param));
520 if (!einj_param) 516 rc = -ENOMEM;
521 goto err_unmap; 517 if (!einj_param)
518 goto err_unmap;
519 fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
520 einj_debug_dir, &error_param1);
521 if (!fentry)
522 goto err_unmap;
523 fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
524 einj_debug_dir, &error_param2);
525 if (!fentry)
526 goto err_unmap;
527 } else
528 pr_warn(EINJ_PFX "Parameter extension is not supported.\n");
522 } 529 }
523 530
524 pr_info(EINJ_PFX "Error INJection is initialized.\n"); 531 pr_info(EINJ_PFX "Error INJection is initialized.\n");
@@ -526,6 +533,8 @@ static int __init einj_init(void)
526 return 0; 533 return 0;
527 534
528err_unmap: 535err_unmap:
536 if (einj_param)
537 iounmap(einj_param);
529 apei_exec_post_unmap_gars(&ctx); 538 apei_exec_post_unmap_gars(&ctx);
530err_release: 539err_release:
531 apei_resources_release(&einj_resources); 540 apei_resources_release(&einj_resources);
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index a4cfb64c86a1..903549df809b 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -33,7 +33,7 @@
33 33
34#define ERST_DBG_PFX "ERST DBG: " 34#define ERST_DBG_PFX "ERST DBG: "
35 35
36#define ERST_DBG_RECORD_LEN_MAX 4096 36#define ERST_DBG_RECORD_LEN_MAX 0x4000
37 37
38static void *erst_dbg_buf; 38static void *erst_dbg_buf;
39static unsigned int erst_dbg_buf_len; 39static unsigned int erst_dbg_buf_len;
@@ -213,6 +213,10 @@ static struct miscdevice erst_dbg_dev = {
213 213
214static __init int erst_dbg_init(void) 214static __init int erst_dbg_init(void)
215{ 215{
216 if (erst_disable) {
217 pr_info(ERST_DBG_PFX "ERST support is disabled.\n");
218 return -ENODEV;
219 }
216 return misc_register(&erst_dbg_dev); 220 return misc_register(&erst_dbg_dev);
217} 221}
218 222
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index e6cef8e1b534..2ca59dc69f7f 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -642,7 +642,7 @@ static int __erst_write_to_storage(u64 offset)
642 int rc; 642 int rc;
643 643
644 erst_exec_ctx_init(&ctx); 644 erst_exec_ctx_init(&ctx);
645 rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE); 645 rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_WRITE);
646 if (rc) 646 if (rc)
647 return rc; 647 return rc;
648 apei_exec_ctx_set_input(&ctx, offset); 648 apei_exec_ctx_set_input(&ctx, offset);
@@ -666,7 +666,7 @@ static int __erst_write_to_storage(u64 offset)
666 if (rc) 666 if (rc)
667 return rc; 667 return rc;
668 val = apei_exec_ctx_get_output(&ctx); 668 val = apei_exec_ctx_get_output(&ctx);
669 rc = apei_exec_run(&ctx, ACPI_ERST_END); 669 rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
670 if (rc) 670 if (rc)
671 return rc; 671 return rc;
672 672
@@ -681,7 +681,7 @@ static int __erst_read_from_storage(u64 record_id, u64 offset)
681 int rc; 681 int rc;
682 682
683 erst_exec_ctx_init(&ctx); 683 erst_exec_ctx_init(&ctx);
684 rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ); 684 rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_READ);
685 if (rc) 685 if (rc)
686 return rc; 686 return rc;
687 apei_exec_ctx_set_input(&ctx, offset); 687 apei_exec_ctx_set_input(&ctx, offset);
@@ -709,7 +709,7 @@ static int __erst_read_from_storage(u64 record_id, u64 offset)
709 if (rc) 709 if (rc)
710 return rc; 710 return rc;
711 val = apei_exec_ctx_get_output(&ctx); 711 val = apei_exec_ctx_get_output(&ctx);
712 rc = apei_exec_run(&ctx, ACPI_ERST_END); 712 rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
713 if (rc) 713 if (rc)
714 return rc; 714 return rc;
715 715
@@ -724,7 +724,7 @@ static int __erst_clear_from_storage(u64 record_id)
724 int rc; 724 int rc;
725 725
726 erst_exec_ctx_init(&ctx); 726 erst_exec_ctx_init(&ctx);
727 rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR); 727 rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_CLEAR);
728 if (rc) 728 if (rc)
729 return rc; 729 return rc;
730 apei_exec_ctx_set_input(&ctx, record_id); 730 apei_exec_ctx_set_input(&ctx, record_id);
@@ -748,7 +748,7 @@ static int __erst_clear_from_storage(u64 record_id)
748 if (rc) 748 if (rc)
749 return rc; 749 return rc;
750 val = apei_exec_ctx_get_output(&ctx); 750 val = apei_exec_ctx_get_output(&ctx);
751 rc = apei_exec_run(&ctx, ACPI_ERST_END); 751 rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
752 if (rc) 752 if (rc)
753 return rc; 753 return rc;
754 754
@@ -932,8 +932,11 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
932static int erst_open_pstore(struct pstore_info *psi); 932static int erst_open_pstore(struct pstore_info *psi);
933static int erst_close_pstore(struct pstore_info *psi); 933static int erst_close_pstore(struct pstore_info *psi);
934static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, 934static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
935 struct timespec *time); 935 struct timespec *time, struct pstore_info *psi);
936static u64 erst_writer(enum pstore_type_id type, size_t size); 936static u64 erst_writer(enum pstore_type_id type, unsigned int part,
937 size_t size, struct pstore_info *psi);
938static int erst_clearer(enum pstore_type_id type, u64 id,
939 struct pstore_info *psi);
937 940
938static struct pstore_info erst_info = { 941static struct pstore_info erst_info = {
939 .owner = THIS_MODULE, 942 .owner = THIS_MODULE,
@@ -942,7 +945,7 @@ static struct pstore_info erst_info = {
942 .close = erst_close_pstore, 945 .close = erst_close_pstore,
943 .read = erst_reader, 946 .read = erst_reader,
944 .write = erst_writer, 947 .write = erst_writer,
945 .erase = erst_clear 948 .erase = erst_clearer
946}; 949};
947 950
948#define CPER_CREATOR_PSTORE \ 951#define CPER_CREATOR_PSTORE \
@@ -983,7 +986,7 @@ static int erst_close_pstore(struct pstore_info *psi)
983} 986}
984 987
985static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, 988static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
986 struct timespec *time) 989 struct timespec *time, struct pstore_info *psi)
987{ 990{
988 int rc; 991 int rc;
989 ssize_t len = 0; 992 ssize_t len = 0;
@@ -1037,7 +1040,8 @@ out:
1037 return (rc < 0) ? rc : (len - sizeof(*rcd)); 1040 return (rc < 0) ? rc : (len - sizeof(*rcd));
1038} 1041}
1039 1042
1040static u64 erst_writer(enum pstore_type_id type, size_t size) 1043static u64 erst_writer(enum pstore_type_id type, unsigned int part,
1044 size_t size, struct pstore_info *psi)
1041{ 1045{
1042 struct cper_pstore_record *rcd = (struct cper_pstore_record *) 1046 struct cper_pstore_record *rcd = (struct cper_pstore_record *)
1043 (erst_info.buf - sizeof(*rcd)); 1047 (erst_info.buf - sizeof(*rcd));
@@ -1080,6 +1084,12 @@ static u64 erst_writer(enum pstore_type_id type, size_t size)
1080 return rcd->hdr.record_id; 1084 return rcd->hdr.record_id;
1081} 1085}
1082 1086
1087static int erst_clearer(enum pstore_type_id type, u64 id,
1088 struct pstore_info *psi)
1089{
1090 return erst_clear(id);
1091}
1092
1083static int __init erst_init(void) 1093static int __init erst_init(void)
1084{ 1094{
1085 int rc = 0; 1095 int rc = 0;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index f703b2881153..0784f99a4665 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -12,7 +12,7 @@
12 * For more information about Generic Hardware Error Source, please 12 * For more information about Generic Hardware Error Source, please
13 * refer to ACPI Specification version 4.0, section 17.3.2.6 13 * refer to ACPI Specification version 4.0, section 17.3.2.6
14 * 14 *
15 * Copyright 2010 Intel Corp. 15 * Copyright 2010,2011 Intel Corp.
16 * Author: Huang Ying <ying.huang@intel.com> 16 * Author: Huang Ying <ying.huang@intel.com>
17 * 17 *
18 * This program is free software; you can redistribute it and/or 18 * This program is free software; you can redistribute it and/or
@@ -42,6 +42,9 @@
42#include <linux/mutex.h> 42#include <linux/mutex.h>
43#include <linux/ratelimit.h> 43#include <linux/ratelimit.h>
44#include <linux/vmalloc.h> 44#include <linux/vmalloc.h>
45#include <linux/irq_work.h>
46#include <linux/llist.h>
47#include <linux/genalloc.h>
45#include <acpi/apei.h> 48#include <acpi/apei.h>
46#include <acpi/atomicio.h> 49#include <acpi/atomicio.h>
47#include <acpi/hed.h> 50#include <acpi/hed.h>
@@ -53,6 +56,30 @@
53#define GHES_PFX "GHES: " 56#define GHES_PFX "GHES: "
54 57
55#define GHES_ESTATUS_MAX_SIZE 65536 58#define GHES_ESTATUS_MAX_SIZE 65536
59#define GHES_ESOURCE_PREALLOC_MAX_SIZE 65536
60
61#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3
62
63/* This is just an estimation for memory pool allocation */
64#define GHES_ESTATUS_CACHE_AVG_SIZE 512
65
66#define GHES_ESTATUS_CACHES_SIZE 4
67
68#define GHES_ESTATUS_IN_CACHE_MAX_NSEC 10000000000ULL
69/* Prevent too many caches are allocated because of RCU */
70#define GHES_ESTATUS_CACHE_ALLOCED_MAX (GHES_ESTATUS_CACHES_SIZE * 3 / 2)
71
72#define GHES_ESTATUS_CACHE_LEN(estatus_len) \
73 (sizeof(struct ghes_estatus_cache) + (estatus_len))
74#define GHES_ESTATUS_FROM_CACHE(estatus_cache) \
75 ((struct acpi_hest_generic_status *) \
76 ((struct ghes_estatus_cache *)(estatus_cache) + 1))
77
78#define GHES_ESTATUS_NODE_LEN(estatus_len) \
79 (sizeof(struct ghes_estatus_node) + (estatus_len))
80#define GHES_ESTATUS_FROM_NODE(estatus_node) \
81 ((struct acpi_hest_generic_status *) \
82 ((struct ghes_estatus_node *)(estatus_node) + 1))
56 83
57/* 84/*
58 * One struct ghes is created for each generic hardware error source. 85 * One struct ghes is created for each generic hardware error source.
@@ -77,6 +104,22 @@ struct ghes {
77 }; 104 };
78}; 105};
79 106
107struct ghes_estatus_node {
108 struct llist_node llnode;
109 struct acpi_hest_generic *generic;
110};
111
112struct ghes_estatus_cache {
113 u32 estatus_len;
114 atomic_t count;
115 struct acpi_hest_generic *generic;
116 unsigned long long time_in;
117 struct rcu_head rcu;
118};
119
120int ghes_disable;
121module_param_named(disable, ghes_disable, bool, 0);
122
80static int ghes_panic_timeout __read_mostly = 30; 123static int ghes_panic_timeout __read_mostly = 30;
81 124
82/* 125/*
@@ -121,6 +164,22 @@ static struct vm_struct *ghes_ioremap_area;
121static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi); 164static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
122static DEFINE_SPINLOCK(ghes_ioremap_lock_irq); 165static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
123 166
167/*
168 * printk is not safe in NMI context. So in NMI handler, we allocate
169 * required memory from lock-less memory allocator
170 * (ghes_estatus_pool), save estatus into it, put them into lock-less
171 * list (ghes_estatus_llist), then delay printk into IRQ context via
172 * irq_work (ghes_proc_irq_work). ghes_estatus_size_request record
173 * required pool size by all NMI error source.
174 */
175static struct gen_pool *ghes_estatus_pool;
176static unsigned long ghes_estatus_pool_size_request;
177static struct llist_head ghes_estatus_llist;
178static struct irq_work ghes_proc_irq_work;
179
180struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
181static atomic_t ghes_estatus_cache_alloced;
182
124static int ghes_ioremap_init(void) 183static int ghes_ioremap_init(void)
125{ 184{
126 ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES, 185 ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
@@ -180,6 +239,55 @@ static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
180 __flush_tlb_one(vaddr); 239 __flush_tlb_one(vaddr);
181} 240}
182 241
242static int ghes_estatus_pool_init(void)
243{
244 ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
245 if (!ghes_estatus_pool)
246 return -ENOMEM;
247 return 0;
248}
249
250static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
251 struct gen_pool_chunk *chunk,
252 void *data)
253{
254 free_page(chunk->start_addr);
255}
256
257static void ghes_estatus_pool_exit(void)
258{
259 gen_pool_for_each_chunk(ghes_estatus_pool,
260 ghes_estatus_pool_free_chunk_page, NULL);
261 gen_pool_destroy(ghes_estatus_pool);
262}
263
264static int ghes_estatus_pool_expand(unsigned long len)
265{
266 unsigned long i, pages, size, addr;
267 int ret;
268
269 ghes_estatus_pool_size_request += PAGE_ALIGN(len);
270 size = gen_pool_size(ghes_estatus_pool);
271 if (size >= ghes_estatus_pool_size_request)
272 return 0;
273 pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
274 for (i = 0; i < pages; i++) {
275 addr = __get_free_page(GFP_KERNEL);
276 if (!addr)
277 return -ENOMEM;
278 ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
279 if (ret)
280 return ret;
281 }
282
283 return 0;
284}
285
286static void ghes_estatus_pool_shrink(unsigned long len)
287{
288 ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
289}
290
183static struct ghes *ghes_new(struct acpi_hest_generic *generic) 291static struct ghes *ghes_new(struct acpi_hest_generic *generic)
184{ 292{
185 struct ghes *ghes; 293 struct ghes *ghes;
@@ -341,43 +449,196 @@ static void ghes_clear_estatus(struct ghes *ghes)
341 ghes->flags &= ~GHES_TO_CLEAR; 449 ghes->flags &= ~GHES_TO_CLEAR;
342} 450}
343 451
344static void ghes_do_proc(struct ghes *ghes) 452static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
345{ 453{
346 int sev, processed = 0; 454 int sev, sec_sev;
347 struct acpi_hest_generic_data *gdata; 455 struct acpi_hest_generic_data *gdata;
348 456
349 sev = ghes_severity(ghes->estatus->error_severity); 457 sev = ghes_severity(estatus->error_severity);
350 apei_estatus_for_each_section(ghes->estatus, gdata) { 458 apei_estatus_for_each_section(estatus, gdata) {
351#ifdef CONFIG_X86_MCE 459 sec_sev = ghes_severity(gdata->error_severity);
352 if (!uuid_le_cmp(*(uuid_le *)gdata->section_type, 460 if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
353 CPER_SEC_PLATFORM_MEM)) { 461 CPER_SEC_PLATFORM_MEM)) {
354 apei_mce_report_mem_error( 462 struct cper_sec_mem_err *mem_err;
355 sev == GHES_SEV_CORRECTED, 463 mem_err = (struct cper_sec_mem_err *)(gdata+1);
356 (struct cper_sec_mem_err *)(gdata+1)); 464#ifdef CONFIG_X86_MCE
357 processed = 1; 465 apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
358 } 466 mem_err);
359#endif 467#endif
468#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
469 if (sev == GHES_SEV_RECOVERABLE &&
470 sec_sev == GHES_SEV_RECOVERABLE &&
471 mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
472 unsigned long pfn;
473 pfn = mem_err->physical_addr >> PAGE_SHIFT;
474 memory_failure_queue(pfn, 0, 0);
475 }
476#endif
477 }
360 } 478 }
361} 479}
362 480
363static void ghes_print_estatus(const char *pfx, struct ghes *ghes) 481static void __ghes_print_estatus(const char *pfx,
482 const struct acpi_hest_generic *generic,
483 const struct acpi_hest_generic_status *estatus)
364{ 484{
365 /* Not more than 2 messages every 5 seconds */
366 static DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2);
367
368 if (pfx == NULL) { 485 if (pfx == NULL) {
369 if (ghes_severity(ghes->estatus->error_severity) <= 486 if (ghes_severity(estatus->error_severity) <=
370 GHES_SEV_CORRECTED) 487 GHES_SEV_CORRECTED)
371 pfx = KERN_WARNING HW_ERR; 488 pfx = KERN_WARNING HW_ERR;
372 else 489 else
373 pfx = KERN_ERR HW_ERR; 490 pfx = KERN_ERR HW_ERR;
374 } 491 }
375 if (__ratelimit(&ratelimit)) { 492 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
376 printk( 493 pfx, generic->header.source_id);
377 "%s""Hardware error from APEI Generic Hardware Error Source: %d\n", 494 apei_estatus_print(pfx, estatus);
378 pfx, ghes->generic->header.source_id); 495}
379 apei_estatus_print(pfx, ghes->estatus); 496
497static int ghes_print_estatus(const char *pfx,
498 const struct acpi_hest_generic *generic,
499 const struct acpi_hest_generic_status *estatus)
500{
501 /* Not more than 2 messages every 5 seconds */
502 static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
503 static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
504 struct ratelimit_state *ratelimit;
505
506 if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
507 ratelimit = &ratelimit_corrected;
508 else
509 ratelimit = &ratelimit_uncorrected;
510 if (__ratelimit(ratelimit)) {
511 __ghes_print_estatus(pfx, generic, estatus);
512 return 1;
380 } 513 }
514 return 0;
515}
516
517/*
518 * GHES error status reporting throttle, to report more kinds of
519 * errors, instead of just most frequently occurred errors.
520 */
521static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
522{
523 u32 len;
524 int i, cached = 0;
525 unsigned long long now;
526 struct ghes_estatus_cache *cache;
527 struct acpi_hest_generic_status *cache_estatus;
528
529 len = apei_estatus_len(estatus);
530 rcu_read_lock();
531 for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
532 cache = rcu_dereference(ghes_estatus_caches[i]);
533 if (cache == NULL)
534 continue;
535 if (len != cache->estatus_len)
536 continue;
537 cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
538 if (memcmp(estatus, cache_estatus, len))
539 continue;
540 atomic_inc(&cache->count);
541 now = sched_clock();
542 if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
543 cached = 1;
544 break;
545 }
546 rcu_read_unlock();
547 return cached;
548}
549
550static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
551 struct acpi_hest_generic *generic,
552 struct acpi_hest_generic_status *estatus)
553{
554 int alloced;
555 u32 len, cache_len;
556 struct ghes_estatus_cache *cache;
557 struct acpi_hest_generic_status *cache_estatus;
558
559 alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
560 if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
561 atomic_dec(&ghes_estatus_cache_alloced);
562 return NULL;
563 }
564 len = apei_estatus_len(estatus);
565 cache_len = GHES_ESTATUS_CACHE_LEN(len);
566 cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
567 if (!cache) {
568 atomic_dec(&ghes_estatus_cache_alloced);
569 return NULL;
570 }
571 cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
572 memcpy(cache_estatus, estatus, len);
573 cache->estatus_len = len;
574 atomic_set(&cache->count, 0);
575 cache->generic = generic;
576 cache->time_in = sched_clock();
577 return cache;
578}
579
580static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
581{
582 u32 len;
583
584 len = apei_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
585 len = GHES_ESTATUS_CACHE_LEN(len);
586 gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
587 atomic_dec(&ghes_estatus_cache_alloced);
588}
589
590static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
591{
592 struct ghes_estatus_cache *cache;
593
594 cache = container_of(head, struct ghes_estatus_cache, rcu);
595 ghes_estatus_cache_free(cache);
596}
597
598static void ghes_estatus_cache_add(
599 struct acpi_hest_generic *generic,
600 struct acpi_hest_generic_status *estatus)
601{
602 int i, slot = -1, count;
603 unsigned long long now, duration, period, max_period = 0;
604 struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;
605
606 new_cache = ghes_estatus_cache_alloc(generic, estatus);
607 if (new_cache == NULL)
608 return;
609 rcu_read_lock();
610 now = sched_clock();
611 for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
612 cache = rcu_dereference(ghes_estatus_caches[i]);
613 if (cache == NULL) {
614 slot = i;
615 slot_cache = NULL;
616 break;
617 }
618 duration = now - cache->time_in;
619 if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
620 slot = i;
621 slot_cache = cache;
622 break;
623 }
624 count = atomic_read(&cache->count);
625 period = duration;
626 do_div(period, (count + 1));
627 if (period > max_period) {
628 max_period = period;
629 slot = i;
630 slot_cache = cache;
631 }
632 }
633 /* new_cache must be put into array after its contents are written */
634 smp_wmb();
635 if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
636 slot_cache, new_cache) == slot_cache) {
637 if (slot_cache)
638 call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
639 } else
640 ghes_estatus_cache_free(new_cache);
641 rcu_read_unlock();
381} 642}
382 643
383static int ghes_proc(struct ghes *ghes) 644static int ghes_proc(struct ghes *ghes)
@@ -387,9 +648,11 @@ static int ghes_proc(struct ghes *ghes)
387 rc = ghes_read_estatus(ghes, 0); 648 rc = ghes_read_estatus(ghes, 0);
388 if (rc) 649 if (rc)
389 goto out; 650 goto out;
390 ghes_print_estatus(NULL, ghes); 651 if (!ghes_estatus_cached(ghes->estatus)) {
391 ghes_do_proc(ghes); 652 if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
392 653 ghes_estatus_cache_add(ghes->generic, ghes->estatus);
654 }
655 ghes_do_proc(ghes->estatus);
393out: 656out:
394 ghes_clear_estatus(ghes); 657 ghes_clear_estatus(ghes);
395 return 0; 658 return 0;
@@ -447,6 +710,45 @@ static int ghes_notify_sci(struct notifier_block *this,
447 return ret; 710 return ret;
448} 711}
449 712
713static void ghes_proc_in_irq(struct irq_work *irq_work)
714{
715 struct llist_node *llnode, *next, *tail = NULL;
716 struct ghes_estatus_node *estatus_node;
717 struct acpi_hest_generic *generic;
718 struct acpi_hest_generic_status *estatus;
719 u32 len, node_len;
720
721 /*
722 * Because the time order of estatus in list is reversed,
723 * revert it back to proper order.
724 */
725 llnode = llist_del_all(&ghes_estatus_llist);
726 while (llnode) {
727 next = llnode->next;
728 llnode->next = tail;
729 tail = llnode;
730 llnode = next;
731 }
732 llnode = tail;
733 while (llnode) {
734 next = llnode->next;
735 estatus_node = llist_entry(llnode, struct ghes_estatus_node,
736 llnode);
737 estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
738 len = apei_estatus_len(estatus);
739 node_len = GHES_ESTATUS_NODE_LEN(len);
740 ghes_do_proc(estatus);
741 if (!ghes_estatus_cached(estatus)) {
742 generic = estatus_node->generic;
743 if (ghes_print_estatus(NULL, generic, estatus))
744 ghes_estatus_cache_add(generic, estatus);
745 }
746 gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
747 node_len);
748 llnode = next;
749 }
750}
751
450static int ghes_notify_nmi(struct notifier_block *this, 752static int ghes_notify_nmi(struct notifier_block *this,
451 unsigned long cmd, void *data) 753 unsigned long cmd, void *data)
452{ 754{
@@ -476,7 +778,8 @@ static int ghes_notify_nmi(struct notifier_block *this,
476 778
477 if (sev_global >= GHES_SEV_PANIC) { 779 if (sev_global >= GHES_SEV_PANIC) {
478 oops_begin(); 780 oops_begin();
479 ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global); 781 __ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic,
782 ghes_global->estatus);
480 /* reboot to log the error! */ 783 /* reboot to log the error! */
481 if (panic_timeout == 0) 784 if (panic_timeout == 0)
482 panic_timeout = ghes_panic_timeout; 785 panic_timeout = ghes_panic_timeout;
@@ -484,12 +787,34 @@ static int ghes_notify_nmi(struct notifier_block *this,
484 } 787 }
485 788
486 list_for_each_entry_rcu(ghes, &ghes_nmi, list) { 789 list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
790#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
791 u32 len, node_len;
792 struct ghes_estatus_node *estatus_node;
793 struct acpi_hest_generic_status *estatus;
794#endif
487 if (!(ghes->flags & GHES_TO_CLEAR)) 795 if (!(ghes->flags & GHES_TO_CLEAR))
488 continue; 796 continue;
489 /* Do not print estatus because printk is not NMI safe */ 797#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
490 ghes_do_proc(ghes); 798 if (ghes_estatus_cached(ghes->estatus))
799 goto next;
800 /* Save estatus for further processing in IRQ context */
801 len = apei_estatus_len(ghes->estatus);
802 node_len = GHES_ESTATUS_NODE_LEN(len);
803 estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
804 node_len);
805 if (estatus_node) {
806 estatus_node->generic = ghes->generic;
807 estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
808 memcpy(estatus, ghes->estatus, len);
809 llist_add(&estatus_node->llnode, &ghes_estatus_llist);
810 }
811next:
812#endif
491 ghes_clear_estatus(ghes); 813 ghes_clear_estatus(ghes);
492 } 814 }
815#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
816 irq_work_queue(&ghes_proc_irq_work);
817#endif
493 818
494out: 819out:
495 raw_spin_unlock(&ghes_nmi_lock); 820 raw_spin_unlock(&ghes_nmi_lock);
@@ -504,10 +829,26 @@ static struct notifier_block ghes_notifier_nmi = {
504 .notifier_call = ghes_notify_nmi, 829 .notifier_call = ghes_notify_nmi,
505}; 830};
506 831
832static unsigned long ghes_esource_prealloc_size(
833 const struct acpi_hest_generic *generic)
834{
835 unsigned long block_length, prealloc_records, prealloc_size;
836
837 block_length = min_t(unsigned long, generic->error_block_length,
838 GHES_ESTATUS_MAX_SIZE);
839 prealloc_records = max_t(unsigned long,
840 generic->records_to_preallocate, 1);
841 prealloc_size = min_t(unsigned long, block_length * prealloc_records,
842 GHES_ESOURCE_PREALLOC_MAX_SIZE);
843
844 return prealloc_size;
845}
846
507static int __devinit ghes_probe(struct platform_device *ghes_dev) 847static int __devinit ghes_probe(struct platform_device *ghes_dev)
508{ 848{
509 struct acpi_hest_generic *generic; 849 struct acpi_hest_generic *generic;
510 struct ghes *ghes = NULL; 850 struct ghes *ghes = NULL;
851 unsigned long len;
511 int rc = -EINVAL; 852 int rc = -EINVAL;
512 853
513 generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data; 854 generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
@@ -573,6 +914,8 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
573 mutex_unlock(&ghes_list_mutex); 914 mutex_unlock(&ghes_list_mutex);
574 break; 915 break;
575 case ACPI_HEST_NOTIFY_NMI: 916 case ACPI_HEST_NOTIFY_NMI:
917 len = ghes_esource_prealloc_size(generic);
918 ghes_estatus_pool_expand(len);
576 mutex_lock(&ghes_list_mutex); 919 mutex_lock(&ghes_list_mutex);
577 if (list_empty(&ghes_nmi)) 920 if (list_empty(&ghes_nmi))
578 register_die_notifier(&ghes_notifier_nmi); 921 register_die_notifier(&ghes_notifier_nmi);
@@ -597,6 +940,7 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
597{ 940{
598 struct ghes *ghes; 941 struct ghes *ghes;
599 struct acpi_hest_generic *generic; 942 struct acpi_hest_generic *generic;
943 unsigned long len;
600 944
601 ghes = platform_get_drvdata(ghes_dev); 945 ghes = platform_get_drvdata(ghes_dev);
602 generic = ghes->generic; 946 generic = ghes->generic;
@@ -627,6 +971,8 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
627 * freed after NMI handler finishes. 971 * freed after NMI handler finishes.
628 */ 972 */
629 synchronize_rcu(); 973 synchronize_rcu();
974 len = ghes_esource_prealloc_size(generic);
975 ghes_estatus_pool_shrink(len);
630 break; 976 break;
631 default: 977 default:
632 BUG(); 978 BUG();
@@ -662,15 +1008,43 @@ static int __init ghes_init(void)
662 return -EINVAL; 1008 return -EINVAL;
663 } 1009 }
664 1010
1011 if (ghes_disable) {
1012 pr_info(GHES_PFX "GHES is not enabled!\n");
1013 return -EINVAL;
1014 }
1015
1016 init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
1017
665 rc = ghes_ioremap_init(); 1018 rc = ghes_ioremap_init();
666 if (rc) 1019 if (rc)
667 goto err; 1020 goto err;
668 1021
669 rc = platform_driver_register(&ghes_platform_driver); 1022 rc = ghes_estatus_pool_init();
670 if (rc) 1023 if (rc)
671 goto err_ioremap_exit; 1024 goto err_ioremap_exit;
672 1025
1026 rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
1027 GHES_ESTATUS_CACHE_ALLOCED_MAX);
1028 if (rc)
1029 goto err_pool_exit;
1030
1031 rc = platform_driver_register(&ghes_platform_driver);
1032 if (rc)
1033 goto err_pool_exit;
1034
1035 rc = apei_osc_setup();
1036 if (rc == 0 && osc_sb_apei_support_acked)
1037 pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
1038 else if (rc == 0 && !osc_sb_apei_support_acked)
1039 pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
1040 else if (rc && osc_sb_apei_support_acked)
1041 pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
1042 else
1043 pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
1044
673 return 0; 1045 return 0;
1046err_pool_exit:
1047 ghes_estatus_pool_exit();
674err_ioremap_exit: 1048err_ioremap_exit:
675 ghes_ioremap_exit(); 1049 ghes_ioremap_exit();
676err: 1050err:
@@ -680,6 +1054,7 @@ err:
680static void __exit ghes_exit(void) 1054static void __exit ghes_exit(void)
681{ 1055{
682 platform_driver_unregister(&ghes_platform_driver); 1056 platform_driver_unregister(&ghes_platform_driver);
1057 ghes_estatus_pool_exit();
683 ghes_ioremap_exit(); 1058 ghes_ioremap_exit();
684} 1059}
685 1060
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 181bc2f7bb74..05fee06f4d6e 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -231,16 +231,17 @@ void __init acpi_hest_init(void)
231 goto err; 231 goto err;
232 } 232 }
233 233
234 rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count); 234 if (!ghes_disable) {
235 if (rc) 235 rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
236 goto err; 236 if (rc)
237 237 goto err;
238 rc = hest_ghes_dev_register(ghes_count); 238 rc = hest_ghes_dev_register(ghes_count);
239 if (!rc) { 239 if (rc)
240 pr_info(HEST_PFX "Table parsing has been initialized.\n"); 240 goto err;
241 return;
242 } 241 }
243 242
243 pr_info(HEST_PFX "Table parsing has been initialized.\n");
244 return;
244err: 245err:
245 hest_disable = 1; 246 hest_disable = 1;
246} 247}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 2c661353e8f2..7711d94a0409 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -55,6 +55,9 @@
55#define ACPI_BATTERY_NOTIFY_INFO 0x81 55#define ACPI_BATTERY_NOTIFY_INFO 0x81
56#define ACPI_BATTERY_NOTIFY_THRESHOLD 0x82 56#define ACPI_BATTERY_NOTIFY_THRESHOLD 0x82
57 57
58/* Battery power unit: 0 means mW, 1 means mA */
59#define ACPI_BATTERY_POWER_UNIT_MA 1
60
58#define _COMPONENT ACPI_BATTERY_COMPONENT 61#define _COMPONENT ACPI_BATTERY_COMPONENT
59 62
60ACPI_MODULE_NAME("battery"); 63ACPI_MODULE_NAME("battery");
@@ -91,16 +94,12 @@ MODULE_DEVICE_TABLE(acpi, battery_device_ids);
91enum { 94enum {
92 ACPI_BATTERY_ALARM_PRESENT, 95 ACPI_BATTERY_ALARM_PRESENT,
93 ACPI_BATTERY_XINFO_PRESENT, 96 ACPI_BATTERY_XINFO_PRESENT,
94 /* For buggy DSDTs that report negative 16-bit values for either
95 * charging or discharging current and/or report 0 as 65536
96 * due to bad math.
97 */
98 ACPI_BATTERY_QUIRK_SIGNED16_CURRENT,
99 ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, 97 ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
100}; 98};
101 99
102struct acpi_battery { 100struct acpi_battery {
103 struct mutex lock; 101 struct mutex lock;
102 struct mutex sysfs_lock;
104 struct power_supply bat; 103 struct power_supply bat;
105 struct acpi_device *device; 104 struct acpi_device *device;
106 struct notifier_block pm_nb; 105 struct notifier_block pm_nb;
@@ -301,7 +300,8 @@ static enum power_supply_property energy_battery_props[] = {
301#ifdef CONFIG_ACPI_PROCFS_POWER 300#ifdef CONFIG_ACPI_PROCFS_POWER
302inline char *acpi_battery_units(struct acpi_battery *battery) 301inline char *acpi_battery_units(struct acpi_battery *battery)
303{ 302{
304 return (battery->power_unit)?"mA":"mW"; 303 return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
304 "mA" : "mW";
305} 305}
306#endif 306#endif
307 307
@@ -461,9 +461,17 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
461 battery->update_time = jiffies; 461 battery->update_time = jiffies;
462 kfree(buffer.pointer); 462 kfree(buffer.pointer);
463 463
464 if (test_bit(ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, &battery->flags) && 464 /* For buggy DSDTs that report negative 16-bit values for either
465 battery->rate_now != -1) 465 * charging or discharging current and/or report 0 as 65536
466 * due to bad math.
467 */
468 if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA &&
469 battery->rate_now != ACPI_BATTERY_VALUE_UNKNOWN &&
470 (s16)(battery->rate_now) < 0) {
466 battery->rate_now = abs((s16)battery->rate_now); 471 battery->rate_now = abs((s16)battery->rate_now);
472 printk_once(KERN_WARNING FW_BUG "battery: (dis)charge rate"
473 " invalid.\n");
474 }
467 475
468 if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags) 476 if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
469 && battery->capacity_now >= 0 && battery->capacity_now <= 100) 477 && battery->capacity_now >= 0 && battery->capacity_now <= 100)
@@ -544,7 +552,7 @@ static int sysfs_add_battery(struct acpi_battery *battery)
544{ 552{
545 int result; 553 int result;
546 554
547 if (battery->power_unit) { 555 if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) {
548 battery->bat.properties = charge_battery_props; 556 battery->bat.properties = charge_battery_props;
549 battery->bat.num_properties = 557 battery->bat.num_properties =
550 ARRAY_SIZE(charge_battery_props); 558 ARRAY_SIZE(charge_battery_props);
@@ -566,18 +574,16 @@ static int sysfs_add_battery(struct acpi_battery *battery)
566 574
567static void sysfs_remove_battery(struct acpi_battery *battery) 575static void sysfs_remove_battery(struct acpi_battery *battery)
568{ 576{
569 if (!battery->bat.dev) 577 mutex_lock(&battery->sysfs_lock);
578 if (!battery->bat.dev) {
579 mutex_unlock(&battery->sysfs_lock);
570 return; 580 return;
581 }
582
571 device_remove_file(battery->bat.dev, &alarm_attr); 583 device_remove_file(battery->bat.dev, &alarm_attr);
572 power_supply_unregister(&battery->bat); 584 power_supply_unregister(&battery->bat);
573 battery->bat.dev = NULL; 585 battery->bat.dev = NULL;
574} 586 mutex_unlock(&battery->sysfs_lock);
575
576static void acpi_battery_quirks(struct acpi_battery *battery)
577{
578 if (dmi_name_in_vendors("Acer") && battery->power_unit) {
579 set_bit(ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, &battery->flags);
580 }
581} 587}
582 588
583/* 589/*
@@ -592,7 +598,7 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
592 * 598 *
593 * Handle this correctly so that they won't break userspace. 599 * Handle this correctly so that they won't break userspace.
594 */ 600 */
595static void acpi_battery_quirks2(struct acpi_battery *battery) 601static void acpi_battery_quirks(struct acpi_battery *battery)
596{ 602{
597 if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)) 603 if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
598 return ; 604 return ;
@@ -623,13 +629,15 @@ static int acpi_battery_update(struct acpi_battery *battery)
623 result = acpi_battery_get_info(battery); 629 result = acpi_battery_get_info(battery);
624 if (result) 630 if (result)
625 return result; 631 return result;
626 acpi_battery_quirks(battery);
627 acpi_battery_init_alarm(battery); 632 acpi_battery_init_alarm(battery);
628 } 633 }
629 if (!battery->bat.dev) 634 if (!battery->bat.dev) {
630 sysfs_add_battery(battery); 635 result = sysfs_add_battery(battery);
636 if (result)
637 return result;
638 }
631 result = acpi_battery_get_state(battery); 639 result = acpi_battery_get_state(battery);
632 acpi_battery_quirks2(battery); 640 acpi_battery_quirks(battery);
633 return result; 641 return result;
634} 642}
635 643
@@ -863,7 +871,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
863 }, \ 871 }, \
864 } 872 }
865 873
866static struct battery_file { 874static const struct battery_file {
867 struct file_operations ops; 875 struct file_operations ops;
868 mode_t mode; 876 mode_t mode;
869 const char *name; 877 const char *name;
@@ -948,9 +956,12 @@ static int battery_notify(struct notifier_block *nb,
948 struct acpi_battery *battery = container_of(nb, struct acpi_battery, 956 struct acpi_battery *battery = container_of(nb, struct acpi_battery,
949 pm_nb); 957 pm_nb);
950 switch (mode) { 958 switch (mode) {
959 case PM_POST_HIBERNATION:
951 case PM_POST_SUSPEND: 960 case PM_POST_SUSPEND:
952 sysfs_remove_battery(battery); 961 if (battery->bat.dev) {
953 sysfs_add_battery(battery); 962 sysfs_remove_battery(battery);
963 sysfs_add_battery(battery);
964 }
954 break; 965 break;
955 } 966 }
956 967
@@ -972,28 +983,38 @@ static int acpi_battery_add(struct acpi_device *device)
972 strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS); 983 strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
973 device->driver_data = battery; 984 device->driver_data = battery;
974 mutex_init(&battery->lock); 985 mutex_init(&battery->lock);
986 mutex_init(&battery->sysfs_lock);
975 if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle, 987 if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle,
976 "_BIX", &handle))) 988 "_BIX", &handle)))
977 set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); 989 set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
978 acpi_battery_update(battery); 990 result = acpi_battery_update(battery);
991 if (result)
992 goto fail;
979#ifdef CONFIG_ACPI_PROCFS_POWER 993#ifdef CONFIG_ACPI_PROCFS_POWER
980 result = acpi_battery_add_fs(device); 994 result = acpi_battery_add_fs(device);
981#endif 995#endif
982 if (!result) { 996 if (result) {
983 printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
984 ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
985 device->status.battery_present ? "present" : "absent");
986 } else {
987#ifdef CONFIG_ACPI_PROCFS_POWER 997#ifdef CONFIG_ACPI_PROCFS_POWER
988 acpi_battery_remove_fs(device); 998 acpi_battery_remove_fs(device);
989#endif 999#endif
990 kfree(battery); 1000 goto fail;
991 } 1001 }
992 1002
1003 printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
1004 ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
1005 device->status.battery_present ? "present" : "absent");
1006
993 battery->pm_nb.notifier_call = battery_notify; 1007 battery->pm_nb.notifier_call = battery_notify;
994 register_pm_notifier(&battery->pm_nb); 1008 register_pm_notifier(&battery->pm_nb);
995 1009
996 return result; 1010 return result;
1011
1012fail:
1013 sysfs_remove_battery(battery);
1014 mutex_destroy(&battery->lock);
1015 mutex_destroy(&battery->sysfs_lock);
1016 kfree(battery);
1017 return result;
997} 1018}
998 1019
999static int acpi_battery_remove(struct acpi_device *device, int type) 1020static int acpi_battery_remove(struct acpi_device *device, int type)
@@ -1009,6 +1030,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
1009#endif 1030#endif
1010 sysfs_remove_battery(battery); 1031 sysfs_remove_battery(battery);
1011 mutex_destroy(&battery->lock); 1032 mutex_destroy(&battery->lock);
1033 mutex_destroy(&battery->sysfs_lock);
1012 kfree(battery); 1034 kfree(battery);
1013 return 0; 1035 return 0;
1014} 1036}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index d1e06c182cdb..437ddbf0c49a 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -39,6 +39,7 @@
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <acpi/acpi_bus.h> 40#include <acpi/acpi_bus.h>
41#include <acpi/acpi_drivers.h> 41#include <acpi/acpi_drivers.h>
42#include <acpi/apei.h>
42#include <linux/dmi.h> 43#include <linux/dmi.h>
43#include <linux/suspend.h> 44#include <linux/suspend.h>
44 45
@@ -519,6 +520,7 @@ out_kfree:
519} 520}
520EXPORT_SYMBOL(acpi_run_osc); 521EXPORT_SYMBOL(acpi_run_osc);
521 522
523bool osc_sb_apei_support_acked;
522static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48"; 524static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
523static void acpi_bus_osc_support(void) 525static void acpi_bus_osc_support(void)
524{ 526{
@@ -541,11 +543,19 @@ static void acpi_bus_osc_support(void)
541#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE) 543#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
542 capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT; 544 capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
543#endif 545#endif
546
547 if (!ghes_disable)
548 capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_APEI_SUPPORT;
544 if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))) 549 if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
545 return; 550 return;
546 if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) 551 if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) {
552 u32 *capbuf_ret = context.ret.pointer;
553 if (context.ret.length > OSC_SUPPORT_TYPE)
554 osc_sb_apei_support_acked =
555 capbuf_ret[OSC_SUPPORT_TYPE] & OSC_SB_APEI_SUPPORT;
547 kfree(context.ret.pointer); 556 kfree(context.ret.pointer);
548 /* do we need to check the returned cap? Sounds no */ 557 }
558 /* do we need to check other returned cap? Sounds no */
549} 559}
550 560
551/* -------------------------------------------------------------------------- 561/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 1864ad3cf895..19a61136d848 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -77,7 +77,7 @@ struct dock_dependent_device {
77 struct list_head list; 77 struct list_head list;
78 struct list_head hotplug_list; 78 struct list_head hotplug_list;
79 acpi_handle handle; 79 acpi_handle handle;
80 struct acpi_dock_ops *ops; 80 const struct acpi_dock_ops *ops;
81 void *context; 81 void *context;
82}; 82};
83 83
@@ -589,7 +589,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
589 * the dock driver after _DCK is executed. 589 * the dock driver after _DCK is executed.
590 */ 590 */
591int 591int
592register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops, 592register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
593 void *context) 593 void *context)
594{ 594{
595 struct dock_dependent_device *dd; 595 struct dock_dependent_device *dd;
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 05b44201a614..22f918bacd35 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -92,7 +92,7 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
92 return count; 92 return count;
93} 93}
94 94
95static struct file_operations acpi_ec_io_ops = { 95static const struct file_operations acpi_ec_io_ops = {
96 .owner = THIS_MODULE, 96 .owner = THIS_MODULE,
97 .open = acpi_ec_open_io, 97 .open = acpi_ec_open_io,
98 .read = acpi_ec_read_io, 98 .read = acpi_ec_read_io,
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 467479f07c1f..0f0356ca1a9e 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -110,7 +110,7 @@ fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
110 return result; 110 return result;
111} 111}
112 112
113static struct thermal_cooling_device_ops fan_cooling_ops = { 113static const struct thermal_cooling_device_ops fan_cooling_ops = {
114 .get_max_state = fan_get_max_state, 114 .get_max_state = fan_get_max_state,
115 .get_cur_state = fan_get_cur_state, 115 .get_cur_state = fan_get_cur_state,
116 .set_cur_state = fan_set_cur_state, 116 .set_cur_state = fan_set_cur_state,
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 372f9b70f7f4..fa32f584229f 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -155,7 +155,7 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
155{ 155{
156 if (!strcmp("Linux", interface)) { 156 if (!strcmp("Linux", interface)) {
157 157
158 printk(KERN_NOTICE FW_BUG PREFIX 158 printk_once(KERN_NOTICE FW_BUG PREFIX
159 "BIOS _OSI(Linux) query %s%s\n", 159 "BIOS _OSI(Linux) query %s%s\n",
160 osi_linux.enable ? "honored" : "ignored", 160 osi_linux.enable ? "honored" : "ignored",
161 osi_linux.cmdline ? " via cmdline" : 161 osi_linux.cmdline ? " via cmdline" :
@@ -237,8 +237,23 @@ void acpi_os_vprintf(const char *fmt, va_list args)
237#endif 237#endif
238} 238}
239 239
240#ifdef CONFIG_KEXEC
241static unsigned long acpi_rsdp;
242static int __init setup_acpi_rsdp(char *arg)
243{
244 acpi_rsdp = simple_strtoul(arg, NULL, 16);
245 return 0;
246}
247early_param("acpi_rsdp", setup_acpi_rsdp);
248#endif
249
240acpi_physical_address __init acpi_os_get_root_pointer(void) 250acpi_physical_address __init acpi_os_get_root_pointer(void)
241{ 251{
252#ifdef CONFIG_KEXEC
253 if (acpi_rsdp)
254 return acpi_rsdp;
255#endif
256
242 if (efi_enabled) { 257 if (efi_enabled) {
243 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) 258 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
244 return efi.acpi20; 259 return efi.acpi20;
@@ -1083,7 +1098,13 @@ struct osi_setup_entry {
1083 bool enable; 1098 bool enable;
1084}; 1099};
1085 1100
1086static struct osi_setup_entry __initdata osi_setup_entries[OSI_STRING_ENTRIES_MAX]; 1101static struct osi_setup_entry __initdata
1102 osi_setup_entries[OSI_STRING_ENTRIES_MAX] = {
1103 {"Module Device", true},
1104 {"Processor Device", true},
1105 {"3.0 _SCP Extensions", true},
1106 {"Processor Aggregator Device", true},
1107};
1087 1108
1088void __init acpi_osi_setup(char *str) 1109void __init acpi_osi_setup(char *str)
1089{ 1110{
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index f907cfbfa13c..7f9eba9a0b02 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -303,6 +303,61 @@ void acpi_pci_irq_del_prt(struct pci_bus *bus)
303/* -------------------------------------------------------------------------- 303/* --------------------------------------------------------------------------
304 PCI Interrupt Routing Support 304 PCI Interrupt Routing Support
305 -------------------------------------------------------------------------- */ 305 -------------------------------------------------------------------------- */
306#ifdef CONFIG_X86_IO_APIC
307extern int noioapicquirk;
308extern int noioapicreroute;
309
310static int bridge_has_boot_interrupt_variant(struct pci_bus *bus)
311{
312 struct pci_bus *bus_it;
313
314 for (bus_it = bus ; bus_it ; bus_it = bus_it->parent) {
315 if (!bus_it->self)
316 return 0;
317 if (bus_it->self->irq_reroute_variant)
318 return bus_it->self->irq_reroute_variant;
319 }
320 return 0;
321}
322
323/*
324 * Some chipsets (e.g. Intel 6700PXH) generate a legacy INTx when the IRQ
325 * entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel does
326 * during interrupt handling). When this INTx generation cannot be disabled,
327 * we reroute these interrupts to their legacy equivalent to get rid of
328 * spurious interrupts.
329 */
330static int acpi_reroute_boot_interrupt(struct pci_dev *dev,
331 struct acpi_prt_entry *entry)
332{
333 if (noioapicquirk || noioapicreroute) {
334 return 0;
335 } else {
336 switch (bridge_has_boot_interrupt_variant(dev->bus)) {
337 case 0:
338 /* no rerouting necessary */
339 return 0;
340 case INTEL_IRQ_REROUTE_VARIANT:
341 /*
342 * Remap according to INTx routing table in 6700PXH
343 * specs, intel order number 302628-002, section
344 * 2.15.2. Other chipsets (80332, ...) have the same
345 * mapping and are handled here as well.
346 */
347 dev_info(&dev->dev, "PCI IRQ %d -> rerouted to legacy "
348 "IRQ %d\n", entry->index,
349 (entry->index % 4) + 16);
350 entry->index = (entry->index % 4) + 16;
351 return 1;
352 default:
353 dev_warn(&dev->dev, "Cannot reroute IRQ %d to legacy "
354 "IRQ: unknown mapping\n", entry->index);
355 return -1;
356 }
357 }
358}
359#endif /* CONFIG_X86_IO_APIC */
360
306static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin) 361static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
307{ 362{
308 struct acpi_prt_entry *entry; 363 struct acpi_prt_entry *entry;
@@ -311,6 +366,9 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
311 366
312 entry = acpi_pci_irq_find_prt_entry(dev, pin); 367 entry = acpi_pci_irq_find_prt_entry(dev, pin);
313 if (entry) { 368 if (entry) {
369#ifdef CONFIG_X86_IO_APIC
370 acpi_reroute_boot_interrupt(dev, entry);
371#endif /* CONFIG_X86_IO_APIC */
314 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %s[%c] _PRT entry\n", 372 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %s[%c] _PRT entry\n",
315 pci_name(dev), pin_name(pin))); 373 pci_name(dev), pin_name(pin)));
316 return entry; 374 return entry;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d06078d660ad..2672c798272f 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -485,7 +485,8 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
485 root->secondary.end = 0xFF; 485 root->secondary.end = 0xFF;
486 printk(KERN_WARNING FW_BUG PREFIX 486 printk(KERN_WARNING FW_BUG PREFIX
487 "no secondary bus range in _CRS\n"); 487 "no secondary bus range in _CRS\n");
488 status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus); 488 status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN,
489 NULL, &bus);
489 if (ACPI_SUCCESS(status)) 490 if (ACPI_SUCCESS(status))
490 root->secondary.start = bus; 491 root->secondary.start = bus;
491 else if (status == AE_NOT_FOUND) 492 else if (status == AE_NOT_FOUND)
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 79cb65332894..870550d6a4bf 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -244,7 +244,7 @@ processor_set_cur_state(struct thermal_cooling_device *cdev,
244 return result; 244 return result;
245} 245}
246 246
247struct thermal_cooling_device_ops processor_cooling_ops = { 247const struct thermal_cooling_device_ops processor_cooling_ops = {
248 .get_max_state = processor_get_max_state, 248 .get_max_state = processor_get_max_state,
249 .get_cur_state = processor_get_cur_state, 249 .get_cur_state = processor_get_cur_state,
250 .set_cur_state = processor_set_cur_state, 250 .set_cur_state = processor_set_cur_state,
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 50658ff887d9..6e36d0c0057c 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -130,6 +130,9 @@ struct acpi_sbs {
130 130
131#define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger) 131#define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger)
132 132
133static int acpi_sbs_remove(struct acpi_device *device, int type);
134static int acpi_battery_get_state(struct acpi_battery *battery);
135
133static inline int battery_scale(int log) 136static inline int battery_scale(int log)
134{ 137{
135 int scale = 1; 138 int scale = 1;
@@ -195,6 +198,8 @@ static int acpi_sbs_battery_get_property(struct power_supply *psy,
195 198
196 if ((!battery->present) && psp != POWER_SUPPLY_PROP_PRESENT) 199 if ((!battery->present) && psp != POWER_SUPPLY_PROP_PRESENT)
197 return -ENODEV; 200 return -ENODEV;
201
202 acpi_battery_get_state(battery);
198 switch (psp) { 203 switch (psp) {
199 case POWER_SUPPLY_PROP_STATUS: 204 case POWER_SUPPLY_PROP_STATUS:
200 if (battery->rate_now < 0) 205 if (battery->rate_now < 0)
@@ -225,11 +230,17 @@ static int acpi_sbs_battery_get_property(struct power_supply *psy,
225 case POWER_SUPPLY_PROP_POWER_NOW: 230 case POWER_SUPPLY_PROP_POWER_NOW:
226 val->intval = abs(battery->rate_now) * 231 val->intval = abs(battery->rate_now) *
227 acpi_battery_ipscale(battery) * 1000; 232 acpi_battery_ipscale(battery) * 1000;
233 val->intval *= (acpi_battery_mode(battery)) ?
234 (battery->voltage_now *
235 acpi_battery_vscale(battery) / 1000) : 1;
228 break; 236 break;
229 case POWER_SUPPLY_PROP_CURRENT_AVG: 237 case POWER_SUPPLY_PROP_CURRENT_AVG:
230 case POWER_SUPPLY_PROP_POWER_AVG: 238 case POWER_SUPPLY_PROP_POWER_AVG:
231 val->intval = abs(battery->rate_avg) * 239 val->intval = abs(battery->rate_avg) *
232 acpi_battery_ipscale(battery) * 1000; 240 acpi_battery_ipscale(battery) * 1000;
241 val->intval *= (acpi_battery_mode(battery)) ?
242 (battery->voltage_now *
243 acpi_battery_vscale(battery) / 1000) : 1;
233 break; 244 break;
234 case POWER_SUPPLY_PROP_CAPACITY: 245 case POWER_SUPPLY_PROP_CAPACITY:
235 val->intval = battery->state_of_charge; 246 val->intval = battery->state_of_charge;
@@ -903,8 +914,6 @@ static void acpi_sbs_callback(void *context)
903 } 914 }
904} 915}
905 916
906static int acpi_sbs_remove(struct acpi_device *device, int type);
907
908static int acpi_sbs_add(struct acpi_device *device) 917static int acpi_sbs_add(struct acpi_device *device)
909{ 918{
910 struct acpi_sbs *sbs; 919 struct acpi_sbs *sbs;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6c949602cbd1..3ed80b2ca907 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -428,6 +428,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
428 DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), 428 DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
429 }, 429 },
430 }, 430 },
431 {
432 .callback = init_old_suspend_ordering,
433 .ident = "Asus A8N-SLI DELUXE",
434 .matches = {
435 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
436 DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
437 },
438 },
439 {
440 .callback = init_old_suspend_ordering,
441 .ident = "Asus A8N-SLI Premium",
442 .matches = {
443 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
444 DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
445 },
446 },
431 {}, 447 {},
432}; 448};
433#endif /* CONFIG_SUSPEND */ 449#endif /* CONFIG_SUSPEND */
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 77255f250dbb..c538d0ef10ff 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -149,12 +149,12 @@ static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
149 return result; 149 return result;
150} 150}
151 151
152static struct kernel_param_ops param_ops_debug_layer = { 152static const struct kernel_param_ops param_ops_debug_layer = {
153 .set = param_set_uint, 153 .set = param_set_uint,
154 .get = param_get_debug_layer, 154 .get = param_get_debug_layer,
155}; 155};
156 156
157static struct kernel_param_ops param_ops_debug_level = { 157static const struct kernel_param_ops param_ops_debug_level = {
158 .set = param_set_uint, 158 .set = param_set_uint,
159 .get = param_get_debug_level, 159 .get = param_get_debug_level,
160}; 160};
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 2607e17b520f..48fbc647b178 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -812,7 +812,7 @@ acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
812 thermal_zone_unbind_cooling_device); 812 thermal_zone_unbind_cooling_device);
813} 813}
814 814
815static struct thermal_zone_device_ops acpi_thermal_zone_ops = { 815static const struct thermal_zone_device_ops acpi_thermal_zone_ops = {
816 .bind = acpi_thermal_bind_cooling_device, 816 .bind = acpi_thermal_bind_cooling_device,
817 .unbind = acpi_thermal_unbind_cooling_device, 817 .unbind = acpi_thermal_unbind_cooling_device,
818 .get_temp = thermal_get_temp, 818 .get_temp = thermal_get_temp,
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index ada4b4d9bdc8..08a44b532f7c 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -307,7 +307,7 @@ video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long st
307 return acpi_video_device_lcd_set_level(video, level); 307 return acpi_video_device_lcd_set_level(video, level);
308} 308}
309 309
310static struct thermal_cooling_device_ops video_cooling_ops = { 310static const struct thermal_cooling_device_ops video_cooling_ops = {
311 .get_max_state = video_get_max_state, 311 .get_max_state = video_get_max_state,
312 .get_cur_state = video_get_cur_state, 312 .get_cur_state = video_get_cur_state,
313 .set_cur_state = video_set_cur_state, 313 .set_cur_state = video_set_cur_state,
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index ca3e6be44a04..5987e0ba8c2d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -468,6 +468,15 @@ config PATA_ICSIDE
468 interface card. This is not required for ICS partition support. 468 interface card. This is not required for ICS partition support.
469 If you are unsure, say N to this. 469 If you are unsure, say N to this.
470 470
471config PATA_IMX
472 tristate "PATA support for Freescale iMX"
473 depends on ARCH_MXC
474 help
475 This option enables support for the PATA host available on Freescale
476 iMX SoCs.
477
478 If unsure, say N.
479
471config PATA_IT8213 480config PATA_IT8213
472 tristate "IT8213 PATA support (Experimental)" 481 tristate "IT8213 PATA support (Experimental)"
473 depends on PCI && EXPERIMENTAL 482 depends on PCI && EXPERIMENTAL
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 8ac64e1aa051..9550d691fd19 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o
48obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o 48obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
49obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o 49obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o
50obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o 50obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
51obj-$(CONFIG_PATA_IMX) += pata_imx.o
51obj-$(CONFIG_PATA_IT8213) += pata_it8213.o 52obj-$(CONFIG_PATA_IT8213) += pata_it8213.o
52obj-$(CONFIG_PATA_IT821X) += pata_it821x.o 53obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
53obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o 54obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index e0a5b555cee1..bb7c5f1085cc 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -218,12 +218,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
218 ata_acpi_uevent(dev->link->ap, dev, event); 218 ata_acpi_uevent(dev->link->ap, dev, event);
219} 219}
220 220
221static struct acpi_dock_ops ata_acpi_dev_dock_ops = { 221static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
222 .handler = ata_acpi_dev_notify_dock, 222 .handler = ata_acpi_dev_notify_dock,
223 .uevent = ata_acpi_dev_uevent, 223 .uevent = ata_acpi_dev_uevent,
224}; 224};
225 225
226static struct acpi_dock_ops ata_acpi_ap_dock_ops = { 226static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
227 .handler = ata_acpi_ap_notify_dock, 227 .handler = ata_acpi_ap_notify_dock,
228 .uevent = ata_acpi_ap_uevent, 228 .uevent = ata_acpi_ap_uevent,
229}; 229};
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
new file mode 100644
index 000000000000..ca9d9caedfa3
--- /dev/null
+++ b/drivers/ata/pata_imx.c
@@ -0,0 +1,253 @@
1/*
2 * Freescale iMX PATA driver
3 *
4 * Copyright (C) 2011 Arnaud Patard <arnaud.patard@rtp-net.org>
5 *
6 * Based on pata_platform - Copyright (C) 2006 - 2007 Paul Mundt
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 *
12 * TODO:
13 * - dmaengine support
14 * - check if timing stuff needed
15 */
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/blkdev.h>
20#include <scsi/scsi_host.h>
21#include <linux/ata.h>
22#include <linux/libata.h>
23#include <linux/platform_device.h>
24#include <linux/clk.h>
25
26#define DRV_NAME "pata_imx"
27
28#define PATA_IMX_ATA_CONTROL 0x24
29#define PATA_IMX_ATA_CTRL_FIFO_RST_B (1<<7)
30#define PATA_IMX_ATA_CTRL_ATA_RST_B (1<<6)
31#define PATA_IMX_ATA_CTRL_IORDY_EN (1<<0)
32#define PATA_IMX_ATA_INT_EN 0x2C
33#define PATA_IMX_ATA_INTR_ATA_INTRQ2 (1<<3)
34#define PATA_IMX_DRIVE_DATA 0xA0
35#define PATA_IMX_DRIVE_CONTROL 0xD8
36
37struct pata_imx_priv {
38 struct clk *clk;
39 /* timings/interrupt/control regs */
40 u8 *host_regs;
41 u32 ata_ctl;
42};
43
44static int pata_imx_set_mode(struct ata_link *link, struct ata_device **unused)
45{
46 struct ata_device *dev;
47 struct ata_port *ap = link->ap;
48 struct pata_imx_priv *priv = ap->host->private_data;
49 u32 val;
50
51 ata_for_each_dev(dev, link, ENABLED) {
52 dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
53 dev->xfer_shift = ATA_SHIFT_PIO;
54 dev->flags |= ATA_DFLAG_PIO;
55
56 val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
57 if (ata_pio_need_iordy(dev))
58 val |= PATA_IMX_ATA_CTRL_IORDY_EN;
59 else
60 val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
61 __raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
62
63 ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
64 }
65 return 0;
66}
67
68static struct scsi_host_template pata_imx_sht = {
69 ATA_PIO_SHT(DRV_NAME),
70};
71
72static struct ata_port_operations pata_imx_port_ops = {
73 .inherits = &ata_sff_port_ops,
74 .sff_data_xfer = ata_sff_data_xfer_noirq,
75 .cable_detect = ata_cable_unknown,
76 .set_mode = pata_imx_set_mode,
77};
78
79static void pata_imx_setup_port(struct ata_ioports *ioaddr)
80{
81 /* Fixup the port shift for platforms that need it */
82 ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << 2);
83 ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << 2);
84 ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << 2);
85 ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << 2);
86 ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << 2);
87 ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << 2);
88 ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << 2);
89 ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << 2);
90 ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << 2);
91 ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2);
92}
93
94static int __devinit pata_imx_probe(struct platform_device *pdev)
95{
96 struct ata_host *host;
97 struct ata_port *ap;
98 struct pata_imx_priv *priv;
99 int irq = 0;
100 struct resource *io_res;
101
102 io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
103 if (io_res == NULL)
104 return -EINVAL;
105
106 irq = platform_get_irq(pdev, 0);
107 if (irq <= 0)
108 return -EINVAL;
109
110 priv = devm_kzalloc(&pdev->dev,
111 sizeof(struct pata_imx_priv), GFP_KERNEL);
112 if (!priv)
113 return -ENOMEM;
114
115 priv->clk = clk_get(&pdev->dev, NULL);
116 if (IS_ERR(priv->clk)) {
117 dev_err(&pdev->dev, "Failed to get clock\n");
118 return PTR_ERR(priv->clk);
119 }
120
121 clk_enable(priv->clk);
122
123 host = ata_host_alloc(&pdev->dev, 1);
124 if (!host)
125 goto free_priv;
126
127 host->private_data = priv;
128 ap = host->ports[0];
129
130 ap->ops = &pata_imx_port_ops;
131 ap->pio_mask = ATA_PIO0;
132 ap->flags |= ATA_FLAG_SLAVE_POSS;
133
134 priv->host_regs = devm_ioremap(&pdev->dev, io_res->start,
135 resource_size(io_res));
136 if (!priv->host_regs) {
137 dev_err(&pdev->dev, "failed to map IO/CTL base\n");
138 goto free_priv;
139 }
140
141 ap->ioaddr.cmd_addr = priv->host_regs + PATA_IMX_DRIVE_DATA;
142 ap->ioaddr.ctl_addr = priv->host_regs + PATA_IMX_DRIVE_CONTROL;
143
144 ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
145
146 pata_imx_setup_port(&ap->ioaddr);
147
148 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
149 (unsigned long long)io_res->start + PATA_IMX_DRIVE_DATA,
150 (unsigned long long)io_res->start + PATA_IMX_DRIVE_CONTROL);
151
152 /* deassert resets */
153 __raw_writel(PATA_IMX_ATA_CTRL_FIFO_RST_B |
154 PATA_IMX_ATA_CTRL_ATA_RST_B,
155 priv->host_regs + PATA_IMX_ATA_CONTROL);
156 /* enable interrupts */
157 __raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
158 priv->host_regs + PATA_IMX_ATA_INT_EN);
159
160 /* activate */
161 return ata_host_activate(host, irq, ata_sff_interrupt, 0,
162 &pata_imx_sht);
163
164free_priv:
165 clk_disable(priv->clk);
166 clk_put(priv->clk);
167 return -ENOMEM;
168}
169
170static int __devexit pata_imx_remove(struct platform_device *pdev)
171{
172 struct ata_host *host = dev_get_drvdata(&pdev->dev);
173 struct pata_imx_priv *priv = host->private_data;
174
175 ata_host_detach(host);
176
177 __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
178
179 clk_disable(priv->clk);
180 clk_put(priv->clk);
181
182 return 0;
183}
184
185#ifdef CONFIG_PM
186static int pata_imx_suspend(struct device *dev)
187{
188 struct ata_host *host = dev_get_drvdata(dev);
189 struct pata_imx_priv *priv = host->private_data;
190 int ret;
191
192 ret = ata_host_suspend(host, PMSG_SUSPEND);
193 if (!ret) {
194 __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
195 priv->ata_ctl =
196 __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
197 clk_disable(priv->clk);
198 }
199
200 return ret;
201}
202
203static int pata_imx_resume(struct device *dev)
204{
205 struct ata_host *host = dev_get_drvdata(dev);
206 struct pata_imx_priv *priv = host->private_data;
207
208 clk_enable(priv->clk);
209
210 __raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL);
211
212 __raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
213 priv->host_regs + PATA_IMX_ATA_INT_EN);
214
215 ata_host_resume(host);
216
217 return 0;
218}
219
220static const struct dev_pm_ops pata_imx_pm_ops = {
221 .suspend = pata_imx_suspend,
222 .resume = pata_imx_resume,
223};
224#endif
225
226static struct platform_driver pata_imx_driver = {
227 .probe = pata_imx_probe,
228 .remove = __devexit_p(pata_imx_remove),
229 .driver = {
230 .name = DRV_NAME,
231 .owner = THIS_MODULE,
232#ifdef CONFIG_PM
233 .pm = &pata_imx_pm_ops,
234#endif
235 },
236};
237
238static int __init pata_imx_init(void)
239{
240 return platform_driver_register(&pata_imx_driver);
241}
242
243static void __exit pata_imx_exit(void)
244{
245 platform_driver_unregister(&pata_imx_driver);
246}
247module_init(pata_imx_init);
248module_exit(pata_imx_exit);
249
250MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
251MODULE_DESCRIPTION("low-level driver for iMX PATA");
252MODULE_LICENSE("GPL");
253MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 65e4be6be220..8e9f5048a10a 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -124,6 +124,17 @@ static const struct via_isa_bridge {
124 { NULL } 124 { NULL }
125}; 125};
126 126
127static const struct dmi_system_id no_atapi_dma_dmi_table[] = {
128 {
129 .ident = "AVERATEC 3200",
130 .matches = {
131 DMI_MATCH(DMI_BOARD_VENDOR, "AVERATEC"),
132 DMI_MATCH(DMI_BOARD_NAME, "3200"),
133 },
134 },
135 { }
136};
137
127struct via_port { 138struct via_port {
128 u8 cached_device; 139 u8 cached_device;
129}; 140};
@@ -355,6 +366,13 @@ static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask)
355 mask &= ~ ATA_MASK_UDMA; 366 mask &= ~ ATA_MASK_UDMA;
356 } 367 }
357 } 368 }
369
370 if (dev->class == ATA_DEV_ATAPI &&
371 dmi_check_system(no_atapi_dma_dmi_table)) {
372 ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n");
373 mask &= ATA_MASK_PIO;
374 }
375
358 return mask; 376 return mask;
359} 377}
360 378
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 0a9a774a7e1e..5c4237452f50 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1329,7 +1329,7 @@ static int sata_dwc_port_start(struct ata_port *ap)
1329 dev_err(ap->dev, "%s: dma_alloc_coherent failed\n", 1329 dev_err(ap->dev, "%s: dma_alloc_coherent failed\n",
1330 __func__); 1330 __func__);
1331 err = -ENOMEM; 1331 err = -ENOMEM;
1332 goto CLEANUP; 1332 goto CLEANUP_ALLOC;
1333 } 1333 }
1334 } 1334 }
1335 1335
@@ -1349,15 +1349,13 @@ static int sata_dwc_port_start(struct ata_port *ap)
1349 /* Clear any error bits before libata starts issuing commands */ 1349 /* Clear any error bits before libata starts issuing commands */
1350 clear_serror(); 1350 clear_serror();
1351 ap->private_data = hsdevp; 1351 ap->private_data = hsdevp;
1352 dev_dbg(ap->dev, "%s: done\n", __func__);
1353 return 0;
1352 1354
1355CLEANUP_ALLOC:
1356 kfree(hsdevp);
1353CLEANUP: 1357CLEANUP:
1354 if (err) { 1358 dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
1355 sata_dwc_port_stop(ap);
1356 dev_dbg(ap->dev, "%s: fail\n", __func__);
1357 } else {
1358 dev_dbg(ap->dev, "%s: done\n", __func__);
1359 }
1360
1361 return err; 1359 return err;
1362} 1360}
1363 1361
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 98c1d780f552..9dfb40b8c2c9 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -438,7 +438,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
438 u8 status; 438 u8 status;
439 439
440 if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) { 440 if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
441 u32 serror; 441 u32 serror = 0xffffffff;
442 442
443 /* SIEN doesn't mask SATA IRQs on some 3112s. Those 443 /* SIEN doesn't mask SATA IRQs on some 3112s. Those
444 * controllers continue to assert IRQ as long as 444 * controllers continue to assert IRQ as long as
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index cf7a0c788052..65cd74832450 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -397,6 +397,7 @@ static int remove_nodes(struct device *dev,
397 397
398static int release_nodes(struct device *dev, struct list_head *first, 398static int release_nodes(struct device *dev, struct list_head *first,
399 struct list_head *end, unsigned long flags) 399 struct list_head *end, unsigned long flags)
400 __releases(&dev->devres_lock)
400{ 401{
401 LIST_HEAD(todo); 402 LIST_HEAD(todo);
402 int cnt; 403 int cnt;
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index b89fffc1d777..a4760e095ff5 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -166,7 +166,7 @@ static int create_path(const char *nodepath)
166{ 166{
167 char *path; 167 char *path;
168 char *s; 168 char *s;
169 int err; 169 int err = 0;
170 170
171 /* parent directories do not exist, create them */ 171 /* parent directories do not exist, create them */
172 path = kstrdup(nodepath, GFP_KERNEL); 172 path = kstrdup(nodepath, GFP_KERNEL);
@@ -376,7 +376,7 @@ int devtmpfs_mount(const char *mntdir)
376 return err; 376 return err;
377} 377}
378 378
379static __initdata DECLARE_COMPLETION(setup_done); 379static DECLARE_COMPLETION(setup_done);
380 380
381static int handle(const char *name, mode_t mode, struct device *dev) 381static int handle(const char *name, mode_t mode, struct device *dev)
382{ 382{
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index bbb03e6f7255..06ed6b4e7df5 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -521,11 +521,6 @@ static int _request_firmware(const struct firmware **firmware_p,
521 if (!firmware_p) 521 if (!firmware_p)
522 return -EINVAL; 522 return -EINVAL;
523 523
524 if (WARN_ON(usermodehelper_is_disabled())) {
525 dev_err(device, "firmware: %s will not be loaded\n", name);
526 return -EBUSY;
527 }
528
529 *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); 524 *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
530 if (!firmware) { 525 if (!firmware) {
531 dev_err(device, "%s: kmalloc(struct firmware) failed\n", 526 dev_err(device, "%s: kmalloc(struct firmware) failed\n",
@@ -539,6 +534,12 @@ static int _request_firmware(const struct firmware **firmware_p,
539 return 0; 534 return 0;
540 } 535 }
541 536
537 if (WARN_ON(usermodehelper_is_disabled())) {
538 dev_err(device, "firmware: %s will not be loaded\n", name);
539 retval = -EBUSY;
540 goto out;
541 }
542
542 if (uevent) 543 if (uevent)
543 dev_dbg(device, "firmware: requesting %s\n", name); 544 dev_dbg(device, "firmware: requesting %s\n", name);
544 545
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 0cad9c7f6bb5..99a5272d7c2f 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(platform_bus);
33 33
34/** 34/**
35 * arch_setup_pdev_archdata - Allow manipulation of archdata before its used 35 * arch_setup_pdev_archdata - Allow manipulation of archdata before its used
36 * @dev: platform device 36 * @pdev: platform device
37 * 37 *
38 * This is called before platform_device_add() such that any pdev_archdata may 38 * This is called before platform_device_add() such that any pdev_archdata may
39 * be setup before the platform_notifier is called. So if a user needs to 39 * be setup before the platform_notifier is called. So if a user needs to
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index a846b2f95cfb..2c18d584066d 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -19,7 +19,7 @@
19 19
20struct pm_clk_data { 20struct pm_clk_data {
21 struct list_head clock_list; 21 struct list_head clock_list;
22 struct mutex lock; 22 spinlock_t lock;
23}; 23};
24 24
25enum pce_status { 25enum pce_status {
@@ -73,9 +73,9 @@ int pm_clk_add(struct device *dev, const char *con_id)
73 } 73 }
74 } 74 }
75 75
76 mutex_lock(&pcd->lock); 76 spin_lock_irq(&pcd->lock);
77 list_add_tail(&ce->node, &pcd->clock_list); 77 list_add_tail(&ce->node, &pcd->clock_list);
78 mutex_unlock(&pcd->lock); 78 spin_unlock_irq(&pcd->lock);
79 return 0; 79 return 0;
80} 80}
81 81
@@ -83,8 +83,8 @@ int pm_clk_add(struct device *dev, const char *con_id)
83 * __pm_clk_remove - Destroy PM clock entry. 83 * __pm_clk_remove - Destroy PM clock entry.
84 * @ce: PM clock entry to destroy. 84 * @ce: PM clock entry to destroy.
85 * 85 *
86 * This routine must be called under the mutex protecting the PM list of clocks 86 * This routine must be called under the spinlock protecting the PM list of
87 * corresponding the the @ce's device. 87 * clocks corresponding the the @ce's device.
88 */ 88 */
89static void __pm_clk_remove(struct pm_clock_entry *ce) 89static void __pm_clk_remove(struct pm_clock_entry *ce)
90{ 90{
@@ -123,7 +123,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
123 if (!pcd) 123 if (!pcd)
124 return; 124 return;
125 125
126 mutex_lock(&pcd->lock); 126 spin_lock_irq(&pcd->lock);
127 127
128 list_for_each_entry(ce, &pcd->clock_list, node) { 128 list_for_each_entry(ce, &pcd->clock_list, node) {
129 if (!con_id && !ce->con_id) { 129 if (!con_id && !ce->con_id) {
@@ -137,7 +137,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
137 } 137 }
138 } 138 }
139 139
140 mutex_unlock(&pcd->lock); 140 spin_unlock_irq(&pcd->lock);
141} 141}
142 142
143/** 143/**
@@ -158,7 +158,7 @@ int pm_clk_init(struct device *dev)
158 } 158 }
159 159
160 INIT_LIST_HEAD(&pcd->clock_list); 160 INIT_LIST_HEAD(&pcd->clock_list);
161 mutex_init(&pcd->lock); 161 spin_lock_init(&pcd->lock);
162 dev->power.subsys_data = pcd; 162 dev->power.subsys_data = pcd;
163 return 0; 163 return 0;
164} 164}
@@ -181,12 +181,12 @@ void pm_clk_destroy(struct device *dev)
181 181
182 dev->power.subsys_data = NULL; 182 dev->power.subsys_data = NULL;
183 183
184 mutex_lock(&pcd->lock); 184 spin_lock_irq(&pcd->lock);
185 185
186 list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node) 186 list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
187 __pm_clk_remove(ce); 187 __pm_clk_remove(ce);
188 188
189 mutex_unlock(&pcd->lock); 189 spin_unlock_irq(&pcd->lock);
190 190
191 kfree(pcd); 191 kfree(pcd);
192} 192}
@@ -220,13 +220,14 @@ int pm_clk_suspend(struct device *dev)
220{ 220{
221 struct pm_clk_data *pcd = __to_pcd(dev); 221 struct pm_clk_data *pcd = __to_pcd(dev);
222 struct pm_clock_entry *ce; 222 struct pm_clock_entry *ce;
223 unsigned long flags;
223 224
224 dev_dbg(dev, "%s()\n", __func__); 225 dev_dbg(dev, "%s()\n", __func__);
225 226
226 if (!pcd) 227 if (!pcd)
227 return 0; 228 return 0;
228 229
229 mutex_lock(&pcd->lock); 230 spin_lock_irqsave(&pcd->lock, flags);
230 231
231 list_for_each_entry_reverse(ce, &pcd->clock_list, node) { 232 list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
232 if (ce->status == PCE_STATUS_NONE) 233 if (ce->status == PCE_STATUS_NONE)
@@ -238,7 +239,7 @@ int pm_clk_suspend(struct device *dev)
238 } 239 }
239 } 240 }
240 241
241 mutex_unlock(&pcd->lock); 242 spin_unlock_irqrestore(&pcd->lock, flags);
242 243
243 return 0; 244 return 0;
244} 245}
@@ -251,13 +252,14 @@ int pm_clk_resume(struct device *dev)
251{ 252{
252 struct pm_clk_data *pcd = __to_pcd(dev); 253 struct pm_clk_data *pcd = __to_pcd(dev);
253 struct pm_clock_entry *ce; 254 struct pm_clock_entry *ce;
255 unsigned long flags;
254 256
255 dev_dbg(dev, "%s()\n", __func__); 257 dev_dbg(dev, "%s()\n", __func__);
256 258
257 if (!pcd) 259 if (!pcd)
258 return 0; 260 return 0;
259 261
260 mutex_lock(&pcd->lock); 262 spin_lock_irqsave(&pcd->lock, flags);
261 263
262 list_for_each_entry(ce, &pcd->clock_list, node) { 264 list_for_each_entry(ce, &pcd->clock_list, node) {
263 if (ce->status == PCE_STATUS_NONE) 265 if (ce->status == PCE_STATUS_NONE)
@@ -269,7 +271,7 @@ int pm_clk_resume(struct device *dev)
269 } 271 }
270 } 272 }
271 273
272 mutex_unlock(&pcd->lock); 274 spin_unlock_irqrestore(&pcd->lock, flags);
273 275
274 return 0; 276 return 0;
275} 277}
@@ -344,6 +346,7 @@ int pm_clk_suspend(struct device *dev)
344{ 346{
345 struct pm_clk_data *pcd = __to_pcd(dev); 347 struct pm_clk_data *pcd = __to_pcd(dev);
346 struct pm_clock_entry *ce; 348 struct pm_clock_entry *ce;
349 unsigned long flags;
347 350
348 dev_dbg(dev, "%s()\n", __func__); 351 dev_dbg(dev, "%s()\n", __func__);
349 352
@@ -351,12 +354,12 @@ int pm_clk_suspend(struct device *dev)
351 if (!pcd || !dev->driver) 354 if (!pcd || !dev->driver)
352 return 0; 355 return 0;
353 356
354 mutex_lock(&pcd->lock); 357 spin_lock_irqsave(&pcd->lock, flags);
355 358
356 list_for_each_entry_reverse(ce, &pcd->clock_list, node) 359 list_for_each_entry_reverse(ce, &pcd->clock_list, node)
357 clk_disable(ce->clk); 360 clk_disable(ce->clk);
358 361
359 mutex_unlock(&pcd->lock); 362 spin_unlock_irqrestore(&pcd->lock, flags);
360 363
361 return 0; 364 return 0;
362} 365}
@@ -369,6 +372,7 @@ int pm_clk_resume(struct device *dev)
369{ 372{
370 struct pm_clk_data *pcd = __to_pcd(dev); 373 struct pm_clk_data *pcd = __to_pcd(dev);
371 struct pm_clock_entry *ce; 374 struct pm_clock_entry *ce;
375 unsigned long flags;
372 376
373 dev_dbg(dev, "%s()\n", __func__); 377 dev_dbg(dev, "%s()\n", __func__);
374 378
@@ -376,12 +380,12 @@ int pm_clk_resume(struct device *dev)
376 if (!pcd || !dev->driver) 380 if (!pcd || !dev->driver)
377 return 0; 381 return 0;
378 382
379 mutex_lock(&pcd->lock); 383 spin_lock_irqsave(&pcd->lock, flags);
380 384
381 list_for_each_entry(ce, &pcd->clock_list, node) 385 list_for_each_entry(ce, &pcd->clock_list, node)
382 clk_enable(ce->clk); 386 clk_enable(ce->clk);
383 387
384 mutex_unlock(&pcd->lock); 388 spin_unlock_irqrestore(&pcd->lock, flags);
385 389
386 return 0; 390 return 0;
387} 391}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index be8714aa9dd6..1c374579407c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -80,7 +80,6 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
80int pm_genpd_poweron(struct generic_pm_domain *genpd) 80int pm_genpd_poweron(struct generic_pm_domain *genpd)
81{ 81{
82 struct generic_pm_domain *parent = genpd->parent; 82 struct generic_pm_domain *parent = genpd->parent;
83 DEFINE_WAIT(wait);
84 int ret = 0; 83 int ret = 0;
85 84
86 start: 85 start:
@@ -112,7 +111,7 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
112 } 111 }
113 112
114 if (genpd->power_on) { 113 if (genpd->power_on) {
115 int ret = genpd->power_on(genpd); 114 ret = genpd->power_on(genpd);
116 if (ret) 115 if (ret)
117 goto out; 116 goto out;
118 } 117 }
@@ -461,6 +460,21 @@ static int pm_genpd_runtime_resume(struct device *dev)
461 return 0; 460 return 0;
462} 461}
463 462
463/**
464 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
465 */
466void pm_genpd_poweroff_unused(void)
467{
468 struct generic_pm_domain *genpd;
469
470 mutex_lock(&gpd_list_lock);
471
472 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
473 genpd_queue_power_off_work(genpd);
474
475 mutex_unlock(&gpd_list_lock);
476}
477
464#else 478#else
465 479
466static inline void genpd_power_off_work_fn(struct work_struct *work) {} 480static inline void genpd_power_off_work_fn(struct work_struct *work) {}
@@ -1256,18 +1270,3 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
1256 list_add(&genpd->gpd_list_node, &gpd_list); 1270 list_add(&genpd->gpd_list_node, &gpd_list);
1257 mutex_unlock(&gpd_list_lock); 1271 mutex_unlock(&gpd_list_lock);
1258} 1272}
1259
1260/**
1261 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
1262 */
1263void pm_genpd_poweroff_unused(void)
1264{
1265 struct generic_pm_domain *genpd;
1266
1267 mutex_lock(&gpd_list_lock);
1268
1269 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1270 genpd_queue_power_off_work(genpd);
1271
1272 mutex_unlock(&gpd_list_lock);
1273}
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8dc247c974af..acb3f83b8079 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -226,11 +226,17 @@ static int rpm_idle(struct device *dev, int rpmflags)
226 callback = NULL; 226 callback = NULL;
227 227
228 if (callback) { 228 if (callback) {
229 spin_unlock_irq(&dev->power.lock); 229 if (dev->power.irq_safe)
230 spin_unlock(&dev->power.lock);
231 else
232 spin_unlock_irq(&dev->power.lock);
230 233
231 callback(dev); 234 callback(dev);
232 235
233 spin_lock_irq(&dev->power.lock); 236 if (dev->power.irq_safe)
237 spin_lock(&dev->power.lock);
238 else
239 spin_lock_irq(&dev->power.lock);
234 } 240 }
235 241
236 dev->power.idle_notification = false; 242 dev->power.idle_notification = false;
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index c2231ff06cbc..c4f7a45cd2c3 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -113,3 +113,4 @@ struct regmap *regmap_init_i2c(struct i2c_client *i2c,
113} 113}
114EXPORT_SYMBOL_GPL(regmap_init_i2c); 114EXPORT_SYMBOL_GPL(regmap_init_i2c);
115 115
116MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 4deba0621bc7..f8396945d6ed 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -13,6 +13,7 @@
13#include <linux/regmap.h> 13#include <linux/regmap.h>
14#include <linux/spi/spi.h> 14#include <linux/spi/spi.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/module.h>
16 17
17static int regmap_spi_write(struct device *dev, const void *data, size_t count) 18static int regmap_spi_write(struct device *dev, const void *data, size_t count)
18{ 19{
@@ -70,3 +71,5 @@ struct regmap *regmap_init_spi(struct spi_device *spi,
70 return regmap_init(&spi->dev, &regmap_spi, config); 71 return regmap_init(&spi->dev, &regmap_spi, config);
71} 72}
72EXPORT_SYMBOL_GPL(regmap_init_spi); 73EXPORT_SYMBOL_GPL(regmap_init_spi);
74
75MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index cf3565cae93d..20663f8dae45 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -168,13 +168,11 @@ struct regmap *regmap_init(struct device *dev,
168 map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL); 168 map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
169 if (map->work_buf == NULL) { 169 if (map->work_buf == NULL) {
170 ret = -ENOMEM; 170 ret = -ENOMEM;
171 goto err_bus; 171 goto err_map;
172 } 172 }
173 173
174 return map; 174 return map;
175 175
176err_bus:
177 module_put(map->bus->owner);
178err_map: 176err_map:
179 kfree(map); 177 kfree(map);
180err: 178err:
@@ -188,7 +186,6 @@ EXPORT_SYMBOL_GPL(regmap_init);
188void regmap_exit(struct regmap *map) 186void regmap_exit(struct regmap *map)
189{ 187{
190 kfree(map->work_buf); 188 kfree(map->work_buf);
191 module_put(map->bus->owner);
192 kfree(map); 189 kfree(map);
193} 190}
194EXPORT_SYMBOL_GPL(regmap_exit); 191EXPORT_SYMBOL_GPL(regmap_exit);
@@ -317,7 +314,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
317 u8[0] |= map->bus->read_flag_mask; 314 u8[0] |= map->bus->read_flag_mask;
318 315
319 ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes, 316 ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes,
320 val, map->format.val_bytes); 317 val, val_len);
321 if (ret != 0) 318 if (ret != 0)
322 return ret; 319 return ret;
323 320
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 873e2e4ac55f..73b7b1a18fab 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -15,6 +15,7 @@ MODULE_LICENSE("GPL");
15static int bcma_bus_match(struct device *dev, struct device_driver *drv); 15static int bcma_bus_match(struct device *dev, struct device_driver *drv);
16static int bcma_device_probe(struct device *dev); 16static int bcma_device_probe(struct device *dev);
17static int bcma_device_remove(struct device *dev); 17static int bcma_device_remove(struct device *dev);
18static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
18 19
19static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf) 20static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
20{ 21{
@@ -49,6 +50,7 @@ static struct bus_type bcma_bus_type = {
49 .match = bcma_bus_match, 50 .match = bcma_bus_match,
50 .probe = bcma_device_probe, 51 .probe = bcma_device_probe,
51 .remove = bcma_device_remove, 52 .remove = bcma_device_remove,
53 .uevent = bcma_device_uevent,
52 .dev_attrs = bcma_device_attrs, 54 .dev_attrs = bcma_device_attrs,
53}; 55};
54 56
@@ -227,6 +229,16 @@ static int bcma_device_remove(struct device *dev)
227 return 0; 229 return 0;
228} 230}
229 231
232static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
233{
234 struct bcma_device *core = container_of(dev, struct bcma_device, dev);
235
236 return add_uevent_var(env,
237 "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
238 core->id.manuf, core->id.id,
239 core->id.rev, core->id.class);
240}
241
230static int __init bcma_modinit(void) 242static int __init bcma_modinit(void)
231{ 243{
232 int err; 244 int err;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 717d6e4e18d3..6f07ec1c2f58 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -256,6 +256,21 @@ config BLK_DEV_LOOP
256 256
257 Most users will answer N here. 257 Most users will answer N here.
258 258
259config BLK_DEV_LOOP_MIN_COUNT
260 int "Number of loop devices to pre-create at init time"
261 depends on BLK_DEV_LOOP
262 default 8
263 help
264 Static number of loop devices to be unconditionally pre-created
265 at init time.
266
267 This default value can be overwritten on the kernel command
268 line or with module-parameter loop.max_loop.
269
270 The historic default is 8. If a late 2011 version of losetup(8)
271 is used, it can be set to 0, since needed loop devices can be
272 dynamically allocated with the /dev/loop-control interface.
273
259config BLK_DEV_CRYPTOLOOP 274config BLK_DEV_CRYPTOLOOP
260 tristate "Cryptoloop Support" 275 tristate "Cryptoloop Support"
261 select CRYPTO 276 select CRYPTO
@@ -471,7 +486,7 @@ config XEN_BLKDEV_FRONTEND
471 in another domain which drives the actual block device. 486 in another domain which drives the actual block device.
472 487
473config XEN_BLKDEV_BACKEND 488config XEN_BLKDEV_BACKEND
474 tristate "Block-device backend driver" 489 tristate "Xen block-device backend driver"
475 depends on XEN_BACKEND 490 depends on XEN_BACKEND
476 help 491 help
477 The block-device backend driver allows the kernel to export its 492 The block-device backend driver allows the kernel to export its
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 515bcd948a43..0feab261e295 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1829,10 +1829,10 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
1829 1829
1830 /* silently ignore cpu mask on UP kernel */ 1830 /* silently ignore cpu mask on UP kernel */
1831 if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) { 1831 if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
1832 err = __bitmap_parse(sc.cpu_mask, 32, 0, 1832 err = bitmap_parse(sc.cpu_mask, 32,
1833 cpumask_bits(new_cpu_mask), nr_cpu_ids); 1833 cpumask_bits(new_cpu_mask), nr_cpu_ids);
1834 if (err) { 1834 if (err) {
1835 dev_warn(DEV, "__bitmap_parse() failed with %d\n", err); 1835 dev_warn(DEV, "bitmap_parse() failed with %d\n", err);
1836 retcode = ERR_CPU_MASK_PARSE; 1836 retcode = ERR_CPU_MASK_PARSE;
1837 goto fail; 1837 goto fail;
1838 } 1838 }
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 76c8da78212b..4720c7ade0ae 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -75,11 +75,11 @@
75#include <linux/kthread.h> 75#include <linux/kthread.h>
76#include <linux/splice.h> 76#include <linux/splice.h>
77#include <linux/sysfs.h> 77#include <linux/sysfs.h>
78 78#include <linux/miscdevice.h>
79#include <asm/uaccess.h> 79#include <asm/uaccess.h>
80 80
81static LIST_HEAD(loop_devices); 81static DEFINE_IDR(loop_index_idr);
82static DEFINE_MUTEX(loop_devices_mutex); 82static DEFINE_MUTEX(loop_index_mutex);
83 83
84static int max_part; 84static int max_part;
85static int part_shift; 85static int part_shift;
@@ -722,17 +722,10 @@ static inline int is_loop_device(struct file *file)
722static ssize_t loop_attr_show(struct device *dev, char *page, 722static ssize_t loop_attr_show(struct device *dev, char *page,
723 ssize_t (*callback)(struct loop_device *, char *)) 723 ssize_t (*callback)(struct loop_device *, char *))
724{ 724{
725 struct loop_device *l, *lo = NULL; 725 struct gendisk *disk = dev_to_disk(dev);
726 726 struct loop_device *lo = disk->private_data;
727 mutex_lock(&loop_devices_mutex);
728 list_for_each_entry(l, &loop_devices, lo_list)
729 if (disk_to_dev(l->lo_disk) == dev) {
730 lo = l;
731 break;
732 }
733 mutex_unlock(&loop_devices_mutex);
734 727
735 return lo ? callback(lo, page) : -EIO; 728 return callback(lo, page);
736} 729}
737 730
738#define LOOP_ATTR_RO(_name) \ 731#define LOOP_ATTR_RO(_name) \
@@ -750,10 +743,10 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
750 ssize_t ret; 743 ssize_t ret;
751 char *p = NULL; 744 char *p = NULL;
752 745
753 mutex_lock(&lo->lo_ctl_mutex); 746 spin_lock_irq(&lo->lo_lock);
754 if (lo->lo_backing_file) 747 if (lo->lo_backing_file)
755 p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1); 748 p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1);
756 mutex_unlock(&lo->lo_ctl_mutex); 749 spin_unlock_irq(&lo->lo_lock);
757 750
758 if (IS_ERR_OR_NULL(p)) 751 if (IS_ERR_OR_NULL(p))
759 ret = PTR_ERR(p); 752 ret = PTR_ERR(p);
@@ -1007,7 +1000,9 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
1007 1000
1008 kthread_stop(lo->lo_thread); 1001 kthread_stop(lo->lo_thread);
1009 1002
1003 spin_lock_irq(&lo->lo_lock);
1010 lo->lo_backing_file = NULL; 1004 lo->lo_backing_file = NULL;
1005 spin_unlock_irq(&lo->lo_lock);
1011 1006
1012 loop_release_xfer(lo); 1007 loop_release_xfer(lo);
1013 lo->transfer = NULL; 1008 lo->transfer = NULL;
@@ -1485,13 +1480,22 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
1485 1480
1486static int lo_open(struct block_device *bdev, fmode_t mode) 1481static int lo_open(struct block_device *bdev, fmode_t mode)
1487{ 1482{
1488 struct loop_device *lo = bdev->bd_disk->private_data; 1483 struct loop_device *lo;
1484 int err = 0;
1485
1486 mutex_lock(&loop_index_mutex);
1487 lo = bdev->bd_disk->private_data;
1488 if (!lo) {
1489 err = -ENXIO;
1490 goto out;
1491 }
1489 1492
1490 mutex_lock(&lo->lo_ctl_mutex); 1493 mutex_lock(&lo->lo_ctl_mutex);
1491 lo->lo_refcnt++; 1494 lo->lo_refcnt++;
1492 mutex_unlock(&lo->lo_ctl_mutex); 1495 mutex_unlock(&lo->lo_ctl_mutex);
1493 1496out:
1494 return 0; 1497 mutex_unlock(&loop_index_mutex);
1498 return err;
1495} 1499}
1496 1500
1497static int lo_release(struct gendisk *disk, fmode_t mode) 1501static int lo_release(struct gendisk *disk, fmode_t mode)
@@ -1557,40 +1561,71 @@ int loop_register_transfer(struct loop_func_table *funcs)
1557 return 0; 1561 return 0;
1558} 1562}
1559 1563
1564static int unregister_transfer_cb(int id, void *ptr, void *data)
1565{
1566 struct loop_device *lo = ptr;
1567 struct loop_func_table *xfer = data;
1568
1569 mutex_lock(&lo->lo_ctl_mutex);
1570 if (lo->lo_encryption == xfer)
1571 loop_release_xfer(lo);
1572 mutex_unlock(&lo->lo_ctl_mutex);
1573 return 0;
1574}
1575
1560int loop_unregister_transfer(int number) 1576int loop_unregister_transfer(int number)
1561{ 1577{
1562 unsigned int n = number; 1578 unsigned int n = number;
1563 struct loop_device *lo;
1564 struct loop_func_table *xfer; 1579 struct loop_func_table *xfer;
1565 1580
1566 if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL) 1581 if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
1567 return -EINVAL; 1582 return -EINVAL;
1568 1583
1569 xfer_funcs[n] = NULL; 1584 xfer_funcs[n] = NULL;
1570 1585 idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
1571 list_for_each_entry(lo, &loop_devices, lo_list) {
1572 mutex_lock(&lo->lo_ctl_mutex);
1573
1574 if (lo->lo_encryption == xfer)
1575 loop_release_xfer(lo);
1576
1577 mutex_unlock(&lo->lo_ctl_mutex);
1578 }
1579
1580 return 0; 1586 return 0;
1581} 1587}
1582 1588
1583EXPORT_SYMBOL(loop_register_transfer); 1589EXPORT_SYMBOL(loop_register_transfer);
1584EXPORT_SYMBOL(loop_unregister_transfer); 1590EXPORT_SYMBOL(loop_unregister_transfer);
1585 1591
1586static struct loop_device *loop_alloc(int i) 1592static int loop_add(struct loop_device **l, int i)
1587{ 1593{
1588 struct loop_device *lo; 1594 struct loop_device *lo;
1589 struct gendisk *disk; 1595 struct gendisk *disk;
1596 int err;
1590 1597
1591 lo = kzalloc(sizeof(*lo), GFP_KERNEL); 1598 lo = kzalloc(sizeof(*lo), GFP_KERNEL);
1592 if (!lo) 1599 if (!lo) {
1600 err = -ENOMEM;
1593 goto out; 1601 goto out;
1602 }
1603
1604 err = idr_pre_get(&loop_index_idr, GFP_KERNEL);
1605 if (err < 0)
1606 goto out_free_dev;
1607
1608 if (i >= 0) {
1609 int m;
1610
1611 /* create specific i in the index */
1612 err = idr_get_new_above(&loop_index_idr, lo, i, &m);
1613 if (err >= 0 && i != m) {
1614 idr_remove(&loop_index_idr, m);
1615 err = -EEXIST;
1616 }
1617 } else if (i == -1) {
1618 int m;
1619
1620 /* get next free nr */
1621 err = idr_get_new(&loop_index_idr, lo, &m);
1622 if (err >= 0)
1623 i = m;
1624 } else {
1625 err = -EINVAL;
1626 }
1627 if (err < 0)
1628 goto out_free_dev;
1594 1629
1595 lo->lo_queue = blk_alloc_queue(GFP_KERNEL); 1630 lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
1596 if (!lo->lo_queue) 1631 if (!lo->lo_queue)
@@ -1611,81 +1646,158 @@ static struct loop_device *loop_alloc(int i)
1611 disk->private_data = lo; 1646 disk->private_data = lo;
1612 disk->queue = lo->lo_queue; 1647 disk->queue = lo->lo_queue;
1613 sprintf(disk->disk_name, "loop%d", i); 1648 sprintf(disk->disk_name, "loop%d", i);
1614 return lo; 1649 add_disk(disk);
1650 *l = lo;
1651 return lo->lo_number;
1615 1652
1616out_free_queue: 1653out_free_queue:
1617 blk_cleanup_queue(lo->lo_queue); 1654 blk_cleanup_queue(lo->lo_queue);
1618out_free_dev: 1655out_free_dev:
1619 kfree(lo); 1656 kfree(lo);
1620out: 1657out:
1621 return NULL; 1658 return err;
1622} 1659}
1623 1660
1624static void loop_free(struct loop_device *lo) 1661static void loop_remove(struct loop_device *lo)
1625{ 1662{
1663 del_gendisk(lo->lo_disk);
1626 blk_cleanup_queue(lo->lo_queue); 1664 blk_cleanup_queue(lo->lo_queue);
1627 put_disk(lo->lo_disk); 1665 put_disk(lo->lo_disk);
1628 list_del(&lo->lo_list);
1629 kfree(lo); 1666 kfree(lo);
1630} 1667}
1631 1668
1632static struct loop_device *loop_init_one(int i) 1669static int find_free_cb(int id, void *ptr, void *data)
1670{
1671 struct loop_device *lo = ptr;
1672 struct loop_device **l = data;
1673
1674 if (lo->lo_state == Lo_unbound) {
1675 *l = lo;
1676 return 1;
1677 }
1678 return 0;
1679}
1680
1681static int loop_lookup(struct loop_device **l, int i)
1633{ 1682{
1634 struct loop_device *lo; 1683 struct loop_device *lo;
1684 int ret = -ENODEV;
1635 1685
1636 list_for_each_entry(lo, &loop_devices, lo_list) { 1686 if (i < 0) {
1637 if (lo->lo_number == i) 1687 int err;
1638 return lo; 1688
1689 err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
1690 if (err == 1) {
1691 *l = lo;
1692 ret = lo->lo_number;
1693 }
1694 goto out;
1639 } 1695 }
1640 1696
1641 lo = loop_alloc(i); 1697 /* lookup and return a specific i */
1698 lo = idr_find(&loop_index_idr, i);
1642 if (lo) { 1699 if (lo) {
1643 add_disk(lo->lo_disk); 1700 *l = lo;
1644 list_add_tail(&lo->lo_list, &loop_devices); 1701 ret = lo->lo_number;
1645 } 1702 }
1646 return lo; 1703out:
1647} 1704 return ret;
1648
1649static void loop_del_one(struct loop_device *lo)
1650{
1651 del_gendisk(lo->lo_disk);
1652 loop_free(lo);
1653} 1705}
1654 1706
1655static struct kobject *loop_probe(dev_t dev, int *part, void *data) 1707static struct kobject *loop_probe(dev_t dev, int *part, void *data)
1656{ 1708{
1657 struct loop_device *lo; 1709 struct loop_device *lo;
1658 struct kobject *kobj; 1710 struct kobject *kobj;
1711 int err;
1659 1712
1660 mutex_lock(&loop_devices_mutex); 1713 mutex_lock(&loop_index_mutex);
1661 lo = loop_init_one(MINOR(dev) >> part_shift); 1714 err = loop_lookup(&lo, MINOR(dev) >> part_shift);
1662 kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM); 1715 if (err < 0)
1663 mutex_unlock(&loop_devices_mutex); 1716 err = loop_add(&lo, MINOR(dev) >> part_shift);
1717 if (err < 0)
1718 kobj = ERR_PTR(err);
1719 else
1720 kobj = get_disk(lo->lo_disk);
1721 mutex_unlock(&loop_index_mutex);
1664 1722
1665 *part = 0; 1723 *part = 0;
1666 return kobj; 1724 return kobj;
1667} 1725}
1668 1726
1727static long loop_control_ioctl(struct file *file, unsigned int cmd,
1728 unsigned long parm)
1729{
1730 struct loop_device *lo;
1731 int ret = -ENOSYS;
1732
1733 mutex_lock(&loop_index_mutex);
1734 switch (cmd) {
1735 case LOOP_CTL_ADD:
1736 ret = loop_lookup(&lo, parm);
1737 if (ret >= 0) {
1738 ret = -EEXIST;
1739 break;
1740 }
1741 ret = loop_add(&lo, parm);
1742 break;
1743 case LOOP_CTL_REMOVE:
1744 ret = loop_lookup(&lo, parm);
1745 if (ret < 0)
1746 break;
1747 mutex_lock(&lo->lo_ctl_mutex);
1748 if (lo->lo_state != Lo_unbound) {
1749 ret = -EBUSY;
1750 mutex_unlock(&lo->lo_ctl_mutex);
1751 break;
1752 }
1753 if (lo->lo_refcnt > 0) {
1754 ret = -EBUSY;
1755 mutex_unlock(&lo->lo_ctl_mutex);
1756 break;
1757 }
1758 lo->lo_disk->private_data = NULL;
1759 mutex_unlock(&lo->lo_ctl_mutex);
1760 idr_remove(&loop_index_idr, lo->lo_number);
1761 loop_remove(lo);
1762 break;
1763 case LOOP_CTL_GET_FREE:
1764 ret = loop_lookup(&lo, -1);
1765 if (ret >= 0)
1766 break;
1767 ret = loop_add(&lo, -1);
1768 }
1769 mutex_unlock(&loop_index_mutex);
1770
1771 return ret;
1772}
1773
1774static const struct file_operations loop_ctl_fops = {
1775 .open = nonseekable_open,
1776 .unlocked_ioctl = loop_control_ioctl,
1777 .compat_ioctl = loop_control_ioctl,
1778 .owner = THIS_MODULE,
1779 .llseek = noop_llseek,
1780};
1781
1782static struct miscdevice loop_misc = {
1783 .minor = LOOP_CTRL_MINOR,
1784 .name = "loop-control",
1785 .fops = &loop_ctl_fops,
1786};
1787
1788MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
1789MODULE_ALIAS("devname:loop-control");
1790
1669static int __init loop_init(void) 1791static int __init loop_init(void)
1670{ 1792{
1671 int i, nr; 1793 int i, nr;
1672 unsigned long range; 1794 unsigned long range;
1673 struct loop_device *lo, *next; 1795 struct loop_device *lo;
1796 int err;
1674 1797
1675 /* 1798 err = misc_register(&loop_misc);
1676 * loop module now has a feature to instantiate underlying device 1799 if (err < 0)
1677 * structure on-demand, provided that there is an access dev node. 1800 return err;
1678 * However, this will not work well with user space tool that doesn't
1679 * know about such "feature". In order to not break any existing
1680 * tool, we do the following:
1681 *
1682 * (1) if max_loop is specified, create that many upfront, and this
1683 * also becomes a hard limit.
1684 * (2) if max_loop is not specified, create 8 loop device on module
1685 * load, user can further extend loop device by create dev node
1686 * themselves and have kernel automatically instantiate actual
1687 * device on-demand.
1688 */
1689 1801
1690 part_shift = 0; 1802 part_shift = 0;
1691 if (max_part > 0) { 1803 if (max_part > 0) {
@@ -1708,57 +1820,60 @@ static int __init loop_init(void)
1708 if (max_loop > 1UL << (MINORBITS - part_shift)) 1820 if (max_loop > 1UL << (MINORBITS - part_shift))
1709 return -EINVAL; 1821 return -EINVAL;
1710 1822
1823 /*
1824 * If max_loop is specified, create that many devices upfront.
1825 * This also becomes a hard limit. If max_loop is not specified,
1826 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
1827 * init time. Loop devices can be requested on-demand with the
1828 * /dev/loop-control interface, or be instantiated by accessing
1829 * a 'dead' device node.
1830 */
1711 if (max_loop) { 1831 if (max_loop) {
1712 nr = max_loop; 1832 nr = max_loop;
1713 range = max_loop << part_shift; 1833 range = max_loop << part_shift;
1714 } else { 1834 } else {
1715 nr = 8; 1835 nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
1716 range = 1UL << MINORBITS; 1836 range = 1UL << MINORBITS;
1717 } 1837 }
1718 1838
1719 if (register_blkdev(LOOP_MAJOR, "loop")) 1839 if (register_blkdev(LOOP_MAJOR, "loop"))
1720 return -EIO; 1840 return -EIO;
1721 1841
1722 for (i = 0; i < nr; i++) {
1723 lo = loop_alloc(i);
1724 if (!lo)
1725 goto Enomem;
1726 list_add_tail(&lo->lo_list, &loop_devices);
1727 }
1728
1729 /* point of no return */
1730
1731 list_for_each_entry(lo, &loop_devices, lo_list)
1732 add_disk(lo->lo_disk);
1733
1734 blk_register_region(MKDEV(LOOP_MAJOR, 0), range, 1842 blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
1735 THIS_MODULE, loop_probe, NULL, NULL); 1843 THIS_MODULE, loop_probe, NULL, NULL);
1736 1844
1845 /* pre-create number of devices given by config or max_loop */
1846 mutex_lock(&loop_index_mutex);
1847 for (i = 0; i < nr; i++)
1848 loop_add(&lo, i);
1849 mutex_unlock(&loop_index_mutex);
1850
1737 printk(KERN_INFO "loop: module loaded\n"); 1851 printk(KERN_INFO "loop: module loaded\n");
1738 return 0; 1852 return 0;
1853}
1739 1854
1740Enomem: 1855static int loop_exit_cb(int id, void *ptr, void *data)
1741 printk(KERN_INFO "loop: out of memory\n"); 1856{
1742 1857 struct loop_device *lo = ptr;
1743 list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
1744 loop_free(lo);
1745 1858
1746 unregister_blkdev(LOOP_MAJOR, "loop"); 1859 loop_remove(lo);
1747 return -ENOMEM; 1860 return 0;
1748} 1861}
1749 1862
1750static void __exit loop_exit(void) 1863static void __exit loop_exit(void)
1751{ 1864{
1752 unsigned long range; 1865 unsigned long range;
1753 struct loop_device *lo, *next;
1754 1866
1755 range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; 1867 range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
1756 1868
1757 list_for_each_entry_safe(lo, next, &loop_devices, lo_list) 1869 idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
1758 loop_del_one(lo); 1870 idr_remove_all(&loop_index_idr);
1871 idr_destroy(&loop_index_idr);
1759 1872
1760 blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range); 1873 blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
1761 unregister_blkdev(LOOP_MAJOR, "loop"); 1874 unregister_blkdev(LOOP_MAJOR, "loop");
1875
1876 misc_deregister(&loop_misc);
1762} 1877}
1763 1878
1764module_init(loop_init); 1879module_init(loop_init);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 773bfa792777..ae3e167e17ad 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1184,6 +1184,7 @@ static struct of_device_id swim3_match[] =
1184 { 1184 {
1185 .compatible = "swim3" 1185 .compatible = "swim3"
1186 }, 1186 },
1187 { /* end of list */ }
1187}; 1188};
1188 1189
1189static struct macio_driver swim3_driver = 1190static struct macio_driver swim3_driver =
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b536a9cef917..9ea8c2576c70 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -123,8 +123,8 @@ static DEFINE_SPINLOCK(minor_lock);
123#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED)) 123#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
124#define EMULATED_HD_DISK_MINOR_OFFSET (0) 124#define EMULATED_HD_DISK_MINOR_OFFSET (0)
125#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256) 125#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
126#define EMULATED_SD_DISK_MINOR_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET + (4 * 16)) 126#define EMULATED_SD_DISK_MINOR_OFFSET (0)
127#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_HD_DISK_NAME_OFFSET + 4) 127#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)
128 128
129#define DEV_NAME "xvd" /* name in /dev */ 129#define DEV_NAME "xvd" /* name in /dev */
130 130
@@ -529,7 +529,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
529 minor = BLKIF_MINOR_EXT(info->vdevice); 529 minor = BLKIF_MINOR_EXT(info->vdevice);
530 nr_parts = PARTS_PER_EXT_DISK; 530 nr_parts = PARTS_PER_EXT_DISK;
531 offset = minor / nr_parts; 531 offset = minor / nr_parts;
532 if (xen_hvm_domain() && offset <= EMULATED_HD_DISK_NAME_OFFSET + 4) 532 if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
533 printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with " 533 printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
534 "emulated IDE disks,\n\t choose an xvd device name" 534 "emulated IDE disks,\n\t choose an xvd device name"
535 "from xvde on\n", info->vdevice); 535 "from xvde on\n", info->vdevice);
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a5854735bb2e..db7cb8111fbe 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -63,6 +63,7 @@ static struct usb_device_id ath3k_table[] = {
63 /* Atheros AR3011 with sflash firmware*/ 63 /* Atheros AR3011 with sflash firmware*/
64 { USB_DEVICE(0x0CF3, 0x3002) }, 64 { USB_DEVICE(0x0CF3, 0x3002) },
65 { USB_DEVICE(0x13d3, 0x3304) }, 65 { USB_DEVICE(0x13d3, 0x3304) },
66 { USB_DEVICE(0x0930, 0x0215) },
66 67
67 /* Atheros AR9285 Malbec with sflash firmware */ 68 /* Atheros AR9285 Malbec with sflash firmware */
68 { USB_DEVICE(0x03F0, 0x311D) }, 69 { USB_DEVICE(0x03F0, 0x311D) },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 91d13a9e8c65..3ef476070baf 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -106,6 +106,7 @@ static struct usb_device_id blacklist_table[] = {
106 /* Atheros 3011 with sflash firmware */ 106 /* Atheros 3011 with sflash firmware */
107 { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, 107 { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
108 { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, 108 { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
109 { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
109 110
110 /* Atheros AR9285 Malbec with sflash firmware */ 111 /* Atheros AR9285 Malbec with sflash firmware */
111 { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, 112 { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
@@ -256,7 +257,9 @@ static void btusb_intr_complete(struct urb *urb)
256 257
257 err = usb_submit_urb(urb, GFP_ATOMIC); 258 err = usb_submit_urb(urb, GFP_ATOMIC);
258 if (err < 0) { 259 if (err < 0) {
259 if (err != -EPERM) 260 /* -EPERM: urb is being killed;
261 * -ENODEV: device got disconnected */
262 if (err != -EPERM && err != -ENODEV)
260 BT_ERR("%s urb %p failed to resubmit (%d)", 263 BT_ERR("%s urb %p failed to resubmit (%d)",
261 hdev->name, urb, -err); 264 hdev->name, urb, -err);
262 usb_unanchor_urb(urb); 265 usb_unanchor_urb(urb);
@@ -341,7 +344,9 @@ static void btusb_bulk_complete(struct urb *urb)
341 344
342 err = usb_submit_urb(urb, GFP_ATOMIC); 345 err = usb_submit_urb(urb, GFP_ATOMIC);
343 if (err < 0) { 346 if (err < 0) {
344 if (err != -EPERM) 347 /* -EPERM: urb is being killed;
348 * -ENODEV: device got disconnected */
349 if (err != -EPERM && err != -ENODEV)
345 BT_ERR("%s urb %p failed to resubmit (%d)", 350 BT_ERR("%s urb %p failed to resubmit (%d)",
346 hdev->name, urb, -err); 351 hdev->name, urb, -err);
347 usb_unanchor_urb(urb); 352 usb_unanchor_urb(urb);
@@ -431,7 +436,9 @@ static void btusb_isoc_complete(struct urb *urb)
431 436
432 err = usb_submit_urb(urb, GFP_ATOMIC); 437 err = usb_submit_urb(urb, GFP_ATOMIC);
433 if (err < 0) { 438 if (err < 0) {
434 if (err != -EPERM) 439 /* -EPERM: urb is being killed;
440 * -ENODEV: device got disconnected */
441 if (err != -EPERM && err != -ENODEV)
435 BT_ERR("%s urb %p failed to resubmit (%d)", 442 BT_ERR("%s urb %p failed to resubmit (%d)",
436 hdev->name, urb, -err); 443 hdev->name, urb, -err);
437 usb_unanchor_urb(urb); 444 usb_unanchor_urb(urb);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 75fb965b8f72..f997c27d79e2 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1929,11 +1929,17 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s,
1929 goto out; 1929 goto out;
1930 1930
1931 s->manufact.len = buf[0] << 8 | buf[1]; 1931 s->manufact.len = buf[0] << 8 | buf[1];
1932 if (s->manufact.len < 0 || s->manufact.len > 2048) { 1932 if (s->manufact.len < 0) {
1933 cdinfo(CD_WARNING, "Received invalid manufacture info length" 1933 cdinfo(CD_WARNING, "Received invalid manufacture info length"
1934 " (%d)\n", s->manufact.len); 1934 " (%d)\n", s->manufact.len);
1935 ret = -EIO; 1935 ret = -EIO;
1936 } else { 1936 } else {
1937 if (s->manufact.len > 2048) {
1938 cdinfo(CD_WARNING, "Received invalid manufacture info "
1939 "length (%d): truncating to 2048\n",
1940 s->manufact.len);
1941 s->manufact.len = 2048;
1942 }
1937 memcpy(s->manufact.value, &buf[4], s->manufact.len); 1943 memcpy(s->manufact.value, &buf[4], s->manufact.len);
1938 } 1944 }
1939 1945
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 49502bc5360a..423fd56bf612 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -616,5 +616,16 @@ config MSM_SMD_PKT
616 Enables userspace clients to read and write to some packet SMD 616 Enables userspace clients to read and write to some packet SMD
617 ports via device interface for MSM chipset. 617 ports via device interface for MSM chipset.
618 618
619config TILE_SROM
620 bool "Character-device access via hypervisor to the Tilera SPI ROM"
621 depends on TILE
622 default y
623 ---help---
624 This device provides character-level read-write access
625 to the SROM, typically via the "0", "1", and "2" devices
626 in /dev/srom/. The Tilera hypervisor makes the flash
627 device appear much like a simple EEPROM, and knows
628 how to partition a single ROM for multiple purposes.
629
619endmenu 630endmenu
620 631
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 7a00672bd85d..32762ba769c2 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -63,3 +63,5 @@ obj-$(CONFIG_RAMOOPS) += ramoops.o
63 63
64obj-$(CONFIG_JS_RTC) += js-rtc.o 64obj-$(CONFIG_JS_RTC) += js-rtc.o
65js-rtc-y = rtc.o 65js-rtc-y = rtc.o
66
67obj-$(CONFIG_TILE_SROM) += tile-srom.o
diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c
index b6f8a65c9960..8eca55deb3a3 100644
--- a/drivers/char/msm_smd_pkt.c
+++ b/drivers/char/msm_smd_pkt.c
@@ -379,9 +379,8 @@ static int __init smd_pkt_init(void)
379 for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) { 379 for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) {
380 smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev), 380 smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev),
381 GFP_KERNEL); 381 GFP_KERNEL);
382 if (IS_ERR(smd_pkt_devp[i])) { 382 if (!smd_pkt_devp[i]) {
383 r = PTR_ERR(smd_pkt_devp[i]); 383 pr_err("kmalloc() failed\n");
384 pr_err("kmalloc() failed %d\n", r);
385 goto clean_cdevs; 384 goto clean_cdevs;
386 } 385 }
387 386
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
index fca0c51bbc90..810aff9e750f 100644
--- a/drivers/char/ramoops.c
+++ b/drivers/char/ramoops.c
@@ -147,6 +147,14 @@ static int __init ramoops_probe(struct platform_device *pdev)
147 cxt->phys_addr = pdata->mem_address; 147 cxt->phys_addr = pdata->mem_address;
148 cxt->record_size = pdata->record_size; 148 cxt->record_size = pdata->record_size;
149 cxt->dump_oops = pdata->dump_oops; 149 cxt->dump_oops = pdata->dump_oops;
150 /*
151 * Update the module parameter variables as well so they are visible
152 * through /sys/module/ramoops/parameters/
153 */
154 mem_size = pdata->mem_size;
155 mem_address = pdata->mem_address;
156 record_size = pdata->record_size;
157 dump_oops = pdata->dump_oops;
150 158
151 if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) { 159 if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) {
152 pr_err("request mem region failed\n"); 160 pr_err("request mem region failed\n");
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 729281961f22..c35a785005b0 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1300,345 +1300,14 @@ ctl_table random_table[] = {
1300}; 1300};
1301#endif /* CONFIG_SYSCTL */ 1301#endif /* CONFIG_SYSCTL */
1302 1302
1303/******************************************************************** 1303static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
1304 *
1305 * Random functions for networking
1306 *
1307 ********************************************************************/
1308
1309/*
1310 * TCP initial sequence number picking. This uses the random number
1311 * generator to pick an initial secret value. This value is hashed
1312 * along with the TCP endpoint information to provide a unique
1313 * starting point for each pair of TCP endpoints. This defeats
1314 * attacks which rely on guessing the initial TCP sequence number.
1315 * This algorithm was suggested by Steve Bellovin.
1316 *
1317 * Using a very strong hash was taking an appreciable amount of the total
1318 * TCP connection establishment time, so this is a weaker hash,
1319 * compensated for by changing the secret periodically.
1320 */
1321
1322/* F, G and H are basic MD4 functions: selection, majority, parity */
1323#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
1324#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
1325#define H(x, y, z) ((x) ^ (y) ^ (z))
1326
1327/*
1328 * The generic round function. The application is so specific that
1329 * we don't bother protecting all the arguments with parens, as is generally
1330 * good macro practice, in favor of extra legibility.
1331 * Rotation is separate from addition to prevent recomputation
1332 */
1333#define ROUND(f, a, b, c, d, x, s) \
1334 (a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s)))
1335#define K1 0
1336#define K2 013240474631UL
1337#define K3 015666365641UL
1338
1339#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1340
1341static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
1342{
1343 __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
1344
1345 /* Round 1 */
1346 ROUND(F, a, b, c, d, in[ 0] + K1, 3);
1347 ROUND(F, d, a, b, c, in[ 1] + K1, 7);
1348 ROUND(F, c, d, a, b, in[ 2] + K1, 11);
1349 ROUND(F, b, c, d, a, in[ 3] + K1, 19);
1350 ROUND(F, a, b, c, d, in[ 4] + K1, 3);
1351 ROUND(F, d, a, b, c, in[ 5] + K1, 7);
1352 ROUND(F, c, d, a, b, in[ 6] + K1, 11);
1353 ROUND(F, b, c, d, a, in[ 7] + K1, 19);
1354 ROUND(F, a, b, c, d, in[ 8] + K1, 3);
1355 ROUND(F, d, a, b, c, in[ 9] + K1, 7);
1356 ROUND(F, c, d, a, b, in[10] + K1, 11);
1357 ROUND(F, b, c, d, a, in[11] + K1, 19);
1358
1359 /* Round 2 */
1360 ROUND(G, a, b, c, d, in[ 1] + K2, 3);
1361 ROUND(G, d, a, b, c, in[ 3] + K2, 5);
1362 ROUND(G, c, d, a, b, in[ 5] + K2, 9);
1363 ROUND(G, b, c, d, a, in[ 7] + K2, 13);
1364 ROUND(G, a, b, c, d, in[ 9] + K2, 3);
1365 ROUND(G, d, a, b, c, in[11] + K2, 5);
1366 ROUND(G, c, d, a, b, in[ 0] + K2, 9);
1367 ROUND(G, b, c, d, a, in[ 2] + K2, 13);
1368 ROUND(G, a, b, c, d, in[ 4] + K2, 3);
1369 ROUND(G, d, a, b, c, in[ 6] + K2, 5);
1370 ROUND(G, c, d, a, b, in[ 8] + K2, 9);
1371 ROUND(G, b, c, d, a, in[10] + K2, 13);
1372
1373 /* Round 3 */
1374 ROUND(H, a, b, c, d, in[ 3] + K3, 3);
1375 ROUND(H, d, a, b, c, in[ 7] + K3, 9);
1376 ROUND(H, c, d, a, b, in[11] + K3, 11);
1377 ROUND(H, b, c, d, a, in[ 2] + K3, 15);
1378 ROUND(H, a, b, c, d, in[ 6] + K3, 3);
1379 ROUND(H, d, a, b, c, in[10] + K3, 9);
1380 ROUND(H, c, d, a, b, in[ 1] + K3, 11);
1381 ROUND(H, b, c, d, a, in[ 5] + K3, 15);
1382 ROUND(H, a, b, c, d, in[ 9] + K3, 3);
1383 ROUND(H, d, a, b, c, in[ 0] + K3, 9);
1384 ROUND(H, c, d, a, b, in[ 4] + K3, 11);
1385 ROUND(H, b, c, d, a, in[ 8] + K3, 15);
1386
1387 return buf[1] + b; /* "most hashed" word */
1388 /* Alternative: return sum of all words? */
1389}
1390#endif
1391
1392#undef ROUND
1393#undef F
1394#undef G
1395#undef H
1396#undef K1
1397#undef K2
1398#undef K3
1399
1400/* This should not be decreased so low that ISNs wrap too fast. */
1401#define REKEY_INTERVAL (300 * HZ)
1402/*
1403 * Bit layout of the tcp sequence numbers (before adding current time):
1404 * bit 24-31: increased after every key exchange
1405 * bit 0-23: hash(source,dest)
1406 *
1407 * The implementation is similar to the algorithm described
1408 * in the Appendix of RFC 1185, except that
1409 * - it uses a 1 MHz clock instead of a 250 kHz clock
1410 * - it performs a rekey every 5 minutes, which is equivalent
1411 * to a (source,dest) tulple dependent forward jump of the
1412 * clock by 0..2^(HASH_BITS+1)
1413 *
1414 * Thus the average ISN wraparound time is 68 minutes instead of
1415 * 4.55 hours.
1416 *
1417 * SMP cleanup and lock avoidance with poor man's RCU.
1418 * Manfred Spraul <manfred@colorfullife.com>
1419 *
1420 */
1421#define COUNT_BITS 8
1422#define COUNT_MASK ((1 << COUNT_BITS) - 1)
1423#define HASH_BITS 24
1424#define HASH_MASK ((1 << HASH_BITS) - 1)
1425 1304
1426static struct keydata { 1305static int __init random_int_secret_init(void)
1427 __u32 count; /* already shifted to the final position */
1428 __u32 secret[12];
1429} ____cacheline_aligned ip_keydata[2];
1430
1431static unsigned int ip_cnt;
1432
1433static void rekey_seq_generator(struct work_struct *work);
1434
1435static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
1436
1437/*
1438 * Lock avoidance:
1439 * The ISN generation runs lockless - it's just a hash over random data.
1440 * State changes happen every 5 minutes when the random key is replaced.
1441 * Synchronization is performed by having two copies of the hash function
1442 * state and rekey_seq_generator always updates the inactive copy.
1443 * The copy is then activated by updating ip_cnt.
1444 * The implementation breaks down if someone blocks the thread
1445 * that processes SYN requests for more than 5 minutes. Should never
1446 * happen, and even if that happens only a not perfectly compliant
1447 * ISN is generated, nothing fatal.
1448 */
1449static void rekey_seq_generator(struct work_struct *work)
1450{ 1306{
1451 struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)]; 1307 get_random_bytes(random_int_secret, sizeof(random_int_secret));
1452
1453 get_random_bytes(keyptr->secret, sizeof(keyptr->secret));
1454 keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS;
1455 smp_wmb();
1456 ip_cnt++;
1457 schedule_delayed_work(&rekey_work,
1458 round_jiffies_relative(REKEY_INTERVAL));
1459}
1460
1461static inline struct keydata *get_keyptr(void)
1462{
1463 struct keydata *keyptr = &ip_keydata[ip_cnt & 1];
1464
1465 smp_rmb();
1466
1467 return keyptr;
1468}
1469
1470static __init int seqgen_init(void)
1471{
1472 rekey_seq_generator(NULL);
1473 return 0; 1308 return 0;
1474} 1309}
1475late_initcall(seqgen_init); 1310late_initcall(random_int_secret_init);
1476
1477#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1478__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
1479 __be16 sport, __be16 dport)
1480{
1481 __u32 seq;
1482 __u32 hash[12];
1483 struct keydata *keyptr = get_keyptr();
1484
1485 /* The procedure is the same as for IPv4, but addresses are longer.
1486 * Thus we must use twothirdsMD4Transform.
1487 */
1488
1489 memcpy(hash, saddr, 16);
1490 hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
1491 memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
1492
1493 seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
1494 seq += keyptr->count;
1495
1496 seq += ktime_to_ns(ktime_get_real());
1497
1498 return seq;
1499}
1500EXPORT_SYMBOL(secure_tcpv6_sequence_number);
1501#endif
1502
1503/* The code below is shamelessly stolen from secure_tcp_sequence_number().
1504 * All blames to Andrey V. Savochkin <saw@msu.ru>.
1505 */
1506__u32 secure_ip_id(__be32 daddr)
1507{
1508 struct keydata *keyptr;
1509 __u32 hash[4];
1510
1511 keyptr = get_keyptr();
1512
1513 /*
1514 * Pick a unique starting offset for each IP destination.
1515 * The dest ip address is placed in the starting vector,
1516 * which is then hashed with random data.
1517 */
1518 hash[0] = (__force __u32)daddr;
1519 hash[1] = keyptr->secret[9];
1520 hash[2] = keyptr->secret[10];
1521 hash[3] = keyptr->secret[11];
1522
1523 return half_md4_transform(hash, keyptr->secret);
1524}
1525
1526__u32 secure_ipv6_id(const __be32 daddr[4])
1527{
1528 const struct keydata *keyptr;
1529 __u32 hash[4];
1530
1531 keyptr = get_keyptr();
1532
1533 hash[0] = (__force __u32)daddr[0];
1534 hash[1] = (__force __u32)daddr[1];
1535 hash[2] = (__force __u32)daddr[2];
1536 hash[3] = (__force __u32)daddr[3];
1537
1538 return half_md4_transform(hash, keyptr->secret);
1539}
1540
1541#ifdef CONFIG_INET
1542
1543__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
1544 __be16 sport, __be16 dport)
1545{
1546 __u32 seq;
1547 __u32 hash[4];
1548 struct keydata *keyptr = get_keyptr();
1549
1550 /*
1551 * Pick a unique starting offset for each TCP connection endpoints
1552 * (saddr, daddr, sport, dport).
1553 * Note that the words are placed into the starting vector, which is
1554 * then mixed with a partial MD4 over random data.
1555 */
1556 hash[0] = (__force u32)saddr;
1557 hash[1] = (__force u32)daddr;
1558 hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
1559 hash[3] = keyptr->secret[11];
1560
1561 seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
1562 seq += keyptr->count;
1563 /*
1564 * As close as possible to RFC 793, which
1565 * suggests using a 250 kHz clock.
1566 * Further reading shows this assumes 2 Mb/s networks.
1567 * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
1568 * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
1569 * we also need to limit the resolution so that the u32 seq
1570 * overlaps less than one time per MSL (2 minutes).
1571 * Choosing a clock of 64 ns period is OK. (period of 274 s)
1572 */
1573 seq += ktime_to_ns(ktime_get_real()) >> 6;
1574
1575 return seq;
1576}
1577
1578/* Generate secure starting point for ephemeral IPV4 transport port search */
1579u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
1580{
1581 struct keydata *keyptr = get_keyptr();
1582 u32 hash[4];
1583
1584 /*
1585 * Pick a unique starting offset for each ephemeral port search
1586 * (saddr, daddr, dport) and 48bits of random data.
1587 */
1588 hash[0] = (__force u32)saddr;
1589 hash[1] = (__force u32)daddr;
1590 hash[2] = (__force u32)dport ^ keyptr->secret[10];
1591 hash[3] = keyptr->secret[11];
1592
1593 return half_md4_transform(hash, keyptr->secret);
1594}
1595EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
1596
1597#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1598u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
1599 __be16 dport)
1600{
1601 struct keydata *keyptr = get_keyptr();
1602 u32 hash[12];
1603
1604 memcpy(hash, saddr, 16);
1605 hash[4] = (__force u32)dport;
1606 memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
1607
1608 return twothirdsMD4Transform((const __u32 *)daddr, hash);
1609}
1610#endif
1611
1612#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
1613/* Similar to secure_tcp_sequence_number but generate a 48 bit value
1614 * bit's 32-47 increase every key exchange
1615 * 0-31 hash(source, dest)
1616 */
1617u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
1618 __be16 sport, __be16 dport)
1619{
1620 u64 seq;
1621 __u32 hash[4];
1622 struct keydata *keyptr = get_keyptr();
1623
1624 hash[0] = (__force u32)saddr;
1625 hash[1] = (__force u32)daddr;
1626 hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
1627 hash[3] = keyptr->secret[11];
1628
1629 seq = half_md4_transform(hash, keyptr->secret);
1630 seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
1631
1632 seq += ktime_to_ns(ktime_get_real());
1633 seq &= (1ull << 48) - 1;
1634
1635 return seq;
1636}
1637EXPORT_SYMBOL(secure_dccp_sequence_number);
1638#endif
1639
1640#endif /* CONFIG_INET */
1641
1642 1311
1643/* 1312/*
1644 * Get a random word for internal kernel use only. Similar to urandom but 1313 * Get a random word for internal kernel use only. Similar to urandom but
@@ -1646,17 +1315,15 @@ EXPORT_SYMBOL(secure_dccp_sequence_number);
1646 * value is not cryptographically secure but for several uses the cost of 1315 * value is not cryptographically secure but for several uses the cost of
1647 * depleting entropy is too high 1316 * depleting entropy is too high
1648 */ 1317 */
1649DEFINE_PER_CPU(__u32 [4], get_random_int_hash); 1318DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
1650unsigned int get_random_int(void) 1319unsigned int get_random_int(void)
1651{ 1320{
1652 struct keydata *keyptr;
1653 __u32 *hash = get_cpu_var(get_random_int_hash); 1321 __u32 *hash = get_cpu_var(get_random_int_hash);
1654 int ret; 1322 unsigned int ret;
1655 1323
1656 keyptr = get_keyptr();
1657 hash[0] += current->pid + jiffies + get_cycles(); 1324 hash[0] += current->pid + jiffies + get_cycles();
1658 1325 md5_transform(hash, random_int_secret);
1659 ret = half_md4_transform(hash, keyptr->secret); 1326 ret = hash[0];
1660 put_cpu_var(get_random_int_hash); 1327 put_cpu_var(get_random_int_hash);
1661 1328
1662 return ret; 1329 return ret;
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
new file mode 100644
index 000000000000..cf3ee008dca2
--- /dev/null
+++ b/drivers/char/tile-srom.c
@@ -0,0 +1,481 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * SPI Flash ROM driver
15 *
16 * This source code is derived from code provided in "Linux Device
17 * Drivers, Third Edition", by Jonathan Corbet, Alessandro Rubini, and
18 * Greg Kroah-Hartman, published by O'Reilly Media, Inc.
19 */
20
21#include <linux/module.h>
22#include <linux/moduleparam.h>
23#include <linux/init.h>
24#include <linux/kernel.h> /* printk() */
25#include <linux/slab.h> /* kmalloc() */
26#include <linux/fs.h> /* everything... */
27#include <linux/errno.h> /* error codes */
28#include <linux/types.h> /* size_t */
29#include <linux/proc_fs.h>
30#include <linux/fcntl.h> /* O_ACCMODE */
31#include <linux/aio.h>
32#include <linux/pagemap.h>
33#include <linux/hugetlb.h>
34#include <linux/uaccess.h>
35#include <linux/platform_device.h>
36#include <hv/hypervisor.h>
37#include <linux/ioctl.h>
38#include <linux/cdev.h>
39#include <linux/delay.h>
40#include <hv/drv_srom_intf.h>
41
42/*
43 * Size of our hypervisor I/O requests. We break up large transfers
44 * so that we don't spend large uninterrupted spans of time in the
45 * hypervisor. Erasing an SROM sector takes a significant fraction of
46 * a second, so if we allowed the user to, say, do one I/O to write the
47 * entire ROM, we'd get soft lockup timeouts, or worse.
48 */
49#define SROM_CHUNK_SIZE ((size_t)4096)
50
51/*
52 * When hypervisor is busy (e.g. erasing), poll the status periodically.
53 */
54
55/*
56 * Interval to poll the state in msec
57 */
58#define SROM_WAIT_TRY_INTERVAL 20
59
60/*
61 * Maximum times to poll the state
62 */
63#define SROM_MAX_WAIT_TRY_TIMES 1000
64
65struct srom_dev {
66 int hv_devhdl; /* Handle for hypervisor device */
67 u32 total_size; /* Size of this device */
68 u32 sector_size; /* Size of a sector */
69 u32 page_size; /* Size of a page */
70 struct mutex lock; /* Allow only one accessor at a time */
71};
72
73static int srom_major; /* Dynamic major by default */
74module_param(srom_major, int, 0);
75MODULE_AUTHOR("Tilera Corporation");
76MODULE_LICENSE("GPL");
77
78static int srom_devs; /* Number of SROM partitions */
79static struct cdev srom_cdev;
80static struct class *srom_class;
81static struct srom_dev *srom_devices;
82
83/*
84 * Handle calling the hypervisor and managing EAGAIN/EBUSY.
85 */
86
87static ssize_t _srom_read(int hv_devhdl, void *buf,
88 loff_t off, size_t count)
89{
90 int retval, retries = SROM_MAX_WAIT_TRY_TIMES;
91 for (;;) {
92 retval = hv_dev_pread(hv_devhdl, 0, (HV_VirtAddr)buf,
93 count, off);
94 if (retval >= 0)
95 return retval;
96 if (retval == HV_EAGAIN)
97 continue;
98 if (retval == HV_EBUSY && --retries > 0) {
99 msleep(SROM_WAIT_TRY_INTERVAL);
100 continue;
101 }
102 pr_err("_srom_read: error %d\n", retval);
103 return -EIO;
104 }
105}
106
107static ssize_t _srom_write(int hv_devhdl, const void *buf,
108 loff_t off, size_t count)
109{
110 int retval, retries = SROM_MAX_WAIT_TRY_TIMES;
111 for (;;) {
112 retval = hv_dev_pwrite(hv_devhdl, 0, (HV_VirtAddr)buf,
113 count, off);
114 if (retval >= 0)
115 return retval;
116 if (retval == HV_EAGAIN)
117 continue;
118 if (retval == HV_EBUSY && --retries > 0) {
119 msleep(SROM_WAIT_TRY_INTERVAL);
120 continue;
121 }
122 pr_err("_srom_write: error %d\n", retval);
123 return -EIO;
124 }
125}
126
127/**
128 * srom_open() - Device open routine.
129 * @inode: Inode for this device.
130 * @filp: File for this specific open of the device.
131 *
132 * Returns zero, or an error code.
133 */
134static int srom_open(struct inode *inode, struct file *filp)
135{
136 filp->private_data = &srom_devices[iminor(inode)];
137 return 0;
138}
139
140
141/**
142 * srom_release() - Device release routine.
143 * @inode: Inode for this device.
144 * @filp: File for this specific open of the device.
145 *
146 * Returns zero, or an error code.
147 */
148static int srom_release(struct inode *inode, struct file *filp)
149{
150 struct srom_dev *srom = filp->private_data;
151 char dummy;
152
153 /* Make sure we've flushed anything written to the ROM. */
154 mutex_lock(&srom->lock);
155 if (srom->hv_devhdl >= 0)
156 _srom_write(srom->hv_devhdl, &dummy, SROM_FLUSH_OFF, 1);
157 mutex_unlock(&srom->lock);
158
159 filp->private_data = NULL;
160
161 return 0;
162}
163
164
165/**
166 * srom_read() - Read data from the device.
167 * @filp: File for this specific open of the device.
168 * @buf: User's data buffer.
169 * @count: Number of bytes requested.
170 * @f_pos: File position.
171 *
172 * Returns number of bytes read, or an error code.
173 */
174static ssize_t srom_read(struct file *filp, char __user *buf,
175 size_t count, loff_t *f_pos)
176{
177 int retval = 0;
178 void *kernbuf;
179 struct srom_dev *srom = filp->private_data;
180
181 kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL);
182 if (!kernbuf)
183 return -ENOMEM;
184
185 if (mutex_lock_interruptible(&srom->lock)) {
186 retval = -ERESTARTSYS;
187 kfree(kernbuf);
188 return retval;
189 }
190
191 while (count) {
192 int hv_retval;
193 int bytes_this_pass = min(count, SROM_CHUNK_SIZE);
194
195 hv_retval = _srom_read(srom->hv_devhdl, kernbuf,
196 *f_pos, bytes_this_pass);
197 if (hv_retval > 0) {
198 if (copy_to_user(buf, kernbuf, hv_retval) != 0) {
199 retval = -EFAULT;
200 break;
201 }
202 } else if (hv_retval <= 0) {
203 if (retval == 0)
204 retval = hv_retval;
205 break;
206 }
207
208 retval += hv_retval;
209 *f_pos += hv_retval;
210 buf += hv_retval;
211 count -= hv_retval;
212 }
213
214 mutex_unlock(&srom->lock);
215 kfree(kernbuf);
216
217 return retval;
218}
219
220/**
221 * srom_write() - Write data to the device.
222 * @filp: File for this specific open of the device.
223 * @buf: User's data buffer.
224 * @count: Number of bytes requested.
225 * @f_pos: File position.
226 *
227 * Returns number of bytes written, or an error code.
228 */
229static ssize_t srom_write(struct file *filp, const char __user *buf,
230 size_t count, loff_t *f_pos)
231{
232 int retval = 0;
233 void *kernbuf;
234 struct srom_dev *srom = filp->private_data;
235
236 kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL);
237 if (!kernbuf)
238 return -ENOMEM;
239
240 if (mutex_lock_interruptible(&srom->lock)) {
241 retval = -ERESTARTSYS;
242 kfree(kernbuf);
243 return retval;
244 }
245
246 while (count) {
247 int hv_retval;
248 int bytes_this_pass = min(count, SROM_CHUNK_SIZE);
249
250 if (copy_from_user(kernbuf, buf, bytes_this_pass) != 0) {
251 retval = -EFAULT;
252 break;
253 }
254
255 hv_retval = _srom_write(srom->hv_devhdl, kernbuf,
256 *f_pos, bytes_this_pass);
257 if (hv_retval <= 0) {
258 if (retval == 0)
259 retval = hv_retval;
260 break;
261 }
262
263 retval += hv_retval;
264 *f_pos += hv_retval;
265 buf += hv_retval;
266 count -= hv_retval;
267 }
268
269 mutex_unlock(&srom->lock);
270 kfree(kernbuf);
271
272 return retval;
273}
274
275/* Provide our own implementation so we can use srom->total_size. */
276loff_t srom_llseek(struct file *filp, loff_t offset, int origin)
277{
278 struct srom_dev *srom = filp->private_data;
279
280 if (mutex_lock_interruptible(&srom->lock))
281 return -ERESTARTSYS;
282
283 switch (origin) {
284 case SEEK_END:
285 offset += srom->total_size;
286 break;
287 case SEEK_CUR:
288 offset += filp->f_pos;
289 break;
290 }
291
292 if (offset < 0 || offset > srom->total_size) {
293 offset = -EINVAL;
294 } else {
295 filp->f_pos = offset;
296 filp->f_version = 0;
297 }
298
299 mutex_unlock(&srom->lock);
300
301 return offset;
302}
303
304static ssize_t total_show(struct device *dev,
305 struct device_attribute *attr, char *buf)
306{
307 struct srom_dev *srom = dev_get_drvdata(dev);
308 return sprintf(buf, "%u\n", srom->total_size);
309}
310
311static ssize_t sector_show(struct device *dev,
312 struct device_attribute *attr, char *buf)
313{
314 struct srom_dev *srom = dev_get_drvdata(dev);
315 return sprintf(buf, "%u\n", srom->sector_size);
316}
317
318static ssize_t page_show(struct device *dev,
319 struct device_attribute *attr, char *buf)
320{
321 struct srom_dev *srom = dev_get_drvdata(dev);
322 return sprintf(buf, "%u\n", srom->page_size);
323}
324
325static struct device_attribute srom_dev_attrs[] = {
326 __ATTR(total_size, S_IRUGO, total_show, NULL),
327 __ATTR(sector_size, S_IRUGO, sector_show, NULL),
328 __ATTR(page_size, S_IRUGO, page_show, NULL),
329 __ATTR_NULL
330};
331
332static char *srom_devnode(struct device *dev, mode_t *mode)
333{
334 *mode = S_IRUGO | S_IWUSR;
335 return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev));
336}
337
338/*
339 * The fops
340 */
341static const struct file_operations srom_fops = {
342 .owner = THIS_MODULE,
343 .llseek = srom_llseek,
344 .read = srom_read,
345 .write = srom_write,
346 .open = srom_open,
347 .release = srom_release,
348};
349
350/**
351 * srom_setup_minor() - Initialize per-minor information.
352 * @srom: Per-device SROM state.
353 * @index: Device to set up.
354 */
355static int srom_setup_minor(struct srom_dev *srom, int index)
356{
357 struct device *dev;
358 int devhdl = srom->hv_devhdl;
359
360 mutex_init(&srom->lock);
361
362 if (_srom_read(devhdl, &srom->total_size,
363 SROM_TOTAL_SIZE_OFF, sizeof(srom->total_size)) < 0)
364 return -EIO;
365 if (_srom_read(devhdl, &srom->sector_size,
366 SROM_SECTOR_SIZE_OFF, sizeof(srom->sector_size)) < 0)
367 return -EIO;
368 if (_srom_read(devhdl, &srom->page_size,
369 SROM_PAGE_SIZE_OFF, sizeof(srom->page_size)) < 0)
370 return -EIO;
371
372 dev = device_create(srom_class, &platform_bus,
373 MKDEV(srom_major, index), srom, "%d", index);
374 return IS_ERR(dev) ? PTR_ERR(dev) : 0;
375}
376
377/** srom_init() - Initialize the driver's module. */
378static int srom_init(void)
379{
380 int result, i;
381 dev_t dev = MKDEV(srom_major, 0);
382
383 /*
384 * Start with a plausible number of partitions; the krealloc() call
385 * below will yield about log(srom_devs) additional allocations.
386 */
387 srom_devices = kzalloc(4 * sizeof(struct srom_dev), GFP_KERNEL);
388
389 /* Discover the number of srom partitions. */
390 for (i = 0; ; i++) {
391 int devhdl;
392 char buf[20];
393 struct srom_dev *new_srom_devices =
394 krealloc(srom_devices, (i+1) * sizeof(struct srom_dev),
395 GFP_KERNEL | __GFP_ZERO);
396 if (!new_srom_devices) {
397 result = -ENOMEM;
398 goto fail_mem;
399 }
400 srom_devices = new_srom_devices;
401 sprintf(buf, "srom/0/%d", i);
402 devhdl = hv_dev_open((HV_VirtAddr)buf, 0);
403 if (devhdl < 0) {
404 if (devhdl != HV_ENODEV)
405 pr_notice("srom/%d: hv_dev_open failed: %d.\n",
406 i, devhdl);
407 break;
408 }
409 srom_devices[i].hv_devhdl = devhdl;
410 }
411 srom_devs = i;
412
413 /* Bail out early if we have no partitions at all. */
414 if (srom_devs == 0) {
415 result = -ENODEV;
416 goto fail_mem;
417 }
418
419 /* Register our major, and accept a dynamic number. */
420 if (srom_major)
421 result = register_chrdev_region(dev, srom_devs, "srom");
422 else {
423 result = alloc_chrdev_region(&dev, 0, srom_devs, "srom");
424 srom_major = MAJOR(dev);
425 }
426 if (result < 0)
427 goto fail_mem;
428
429 /* Register a character device. */
430 cdev_init(&srom_cdev, &srom_fops);
431 srom_cdev.owner = THIS_MODULE;
432 srom_cdev.ops = &srom_fops;
433 result = cdev_add(&srom_cdev, dev, srom_devs);
434 if (result < 0)
435 goto fail_chrdev;
436
437 /* Create a sysfs class. */
438 srom_class = class_create(THIS_MODULE, "srom");
439 if (IS_ERR(srom_class)) {
440 result = PTR_ERR(srom_class);
441 goto fail_cdev;
442 }
443 srom_class->dev_attrs = srom_dev_attrs;
444 srom_class->devnode = srom_devnode;
445
446 /* Do per-partition initialization */
447 for (i = 0; i < srom_devs; i++) {
448 result = srom_setup_minor(srom_devices + i, i);
449 if (result < 0)
450 goto fail_class;
451 }
452
453 return 0;
454
455fail_class:
456 for (i = 0; i < srom_devs; i++)
457 device_destroy(srom_class, MKDEV(srom_major, i));
458 class_destroy(srom_class);
459fail_cdev:
460 cdev_del(&srom_cdev);
461fail_chrdev:
462 unregister_chrdev_region(dev, srom_devs);
463fail_mem:
464 kfree(srom_devices);
465 return result;
466}
467
468/** srom_cleanup() - Clean up the driver's module. */
469static void srom_cleanup(void)
470{
471 int i;
472 for (i = 0; i < srom_devs; i++)
473 device_destroy(srom_class, MKDEV(srom_major, i));
474 class_destroy(srom_class);
475 cdev_del(&srom_cdev);
476 unregister_chrdev_region(MKDEV(srom_major, 0), srom_devs);
477 kfree(srom_devices);
478}
479
480module_init(srom_init);
481module_exit(srom_cleanup);
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 7fc2f108f490..3f4051a7c5a7 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -80,7 +80,7 @@ enum tis_defaults {
80static LIST_HEAD(tis_chips); 80static LIST_HEAD(tis_chips);
81static DEFINE_SPINLOCK(tis_lock); 81static DEFINE_SPINLOCK(tis_lock);
82 82
83#ifdef CONFIG_PNP 83#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
84static int is_itpm(struct pnp_dev *dev) 84static int is_itpm(struct pnp_dev *dev)
85{ 85{
86 struct acpi_device *acpi = pnp_acpi_device(dev); 86 struct acpi_device *acpi = pnp_acpi_device(dev);
@@ -93,6 +93,11 @@ static int is_itpm(struct pnp_dev *dev)
93 93
94 return 0; 94 return 0;
95} 95}
96#else
97static inline int is_itpm(struct pnp_dev *dev)
98{
99 return 0;
100}
96#endif 101#endif
97 102
98static int check_locality(struct tpm_chip *chip, int l) 103static int check_locality(struct tpm_chip *chip, int l)
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index dc7c033ef587..32a77becc098 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -26,6 +26,7 @@
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/irq.h> 27#include <linux/irq.h>
28#include <linux/err.h> 28#include <linux/err.h>
29#include <linux/delay.h>
29#include <linux/clocksource.h> 30#include <linux/clocksource.h>
30#include <linux/clockchips.h> 31#include <linux/clockchips.h>
31#include <linux/sh_timer.h> 32#include <linux/sh_timer.h>
@@ -150,13 +151,13 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
150 151
151static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) 152static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
152{ 153{
153 int ret; 154 int k, ret;
154 155
155 /* enable clock */ 156 /* enable clock */
156 ret = clk_enable(p->clk); 157 ret = clk_enable(p->clk);
157 if (ret) { 158 if (ret) {
158 dev_err(&p->pdev->dev, "cannot enable clock\n"); 159 dev_err(&p->pdev->dev, "cannot enable clock\n");
159 return ret; 160 goto err0;
160 } 161 }
161 162
162 /* make sure channel is disabled */ 163 /* make sure channel is disabled */
@@ -174,9 +175,38 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
174 sh_cmt_write(p, CMCOR, 0xffffffff); 175 sh_cmt_write(p, CMCOR, 0xffffffff);
175 sh_cmt_write(p, CMCNT, 0); 176 sh_cmt_write(p, CMCNT, 0);
176 177
178 /*
179 * According to the sh73a0 user's manual, as CMCNT can be operated
180 * only by the RCLK (Pseudo 32 KHz), there's one restriction on
181 * modifying CMCNT register; two RCLK cycles are necessary before
182 * this register is either read or any modification of the value
183 * it holds is reflected in the LSI's actual operation.
184 *
185 * While at it, we're supposed to clear out the CMCNT as of this
186 * moment, so make sure it's processed properly here. This will
187 * take RCLKx2 at maximum.
188 */
189 for (k = 0; k < 100; k++) {
190 if (!sh_cmt_read(p, CMCNT))
191 break;
192 udelay(1);
193 }
194
195 if (sh_cmt_read(p, CMCNT)) {
196 dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
197 ret = -ETIMEDOUT;
198 goto err1;
199 }
200
177 /* enable channel */ 201 /* enable channel */
178 sh_cmt_start_stop_ch(p, 1); 202 sh_cmt_start_stop_ch(p, 1);
179 return 0; 203 return 0;
204 err1:
205 /* stop clock */
206 clk_disable(p->clk);
207
208 err0:
209 return ret;
180} 210}
181 211
182static void sh_cmt_disable(struct sh_cmt_priv *p) 212static void sh_cmt_disable(struct sh_cmt_priv *p)
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 3ee1fdb31ea7..e55814bc0d06 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -57,6 +57,7 @@ void proc_fork_connector(struct task_struct *task)
57 struct proc_event *ev; 57 struct proc_event *ev;
58 __u8 buffer[CN_PROC_MSG_SIZE]; 58 __u8 buffer[CN_PROC_MSG_SIZE];
59 struct timespec ts; 59 struct timespec ts;
60 struct task_struct *parent;
60 61
61 if (atomic_read(&proc_event_num_listeners) < 1) 62 if (atomic_read(&proc_event_num_listeners) < 1)
62 return; 63 return;
@@ -67,8 +68,11 @@ void proc_fork_connector(struct task_struct *task)
67 ktime_get_ts(&ts); /* get high res monotonic timestamp */ 68 ktime_get_ts(&ts); /* get high res monotonic timestamp */
68 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); 69 put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
69 ev->what = PROC_EVENT_FORK; 70 ev->what = PROC_EVENT_FORK;
70 ev->event_data.fork.parent_pid = task->real_parent->pid; 71 rcu_read_lock();
71 ev->event_data.fork.parent_tgid = task->real_parent->tgid; 72 parent = rcu_dereference(task->real_parent);
73 ev->event_data.fork.parent_pid = parent->pid;
74 ev->event_data.fork.parent_tgid = parent->tgid;
75 rcu_read_unlock();
72 ev->event_data.fork.child_pid = task->pid; 76 ev->event_data.fork.child_pid = task->pid;
73 ev->event_data.fork.child_tgid = task->tgid; 77 ev->event_data.fork.child_tgid = task->tgid;
74 78
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index bf5092455a8f..d4c542372886 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -25,9 +25,19 @@ DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
25 25
26DEFINE_MUTEX(cpuidle_lock); 26DEFINE_MUTEX(cpuidle_lock);
27LIST_HEAD(cpuidle_detected_devices); 27LIST_HEAD(cpuidle_detected_devices);
28static void (*pm_idle_old)(void);
29 28
30static int enabled_devices; 29static int enabled_devices;
30static int off __read_mostly;
31static int initialized __read_mostly;
32
33int cpuidle_disabled(void)
34{
35 return off;
36}
37void disable_cpuidle(void)
38{
39 off = 1;
40}
31 41
32#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT) 42#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
33static void cpuidle_kick_cpus(void) 43static void cpuidle_kick_cpus(void)
@@ -46,25 +56,23 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
46 * cpuidle_idle_call - the main idle loop 56 * cpuidle_idle_call - the main idle loop
47 * 57 *
48 * NOTE: no locks or semaphores should be used here 58 * NOTE: no locks or semaphores should be used here
59 * return non-zero on failure
49 */ 60 */
50static void cpuidle_idle_call(void) 61int cpuidle_idle_call(void)
51{ 62{
52 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); 63 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
53 struct cpuidle_state *target_state; 64 struct cpuidle_state *target_state;
54 int next_state; 65 int next_state;
55 66
67 if (off)
68 return -ENODEV;
69
70 if (!initialized)
71 return -ENODEV;
72
56 /* check if the device is ready */ 73 /* check if the device is ready */
57 if (!dev || !dev->enabled) { 74 if (!dev || !dev->enabled)
58 if (pm_idle_old) 75 return -EBUSY;
59 pm_idle_old();
60 else
61#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
62 default_idle();
63#else
64 local_irq_enable();
65#endif
66 return;
67 }
68 76
69#if 0 77#if 0
70 /* shows regressions, re-enable for 2.6.29 */ 78 /* shows regressions, re-enable for 2.6.29 */
@@ -89,7 +97,7 @@ static void cpuidle_idle_call(void)
89 next_state = cpuidle_curr_governor->select(dev); 97 next_state = cpuidle_curr_governor->select(dev);
90 if (need_resched()) { 98 if (need_resched()) {
91 local_irq_enable(); 99 local_irq_enable();
92 return; 100 return 0;
93 } 101 }
94 102
95 target_state = &dev->states[next_state]; 103 target_state = &dev->states[next_state];
@@ -114,6 +122,8 @@ static void cpuidle_idle_call(void)
114 /* give the governor an opportunity to reflect on the outcome */ 122 /* give the governor an opportunity to reflect on the outcome */
115 if (cpuidle_curr_governor->reflect) 123 if (cpuidle_curr_governor->reflect)
116 cpuidle_curr_governor->reflect(dev); 124 cpuidle_curr_governor->reflect(dev);
125
126 return 0;
117} 127}
118 128
119/** 129/**
@@ -121,10 +131,10 @@ static void cpuidle_idle_call(void)
121 */ 131 */
122void cpuidle_install_idle_handler(void) 132void cpuidle_install_idle_handler(void)
123{ 133{
124 if (enabled_devices && (pm_idle != cpuidle_idle_call)) { 134 if (enabled_devices) {
125 /* Make sure all changes finished before we switch to new idle */ 135 /* Make sure all changes finished before we switch to new idle */
126 smp_wmb(); 136 smp_wmb();
127 pm_idle = cpuidle_idle_call; 137 initialized = 1;
128 } 138 }
129} 139}
130 140
@@ -133,8 +143,8 @@ void cpuidle_install_idle_handler(void)
133 */ 143 */
134void cpuidle_uninstall_idle_handler(void) 144void cpuidle_uninstall_idle_handler(void)
135{ 145{
136 if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) { 146 if (enabled_devices) {
137 pm_idle = pm_idle_old; 147 initialized = 0;
138 cpuidle_kick_cpus(); 148 cpuidle_kick_cpus();
139 } 149 }
140} 150}
@@ -427,7 +437,8 @@ static int __init cpuidle_init(void)
427{ 437{
428 int ret; 438 int ret;
429 439
430 pm_idle_old = pm_idle; 440 if (cpuidle_disabled())
441 return -ENODEV;
431 442
432 ret = cpuidle_add_class_sysfs(&cpu_sysdev_class); 443 ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
433 if (ret) 444 if (ret)
@@ -438,4 +449,5 @@ static int __init cpuidle_init(void)
438 return 0; 449 return 0;
439} 450}
440 451
452module_param(off, int, 0444);
441core_initcall(cpuidle_init); 453core_initcall(cpuidle_init);
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 33e50d556f17..38c3fd8b9d76 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -13,6 +13,7 @@ extern struct list_head cpuidle_governors;
13extern struct list_head cpuidle_detected_devices; 13extern struct list_head cpuidle_detected_devices;
14extern struct mutex cpuidle_lock; 14extern struct mutex cpuidle_lock;
15extern spinlock_t cpuidle_driver_lock; 15extern spinlock_t cpuidle_driver_lock;
16extern int cpuidle_disabled(void);
16 17
17/* idle loop */ 18/* idle loop */
18extern void cpuidle_install_idle_handler(void); 19extern void cpuidle_install_idle_handler(void);
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index fd1601e3d125..3f7e3cedd133 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -26,6 +26,9 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
26 if (!drv) 26 if (!drv)
27 return -EINVAL; 27 return -EINVAL;
28 28
29 if (cpuidle_disabled())
30 return -ENODEV;
31
29 spin_lock(&cpuidle_driver_lock); 32 spin_lock(&cpuidle_driver_lock);
30 if (cpuidle_curr_driver) { 33 if (cpuidle_curr_driver) {
31 spin_unlock(&cpuidle_driver_lock); 34 spin_unlock(&cpuidle_driver_lock);
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 724c164d31c9..ea2f8e7aa24a 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -81,6 +81,9 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
81 if (!gov || !gov->select) 81 if (!gov || !gov->select)
82 return -EINVAL; 82 return -EINVAL;
83 83
84 if (cpuidle_disabled())
85 return -ENODEV;
86
84 mutex_lock(&cpuidle_lock); 87 mutex_lock(&cpuidle_lock);
85 if (__cpuidle_find_governor(gov->name) == NULL) { 88 if (__cpuidle_find_governor(gov->name) == NULL) {
86 ret = 0; 89 ret = 0;
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
index a4af8589330c..734ed0206cd5 100644
--- a/drivers/dma/TODO
+++ b/drivers/dma/TODO
@@ -9,6 +9,5 @@ TODO for slave dma
9 - mxs-dma.c 9 - mxs-dma.c
10 - dw_dmac 10 - dw_dmac
11 - intel_mid_dma 11 - intel_mid_dma
12 - ste_dma40
134. Check other subsystems for dma drivers and merge/move to dmaengine 124. Check other subsystems for dma drivers and merge/move to dmaengine
145. Remove dma_slave_config's dma direction. 135. Remove dma_slave_config's dma direction.
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index e6d7228b1479..be21e3f138a8 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -80,6 +80,7 @@
80#include <linux/interrupt.h> 80#include <linux/interrupt.h>
81#include <linux/slab.h> 81#include <linux/slab.h>
82#include <linux/delay.h> 82#include <linux/delay.h>
83#include <linux/dma-mapping.h>
83#include <linux/dmapool.h> 84#include <linux/dmapool.h>
84#include <linux/dmaengine.h> 85#include <linux/dmaengine.h>
85#include <linux/amba/bus.h> 86#include <linux/amba/bus.h>
@@ -156,14 +157,10 @@ struct pl08x_driver_data {
156#define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ 157#define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */
157#define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) 158#define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT)
158 159
159/* Minimum period between work queue runs */
160#define PL08X_WQ_PERIODMIN 20
161
162/* Size (bytes) of each LLI buffer allocated for one transfer */ 160/* Size (bytes) of each LLI buffer allocated for one transfer */
163# define PL08X_LLI_TSFR_SIZE 0x2000 161# define PL08X_LLI_TSFR_SIZE 0x2000
164 162
165/* Maximum times we call dma_pool_alloc on this pool without freeing */ 163/* Maximum times we call dma_pool_alloc on this pool without freeing */
166#define PL08X_MAX_ALLOCS 0x40
167#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli)) 164#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
168#define PL08X_ALIGN 8 165#define PL08X_ALIGN 8
169 166
@@ -495,10 +492,10 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
495 492
496struct pl08x_lli_build_data { 493struct pl08x_lli_build_data {
497 struct pl08x_txd *txd; 494 struct pl08x_txd *txd;
498 struct pl08x_driver_data *pl08x;
499 struct pl08x_bus_data srcbus; 495 struct pl08x_bus_data srcbus;
500 struct pl08x_bus_data dstbus; 496 struct pl08x_bus_data dstbus;
501 size_t remainder; 497 size_t remainder;
498 u32 lli_bus;
502}; 499};
503 500
504/* 501/*
@@ -551,8 +548,7 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
551 llis_va[num_llis].src = bd->srcbus.addr; 548 llis_va[num_llis].src = bd->srcbus.addr;
552 llis_va[num_llis].dst = bd->dstbus.addr; 549 llis_va[num_llis].dst = bd->dstbus.addr;
553 llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); 550 llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
554 if (bd->pl08x->lli_buses & PL08X_AHB2) 551 llis_va[num_llis].lli |= bd->lli_bus;
555 llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
556 552
557 if (cctl & PL080_CONTROL_SRC_INCR) 553 if (cctl & PL080_CONTROL_SRC_INCR)
558 bd->srcbus.addr += len; 554 bd->srcbus.addr += len;
@@ -605,9 +601,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
605 cctl = txd->cctl; 601 cctl = txd->cctl;
606 602
607 bd.txd = txd; 603 bd.txd = txd;
608 bd.pl08x = pl08x;
609 bd.srcbus.addr = txd->src_addr; 604 bd.srcbus.addr = txd->src_addr;
610 bd.dstbus.addr = txd->dst_addr; 605 bd.dstbus.addr = txd->dst_addr;
606 bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
611 607
612 /* Find maximum width of the source bus */ 608 /* Find maximum width of the source bus */
613 bd.srcbus.maxwidth = 609 bd.srcbus.maxwidth =
@@ -622,25 +618,15 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
622 /* Set up the bus widths to the maximum */ 618 /* Set up the bus widths to the maximum */
623 bd.srcbus.buswidth = bd.srcbus.maxwidth; 619 bd.srcbus.buswidth = bd.srcbus.maxwidth;
624 bd.dstbus.buswidth = bd.dstbus.maxwidth; 620 bd.dstbus.buswidth = bd.dstbus.maxwidth;
625 dev_vdbg(&pl08x->adev->dev,
626 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
627 __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);
628
629 621
630 /* 622 /*
631 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) 623 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
632 */ 624 */
633 max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * 625 max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
634 PL080_CONTROL_TRANSFER_SIZE_MASK; 626 PL080_CONTROL_TRANSFER_SIZE_MASK;
635 dev_vdbg(&pl08x->adev->dev,
636 "%s max bytes per lli = %zu\n",
637 __func__, max_bytes_per_lli);
638 627
639 /* We need to count this down to zero */ 628 /* We need to count this down to zero */
640 bd.remainder = txd->len; 629 bd.remainder = txd->len;
641 dev_vdbg(&pl08x->adev->dev,
642 "%s remainder = %zu\n",
643 __func__, bd.remainder);
644 630
645 /* 631 /*
646 * Choose bus to align to 632 * Choose bus to align to
@@ -649,6 +635,16 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
649 */ 635 */
650 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); 636 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
651 637
638 dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n",
639 bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
640 bd.srcbus.buswidth,
641 bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
642 bd.dstbus.buswidth,
643 bd.remainder, max_bytes_per_lli);
644 dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
645 mbus == &bd.srcbus ? "src" : "dst",
646 sbus == &bd.srcbus ? "src" : "dst");
647
652 if (txd->len < mbus->buswidth) { 648 if (txd->len < mbus->buswidth) {
653 /* Less than a bus width available - send as single bytes */ 649 /* Less than a bus width available - send as single bytes */
654 while (bd.remainder) { 650 while (bd.remainder) {
@@ -840,15 +836,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
840 { 836 {
841 int i; 837 int i;
842 838
839 dev_vdbg(&pl08x->adev->dev,
840 "%-3s %-9s %-10s %-10s %-10s %s\n",
841 "lli", "", "csrc", "cdst", "clli", "cctl");
843 for (i = 0; i < num_llis; i++) { 842 for (i = 0; i < num_llis; i++) {
844 dev_vdbg(&pl08x->adev->dev, 843 dev_vdbg(&pl08x->adev->dev,
845 "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n", 844 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
846 i, 845 i, &llis_va[i], llis_va[i].src,
847 &llis_va[i], 846 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
848 llis_va[i].src,
849 llis_va[i].dst,
850 llis_va[i].cctl,
851 llis_va[i].lli
852 ); 847 );
853 } 848 }
854 } 849 }
@@ -1054,64 +1049,105 @@ pl08x_dma_tx_status(struct dma_chan *chan,
1054 1049
1055/* PrimeCell DMA extension */ 1050/* PrimeCell DMA extension */
1056struct burst_table { 1051struct burst_table {
1057 int burstwords; 1052 u32 burstwords;
1058 u32 reg; 1053 u32 reg;
1059}; 1054};
1060 1055
1061static const struct burst_table burst_sizes[] = { 1056static const struct burst_table burst_sizes[] = {
1062 { 1057 {
1063 .burstwords = 256, 1058 .burstwords = 256,
1064 .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) | 1059 .reg = PL080_BSIZE_256,
1065 (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
1066 }, 1060 },
1067 { 1061 {
1068 .burstwords = 128, 1062 .burstwords = 128,
1069 .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) | 1063 .reg = PL080_BSIZE_128,
1070 (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
1071 }, 1064 },
1072 { 1065 {
1073 .burstwords = 64, 1066 .burstwords = 64,
1074 .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) | 1067 .reg = PL080_BSIZE_64,
1075 (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
1076 }, 1068 },
1077 { 1069 {
1078 .burstwords = 32, 1070 .burstwords = 32,
1079 .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) | 1071 .reg = PL080_BSIZE_32,
1080 (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
1081 }, 1072 },
1082 { 1073 {
1083 .burstwords = 16, 1074 .burstwords = 16,
1084 .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) | 1075 .reg = PL080_BSIZE_16,
1085 (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
1086 }, 1076 },
1087 { 1077 {
1088 .burstwords = 8, 1078 .burstwords = 8,
1089 .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) | 1079 .reg = PL080_BSIZE_8,
1090 (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
1091 }, 1080 },
1092 { 1081 {
1093 .burstwords = 4, 1082 .burstwords = 4,
1094 .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) | 1083 .reg = PL080_BSIZE_4,
1095 (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
1096 }, 1084 },
1097 { 1085 {
1098 .burstwords = 1, 1086 .burstwords = 0,
1099 .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | 1087 .reg = PL080_BSIZE_1,
1100 (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
1101 }, 1088 },
1102}; 1089};
1103 1090
1091/*
1092 * Given the source and destination available bus masks, select which
1093 * will be routed to each port. We try to have source and destination
1094 * on separate ports, but always respect the allowable settings.
1095 */
1096static u32 pl08x_select_bus(u8 src, u8 dst)
1097{
1098 u32 cctl = 0;
1099
1100 if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
1101 cctl |= PL080_CONTROL_DST_AHB2;
1102 if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
1103 cctl |= PL080_CONTROL_SRC_AHB2;
1104
1105 return cctl;
1106}
1107
1108static u32 pl08x_cctl(u32 cctl)
1109{
1110 cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
1111 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
1112 PL080_CONTROL_PROT_MASK);
1113
1114 /* Access the cell in privileged mode, non-bufferable, non-cacheable */
1115 return cctl | PL080_CONTROL_PROT_SYS;
1116}
1117
1118static u32 pl08x_width(enum dma_slave_buswidth width)
1119{
1120 switch (width) {
1121 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1122 return PL080_WIDTH_8BIT;
1123 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1124 return PL080_WIDTH_16BIT;
1125 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1126 return PL080_WIDTH_32BIT;
1127 default:
1128 return ~0;
1129 }
1130}
1131
1132static u32 pl08x_burst(u32 maxburst)
1133{
1134 int i;
1135
1136 for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
1137 if (burst_sizes[i].burstwords <= maxburst)
1138 break;
1139
1140 return burst_sizes[i].reg;
1141}
1142
1104static int dma_set_runtime_config(struct dma_chan *chan, 1143static int dma_set_runtime_config(struct dma_chan *chan,
1105 struct dma_slave_config *config) 1144 struct dma_slave_config *config)
1106{ 1145{
1107 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1146 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1108 struct pl08x_driver_data *pl08x = plchan->host; 1147 struct pl08x_driver_data *pl08x = plchan->host;
1109 struct pl08x_channel_data *cd = plchan->cd;
1110 enum dma_slave_buswidth addr_width; 1148 enum dma_slave_buswidth addr_width;
1111 dma_addr_t addr; 1149 u32 width, burst, maxburst;
1112 u32 maxburst;
1113 u32 cctl = 0; 1150 u32 cctl = 0;
1114 int i;
1115 1151
1116 if (!plchan->slave) 1152 if (!plchan->slave)
1117 return -EINVAL; 1153 return -EINVAL;
@@ -1119,11 +1155,9 @@ static int dma_set_runtime_config(struct dma_chan *chan,
1119 /* Transfer direction */ 1155 /* Transfer direction */
1120 plchan->runtime_direction = config->direction; 1156 plchan->runtime_direction = config->direction;
1121 if (config->direction == DMA_TO_DEVICE) { 1157 if (config->direction == DMA_TO_DEVICE) {
1122 addr = config->dst_addr;
1123 addr_width = config->dst_addr_width; 1158 addr_width = config->dst_addr_width;
1124 maxburst = config->dst_maxburst; 1159 maxburst = config->dst_maxburst;
1125 } else if (config->direction == DMA_FROM_DEVICE) { 1160 } else if (config->direction == DMA_FROM_DEVICE) {
1126 addr = config->src_addr;
1127 addr_width = config->src_addr_width; 1161 addr_width = config->src_addr_width;
1128 maxburst = config->src_maxburst; 1162 maxburst = config->src_maxburst;
1129 } else { 1163 } else {
@@ -1132,46 +1166,40 @@ static int dma_set_runtime_config(struct dma_chan *chan,
1132 return -EINVAL; 1166 return -EINVAL;
1133 } 1167 }
1134 1168
1135 switch (addr_width) { 1169 width = pl08x_width(addr_width);
1136 case DMA_SLAVE_BUSWIDTH_1_BYTE: 1170 if (width == ~0) {
1137 cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
1138 (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
1139 break;
1140 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1141 cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
1142 (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
1143 break;
1144 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1145 cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
1146 (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
1147 break;
1148 default:
1149 dev_err(&pl08x->adev->dev, 1171 dev_err(&pl08x->adev->dev,
1150 "bad runtime_config: alien address width\n"); 1172 "bad runtime_config: alien address width\n");
1151 return -EINVAL; 1173 return -EINVAL;
1152 } 1174 }
1153 1175
1176 cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
1177 cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
1178
1154 /* 1179 /*
1155 * Now decide on a maxburst:
1156 * If this channel will only request single transfers, set this 1180 * If this channel will only request single transfers, set this
1157 * down to ONE element. Also select one element if no maxburst 1181 * down to ONE element. Also select one element if no maxburst
1158 * is specified. 1182 * is specified.
1159 */ 1183 */
1160 if (plchan->cd->single || maxburst == 0) { 1184 if (plchan->cd->single)
1161 cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | 1185 maxburst = 1;
1162 (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT); 1186
1187 burst = pl08x_burst(maxburst);
1188 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
1189 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
1190
1191 if (plchan->runtime_direction == DMA_FROM_DEVICE) {
1192 plchan->src_addr = config->src_addr;
1193 plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
1194 pl08x_select_bus(plchan->cd->periph_buses,
1195 pl08x->mem_buses);
1163 } else { 1196 } else {
1164 for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) 1197 plchan->dst_addr = config->dst_addr;
1165 if (burst_sizes[i].burstwords <= maxburst) 1198 plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
1166 break; 1199 pl08x_select_bus(pl08x->mem_buses,
1167 cctl |= burst_sizes[i].reg; 1200 plchan->cd->periph_buses);
1168 } 1201 }
1169 1202
1170 plchan->runtime_addr = addr;
1171
1172 /* Modify the default channel data to fit PrimeCell request */
1173 cd->cctl = cctl;
1174
1175 dev_dbg(&pl08x->adev->dev, 1203 dev_dbg(&pl08x->adev->dev,
1176 "configured channel %s (%s) for %s, data width %d, " 1204 "configured channel %s (%s) for %s, data width %d, "
1177 "maxburst %d words, LE, CCTL=0x%08x\n", 1205 "maxburst %d words, LE, CCTL=0x%08x\n",
@@ -1270,23 +1298,6 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
1270 return 0; 1298 return 0;
1271} 1299}
1272 1300
1273/*
1274 * Given the source and destination available bus masks, select which
1275 * will be routed to each port. We try to have source and destination
1276 * on separate ports, but always respect the allowable settings.
1277 */
1278static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
1279{
1280 u32 cctl = 0;
1281
1282 if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
1283 cctl |= PL080_CONTROL_DST_AHB2;
1284 if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
1285 cctl |= PL080_CONTROL_SRC_AHB2;
1286
1287 return cctl;
1288}
1289
1290static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, 1301static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
1291 unsigned long flags) 1302 unsigned long flags)
1292{ 1303{
@@ -1338,8 +1349,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1338 txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; 1349 txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
1339 1350
1340 if (pl08x->vd->dualmaster) 1351 if (pl08x->vd->dualmaster)
1341 txd->cctl |= pl08x_select_bus(pl08x, 1352 txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
1342 pl08x->mem_buses, pl08x->mem_buses); 1353 pl08x->mem_buses);
1343 1354
1344 ret = pl08x_prep_channel_resources(plchan, txd); 1355 ret = pl08x_prep_channel_resources(plchan, txd);
1345 if (ret) 1356 if (ret)
@@ -1356,7 +1367,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1356 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1367 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1357 struct pl08x_driver_data *pl08x = plchan->host; 1368 struct pl08x_driver_data *pl08x = plchan->host;
1358 struct pl08x_txd *txd; 1369 struct pl08x_txd *txd;
1359 u8 src_buses, dst_buses;
1360 int ret; 1370 int ret;
1361 1371
1362 /* 1372 /*
@@ -1390,42 +1400,22 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1390 txd->direction = direction; 1400 txd->direction = direction;
1391 txd->len = sgl->length; 1401 txd->len = sgl->length;
1392 1402
1393 txd->cctl = plchan->cd->cctl &
1394 ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
1395 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
1396 PL080_CONTROL_PROT_MASK);
1397
1398 /* Access the cell in privileged mode, non-bufferable, non-cacheable */
1399 txd->cctl |= PL080_CONTROL_PROT_SYS;
1400
1401 if (direction == DMA_TO_DEVICE) { 1403 if (direction == DMA_TO_DEVICE) {
1402 txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1404 txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1403 txd->cctl |= PL080_CONTROL_SRC_INCR; 1405 txd->cctl = plchan->dst_cctl;
1404 txd->src_addr = sgl->dma_address; 1406 txd->src_addr = sgl->dma_address;
1405 if (plchan->runtime_addr) 1407 txd->dst_addr = plchan->dst_addr;
1406 txd->dst_addr = plchan->runtime_addr;
1407 else
1408 txd->dst_addr = plchan->cd->addr;
1409 src_buses = pl08x->mem_buses;
1410 dst_buses = plchan->cd->periph_buses;
1411 } else if (direction == DMA_FROM_DEVICE) { 1408 } else if (direction == DMA_FROM_DEVICE) {
1412 txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1409 txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1413 txd->cctl |= PL080_CONTROL_DST_INCR; 1410 txd->cctl = plchan->src_cctl;
1414 if (plchan->runtime_addr) 1411 txd->src_addr = plchan->src_addr;
1415 txd->src_addr = plchan->runtime_addr;
1416 else
1417 txd->src_addr = plchan->cd->addr;
1418 txd->dst_addr = sgl->dma_address; 1412 txd->dst_addr = sgl->dma_address;
1419 src_buses = plchan->cd->periph_buses;
1420 dst_buses = pl08x->mem_buses;
1421 } else { 1413 } else {
1422 dev_err(&pl08x->adev->dev, 1414 dev_err(&pl08x->adev->dev,
1423 "%s direction unsupported\n", __func__); 1415 "%s direction unsupported\n", __func__);
1424 return NULL; 1416 return NULL;
1425 } 1417 }
1426 1418
1427 txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);
1428
1429 ret = pl08x_prep_channel_resources(plchan, txd); 1419 ret = pl08x_prep_channel_resources(plchan, txd);
1430 if (ret) 1420 if (ret)
1431 return NULL; 1421 return NULL;
@@ -1676,6 +1666,20 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
1676 return mask ? IRQ_HANDLED : IRQ_NONE; 1666 return mask ? IRQ_HANDLED : IRQ_NONE;
1677} 1667}
1678 1668
1669static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
1670{
1671 u32 cctl = pl08x_cctl(chan->cd->cctl);
1672
1673 chan->slave = true;
1674 chan->name = chan->cd->bus_id;
1675 chan->src_addr = chan->cd->addr;
1676 chan->dst_addr = chan->cd->addr;
1677 chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
1678 pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
1679 chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
1680 pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
1681}
1682
1679/* 1683/*
1680 * Initialise the DMAC memcpy/slave channels. 1684 * Initialise the DMAC memcpy/slave channels.
1681 * Make a local wrapper to hold required data 1685 * Make a local wrapper to hold required data
@@ -1707,9 +1711,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1707 chan->state = PL08X_CHAN_IDLE; 1711 chan->state = PL08X_CHAN_IDLE;
1708 1712
1709 if (slave) { 1713 if (slave) {
1710 chan->slave = true;
1711 chan->name = pl08x->pd->slave_channels[i].bus_id;
1712 chan->cd = &pl08x->pd->slave_channels[i]; 1714 chan->cd = &pl08x->pd->slave_channels[i];
1715 pl08x_dma_slave_init(chan);
1713 } else { 1716 } else {
1714 chan->cd = &pl08x->pd->memcpy_channel; 1717 chan->cd = &pl08x->pd->memcpy_channel;
1715 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); 1718 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 36144f88d718..6a483eac7b3f 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1216,7 +1216,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
1216 atdma->dma_common.cap_mask = pdata->cap_mask; 1216 atdma->dma_common.cap_mask = pdata->cap_mask;
1217 atdma->all_chan_mask = (1 << pdata->nr_channels) - 1; 1217 atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
1218 1218
1219 size = io->end - io->start + 1; 1219 size = resource_size(io);
1220 if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { 1220 if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1221 err = -EBUSY; 1221 err = -EBUSY;
1222 goto err_kfree; 1222 goto err_kfree;
@@ -1362,7 +1362,7 @@ static int __exit at_dma_remove(struct platform_device *pdev)
1362 atdma->regs = NULL; 1362 atdma->regs = NULL;
1363 1363
1364 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1364 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1365 release_mem_region(io->start, io->end - io->start + 1); 1365 release_mem_region(io->start, resource_size(io));
1366 1366
1367 kfree(atdma); 1367 kfree(atdma);
1368 1368
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index a92d95eac86b..4234f416ef11 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -41,6 +41,8 @@ struct coh901318_desc {
41 struct coh901318_lli *lli; 41 struct coh901318_lli *lli;
42 enum dma_data_direction dir; 42 enum dma_data_direction dir;
43 unsigned long flags; 43 unsigned long flags;
44 u32 head_config;
45 u32 head_ctrl;
44}; 46};
45 47
46struct coh901318_base { 48struct coh901318_base {
@@ -661,6 +663,9 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
661 663
662 coh901318_desc_submit(cohc, cohd); 664 coh901318_desc_submit(cohc, cohd);
663 665
666 /* Program the transaction head */
667 coh901318_set_conf(cohc, cohd->head_config);
668 coh901318_set_ctrl(cohc, cohd->head_ctrl);
664 coh901318_prep_linked_list(cohc, cohd->lli); 669 coh901318_prep_linked_list(cohc, cohd->lli);
665 670
666 /* start dma job on this channel */ 671 /* start dma job on this channel */
@@ -1091,8 +1096,6 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1091 } else 1096 } else
1092 goto err_direction; 1097 goto err_direction;
1093 1098
1094 coh901318_set_conf(cohc, config);
1095
1096 /* The dma only supports transmitting packages up to 1099 /* The dma only supports transmitting packages up to
1097 * MAX_DMA_PACKET_SIZE. Calculate to total number of 1100 * MAX_DMA_PACKET_SIZE. Calculate to total number of
1098 * dma elemts required to send the entire sg list 1101 * dma elemts required to send the entire sg list
@@ -1129,16 +1132,18 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1129 if (ret) 1132 if (ret)
1130 goto err_lli_fill; 1133 goto err_lli_fill;
1131 1134
1132 /*
1133 * Set the default ctrl for the channel to the one from the lli,
1134 * things may have changed due to odd buffer alignment etc.
1135 */
1136 coh901318_set_ctrl(cohc, lli->control);
1137 1135
1138 COH_DBG(coh901318_list_print(cohc, lli)); 1136 COH_DBG(coh901318_list_print(cohc, lli));
1139 1137
1140 /* Pick a descriptor to handle this transfer */ 1138 /* Pick a descriptor to handle this transfer */
1141 cohd = coh901318_desc_get(cohc); 1139 cohd = coh901318_desc_get(cohc);
1140 cohd->head_config = config;
1141 /*
1142 * Set the default head ctrl for the channel to the one from the
1143 * lli, things may have changed due to odd buffer alignment
1144 * etc.
1145 */
1146 cohd->head_ctrl = lli->control;
1142 cohd->dir = direction; 1147 cohd->dir = direction;
1143 cohd->flags = flags; 1148 cohd->flags = flags;
1144 cohd->desc.tx_submit = coh901318_tx_submit; 1149 cohd->desc.tx_submit = coh901318_tx_submit;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 48694c34d96b..b48967b499da 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -62,9 +62,9 @@
62#include <linux/slab.h> 62#include <linux/slab.h>
63 63
64static DEFINE_MUTEX(dma_list_mutex); 64static DEFINE_MUTEX(dma_list_mutex);
65static DEFINE_IDR(dma_idr);
65static LIST_HEAD(dma_device_list); 66static LIST_HEAD(dma_device_list);
66static long dmaengine_ref_count; 67static long dmaengine_ref_count;
67static struct idr dma_idr;
68 68
69/* --- sysfs implementation --- */ 69/* --- sysfs implementation --- */
70 70
@@ -510,8 +510,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
510 dma_chan_name(chan)); 510 dma_chan_name(chan));
511 list_del_rcu(&device->global_node); 511 list_del_rcu(&device->global_node);
512 } else if (err) 512 } else if (err)
513 pr_err("dmaengine: failed to get %s: (%d)\n", 513 pr_debug("dmaengine: failed to get %s: (%d)\n",
514 dma_chan_name(chan), err); 514 dma_chan_name(chan), err);
515 else 515 else
516 break; 516 break;
517 if (--device->privatecnt == 0) 517 if (--device->privatecnt == 0)
@@ -1050,8 +1050,6 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies);
1050 1050
1051static int __init dma_bus_init(void) 1051static int __init dma_bus_init(void)
1052{ 1052{
1053 idr_init(&dma_idr);
1054 mutex_init(&dma_list_mutex);
1055 return class_register(&dma_devclass); 1053 return class_register(&dma_devclass);
1056} 1054}
1057arch_initcall(dma_bus_init); 1055arch_initcall(dma_bus_init);
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 0766c1e53b1d..5d7a49bd7c26 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -902,7 +902,7 @@ static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
902 * 902 *
903 * Returns a valid DMA descriptor or %NULL in case of failure. 903 * Returns a valid DMA descriptor or %NULL in case of failure.
904 */ 904 */
905struct dma_async_tx_descriptor * 905static struct dma_async_tx_descriptor *
906ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, 906ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
907 dma_addr_t src, size_t len, unsigned long flags) 907 dma_addr_t src, size_t len, unsigned long flags)
908{ 908{
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 1eb60ded2f0d..7bd7e98548cd 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1305,8 +1305,10 @@ static int __init sdma_probe(struct platform_device *pdev)
1305 goto err_request_irq; 1305 goto err_request_irq;
1306 1306
1307 sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); 1307 sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
1308 if (!sdma->script_addrs) 1308 if (!sdma->script_addrs) {
1309 ret = -ENOMEM;
1309 goto err_alloc; 1310 goto err_alloc;
1311 }
1310 1312
1311 if (of_id) 1313 if (of_id)
1312 pdev->id_entry = of_id->data; 1314 pdev->id_entry = of_id->data;
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index f653517ef744..8a3fdd87db97 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -1351,7 +1351,6 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
1351 return -EAGAIN; 1351 return -EAGAIN;
1352 } 1352 }
1353 device->state = SUSPENDED; 1353 device->state = SUSPENDED;
1354 pci_set_drvdata(pci, device);
1355 pci_save_state(pci); 1354 pci_save_state(pci);
1356 pci_disable_device(pci); 1355 pci_disable_device(pci);
1357 pci_set_power_state(pci, PCI_D3hot); 1356 pci_set_power_state(pci, PCI_D3hot);
@@ -1380,7 +1379,6 @@ int dma_resume(struct pci_dev *pci)
1380 } 1379 }
1381 device->state = RUNNING; 1380 device->state = RUNNING;
1382 iowrite32(REG_BIT0, device->dma_base + DMA_CFG); 1381 iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
1383 pci_set_drvdata(pci, device);
1384 return 0; 1382 return 0;
1385} 1383}
1386 1384
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index d845dc4b7103..f519c93a61e7 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -73,10 +73,10 @@
73/* provide a lookup table for setting the source address in the base or 73/* provide a lookup table for setting the source address in the base or
74 * extended descriptor of an xor or pq descriptor 74 * extended descriptor of an xor or pq descriptor
75 */ 75 */
76static const u8 xor_idx_to_desc __read_mostly = 0xd0; 76static const u8 xor_idx_to_desc = 0xe0;
77static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 }; 77static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
78static const u8 pq_idx_to_desc __read_mostly = 0xf8; 78static const u8 pq_idx_to_desc = 0xf8;
79static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 }; 79static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
80 80
81static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) 81static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
82{ 82{
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index fab37d1cf48d..5e3a40f79945 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -72,6 +72,17 @@ static struct pci_device_id ioat_pci_tbl[] = {
72 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) }, 72 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
73 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) }, 73 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },
74 74
75 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
76 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
77 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
78 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
79 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
80 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
81 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
82 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
83 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
84 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
85
75 { 0, } 86 { 0, }
76}; 87};
77MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); 88MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index fd7d2b308cf2..6815905a772f 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1706,16 +1706,14 @@ static int __init ipu_probe(struct platform_device *pdev)
1706 ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base); 1706 ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base);
1707 1707
1708 /* Remap IPU common registers */ 1708 /* Remap IPU common registers */
1709 ipu_data.reg_ipu = ioremap(mem_ipu->start, 1709 ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu));
1710 mem_ipu->end - mem_ipu->start + 1);
1711 if (!ipu_data.reg_ipu) { 1710 if (!ipu_data.reg_ipu) {
1712 ret = -ENOMEM; 1711 ret = -ENOMEM;
1713 goto err_ioremap_ipu; 1712 goto err_ioremap_ipu;
1714 } 1713 }
1715 1714
1716 /* Remap Image Converter and Image DMA Controller registers */ 1715 /* Remap Image Converter and Image DMA Controller registers */
1717 ipu_data.reg_ic = ioremap(mem_ic->start, 1716 ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic));
1718 mem_ic->end - mem_ic->start + 1);
1719 if (!ipu_data.reg_ic) { 1717 if (!ipu_data.reg_ic) {
1720 ret = -ENOMEM; 1718 ret = -ENOMEM;
1721 goto err_ioremap_ic; 1719 goto err_ioremap_ic;
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 06f9f27dbe7c..9a353c2216d0 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1304,7 +1304,8 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
1304 if (!res) 1304 if (!res)
1305 return -ENODEV; 1305 return -ENODEV;
1306 1306
1307 msp->xor_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); 1307 msp->xor_base = devm_ioremap(&pdev->dev, res->start,
1308 resource_size(res));
1308 if (!msp->xor_base) 1309 if (!msp->xor_base)
1309 return -EBUSY; 1310 return -EBUSY;
1310 1311
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 88aad4f54002..be641cbd36fc 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -327,10 +327,12 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
327 327
328 memset(mxs_chan->ccw, 0, PAGE_SIZE); 328 memset(mxs_chan->ccw, 0, PAGE_SIZE);
329 329
330 ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, 330 if (mxs_chan->chan_irq != NO_IRQ) {
331 0, "mxs-dma", mxs_dma); 331 ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
332 if (ret) 332 0, "mxs-dma", mxs_dma);
333 goto err_irq; 333 if (ret)
334 goto err_irq;
335 }
334 336
335 ret = clk_enable(mxs_dma->clk); 337 ret = clk_enable(mxs_dma->clk);
336 if (ret) 338 if (ret)
@@ -535,6 +537,7 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
535 switch (cmd) { 537 switch (cmd) {
536 case DMA_TERMINATE_ALL: 538 case DMA_TERMINATE_ALL:
537 mxs_dma_disable_chan(mxs_chan); 539 mxs_dma_disable_chan(mxs_chan);
540 mxs_dma_reset_chan(mxs_chan);
538 break; 541 break;
539 case DMA_PAUSE: 542 case DMA_PAUSE:
540 mxs_dma_pause_chan(mxs_chan); 543 mxs_dma_pause_chan(mxs_chan);
@@ -707,6 +710,8 @@ static struct platform_device_id mxs_dma_type[] = {
707 }, { 710 }, {
708 .name = "mxs-dma-apbx", 711 .name = "mxs-dma-apbx",
709 .driver_data = MXS_DMA_APBX, 712 .driver_data = MXS_DMA_APBX,
713 }, {
714 /* end of list */
710 } 715 }
711}; 716};
712 717
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index ff5b38f9d45b..1ac8d4b580b7 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -45,7 +45,8 @@
45#define DMA_STATUS_MASK_BITS 0x3 45#define DMA_STATUS_MASK_BITS 0x3
46#define DMA_STATUS_SHIFT_BITS 16 46#define DMA_STATUS_SHIFT_BITS 16
47#define DMA_STATUS_IRQ(x) (0x1 << (x)) 47#define DMA_STATUS_IRQ(x) (0x1 << (x))
48#define DMA_STATUS_ERR(x) (0x1 << ((x) + 8)) 48#define DMA_STATUS0_ERR(x) (0x1 << ((x) + 8))
49#define DMA_STATUS2_ERR(x) (0x1 << (x))
49 50
50#define DMA_DESC_WIDTH_SHIFT_BITS 12 51#define DMA_DESC_WIDTH_SHIFT_BITS 12
51#define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS) 52#define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
@@ -61,6 +62,9 @@
61 62
62#define MAX_CHAN_NR 8 63#define MAX_CHAN_NR 8
63 64
65#define DMA_MASK_CTL0_MODE 0x33333333
66#define DMA_MASK_CTL2_MODE 0x00003333
67
64static unsigned int init_nr_desc_per_channel = 64; 68static unsigned int init_nr_desc_per_channel = 64;
65module_param(init_nr_desc_per_channel, uint, 0644); 69module_param(init_nr_desc_per_channel, uint, 0644);
66MODULE_PARM_DESC(init_nr_desc_per_channel, 70MODULE_PARM_DESC(init_nr_desc_per_channel,
@@ -133,6 +137,7 @@ struct pch_dma {
133#define PCH_DMA_CTL3 0x0C 137#define PCH_DMA_CTL3 0x0C
134#define PCH_DMA_STS0 0x10 138#define PCH_DMA_STS0 0x10
135#define PCH_DMA_STS1 0x14 139#define PCH_DMA_STS1 0x14
140#define PCH_DMA_STS2 0x18
136 141
137#define dma_readl(pd, name) \ 142#define dma_readl(pd, name) \
138 readl((pd)->membase + PCH_DMA_##name) 143 readl((pd)->membase + PCH_DMA_##name)
@@ -183,13 +188,19 @@ static void pdc_enable_irq(struct dma_chan *chan, int enable)
183{ 188{
184 struct pch_dma *pd = to_pd(chan->device); 189 struct pch_dma *pd = to_pd(chan->device);
185 u32 val; 190 u32 val;
191 int pos;
192
193 if (chan->chan_id < 8)
194 pos = chan->chan_id;
195 else
196 pos = chan->chan_id + 8;
186 197
187 val = dma_readl(pd, CTL2); 198 val = dma_readl(pd, CTL2);
188 199
189 if (enable) 200 if (enable)
190 val |= 0x1 << chan->chan_id; 201 val |= 0x1 << pos;
191 else 202 else
192 val &= ~(0x1 << chan->chan_id); 203 val &= ~(0x1 << pos);
193 204
194 dma_writel(pd, CTL2, val); 205 dma_writel(pd, CTL2, val);
195 206
@@ -202,10 +213,17 @@ static void pdc_set_dir(struct dma_chan *chan)
202 struct pch_dma_chan *pd_chan = to_pd_chan(chan); 213 struct pch_dma_chan *pd_chan = to_pd_chan(chan);
203 struct pch_dma *pd = to_pd(chan->device); 214 struct pch_dma *pd = to_pd(chan->device);
204 u32 val; 215 u32 val;
216 u32 mask_mode;
217 u32 mask_ctl;
205 218
206 if (chan->chan_id < 8) { 219 if (chan->chan_id < 8) {
207 val = dma_readl(pd, CTL0); 220 val = dma_readl(pd, CTL0);
208 221
222 mask_mode = DMA_CTL0_MODE_MASK_BITS <<
223 (DMA_CTL0_BITS_PER_CH * chan->chan_id);
224 mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
225 (DMA_CTL0_BITS_PER_CH * chan->chan_id));
226 val &= mask_mode;
209 if (pd_chan->dir == DMA_TO_DEVICE) 227 if (pd_chan->dir == DMA_TO_DEVICE)
210 val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + 228 val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
211 DMA_CTL0_DIR_SHIFT_BITS); 229 DMA_CTL0_DIR_SHIFT_BITS);
@@ -213,18 +231,24 @@ static void pdc_set_dir(struct dma_chan *chan)
213 val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + 231 val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
214 DMA_CTL0_DIR_SHIFT_BITS)); 232 DMA_CTL0_DIR_SHIFT_BITS));
215 233
234 val |= mask_ctl;
216 dma_writel(pd, CTL0, val); 235 dma_writel(pd, CTL0, val);
217 } else { 236 } else {
218 int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ 237 int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
219 val = dma_readl(pd, CTL3); 238 val = dma_readl(pd, CTL3);
220 239
240 mask_mode = DMA_CTL0_MODE_MASK_BITS <<
241 (DMA_CTL0_BITS_PER_CH * ch);
242 mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
243 (DMA_CTL0_BITS_PER_CH * ch));
244 val &= mask_mode;
221 if (pd_chan->dir == DMA_TO_DEVICE) 245 if (pd_chan->dir == DMA_TO_DEVICE)
222 val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + 246 val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
223 DMA_CTL0_DIR_SHIFT_BITS); 247 DMA_CTL0_DIR_SHIFT_BITS);
224 else 248 else
225 val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch + 249 val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
226 DMA_CTL0_DIR_SHIFT_BITS)); 250 DMA_CTL0_DIR_SHIFT_BITS));
227 251 val |= mask_ctl;
228 dma_writel(pd, CTL3, val); 252 dma_writel(pd, CTL3, val);
229 } 253 }
230 254
@@ -236,33 +260,37 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode)
236{ 260{
237 struct pch_dma *pd = to_pd(chan->device); 261 struct pch_dma *pd = to_pd(chan->device);
238 u32 val; 262 u32 val;
263 u32 mask_ctl;
264 u32 mask_dir;
239 265
240 if (chan->chan_id < 8) { 266 if (chan->chan_id < 8) {
267 mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
268 (DMA_CTL0_BITS_PER_CH * chan->chan_id));
269 mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +\
270 DMA_CTL0_DIR_SHIFT_BITS);
241 val = dma_readl(pd, CTL0); 271 val = dma_readl(pd, CTL0);
242 272 val &= mask_dir;
243 val &= ~(DMA_CTL0_MODE_MASK_BITS <<
244 (DMA_CTL0_BITS_PER_CH * chan->chan_id));
245 val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id); 273 val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
246 274 val |= mask_ctl;
247 dma_writel(pd, CTL0, val); 275 dma_writel(pd, CTL0, val);
248 } else { 276 } else {
249 int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ 277 int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
250 278 mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
279 (DMA_CTL0_BITS_PER_CH * ch));
280 mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +\
281 DMA_CTL0_DIR_SHIFT_BITS);
251 val = dma_readl(pd, CTL3); 282 val = dma_readl(pd, CTL3);
252 283 val &= mask_dir;
253 val &= ~(DMA_CTL0_MODE_MASK_BITS <<
254 (DMA_CTL0_BITS_PER_CH * ch));
255 val |= mode << (DMA_CTL0_BITS_PER_CH * ch); 284 val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
256 285 val |= mask_ctl;
257 dma_writel(pd, CTL3, val); 286 dma_writel(pd, CTL3, val);
258
259 } 287 }
260 288
261 dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n", 289 dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
262 chan->chan_id, val); 290 chan->chan_id, val);
263} 291}
264 292
265static u32 pdc_get_status(struct pch_dma_chan *pd_chan) 293static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
266{ 294{
267 struct pch_dma *pd = to_pd(pd_chan->chan.device); 295 struct pch_dma *pd = to_pd(pd_chan->chan.device);
268 u32 val; 296 u32 val;
@@ -272,9 +300,27 @@ static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
272 DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id)); 300 DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
273} 301}
274 302
303static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
304{
305 struct pch_dma *pd = to_pd(pd_chan->chan.device);
306 u32 val;
307
308 val = dma_readl(pd, STS2);
309 return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
310 DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
311}
312
275static bool pdc_is_idle(struct pch_dma_chan *pd_chan) 313static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
276{ 314{
277 if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE) 315 u32 sts;
316
317 if (pd_chan->chan.chan_id < 8)
318 sts = pdc_get_status0(pd_chan);
319 else
320 sts = pdc_get_status2(pd_chan);
321
322
323 if (sts == DMA_STATUS_IDLE)
278 return true; 324 return true;
279 else 325 else
280 return false; 326 return false;
@@ -495,11 +541,11 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
495 list_add_tail(&desc->desc_node, &tmp_list); 541 list_add_tail(&desc->desc_node, &tmp_list);
496 } 542 }
497 543
498 spin_lock_bh(&pd_chan->lock); 544 spin_lock_irq(&pd_chan->lock);
499 list_splice(&tmp_list, &pd_chan->free_list); 545 list_splice(&tmp_list, &pd_chan->free_list);
500 pd_chan->descs_allocated = i; 546 pd_chan->descs_allocated = i;
501 pd_chan->completed_cookie = chan->cookie = 1; 547 pd_chan->completed_cookie = chan->cookie = 1;
502 spin_unlock_bh(&pd_chan->lock); 548 spin_unlock_irq(&pd_chan->lock);
503 549
504 pdc_enable_irq(chan, 1); 550 pdc_enable_irq(chan, 1);
505 551
@@ -517,10 +563,10 @@ static void pd_free_chan_resources(struct dma_chan *chan)
517 BUG_ON(!list_empty(&pd_chan->active_list)); 563 BUG_ON(!list_empty(&pd_chan->active_list));
518 BUG_ON(!list_empty(&pd_chan->queue)); 564 BUG_ON(!list_empty(&pd_chan->queue));
519 565
520 spin_lock_bh(&pd_chan->lock); 566 spin_lock_irq(&pd_chan->lock);
521 list_splice_init(&pd_chan->free_list, &tmp_list); 567 list_splice_init(&pd_chan->free_list, &tmp_list);
522 pd_chan->descs_allocated = 0; 568 pd_chan->descs_allocated = 0;
523 spin_unlock_bh(&pd_chan->lock); 569 spin_unlock_irq(&pd_chan->lock);
524 570
525 list_for_each_entry_safe(desc, _d, &tmp_list, desc_node) 571 list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
526 pci_pool_free(pd->pool, desc, desc->txd.phys); 572 pci_pool_free(pd->pool, desc, desc->txd.phys);
@@ -536,10 +582,10 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
536 dma_cookie_t last_completed; 582 dma_cookie_t last_completed;
537 int ret; 583 int ret;
538 584
539 spin_lock_bh(&pd_chan->lock); 585 spin_lock_irq(&pd_chan->lock);
540 last_completed = pd_chan->completed_cookie; 586 last_completed = pd_chan->completed_cookie;
541 last_used = chan->cookie; 587 last_used = chan->cookie;
542 spin_unlock_bh(&pd_chan->lock); 588 spin_unlock_irq(&pd_chan->lock);
543 589
544 ret = dma_async_is_complete(cookie, last_completed, last_used); 590 ret = dma_async_is_complete(cookie, last_completed, last_used);
545 591
@@ -654,7 +700,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
654 if (cmd != DMA_TERMINATE_ALL) 700 if (cmd != DMA_TERMINATE_ALL)
655 return -ENXIO; 701 return -ENXIO;
656 702
657 spin_lock_bh(&pd_chan->lock); 703 spin_lock_irq(&pd_chan->lock);
658 704
659 pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE); 705 pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
660 706
@@ -664,7 +710,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
664 list_for_each_entry_safe(desc, _d, &list, desc_node) 710 list_for_each_entry_safe(desc, _d, &list, desc_node)
665 pdc_chain_complete(pd_chan, desc); 711 pdc_chain_complete(pd_chan, desc);
666 712
667 spin_unlock_bh(&pd_chan->lock); 713 spin_unlock_irq(&pd_chan->lock);
668 714
669 return 0; 715 return 0;
670} 716}
@@ -693,30 +739,45 @@ static irqreturn_t pd_irq(int irq, void *devid)
693 struct pch_dma *pd = (struct pch_dma *)devid; 739 struct pch_dma *pd = (struct pch_dma *)devid;
694 struct pch_dma_chan *pd_chan; 740 struct pch_dma_chan *pd_chan;
695 u32 sts0; 741 u32 sts0;
742 u32 sts2;
696 int i; 743 int i;
697 int ret = IRQ_NONE; 744 int ret0 = IRQ_NONE;
745 int ret2 = IRQ_NONE;
698 746
699 sts0 = dma_readl(pd, STS0); 747 sts0 = dma_readl(pd, STS0);
748 sts2 = dma_readl(pd, STS2);
700 749
701 dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0); 750 dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);
702 751
703 for (i = 0; i < pd->dma.chancnt; i++) { 752 for (i = 0; i < pd->dma.chancnt; i++) {
704 pd_chan = &pd->channels[i]; 753 pd_chan = &pd->channels[i];
705 754
706 if (sts0 & DMA_STATUS_IRQ(i)) { 755 if (i < 8) {
707 if (sts0 & DMA_STATUS_ERR(i)) 756 if (sts0 & DMA_STATUS_IRQ(i)) {
708 set_bit(0, &pd_chan->err_status); 757 if (sts0 & DMA_STATUS0_ERR(i))
758 set_bit(0, &pd_chan->err_status);
709 759
710 tasklet_schedule(&pd_chan->tasklet); 760 tasklet_schedule(&pd_chan->tasklet);
711 ret = IRQ_HANDLED; 761 ret0 = IRQ_HANDLED;
712 } 762 }
763 } else {
764 if (sts2 & DMA_STATUS_IRQ(i - 8)) {
765 if (sts2 & DMA_STATUS2_ERR(i))
766 set_bit(0, &pd_chan->err_status);
713 767
768 tasklet_schedule(&pd_chan->tasklet);
769 ret2 = IRQ_HANDLED;
770 }
771 }
714 } 772 }
715 773
716 /* clear interrupt bits in status register */ 774 /* clear interrupt bits in status register */
717 dma_writel(pd, STS0, sts0); 775 if (ret0)
776 dma_writel(pd, STS0, sts0);
777 if (ret2)
778 dma_writel(pd, STS2, sts2);
718 779
719 return ret; 780 return ret0 | ret2;
720} 781}
721 782
722#ifdef CONFIG_PM 783#ifdef CONFIG_PM
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 6abe1ec1f2ce..00eee59e8b33 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -82,7 +82,7 @@ struct dma_pl330_dmac {
82 spinlock_t pool_lock; 82 spinlock_t pool_lock;
83 83
84 /* Peripheral channels connected to this DMAC */ 84 /* Peripheral channels connected to this DMAC */
85 struct dma_pl330_chan peripherals[0]; /* keep at end */ 85 struct dma_pl330_chan *peripherals; /* keep at end */
86}; 86};
87 87
88struct dma_pl330_desc { 88struct dma_pl330_desc {
@@ -451,8 +451,13 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
451 desc->txd.cookie = 0; 451 desc->txd.cookie = 0;
452 async_tx_ack(&desc->txd); 452 async_tx_ack(&desc->txd);
453 453
454 desc->req.rqtype = peri->rqtype; 454 if (peri) {
455 desc->req.peri = peri->peri_id; 455 desc->req.rqtype = peri->rqtype;
456 desc->req.peri = peri->peri_id;
457 } else {
458 desc->req.rqtype = MEMTOMEM;
459 desc->req.peri = 0;
460 }
456 461
457 dma_async_tx_descriptor_init(&desc->txd, &pch->chan); 462 dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
458 463
@@ -529,10 +534,10 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
529 struct pl330_info *pi; 534 struct pl330_info *pi;
530 int burst; 535 int burst;
531 536
532 if (unlikely(!pch || !len || !peri)) 537 if (unlikely(!pch || !len))
533 return NULL; 538 return NULL;
534 539
535 if (peri->rqtype != MEMTOMEM) 540 if (peri && peri->rqtype != MEMTOMEM)
536 return NULL; 541 return NULL;
537 542
538 pi = &pch->dmac->pif; 543 pi = &pch->dmac->pif;
@@ -577,7 +582,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
577 int i, burst_size; 582 int i, burst_size;
578 dma_addr_t addr; 583 dma_addr_t addr;
579 584
580 if (unlikely(!pch || !sgl || !sg_len)) 585 if (unlikely(!pch || !sgl || !sg_len || !peri))
581 return NULL; 586 return NULL;
582 587
583 /* Make sure the direction is consistent */ 588 /* Make sure the direction is consistent */
@@ -666,17 +671,12 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
666 struct dma_device *pd; 671 struct dma_device *pd;
667 struct resource *res; 672 struct resource *res;
668 int i, ret, irq; 673 int i, ret, irq;
674 int num_chan;
669 675
670 pdat = adev->dev.platform_data; 676 pdat = adev->dev.platform_data;
671 677
672 if (!pdat || !pdat->nr_valid_peri) {
673 dev_err(&adev->dev, "platform data missing\n");
674 return -ENODEV;
675 }
676
677 /* Allocate a new DMAC and its Channels */ 678 /* Allocate a new DMAC and its Channels */
678 pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch) 679 pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
679 + sizeof(*pdmac), GFP_KERNEL);
680 if (!pdmac) { 680 if (!pdmac) {
681 dev_err(&adev->dev, "unable to allocate mem\n"); 681 dev_err(&adev->dev, "unable to allocate mem\n");
682 return -ENOMEM; 682 return -ENOMEM;
@@ -685,7 +685,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
685 pi = &pdmac->pif; 685 pi = &pdmac->pif;
686 pi->dev = &adev->dev; 686 pi->dev = &adev->dev;
687 pi->pl330_data = NULL; 687 pi->pl330_data = NULL;
688 pi->mcbufsz = pdat->mcbuf_sz; 688 pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
689 689
690 res = &adev->res; 690 res = &adev->res;
691 request_mem_region(res->start, resource_size(res), "dma-pl330"); 691 request_mem_region(res->start, resource_size(res), "dma-pl330");
@@ -717,27 +717,35 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
717 INIT_LIST_HEAD(&pd->channels); 717 INIT_LIST_HEAD(&pd->channels);
718 718
719 /* Initialize channel parameters */ 719 /* Initialize channel parameters */
720 for (i = 0; i < pdat->nr_valid_peri; i++) { 720 num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
721 struct dma_pl330_peri *peri = &pdat->peri[i]; 721 pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
722 pch = &pdmac->peripherals[i];
723 722
724 switch (peri->rqtype) { 723 for (i = 0; i < num_chan; i++) {
725 case MEMTOMEM: 724 pch = &pdmac->peripherals[i];
725 if (pdat) {
726 struct dma_pl330_peri *peri = &pdat->peri[i];
727
728 switch (peri->rqtype) {
729 case MEMTOMEM:
730 dma_cap_set(DMA_MEMCPY, pd->cap_mask);
731 break;
732 case MEMTODEV:
733 case DEVTOMEM:
734 dma_cap_set(DMA_SLAVE, pd->cap_mask);
735 break;
736 default:
737 dev_err(&adev->dev, "DEVTODEV Not Supported\n");
738 continue;
739 }
740 pch->chan.private = peri;
741 } else {
726 dma_cap_set(DMA_MEMCPY, pd->cap_mask); 742 dma_cap_set(DMA_MEMCPY, pd->cap_mask);
727 break; 743 pch->chan.private = NULL;
728 case MEMTODEV:
729 case DEVTOMEM:
730 dma_cap_set(DMA_SLAVE, pd->cap_mask);
731 break;
732 default:
733 dev_err(&adev->dev, "DEVTODEV Not Supported\n");
734 continue;
735 } 744 }
736 745
737 INIT_LIST_HEAD(&pch->work_list); 746 INIT_LIST_HEAD(&pch->work_list);
738 spin_lock_init(&pch->lock); 747 spin_lock_init(&pch->lock);
739 pch->pl330_chid = NULL; 748 pch->pl330_chid = NULL;
740 pch->chan.private = peri;
741 pch->chan.device = pd; 749 pch->chan.device = pd;
742 pch->chan.chan_id = i; 750 pch->chan.chan_id = i;
743 pch->dmac = pdmac; 751 pch->dmac = pdmac;
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 028330044201..7f49235d14b9 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -70,12 +70,36 @@ static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
70 70
71static u16 dmaor_read(struct sh_dmae_device *shdev) 71static u16 dmaor_read(struct sh_dmae_device *shdev)
72{ 72{
73 return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32)); 73 u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
74
75 if (shdev->pdata->dmaor_is_32bit)
76 return __raw_readl(addr);
77 else
78 return __raw_readw(addr);
74} 79}
75 80
76static void dmaor_write(struct sh_dmae_device *shdev, u16 data) 81static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
77{ 82{
78 __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32)); 83 u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
84
85 if (shdev->pdata->dmaor_is_32bit)
86 __raw_writel(data, addr);
87 else
88 __raw_writew(data, addr);
89}
90
91static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
92{
93 struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
94
95 __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
96}
97
98static u32 chcr_read(struct sh_dmae_chan *sh_dc)
99{
100 struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
101
102 return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
79} 103}
80 104
81/* 105/*
@@ -120,7 +144,7 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
120 144
121static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) 145static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
122{ 146{
123 u32 chcr = sh_dmae_readl(sh_chan, CHCR); 147 u32 chcr = chcr_read(sh_chan);
124 148
125 if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) 149 if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
126 return true; /* working */ 150 return true; /* working */
@@ -130,8 +154,7 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
130 154
131static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) 155static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
132{ 156{
133 struct sh_dmae_device *shdev = container_of(sh_chan->common.device, 157 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
134 struct sh_dmae_device, common);
135 struct sh_dmae_pdata *pdata = shdev->pdata; 158 struct sh_dmae_pdata *pdata = shdev->pdata;
136 int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | 159 int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
137 ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); 160 ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
@@ -144,8 +167,7 @@ static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
144 167
145static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) 168static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
146{ 169{
147 struct sh_dmae_device *shdev = container_of(sh_chan->common.device, 170 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
148 struct sh_dmae_device, common);
149 struct sh_dmae_pdata *pdata = shdev->pdata; 171 struct sh_dmae_pdata *pdata = shdev->pdata;
150 int i; 172 int i;
151 173
@@ -169,18 +191,23 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
169 191
170static void dmae_start(struct sh_dmae_chan *sh_chan) 192static void dmae_start(struct sh_dmae_chan *sh_chan)
171{ 193{
172 u32 chcr = sh_dmae_readl(sh_chan, CHCR); 194 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
195 u32 chcr = chcr_read(sh_chan);
196
197 if (shdev->pdata->needs_tend_set)
198 sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
173 199
174 chcr |= CHCR_DE | CHCR_IE; 200 chcr |= CHCR_DE | shdev->chcr_ie_bit;
175 sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR); 201 chcr_write(sh_chan, chcr & ~CHCR_TE);
176} 202}
177 203
178static void dmae_halt(struct sh_dmae_chan *sh_chan) 204static void dmae_halt(struct sh_dmae_chan *sh_chan)
179{ 205{
180 u32 chcr = sh_dmae_readl(sh_chan, CHCR); 206 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
207 u32 chcr = chcr_read(sh_chan);
181 208
182 chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE); 209 chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
183 sh_dmae_writel(sh_chan, chcr, CHCR); 210 chcr_write(sh_chan, chcr);
184} 211}
185 212
186static void dmae_init(struct sh_dmae_chan *sh_chan) 213static void dmae_init(struct sh_dmae_chan *sh_chan)
@@ -192,7 +219,7 @@ static void dmae_init(struct sh_dmae_chan *sh_chan)
192 u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, 219 u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
193 LOG2_DEFAULT_XFER_SIZE); 220 LOG2_DEFAULT_XFER_SIZE);
194 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); 221 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
195 sh_dmae_writel(sh_chan, chcr, CHCR); 222 chcr_write(sh_chan, chcr);
196} 223}
197 224
198static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) 225static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
@@ -202,23 +229,25 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
202 return -EBUSY; 229 return -EBUSY;
203 230
204 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); 231 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
205 sh_dmae_writel(sh_chan, val, CHCR); 232 chcr_write(sh_chan, val);
206 233
207 return 0; 234 return 0;
208} 235}
209 236
210static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) 237static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
211{ 238{
212 struct sh_dmae_device *shdev = container_of(sh_chan->common.device, 239 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
213 struct sh_dmae_device, common);
214 struct sh_dmae_pdata *pdata = shdev->pdata; 240 struct sh_dmae_pdata *pdata = shdev->pdata;
215 const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; 241 const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
216 u16 __iomem *addr = shdev->dmars; 242 u16 __iomem *addr = shdev->dmars;
217 int shift = chan_pdata->dmars_bit; 243 unsigned int shift = chan_pdata->dmars_bit;
218 244
219 if (dmae_is_busy(sh_chan)) 245 if (dmae_is_busy(sh_chan))
220 return -EBUSY; 246 return -EBUSY;
221 247
248 if (pdata->no_dmars)
249 return 0;
250
222 /* in the case of a missing DMARS resource use first memory window */ 251 /* in the case of a missing DMARS resource use first memory window */
223 if (!addr) 252 if (!addr)
224 addr = (u16 __iomem *)shdev->chan_reg; 253 addr = (u16 __iomem *)shdev->chan_reg;
@@ -296,9 +325,7 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
296static const struct sh_dmae_slave_config *sh_dmae_find_slave( 325static const struct sh_dmae_slave_config *sh_dmae_find_slave(
297 struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param) 326 struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
298{ 327{
299 struct dma_device *dma_dev = sh_chan->common.device; 328 struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
300 struct sh_dmae_device *shdev = container_of(dma_dev,
301 struct sh_dmae_device, common);
302 struct sh_dmae_pdata *pdata = shdev->pdata; 329 struct sh_dmae_pdata *pdata = shdev->pdata;
303 int i; 330 int i;
304 331
@@ -771,10 +798,8 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
771 798
772 spin_lock_bh(&sh_chan->desc_lock); 799 spin_lock_bh(&sh_chan->desc_lock);
773 /* DMA work check */ 800 /* DMA work check */
774 if (dmae_is_busy(sh_chan)) { 801 if (dmae_is_busy(sh_chan))
775 spin_unlock_bh(&sh_chan->desc_lock); 802 goto sh_chan_xfer_ld_queue_end;
776 return;
777 }
778 803
779 /* Find the first not transferred descriptor */ 804 /* Find the first not transferred descriptor */
780 list_for_each_entry(desc, &sh_chan->ld_queue, node) 805 list_for_each_entry(desc, &sh_chan->ld_queue, node)
@@ -788,6 +813,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
788 break; 813 break;
789 } 814 }
790 815
816sh_chan_xfer_ld_queue_end:
791 spin_unlock_bh(&sh_chan->desc_lock); 817 spin_unlock_bh(&sh_chan->desc_lock);
792} 818}
793 819
@@ -846,7 +872,7 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data)
846 872
847 spin_lock(&sh_chan->desc_lock); 873 spin_lock(&sh_chan->desc_lock);
848 874
849 chcr = sh_dmae_readl(sh_chan, CHCR); 875 chcr = chcr_read(sh_chan);
850 876
851 if (chcr & CHCR_TE) { 877 if (chcr & CHCR_TE) {
852 /* DMA stop */ 878 /* DMA stop */
@@ -1144,6 +1170,16 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1144 /* platform data */ 1170 /* platform data */
1145 shdev->pdata = pdata; 1171 shdev->pdata = pdata;
1146 1172
1173 if (pdata->chcr_offset)
1174 shdev->chcr_offset = pdata->chcr_offset;
1175 else
1176 shdev->chcr_offset = CHCR;
1177
1178 if (pdata->chcr_ie_bit)
1179 shdev->chcr_ie_bit = pdata->chcr_ie_bit;
1180 else
1181 shdev->chcr_ie_bit = CHCR_IE;
1182
1147 platform_set_drvdata(pdev, shdev); 1183 platform_set_drvdata(pdev, shdev);
1148 1184
1149 pm_runtime_enable(&pdev->dev); 1185 pm_runtime_enable(&pdev->dev);
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 5ae9fc512180..dc56576f9fdb 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -47,10 +47,14 @@ struct sh_dmae_device {
47 struct list_head node; 47 struct list_head node;
48 u32 __iomem *chan_reg; 48 u32 __iomem *chan_reg;
49 u16 __iomem *dmars; 49 u16 __iomem *dmars;
50 unsigned int chcr_offset;
51 u32 chcr_ie_bit;
50}; 52};
51 53
52#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) 54#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common)
53#define to_sh_desc(lh) container_of(lh, struct sh_desc, node) 55#define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
54#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx) 56#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
57#define to_sh_dev(chan) container_of(chan->common.device,\
58 struct sh_dmae_device, common)
55 59
56#endif /* __DMA_SHDMA_H */ 60#endif /* __DMA_SHDMA_H */
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 29d1addbe0cf..467e4dcb20a0 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,7 @@
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/amba/bus.h>
17 18
18#include <plat/ste_dma40.h> 19#include <plat/ste_dma40.h>
19 20
@@ -45,9 +46,6 @@
45#define D40_ALLOC_PHY (1 << 30) 46#define D40_ALLOC_PHY (1 << 30)
46#define D40_ALLOC_LOG_FREE 0 47#define D40_ALLOC_LOG_FREE 0
47 48
48/* Hardware designer of the block */
49#define D40_HW_DESIGNER 0x8
50
51/** 49/**
52 * enum 40_command - The different commands and/or statuses. 50 * enum 40_command - The different commands and/or statuses.
53 * 51 *
@@ -176,8 +174,10 @@ struct d40_base;
176 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a 174 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
177 * transfer and call client callback. 175 * transfer and call client callback.
178 * @client: Cliented owned descriptor list. 176 * @client: Cliented owned descriptor list.
177 * @pending_queue: Submitted jobs, to be issued by issue_pending()
179 * @active: Active descriptor. 178 * @active: Active descriptor.
180 * @queue: Queued jobs. 179 * @queue: Queued jobs.
180 * @prepare_queue: Prepared jobs.
181 * @dma_cfg: The client configuration of this dma channel. 181 * @dma_cfg: The client configuration of this dma channel.
182 * @configured: whether the dma_cfg configuration is valid 182 * @configured: whether the dma_cfg configuration is valid
183 * @base: Pointer to the device instance struct. 183 * @base: Pointer to the device instance struct.
@@ -186,6 +186,8 @@ struct d40_base;
186 * @log_def: Default logical channel settings. 186 * @log_def: Default logical channel settings.
187 * @lcla: Space for one dst src pair for logical channel transfers. 187 * @lcla: Space for one dst src pair for logical channel transfers.
188 * @lcpa: Pointer to dst and src lcpa settings. 188 * @lcpa: Pointer to dst and src lcpa settings.
189 * @runtime_addr: runtime configured address.
190 * @runtime_direction: runtime configured direction.
189 * 191 *
190 * This struct can either "be" a logical or a physical channel. 192 * This struct can either "be" a logical or a physical channel.
191 */ 193 */
@@ -200,8 +202,10 @@ struct d40_chan {
200 struct dma_chan chan; 202 struct dma_chan chan;
201 struct tasklet_struct tasklet; 203 struct tasklet_struct tasklet;
202 struct list_head client; 204 struct list_head client;
205 struct list_head pending_queue;
203 struct list_head active; 206 struct list_head active;
204 struct list_head queue; 207 struct list_head queue;
208 struct list_head prepare_queue;
205 struct stedma40_chan_cfg dma_cfg; 209 struct stedma40_chan_cfg dma_cfg;
206 bool configured; 210 bool configured;
207 struct d40_base *base; 211 struct d40_base *base;
@@ -476,7 +480,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
476 480
477 list_for_each_entry_safe(d, _d, &d40c->client, node) 481 list_for_each_entry_safe(d, _d, &d40c->client, node)
478 if (async_tx_test_ack(&d->txd)) { 482 if (async_tx_test_ack(&d->txd)) {
479 d40_pool_lli_free(d40c, d);
480 d40_desc_remove(d); 483 d40_desc_remove(d);
481 desc = d; 484 desc = d;
482 memset(desc, 0, sizeof(*desc)); 485 memset(desc, 0, sizeof(*desc));
@@ -643,9 +646,25 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
643 return d; 646 return d;
644} 647}
645 648
649/* remove desc from current queue and add it to the pending_queue */
646static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) 650static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
647{ 651{
648 list_add_tail(&desc->node, &d40c->queue); 652 d40_desc_remove(desc);
653 desc->is_in_client_list = false;
654 list_add_tail(&desc->node, &d40c->pending_queue);
655}
656
657static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
658{
659 struct d40_desc *d;
660
661 if (list_empty(&d40c->pending_queue))
662 return NULL;
663
664 d = list_first_entry(&d40c->pending_queue,
665 struct d40_desc,
666 node);
667 return d;
649} 668}
650 669
651static struct d40_desc *d40_first_queued(struct d40_chan *d40c) 670static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
@@ -789,6 +808,7 @@ done:
789static void d40_term_all(struct d40_chan *d40c) 808static void d40_term_all(struct d40_chan *d40c)
790{ 809{
791 struct d40_desc *d40d; 810 struct d40_desc *d40d;
811 struct d40_desc *_d;
792 812
793 /* Release active descriptors */ 813 /* Release active descriptors */
794 while ((d40d = d40_first_active_get(d40c))) { 814 while ((d40d = d40_first_active_get(d40c))) {
@@ -802,6 +822,26 @@ static void d40_term_all(struct d40_chan *d40c)
802 d40_desc_free(d40c, d40d); 822 d40_desc_free(d40c, d40d);
803 } 823 }
804 824
825 /* Release pending descriptors */
826 while ((d40d = d40_first_pending(d40c))) {
827 d40_desc_remove(d40d);
828 d40_desc_free(d40c, d40d);
829 }
830
831 /* Release client owned descriptors */
832 if (!list_empty(&d40c->client))
833 list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
834 d40_desc_remove(d40d);
835 d40_desc_free(d40c, d40d);
836 }
837
838 /* Release descriptors in prepare queue */
839 if (!list_empty(&d40c->prepare_queue))
840 list_for_each_entry_safe(d40d, _d,
841 &d40c->prepare_queue, node) {
842 d40_desc_remove(d40d);
843 d40_desc_free(d40c, d40d);
844 }
805 845
806 d40c->pending_tx = 0; 846 d40c->pending_tx = 0;
807 d40c->busy = false; 847 d40c->busy = false;
@@ -1189,7 +1229,6 @@ static void dma_tasklet(unsigned long data)
1189 1229
1190 if (!d40d->cyclic) { 1230 if (!d40d->cyclic) {
1191 if (async_tx_test_ack(&d40d->txd)) { 1231 if (async_tx_test_ack(&d40d->txd)) {
1192 d40_pool_lli_free(d40c, d40d);
1193 d40_desc_remove(d40d); 1232 d40_desc_remove(d40d);
1194 d40_desc_free(d40c, d40d); 1233 d40_desc_free(d40c, d40d);
1195 } else { 1234 } else {
@@ -1576,21 +1615,10 @@ static int d40_free_dma(struct d40_chan *d40c)
1576 u32 event; 1615 u32 event;
1577 struct d40_phy_res *phy = d40c->phy_chan; 1616 struct d40_phy_res *phy = d40c->phy_chan;
1578 bool is_src; 1617 bool is_src;
1579 struct d40_desc *d;
1580 struct d40_desc *_d;
1581
1582 1618
1583 /* Terminate all queued and active transfers */ 1619 /* Terminate all queued and active transfers */
1584 d40_term_all(d40c); 1620 d40_term_all(d40c);
1585 1621
1586 /* Release client owned descriptors */
1587 if (!list_empty(&d40c->client))
1588 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1589 d40_pool_lli_free(d40c, d);
1590 d40_desc_remove(d);
1591 d40_desc_free(d40c, d);
1592 }
1593
1594 if (phy == NULL) { 1622 if (phy == NULL) {
1595 chan_err(d40c, "phy == null\n"); 1623 chan_err(d40c, "phy == null\n");
1596 return -EINVAL; 1624 return -EINVAL;
@@ -1892,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
1892 goto err; 1920 goto err;
1893 } 1921 }
1894 1922
1923 /*
1924 * add descriptor to the prepare queue in order to be able
1925 * to free them later in terminate_all
1926 */
1927 list_add_tail(&desc->node, &chan->prepare_queue);
1928
1895 spin_unlock_irqrestore(&chan->lock, flags); 1929 spin_unlock_irqrestore(&chan->lock, flags);
1896 1930
1897 return &desc->txd; 1931 return &desc->txd;
@@ -2092,7 +2126,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2092 struct scatterlist *sg; 2126 struct scatterlist *sg;
2093 int i; 2127 int i;
2094 2128
2095 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL); 2129 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
2096 for (i = 0; i < periods; i++) { 2130 for (i = 0; i < periods; i++) {
2097 sg_dma_address(&sg[i]) = dma_addr; 2131 sg_dma_address(&sg[i]) = dma_addr;
2098 sg_dma_len(&sg[i]) = period_len; 2132 sg_dma_len(&sg[i]) = period_len;
@@ -2152,24 +2186,87 @@ static void d40_issue_pending(struct dma_chan *chan)
2152 2186
2153 spin_lock_irqsave(&d40c->lock, flags); 2187 spin_lock_irqsave(&d40c->lock, flags);
2154 2188
2155 /* Busy means that pending jobs are already being processed */ 2189 list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2190
2191 /* Busy means that queued jobs are already being processed */
2156 if (!d40c->busy) 2192 if (!d40c->busy)
2157 (void) d40_queue_start(d40c); 2193 (void) d40_queue_start(d40c);
2158 2194
2159 spin_unlock_irqrestore(&d40c->lock, flags); 2195 spin_unlock_irqrestore(&d40c->lock, flags);
2160} 2196}
2161 2197
2198static int
2199dma40_config_to_halfchannel(struct d40_chan *d40c,
2200 struct stedma40_half_channel_info *info,
2201 enum dma_slave_buswidth width,
2202 u32 maxburst)
2203{
2204 enum stedma40_periph_data_width addr_width;
2205 int psize;
2206
2207 switch (width) {
2208 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2209 addr_width = STEDMA40_BYTE_WIDTH;
2210 break;
2211 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2212 addr_width = STEDMA40_HALFWORD_WIDTH;
2213 break;
2214 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2215 addr_width = STEDMA40_WORD_WIDTH;
2216 break;
2217 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2218 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2219 break;
2220 default:
2221 dev_err(d40c->base->dev,
2222 "illegal peripheral address width "
2223 "requested (%d)\n",
2224 width);
2225 return -EINVAL;
2226 }
2227
2228 if (chan_is_logical(d40c)) {
2229 if (maxburst >= 16)
2230 psize = STEDMA40_PSIZE_LOG_16;
2231 else if (maxburst >= 8)
2232 psize = STEDMA40_PSIZE_LOG_8;
2233 else if (maxburst >= 4)
2234 psize = STEDMA40_PSIZE_LOG_4;
2235 else
2236 psize = STEDMA40_PSIZE_LOG_1;
2237 } else {
2238 if (maxburst >= 16)
2239 psize = STEDMA40_PSIZE_PHY_16;
2240 else if (maxburst >= 8)
2241 psize = STEDMA40_PSIZE_PHY_8;
2242 else if (maxburst >= 4)
2243 psize = STEDMA40_PSIZE_PHY_4;
2244 else
2245 psize = STEDMA40_PSIZE_PHY_1;
2246 }
2247
2248 info->data_width = addr_width;
2249 info->psize = psize;
2250 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2251
2252 return 0;
2253}
2254
2162/* Runtime reconfiguration extension */ 2255/* Runtime reconfiguration extension */
2163static void d40_set_runtime_config(struct dma_chan *chan, 2256static int d40_set_runtime_config(struct dma_chan *chan,
2164 struct dma_slave_config *config) 2257 struct dma_slave_config *config)
2165{ 2258{
2166 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2259 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2167 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; 2260 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2168 enum dma_slave_buswidth config_addr_width; 2261 enum dma_slave_buswidth src_addr_width, dst_addr_width;
2169 dma_addr_t config_addr; 2262 dma_addr_t config_addr;
2170 u32 config_maxburst; 2263 u32 src_maxburst, dst_maxburst;
2171 enum stedma40_periph_data_width addr_width; 2264 int ret;
2172 int psize; 2265
2266 src_addr_width = config->src_addr_width;
2267 src_maxburst = config->src_maxburst;
2268 dst_addr_width = config->dst_addr_width;
2269 dst_maxburst = config->dst_maxburst;
2173 2270
2174 if (config->direction == DMA_FROM_DEVICE) { 2271 if (config->direction == DMA_FROM_DEVICE) {
2175 dma_addr_t dev_addr_rx = 2272 dma_addr_t dev_addr_rx =
@@ -2188,8 +2285,11 @@ static void d40_set_runtime_config(struct dma_chan *chan,
2188 cfg->dir); 2285 cfg->dir);
2189 cfg->dir = STEDMA40_PERIPH_TO_MEM; 2286 cfg->dir = STEDMA40_PERIPH_TO_MEM;
2190 2287
2191 config_addr_width = config->src_addr_width; 2288 /* Configure the memory side */
2192 config_maxburst = config->src_maxburst; 2289 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2290 dst_addr_width = src_addr_width;
2291 if (dst_maxburst == 0)
2292 dst_maxburst = src_maxburst;
2193 2293
2194 } else if (config->direction == DMA_TO_DEVICE) { 2294 } else if (config->direction == DMA_TO_DEVICE) {
2195 dma_addr_t dev_addr_tx = 2295 dma_addr_t dev_addr_tx =
@@ -2208,68 +2308,39 @@ static void d40_set_runtime_config(struct dma_chan *chan,
2208 cfg->dir); 2308 cfg->dir);
2209 cfg->dir = STEDMA40_MEM_TO_PERIPH; 2309 cfg->dir = STEDMA40_MEM_TO_PERIPH;
2210 2310
2211 config_addr_width = config->dst_addr_width; 2311 /* Configure the memory side */
2212 config_maxburst = config->dst_maxburst; 2312 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2213 2313 src_addr_width = dst_addr_width;
2314 if (src_maxburst == 0)
2315 src_maxburst = dst_maxburst;
2214 } else { 2316 } else {
2215 dev_err(d40c->base->dev, 2317 dev_err(d40c->base->dev,
2216 "unrecognized channel direction %d\n", 2318 "unrecognized channel direction %d\n",
2217 config->direction); 2319 config->direction);
2218 return; 2320 return -EINVAL;
2219 } 2321 }
2220 2322
2221 switch (config_addr_width) { 2323 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2222 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2223 addr_width = STEDMA40_BYTE_WIDTH;
2224 break;
2225 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2226 addr_width = STEDMA40_HALFWORD_WIDTH;
2227 break;
2228 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2229 addr_width = STEDMA40_WORD_WIDTH;
2230 break;
2231 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2232 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2233 break;
2234 default:
2235 dev_err(d40c->base->dev, 2324 dev_err(d40c->base->dev,
2236 "illegal peripheral address width " 2325 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2237 "requested (%d)\n", 2326 src_maxburst,
2238 config->src_addr_width); 2327 src_addr_width,
2239 return; 2328 dst_maxburst,
2329 dst_addr_width);
2330 return -EINVAL;
2240 } 2331 }
2241 2332
2242 if (chan_is_logical(d40c)) { 2333 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2243 if (config_maxburst >= 16) 2334 src_addr_width,
2244 psize = STEDMA40_PSIZE_LOG_16; 2335 src_maxburst);
2245 else if (config_maxburst >= 8) 2336 if (ret)
2246 psize = STEDMA40_PSIZE_LOG_8; 2337 return ret;
2247 else if (config_maxburst >= 4)
2248 psize = STEDMA40_PSIZE_LOG_4;
2249 else
2250 psize = STEDMA40_PSIZE_LOG_1;
2251 } else {
2252 if (config_maxburst >= 16)
2253 psize = STEDMA40_PSIZE_PHY_16;
2254 else if (config_maxburst >= 8)
2255 psize = STEDMA40_PSIZE_PHY_8;
2256 else if (config_maxburst >= 4)
2257 psize = STEDMA40_PSIZE_PHY_4;
2258 else if (config_maxburst >= 2)
2259 psize = STEDMA40_PSIZE_PHY_2;
2260 else
2261 psize = STEDMA40_PSIZE_PHY_1;
2262 }
2263 2338
2264 /* Set up all the endpoint configs */ 2339 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2265 cfg->src_info.data_width = addr_width; 2340 dst_addr_width,
2266 cfg->src_info.psize = psize; 2341 dst_maxburst);
2267 cfg->src_info.big_endian = false; 2342 if (ret)
2268 cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; 2343 return ret;
2269 cfg->dst_info.data_width = addr_width;
2270 cfg->dst_info.psize = psize;
2271 cfg->dst_info.big_endian = false;
2272 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2273 2344
2274 /* Fill in register values */ 2345 /* Fill in register values */
2275 if (chan_is_logical(d40c)) 2346 if (chan_is_logical(d40c))
@@ -2282,12 +2353,14 @@ static void d40_set_runtime_config(struct dma_chan *chan,
2282 d40c->runtime_addr = config_addr; 2353 d40c->runtime_addr = config_addr;
2283 d40c->runtime_direction = config->direction; 2354 d40c->runtime_direction = config->direction;
2284 dev_dbg(d40c->base->dev, 2355 dev_dbg(d40c->base->dev,
2285 "configured channel %s for %s, data width %d, " 2356 "configured channel %s for %s, data width %d/%d, "
2286 "maxburst %d bytes, LE, no flow control\n", 2357 "maxburst %d/%d elements, LE, no flow control\n",
2287 dma_chan_name(chan), 2358 dma_chan_name(chan),
2288 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", 2359 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
2289 config_addr_width, 2360 src_addr_width, dst_addr_width,
2290 config_maxburst); 2361 src_maxburst, dst_maxburst);
2362
2363 return 0;
2291} 2364}
2292 2365
2293static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 2366static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -2308,9 +2381,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2308 case DMA_RESUME: 2381 case DMA_RESUME:
2309 return d40_resume(d40c); 2382 return d40_resume(d40c);
2310 case DMA_SLAVE_CONFIG: 2383 case DMA_SLAVE_CONFIG:
2311 d40_set_runtime_config(chan, 2384 return d40_set_runtime_config(chan,
2312 (struct dma_slave_config *) arg); 2385 (struct dma_slave_config *) arg);
2313 return 0;
2314 default: 2386 default:
2315 break; 2387 break;
2316 } 2388 }
@@ -2341,7 +2413,9 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2341 2413
2342 INIT_LIST_HEAD(&d40c->active); 2414 INIT_LIST_HEAD(&d40c->active);
2343 INIT_LIST_HEAD(&d40c->queue); 2415 INIT_LIST_HEAD(&d40c->queue);
2416 INIT_LIST_HEAD(&d40c->pending_queue);
2344 INIT_LIST_HEAD(&d40c->client); 2417 INIT_LIST_HEAD(&d40c->client);
2418 INIT_LIST_HEAD(&d40c->prepare_queue);
2345 2419
2346 tasklet_init(&d40c->tasklet, dma_tasklet, 2420 tasklet_init(&d40c->tasklet, dma_tasklet,
2347 (unsigned long) d40c); 2421 (unsigned long) d40c);
@@ -2502,25 +2576,6 @@ static int __init d40_phy_res_init(struct d40_base *base)
2502 2576
2503static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) 2577static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2504{ 2578{
2505 static const struct d40_reg_val dma_id_regs[] = {
2506 /* Peripheral Id */
2507 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2508 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2509 /*
2510 * D40_DREG_PERIPHID2 Depends on HW revision:
2511 * DB8500ed has 0x0008,
2512 * ? has 0x0018,
2513 * DB8500v1 has 0x0028
2514 * DB8500v2 has 0x0038
2515 */
2516 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2517
2518 /* PCell Id */
2519 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2520 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2521 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2522 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2523 };
2524 struct stedma40_platform_data *plat_data; 2579 struct stedma40_platform_data *plat_data;
2525 struct clk *clk = NULL; 2580 struct clk *clk = NULL;
2526 void __iomem *virtbase = NULL; 2581 void __iomem *virtbase = NULL;
@@ -2529,8 +2584,9 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2529 int num_log_chans = 0; 2584 int num_log_chans = 0;
2530 int num_phy_chans; 2585 int num_phy_chans;
2531 int i; 2586 int i;
2532 u32 val; 2587 u32 pid;
2533 u32 rev; 2588 u32 cid;
2589 u8 rev;
2534 2590
2535 clk = clk_get(&pdev->dev, NULL); 2591 clk = clk_get(&pdev->dev, NULL);
2536 2592
@@ -2554,32 +2610,32 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2554 if (!virtbase) 2610 if (!virtbase)
2555 goto failure; 2611 goto failure;
2556 2612
2557 /* HW version check */ 2613 /* This is just a regular AMBA PrimeCell ID actually */
2558 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { 2614 for (pid = 0, i = 0; i < 4; i++)
2559 if (dma_id_regs[i].val != 2615 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
2560 readl(virtbase + dma_id_regs[i].reg)) { 2616 & 255) << (i * 8);
2561 d40_err(&pdev->dev, 2617 for (cid = 0, i = 0; i < 4; i++)
2562 "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", 2618 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
2563 dma_id_regs[i].val, 2619 & 255) << (i * 8);
2564 dma_id_regs[i].reg,
2565 readl(virtbase + dma_id_regs[i].reg));
2566 goto failure;
2567 }
2568 }
2569
2570 /* Get silicon revision and designer */
2571 val = readl(virtbase + D40_DREG_PERIPHID2);
2572 2620
2573 if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != 2621 if (cid != AMBA_CID) {
2574 D40_HW_DESIGNER) { 2622 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
2623 goto failure;
2624 }
2625 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
2575 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", 2626 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
2576 val & D40_DREG_PERIPHID2_DESIGNER_MASK, 2627 AMBA_MANF_BITS(pid),
2577 D40_HW_DESIGNER); 2628 AMBA_VENDOR_ST);
2578 goto failure; 2629 goto failure;
2579 } 2630 }
2580 2631 /*
2581 rev = (val & D40_DREG_PERIPHID2_REV_MASK) >> 2632 * HW revision:
2582 D40_DREG_PERIPHID2_REV_POS; 2633 * DB8500ed has revision 0
2634 * ? has revision 1
2635 * DB8500v1 has revision 2
2636 * DB8500v2 has revision 3
2637 */
2638 rev = AMBA_REV_BITS(pid);
2583 2639
2584 /* The number of physical channels on this HW */ 2640 /* The number of physical channels on this HW */
2585 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; 2641 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 195ee65ee7f3..b44c455158de 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -184,9 +184,6 @@
184#define D40_DREG_PERIPHID0 0xFE0 184#define D40_DREG_PERIPHID0 0xFE0
185#define D40_DREG_PERIPHID1 0xFE4 185#define D40_DREG_PERIPHID1 0xFE4
186#define D40_DREG_PERIPHID2 0xFE8 186#define D40_DREG_PERIPHID2 0xFE8
187#define D40_DREG_PERIPHID2_REV_POS 4
188#define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS)
189#define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf
190#define D40_DREG_PERIPHID3 0xFEC 187#define D40_DREG_PERIPHID3 0xFEC
191#define D40_DREG_CELLID0 0xFF0 188#define D40_DREG_CELLID0 0xFF0
192#define D40_DREG_CELLID1 0xFF4 189#define D40_DREG_CELLID1 0xFF4
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 04f1e7ce02b1..f6cf448d69b4 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
1670 char *type, *optype, *err, *msg; 1670 char *type, *optype, *err, *msg;
1671 unsigned long error = m->status & 0x1ff0000l; 1671 unsigned long error = m->status & 0x1ff0000l;
1672 u32 optypenum = (m->status >> 4) & 0x07; 1672 u32 optypenum = (m->status >> 4) & 0x07;
1673 u32 core_err_cnt = (m->status >> 38) && 0x7fff; 1673 u32 core_err_cnt = (m->status >> 38) & 0x7fff;
1674 u32 dimm = (m->misc >> 16) & 0x3; 1674 u32 dimm = (m->misc >> 16) & 0x3;
1675 u32 channel = (m->misc >> 18) & 0x3; 1675 u32 channel = (m->misc >> 18) & 0x3;
1676 u32 syndrome = m->misc >> 32; 1676 u32 syndrome = m->misc >> 32;
diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c
index 30da70d06a6d..cdae207028a7 100644
--- a/drivers/eisa/pci_eisa.c
+++ b/drivers/eisa/pci_eisa.c
@@ -45,13 +45,13 @@ static int __init pci_eisa_init(struct pci_dev *pdev,
45 return 0; 45 return 0;
46} 46}
47 47
48static struct pci_device_id __initdata pci_eisa_pci_tbl[] = { 48static struct pci_device_id pci_eisa_pci_tbl[] = {
49 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 49 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
50 PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 }, 50 PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 },
51 { 0, } 51 { 0, }
52}; 52};
53 53
54static struct pci_driver __initdata pci_eisa_driver = { 54static struct pci_driver __refdata pci_eisa_driver = {
55 .name = "pci_eisa", 55 .name = "pci_eisa",
56 .id_table = pci_eisa_pci_tbl, 56 .id_table = pci_eisa_pci_tbl,
57 .probe = pci_eisa_init, 57 .probe = pci_eisa_init,
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index e6ad3bb6c1a6..4799393247c8 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -216,15 +216,33 @@ struct inbound_phy_packet_event {
216 struct fw_cdev_event_phy_packet phy_packet; 216 struct fw_cdev_event_phy_packet phy_packet;
217}; 217};
218 218
219static inline void __user *u64_to_uptr(__u64 value) 219#ifdef CONFIG_COMPAT
220static void __user *u64_to_uptr(u64 value)
221{
222 if (is_compat_task())
223 return compat_ptr(value);
224 else
225 return (void __user *)(unsigned long)value;
226}
227
228static u64 uptr_to_u64(void __user *ptr)
229{
230 if (is_compat_task())
231 return ptr_to_compat(ptr);
232 else
233 return (u64)(unsigned long)ptr;
234}
235#else
236static inline void __user *u64_to_uptr(u64 value)
220{ 237{
221 return (void __user *)(unsigned long)value; 238 return (void __user *)(unsigned long)value;
222} 239}
223 240
224static inline __u64 uptr_to_u64(void __user *ptr) 241static inline u64 uptr_to_u64(void __user *ptr)
225{ 242{
226 return (__u64)(unsigned long)ptr; 243 return (u64)(unsigned long)ptr;
227} 244}
245#endif /* CONFIG_COMPAT */
228 246
229static int fw_device_op_open(struct inode *inode, struct file *file) 247static int fw_device_op_open(struct inode *inode, struct file *file)
230{ 248{
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 8ba7f7928f1f..f3b890da1e87 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -455,15 +455,20 @@ static struct device_attribute fw_device_attributes[] = {
455static int read_rom(struct fw_device *device, 455static int read_rom(struct fw_device *device,
456 int generation, int index, u32 *data) 456 int generation, int index, u32 *data)
457{ 457{
458 int rcode; 458 u64 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4;
459 int i, rcode;
459 460
460 /* device->node_id, accessed below, must not be older than generation */ 461 /* device->node_id, accessed below, must not be older than generation */
461 smp_rmb(); 462 smp_rmb();
462 463
463 rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST, 464 for (i = 10; i < 100; i += 10) {
464 device->node_id, generation, device->max_speed, 465 rcode = fw_run_transaction(device->card,
465 (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4, 466 TCODE_READ_QUADLET_REQUEST, device->node_id,
466 data, 4); 467 generation, device->max_speed, offset, data, 4);
468 if (rcode != RCODE_BUSY)
469 break;
470 msleep(i);
471 }
467 be32_to_cpus(data); 472 be32_to_cpus(data);
468 473
469 return rcode; 474 return rcode;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index bcf792fac442..57cd3a406edf 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2179,8 +2179,13 @@ static int ohci_enable(struct fw_card *card,
2179 ohci_driver_name, ohci)) { 2179 ohci_driver_name, ohci)) {
2180 fw_error("Failed to allocate interrupt %d.\n", dev->irq); 2180 fw_error("Failed to allocate interrupt %d.\n", dev->irq);
2181 pci_disable_msi(dev); 2181 pci_disable_msi(dev);
2182 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2182
2183 ohci->config_rom, ohci->config_rom_bus); 2183 if (config_rom) {
2184 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2185 ohci->next_config_rom,
2186 ohci->next_config_rom_bus);
2187 ohci->next_config_rom = NULL;
2188 }
2184 return -EIO; 2189 return -EIO;
2185 } 2190 }
2186 2191
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 41841a3e3f99..17cef864506a 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1198,6 +1198,10 @@ static int sbp2_remove(struct device *dev)
1198{ 1198{
1199 struct fw_unit *unit = fw_unit(dev); 1199 struct fw_unit *unit = fw_unit(dev);
1200 struct sbp2_target *tgt = dev_get_drvdata(&unit->device); 1200 struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
1201 struct sbp2_logical_unit *lu;
1202
1203 list_for_each_entry(lu, &tgt->lu_list, link)
1204 cancel_delayed_work_sync(&lu->work);
1201 1205
1202 sbp2_target_put(tgt); 1206 sbp2_target_put(tgt);
1203 return 0; 1207 return 0;
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 5f29aafd4462..eb80b549ed8d 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -78,6 +78,7 @@
78#include <linux/kobject.h> 78#include <linux/kobject.h>
79#include <linux/device.h> 79#include <linux/device.h>
80#include <linux/slab.h> 80#include <linux/slab.h>
81#include <linux/pstore.h>
81 82
82#include <asm/uaccess.h> 83#include <asm/uaccess.h>
83 84
@@ -89,6 +90,8 @@ MODULE_DESCRIPTION("sysfs interface to EFI Variables");
89MODULE_LICENSE("GPL"); 90MODULE_LICENSE("GPL");
90MODULE_VERSION(EFIVARS_VERSION); 91MODULE_VERSION(EFIVARS_VERSION);
91 92
93#define DUMP_NAME_LEN 52
94
92/* 95/*
93 * The maximum size of VariableName + Data = 1024 96 * The maximum size of VariableName + Data = 1024
94 * Therefore, it's reasonable to save that much 97 * Therefore, it's reasonable to save that much
@@ -119,6 +122,10 @@ struct efivar_attribute {
119 ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count); 122 ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
120}; 123};
121 124
125#define PSTORE_EFI_ATTRIBUTES \
126 (EFI_VARIABLE_NON_VOLATILE | \
127 EFI_VARIABLE_BOOTSERVICE_ACCESS | \
128 EFI_VARIABLE_RUNTIME_ACCESS)
122 129
123#define EFIVAR_ATTR(_name, _mode, _show, _store) \ 130#define EFIVAR_ATTR(_name, _mode, _show, _store) \
124struct efivar_attribute efivar_attr_##_name = { \ 131struct efivar_attribute efivar_attr_##_name = { \
@@ -141,38 +148,72 @@ efivar_create_sysfs_entry(struct efivars *efivars,
141 148
142/* Return the number of unicode characters in data */ 149/* Return the number of unicode characters in data */
143static unsigned long 150static unsigned long
144utf8_strlen(efi_char16_t *data, unsigned long maxlength) 151utf16_strnlen(efi_char16_t *s, size_t maxlength)
145{ 152{
146 unsigned long length = 0; 153 unsigned long length = 0;
147 154
148 while (*data++ != 0 && length < maxlength) 155 while (*s++ != 0 && length < maxlength)
149 length++; 156 length++;
150 return length; 157 return length;
151} 158}
152 159
160static inline unsigned long
161utf16_strlen(efi_char16_t *s)
162{
163 return utf16_strnlen(s, ~0UL);
164}
165
153/* 166/*
154 * Return the number of bytes is the length of this string 167 * Return the number of bytes is the length of this string
155 * Note: this is NOT the same as the number of unicode characters 168 * Note: this is NOT the same as the number of unicode characters
156 */ 169 */
157static inline unsigned long 170static inline unsigned long
158utf8_strsize(efi_char16_t *data, unsigned long maxlength) 171utf16_strsize(efi_char16_t *data, unsigned long maxlength)
159{ 172{
160 return utf8_strlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t); 173 return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
174}
175
176static inline int
177utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
178{
179 while (1) {
180 if (len == 0)
181 return 0;
182 if (*a < *b)
183 return -1;
184 if (*a > *b)
185 return 1;
186 if (*a == 0) /* implies *b == 0 */
187 return 0;
188 a++;
189 b++;
190 len--;
191 }
161} 192}
162 193
163static efi_status_t 194static efi_status_t
164get_var_data(struct efivars *efivars, struct efi_variable *var) 195get_var_data_locked(struct efivars *efivars, struct efi_variable *var)
165{ 196{
166 efi_status_t status; 197 efi_status_t status;
167 198
168 spin_lock(&efivars->lock);
169 var->DataSize = 1024; 199 var->DataSize = 1024;
170 status = efivars->ops->get_variable(var->VariableName, 200 status = efivars->ops->get_variable(var->VariableName,
171 &var->VendorGuid, 201 &var->VendorGuid,
172 &var->Attributes, 202 &var->Attributes,
173 &var->DataSize, 203 &var->DataSize,
174 var->Data); 204 var->Data);
205 return status;
206}
207
208static efi_status_t
209get_var_data(struct efivars *efivars, struct efi_variable *var)
210{
211 efi_status_t status;
212
213 spin_lock(&efivars->lock);
214 status = get_var_data_locked(efivars, var);
175 spin_unlock(&efivars->lock); 215 spin_unlock(&efivars->lock);
216
176 if (status != EFI_SUCCESS) { 217 if (status != EFI_SUCCESS) {
177 printk(KERN_WARNING "efivars: get_variable() failed 0x%lx!\n", 218 printk(KERN_WARNING "efivars: get_variable() failed 0x%lx!\n",
178 status); 219 status);
@@ -387,12 +428,180 @@ static struct kobj_type efivar_ktype = {
387 .default_attrs = def_attrs, 428 .default_attrs = def_attrs,
388}; 429};
389 430
431static struct pstore_info efi_pstore_info;
432
390static inline void 433static inline void
391efivar_unregister(struct efivar_entry *var) 434efivar_unregister(struct efivar_entry *var)
392{ 435{
393 kobject_put(&var->kobj); 436 kobject_put(&var->kobj);
394} 437}
395 438
439#ifdef CONFIG_PSTORE
440
441static int efi_pstore_open(struct pstore_info *psi)
442{
443 struct efivars *efivars = psi->data;
444
445 spin_lock(&efivars->lock);
446 efivars->walk_entry = list_first_entry(&efivars->list,
447 struct efivar_entry, list);
448 return 0;
449}
450
451static int efi_pstore_close(struct pstore_info *psi)
452{
453 struct efivars *efivars = psi->data;
454
455 spin_unlock(&efivars->lock);
456 return 0;
457}
458
459static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
460 struct timespec *timespec, struct pstore_info *psi)
461{
462 efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
463 struct efivars *efivars = psi->data;
464 char name[DUMP_NAME_LEN];
465 int i;
466 unsigned int part, size;
467 unsigned long time;
468
469 while (&efivars->walk_entry->list != &efivars->list) {
470 if (!efi_guidcmp(efivars->walk_entry->var.VendorGuid,
471 vendor)) {
472 for (i = 0; i < DUMP_NAME_LEN; i++) {
473 name[i] = efivars->walk_entry->var.VariableName[i];
474 }
475 if (sscanf(name, "dump-type%u-%u-%lu", type, &part, &time) == 3) {
476 *id = part;
477 timespec->tv_sec = time;
478 timespec->tv_nsec = 0;
479 get_var_data_locked(efivars, &efivars->walk_entry->var);
480 size = efivars->walk_entry->var.DataSize;
481 memcpy(psi->buf, efivars->walk_entry->var.Data, size);
482 efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
483 struct efivar_entry, list);
484 return size;
485 }
486 }
487 efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
488 struct efivar_entry, list);
489 }
490 return 0;
491}
492
493static u64 efi_pstore_write(enum pstore_type_id type, unsigned int part,
494 size_t size, struct pstore_info *psi)
495{
496 char name[DUMP_NAME_LEN];
497 char stub_name[DUMP_NAME_LEN];
498 efi_char16_t efi_name[DUMP_NAME_LEN];
499 efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
500 struct efivars *efivars = psi->data;
501 struct efivar_entry *entry, *found = NULL;
502 int i;
503
504 sprintf(stub_name, "dump-type%u-%u-", type, part);
505 sprintf(name, "%s%lu", stub_name, get_seconds());
506
507 spin_lock(&efivars->lock);
508
509 for (i = 0; i < DUMP_NAME_LEN; i++)
510 efi_name[i] = stub_name[i];
511
512 /*
513 * Clean up any entries with the same name
514 */
515
516 list_for_each_entry(entry, &efivars->list, list) {
517 get_var_data_locked(efivars, &entry->var);
518
519 if (efi_guidcmp(entry->var.VendorGuid, vendor))
520 continue;
521 if (utf16_strncmp(entry->var.VariableName, efi_name,
522 utf16_strlen(efi_name)))
523 continue;
524 /* Needs to be a prefix */
525 if (entry->var.VariableName[utf16_strlen(efi_name)] == 0)
526 continue;
527
528 /* found */
529 found = entry;
530 efivars->ops->set_variable(entry->var.VariableName,
531 &entry->var.VendorGuid,
532 PSTORE_EFI_ATTRIBUTES,
533 0, NULL);
534 }
535
536 if (found)
537 list_del(&found->list);
538
539 for (i = 0; i < DUMP_NAME_LEN; i++)
540 efi_name[i] = name[i];
541
542 efivars->ops->set_variable(efi_name, &vendor, PSTORE_EFI_ATTRIBUTES,
543 size, psi->buf);
544
545 spin_unlock(&efivars->lock);
546
547 if (found)
548 efivar_unregister(found);
549
550 if (size)
551 efivar_create_sysfs_entry(efivars,
552 utf16_strsize(efi_name,
553 DUMP_NAME_LEN * 2),
554 efi_name, &vendor);
555
556 return part;
557};
558
559static int efi_pstore_erase(enum pstore_type_id type, u64 id,
560 struct pstore_info *psi)
561{
562 efi_pstore_write(type, id, 0, psi);
563
564 return 0;
565}
566#else
567static int efi_pstore_open(struct pstore_info *psi)
568{
569 return 0;
570}
571
572static int efi_pstore_close(struct pstore_info *psi)
573{
574 return 0;
575}
576
577static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
578 struct timespec *time, struct pstore_info *psi)
579{
580 return -1;
581}
582
583static u64 efi_pstore_write(enum pstore_type_id type, unsigned int part,
584 size_t size, struct pstore_info *psi)
585{
586 return 0;
587}
588
589static int efi_pstore_erase(enum pstore_type_id type, u64 id,
590 struct pstore_info *psi)
591{
592 return 0;
593}
594#endif
595
596static struct pstore_info efi_pstore_info = {
597 .owner = THIS_MODULE,
598 .name = "efi",
599 .open = efi_pstore_open,
600 .close = efi_pstore_close,
601 .read = efi_pstore_read,
602 .write = efi_pstore_write,
603 .erase = efi_pstore_erase,
604};
396 605
397static ssize_t efivar_create(struct file *filp, struct kobject *kobj, 606static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
398 struct bin_attribute *bin_attr, 607 struct bin_attribute *bin_attr,
@@ -414,8 +623,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
414 * Does this variable already exist? 623 * Does this variable already exist?
415 */ 624 */
416 list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { 625 list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
417 strsize1 = utf8_strsize(search_efivar->var.VariableName, 1024); 626 strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
418 strsize2 = utf8_strsize(new_var->VariableName, 1024); 627 strsize2 = utf16_strsize(new_var->VariableName, 1024);
419 if (strsize1 == strsize2 && 628 if (strsize1 == strsize2 &&
420 !memcmp(&(search_efivar->var.VariableName), 629 !memcmp(&(search_efivar->var.VariableName),
421 new_var->VariableName, strsize1) && 630 new_var->VariableName, strsize1) &&
@@ -447,8 +656,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
447 656
448 /* Create the entry in sysfs. Locking is not required here */ 657 /* Create the entry in sysfs. Locking is not required here */
449 status = efivar_create_sysfs_entry(efivars, 658 status = efivar_create_sysfs_entry(efivars,
450 utf8_strsize(new_var->VariableName, 659 utf16_strsize(new_var->VariableName,
451 1024), 660 1024),
452 new_var->VariableName, 661 new_var->VariableName,
453 &new_var->VendorGuid); 662 &new_var->VendorGuid);
454 if (status) { 663 if (status) {
@@ -477,8 +686,8 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
477 * Does this variable already exist? 686 * Does this variable already exist?
478 */ 687 */
479 list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { 688 list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
480 strsize1 = utf8_strsize(search_efivar->var.VariableName, 1024); 689 strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
481 strsize2 = utf8_strsize(del_var->VariableName, 1024); 690 strsize2 = utf16_strsize(del_var->VariableName, 1024);
482 if (strsize1 == strsize2 && 691 if (strsize1 == strsize2 &&
483 !memcmp(&(search_efivar->var.VariableName), 692 !memcmp(&(search_efivar->var.VariableName),
484 del_var->VariableName, strsize1) && 693 del_var->VariableName, strsize1) &&
@@ -763,6 +972,16 @@ int register_efivars(struct efivars *efivars,
763 if (error) 972 if (error)
764 unregister_efivars(efivars); 973 unregister_efivars(efivars);
765 974
975 efivars->efi_pstore_info = efi_pstore_info;
976
977 efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
978 if (efivars->efi_pstore_info.buf) {
979 efivars->efi_pstore_info.bufsize = 1024;
980 efivars->efi_pstore_info.data = efivars;
981 mutex_init(&efivars->efi_pstore_info.buf_mutex);
982 pstore_register(&efivars->efi_pstore_info);
983 }
984
766out: 985out:
767 kfree(variable_name); 986 kfree(variable_name);
768 987
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 68810fd1a59d..aa83de9db1b9 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -420,7 +420,7 @@ static efi_status_t gsmi_get_next_variable(unsigned long *name_size,
420 420
421static efi_status_t gsmi_set_variable(efi_char16_t *name, 421static efi_status_t gsmi_set_variable(efi_char16_t *name,
422 efi_guid_t *vendor, 422 efi_guid_t *vendor,
423 unsigned long attr, 423 u32 attr,
424 unsigned long data_size, 424 unsigned long data_size,
425 void *data) 425 void *data)
426{ 426{
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 363498697c2c..d539efd96d4b 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -103,6 +103,22 @@ config GPIO_MPC5200
103 def_bool y 103 def_bool y
104 depends on PPC_MPC52xx 104 depends on PPC_MPC52xx
105 105
106config GPIO_MSM_V1
107 tristate "Qualcomm MSM GPIO v1"
108 depends on GPIOLIB && ARCH_MSM
109 help
110 Say yes here to support the GPIO interface on ARM v6 based
111 Qualcomm MSM chips. Most of the pins on the MSM can be
112 selected for GPIO, and are controlled by this driver.
113
114config GPIO_MSM_V2
115 tristate "Qualcomm MSM GPIO v2"
116 depends on GPIOLIB && ARCH_MSM
117 help
118 Say yes here to support the GPIO interface on ARM v7 based
119 Qualcomm MSM chips. Most of the pins on the MSM can be
120 selected for GPIO, and are controlled by this driver.
121
106config GPIO_MXC 122config GPIO_MXC
107 def_bool y 123 def_bool y
108 depends on ARCH_MXC 124 depends on ARCH_MXC
@@ -280,6 +296,12 @@ config GPIO_TC3589X
280 This enables support for the GPIOs found on the TC3589X 296 This enables support for the GPIOs found on the TC3589X
281 I/O Expander. 297 I/O Expander.
282 298
299config GPIO_TPS65912
300 tristate "TI TPS65912 GPIO"
301 depends on (MFD_TPS65912_I2C || MFD_TPS65912_SPI)
302 help
303 This driver supports TPS65912 gpio chip
304
283config GPIO_TWL4030 305config GPIO_TWL4030
284 tristate "TWL4030, TWL5030, and TPS659x0 GPIOs" 306 tristate "TWL4030, TWL5030, and TPS659x0 GPIOs"
285 depends on TWL4030_CORE 307 depends on TWL4030_CORE
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 720711251391..9588948c96f0 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -27,6 +27,8 @@ obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
27obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o 27obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
28obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o 28obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
29obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o 29obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
30obj-$(CONFIG_GPIO_MSM_V1) += gpio-msm-v1.o
31obj-$(CONFIG_GPIO_MSM_V2) += gpio-msm-v2.o
30obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o 32obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
31obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o 33obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
32obj-$(CONFIG_PLAT_NOMADIK) += gpio-nomadik.o 34obj-$(CONFIG_PLAT_NOMADIK) += gpio-nomadik.o
@@ -48,6 +50,7 @@ obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
48obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o 50obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
49obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o 51obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
50obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o 52obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
53obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
51obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o 54obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
52obj-$(CONFIG_MACH_U300) += gpio-u300.o 55obj-$(CONFIG_MACH_U300) += gpio-u300.o
53obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o 56obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
diff --git a/drivers/gpio/gpio-ab8500.c b/drivers/gpio/gpio-ab8500.c
index ed795e64eea7..050c05d91896 100644
--- a/drivers/gpio/gpio-ab8500.c
+++ b/drivers/gpio/gpio-ab8500.c
@@ -516,5 +516,5 @@ module_exit(ab8500_gpio_exit);
516 516
517MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>"); 517MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>");
518MODULE_DESCRIPTION("Driver allows to use AB8500 unused pins to be used as GPIO"); 518MODULE_DESCRIPTION("Driver allows to use AB8500 unused pins to be used as GPIO");
519MODULE_ALIAS("AB8500 GPIO driver"); 519MODULE_ALIAS("platform:ab8500-gpio");
520MODULE_LICENSE("GPL v2"); 520MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-msm-v1.c b/drivers/gpio/gpio-msm-v1.c
new file mode 100644
index 000000000000..52a4d4286eba
--- /dev/null
+++ b/drivers/gpio/gpio-msm-v1.c
@@ -0,0 +1,636 @@
1/*
2 * Copyright (C) 2007 Google, Inc.
3 * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#include <linux/bitops.h>
17#include <linux/gpio.h>
18#include <linux/interrupt.h>
19#include <linux/io.h>
20#include <linux/irq.h>
21#include <linux/module.h>
22#include <mach/cpu.h>
23#include <mach/msm_gpiomux.h>
24#include <mach/msm_iomap.h>
25
26/* see 80-VA736-2 Rev C pp 695-751
27**
28** These are actually the *shadow* gpio registers, since the
29** real ones (which allow full access) are only available to the
30** ARM9 side of the world.
31**
32** Since the _BASE need to be page-aligned when we're mapping them
33** to virtual addresses, adjust for the additional offset in these
34** macros.
35*/
36
37#define MSM_GPIO1_REG(off) (MSM_GPIO1_BASE + (off))
38#define MSM_GPIO2_REG(off) (MSM_GPIO2_BASE + 0x400 + (off))
39#define MSM_GPIO1_SHADOW_REG(off) (MSM_GPIO1_BASE + 0x800 + (off))
40#define MSM_GPIO2_SHADOW_REG(off) (MSM_GPIO2_BASE + 0xC00 + (off))
41
42/*
43 * MSM7X00 registers
44 */
45/* output value */
46#define MSM7X00_GPIO_OUT_0 MSM_GPIO1_SHADOW_REG(0x00) /* gpio 15-0 */
47#define MSM7X00_GPIO_OUT_1 MSM_GPIO2_SHADOW_REG(0x00) /* gpio 42-16 */
48#define MSM7X00_GPIO_OUT_2 MSM_GPIO1_SHADOW_REG(0x04) /* gpio 67-43 */
49#define MSM7X00_GPIO_OUT_3 MSM_GPIO1_SHADOW_REG(0x08) /* gpio 94-68 */
50#define MSM7X00_GPIO_OUT_4 MSM_GPIO1_SHADOW_REG(0x0C) /* gpio 106-95 */
51#define MSM7X00_GPIO_OUT_5 MSM_GPIO1_SHADOW_REG(0x50) /* gpio 107-121 */
52
53/* same pin map as above, output enable */
54#define MSM7X00_GPIO_OE_0 MSM_GPIO1_SHADOW_REG(0x10)
55#define MSM7X00_GPIO_OE_1 MSM_GPIO2_SHADOW_REG(0x08)
56#define MSM7X00_GPIO_OE_2 MSM_GPIO1_SHADOW_REG(0x14)
57#define MSM7X00_GPIO_OE_3 MSM_GPIO1_SHADOW_REG(0x18)
58#define MSM7X00_GPIO_OE_4 MSM_GPIO1_SHADOW_REG(0x1C)
59#define MSM7X00_GPIO_OE_5 MSM_GPIO1_SHADOW_REG(0x54)
60
61/* same pin map as above, input read */
62#define MSM7X00_GPIO_IN_0 MSM_GPIO1_SHADOW_REG(0x34)
63#define MSM7X00_GPIO_IN_1 MSM_GPIO2_SHADOW_REG(0x20)
64#define MSM7X00_GPIO_IN_2 MSM_GPIO1_SHADOW_REG(0x38)
65#define MSM7X00_GPIO_IN_3 MSM_GPIO1_SHADOW_REG(0x3C)
66#define MSM7X00_GPIO_IN_4 MSM_GPIO1_SHADOW_REG(0x40)
67#define MSM7X00_GPIO_IN_5 MSM_GPIO1_SHADOW_REG(0x44)
68
69/* same pin map as above, 1=edge 0=level interrup */
70#define MSM7X00_GPIO_INT_EDGE_0 MSM_GPIO1_SHADOW_REG(0x60)
71#define MSM7X00_GPIO_INT_EDGE_1 MSM_GPIO2_SHADOW_REG(0x50)
72#define MSM7X00_GPIO_INT_EDGE_2 MSM_GPIO1_SHADOW_REG(0x64)
73#define MSM7X00_GPIO_INT_EDGE_3 MSM_GPIO1_SHADOW_REG(0x68)
74#define MSM7X00_GPIO_INT_EDGE_4 MSM_GPIO1_SHADOW_REG(0x6C)
75#define MSM7X00_GPIO_INT_EDGE_5 MSM_GPIO1_SHADOW_REG(0xC0)
76
77/* same pin map as above, 1=positive 0=negative */
78#define MSM7X00_GPIO_INT_POS_0 MSM_GPIO1_SHADOW_REG(0x70)
79#define MSM7X00_GPIO_INT_POS_1 MSM_GPIO2_SHADOW_REG(0x58)
80#define MSM7X00_GPIO_INT_POS_2 MSM_GPIO1_SHADOW_REG(0x74)
81#define MSM7X00_GPIO_INT_POS_3 MSM_GPIO1_SHADOW_REG(0x78)
82#define MSM7X00_GPIO_INT_POS_4 MSM_GPIO1_SHADOW_REG(0x7C)
83#define MSM7X00_GPIO_INT_POS_5 MSM_GPIO1_SHADOW_REG(0xBC)
84
85/* same pin map as above, interrupt enable */
86#define MSM7X00_GPIO_INT_EN_0 MSM_GPIO1_SHADOW_REG(0x80)
87#define MSM7X00_GPIO_INT_EN_1 MSM_GPIO2_SHADOW_REG(0x60)
88#define MSM7X00_GPIO_INT_EN_2 MSM_GPIO1_SHADOW_REG(0x84)
89#define MSM7X00_GPIO_INT_EN_3 MSM_GPIO1_SHADOW_REG(0x88)
90#define MSM7X00_GPIO_INT_EN_4 MSM_GPIO1_SHADOW_REG(0x8C)
91#define MSM7X00_GPIO_INT_EN_5 MSM_GPIO1_SHADOW_REG(0xB8)
92
93/* same pin map as above, write 1 to clear interrupt */
94#define MSM7X00_GPIO_INT_CLEAR_0 MSM_GPIO1_SHADOW_REG(0x90)
95#define MSM7X00_GPIO_INT_CLEAR_1 MSM_GPIO2_SHADOW_REG(0x68)
96#define MSM7X00_GPIO_INT_CLEAR_2 MSM_GPIO1_SHADOW_REG(0x94)
97#define MSM7X00_GPIO_INT_CLEAR_3 MSM_GPIO1_SHADOW_REG(0x98)
98#define MSM7X00_GPIO_INT_CLEAR_4 MSM_GPIO1_SHADOW_REG(0x9C)
99#define MSM7X00_GPIO_INT_CLEAR_5 MSM_GPIO1_SHADOW_REG(0xB4)
100
101/* same pin map as above, 1=interrupt pending */
102#define MSM7X00_GPIO_INT_STATUS_0 MSM_GPIO1_SHADOW_REG(0xA0)
103#define MSM7X00_GPIO_INT_STATUS_1 MSM_GPIO2_SHADOW_REG(0x70)
104#define MSM7X00_GPIO_INT_STATUS_2 MSM_GPIO1_SHADOW_REG(0xA4)
105#define MSM7X00_GPIO_INT_STATUS_3 MSM_GPIO1_SHADOW_REG(0xA8)
106#define MSM7X00_GPIO_INT_STATUS_4 MSM_GPIO1_SHADOW_REG(0xAC)
107#define MSM7X00_GPIO_INT_STATUS_5 MSM_GPIO1_SHADOW_REG(0xB0)
108
109/*
110 * QSD8X50 registers
111 */
112/* output value */
113#define QSD8X50_GPIO_OUT_0 MSM_GPIO1_SHADOW_REG(0x00) /* gpio 15-0 */
114#define QSD8X50_GPIO_OUT_1 MSM_GPIO2_SHADOW_REG(0x00) /* gpio 42-16 */
115#define QSD8X50_GPIO_OUT_2 MSM_GPIO1_SHADOW_REG(0x04) /* gpio 67-43 */
116#define QSD8X50_GPIO_OUT_3 MSM_GPIO1_SHADOW_REG(0x08) /* gpio 94-68 */
117#define QSD8X50_GPIO_OUT_4 MSM_GPIO1_SHADOW_REG(0x0C) /* gpio 103-95 */
118#define QSD8X50_GPIO_OUT_5 MSM_GPIO1_SHADOW_REG(0x10) /* gpio 121-104 */
119#define QSD8X50_GPIO_OUT_6 MSM_GPIO1_SHADOW_REG(0x14) /* gpio 152-122 */
120#define QSD8X50_GPIO_OUT_7 MSM_GPIO1_SHADOW_REG(0x18) /* gpio 164-153 */
121
122/* same pin map as above, output enable */
123#define QSD8X50_GPIO_OE_0 MSM_GPIO1_SHADOW_REG(0x20)
124#define QSD8X50_GPIO_OE_1 MSM_GPIO2_SHADOW_REG(0x08)
125#define QSD8X50_GPIO_OE_2 MSM_GPIO1_SHADOW_REG(0x24)
126#define QSD8X50_GPIO_OE_3 MSM_GPIO1_SHADOW_REG(0x28)
127#define QSD8X50_GPIO_OE_4 MSM_GPIO1_SHADOW_REG(0x2C)
128#define QSD8X50_GPIO_OE_5 MSM_GPIO1_SHADOW_REG(0x30)
129#define QSD8X50_GPIO_OE_6 MSM_GPIO1_SHADOW_REG(0x34)
130#define QSD8X50_GPIO_OE_7 MSM_GPIO1_SHADOW_REG(0x38)
131
132/* same pin map as above, input read */
133#define QSD8X50_GPIO_IN_0 MSM_GPIO1_SHADOW_REG(0x50)
134#define QSD8X50_GPIO_IN_1 MSM_GPIO2_SHADOW_REG(0x20)
135#define QSD8X50_GPIO_IN_2 MSM_GPIO1_SHADOW_REG(0x54)
136#define QSD8X50_GPIO_IN_3 MSM_GPIO1_SHADOW_REG(0x58)
137#define QSD8X50_GPIO_IN_4 MSM_GPIO1_SHADOW_REG(0x5C)
138#define QSD8X50_GPIO_IN_5 MSM_GPIO1_SHADOW_REG(0x60)
139#define QSD8X50_GPIO_IN_6 MSM_GPIO1_SHADOW_REG(0x64)
140#define QSD8X50_GPIO_IN_7 MSM_GPIO1_SHADOW_REG(0x68)
141
142/* same pin map as above, 1=edge 0=level interrup */
143#define QSD8X50_GPIO_INT_EDGE_0 MSM_GPIO1_SHADOW_REG(0x70)
144#define QSD8X50_GPIO_INT_EDGE_1 MSM_GPIO2_SHADOW_REG(0x50)
145#define QSD8X50_GPIO_INT_EDGE_2 MSM_GPIO1_SHADOW_REG(0x74)
146#define QSD8X50_GPIO_INT_EDGE_3 MSM_GPIO1_SHADOW_REG(0x78)
147#define QSD8X50_GPIO_INT_EDGE_4 MSM_GPIO1_SHADOW_REG(0x7C)
148#define QSD8X50_GPIO_INT_EDGE_5 MSM_GPIO1_SHADOW_REG(0x80)
149#define QSD8X50_GPIO_INT_EDGE_6 MSM_GPIO1_SHADOW_REG(0x84)
150#define QSD8X50_GPIO_INT_EDGE_7 MSM_GPIO1_SHADOW_REG(0x88)
151
152/* same pin map as above, 1=positive 0=negative */
153#define QSD8X50_GPIO_INT_POS_0 MSM_GPIO1_SHADOW_REG(0x90)
154#define QSD8X50_GPIO_INT_POS_1 MSM_GPIO2_SHADOW_REG(0x58)
155#define QSD8X50_GPIO_INT_POS_2 MSM_GPIO1_SHADOW_REG(0x94)
156#define QSD8X50_GPIO_INT_POS_3 MSM_GPIO1_SHADOW_REG(0x98)
157#define QSD8X50_GPIO_INT_POS_4 MSM_GPIO1_SHADOW_REG(0x9C)
158#define QSD8X50_GPIO_INT_POS_5 MSM_GPIO1_SHADOW_REG(0xA0)
159#define QSD8X50_GPIO_INT_POS_6 MSM_GPIO1_SHADOW_REG(0xA4)
160#define QSD8X50_GPIO_INT_POS_7 MSM_GPIO1_SHADOW_REG(0xA8)
161
162/* same pin map as above, interrupt enable */
163#define QSD8X50_GPIO_INT_EN_0 MSM_GPIO1_SHADOW_REG(0xB0)
164#define QSD8X50_GPIO_INT_EN_1 MSM_GPIO2_SHADOW_REG(0x60)
165#define QSD8X50_GPIO_INT_EN_2 MSM_GPIO1_SHADOW_REG(0xB4)
166#define QSD8X50_GPIO_INT_EN_3 MSM_GPIO1_SHADOW_REG(0xB8)
167#define QSD8X50_GPIO_INT_EN_4 MSM_GPIO1_SHADOW_REG(0xBC)
168#define QSD8X50_GPIO_INT_EN_5 MSM_GPIO1_SHADOW_REG(0xC0)
169#define QSD8X50_GPIO_INT_EN_6 MSM_GPIO1_SHADOW_REG(0xC4)
170#define QSD8X50_GPIO_INT_EN_7 MSM_GPIO1_SHADOW_REG(0xC8)
171
172/* same pin map as above, write 1 to clear interrupt */
173#define QSD8X50_GPIO_INT_CLEAR_0 MSM_GPIO1_SHADOW_REG(0xD0)
174#define QSD8X50_GPIO_INT_CLEAR_1 MSM_GPIO2_SHADOW_REG(0x68)
175#define QSD8X50_GPIO_INT_CLEAR_2 MSM_GPIO1_SHADOW_REG(0xD4)
176#define QSD8X50_GPIO_INT_CLEAR_3 MSM_GPIO1_SHADOW_REG(0xD8)
177#define QSD8X50_GPIO_INT_CLEAR_4 MSM_GPIO1_SHADOW_REG(0xDC)
178#define QSD8X50_GPIO_INT_CLEAR_5 MSM_GPIO1_SHADOW_REG(0xE0)
179#define QSD8X50_GPIO_INT_CLEAR_6 MSM_GPIO1_SHADOW_REG(0xE4)
180#define QSD8X50_GPIO_INT_CLEAR_7 MSM_GPIO1_SHADOW_REG(0xE8)
181
182/* same pin map as above, 1=interrupt pending */
183#define QSD8X50_GPIO_INT_STATUS_0 MSM_GPIO1_SHADOW_REG(0xF0)
184#define QSD8X50_GPIO_INT_STATUS_1 MSM_GPIO2_SHADOW_REG(0x70)
185#define QSD8X50_GPIO_INT_STATUS_2 MSM_GPIO1_SHADOW_REG(0xF4)
186#define QSD8X50_GPIO_INT_STATUS_3 MSM_GPIO1_SHADOW_REG(0xF8)
187#define QSD8X50_GPIO_INT_STATUS_4 MSM_GPIO1_SHADOW_REG(0xFC)
188#define QSD8X50_GPIO_INT_STATUS_5 MSM_GPIO1_SHADOW_REG(0x100)
189#define QSD8X50_GPIO_INT_STATUS_6 MSM_GPIO1_SHADOW_REG(0x104)
190#define QSD8X50_GPIO_INT_STATUS_7 MSM_GPIO1_SHADOW_REG(0x108)
191
192/*
193 * MSM7X30 registers
194 */
195/* output value */
196#define MSM7X30_GPIO_OUT_0 MSM_GPIO1_REG(0x00) /* gpio 15-0 */
197#define MSM7X30_GPIO_OUT_1 MSM_GPIO2_REG(0x00) /* gpio 43-16 */
198#define MSM7X30_GPIO_OUT_2 MSM_GPIO1_REG(0x04) /* gpio 67-44 */
199#define MSM7X30_GPIO_OUT_3 MSM_GPIO1_REG(0x08) /* gpio 94-68 */
200#define MSM7X30_GPIO_OUT_4 MSM_GPIO1_REG(0x0C) /* gpio 106-95 */
201#define MSM7X30_GPIO_OUT_5 MSM_GPIO1_REG(0x50) /* gpio 133-107 */
202#define MSM7X30_GPIO_OUT_6 MSM_GPIO1_REG(0xC4) /* gpio 150-134 */
203#define MSM7X30_GPIO_OUT_7 MSM_GPIO1_REG(0x214) /* gpio 181-151 */
204
205/* same pin map as above, output enable */
206#define MSM7X30_GPIO_OE_0 MSM_GPIO1_REG(0x10)
207#define MSM7X30_GPIO_OE_1 MSM_GPIO2_REG(0x08)
208#define MSM7X30_GPIO_OE_2 MSM_GPIO1_REG(0x14)
209#define MSM7X30_GPIO_OE_3 MSM_GPIO1_REG(0x18)
210#define MSM7X30_GPIO_OE_4 MSM_GPIO1_REG(0x1C)
211#define MSM7X30_GPIO_OE_5 MSM_GPIO1_REG(0x54)
212#define MSM7X30_GPIO_OE_6 MSM_GPIO1_REG(0xC8)
213#define MSM7X30_GPIO_OE_7 MSM_GPIO1_REG(0x218)
214
215/* same pin map as above, input read */
216#define MSM7X30_GPIO_IN_0 MSM_GPIO1_REG(0x34)
217#define MSM7X30_GPIO_IN_1 MSM_GPIO2_REG(0x20)
218#define MSM7X30_GPIO_IN_2 MSM_GPIO1_REG(0x38)
219#define MSM7X30_GPIO_IN_3 MSM_GPIO1_REG(0x3C)
220#define MSM7X30_GPIO_IN_4 MSM_GPIO1_REG(0x40)
221#define MSM7X30_GPIO_IN_5 MSM_GPIO1_REG(0x44)
222#define MSM7X30_GPIO_IN_6 MSM_GPIO1_REG(0xCC)
223#define MSM7X30_GPIO_IN_7 MSM_GPIO1_REG(0x21C)
224
225/* same pin map as above, 1=edge 0=level interrup */
226#define MSM7X30_GPIO_INT_EDGE_0 MSM_GPIO1_REG(0x60)
227#define MSM7X30_GPIO_INT_EDGE_1 MSM_GPIO2_REG(0x50)
228#define MSM7X30_GPIO_INT_EDGE_2 MSM_GPIO1_REG(0x64)
229#define MSM7X30_GPIO_INT_EDGE_3 MSM_GPIO1_REG(0x68)
230#define MSM7X30_GPIO_INT_EDGE_4 MSM_GPIO1_REG(0x6C)
231#define MSM7X30_GPIO_INT_EDGE_5 MSM_GPIO1_REG(0xC0)
232#define MSM7X30_GPIO_INT_EDGE_6 MSM_GPIO1_REG(0xD0)
233#define MSM7X30_GPIO_INT_EDGE_7 MSM_GPIO1_REG(0x240)
234
235/* same pin map as above, 1=positive 0=negative */
236#define MSM7X30_GPIO_INT_POS_0 MSM_GPIO1_REG(0x70)
237#define MSM7X30_GPIO_INT_POS_1 MSM_GPIO2_REG(0x58)
238#define MSM7X30_GPIO_INT_POS_2 MSM_GPIO1_REG(0x74)
239#define MSM7X30_GPIO_INT_POS_3 MSM_GPIO1_REG(0x78)
240#define MSM7X30_GPIO_INT_POS_4 MSM_GPIO1_REG(0x7C)
241#define MSM7X30_GPIO_INT_POS_5 MSM_GPIO1_REG(0xBC)
242#define MSM7X30_GPIO_INT_POS_6 MSM_GPIO1_REG(0xD4)
243#define MSM7X30_GPIO_INT_POS_7 MSM_GPIO1_REG(0x228)
244
245/* same pin map as above, interrupt enable */
246#define MSM7X30_GPIO_INT_EN_0 MSM_GPIO1_REG(0x80)
247#define MSM7X30_GPIO_INT_EN_1 MSM_GPIO2_REG(0x60)
248#define MSM7X30_GPIO_INT_EN_2 MSM_GPIO1_REG(0x84)
249#define MSM7X30_GPIO_INT_EN_3 MSM_GPIO1_REG(0x88)
250#define MSM7X30_GPIO_INT_EN_4 MSM_GPIO1_REG(0x8C)
251#define MSM7X30_GPIO_INT_EN_5 MSM_GPIO1_REG(0xB8)
252#define MSM7X30_GPIO_INT_EN_6 MSM_GPIO1_REG(0xD8)
253#define MSM7X30_GPIO_INT_EN_7 MSM_GPIO1_REG(0x22C)
254
255/* same pin map as above, write 1 to clear interrupt */
256#define MSM7X30_GPIO_INT_CLEAR_0 MSM_GPIO1_REG(0x90)
257#define MSM7X30_GPIO_INT_CLEAR_1 MSM_GPIO2_REG(0x68)
258#define MSM7X30_GPIO_INT_CLEAR_2 MSM_GPIO1_REG(0x94)
259#define MSM7X30_GPIO_INT_CLEAR_3 MSM_GPIO1_REG(0x98)
260#define MSM7X30_GPIO_INT_CLEAR_4 MSM_GPIO1_REG(0x9C)
261#define MSM7X30_GPIO_INT_CLEAR_5 MSM_GPIO1_REG(0xB4)
262#define MSM7X30_GPIO_INT_CLEAR_6 MSM_GPIO1_REG(0xDC)
263#define MSM7X30_GPIO_INT_CLEAR_7 MSM_GPIO1_REG(0x230)
264
265/* same pin map as above, 1=interrupt pending */
266#define MSM7X30_GPIO_INT_STATUS_0 MSM_GPIO1_REG(0xA0)
267#define MSM7X30_GPIO_INT_STATUS_1 MSM_GPIO2_REG(0x70)
268#define MSM7X30_GPIO_INT_STATUS_2 MSM_GPIO1_REG(0xA4)
269#define MSM7X30_GPIO_INT_STATUS_3 MSM_GPIO1_REG(0xA8)
270#define MSM7X30_GPIO_INT_STATUS_4 MSM_GPIO1_REG(0xAC)
271#define MSM7X30_GPIO_INT_STATUS_5 MSM_GPIO1_REG(0xB0)
272#define MSM7X30_GPIO_INT_STATUS_6 MSM_GPIO1_REG(0xE0)
273#define MSM7X30_GPIO_INT_STATUS_7 MSM_GPIO1_REG(0x234)
274
275#define FIRST_GPIO_IRQ MSM_GPIO_TO_INT(0)
276
277#define MSM_GPIO_BANK(soc, bank, first, last) \
278 { \
279 .regs = { \
280 .out = soc##_GPIO_OUT_##bank, \
281 .in = soc##_GPIO_IN_##bank, \
282 .int_status = soc##_GPIO_INT_STATUS_##bank, \
283 .int_clear = soc##_GPIO_INT_CLEAR_##bank, \
284 .int_en = soc##_GPIO_INT_EN_##bank, \
285 .int_edge = soc##_GPIO_INT_EDGE_##bank, \
286 .int_pos = soc##_GPIO_INT_POS_##bank, \
287 .oe = soc##_GPIO_OE_##bank, \
288 }, \
289 .chip = { \
290 .base = (first), \
291 .ngpio = (last) - (first) + 1, \
292 .get = msm_gpio_get, \
293 .set = msm_gpio_set, \
294 .direction_input = msm_gpio_direction_input, \
295 .direction_output = msm_gpio_direction_output, \
296 .to_irq = msm_gpio_to_irq, \
297 .request = msm_gpio_request, \
298 .free = msm_gpio_free, \
299 } \
300 }
301
302#define MSM_GPIO_BROKEN_INT_CLEAR 1
303
304struct msm_gpio_regs {
305 void __iomem *out;
306 void __iomem *in;
307 void __iomem *int_status;
308 void __iomem *int_clear;
309 void __iomem *int_en;
310 void __iomem *int_edge;
311 void __iomem *int_pos;
312 void __iomem *oe;
313};
314
315struct msm_gpio_chip {
316 spinlock_t lock;
317 struct gpio_chip chip;
318 struct msm_gpio_regs regs;
319#if MSM_GPIO_BROKEN_INT_CLEAR
320 unsigned int_status_copy;
321#endif
322 unsigned int both_edge_detect;
323 unsigned int int_enable[2]; /* 0: awake, 1: sleep */
324};
325
326static int msm_gpio_write(struct msm_gpio_chip *msm_chip,
327 unsigned offset, unsigned on)
328{
329 unsigned mask = BIT(offset);
330 unsigned val;
331
332 val = readl(msm_chip->regs.out);
333 if (on)
334 writel(val | mask, msm_chip->regs.out);
335 else
336 writel(val & ~mask, msm_chip->regs.out);
337 return 0;
338}
339
340static void msm_gpio_update_both_edge_detect(struct msm_gpio_chip *msm_chip)
341{
342 int loop_limit = 100;
343 unsigned pol, val, val2, intstat;
344 do {
345 val = readl(msm_chip->regs.in);
346 pol = readl(msm_chip->regs.int_pos);
347 pol = (pol & ~msm_chip->both_edge_detect) |
348 (~val & msm_chip->both_edge_detect);
349 writel(pol, msm_chip->regs.int_pos);
350 intstat = readl(msm_chip->regs.int_status);
351 val2 = readl(msm_chip->regs.in);
352 if (((val ^ val2) & msm_chip->both_edge_detect & ~intstat) == 0)
353 return;
354 } while (loop_limit-- > 0);
355 printk(KERN_ERR "msm_gpio_update_both_edge_detect, "
356 "failed to reach stable state %x != %x\n", val, val2);
357}
358
359static int msm_gpio_clear_detect_status(struct msm_gpio_chip *msm_chip,
360 unsigned offset)
361{
362 unsigned bit = BIT(offset);
363
364#if MSM_GPIO_BROKEN_INT_CLEAR
365 /* Save interrupts that already triggered before we loose them. */
366 /* Any interrupt that triggers between the read of int_status */
367 /* and the write to int_clear will still be lost though. */
368 msm_chip->int_status_copy |= readl(msm_chip->regs.int_status);
369 msm_chip->int_status_copy &= ~bit;
370#endif
371 writel(bit, msm_chip->regs.int_clear);
372 msm_gpio_update_both_edge_detect(msm_chip);
373 return 0;
374}
375
376static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
377{
378 struct msm_gpio_chip *msm_chip;
379 unsigned long irq_flags;
380
381 msm_chip = container_of(chip, struct msm_gpio_chip, chip);
382 spin_lock_irqsave(&msm_chip->lock, irq_flags);
383 writel(readl(msm_chip->regs.oe) & ~BIT(offset), msm_chip->regs.oe);
384 spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
385 return 0;
386}
387
388static int
389msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value)
390{
391 struct msm_gpio_chip *msm_chip;
392 unsigned long irq_flags;
393
394 msm_chip = container_of(chip, struct msm_gpio_chip, chip);
395 spin_lock_irqsave(&msm_chip->lock, irq_flags);
396 msm_gpio_write(msm_chip, offset, value);
397 writel(readl(msm_chip->regs.oe) | BIT(offset), msm_chip->regs.oe);
398 spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
399 return 0;
400}
401
402static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
403{
404 struct msm_gpio_chip *msm_chip;
405
406 msm_chip = container_of(chip, struct msm_gpio_chip, chip);
407 return (readl(msm_chip->regs.in) & (1U << offset)) ? 1 : 0;
408}
409
410static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
411{
412 struct msm_gpio_chip *msm_chip;
413 unsigned long irq_flags;
414
415 msm_chip = container_of(chip, struct msm_gpio_chip, chip);
416 spin_lock_irqsave(&msm_chip->lock, irq_flags);
417 msm_gpio_write(msm_chip, offset, value);
418 spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
419}
420
421static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
422{
423 return MSM_GPIO_TO_INT(chip->base + offset);
424}
425
426#ifdef CONFIG_MSM_GPIOMUX
427static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
428{
429 return msm_gpiomux_get(chip->base + offset);
430}
431
432static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
433{
434 msm_gpiomux_put(chip->base + offset);
435}
436#else
437#define msm_gpio_request NULL
438#define msm_gpio_free NULL
439#endif
440
441static struct msm_gpio_chip *msm_gpio_chips;
442static int msm_gpio_count;
443
444static struct msm_gpio_chip msm_gpio_chips_msm7x01[] = {
445 MSM_GPIO_BANK(MSM7X00, 0, 0, 15),
446 MSM_GPIO_BANK(MSM7X00, 1, 16, 42),
447 MSM_GPIO_BANK(MSM7X00, 2, 43, 67),
448 MSM_GPIO_BANK(MSM7X00, 3, 68, 94),
449 MSM_GPIO_BANK(MSM7X00, 4, 95, 106),
450 MSM_GPIO_BANK(MSM7X00, 5, 107, 121),
451};
452
453static struct msm_gpio_chip msm_gpio_chips_msm7x30[] = {
454 MSM_GPIO_BANK(MSM7X30, 0, 0, 15),
455 MSM_GPIO_BANK(MSM7X30, 1, 16, 43),
456 MSM_GPIO_BANK(MSM7X30, 2, 44, 67),
457 MSM_GPIO_BANK(MSM7X30, 3, 68, 94),
458 MSM_GPIO_BANK(MSM7X30, 4, 95, 106),
459 MSM_GPIO_BANK(MSM7X30, 5, 107, 133),
460 MSM_GPIO_BANK(MSM7X30, 6, 134, 150),
461 MSM_GPIO_BANK(MSM7X30, 7, 151, 181),
462};
463
464static struct msm_gpio_chip msm_gpio_chips_qsd8x50[] = {
465 MSM_GPIO_BANK(QSD8X50, 0, 0, 15),
466 MSM_GPIO_BANK(QSD8X50, 1, 16, 42),
467 MSM_GPIO_BANK(QSD8X50, 2, 43, 67),
468 MSM_GPIO_BANK(QSD8X50, 3, 68, 94),
469 MSM_GPIO_BANK(QSD8X50, 4, 95, 103),
470 MSM_GPIO_BANK(QSD8X50, 5, 104, 121),
471 MSM_GPIO_BANK(QSD8X50, 6, 122, 152),
472 MSM_GPIO_BANK(QSD8X50, 7, 153, 164),
473};
474
475static void msm_gpio_irq_ack(struct irq_data *d)
476{
477 unsigned long irq_flags;
478 struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
479 spin_lock_irqsave(&msm_chip->lock, irq_flags);
480 msm_gpio_clear_detect_status(msm_chip,
481 d->irq - gpio_to_irq(msm_chip->chip.base));
482 spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
483}
484
485static void msm_gpio_irq_mask(struct irq_data *d)
486{
487 unsigned long irq_flags;
488 struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
489 unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
490
491 spin_lock_irqsave(&msm_chip->lock, irq_flags);
492 /* level triggered interrupts are also latched */
493 if (!(readl(msm_chip->regs.int_edge) & BIT(offset)))
494 msm_gpio_clear_detect_status(msm_chip, offset);
495 msm_chip->int_enable[0] &= ~BIT(offset);
496 writel(msm_chip->int_enable[0], msm_chip->regs.int_en);
497 spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
498}
499
500static void msm_gpio_irq_unmask(struct irq_data *d)
501{
502 unsigned long irq_flags;
503 struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
504 unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
505
506 spin_lock_irqsave(&msm_chip->lock, irq_flags);
507 /* level triggered interrupts are also latched */
508 if (!(readl(msm_chip->regs.int_edge) & BIT(offset)))
509 msm_gpio_clear_detect_status(msm_chip, offset);
510 msm_chip->int_enable[0] |= BIT(offset);
511 writel(msm_chip->int_enable[0], msm_chip->regs.int_en);
512 spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
513}
514
515static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
516{
517 unsigned long irq_flags;
518 struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
519 unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
520
521 spin_lock_irqsave(&msm_chip->lock, irq_flags);
522
523 if (on)
524 msm_chip->int_enable[1] |= BIT(offset);
525 else
526 msm_chip->int_enable[1] &= ~BIT(offset);
527
528 spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
529 return 0;
530}
531
532static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
533{
534 unsigned long irq_flags;
535 struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
536 unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
537 unsigned val, mask = BIT(offset);
538
539 spin_lock_irqsave(&msm_chip->lock, irq_flags);
540 val = readl(msm_chip->regs.int_edge);
541 if (flow_type & IRQ_TYPE_EDGE_BOTH) {
542 writel(val | mask, msm_chip->regs.int_edge);
543 __irq_set_handler_locked(d->irq, handle_edge_irq);
544 } else {
545 writel(val & ~mask, msm_chip->regs.int_edge);
546 __irq_set_handler_locked(d->irq, handle_level_irq);
547 }
548 if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
549 msm_chip->both_edge_detect |= mask;
550 msm_gpio_update_both_edge_detect(msm_chip);
551 } else {
552 msm_chip->both_edge_detect &= ~mask;
553 val = readl(msm_chip->regs.int_pos);
554 if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH))
555 writel(val | mask, msm_chip->regs.int_pos);
556 else
557 writel(val & ~mask, msm_chip->regs.int_pos);
558 }
559 spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
560 return 0;
561}
562
563static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
564{
565 int i, j, mask;
566 unsigned val;
567
568 for (i = 0; i < msm_gpio_count; i++) {
569 struct msm_gpio_chip *msm_chip = &msm_gpio_chips[i];
570 val = readl(msm_chip->regs.int_status);
571 val &= msm_chip->int_enable[0];
572 while (val) {
573 mask = val & -val;
574 j = fls(mask) - 1;
575 /* printk("%s %08x %08x bit %d gpio %d irq %d\n",
576 __func__, v, m, j, msm_chip->chip.start + j,
577 FIRST_GPIO_IRQ + msm_chip->chip.start + j); */
578 val &= ~mask;
579 generic_handle_irq(FIRST_GPIO_IRQ +
580 msm_chip->chip.base + j);
581 }
582 }
583 desc->irq_data.chip->irq_ack(&desc->irq_data);
584}
585
586static struct irq_chip msm_gpio_irq_chip = {
587 .name = "msmgpio",
588 .irq_ack = msm_gpio_irq_ack,
589 .irq_mask = msm_gpio_irq_mask,
590 .irq_unmask = msm_gpio_irq_unmask,
591 .irq_set_wake = msm_gpio_irq_set_wake,
592 .irq_set_type = msm_gpio_irq_set_type,
593};
594
595static int __init msm_init_gpio(void)
596{
597 int i, j = 0;
598
599 if (cpu_is_msm7x01()) {
600 msm_gpio_chips = msm_gpio_chips_msm7x01;
601 msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_msm7x01);
602 } else if (cpu_is_msm7x30()) {
603 msm_gpio_chips = msm_gpio_chips_msm7x30;
604 msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_msm7x30);
605 } else if (cpu_is_qsd8x50()) {
606 msm_gpio_chips = msm_gpio_chips_qsd8x50;
607 msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_qsd8x50);
608 } else {
609 return 0;
610 }
611
612 for (i = FIRST_GPIO_IRQ; i < FIRST_GPIO_IRQ + NR_GPIO_IRQS; i++) {
613 if (i - FIRST_GPIO_IRQ >=
614 msm_gpio_chips[j].chip.base +
615 msm_gpio_chips[j].chip.ngpio)
616 j++;
617 irq_set_chip_data(i, &msm_gpio_chips[j]);
618 irq_set_chip_and_handler(i, &msm_gpio_irq_chip,
619 handle_edge_irq);
620 set_irq_flags(i, IRQF_VALID);
621 }
622
623 for (i = 0; i < msm_gpio_count; i++) {
624 spin_lock_init(&msm_gpio_chips[i].lock);
625 writel(0, msm_gpio_chips[i].regs.int_en);
626 gpiochip_add(&msm_gpio_chips[i].chip);
627 }
628
629 irq_set_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler);
630 irq_set_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler);
631 irq_set_irq_wake(INT_GPIO_GROUP1, 1);
632 irq_set_irq_wake(INT_GPIO_GROUP2, 2);
633 return 0;
634}
635
636postcore_initcall(msm_init_gpio);
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
new file mode 100644
index 000000000000..5cb1227d69cf
--- /dev/null
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -0,0 +1,433 @@
1/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
15 * 02110-1301, USA.
16 *
17 */
18#define pr_fmt(fmt) "%s: " fmt, __func__
19
20#include <linux/bitmap.h>
21#include <linux/bitops.h>
22#include <linux/gpio.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/io.h>
26#include <linux/irq.h>
27#include <linux/module.h>
28#include <linux/platform_device.h>
29#include <linux/spinlock.h>
30
31#include <asm/mach/irq.h>
32
33#include <mach/msm_gpiomux.h>
34#include <mach/msm_iomap.h>
35
36/* Bits of interest in the GPIO_IN_OUT register.
37 */
38enum {
39 GPIO_IN = 0,
40 GPIO_OUT = 1
41};
42
43/* Bits of interest in the GPIO_INTR_STATUS register.
44 */
45enum {
46 INTR_STATUS = 0,
47};
48
49/* Bits of interest in the GPIO_CFG register.
50 */
51enum {
52 GPIO_OE = 9,
53};
54
55/* Bits of interest in the GPIO_INTR_CFG register.
56 * When a GPIO triggers, two separate decisions are made, controlled
57 * by two separate flags.
58 *
59 * - First, INTR_RAW_STATUS_EN controls whether or not the GPIO_INTR_STATUS
60 * register for that GPIO will be updated to reflect the triggering of that
61 * gpio. If this bit is 0, this register will not be updated.
62 * - Second, INTR_ENABLE controls whether an interrupt is triggered.
63 *
64 * If INTR_ENABLE is set and INTR_RAW_STATUS_EN is NOT set, an interrupt
65 * can be triggered but the status register will not reflect it.
66 */
67enum {
68 INTR_ENABLE = 0,
69 INTR_POL_CTL = 1,
70 INTR_DECT_CTL = 2,
71 INTR_RAW_STATUS_EN = 3,
72};
73
74/* Codes of interest in GPIO_INTR_CFG_SU.
75 */
76enum {
77 TARGET_PROC_SCORPION = 4,
78 TARGET_PROC_NONE = 7,
79};
80
81
82#define GPIO_INTR_CFG_SU(gpio) (MSM_TLMM_BASE + 0x0400 + (0x04 * (gpio)))
83#define GPIO_CONFIG(gpio) (MSM_TLMM_BASE + 0x1000 + (0x10 * (gpio)))
84#define GPIO_IN_OUT(gpio) (MSM_TLMM_BASE + 0x1004 + (0x10 * (gpio)))
85#define GPIO_INTR_CFG(gpio) (MSM_TLMM_BASE + 0x1008 + (0x10 * (gpio)))
86#define GPIO_INTR_STATUS(gpio) (MSM_TLMM_BASE + 0x100c + (0x10 * (gpio)))
87
88/**
89 * struct msm_gpio_dev: the MSM8660 SoC GPIO device structure
90 *
91 * @enabled_irqs: a bitmap used to optimize the summary-irq handler. By
92 * keeping track of which gpios are unmasked as irq sources, we avoid
93 * having to do readl calls on hundreds of iomapped registers each time
94 * the summary interrupt fires in order to locate the active interrupts.
95 *
96 * @wake_irqs: a bitmap for tracking which interrupt lines are enabled
97 * as wakeup sources. When the device is suspended, interrupts which are
98 * not wakeup sources are disabled.
99 *
100 * @dual_edge_irqs: a bitmap used to track which irqs are configured
101 * as dual-edge, as this is not supported by the hardware and requires
102 * some special handling in the driver.
103 */
104struct msm_gpio_dev {
105 struct gpio_chip gpio_chip;
106 DECLARE_BITMAP(enabled_irqs, NR_GPIO_IRQS);
107 DECLARE_BITMAP(wake_irqs, NR_GPIO_IRQS);
108 DECLARE_BITMAP(dual_edge_irqs, NR_GPIO_IRQS);
109};
110
111static DEFINE_SPINLOCK(tlmm_lock);
112
113static inline struct msm_gpio_dev *to_msm_gpio_dev(struct gpio_chip *chip)
114{
115 return container_of(chip, struct msm_gpio_dev, gpio_chip);
116}
117
118static inline void set_gpio_bits(unsigned n, void __iomem *reg)
119{
120 writel(readl(reg) | n, reg);
121}
122
123static inline void clear_gpio_bits(unsigned n, void __iomem *reg)
124{
125 writel(readl(reg) & ~n, reg);
126}
127
128static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
129{
130 return readl(GPIO_IN_OUT(offset)) & BIT(GPIO_IN);
131}
132
133static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
134{
135 writel(val ? BIT(GPIO_OUT) : 0, GPIO_IN_OUT(offset));
136}
137
138static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
139{
140 unsigned long irq_flags;
141
142 spin_lock_irqsave(&tlmm_lock, irq_flags);
143 clear_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset));
144 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
145 return 0;
146}
147
148static int msm_gpio_direction_output(struct gpio_chip *chip,
149 unsigned offset,
150 int val)
151{
152 unsigned long irq_flags;
153
154 spin_lock_irqsave(&tlmm_lock, irq_flags);
155 msm_gpio_set(chip, offset, val);
156 set_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset));
157 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
158 return 0;
159}
160
161static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
162{
163 return msm_gpiomux_get(chip->base + offset);
164}
165
166static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
167{
168 msm_gpiomux_put(chip->base + offset);
169}
170
171static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
172{
173 return MSM_GPIO_TO_INT(chip->base + offset);
174}
175
176static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
177{
178 return irq - MSM_GPIO_TO_INT(chip->base);
179}
180
181static struct msm_gpio_dev msm_gpio = {
182 .gpio_chip = {
183 .base = 0,
184 .ngpio = NR_GPIO_IRQS,
185 .direction_input = msm_gpio_direction_input,
186 .direction_output = msm_gpio_direction_output,
187 .get = msm_gpio_get,
188 .set = msm_gpio_set,
189 .to_irq = msm_gpio_to_irq,
190 .request = msm_gpio_request,
191 .free = msm_gpio_free,
192 },
193};
194
195/* For dual-edge interrupts in software, since the hardware has no
196 * such support:
197 *
198 * At appropriate moments, this function may be called to flip the polarity
199 * settings of both-edge irq lines to try and catch the next edge.
200 *
201 * The attempt is considered successful if:
202 * - the status bit goes high, indicating that an edge was caught, or
203 * - the input value of the gpio doesn't change during the attempt.
204 * If the value changes twice during the process, that would cause the first
205 * test to fail but would force the second, as two opposite
206 * transitions would cause a detection no matter the polarity setting.
207 *
208 * The do-loop tries to sledge-hammer closed the timing hole between
209 * the initial value-read and the polarity-write - if the line value changes
210 * during that window, an interrupt is lost, the new polarity setting is
211 * incorrect, and the first success test will fail, causing a retry.
212 *
213 * Algorithm comes from Google's msmgpio driver, see mach-msm/gpio.c.
214 */
215static void msm_gpio_update_dual_edge_pos(unsigned gpio)
216{
217 int loop_limit = 100;
218 unsigned val, val2, intstat;
219
220 do {
221 val = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN);
222 if (val)
223 clear_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio));
224 else
225 set_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio));
226 val2 = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN);
227 intstat = readl(GPIO_INTR_STATUS(gpio)) & BIT(INTR_STATUS);
228 if (intstat || val == val2)
229 return;
230 } while (loop_limit-- > 0);
231 pr_err("dual-edge irq failed to stabilize, "
232 "interrupts dropped. %#08x != %#08x\n",
233 val, val2);
234}
235
236static void msm_gpio_irq_ack(struct irq_data *d)
237{
238 int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
239
240 writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio));
241 if (test_bit(gpio, msm_gpio.dual_edge_irqs))
242 msm_gpio_update_dual_edge_pos(gpio);
243}
244
245static void msm_gpio_irq_mask(struct irq_data *d)
246{
247 int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
248 unsigned long irq_flags;
249
250 spin_lock_irqsave(&tlmm_lock, irq_flags);
251 writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
252 clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
253 __clear_bit(gpio, msm_gpio.enabled_irqs);
254 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
255}
256
257static void msm_gpio_irq_unmask(struct irq_data *d)
258{
259 int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
260 unsigned long irq_flags;
261
262 spin_lock_irqsave(&tlmm_lock, irq_flags);
263 __set_bit(gpio, msm_gpio.enabled_irqs);
264 set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
265 writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
266 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
267}
268
269static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
270{
271 int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
272 unsigned long irq_flags;
273 uint32_t bits;
274
275 spin_lock_irqsave(&tlmm_lock, irq_flags);
276
277 bits = readl(GPIO_INTR_CFG(gpio));
278
279 if (flow_type & IRQ_TYPE_EDGE_BOTH) {
280 bits |= BIT(INTR_DECT_CTL);
281 __irq_set_handler_locked(d->irq, handle_edge_irq);
282 if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
283 __set_bit(gpio, msm_gpio.dual_edge_irqs);
284 else
285 __clear_bit(gpio, msm_gpio.dual_edge_irqs);
286 } else {
287 bits &= ~BIT(INTR_DECT_CTL);
288 __irq_set_handler_locked(d->irq, handle_level_irq);
289 __clear_bit(gpio, msm_gpio.dual_edge_irqs);
290 }
291
292 if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
293 bits |= BIT(INTR_POL_CTL);
294 else
295 bits &= ~BIT(INTR_POL_CTL);
296
297 writel(bits, GPIO_INTR_CFG(gpio));
298
299 if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
300 msm_gpio_update_dual_edge_pos(gpio);
301
302 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
303
304 return 0;
305}
306
307/*
308 * When the summary IRQ is raised, any number of GPIO lines may be high.
309 * It is the job of the summary handler to find all those GPIO lines
310 * which have been set as summary IRQ lines and which are triggered,
311 * and to call their interrupt handlers.
312 */
313static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc)
314{
315 unsigned long i;
316 struct irq_chip *chip = irq_desc_get_chip(desc);
317
318 chained_irq_enter(chip, desc);
319
320 for (i = find_first_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS);
321 i < NR_GPIO_IRQS;
322 i = find_next_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS, i + 1)) {
323 if (readl(GPIO_INTR_STATUS(i)) & BIT(INTR_STATUS))
324 generic_handle_irq(msm_gpio_to_irq(&msm_gpio.gpio_chip,
325 i));
326 }
327
328 chained_irq_exit(chip, desc);
329}
330
331static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
332{
333 int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
334
335 if (on) {
336 if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS))
337 irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 1);
338 set_bit(gpio, msm_gpio.wake_irqs);
339 } else {
340 clear_bit(gpio, msm_gpio.wake_irqs);
341 if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS))
342 irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 0);
343 }
344
345 return 0;
346}
347
348static struct irq_chip msm_gpio_irq_chip = {
349 .name = "msmgpio",
350 .irq_mask = msm_gpio_irq_mask,
351 .irq_unmask = msm_gpio_irq_unmask,
352 .irq_ack = msm_gpio_irq_ack,
353 .irq_set_type = msm_gpio_irq_set_type,
354 .irq_set_wake = msm_gpio_irq_set_wake,
355};
356
357static int __devinit msm_gpio_probe(struct platform_device *dev)
358{
359 int i, irq, ret;
360
361 bitmap_zero(msm_gpio.enabled_irqs, NR_GPIO_IRQS);
362 bitmap_zero(msm_gpio.wake_irqs, NR_GPIO_IRQS);
363 bitmap_zero(msm_gpio.dual_edge_irqs, NR_GPIO_IRQS);
364 msm_gpio.gpio_chip.label = dev->name;
365 ret = gpiochip_add(&msm_gpio.gpio_chip);
366 if (ret < 0)
367 return ret;
368
369 for (i = 0; i < msm_gpio.gpio_chip.ngpio; ++i) {
370 irq = msm_gpio_to_irq(&msm_gpio.gpio_chip, i);
371 irq_set_chip_and_handler(irq, &msm_gpio_irq_chip,
372 handle_level_irq);
373 set_irq_flags(irq, IRQF_VALID);
374 }
375
376 irq_set_chained_handler(TLMM_SCSS_SUMMARY_IRQ,
377 msm_summary_irq_handler);
378 return 0;
379}
380
381static int __devexit msm_gpio_remove(struct platform_device *dev)
382{
383 int ret = gpiochip_remove(&msm_gpio.gpio_chip);
384
385 if (ret < 0)
386 return ret;
387
388 irq_set_handler(TLMM_SCSS_SUMMARY_IRQ, NULL);
389
390 return 0;
391}
392
393static struct platform_driver msm_gpio_driver = {
394 .probe = msm_gpio_probe,
395 .remove = __devexit_p(msm_gpio_remove),
396 .driver = {
397 .name = "msmgpio",
398 .owner = THIS_MODULE,
399 },
400};
401
402static struct platform_device msm_device_gpio = {
403 .name = "msmgpio",
404 .id = -1,
405};
406
407static int __init msm_gpio_init(void)
408{
409 int rc;
410
411 rc = platform_driver_register(&msm_gpio_driver);
412 if (!rc) {
413 rc = platform_device_register(&msm_device_gpio);
414 if (rc)
415 platform_driver_unregister(&msm_gpio_driver);
416 }
417
418 return rc;
419}
420
421static void __exit msm_gpio_exit(void)
422{
423 platform_device_unregister(&msm_device_gpio);
424 platform_driver_unregister(&msm_gpio_driver);
425}
426
427postcore_initcall(msm_gpio_init);
428module_exit(msm_gpio_exit);
429
430MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>");
431MODULE_DESCRIPTION("Driver for Qualcomm MSM TLMMv2 SoC GPIOs");
432MODULE_LICENSE("GPL v2");
433MODULE_ALIAS("platform:msmgpio");
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
new file mode 100644
index 000000000000..79e66c002350
--- /dev/null
+++ b/drivers/gpio/gpio-tps65912.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright 2011 Texas Instruments Inc.
3 *
4 * Author: Margarita Olaya <magi@slimlogic.co.uk>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * This driver is based on wm8350 implementation.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/errno.h>
17#include <linux/gpio.h>
18#include <linux/mfd/core.h>
19#include <linux/platform_device.h>
20#include <linux/seq_file.h>
21#include <linux/slab.h>
22#include <linux/mfd/tps65912.h>
23
24struct tps65912_gpio_data {
25 struct tps65912 *tps65912;
26 struct gpio_chip gpio_chip;
27};
28
29static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
30{
31 struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
32 int val;
33
34 val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
35
36 if (val & GPIO_STS_MASK)
37 return 1;
38
39 return 0;
40}
41
42static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
43 int value)
44{
45 struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
46
47 if (value)
48 tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
49 GPIO_SET_MASK);
50 else
51 tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
52 GPIO_SET_MASK);
53}
54
55static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
56 int value)
57{
58 struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
59
60 /* Set the initial value */
61 tps65912_gpio_set(gc, offset, value);
62
63 return tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
64 GPIO_CFG_MASK);
65}
66
67static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
68{
69 struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
70
71 return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
72 GPIO_CFG_MASK);
73
74}
75
76static struct gpio_chip template_chip = {
77 .label = "tps65912",
78 .owner = THIS_MODULE,
79 .direction_input = tps65912_gpio_input,
80 .direction_output = tps65912_gpio_output,
81 .get = tps65912_gpio_get,
82 .set = tps65912_gpio_set,
83 .can_sleep = 1,
84 .ngpio = 5,
85 .base = -1,
86};
87
88static int __devinit tps65912_gpio_probe(struct platform_device *pdev)
89{
90 struct tps65912 *tps65912 = dev_get_drvdata(pdev->dev.parent);
91 struct tps65912_board *pdata = tps65912->dev->platform_data;
92 struct tps65912_gpio_data *tps65912_gpio;
93 int ret;
94
95 tps65912_gpio = kzalloc(sizeof(*tps65912_gpio), GFP_KERNEL);
96 if (tps65912_gpio == NULL)
97 return -ENOMEM;
98
99 tps65912_gpio->tps65912 = tps65912;
100 tps65912_gpio->gpio_chip = template_chip;
101 tps65912_gpio->gpio_chip.dev = &pdev->dev;
102 if (pdata && pdata->gpio_base)
103 tps65912_gpio->gpio_chip.base = pdata->gpio_base;
104
105 ret = gpiochip_add(&tps65912_gpio->gpio_chip);
106 if (ret < 0) {
107 dev_err(&pdev->dev, "Failed to register gpiochip, %d\n", ret);
108 goto err;
109 }
110
111 platform_set_drvdata(pdev, tps65912_gpio);
112
113 return ret;
114
115err:
116 kfree(tps65912_gpio);
117 return ret;
118}
119
120static int __devexit tps65912_gpio_remove(struct platform_device *pdev)
121{
122 struct tps65912_gpio_data *tps65912_gpio = platform_get_drvdata(pdev);
123 int ret;
124
125 ret = gpiochip_remove(&tps65912_gpio->gpio_chip);
126 if (ret == 0)
127 kfree(tps65912_gpio);
128
129 return ret;
130}
131
132static struct platform_driver tps65912_gpio_driver = {
133 .driver = {
134 .name = "tps65912-gpio",
135 .owner = THIS_MODULE,
136 },
137 .probe = tps65912_gpio_probe,
138 .remove = __devexit_p(tps65912_gpio_remove),
139};
140
141static int __init tps65912_gpio_init(void)
142{
143 return platform_driver_register(&tps65912_gpio_driver);
144}
145subsys_initcall(tps65912_gpio_init);
146
147static void __exit tps65912_gpio_exit(void)
148{
149 platform_driver_unregister(&tps65912_gpio_driver);
150}
151module_exit(tps65912_gpio_exit);
152
153MODULE_AUTHOR("Margarita Olaya Cabrera <magi@slimlogic.co.uk>");
154MODULE_DESCRIPTION("GPIO interface for TPS65912 PMICs");
155MODULE_LICENSE("GPL v2");
156MODULE_ALIAS("platform:tps65912-gpio");
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 82db18506662..fe738f05309b 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -499,6 +499,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
499 mutex_lock(&dev->mode_config.mutex); 499 mutex_lock(&dev->mode_config.mutex);
500 drm_mode_object_put(dev, &connector->base); 500 drm_mode_object_put(dev, &connector->base);
501 list_del(&connector->head); 501 list_del(&connector->head);
502 dev->mode_config.num_connector--;
502 mutex_unlock(&dev->mode_config.mutex); 503 mutex_unlock(&dev->mode_config.mutex);
503} 504}
504EXPORT_SYMBOL(drm_connector_cleanup); 505EXPORT_SYMBOL(drm_connector_cleanup);
@@ -529,6 +530,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
529 mutex_lock(&dev->mode_config.mutex); 530 mutex_lock(&dev->mode_config.mutex);
530 drm_mode_object_put(dev, &encoder->base); 531 drm_mode_object_put(dev, &encoder->base);
531 list_del(&encoder->head); 532 list_del(&encoder->head);
533 dev->mode_config.num_encoder--;
532 mutex_unlock(&dev->mode_config.mutex); 534 mutex_unlock(&dev->mode_config.mutex);
533} 535}
534EXPORT_SYMBOL(drm_encoder_cleanup); 536EXPORT_SYMBOL(drm_encoder_cleanup);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 9d8c892d07c9..9d2668a50872 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -90,7 +90,6 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
90 struct drm_device *dev = minor->dev; 90 struct drm_device *dev = minor->dev;
91 struct dentry *ent; 91 struct dentry *ent;
92 struct drm_info_node *tmp; 92 struct drm_info_node *tmp;
93 char name[64];
94 int i, ret; 93 int i, ret;
95 94
96 for (i = 0; i < count; i++) { 95 for (i = 0; i < count; i++) {
@@ -108,6 +107,9 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
108 ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO, 107 ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
109 root, tmp, &drm_debugfs_fops); 108 root, tmp, &drm_debugfs_fops);
110 if (!ent) { 109 if (!ent) {
110 char name[64];
111 strncpy(name, root->d_name.name,
112 min(root->d_name.len, 64U));
111 DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n", 113 DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
112 name, files[i].name); 114 name, files[i].name);
113 kfree(tmp); 115 kfree(tmp);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 756af4d7ec74..7425e5c9bd75 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -127,6 +127,23 @@ static const u8 edid_header[] = {
127 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 127 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
128}; 128};
129 129
130 /*
131 * Sanity check the header of the base EDID block. Return 8 if the header
132 * is perfect, down to 0 if it's totally wrong.
133 */
134int drm_edid_header_is_valid(const u8 *raw_edid)
135{
136 int i, score = 0;
137
138 for (i = 0; i < sizeof(edid_header); i++)
139 if (raw_edid[i] == edid_header[i])
140 score++;
141
142 return score;
143}
144EXPORT_SYMBOL(drm_edid_header_is_valid);
145
146
130/* 147/*
131 * Sanity check the EDID block (base or extension). Return 0 if the block 148 * Sanity check the EDID block (base or extension). Return 0 if the block
132 * doesn't check out, or 1 if it's valid. 149 * doesn't check out, or 1 if it's valid.
@@ -139,12 +156,7 @@ drm_edid_block_valid(u8 *raw_edid)
139 struct edid *edid = (struct edid *)raw_edid; 156 struct edid *edid = (struct edid *)raw_edid;
140 157
141 if (raw_edid[0] == 0x00) { 158 if (raw_edid[0] == 0x00) {
142 int score = 0; 159 int score = drm_edid_header_is_valid(raw_edid);
143
144 for (i = 0; i < sizeof(edid_header); i++)
145 if (raw_edid[i] == edid_header[i])
146 score++;
147
148 if (score == 8) ; 160 if (score == 8) ;
149 else if (score >= 6) { 161 else if (score >= 6) {
150 DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); 162 DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
@@ -1439,6 +1451,8 @@ EXPORT_SYMBOL(drm_detect_monitor_audio);
1439static void drm_add_display_info(struct edid *edid, 1451static void drm_add_display_info(struct edid *edid,
1440 struct drm_display_info *info) 1452 struct drm_display_info *info)
1441{ 1453{
1454 u8 *edid_ext;
1455
1442 info->width_mm = edid->width_cm * 10; 1456 info->width_mm = edid->width_cm * 10;
1443 info->height_mm = edid->height_cm * 10; 1457 info->height_mm = edid->height_cm * 10;
1444 1458
@@ -1483,6 +1497,13 @@ static void drm_add_display_info(struct edid *edid,
1483 info->color_formats = DRM_COLOR_FORMAT_YCRCB444; 1497 info->color_formats = DRM_COLOR_FORMAT_YCRCB444;
1484 if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422) 1498 if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422)
1485 info->color_formats = DRM_COLOR_FORMAT_YCRCB422; 1499 info->color_formats = DRM_COLOR_FORMAT_YCRCB422;
1500
1501 /* Get data from CEA blocks if present */
1502 edid_ext = drm_find_cea_extension(edid);
1503 if (!edid_ext)
1504 return;
1505
1506 info->cea_rev = edid_ext[1];
1486} 1507}
1487 1508
1488/** 1509/**
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 802b61ac3139..f7c6854eb4dd 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -256,7 +256,6 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
256{ 256{
257 printk(KERN_ERR "panic occurred, switching back to text console\n"); 257 printk(KERN_ERR "panic occurred, switching back to text console\n");
258 return drm_fb_helper_force_kernel_mode(); 258 return drm_fb_helper_force_kernel_mode();
259 return 0;
260} 259}
261EXPORT_SYMBOL(drm_fb_helper_panic); 260EXPORT_SYMBOL(drm_fb_helper_panic);
262 261
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 2022a5c966bb..3830e9e478c0 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -291,11 +291,14 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state)
291 if (!dev->irq_enabled) 291 if (!dev->irq_enabled)
292 return; 292 return;
293 293
294 if (state) 294 if (state) {
295 dev->driver->irq_uninstall(dev); 295 if (dev->driver->irq_uninstall)
296 else { 296 dev->driver->irq_uninstall(dev);
297 dev->driver->irq_preinstall(dev); 297 } else {
298 dev->driver->irq_postinstall(dev); 298 if (dev->driver->irq_preinstall)
299 dev->driver->irq_preinstall(dev);
300 if (dev->driver->irq_postinstall)
301 dev->driver->irq_postinstall(dev);
299 } 302 }
300} 303}
301 304
@@ -338,7 +341,8 @@ int drm_irq_install(struct drm_device *dev)
338 DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); 341 DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
339 342
340 /* Before installing handler */ 343 /* Before installing handler */
341 dev->driver->irq_preinstall(dev); 344 if (dev->driver->irq_preinstall)
345 dev->driver->irq_preinstall(dev);
342 346
343 /* Install handler */ 347 /* Install handler */
344 if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) 348 if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
@@ -363,11 +367,16 @@ int drm_irq_install(struct drm_device *dev)
363 vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL); 367 vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
364 368
365 /* After installing handler */ 369 /* After installing handler */
366 ret = dev->driver->irq_postinstall(dev); 370 if (dev->driver->irq_postinstall)
371 ret = dev->driver->irq_postinstall(dev);
372
367 if (ret < 0) { 373 if (ret < 0) {
368 mutex_lock(&dev->struct_mutex); 374 mutex_lock(&dev->struct_mutex);
369 dev->irq_enabled = 0; 375 dev->irq_enabled = 0;
370 mutex_unlock(&dev->struct_mutex); 376 mutex_unlock(&dev->struct_mutex);
377 if (!drm_core_check_feature(dev, DRIVER_MODESET))
378 vga_client_register(dev->pdev, NULL, NULL, NULL);
379 free_irq(drm_dev_to_irq(dev), dev);
371 } 380 }
372 381
373 return ret; 382 return ret;
@@ -413,7 +422,8 @@ int drm_irq_uninstall(struct drm_device *dev)
413 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 422 if (!drm_core_check_feature(dev, DRIVER_MODESET))
414 vga_client_register(dev->pdev, NULL, NULL, NULL); 423 vga_client_register(dev->pdev, NULL, NULL, NULL);
415 424
416 dev->driver->irq_uninstall(dev); 425 if (dev->driver->irq_uninstall)
426 dev->driver->irq_uninstall(dev);
417 427
418 free_irq(drm_dev_to_irq(dev), dev); 428 free_irq(drm_dev_to_irq(dev), dev);
419 429
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e2662497d50f..3c395a59da35 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
499 seq_printf(m, "Interrupts received: %d\n", 499 seq_printf(m, "Interrupts received: %d\n",
500 atomic_read(&dev_priv->irq_received)); 500 atomic_read(&dev_priv->irq_received));
501 for (i = 0; i < I915_NUM_RINGS; i++) { 501 for (i = 0; i < I915_NUM_RINGS; i++) {
502 if (IS_GEN6(dev)) { 502 if (IS_GEN6(dev) || IS_GEN7(dev)) {
503 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", 503 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
504 dev_priv->ring[i].name, 504 dev_priv->ring[i].name,
505 I915_READ_IMR(&dev_priv->ring[i])); 505 I915_READ_IMR(&dev_priv->ring[i]));
@@ -1338,6 +1338,155 @@ static const struct file_operations i915_wedged_fops = {
1338 .llseek = default_llseek, 1338 .llseek = default_llseek,
1339}; 1339};
1340 1340
1341static int
1342i915_max_freq_open(struct inode *inode,
1343 struct file *filp)
1344{
1345 filp->private_data = inode->i_private;
1346 return 0;
1347}
1348
1349static ssize_t
1350i915_max_freq_read(struct file *filp,
1351 char __user *ubuf,
1352 size_t max,
1353 loff_t *ppos)
1354{
1355 struct drm_device *dev = filp->private_data;
1356 drm_i915_private_t *dev_priv = dev->dev_private;
1357 char buf[80];
1358 int len;
1359
1360 len = snprintf(buf, sizeof (buf),
1361 "max freq: %d\n", dev_priv->max_delay * 50);
1362
1363 if (len > sizeof (buf))
1364 len = sizeof (buf);
1365
1366 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1367}
1368
1369static ssize_t
1370i915_max_freq_write(struct file *filp,
1371 const char __user *ubuf,
1372 size_t cnt,
1373 loff_t *ppos)
1374{
1375 struct drm_device *dev = filp->private_data;
1376 struct drm_i915_private *dev_priv = dev->dev_private;
1377 char buf[20];
1378 int val = 1;
1379
1380 if (cnt > 0) {
1381 if (cnt > sizeof (buf) - 1)
1382 return -EINVAL;
1383
1384 if (copy_from_user(buf, ubuf, cnt))
1385 return -EFAULT;
1386 buf[cnt] = 0;
1387
1388 val = simple_strtoul(buf, NULL, 0);
1389 }
1390
1391 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
1392
1393 /*
1394 * Turbo will still be enabled, but won't go above the set value.
1395 */
1396 dev_priv->max_delay = val / 50;
1397
1398 gen6_set_rps(dev, val / 50);
1399
1400 return cnt;
1401}
1402
1403static const struct file_operations i915_max_freq_fops = {
1404 .owner = THIS_MODULE,
1405 .open = i915_max_freq_open,
1406 .read = i915_max_freq_read,
1407 .write = i915_max_freq_write,
1408 .llseek = default_llseek,
1409};
1410
1411static int
1412i915_cache_sharing_open(struct inode *inode,
1413 struct file *filp)
1414{
1415 filp->private_data = inode->i_private;
1416 return 0;
1417}
1418
1419static ssize_t
1420i915_cache_sharing_read(struct file *filp,
1421 char __user *ubuf,
1422 size_t max,
1423 loff_t *ppos)
1424{
1425 struct drm_device *dev = filp->private_data;
1426 drm_i915_private_t *dev_priv = dev->dev_private;
1427 char buf[80];
1428 u32 snpcr;
1429 int len;
1430
1431 mutex_lock(&dev_priv->dev->struct_mutex);
1432 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1433 mutex_unlock(&dev_priv->dev->struct_mutex);
1434
1435 len = snprintf(buf, sizeof (buf),
1436 "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
1437 GEN6_MBC_SNPCR_SHIFT);
1438
1439 if (len > sizeof (buf))
1440 len = sizeof (buf);
1441
1442 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1443}
1444
1445static ssize_t
1446i915_cache_sharing_write(struct file *filp,
1447 const char __user *ubuf,
1448 size_t cnt,
1449 loff_t *ppos)
1450{
1451 struct drm_device *dev = filp->private_data;
1452 struct drm_i915_private *dev_priv = dev->dev_private;
1453 char buf[20];
1454 u32 snpcr;
1455 int val = 1;
1456
1457 if (cnt > 0) {
1458 if (cnt > sizeof (buf) - 1)
1459 return -EINVAL;
1460
1461 if (copy_from_user(buf, ubuf, cnt))
1462 return -EFAULT;
1463 buf[cnt] = 0;
1464
1465 val = simple_strtoul(buf, NULL, 0);
1466 }
1467
1468 if (val < 0 || val > 3)
1469 return -EINVAL;
1470
1471 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);
1472
1473 /* Update the cache sharing policy here as well */
1474 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1475 snpcr &= ~GEN6_MBC_SNPCR_MASK;
1476 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
1477 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
1478
1479 return cnt;
1480}
1481
1482static const struct file_operations i915_cache_sharing_fops = {
1483 .owner = THIS_MODULE,
1484 .open = i915_cache_sharing_open,
1485 .read = i915_cache_sharing_read,
1486 .write = i915_cache_sharing_write,
1487 .llseek = default_llseek,
1488};
1489
1341/* As the drm_debugfs_init() routines are called before dev->dev_private is 1490/* As the drm_debugfs_init() routines are called before dev->dev_private is
1342 * allocated we need to hook into the minor for release. */ 1491 * allocated we need to hook into the minor for release. */
1343static int 1492static int
@@ -1437,6 +1586,36 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
1437 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 1586 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
1438} 1587}
1439 1588
1589static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
1590{
1591 struct drm_device *dev = minor->dev;
1592 struct dentry *ent;
1593
1594 ent = debugfs_create_file("i915_max_freq",
1595 S_IRUGO | S_IWUSR,
1596 root, dev,
1597 &i915_max_freq_fops);
1598 if (IS_ERR(ent))
1599 return PTR_ERR(ent);
1600
1601 return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
1602}
1603
1604static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
1605{
1606 struct drm_device *dev = minor->dev;
1607 struct dentry *ent;
1608
1609 ent = debugfs_create_file("i915_cache_sharing",
1610 S_IRUGO | S_IWUSR,
1611 root, dev,
1612 &i915_cache_sharing_fops);
1613 if (IS_ERR(ent))
1614 return PTR_ERR(ent);
1615
1616 return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
1617}
1618
1440static struct drm_info_list i915_debugfs_list[] = { 1619static struct drm_info_list i915_debugfs_list[] = {
1441 {"i915_capabilities", i915_capabilities, 0}, 1620 {"i915_capabilities", i915_capabilities, 0},
1442 {"i915_gem_objects", i915_gem_object_info, 0}, 1621 {"i915_gem_objects", i915_gem_object_info, 0},
@@ -1490,6 +1669,12 @@ int i915_debugfs_init(struct drm_minor *minor)
1490 ret = i915_forcewake_create(minor->debugfs_root, minor); 1669 ret = i915_forcewake_create(minor->debugfs_root, minor);
1491 if (ret) 1670 if (ret)
1492 return ret; 1671 return ret;
1672 ret = i915_max_freq_create(minor->debugfs_root, minor);
1673 if (ret)
1674 return ret;
1675 ret = i915_cache_sharing_create(minor->debugfs_root, minor);
1676 if (ret)
1677 return ret;
1493 1678
1494 return drm_debugfs_create_files(i915_debugfs_list, 1679 return drm_debugfs_create_files(i915_debugfs_list,
1495 I915_DEBUGFS_ENTRIES, 1680 I915_DEBUGFS_ENTRIES,
@@ -1504,6 +1689,10 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1504 1, minor); 1689 1, minor);
1505 drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, 1690 drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
1506 1, minor); 1691 1, minor);
1692 drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
1693 1, minor);
1694 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
1695 1, minor);
1507} 1696}
1508 1697
1509#endif /* CONFIG_DEBUG_FS */ 1698#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 12712824a6d2..8a3942c4f099 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -61,7 +61,6 @@ static void i915_write_hws_pga(struct drm_device *dev)
61static int i915_init_phys_hws(struct drm_device *dev) 61static int i915_init_phys_hws(struct drm_device *dev)
62{ 62{
63 drm_i915_private_t *dev_priv = dev->dev_private; 63 drm_i915_private_t *dev_priv = dev->dev_private;
64 struct intel_ring_buffer *ring = LP_RING(dev_priv);
65 64
66 /* Program Hardware Status Page */ 65 /* Program Hardware Status Page */
67 dev_priv->status_page_dmah = 66 dev_priv->status_page_dmah =
@@ -71,10 +70,9 @@ static int i915_init_phys_hws(struct drm_device *dev)
71 DRM_ERROR("Can not allocate hardware status page\n"); 70 DRM_ERROR("Can not allocate hardware status page\n");
72 return -ENOMEM; 71 return -ENOMEM;
73 } 72 }
74 ring->status_page.page_addr =
75 (void __force __iomem *)dev_priv->status_page_dmah->vaddr;
76 73
77 memset_io(ring->status_page.page_addr, 0, PAGE_SIZE); 74 memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
75 0, PAGE_SIZE);
78 76
79 i915_write_hws_pga(dev); 77 i915_write_hws_pga(dev);
80 78
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6867e193d85e..7916bd97d5c1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -36,6 +36,7 @@
36#include <linux/io-mapping.h> 36#include <linux/io-mapping.h>
37#include <linux/i2c.h> 37#include <linux/i2c.h>
38#include <drm/intel-gtt.h> 38#include <drm/intel-gtt.h>
39#include <linux/backlight.h>
39 40
40/* General customization: 41/* General customization:
41 */ 42 */
@@ -544,6 +545,7 @@ typedef struct drm_i915_private {
544 u32 savePIPEB_LINK_M1; 545 u32 savePIPEB_LINK_M1;
545 u32 savePIPEB_LINK_N1; 546 u32 savePIPEB_LINK_N1;
546 u32 saveMCHBAR_RENDER_STANDBY; 547 u32 saveMCHBAR_RENDER_STANDBY;
548 u32 savePCH_PORT_HOTPLUG;
547 549
548 struct { 550 struct {
549 /** Bridge to intel-gtt-ko */ 551 /** Bridge to intel-gtt-ko */
@@ -689,6 +691,7 @@ typedef struct drm_i915_private {
689 int child_dev_num; 691 int child_dev_num;
690 struct child_device_config *child_dev; 692 struct child_device_config *child_dev;
691 struct drm_connector *int_lvds_connector; 693 struct drm_connector *int_lvds_connector;
694 struct drm_connector *int_edp_connector;
692 695
693 bool mchbar_need_disable; 696 bool mchbar_need_disable;
694 697
@@ -722,6 +725,8 @@ typedef struct drm_i915_private {
722 /* list of fbdev register on this device */ 725 /* list of fbdev register on this device */
723 struct intel_fbdev *fbdev; 726 struct intel_fbdev *fbdev;
724 727
728 struct backlight_device *backlight;
729
725 struct drm_property *broadcast_rgb_property; 730 struct drm_property *broadcast_rgb_property;
726 struct drm_property *force_audio_property; 731 struct drm_property *force_audio_property;
727 732
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d1cd8b89f47d..a546a71fb060 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3112,7 +3112,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3112 3112
3113 if (pipelined != obj->ring) { 3113 if (pipelined != obj->ring) {
3114 ret = i915_gem_object_wait_rendering(obj); 3114 ret = i915_gem_object_wait_rendering(obj);
3115 if (ret) 3115 if (ret == -ERESTARTSYS)
3116 return ret; 3116 return ret;
3117 } 3117 }
3118 3118
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 23d1ae67d279..9cbb0cd8f46a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -306,12 +306,15 @@ static void i915_hotplug_work_func(struct work_struct *work)
306 struct drm_mode_config *mode_config = &dev->mode_config; 306 struct drm_mode_config *mode_config = &dev->mode_config;
307 struct intel_encoder *encoder; 307 struct intel_encoder *encoder;
308 308
309 mutex_lock(&mode_config->mutex);
309 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 310 DRM_DEBUG_KMS("running encoder hotplug functions\n");
310 311
311 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) 312 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
312 if (encoder->hot_plug) 313 if (encoder->hot_plug)
313 encoder->hot_plug(encoder); 314 encoder->hot_plug(encoder);
314 315
316 mutex_unlock(&mode_config->mutex);
317
315 /* Just fire off a uevent and let userspace tell us what to do */ 318 /* Just fire off a uevent and let userspace tell us what to do */
316 drm_helper_hpd_irq_event(dev); 319 drm_helper_hpd_irq_event(dev);
317} 320}
@@ -2055,8 +2058,10 @@ void intel_irq_init(struct drm_device *dev)
2055 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 2058 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2056 } 2059 }
2057 2060
2058 2061 if (drm_core_check_feature(dev, DRIVER_MODESET))
2059 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 2062 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
2063 else
2064 dev->driver->get_vblank_timestamp = NULL;
2060 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 2065 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2061 2066
2062 if (IS_IVYBRIDGE(dev)) { 2067 if (IS_IVYBRIDGE(dev)) {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 02db299f621a..542453f7498c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -78,6 +78,14 @@
78#define GRDOM_RENDER (1<<2) 78#define GRDOM_RENDER (1<<2)
79#define GRDOM_MEDIA (3<<2) 79#define GRDOM_MEDIA (3<<2)
80 80
81#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
82#define GEN6_MBC_SNPCR_SHIFT 21
83#define GEN6_MBC_SNPCR_MASK (3<<21)
84#define GEN6_MBC_SNPCR_MAX (0<<21)
85#define GEN6_MBC_SNPCR_MED (1<<21)
86#define GEN6_MBC_SNPCR_LOW (2<<21)
87#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
88
81#define GEN6_GDRST 0x941c 89#define GEN6_GDRST 0x941c
82#define GEN6_GRDOM_FULL (1 << 0) 90#define GEN6_GRDOM_FULL (1 << 0)
83#define GEN6_GRDOM_RENDER (1 << 1) 91#define GEN6_GRDOM_RENDER (1 << 1)
@@ -367,6 +375,7 @@
367# define MI_FLUSH_ENABLE (1 << 11) 375# define MI_FLUSH_ENABLE (1 << 11)
368 376
369#define GFX_MODE 0x02520 377#define GFX_MODE 0x02520
378#define GFX_MODE_GEN7 0x0229c
370#define GFX_RUN_LIST_ENABLE (1<<15) 379#define GFX_RUN_LIST_ENABLE (1<<15)
371#define GFX_TLB_INVALIDATE_ALWAYS (1<<13) 380#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
372#define GFX_SURFACE_FAULT_ENABLE (1<<12) 381#define GFX_SURFACE_FAULT_ENABLE (1<<12)
@@ -374,6 +383,9 @@
374#define GFX_PSMI_GRANULARITY (1<<10) 383#define GFX_PSMI_GRANULARITY (1<<10)
375#define GFX_PPGTT_ENABLE (1<<9) 384#define GFX_PPGTT_ENABLE (1<<9)
376 385
386#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
387#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
388
377#define SCPD0 0x0209c /* 915+ only */ 389#define SCPD0 0x0209c /* 915+ only */
378#define IER 0x020a0 390#define IER 0x020a0
379#define IIR 0x020a4 391#define IIR 0x020a4
@@ -1310,6 +1322,7 @@
1310#define ADPA_PIPE_SELECT_MASK (1<<30) 1322#define ADPA_PIPE_SELECT_MASK (1<<30)
1311#define ADPA_PIPE_A_SELECT 0 1323#define ADPA_PIPE_A_SELECT 0
1312#define ADPA_PIPE_B_SELECT (1<<30) 1324#define ADPA_PIPE_B_SELECT (1<<30)
1325#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
1313#define ADPA_USE_VGA_HVPOLARITY (1<<15) 1326#define ADPA_USE_VGA_HVPOLARITY (1<<15)
1314#define ADPA_SETS_HVPOLARITY 0 1327#define ADPA_SETS_HVPOLARITY 0
1315#define ADPA_VSYNC_CNTL_DISABLE (1<<11) 1328#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
@@ -1452,6 +1465,7 @@
1452/* Selects pipe B for LVDS data. Must be set on pre-965. */ 1465/* Selects pipe B for LVDS data. Must be set on pre-965. */
1453#define LVDS_PIPEB_SELECT (1 << 30) 1466#define LVDS_PIPEB_SELECT (1 << 30)
1454#define LVDS_PIPE_MASK (1 << 30) 1467#define LVDS_PIPE_MASK (1 << 30)
1468#define LVDS_PIPE(pipe) ((pipe) << 30)
1455/* LVDS dithering flag on 965/g4x platform */ 1469/* LVDS dithering flag on 965/g4x platform */
1456#define LVDS_ENABLE_DITHER (1 << 25) 1470#define LVDS_ENABLE_DITHER (1 << 25)
1457/* LVDS sync polarity flags. Set to invert (i.e. negative) */ 1471/* LVDS sync polarity flags. Set to invert (i.e. negative) */
@@ -1491,9 +1505,6 @@
1491#define LVDS_B0B3_POWER_DOWN (0 << 2) 1505#define LVDS_B0B3_POWER_DOWN (0 << 2)
1492#define LVDS_B0B3_POWER_UP (3 << 2) 1506#define LVDS_B0B3_POWER_UP (3 << 2)
1493 1507
1494#define LVDS_PIPE_ENABLED(V, P) \
1495 (((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN))
1496
1497/* Video Data Island Packet control */ 1508/* Video Data Island Packet control */
1498#define VIDEO_DIP_DATA 0x61178 1509#define VIDEO_DIP_DATA 0x61178
1499#define VIDEO_DIP_CTL 0x61170 1510#define VIDEO_DIP_CTL 0x61170
@@ -1506,6 +1517,7 @@
1506#define VIDEO_DIP_SELECT_AVI (0 << 19) 1517#define VIDEO_DIP_SELECT_AVI (0 << 19)
1507#define VIDEO_DIP_SELECT_VENDOR (1 << 19) 1518#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
1508#define VIDEO_DIP_SELECT_SPD (3 << 19) 1519#define VIDEO_DIP_SELECT_SPD (3 << 19)
1520#define VIDEO_DIP_SELECT_MASK (3 << 19)
1509#define VIDEO_DIP_FREQ_ONCE (0 << 16) 1521#define VIDEO_DIP_FREQ_ONCE (0 << 16)
1510#define VIDEO_DIP_FREQ_VSYNC (1 << 16) 1522#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
1511#define VIDEO_DIP_FREQ_2VSYNC (2 << 16) 1523#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
@@ -2084,9 +2096,6 @@
2084#define DP_PIPEB_SELECT (1 << 30) 2096#define DP_PIPEB_SELECT (1 << 30)
2085#define DP_PIPE_MASK (1 << 30) 2097#define DP_PIPE_MASK (1 << 30)
2086 2098
2087#define DP_PIPE_ENABLED(V, P) \
2088 (((V) & (DP_PIPE_MASK | DP_PORT_EN)) == ((P) << 30 | DP_PORT_EN))
2089
2090/* Link training mode - select a suitable mode for each stage */ 2099/* Link training mode - select a suitable mode for each stage */
2091#define DP_LINK_TRAIN_PAT_1 (0 << 28) 2100#define DP_LINK_TRAIN_PAT_1 (0 << 28)
2092#define DP_LINK_TRAIN_PAT_2 (1 << 28) 2101#define DP_LINK_TRAIN_PAT_2 (1 << 28)
@@ -3024,6 +3033,20 @@
3024#define _TRANSA_DP_LINK_M2 0xe0048 3033#define _TRANSA_DP_LINK_M2 0xe0048
3025#define _TRANSA_DP_LINK_N2 0xe004c 3034#define _TRANSA_DP_LINK_N2 0xe004c
3026 3035
3036/* Per-transcoder DIP controls */
3037
3038#define _VIDEO_DIP_CTL_A 0xe0200
3039#define _VIDEO_DIP_DATA_A 0xe0208
3040#define _VIDEO_DIP_GCP_A 0xe0210
3041
3042#define _VIDEO_DIP_CTL_B 0xe1200
3043#define _VIDEO_DIP_DATA_B 0xe1208
3044#define _VIDEO_DIP_GCP_B 0xe1210
3045
3046#define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
3047#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
3048#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
3049
3027#define _TRANS_HTOTAL_B 0xe1000 3050#define _TRANS_HTOTAL_B 0xe1000
3028#define _TRANS_HBLANK_B 0xe1004 3051#define _TRANS_HBLANK_B 0xe1004
3029#define _TRANS_HSYNC_B 0xe1008 3052#define _TRANS_HSYNC_B 0xe1008
@@ -3076,6 +3099,16 @@
3076#define TRANS_6BPC (2<<5) 3099#define TRANS_6BPC (2<<5)
3077#define TRANS_12BPC (3<<5) 3100#define TRANS_12BPC (3<<5)
3078 3101
3102#define _TRANSA_CHICKEN2 0xf0064
3103#define _TRANSB_CHICKEN2 0xf1064
3104#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
3105#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
3106
3107#define SOUTH_CHICKEN1 0xc2000
3108#define FDIA_PHASE_SYNC_SHIFT_OVR 19
3109#define FDIA_PHASE_SYNC_SHIFT_EN 18
3110#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
3111#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
3079#define SOUTH_CHICKEN2 0xc2004 3112#define SOUTH_CHICKEN2 0xc2004
3080#define DPLS_EDP_PPS_FIX_DIS (1<<0) 3113#define DPLS_EDP_PPS_FIX_DIS (1<<0)
3081 3114
@@ -3226,14 +3259,12 @@
3226#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) 3259#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
3227#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) 3260#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
3228 3261
3229#define ADPA_PIPE_ENABLED(V, P) \
3230 (((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE))
3231
3232/* or SDVOB */ 3262/* or SDVOB */
3233#define HDMIB 0xe1140 3263#define HDMIB 0xe1140
3234#define PORT_ENABLE (1 << 31) 3264#define PORT_ENABLE (1 << 31)
3235#define TRANSCODER_A (0) 3265#define TRANSCODER_A (0)
3236#define TRANSCODER_B (1 << 30) 3266#define TRANSCODER_B (1 << 30)
3267#define TRANSCODER(pipe) ((pipe) << 30)
3237#define TRANSCODER_MASK (1 << 30) 3268#define TRANSCODER_MASK (1 << 30)
3238#define COLOR_FORMAT_8bpc (0) 3269#define COLOR_FORMAT_8bpc (0)
3239#define COLOR_FORMAT_12bpc (3 << 26) 3270#define COLOR_FORMAT_12bpc (3 << 26)
@@ -3250,9 +3281,6 @@
3250#define HSYNC_ACTIVE_HIGH (1 << 3) 3281#define HSYNC_ACTIVE_HIGH (1 << 3)
3251#define PORT_DETECTED (1 << 2) 3282#define PORT_DETECTED (1 << 2)
3252 3283
3253#define HDMI_PIPE_ENABLED(V, P) \
3254 (((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE))
3255
3256/* PCH SDVOB multiplex with HDMIB */ 3284/* PCH SDVOB multiplex with HDMIB */
3257#define PCH_SDVOB HDMIB 3285#define PCH_SDVOB HDMIB
3258 3286
@@ -3319,6 +3347,7 @@
3319#define PORT_TRANS_B_SEL_CPT (1<<29) 3347#define PORT_TRANS_B_SEL_CPT (1<<29)
3320#define PORT_TRANS_C_SEL_CPT (2<<29) 3348#define PORT_TRANS_C_SEL_CPT (2<<29)
3321#define PORT_TRANS_SEL_MASK (3<<29) 3349#define PORT_TRANS_SEL_MASK (3<<29)
3350#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
3322 3351
3323#define TRANS_DP_CTL_A 0xe0300 3352#define TRANS_DP_CTL_A 0xe0300
3324#define TRANS_DP_CTL_B 0xe1300 3353#define TRANS_DP_CTL_B 0xe1300
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 285758603ac8..f10742359ec9 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -812,6 +812,7 @@ int i915_save_state(struct drm_device *dev)
812 dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); 812 dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
813 dev_priv->saveMCHBAR_RENDER_STANDBY = 813 dev_priv->saveMCHBAR_RENDER_STANDBY =
814 I915_READ(RSTDBYCTL); 814 I915_READ(RSTDBYCTL);
815 dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
815 } else { 816 } else {
816 dev_priv->saveIER = I915_READ(IER); 817 dev_priv->saveIER = I915_READ(IER);
817 dev_priv->saveIMR = I915_READ(IMR); 818 dev_priv->saveIMR = I915_READ(IMR);
@@ -863,13 +864,15 @@ int i915_restore_state(struct drm_device *dev)
863 I915_WRITE(GTIMR, dev_priv->saveGTIMR); 864 I915_WRITE(GTIMR, dev_priv->saveGTIMR);
864 I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); 865 I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
865 I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); 866 I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
867 I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
866 } else { 868 } else {
867 I915_WRITE(IER, dev_priv->saveIER); 869 I915_WRITE(IER, dev_priv->saveIER);
868 I915_WRITE(IMR, dev_priv->saveIMR); 870 I915_WRITE(IMR, dev_priv->saveIMR);
869 } 871 }
870 mutex_unlock(&dev->struct_mutex); 872 mutex_unlock(&dev->struct_mutex);
871 873
872 intel_init_clock_gating(dev); 874 if (drm_core_check_feature(dev, DRIVER_MODESET))
875 intel_init_clock_gating(dev);
873 876
874 if (IS_IRONLAKE_M(dev)) { 877 if (IS_IRONLAKE_M(dev)) {
875 ironlake_enable_drps(dev); 878 ironlake_enable_drps(dev);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 393a39922e53..56a8554d9039 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -878,7 +878,7 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
878 int pp_reg, lvds_reg; 878 int pp_reg, lvds_reg;
879 u32 val; 879 u32 val;
880 enum pipe panel_pipe = PIPE_A; 880 enum pipe panel_pipe = PIPE_A;
881 bool locked = locked; 881 bool locked = true;
882 882
883 if (HAS_PCH_SPLIT(dev_priv->dev)) { 883 if (HAS_PCH_SPLIT(dev_priv->dev)) {
884 pp_reg = PCH_PP_CONTROL; 884 pp_reg = PCH_PP_CONTROL;
@@ -980,11 +980,76 @@ static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
980 pipe_name(pipe)); 980 pipe_name(pipe));
981} 981}
982 982
983static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
984 enum pipe pipe, u32 port_sel, u32 val)
985{
986 if ((val & DP_PORT_EN) == 0)
987 return false;
988
989 if (HAS_PCH_CPT(dev_priv->dev)) {
990 u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
991 u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
992 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
993 return false;
994 } else {
995 if ((val & DP_PIPE_MASK) != (pipe << 30))
996 return false;
997 }
998 return true;
999}
1000
1001static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1002 enum pipe pipe, u32 val)
1003{
1004 if ((val & PORT_ENABLE) == 0)
1005 return false;
1006
1007 if (HAS_PCH_CPT(dev_priv->dev)) {
1008 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1009 return false;
1010 } else {
1011 if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1012 return false;
1013 }
1014 return true;
1015}
1016
1017static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1018 enum pipe pipe, u32 val)
1019{
1020 if ((val & LVDS_PORT_EN) == 0)
1021 return false;
1022
1023 if (HAS_PCH_CPT(dev_priv->dev)) {
1024 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1025 return false;
1026 } else {
1027 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1028 return false;
1029 }
1030 return true;
1031}
1032
1033static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1034 enum pipe pipe, u32 val)
1035{
1036 if ((val & ADPA_DAC_ENABLE) == 0)
1037 return false;
1038 if (HAS_PCH_CPT(dev_priv->dev)) {
1039 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1040 return false;
1041 } else {
1042 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1043 return false;
1044 }
1045 return true;
1046}
1047
983static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 1048static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
984 enum pipe pipe, int reg) 1049 enum pipe pipe, int reg, u32 port_sel)
985{ 1050{
986 u32 val = I915_READ(reg); 1051 u32 val = I915_READ(reg);
987 WARN(DP_PIPE_ENABLED(val, pipe), 1052 WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
988 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1053 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
989 reg, pipe_name(pipe)); 1054 reg, pipe_name(pipe));
990} 1055}
@@ -993,7 +1058,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
993 enum pipe pipe, int reg) 1058 enum pipe pipe, int reg)
994{ 1059{
995 u32 val = I915_READ(reg); 1060 u32 val = I915_READ(reg);
996 WARN(HDMI_PIPE_ENABLED(val, pipe), 1061 WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
997 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1062 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
998 reg, pipe_name(pipe)); 1063 reg, pipe_name(pipe));
999} 1064}
@@ -1004,19 +1069,19 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1004 int reg; 1069 int reg;
1005 u32 val; 1070 u32 val;
1006 1071
1007 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B); 1072 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1008 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C); 1073 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1009 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D); 1074 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1010 1075
1011 reg = PCH_ADPA; 1076 reg = PCH_ADPA;
1012 val = I915_READ(reg); 1077 val = I915_READ(reg);
1013 WARN(ADPA_PIPE_ENABLED(val, pipe), 1078 WARN(adpa_pipe_enabled(dev_priv, val, pipe),
1014 "PCH VGA enabled on transcoder %c, should be disabled\n", 1079 "PCH VGA enabled on transcoder %c, should be disabled\n",
1015 pipe_name(pipe)); 1080 pipe_name(pipe));
1016 1081
1017 reg = PCH_LVDS; 1082 reg = PCH_LVDS;
1018 val = I915_READ(reg); 1083 val = I915_READ(reg);
1019 WARN(LVDS_PIPE_ENABLED(val, pipe), 1084 WARN(lvds_pipe_enabled(dev_priv, val, pipe),
1020 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1085 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1021 pipe_name(pipe)); 1086 pipe_name(pipe));
1022 1087
@@ -1276,6 +1341,17 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1276 intel_wait_for_pipe_off(dev_priv->dev, pipe); 1341 intel_wait_for_pipe_off(dev_priv->dev, pipe);
1277} 1342}
1278 1343
1344/*
1345 * Plane regs are double buffered, going from enabled->disabled needs a
1346 * trigger in order to latch. The display address reg provides this.
1347 */
1348static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1349 enum plane plane)
1350{
1351 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1352 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1353}
1354
1279/** 1355/**
1280 * intel_enable_plane - enable a display plane on a given pipe 1356 * intel_enable_plane - enable a display plane on a given pipe
1281 * @dev_priv: i915 private structure 1357 * @dev_priv: i915 private structure
@@ -1299,20 +1375,10 @@ static void intel_enable_plane(struct drm_i915_private *dev_priv,
1299 return; 1375 return;
1300 1376
1301 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); 1377 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1378 intel_flush_display_plane(dev_priv, plane);
1302 intel_wait_for_vblank(dev_priv->dev, pipe); 1379 intel_wait_for_vblank(dev_priv->dev, pipe);
1303} 1380}
1304 1381
1305/*
1306 * Plane regs are double buffered, going from enabled->disabled needs a
1307 * trigger in order to latch. The display address reg provides this.
1308 */
1309static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1310 enum plane plane)
1311{
1312 u32 reg = DSPADDR(plane);
1313 I915_WRITE(reg, I915_READ(reg));
1314}
1315
1316/** 1382/**
1317 * intel_disable_plane - disable a display plane 1383 * intel_disable_plane - disable a display plane
1318 * @dev_priv: i915 private structure 1384 * @dev_priv: i915 private structure
@@ -1338,19 +1404,24 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
1338} 1404}
1339 1405
1340static void disable_pch_dp(struct drm_i915_private *dev_priv, 1406static void disable_pch_dp(struct drm_i915_private *dev_priv,
1341 enum pipe pipe, int reg) 1407 enum pipe pipe, int reg, u32 port_sel)
1342{ 1408{
1343 u32 val = I915_READ(reg); 1409 u32 val = I915_READ(reg);
1344 if (DP_PIPE_ENABLED(val, pipe)) 1410 if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
1411 DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
1345 I915_WRITE(reg, val & ~DP_PORT_EN); 1412 I915_WRITE(reg, val & ~DP_PORT_EN);
1413 }
1346} 1414}
1347 1415
1348static void disable_pch_hdmi(struct drm_i915_private *dev_priv, 1416static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1349 enum pipe pipe, int reg) 1417 enum pipe pipe, int reg)
1350{ 1418{
1351 u32 val = I915_READ(reg); 1419 u32 val = I915_READ(reg);
1352 if (HDMI_PIPE_ENABLED(val, pipe)) 1420 if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
1421 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1422 reg, pipe);
1353 I915_WRITE(reg, val & ~PORT_ENABLE); 1423 I915_WRITE(reg, val & ~PORT_ENABLE);
1424 }
1354} 1425}
1355 1426
1356/* Disable any ports connected to this transcoder */ 1427/* Disable any ports connected to this transcoder */
@@ -1362,18 +1433,19 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1362 val = I915_READ(PCH_PP_CONTROL); 1433 val = I915_READ(PCH_PP_CONTROL);
1363 I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS); 1434 I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1364 1435
1365 disable_pch_dp(dev_priv, pipe, PCH_DP_B); 1436 disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1366 disable_pch_dp(dev_priv, pipe, PCH_DP_C); 1437 disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1367 disable_pch_dp(dev_priv, pipe, PCH_DP_D); 1438 disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1368 1439
1369 reg = PCH_ADPA; 1440 reg = PCH_ADPA;
1370 val = I915_READ(reg); 1441 val = I915_READ(reg);
1371 if (ADPA_PIPE_ENABLED(val, pipe)) 1442 if (adpa_pipe_enabled(dev_priv, val, pipe))
1372 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); 1443 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1373 1444
1374 reg = PCH_LVDS; 1445 reg = PCH_LVDS;
1375 val = I915_READ(reg); 1446 val = I915_READ(reg);
1376 if (LVDS_PIPE_ENABLED(val, pipe)) { 1447 if (lvds_pipe_enabled(dev_priv, val, pipe)) {
1448 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1377 I915_WRITE(reg, val & ~LVDS_PORT_EN); 1449 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1378 POSTING_READ(reg); 1450 POSTING_READ(reg);
1379 udelay(100); 1451 udelay(100);
@@ -2096,7 +2168,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2096 2168
2097 /* no fb bound */ 2169 /* no fb bound */
2098 if (!crtc->fb) { 2170 if (!crtc->fb) {
2099 DRM_DEBUG_KMS("No FB bound\n"); 2171 DRM_ERROR("No FB bound\n");
2100 return 0; 2172 return 0;
2101 } 2173 }
2102 2174
@@ -2105,6 +2177,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2105 case 1: 2177 case 1:
2106 break; 2178 break;
2107 default: 2179 default:
2180 DRM_ERROR("no plane for crtc\n");
2108 return -EINVAL; 2181 return -EINVAL;
2109 } 2182 }
2110 2183
@@ -2114,6 +2187,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2114 NULL); 2187 NULL);
2115 if (ret != 0) { 2188 if (ret != 0) {
2116 mutex_unlock(&dev->struct_mutex); 2189 mutex_unlock(&dev->struct_mutex);
2190 DRM_ERROR("pin & fence failed\n");
2117 return ret; 2191 return ret;
2118 } 2192 }
2119 2193
@@ -2142,6 +2216,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2142 if (ret) { 2216 if (ret) {
2143 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); 2217 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
2144 mutex_unlock(&dev->struct_mutex); 2218 mutex_unlock(&dev->struct_mutex);
2219 DRM_ERROR("failed to update base address\n");
2145 return ret; 2220 return ret;
2146 } 2221 }
2147 2222
@@ -2248,6 +2323,18 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
2248 FDI_FE_ERRC_ENABLE); 2323 FDI_FE_ERRC_ENABLE);
2249} 2324}
2250 2325
2326static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2327{
2328 struct drm_i915_private *dev_priv = dev->dev_private;
2329 u32 flags = I915_READ(SOUTH_CHICKEN1);
2330
2331 flags |= FDI_PHASE_SYNC_OVR(pipe);
2332 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2333 flags |= FDI_PHASE_SYNC_EN(pipe);
2334 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2335 POSTING_READ(SOUTH_CHICKEN1);
2336}
2337
2251/* The FDI link training functions for ILK/Ibexpeak. */ 2338/* The FDI link training functions for ILK/Ibexpeak. */
2252static void ironlake_fdi_link_train(struct drm_crtc *crtc) 2339static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2253{ 2340{
@@ -2398,6 +2485,9 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2398 POSTING_READ(reg); 2485 POSTING_READ(reg);
2399 udelay(150); 2486 udelay(150);
2400 2487
2488 if (HAS_PCH_CPT(dev))
2489 cpt_phase_pointer_enable(dev, pipe);
2490
2401 for (i = 0; i < 4; i++ ) { 2491 for (i = 0; i < 4; i++ ) {
2402 reg = FDI_TX_CTL(pipe); 2492 reg = FDI_TX_CTL(pipe);
2403 temp = I915_READ(reg); 2493 temp = I915_READ(reg);
@@ -2514,6 +2604,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2514 POSTING_READ(reg); 2604 POSTING_READ(reg);
2515 udelay(150); 2605 udelay(150);
2516 2606
2607 if (HAS_PCH_CPT(dev))
2608 cpt_phase_pointer_enable(dev, pipe);
2609
2517 for (i = 0; i < 4; i++ ) { 2610 for (i = 0; i < 4; i++ ) {
2518 reg = FDI_TX_CTL(pipe); 2611 reg = FDI_TX_CTL(pipe);
2519 temp = I915_READ(reg); 2612 temp = I915_READ(reg);
@@ -2623,6 +2716,17 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2623 } 2716 }
2624} 2717}
2625 2718
2719static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2720{
2721 struct drm_i915_private *dev_priv = dev->dev_private;
2722 u32 flags = I915_READ(SOUTH_CHICKEN1);
2723
2724 flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2725 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2726 flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2727 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2728 POSTING_READ(SOUTH_CHICKEN1);
2729}
2626static void ironlake_fdi_disable(struct drm_crtc *crtc) 2730static void ironlake_fdi_disable(struct drm_crtc *crtc)
2627{ 2731{
2628 struct drm_device *dev = crtc->dev; 2732 struct drm_device *dev = crtc->dev;
@@ -2652,6 +2756,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
2652 I915_WRITE(FDI_RX_CHICKEN(pipe), 2756 I915_WRITE(FDI_RX_CHICKEN(pipe),
2653 I915_READ(FDI_RX_CHICKEN(pipe) & 2757 I915_READ(FDI_RX_CHICKEN(pipe) &
2654 ~FDI_RX_PHASE_SYNC_POINTER_EN)); 2758 ~FDI_RX_PHASE_SYNC_POINTER_EN));
2759 } else if (HAS_PCH_CPT(dev)) {
2760 cpt_phase_pointer_disable(dev, pipe);
2655 } 2761 }
2656 2762
2657 /* still set train pattern 1 */ 2763 /* still set train pattern 1 */
@@ -2862,14 +2968,18 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2862 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); 2968 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
2863 } 2969 }
2864 2970
2971 /*
2972 * On ILK+ LUT must be loaded before the pipe is running but with
2973 * clocks enabled
2974 */
2975 intel_crtc_load_lut(crtc);
2976
2865 intel_enable_pipe(dev_priv, pipe, is_pch_port); 2977 intel_enable_pipe(dev_priv, pipe, is_pch_port);
2866 intel_enable_plane(dev_priv, plane, pipe); 2978 intel_enable_plane(dev_priv, plane, pipe);
2867 2979
2868 if (is_pch_port) 2980 if (is_pch_port)
2869 ironlake_pch_enable(crtc); 2981 ironlake_pch_enable(crtc);
2870 2982
2871 intel_crtc_load_lut(crtc);
2872
2873 mutex_lock(&dev->struct_mutex); 2983 mutex_lock(&dev->struct_mutex);
2874 intel_update_fbc(dev); 2984 intel_update_fbc(dev);
2875 mutex_unlock(&dev->struct_mutex); 2985 mutex_unlock(&dev->struct_mutex);
@@ -4538,7 +4648,9 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4538 if (connector->encoder != encoder) 4648 if (connector->encoder != encoder)
4539 continue; 4649 continue;
4540 4650
4541 if (connector->display_info.bpc < display_bpc) { 4651 /* Don't use an invalid EDID bpc value */
4652 if (connector->display_info.bpc &&
4653 connector->display_info.bpc < display_bpc) {
4542 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); 4654 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
4543 display_bpc = connector->display_info.bpc; 4655 display_bpc = connector->display_info.bpc;
4544 } 4656 }
@@ -4985,6 +5097,81 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4985 return ret; 5097 return ret;
4986} 5098}
4987 5099
5100static void ironlake_update_pch_refclk(struct drm_device *dev)
5101{
5102 struct drm_i915_private *dev_priv = dev->dev_private;
5103 struct drm_mode_config *mode_config = &dev->mode_config;
5104 struct drm_crtc *crtc;
5105 struct intel_encoder *encoder;
5106 struct intel_encoder *has_edp_encoder = NULL;
5107 u32 temp;
5108 bool has_lvds = false;
5109
5110 /* We need to take the global config into account */
5111 list_for_each_entry(crtc, &mode_config->crtc_list, head) {
5112 if (!crtc->enabled)
5113 continue;
5114
5115 list_for_each_entry(encoder, &mode_config->encoder_list,
5116 base.head) {
5117 if (encoder->base.crtc != crtc)
5118 continue;
5119
5120 switch (encoder->type) {
5121 case INTEL_OUTPUT_LVDS:
5122 has_lvds = true;
5123 case INTEL_OUTPUT_EDP:
5124 has_edp_encoder = encoder;
5125 break;
5126 }
5127 }
5128 }
5129
5130 /* Ironlake: try to setup display ref clock before DPLL
5131 * enabling. This is only under driver's control after
5132 * PCH B stepping, previous chipset stepping should be
5133 * ignoring this setting.
5134 */
5135 temp = I915_READ(PCH_DREF_CONTROL);
5136 /* Always enable nonspread source */
5137 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
5138 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
5139 temp &= ~DREF_SSC_SOURCE_MASK;
5140 temp |= DREF_SSC_SOURCE_ENABLE;
5141 I915_WRITE(PCH_DREF_CONTROL, temp);
5142
5143 POSTING_READ(PCH_DREF_CONTROL);
5144 udelay(200);
5145
5146 if (has_edp_encoder) {
5147 if (intel_panel_use_ssc(dev_priv)) {
5148 temp |= DREF_SSC1_ENABLE;
5149 I915_WRITE(PCH_DREF_CONTROL, temp);
5150
5151 POSTING_READ(PCH_DREF_CONTROL);
5152 udelay(200);
5153 }
5154 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5155
5156 /* Enable CPU source on CPU attached eDP */
5157 if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5158 if (intel_panel_use_ssc(dev_priv))
5159 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5160 else
5161 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5162 } else {
5163 /* Enable SSC on PCH eDP if needed */
5164 if (intel_panel_use_ssc(dev_priv)) {
5165 DRM_ERROR("enabling SSC on PCH\n");
5166 temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
5167 }
5168 }
5169 I915_WRITE(PCH_DREF_CONTROL, temp);
5170 POSTING_READ(PCH_DREF_CONTROL);
5171 udelay(200);
5172 }
5173}
5174
4988static int ironlake_crtc_mode_set(struct drm_crtc *crtc, 5175static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4989 struct drm_display_mode *mode, 5176 struct drm_display_mode *mode,
4990 struct drm_display_mode *adjusted_mode, 5177 struct drm_display_mode *adjusted_mode,
@@ -5153,7 +5340,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5153 temp |= PIPE_12BPC; 5340 temp |= PIPE_12BPC;
5154 break; 5341 break;
5155 default: 5342 default:
5156 WARN(1, "intel_choose_pipe_bpp returned invalid value\n"); 5343 WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
5344 pipe_bpp);
5157 temp |= PIPE_8BPC; 5345 temp |= PIPE_8BPC;
5158 pipe_bpp = 24; 5346 pipe_bpp = 24;
5159 break; 5347 break;
@@ -5179,49 +5367,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5179 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, 5367 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5180 &m_n); 5368 &m_n);
5181 5369
5182 /* Ironlake: try to setup display ref clock before DPLL 5370 ironlake_update_pch_refclk(dev);
5183 * enabling. This is only under driver's control after
5184 * PCH B stepping, previous chipset stepping should be
5185 * ignoring this setting.
5186 */
5187 temp = I915_READ(PCH_DREF_CONTROL);
5188 /* Always enable nonspread source */
5189 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
5190 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
5191 temp &= ~DREF_SSC_SOURCE_MASK;
5192 temp |= DREF_SSC_SOURCE_ENABLE;
5193 I915_WRITE(PCH_DREF_CONTROL, temp);
5194
5195 POSTING_READ(PCH_DREF_CONTROL);
5196 udelay(200);
5197
5198 if (has_edp_encoder) {
5199 if (intel_panel_use_ssc(dev_priv)) {
5200 temp |= DREF_SSC1_ENABLE;
5201 I915_WRITE(PCH_DREF_CONTROL, temp);
5202
5203 POSTING_READ(PCH_DREF_CONTROL);
5204 udelay(200);
5205 }
5206 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5207
5208 /* Enable CPU source on CPU attached eDP */
5209 if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5210 if (intel_panel_use_ssc(dev_priv))
5211 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5212 else
5213 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5214 } else {
5215 /* Enable SSC on PCH eDP if needed */
5216 if (intel_panel_use_ssc(dev_priv)) {
5217 DRM_ERROR("enabling SSC on PCH\n");
5218 temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
5219 }
5220 }
5221 I915_WRITE(PCH_DREF_CONTROL, temp);
5222 POSTING_READ(PCH_DREF_CONTROL);
5223 udelay(200);
5224 }
5225 5371
5226 fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 5372 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5227 if (has_reduced_clock) 5373 if (has_reduced_clock)
@@ -5238,7 +5384,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5238 } else if (is_sdvo && is_tv) 5384 } else if (is_sdvo && is_tv)
5239 factor = 20; 5385 factor = 20;
5240 5386
5241 if (clock.m1 < factor * clock.n) 5387 if (clock.m < factor * clock.n)
5242 fp |= FP_CB_TUNE; 5388 fp |= FP_CB_TUNE;
5243 5389
5244 dpll = 0; 5390 dpll = 0;
@@ -5516,6 +5662,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
5516 5662
5517 drm_vblank_post_modeset(dev, pipe); 5663 drm_vblank_post_modeset(dev, pipe);
5518 5664
5665 intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
5666
5519 return ret; 5667 return ret;
5520} 5668}
5521 5669
@@ -7090,8 +7238,6 @@ static void intel_setup_outputs(struct drm_device *dev)
7090 intel_encoder_clones(dev, encoder->clone_mask); 7238 intel_encoder_clones(dev, encoder->clone_mask);
7091 } 7239 }
7092 7240
7093 intel_panel_setup_backlight(dev);
7094
7095 /* disable all the possible outputs/crtcs before entering KMS mode */ 7241 /* disable all the possible outputs/crtcs before entering KMS mode */
7096 drm_helper_disable_unused_functions(dev); 7242 drm_helper_disable_unused_functions(dev);
7097} 7243}
@@ -7714,10 +7860,12 @@ static void gen6_init_clock_gating(struct drm_device *dev)
7714 ILK_DPARB_CLK_GATE | 7860 ILK_DPARB_CLK_GATE |
7715 ILK_DPFD_CLK_GATE); 7861 ILK_DPFD_CLK_GATE);
7716 7862
7717 for_each_pipe(pipe) 7863 for_each_pipe(pipe) {
7718 I915_WRITE(DSPCNTR(pipe), 7864 I915_WRITE(DSPCNTR(pipe),
7719 I915_READ(DSPCNTR(pipe)) | 7865 I915_READ(DSPCNTR(pipe)) |
7720 DISPPLANE_TRICKLE_FEED_DISABLE); 7866 DISPPLANE_TRICKLE_FEED_DISABLE);
7867 intel_flush_display_plane(dev_priv, pipe);
7868 }
7721} 7869}
7722 7870
7723static void ivybridge_init_clock_gating(struct drm_device *dev) 7871static void ivybridge_init_clock_gating(struct drm_device *dev)
@@ -7734,10 +7882,12 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
7734 7882
7735 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); 7883 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
7736 7884
7737 for_each_pipe(pipe) 7885 for_each_pipe(pipe) {
7738 I915_WRITE(DSPCNTR(pipe), 7886 I915_WRITE(DSPCNTR(pipe),
7739 I915_READ(DSPCNTR(pipe)) | 7887 I915_READ(DSPCNTR(pipe)) |
7740 DISPPLANE_TRICKLE_FEED_DISABLE); 7888 DISPPLANE_TRICKLE_FEED_DISABLE);
7889 intel_flush_display_plane(dev_priv, pipe);
7890 }
7741} 7891}
7742 7892
7743static void g4x_init_clock_gating(struct drm_device *dev) 7893static void g4x_init_clock_gating(struct drm_device *dev)
@@ -7820,6 +7970,7 @@ static void ibx_init_clock_gating(struct drm_device *dev)
7820static void cpt_init_clock_gating(struct drm_device *dev) 7970static void cpt_init_clock_gating(struct drm_device *dev)
7821{ 7971{
7822 struct drm_i915_private *dev_priv = dev->dev_private; 7972 struct drm_i915_private *dev_priv = dev->dev_private;
7973 int pipe;
7823 7974
7824 /* 7975 /*
7825 * On Ibex Peak and Cougar Point, we need to disable clock 7976 * On Ibex Peak and Cougar Point, we need to disable clock
@@ -7829,6 +7980,9 @@ static void cpt_init_clock_gating(struct drm_device *dev)
7829 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 7980 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
7830 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | 7981 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
7831 DPLS_EDP_PPS_FIX_DIS); 7982 DPLS_EDP_PPS_FIX_DIS);
7983 /* Without this, mode sets may fail silently on FDI */
7984 for_each_pipe(pipe)
7985 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
7832} 7986}
7833 7987
7834static void ironlake_teardown_rc6(struct drm_device *dev) 7988static void ironlake_teardown_rc6(struct drm_device *dev)
@@ -8178,6 +8332,9 @@ struct intel_quirk intel_quirks[] = {
8178 8332
8179 /* Lenovo U160 cannot use SSC on LVDS */ 8333 /* Lenovo U160 cannot use SSC on LVDS */
8180 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, 8334 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
8335
8336 /* Sony Vaio Y cannot use SSC on LVDS */
8337 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
8181}; 8338};
8182 8339
8183static void intel_init_quirks(struct drm_device *dev) 8340static void intel_init_quirks(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f797fb58ba9c..44fef5e1c490 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -50,9 +50,10 @@ struct intel_dp {
50 bool has_audio; 50 bool has_audio;
51 int force_audio; 51 int force_audio;
52 uint32_t color_range; 52 uint32_t color_range;
53 int dpms_mode;
53 uint8_t link_bw; 54 uint8_t link_bw;
54 uint8_t lane_count; 55 uint8_t lane_count;
55 uint8_t dpcd[4]; 56 uint8_t dpcd[8];
56 struct i2c_adapter adapter; 57 struct i2c_adapter adapter;
57 struct i2c_algo_dp_aux_data algo; 58 struct i2c_algo_dp_aux_data algo;
58 bool is_pch_edp; 59 bool is_pch_edp;
@@ -316,9 +317,17 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
316 else 317 else
317 precharge = 5; 318 precharge = 5;
318 319
319 if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) { 320 /* Try to wait for any previous AUX channel activity */
320 DRM_ERROR("dp_aux_ch not started status 0x%08x\n", 321 for (try = 0; try < 3; try++) {
321 I915_READ(ch_ctl)); 322 status = I915_READ(ch_ctl);
323 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
324 break;
325 msleep(1);
326 }
327
328 if (try == 3) {
329 WARN(1, "dp_aux_ch not started status 0x%08x\n",
330 I915_READ(ch_ctl));
322 return -EBUSY; 331 return -EBUSY;
323 } 332 }
324 333
@@ -770,6 +779,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
770 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 779 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
771 intel_dp->link_configuration[0] = intel_dp->link_bw; 780 intel_dp->link_configuration[0] = intel_dp->link_bw;
772 intel_dp->link_configuration[1] = intel_dp->lane_count; 781 intel_dp->link_configuration[1] = intel_dp->lane_count;
782 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
773 783
774 /* 784 /*
775 * Check for DPCD version > 1.1 and enhanced framing support 785 * Check for DPCD version > 1.1 and enhanced framing support
@@ -1011,6 +1021,8 @@ static void intel_dp_commit(struct drm_encoder *encoder)
1011 1021
1012 if (is_edp(intel_dp)) 1022 if (is_edp(intel_dp))
1013 ironlake_edp_backlight_on(dev); 1023 ironlake_edp_backlight_on(dev);
1024
1025 intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
1014} 1026}
1015 1027
1016static void 1028static void
@@ -1045,6 +1057,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
1045 if (is_edp(intel_dp)) 1057 if (is_edp(intel_dp))
1046 ironlake_edp_backlight_on(dev); 1058 ironlake_edp_backlight_on(dev);
1047 } 1059 }
1060 intel_dp->dpms_mode = mode;
1048} 1061}
1049 1062
1050/* 1063/*
@@ -1334,10 +1347,16 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1334 u32 reg; 1347 u32 reg;
1335 uint32_t DP = intel_dp->DP; 1348 uint32_t DP = intel_dp->DP;
1336 1349
1337 /* Enable output, wait for it to become active */ 1350 /*
1338 I915_WRITE(intel_dp->output_reg, intel_dp->DP); 1351 * On CPT we have to enable the port in training pattern 1, which
1339 POSTING_READ(intel_dp->output_reg); 1352 * will happen below in intel_dp_set_link_train. Otherwise, enable
1340 intel_wait_for_vblank(dev, intel_crtc->pipe); 1353 * the port and wait for it to become active.
1354 */
1355 if (!HAS_PCH_CPT(dev)) {
1356 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
1357 POSTING_READ(intel_dp->output_reg);
1358 intel_wait_for_vblank(dev, intel_crtc->pipe);
1359 }
1341 1360
1342 /* Write the link configuration data */ 1361 /* Write the link configuration data */
1343 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 1362 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
@@ -1370,7 +1389,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1370 reg = DP | DP_LINK_TRAIN_PAT_1; 1389 reg = DP | DP_LINK_TRAIN_PAT_1;
1371 1390
1372 if (!intel_dp_set_link_train(intel_dp, reg, 1391 if (!intel_dp_set_link_train(intel_dp, reg,
1373 DP_TRAINING_PATTERN_1)) 1392 DP_TRAINING_PATTERN_1 |
1393 DP_LINK_SCRAMBLING_DISABLE))
1374 break; 1394 break;
1375 /* Set training pattern 1 */ 1395 /* Set training pattern 1 */
1376 1396
@@ -1445,7 +1465,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1445 1465
1446 /* channel eq pattern */ 1466 /* channel eq pattern */
1447 if (!intel_dp_set_link_train(intel_dp, reg, 1467 if (!intel_dp_set_link_train(intel_dp, reg,
1448 DP_TRAINING_PATTERN_2)) 1468 DP_TRAINING_PATTERN_2 |
1469 DP_LINK_SCRAMBLING_DISABLE))
1449 break; 1470 break;
1450 1471
1451 udelay(400); 1472 udelay(400);
@@ -1559,6 +1580,18 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1559 POSTING_READ(intel_dp->output_reg); 1580 POSTING_READ(intel_dp->output_reg);
1560} 1581}
1561 1582
1583static bool
1584intel_dp_get_dpcd(struct intel_dp *intel_dp)
1585{
1586 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
1587 sizeof (intel_dp->dpcd)) &&
1588 (intel_dp->dpcd[DP_DPCD_REV] != 0)) {
1589 return true;
1590 }
1591
1592 return false;
1593}
1594
1562/* 1595/*
1563 * According to DP spec 1596 * According to DP spec
1564 * 5.1.2: 1597 * 5.1.2:
@@ -1571,36 +1604,44 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1571static void 1604static void
1572intel_dp_check_link_status(struct intel_dp *intel_dp) 1605intel_dp_check_link_status(struct intel_dp *intel_dp)
1573{ 1606{
1574 int ret; 1607 if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
1608 return;
1575 1609
1576 if (!intel_dp->base.base.crtc) 1610 if (!intel_dp->base.base.crtc)
1577 return; 1611 return;
1578 1612
1613 /* Try to read receiver status if the link appears to be up */
1579 if (!intel_dp_get_link_status(intel_dp)) { 1614 if (!intel_dp_get_link_status(intel_dp)) {
1580 intel_dp_link_down(intel_dp); 1615 intel_dp_link_down(intel_dp);
1581 return; 1616 return;
1582 } 1617 }
1583 1618
1584 /* Try to read receiver status if the link appears to be up */ 1619 /* Now read the DPCD to see if it's actually running */
1585 ret = intel_dp_aux_native_read(intel_dp, 1620 if (!intel_dp_get_dpcd(intel_dp)) {
1586 0x000, intel_dp->dpcd,
1587 sizeof (intel_dp->dpcd));
1588 if (ret != sizeof(intel_dp->dpcd)) {
1589 intel_dp_link_down(intel_dp); 1621 intel_dp_link_down(intel_dp);
1590 return; 1622 return;
1591 } 1623 }
1592 1624
1593 if (!intel_channel_eq_ok(intel_dp)) { 1625 if (!intel_channel_eq_ok(intel_dp)) {
1626 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
1627 drm_get_encoder_name(&intel_dp->base.base));
1594 intel_dp_start_link_train(intel_dp); 1628 intel_dp_start_link_train(intel_dp);
1595 intel_dp_complete_link_train(intel_dp); 1629 intel_dp_complete_link_train(intel_dp);
1596 } 1630 }
1597} 1631}
1598 1632
1599static enum drm_connector_status 1633static enum drm_connector_status
1634intel_dp_detect_dpcd(struct intel_dp *intel_dp)
1635{
1636 if (intel_dp_get_dpcd(intel_dp))
1637 return connector_status_connected;
1638 return connector_status_disconnected;
1639}
1640
1641static enum drm_connector_status
1600ironlake_dp_detect(struct intel_dp *intel_dp) 1642ironlake_dp_detect(struct intel_dp *intel_dp)
1601{ 1643{
1602 enum drm_connector_status status; 1644 enum drm_connector_status status;
1603 bool ret;
1604 1645
1605 /* Can't disconnect eDP, but you can close the lid... */ 1646 /* Can't disconnect eDP, but you can close the lid... */
1606 if (is_edp(intel_dp)) { 1647 if (is_edp(intel_dp)) {
@@ -1610,15 +1651,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
1610 return status; 1651 return status;
1611 } 1652 }
1612 1653
1613 status = connector_status_disconnected; 1654 return intel_dp_detect_dpcd(intel_dp);
1614 ret = intel_dp_aux_native_read_retry(intel_dp,
1615 0x000, intel_dp->dpcd,
1616 sizeof (intel_dp->dpcd));
1617 if (ret && intel_dp->dpcd[DP_DPCD_REV] != 0)
1618 status = connector_status_connected;
1619 DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
1620 intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
1621 return status;
1622} 1655}
1623 1656
1624static enum drm_connector_status 1657static enum drm_connector_status
@@ -1626,7 +1659,6 @@ g4x_dp_detect(struct intel_dp *intel_dp)
1626{ 1659{
1627 struct drm_device *dev = intel_dp->base.base.dev; 1660 struct drm_device *dev = intel_dp->base.base.dev;
1628 struct drm_i915_private *dev_priv = dev->dev_private; 1661 struct drm_i915_private *dev_priv = dev->dev_private;
1629 enum drm_connector_status status;
1630 uint32_t temp, bit; 1662 uint32_t temp, bit;
1631 1663
1632 switch (intel_dp->output_reg) { 1664 switch (intel_dp->output_reg) {
@@ -1648,15 +1680,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
1648 if ((temp & bit) == 0) 1680 if ((temp & bit) == 0)
1649 return connector_status_disconnected; 1681 return connector_status_disconnected;
1650 1682
1651 status = connector_status_disconnected; 1683 return intel_dp_detect_dpcd(intel_dp);
1652 if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
1653 sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
1654 {
1655 if (intel_dp->dpcd[DP_DPCD_REV] != 0)
1656 status = connector_status_connected;
1657 }
1658
1659 return status;
1660} 1684}
1661 1685
1662/** 1686/**
@@ -1679,6 +1703,12 @@ intel_dp_detect(struct drm_connector *connector, bool force)
1679 status = ironlake_dp_detect(intel_dp); 1703 status = ironlake_dp_detect(intel_dp);
1680 else 1704 else
1681 status = g4x_dp_detect(intel_dp); 1705 status = g4x_dp_detect(intel_dp);
1706
1707 DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
1708 intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
1709 intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
1710 intel_dp->dpcd[6], intel_dp->dpcd[7]);
1711
1682 if (status != connector_status_connected) 1712 if (status != connector_status_connected)
1683 return status; 1713 return status;
1684 1714
@@ -1811,6 +1841,11 @@ done:
1811static void 1841static void
1812intel_dp_destroy (struct drm_connector *connector) 1842intel_dp_destroy (struct drm_connector *connector)
1813{ 1843{
1844 struct drm_device *dev = connector->dev;
1845
1846 if (intel_dpd_is_edp(dev))
1847 intel_panel_destroy_backlight(dev);
1848
1814 drm_sysfs_connector_remove(connector); 1849 drm_sysfs_connector_remove(connector);
1815 drm_connector_cleanup(connector); 1850 drm_connector_cleanup(connector);
1816 kfree(connector); 1851 kfree(connector);
@@ -1924,6 +1959,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1924 return; 1959 return;
1925 1960
1926 intel_dp->output_reg = output_reg; 1961 intel_dp->output_reg = output_reg;
1962 intel_dp->dpms_mode = -1;
1927 1963
1928 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 1964 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
1929 if (!intel_connector) { 1965 if (!intel_connector) {
@@ -2000,7 +2036,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2000 2036
2001 /* Cache some DPCD data in the eDP case */ 2037 /* Cache some DPCD data in the eDP case */
2002 if (is_edp(intel_dp)) { 2038 if (is_edp(intel_dp)) {
2003 int ret; 2039 bool ret;
2004 u32 pp_on, pp_div; 2040 u32 pp_on, pp_div;
2005 2041
2006 pp_on = I915_READ(PCH_PP_ON_DELAYS); 2042 pp_on = I915_READ(PCH_PP_ON_DELAYS);
@@ -2013,11 +2049,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2013 dev_priv->panel_t12 *= 100; /* t12 in 100ms units */ 2049 dev_priv->panel_t12 *= 100; /* t12 in 100ms units */
2014 2050
2015 ironlake_edp_panel_vdd_on(intel_dp); 2051 ironlake_edp_panel_vdd_on(intel_dp);
2016 ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, 2052 ret = intel_dp_get_dpcd(intel_dp);
2017 intel_dp->dpcd,
2018 sizeof(intel_dp->dpcd));
2019 ironlake_edp_panel_vdd_off(intel_dp); 2053 ironlake_edp_panel_vdd_off(intel_dp);
2020 if (ret == sizeof(intel_dp->dpcd)) { 2054 if (ret) {
2021 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 2055 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
2022 dev_priv->no_aux_handshake = 2056 dev_priv->no_aux_handshake =
2023 intel_dp->dpcd[DP_MAX_DOWNSPREAD] & 2057 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
@@ -2043,6 +2077,8 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2043 DRM_MODE_TYPE_PREFERRED; 2077 DRM_MODE_TYPE_PREFERRED;
2044 } 2078 }
2045 } 2079 }
2080 dev_priv->int_edp_connector = connector;
2081 intel_panel_setup_backlight(dev);
2046 } 2082 }
2047 2083
2048 intel_dp_add_properties(intel_dp, connector); 2084 intel_dp_add_properties(intel_dp, connector);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 6e990f9760ef..0b2ee9d39980 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -178,10 +178,28 @@ struct intel_crtc {
178#define to_intel_encoder(x) container_of(x, struct intel_encoder, base) 178#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
179#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 179#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
180 180
181#define DIP_HEADER_SIZE 5
182
181#define DIP_TYPE_AVI 0x82 183#define DIP_TYPE_AVI 0x82
182#define DIP_VERSION_AVI 0x2 184#define DIP_VERSION_AVI 0x2
183#define DIP_LEN_AVI 13 185#define DIP_LEN_AVI 13
184 186
187#define DIP_TYPE_SPD 0x3
188#define DIP_VERSION_SPD 0x1
189#define DIP_LEN_SPD 25
190#define DIP_SPD_UNKNOWN 0
191#define DIP_SPD_DSTB 0x1
192#define DIP_SPD_DVDP 0x2
193#define DIP_SPD_DVHS 0x3
194#define DIP_SPD_HDDVR 0x4
195#define DIP_SPD_DVC 0x5
196#define DIP_SPD_DSC 0x6
197#define DIP_SPD_VCD 0x7
198#define DIP_SPD_GAME 0x8
199#define DIP_SPD_PC 0x9
200#define DIP_SPD_BD 0xa
201#define DIP_SPD_SCD 0xb
202
185struct dip_infoframe { 203struct dip_infoframe {
186 uint8_t type; /* HB0 */ 204 uint8_t type; /* HB0 */
187 uint8_t ver; /* HB1 */ 205 uint8_t ver; /* HB1 */
@@ -206,6 +224,11 @@ struct dip_infoframe {
206 uint16_t left_bar_end; 224 uint16_t left_bar_end;
207 uint16_t right_bar_start; 225 uint16_t right_bar_start;
208 } avi; 226 } avi;
227 struct {
228 uint8_t vn[8];
229 uint8_t pd[16];
230 uint8_t sdi;
231 } spd;
209 uint8_t payload[27]; 232 uint8_t payload[27];
210 } __attribute__ ((packed)) body; 233 } __attribute__ ((packed)) body;
211} __attribute__((packed)); 234} __attribute__((packed));
@@ -274,9 +297,10 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
274extern u32 intel_panel_get_max_backlight(struct drm_device *dev); 297extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
275extern u32 intel_panel_get_backlight(struct drm_device *dev); 298extern u32 intel_panel_get_backlight(struct drm_device *dev);
276extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); 299extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
277extern void intel_panel_setup_backlight(struct drm_device *dev); 300extern int intel_panel_setup_backlight(struct drm_device *dev);
278extern void intel_panel_enable_backlight(struct drm_device *dev); 301extern void intel_panel_enable_backlight(struct drm_device *dev);
279extern void intel_panel_disable_backlight(struct drm_device *dev); 302extern void intel_panel_disable_backlight(struct drm_device *dev);
303extern void intel_panel_destroy_backlight(struct drm_device *dev);
280extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); 304extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
281 305
282extern void intel_crtc_load_lut(struct drm_crtc *crtc); 306extern void intel_crtc_load_lut(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 1ed8e6903915..226ba830f383 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -45,6 +45,8 @@ struct intel_hdmi {
45 bool has_hdmi_sink; 45 bool has_hdmi_sink;
46 bool has_audio; 46 bool has_audio;
47 int force_audio; 47 int force_audio;
48 void (*write_infoframe)(struct drm_encoder *encoder,
49 struct dip_infoframe *frame);
48}; 50};
49 51
50static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) 52static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
@@ -58,37 +60,70 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
58 struct intel_hdmi, base); 60 struct intel_hdmi, base);
59} 61}
60 62
61void intel_dip_infoframe_csum(struct dip_infoframe *avi_if) 63void intel_dip_infoframe_csum(struct dip_infoframe *frame)
62{ 64{
63 uint8_t *data = (uint8_t *)avi_if; 65 uint8_t *data = (uint8_t *)frame;
64 uint8_t sum = 0; 66 uint8_t sum = 0;
65 unsigned i; 67 unsigned i;
66 68
67 avi_if->checksum = 0; 69 frame->checksum = 0;
68 avi_if->ecc = 0; 70 frame->ecc = 0;
69 71
70 for (i = 0; i < sizeof(*avi_if); i++) 72 /* Header isn't part of the checksum */
73 for (i = 5; i < frame->len; i++)
71 sum += data[i]; 74 sum += data[i];
72 75
73 avi_if->checksum = 0x100 - sum; 76 frame->checksum = 0x100 - sum;
74} 77}
75 78
76static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) 79static u32 intel_infoframe_index(struct dip_infoframe *frame)
77{ 80{
78 struct dip_infoframe avi_if = { 81 u32 flags = 0;
79 .type = DIP_TYPE_AVI, 82
80 .ver = DIP_VERSION_AVI, 83 switch (frame->type) {
81 .len = DIP_LEN_AVI, 84 case DIP_TYPE_AVI:
82 }; 85 flags |= VIDEO_DIP_SELECT_AVI;
83 uint32_t *data = (uint32_t *)&avi_if; 86 break;
87 case DIP_TYPE_SPD:
88 flags |= VIDEO_DIP_SELECT_SPD;
89 break;
90 default:
91 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
92 break;
93 }
94
95 return flags;
96}
97
98static u32 intel_infoframe_flags(struct dip_infoframe *frame)
99{
100 u32 flags = 0;
101
102 switch (frame->type) {
103 case DIP_TYPE_AVI:
104 flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
105 break;
106 case DIP_TYPE_SPD:
107 flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_2VSYNC;
108 break;
109 default:
110 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
111 break;
112 }
113
114 return flags;
115}
116
117static void i9xx_write_infoframe(struct drm_encoder *encoder,
118 struct dip_infoframe *frame)
119{
120 uint32_t *data = (uint32_t *)frame;
84 struct drm_device *dev = encoder->dev; 121 struct drm_device *dev = encoder->dev;
85 struct drm_i915_private *dev_priv = dev->dev_private; 122 struct drm_i915_private *dev_priv = dev->dev_private;
86 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 123 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
87 u32 port; 124 u32 port, flags, val = I915_READ(VIDEO_DIP_CTL);
88 unsigned i; 125 unsigned i, len = DIP_HEADER_SIZE + frame->len;
89 126
90 if (!intel_hdmi->has_hdmi_sink)
91 return;
92 127
93 /* XXX first guess at handling video port, is this corrent? */ 128 /* XXX first guess at handling video port, is this corrent? */
94 if (intel_hdmi->sdvox_reg == SDVOB) 129 if (intel_hdmi->sdvox_reg == SDVOB)
@@ -98,18 +133,87 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
98 else 133 else
99 return; 134 return;
100 135
101 I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | 136 flags = intel_infoframe_index(frame);
102 VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC); 137
138 val &= ~VIDEO_DIP_SELECT_MASK;
103 139
104 intel_dip_infoframe_csum(&avi_if); 140 I915_WRITE(VIDEO_DIP_CTL, val | port | flags);
105 for (i = 0; i < sizeof(avi_if); i += 4) { 141
142 for (i = 0; i < len; i += 4) {
106 I915_WRITE(VIDEO_DIP_DATA, *data); 143 I915_WRITE(VIDEO_DIP_DATA, *data);
107 data++; 144 data++;
108 } 145 }
109 146
110 I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | 147 flags |= intel_infoframe_flags(frame);
111 VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC | 148
112 VIDEO_DIP_ENABLE_AVI); 149 I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
150}
151
152static void ironlake_write_infoframe(struct drm_encoder *encoder,
153 struct dip_infoframe *frame)
154{
155 uint32_t *data = (uint32_t *)frame;
156 struct drm_device *dev = encoder->dev;
157 struct drm_i915_private *dev_priv = dev->dev_private;
158 struct drm_crtc *crtc = encoder->crtc;
159 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
160 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
161 unsigned i, len = DIP_HEADER_SIZE + frame->len;
162 u32 flags, val = I915_READ(reg);
163
164 intel_wait_for_vblank(dev, intel_crtc->pipe);
165
166 flags = intel_infoframe_index(frame);
167
168 val &= ~VIDEO_DIP_SELECT_MASK;
169
170 I915_WRITE(reg, val | flags);
171
172 for (i = 0; i < len; i += 4) {
173 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
174 data++;
175 }
176
177 flags |= intel_infoframe_flags(frame);
178
179 I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
180}
181static void intel_set_infoframe(struct drm_encoder *encoder,
182 struct dip_infoframe *frame)
183{
184 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
185
186 if (!intel_hdmi->has_hdmi_sink)
187 return;
188
189 intel_dip_infoframe_csum(frame);
190 intel_hdmi->write_infoframe(encoder, frame);
191}
192
193static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
194{
195 struct dip_infoframe avi_if = {
196 .type = DIP_TYPE_AVI,
197 .ver = DIP_VERSION_AVI,
198 .len = DIP_LEN_AVI,
199 };
200
201 intel_set_infoframe(encoder, &avi_if);
202}
203
204static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
205{
206 struct dip_infoframe spd_if;
207
208 memset(&spd_if, 0, sizeof(spd_if));
209 spd_if.type = DIP_TYPE_SPD;
210 spd_if.ver = DIP_VERSION_SPD;
211 spd_if.len = DIP_LEN_SPD;
212 strcpy(spd_if.body.spd.vn, "Intel");
213 strcpy(spd_if.body.spd.pd, "Integrated gfx");
214 spd_if.body.spd.sdi = DIP_SPD_PC;
215
216 intel_set_infoframe(encoder, &spd_if);
113} 217}
114 218
115static void intel_hdmi_mode_set(struct drm_encoder *encoder, 219static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -156,6 +260,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
156 POSTING_READ(intel_hdmi->sdvox_reg); 260 POSTING_READ(intel_hdmi->sdvox_reg);
157 261
158 intel_hdmi_set_avi_infoframe(encoder); 262 intel_hdmi_set_avi_infoframe(encoder);
263 intel_hdmi_set_spd_infoframe(encoder);
159} 264}
160 265
161static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) 266static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
@@ -433,6 +538,11 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
433 538
434 intel_hdmi->sdvox_reg = sdvox_reg; 539 intel_hdmi->sdvox_reg = sdvox_reg;
435 540
541 if (!HAS_PCH_SPLIT(dev))
542 intel_hdmi->write_infoframe = i9xx_write_infoframe;
543 else
544 intel_hdmi->write_infoframe = ironlake_write_infoframe;
545
436 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); 546 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
437 547
438 intel_hdmi_add_properties(intel_hdmi, connector); 548 intel_hdmi_add_properties(intel_hdmi, connector);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b28f7bd9f88a..31da77f5c051 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -72,14 +72,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
72{ 72{
73 struct drm_device *dev = intel_lvds->base.base.dev; 73 struct drm_device *dev = intel_lvds->base.base.dev;
74 struct drm_i915_private *dev_priv = dev->dev_private; 74 struct drm_i915_private *dev_priv = dev->dev_private;
75 u32 ctl_reg, lvds_reg; 75 u32 ctl_reg, lvds_reg, stat_reg;
76 76
77 if (HAS_PCH_SPLIT(dev)) { 77 if (HAS_PCH_SPLIT(dev)) {
78 ctl_reg = PCH_PP_CONTROL; 78 ctl_reg = PCH_PP_CONTROL;
79 lvds_reg = PCH_LVDS; 79 lvds_reg = PCH_LVDS;
80 stat_reg = PCH_PP_STATUS;
80 } else { 81 } else {
81 ctl_reg = PP_CONTROL; 82 ctl_reg = PP_CONTROL;
82 lvds_reg = LVDS; 83 lvds_reg = LVDS;
84 stat_reg = PP_STATUS;
83 } 85 }
84 86
85 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); 87 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
@@ -94,17 +96,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
94 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", 96 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
95 intel_lvds->pfit_control, 97 intel_lvds->pfit_control,
96 intel_lvds->pfit_pgm_ratios); 98 intel_lvds->pfit_pgm_ratios);
97 if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) { 99
98 DRM_ERROR("timed out waiting for panel to power off\n"); 100 I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
99 } else { 101 I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
100 I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); 102 intel_lvds->pfit_dirty = false;
101 I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
102 intel_lvds->pfit_dirty = false;
103 }
104 } 103 }
105 104
106 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); 105 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
107 POSTING_READ(lvds_reg); 106 POSTING_READ(lvds_reg);
107 if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
108 DRM_ERROR("timed out waiting for panel to power on\n");
108 109
109 intel_panel_enable_backlight(dev); 110 intel_panel_enable_backlight(dev);
110} 111}
@@ -113,24 +114,25 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
113{ 114{
114 struct drm_device *dev = intel_lvds->base.base.dev; 115 struct drm_device *dev = intel_lvds->base.base.dev;
115 struct drm_i915_private *dev_priv = dev->dev_private; 116 struct drm_i915_private *dev_priv = dev->dev_private;
116 u32 ctl_reg, lvds_reg; 117 u32 ctl_reg, lvds_reg, stat_reg;
117 118
118 if (HAS_PCH_SPLIT(dev)) { 119 if (HAS_PCH_SPLIT(dev)) {
119 ctl_reg = PCH_PP_CONTROL; 120 ctl_reg = PCH_PP_CONTROL;
120 lvds_reg = PCH_LVDS; 121 lvds_reg = PCH_LVDS;
122 stat_reg = PCH_PP_STATUS;
121 } else { 123 } else {
122 ctl_reg = PP_CONTROL; 124 ctl_reg = PP_CONTROL;
123 lvds_reg = LVDS; 125 lvds_reg = LVDS;
126 stat_reg = PP_STATUS;
124 } 127 }
125 128
126 intel_panel_disable_backlight(dev); 129 intel_panel_disable_backlight(dev);
127 130
128 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); 131 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
132 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
133 DRM_ERROR("timed out waiting for panel to power off\n");
129 134
130 if (intel_lvds->pfit_control) { 135 if (intel_lvds->pfit_control) {
131 if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
132 DRM_ERROR("timed out waiting for panel to power off\n");
133
134 I915_WRITE(PFIT_CONTROL, 0); 136 I915_WRITE(PFIT_CONTROL, 0);
135 intel_lvds->pfit_dirty = true; 137 intel_lvds->pfit_dirty = true;
136 } 138 }
@@ -398,53 +400,21 @@ out:
398 400
399static void intel_lvds_prepare(struct drm_encoder *encoder) 401static void intel_lvds_prepare(struct drm_encoder *encoder)
400{ 402{
401 struct drm_device *dev = encoder->dev;
402 struct drm_i915_private *dev_priv = dev->dev_private;
403 struct intel_lvds *intel_lvds = to_intel_lvds(encoder); 403 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
404 404
405 /* We try to do the minimum that is necessary in order to unlock 405 /*
406 * the registers for mode setting.
407 *
408 * On Ironlake, this is quite simple as we just set the unlock key
409 * and ignore all subtleties. (This may cause some issues...)
410 *
411 * Prior to Ironlake, we must disable the pipe if we want to adjust 406 * Prior to Ironlake, we must disable the pipe if we want to adjust
412 * the panel fitter. However at all other times we can just reset 407 * the panel fitter. However at all other times we can just reset
413 * the registers regardless. 408 * the registers regardless.
414 */ 409 */
415 410 if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty)
416 if (HAS_PCH_SPLIT(dev)) { 411 intel_lvds_disable(intel_lvds);
417 I915_WRITE(PCH_PP_CONTROL,
418 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
419 } else if (intel_lvds->pfit_dirty) {
420 I915_WRITE(PP_CONTROL,
421 (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS)
422 & ~POWER_TARGET_ON);
423 } else {
424 I915_WRITE(PP_CONTROL,
425 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
426 }
427} 412}
428 413
429static void intel_lvds_commit(struct drm_encoder *encoder) 414static void intel_lvds_commit(struct drm_encoder *encoder)
430{ 415{
431 struct drm_device *dev = encoder->dev;
432 struct drm_i915_private *dev_priv = dev->dev_private;
433 struct intel_lvds *intel_lvds = to_intel_lvds(encoder); 416 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
434 417
435 /* Undo any unlocking done in prepare to prevent accidental
436 * adjustment of the registers.
437 */
438 if (HAS_PCH_SPLIT(dev)) {
439 u32 val = I915_READ(PCH_PP_CONTROL);
440 if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
441 I915_WRITE(PCH_PP_CONTROL, val & 0x3);
442 } else {
443 u32 val = I915_READ(PP_CONTROL);
444 if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
445 I915_WRITE(PP_CONTROL, val & 0x3);
446 }
447
448 /* Always do a full power on as we do not know what state 418 /* Always do a full power on as we do not know what state
449 * we were left in. 419 * we were left in.
450 */ 420 */
@@ -582,6 +552,8 @@ static void intel_lvds_destroy(struct drm_connector *connector)
582 struct drm_device *dev = connector->dev; 552 struct drm_device *dev = connector->dev;
583 struct drm_i915_private *dev_priv = dev->dev_private; 553 struct drm_i915_private *dev_priv = dev->dev_private;
584 554
555 intel_panel_destroy_backlight(dev);
556
585 if (dev_priv->lid_notifier.notifier_call) 557 if (dev_priv->lid_notifier.notifier_call)
586 acpi_lid_notifier_unregister(&dev_priv->lid_notifier); 558 acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
587 drm_sysfs_connector_remove(connector); 559 drm_sysfs_connector_remove(connector);
@@ -690,6 +662,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
690 }, 662 },
691 { 663 {
692 .callback = intel_no_lvds_dmi_callback, 664 .callback = intel_no_lvds_dmi_callback,
665 .ident = "Dell OptiPlex FX170",
666 .matches = {
667 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
668 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"),
669 },
670 },
671 {
672 .callback = intel_no_lvds_dmi_callback,
693 .ident = "AOpen Mini PC", 673 .ident = "AOpen Mini PC",
694 .matches = { 674 .matches = {
695 DMI_MATCH(DMI_SYS_VENDOR, "AOpen"), 675 DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
@@ -1032,6 +1012,19 @@ out:
1032 pwm = I915_READ(BLC_PWM_PCH_CTL1); 1012 pwm = I915_READ(BLC_PWM_PCH_CTL1);
1033 pwm |= PWM_PCH_ENABLE; 1013 pwm |= PWM_PCH_ENABLE;
1034 I915_WRITE(BLC_PWM_PCH_CTL1, pwm); 1014 I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
1015 /*
1016 * Unlock registers and just
1017 * leave them unlocked
1018 */
1019 I915_WRITE(PCH_PP_CONTROL,
1020 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
1021 } else {
1022 /*
1023 * Unlock registers and just
1024 * leave them unlocked
1025 */
1026 I915_WRITE(PP_CONTROL,
1027 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
1035 } 1028 }
1036 dev_priv->lid_notifier.notifier_call = intel_lid_notify; 1029 dev_priv->lid_notifier.notifier_call = intel_lid_notify;
1037 if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { 1030 if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
@@ -1041,6 +1034,9 @@ out:
1041 /* keep the LVDS connector */ 1034 /* keep the LVDS connector */
1042 dev_priv->int_lvds_connector = connector; 1035 dev_priv->int_lvds_connector = connector;
1043 drm_sysfs_connector_add(connector); 1036 drm_sysfs_connector_add(connector);
1037
1038 intel_panel_setup_backlight(dev);
1039
1044 return true; 1040 return true;
1045 1041
1046failed: 1042failed:
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index b7c5ddb564d1..b8e8158bb16e 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -227,7 +227,6 @@ void intel_opregion_asle_intr(struct drm_device *dev)
227 asle->aslc = asle_stat; 227 asle->aslc = asle_stat;
228} 228}
229 229
230/* Only present on Ironlake+ */
231void intel_opregion_gse_intr(struct drm_device *dev) 230void intel_opregion_gse_intr(struct drm_device *dev)
232{ 231{
233 struct drm_i915_private *dev_priv = dev->dev_private; 232 struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a06ff07a4d3b..a9e0c7bcd317 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -83,11 +83,15 @@ intel_pch_panel_fitting(struct drm_device *dev,
83 u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; 83 u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
84 if (scaled_width > scaled_height) { /* pillar */ 84 if (scaled_width > scaled_height) { /* pillar */
85 width = scaled_height / mode->vdisplay; 85 width = scaled_height / mode->vdisplay;
86 if (width & 1)
87 width++;
86 x = (adjusted_mode->hdisplay - width + 1) / 2; 88 x = (adjusted_mode->hdisplay - width + 1) / 2;
87 y = 0; 89 y = 0;
88 height = adjusted_mode->vdisplay; 90 height = adjusted_mode->vdisplay;
89 } else if (scaled_width < scaled_height) { /* letter */ 91 } else if (scaled_width < scaled_height) { /* letter */
90 height = scaled_width / mode->hdisplay; 92 height = scaled_width / mode->hdisplay;
93 if (height & 1)
94 height++;
91 y = (adjusted_mode->vdisplay - height + 1) / 2; 95 y = (adjusted_mode->vdisplay - height + 1) / 2;
92 x = 0; 96 x = 0;
93 width = adjusted_mode->hdisplay; 97 width = adjusted_mode->hdisplay;
@@ -273,7 +277,7 @@ void intel_panel_enable_backlight(struct drm_device *dev)
273 dev_priv->backlight_enabled = true; 277 dev_priv->backlight_enabled = true;
274} 278}
275 279
276void intel_panel_setup_backlight(struct drm_device *dev) 280static void intel_panel_init_backlight(struct drm_device *dev)
277{ 281{
278 struct drm_i915_private *dev_priv = dev->dev_private; 282 struct drm_i915_private *dev_priv = dev->dev_private;
279 283
@@ -305,3 +309,73 @@ intel_panel_detect(struct drm_device *dev)
305 309
306 return connector_status_unknown; 310 return connector_status_unknown;
307} 311}
312
313#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
314static int intel_panel_update_status(struct backlight_device *bd)
315{
316 struct drm_device *dev = bl_get_data(bd);
317 intel_panel_set_backlight(dev, bd->props.brightness);
318 return 0;
319}
320
321static int intel_panel_get_brightness(struct backlight_device *bd)
322{
323 struct drm_device *dev = bl_get_data(bd);
324 return intel_panel_get_backlight(dev);
325}
326
327static const struct backlight_ops intel_panel_bl_ops = {
328 .update_status = intel_panel_update_status,
329 .get_brightness = intel_panel_get_brightness,
330};
331
332int intel_panel_setup_backlight(struct drm_device *dev)
333{
334 struct drm_i915_private *dev_priv = dev->dev_private;
335 struct backlight_properties props;
336 struct drm_connector *connector;
337
338 intel_panel_init_backlight(dev);
339
340 if (dev_priv->int_lvds_connector)
341 connector = dev_priv->int_lvds_connector;
342 else if (dev_priv->int_edp_connector)
343 connector = dev_priv->int_edp_connector;
344 else
345 return -ENODEV;
346
347 props.type = BACKLIGHT_RAW;
348 props.max_brightness = intel_panel_get_max_backlight(dev);
349 dev_priv->backlight =
350 backlight_device_register("intel_backlight",
351 &connector->kdev, dev,
352 &intel_panel_bl_ops, &props);
353
354 if (IS_ERR(dev_priv->backlight)) {
355 DRM_ERROR("Failed to register backlight: %ld\n",
356 PTR_ERR(dev_priv->backlight));
357 dev_priv->backlight = NULL;
358 return -ENODEV;
359 }
360 dev_priv->backlight->props.brightness = intel_panel_get_backlight(dev);
361 return 0;
362}
363
364void intel_panel_destroy_backlight(struct drm_device *dev)
365{
366 struct drm_i915_private *dev_priv = dev->dev_private;
367 if (dev_priv->backlight)
368 backlight_device_unregister(dev_priv->backlight);
369}
370#else
371int intel_panel_setup_backlight(struct drm_device *dev)
372{
373 intel_panel_init_backlight(dev);
374 return 0;
375}
376
377void intel_panel_destroy_backlight(struct drm_device *dev)
378{
379 return;
380}
381#endif
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e9615685a39c..c30626ea9f93 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -290,6 +290,10 @@ static int init_render_ring(struct intel_ring_buffer *ring)
290 if (IS_GEN6(dev) || IS_GEN7(dev)) 290 if (IS_GEN6(dev) || IS_GEN7(dev))
291 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; 291 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
292 I915_WRITE(MI_MODE, mode); 292 I915_WRITE(MI_MODE, mode);
293 if (IS_GEN7(dev))
294 I915_WRITE(GFX_MODE_GEN7,
295 GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
296 GFX_MODE_ENABLE(GFX_REPLAY_MODE));
293 } 297 }
294 298
295 if (INTEL_INFO(dev)->gen >= 6) { 299 if (INTEL_INFO(dev)->gen >= 6) {
@@ -1321,6 +1325,9 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1321 ring->get_seqno = pc_render_get_seqno; 1325 ring->get_seqno = pc_render_get_seqno;
1322 } 1326 }
1323 1327
1328 if (!I915_NEED_GFX_HWS(dev))
1329 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1330
1324 ring->dev = dev; 1331 ring->dev = dev;
1325 INIT_LIST_HEAD(&ring->active_list); 1332 INIT_LIST_HEAD(&ring->active_list);
1326 INIT_LIST_HEAD(&ring->request_list); 1333 INIT_LIST_HEAD(&ring->request_list);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 8d02d875376d..c919cfc8f2fd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -530,7 +530,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
530 nouveau_gpuobj_ref(NULL, &obj); 530 nouveau_gpuobj_ref(NULL, &obj);
531 if (ret) 531 if (ret)
532 return ret; 532 return ret;
533 } else { 533 } else
534 if (USE_SEMA(dev)) {
534 /* map fence bo into channel's vm */ 535 /* map fence bo into channel's vm */
535 ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm, 536 ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
536 &chan->fence.vma); 537 &chan->fence.vma);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index c444cadbf849..2706cb3d871a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -37,8 +37,11 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
37 return -ENOMEM; 37 return -ENOMEM;
38 38
39 nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL); 39 nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
40 if (!nvbe->ttm_alloced) 40 if (!nvbe->ttm_alloced) {
41 kfree(nvbe->pages);
42 nvbe->pages = NULL;
41 return -ENOMEM; 43 return -ENOMEM;
44 }
42 45
43 nvbe->nr_pages = 0; 46 nvbe->nr_pages = 0;
44 while (num_pages--) { 47 while (num_pages--) {
@@ -126,7 +129,7 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
126 129
127 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { 130 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
128 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); 131 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
129 dma_offset += NV_CTXDMA_PAGE_SIZE; 132 offset_l += NV_CTXDMA_PAGE_SIZE;
130 } 133 }
131 } 134 }
132 135
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 118261d4927a..5e45398a9e2d 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -781,11 +781,20 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
781 struct drm_device *dev = crtc->dev; 781 struct drm_device *dev = crtc->dev;
782 struct drm_nouveau_private *dev_priv = dev->dev_private; 782 struct drm_nouveau_private *dev_priv = dev->dev_private;
783 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 783 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
784 struct drm_framebuffer *drm_fb = nv_crtc->base.fb; 784 struct drm_framebuffer *drm_fb;
785 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); 785 struct nouveau_framebuffer *fb;
786 int arb_burst, arb_lwm; 786 int arb_burst, arb_lwm;
787 int ret; 787 int ret;
788 788
789 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
790
791 /* no fb bound */
792 if (!atomic && !crtc->fb) {
793 NV_DEBUG_KMS(dev, "No FB bound\n");
794 return 0;
795 }
796
797
789 /* If atomic, we want to switch to the fb we were passed, so 798 /* If atomic, we want to switch to the fb we were passed, so
790 * now we update pointers to do that. (We don't pin; just 799 * now we update pointers to do that. (We don't pin; just
791 * assume we're already pinned and update the base address.) 800 * assume we're already pinned and update the base address.)
@@ -794,6 +803,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
794 drm_fb = passed_fb; 803 drm_fb = passed_fb;
795 fb = nouveau_framebuffer(passed_fb); 804 fb = nouveau_framebuffer(passed_fb);
796 } else { 805 } else {
806 drm_fb = crtc->fb;
807 fb = nouveau_framebuffer(crtc->fb);
797 /* If not atomic, we can go ahead and pin, and unpin the 808 /* If not atomic, we can go ahead and pin, and unpin the
798 * old fb we were passed. 809 * old fb we were passed.
799 */ 810 */
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 46ad59ea2185..5d989073ba6e 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -519,12 +519,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
519 struct drm_device *dev = nv_crtc->base.dev; 519 struct drm_device *dev = nv_crtc->base.dev;
520 struct drm_nouveau_private *dev_priv = dev->dev_private; 520 struct drm_nouveau_private *dev_priv = dev->dev_private;
521 struct nouveau_channel *evo = nv50_display(dev)->master; 521 struct nouveau_channel *evo = nv50_display(dev)->master;
522 struct drm_framebuffer *drm_fb = nv_crtc->base.fb; 522 struct drm_framebuffer *drm_fb;
523 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); 523 struct nouveau_framebuffer *fb;
524 int ret; 524 int ret;
525 525
526 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 526 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
527 527
528 /* no fb bound */
529 if (!atomic && !crtc->fb) {
530 NV_DEBUG_KMS(dev, "No FB bound\n");
531 return 0;
532 }
533
528 /* If atomic, we want to switch to the fb we were passed, so 534 /* If atomic, we want to switch to the fb we were passed, so
529 * now we update pointers to do that. (We don't pin; just 535 * now we update pointers to do that. (We don't pin; just
530 * assume we're already pinned and update the base address.) 536 * assume we're already pinned and update the base address.)
@@ -533,6 +539,8 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
533 drm_fb = passed_fb; 539 drm_fb = passed_fb;
534 fb = nouveau_framebuffer(passed_fb); 540 fb = nouveau_framebuffer(passed_fb);
535 } else { 541 } else {
542 drm_fb = crtc->fb;
543 fb = nouveau_framebuffer(crtc->fb);
536 /* If not atomic, we can go ahead and pin, and unpin the 544 /* If not atomic, we can go ahead and pin, and unpin the
537 * old fb we were passed. 545 * old fb we were passed.
538 */ 546 */
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 3896ef811102..9f363e0c4b60 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -5,6 +5,7 @@
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6 6
7hostprogs-y := mkregtable 7hostprogs-y := mkregtable
8clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
8 9
9quiet_cmd_mkregtable = MKREGTABLE $@ 10quiet_cmd_mkregtable = MKREGTABLE $@
10 cmd_mkregtable = $(obj)/mkregtable $< > $@ 11 cmd_mkregtable = $(obj)/mkregtable $< > $@
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index ebdb0fdb8348..e88c64417a8a 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1245,6 +1245,9 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
1245 char name[512]; 1245 char name[512];
1246 int i; 1246 int i;
1247 1247
1248 if (!ctx)
1249 return NULL;
1250
1248 ctx->card = card; 1251 ctx->card = card;
1249 ctx->bios = bios; 1252 ctx->bios = bios;
1250 1253
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 645b84b3d203..7ad43c6b1db7 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -613,6 +613,18 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
613 return true; 613 return true;
614} 614}
615 615
616bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
617{
618 u8 link_status[DP_LINK_STATUS_SIZE];
619 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
620
621 if (!radeon_dp_get_link_status(radeon_connector, link_status))
622 return false;
623 if (dp_channel_eq_ok(link_status, dig->dp_lane_count))
624 return false;
625 return true;
626}
627
616struct radeon_dp_link_train_info { 628struct radeon_dp_link_train_info {
617 struct radeon_device *rdev; 629 struct radeon_device *rdev;
618 struct drm_encoder *encoder; 630 struct drm_encoder *encoder;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 14dce9f22172..dc0a5b56c81a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -41,6 +41,31 @@ static void evergreen_gpu_init(struct radeon_device *rdev);
41void evergreen_fini(struct radeon_device *rdev); 41void evergreen_fini(struct radeon_device *rdev);
42static void evergreen_pcie_gen2_enable(struct radeon_device *rdev); 42static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
43 43
44void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
45{
46 u16 ctl, v;
47 int cap, err;
48
49 cap = pci_pcie_cap(rdev->pdev);
50 if (!cap)
51 return;
52
53 err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
54 if (err)
55 return;
56
57 v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
58
59 /* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
60 * to avoid hangs or perfomance issues
61 */
62 if ((v == 0) || (v == 6) || (v == 7)) {
63 ctl &= ~PCI_EXP_DEVCTL_READRQ;
64 ctl |= (2 << 12);
65 pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl);
66 }
67}
68
44void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) 69void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
45{ 70{
46 /* enable the pflip int */ 71 /* enable the pflip int */
@@ -743,7 +768,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
743 !evergreen_average_bandwidth_vs_available_bandwidth(&wm) || 768 !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
744 !evergreen_check_latency_hiding(&wm) || 769 !evergreen_check_latency_hiding(&wm) ||
745 (rdev->disp_priority == 2)) { 770 (rdev->disp_priority == 2)) {
746 DRM_INFO("force priority to high\n"); 771 DRM_DEBUG_KMS("force priority to high\n");
747 priority_a_cnt |= PRIORITY_ALWAYS_ON; 772 priority_a_cnt |= PRIORITY_ALWAYS_ON;
748 priority_b_cnt |= PRIORITY_ALWAYS_ON; 773 priority_b_cnt |= PRIORITY_ALWAYS_ON;
749 } 774 }
@@ -1357,6 +1382,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
1357 SOFT_RESET_PA | 1382 SOFT_RESET_PA |
1358 SOFT_RESET_SH | 1383 SOFT_RESET_SH |
1359 SOFT_RESET_VGT | 1384 SOFT_RESET_VGT |
1385 SOFT_RESET_SPI |
1360 SOFT_RESET_SX)); 1386 SOFT_RESET_SX));
1361 RREG32(GRBM_SOFT_RESET); 1387 RREG32(GRBM_SOFT_RESET);
1362 mdelay(15); 1388 mdelay(15);
@@ -1862,6 +1888,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1862 1888
1863 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 1889 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1864 1890
1891 evergreen_fix_pci_max_read_req_size(rdev);
1892
1865 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; 1893 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
1866 1894
1867 cc_gc_shader_pipe_config |= 1895 cc_gc_shader_pipe_config |=
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 189e86522b5b..a134790903d3 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -428,7 +428,7 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
428 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm); 428 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
429 429
430 i = (reg >> 7); 430 i = (reg >> 7);
431 if (i > last_reg) { 431 if (i >= last_reg) {
432 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 432 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
433 return -EINVAL; 433 return -EINVAL;
434 } 434 }
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 44c4750f4518..cbf57d75d925 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -39,6 +39,7 @@ extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
39extern void evergreen_mc_program(struct radeon_device *rdev); 39extern void evergreen_mc_program(struct radeon_device *rdev);
40extern void evergreen_irq_suspend(struct radeon_device *rdev); 40extern void evergreen_irq_suspend(struct radeon_device *rdev);
41extern int evergreen_mc_init(struct radeon_device *rdev); 41extern int evergreen_mc_init(struct radeon_device *rdev);
42extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
42 43
43#define EVERGREEN_PFP_UCODE_SIZE 1120 44#define EVERGREEN_PFP_UCODE_SIZE 1120
44#define EVERGREEN_PM4_UCODE_SIZE 1376 45#define EVERGREEN_PM4_UCODE_SIZE 1376
@@ -669,6 +670,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
669 670
670 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 671 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
671 672
673 evergreen_fix_pci_max_read_req_size(rdev);
674
672 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 675 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
673 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 676 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
674 677
@@ -1159,6 +1162,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
1159 SOFT_RESET_PA | 1162 SOFT_RESET_PA |
1160 SOFT_RESET_SH | 1163 SOFT_RESET_SH |
1161 SOFT_RESET_VGT | 1164 SOFT_RESET_VGT |
1165 SOFT_RESET_SPI |
1162 SOFT_RESET_SX)); 1166 SOFT_RESET_SX));
1163 RREG32(GRBM_SOFT_RESET); 1167 RREG32(GRBM_SOFT_RESET);
1164 mdelay(15); 1168 mdelay(15);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index db8ef1905d5f..cf83aa05a684 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -915,12 +915,11 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
915{ 915{
916 struct r600_cs_track *track = (struct r600_cs_track *)p->track; 916 struct r600_cs_track *track = (struct r600_cs_track *)p->track;
917 struct radeon_cs_reloc *reloc; 917 struct radeon_cs_reloc *reloc;
918 u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
919 u32 m, i, tmp, *ib; 918 u32 m, i, tmp, *ib;
920 int r; 919 int r;
921 920
922 i = (reg >> 7); 921 i = (reg >> 7);
923 if (i > last_reg) { 922 if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
924 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 923 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
925 return -EINVAL; 924 return -EINVAL;
926 } 925 }
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index dcd0863e31ae..b6e18c8db9f5 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -219,6 +219,9 @@ void radeon_get_clock_info(struct drm_device *dev)
219 } else { 219 } else {
220 DRM_INFO("Using generic clock info\n"); 220 DRM_INFO("Using generic clock info\n");
221 221
222 /* may need to be per card */
223 rdev->clock.max_pixel_clock = 35000;
224
222 if (rdev->flags & RADEON_IS_IGP) { 225 if (rdev->flags & RADEON_IS_IGP) {
223 p1pll->reference_freq = 1432; 226 p1pll->reference_freq = 1432;
224 p2pll->reference_freq = 1432; 227 p2pll->reference_freq = 1432;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index a74217cd192f..63675241c7ff 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2557,6 +2557,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
2557 u16 offset, misc, misc2 = 0; 2557 u16 offset, misc, misc2 = 0;
2558 u8 rev, blocks, tmp; 2558 u8 rev, blocks, tmp;
2559 int state_index = 0; 2559 int state_index = 0;
2560 struct radeon_i2c_bus_rec i2c_bus;
2560 2561
2561 rdev->pm.default_power_state_index = -1; 2562 rdev->pm.default_power_state_index = -1;
2562 2563
@@ -2575,7 +2576,6 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
2575 offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); 2576 offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
2576 if (offset) { 2577 if (offset) {
2577 u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0; 2578 u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0;
2578 struct radeon_i2c_bus_rec i2c_bus;
2579 2579
2580 rev = RBIOS8(offset); 2580 rev = RBIOS8(offset);
2581 2581
@@ -2617,6 +2617,25 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
2617 i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); 2617 i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
2618 } 2618 }
2619 } 2619 }
2620 } else {
2621 /* boards with a thermal chip, but no overdrive table */
2622
2623 /* Asus 9600xt has an f75375 on the monid bus */
2624 if ((dev->pdev->device == 0x4152) &&
2625 (dev->pdev->subsystem_vendor == 0x1043) &&
2626 (dev->pdev->subsystem_device == 0xc002)) {
2627 i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
2628 rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
2629 if (rdev->pm.i2c_bus) {
2630 struct i2c_board_info info = { };
2631 const char *name = "f75375";
2632 info.addr = 0x28;
2633 strlcpy(info.type, name, sizeof(info.type));
2634 i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
2635 DRM_INFO("Possible %s thermal controller at 0x%02x\n",
2636 name, info.addr);
2637 }
2638 }
2620 } 2639 }
2621 2640
2622 if (rdev->flags & RADEON_IS_MOBILITY) { 2641 if (rdev->flags & RADEON_IS_MOBILITY) {
@@ -3279,6 +3298,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
3279 rdev->pdev->subsystem_device == 0x30a4) 3298 rdev->pdev->subsystem_device == 0x30a4)
3280 return; 3299 return;
3281 3300
3301 /* quirk for rs4xx Compaq Presario V5245EU laptop to make it resume
3302 * - it hangs on resume inside the dynclk 1 table.
3303 */
3304 if (rdev->family == CHIP_RS480 &&
3305 rdev->pdev->subsystem_vendor == 0x103c &&
3306 rdev->pdev->subsystem_device == 0x30ae)
3307 return;
3308
3282 /* DYN CLK 1 */ 3309 /* DYN CLK 1 */
3283 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); 3310 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
3284 if (table) 3311 if (table)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 9792d4ffdc86..c4b8741dbf58 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -60,18 +60,20 @@ void radeon_connector_hotplug(struct drm_connector *connector)
60 60
61 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); 61 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
62 62
63 /* powering up/down the eDP panel generates hpd events which 63 /* if the connector is already off, don't turn it back on */
64 * can interfere with modesetting. 64 if (connector->dpms != DRM_MODE_DPMS_ON)
65 */
66 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
67 return; 65 return;
68 66
69 /* pre-r600 did not always have the hpd pins mapped accurately to connectors */ 67 /* just deal with DP (not eDP) here. */
70 if (rdev->family >= CHIP_R600) { 68 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
71 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) 69 int saved_dpms = connector->dpms;
70
71 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
72 radeon_dp_needs_link_train(radeon_connector))
72 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 73 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
73 else 74 else
74 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 75 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
76 connector->dpms = saved_dpms;
75 } 77 }
76} 78}
77 79
@@ -430,16 +432,73 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
430 return 0; 432 return 0;
431} 433}
432 434
435/*
436 * Some integrated ATI Radeon chipset implementations (e. g.
437 * Asus M2A-VM HDMI) may indicate the availability of a DDC,
438 * even when there's no monitor connected. For these connectors
439 * following DDC probe extension will be applied: check also for the
440 * availability of EDID with at least a correct EDID header. Only then,
441 * DDC is assumed to be available. This prevents drm_get_edid() and
442 * drm_edid_block_valid() from periodically dumping data and kernel
443 * errors into the logs and onto the terminal.
444 */
445static bool radeon_connector_needs_extended_probe(struct radeon_device *dev,
446 uint32_t supported_device,
447 int connector_type)
448{
449 /* Asus M2A-VM HDMI board sends data to i2c bus even,
450 * if HDMI add-on card is not plugged in or HDMI is disabled in
451 * BIOS. Valid DDC can only be assumed, if also a valid EDID header
452 * can be retrieved via i2c bus during DDC probe */
453 if ((dev->pdev->device == 0x791e) &&
454 (dev->pdev->subsystem_vendor == 0x1043) &&
455 (dev->pdev->subsystem_device == 0x826d)) {
456 if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
457 (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
458 return true;
459 }
460 /* ECS A740GM-M with ATI RADEON 2100 sends data to i2c bus
461 * for a DVI connector that is not implemented */
462 if ((dev->pdev->device == 0x796e) &&
463 (dev->pdev->subsystem_vendor == 0x1019) &&
464 (dev->pdev->subsystem_device == 0x2615)) {
465 if ((connector_type == DRM_MODE_CONNECTOR_DVID) &&
466 (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
467 return true;
468 }
469 /* TOSHIBA Satellite L300D with ATI Mobility Radeon x1100
470 * (RS690M) sends data to i2c bus for a HDMI connector that
471 * is not implemented */
472 if ((dev->pdev->device == 0x791f) &&
473 (dev->pdev->subsystem_vendor == 0x1179) &&
474 (dev->pdev->subsystem_device == 0xff68)) {
475 if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
476 (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
477 return true;
478 }
479
480 /* Default: no EDID header probe required for DDC probing */
481 return false;
482}
483
433static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, 484static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
434 struct drm_connector *connector) 485 struct drm_connector *connector)
435{ 486{
436 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 487 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
437 struct drm_display_mode *native_mode = &radeon_encoder->native_mode; 488 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
489 struct drm_display_mode *t, *mode;
490
491 /* If the EDID preferred mode doesn't match the native mode, use it */
492 list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
493 if (mode->type & DRM_MODE_TYPE_PREFERRED) {
494 if (mode->hdisplay != native_mode->hdisplay ||
495 mode->vdisplay != native_mode->vdisplay)
496 memcpy(native_mode, mode, sizeof(*mode));
497 }
498 }
438 499
439 /* Try to get native mode details from EDID if necessary */ 500 /* Try to get native mode details from EDID if necessary */
440 if (!native_mode->clock) { 501 if (!native_mode->clock) {
441 struct drm_display_mode *t, *mode;
442
443 list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { 502 list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
444 if (mode->hdisplay == native_mode->hdisplay && 503 if (mode->hdisplay == native_mode->hdisplay &&
445 mode->vdisplay == native_mode->vdisplay) { 504 mode->vdisplay == native_mode->vdisplay) {
@@ -450,6 +509,7 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
450 } 509 }
451 } 510 }
452 } 511 }
512
453 if (!native_mode->clock) { 513 if (!native_mode->clock) {
454 DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n"); 514 DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
455 radeon_encoder->rmx_type = RMX_OFF; 515 radeon_encoder->rmx_type = RMX_OFF;
@@ -661,7 +721,8 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
661 ret = connector_status_disconnected; 721 ret = connector_status_disconnected;
662 722
663 if (radeon_connector->ddc_bus) 723 if (radeon_connector->ddc_bus)
664 dret = radeon_ddc_probe(radeon_connector); 724 dret = radeon_ddc_probe(radeon_connector,
725 radeon_connector->requires_extended_probe);
665 if (dret) { 726 if (dret) {
666 if (radeon_connector->edid) { 727 if (radeon_connector->edid) {
667 kfree(radeon_connector->edid); 728 kfree(radeon_connector->edid);
@@ -833,7 +894,8 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
833 bool dret = false; 894 bool dret = false;
834 895
835 if (radeon_connector->ddc_bus) 896 if (radeon_connector->ddc_bus)
836 dret = radeon_ddc_probe(radeon_connector); 897 dret = radeon_ddc_probe(radeon_connector,
898 radeon_connector->requires_extended_probe);
837 if (dret) { 899 if (dret) {
838 if (radeon_connector->edid) { 900 if (radeon_connector->edid) {
839 kfree(radeon_connector->edid); 901 kfree(radeon_connector->edid);
@@ -1235,12 +1297,33 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1235 if (!radeon_dig_connector->edp_on) 1297 if (!radeon_dig_connector->edp_on)
1236 atombios_set_edp_panel_power(connector, 1298 atombios_set_edp_panel_power(connector,
1237 ATOM_TRANSMITTER_ACTION_POWER_OFF); 1299 ATOM_TRANSMITTER_ACTION_POWER_OFF);
1238 } else { 1300 } else if (radeon_connector_encoder_is_dp_bridge(connector)) {
1239 /* need to setup ddc on the bridge */ 1301 /* DP bridges are always DP */
1240 if (radeon_connector_encoder_is_dp_bridge(connector)) { 1302 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
1303 /* get the DPCD from the bridge */
1304 radeon_dp_getdpcd(radeon_connector);
1305
1306 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
1307 ret = connector_status_connected;
1308 else {
1309 /* need to setup ddc on the bridge */
1241 if (encoder) 1310 if (encoder)
1242 radeon_atom_ext_encoder_setup_ddc(encoder); 1311 radeon_atom_ext_encoder_setup_ddc(encoder);
1312 if (radeon_ddc_probe(radeon_connector,
1313 radeon_connector->requires_extended_probe))
1314 ret = connector_status_connected;
1315 }
1316
1317 if ((ret == connector_status_disconnected) &&
1318 radeon_connector->dac_load_detect) {
1319 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1320 struct drm_encoder_helper_funcs *encoder_funcs;
1321 if (encoder) {
1322 encoder_funcs = encoder->helper_private;
1323 ret = encoder_funcs->detect(encoder, connector);
1324 }
1243 } 1325 }
1326 } else {
1244 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); 1327 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
1245 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { 1328 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
1246 ret = connector_status_connected; 1329 ret = connector_status_connected;
@@ -1251,20 +1334,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1251 if (radeon_dp_getdpcd(radeon_connector)) 1334 if (radeon_dp_getdpcd(radeon_connector))
1252 ret = connector_status_connected; 1335 ret = connector_status_connected;
1253 } else { 1336 } else {
1254 if (radeon_ddc_probe(radeon_connector)) 1337 if (radeon_ddc_probe(radeon_connector,
1338 radeon_connector->requires_extended_probe))
1255 ret = connector_status_connected; 1339 ret = connector_status_connected;
1256 } 1340 }
1257 } 1341 }
1258
1259 if ((ret == connector_status_disconnected) &&
1260 radeon_connector->dac_load_detect) {
1261 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1262 struct drm_encoder_helper_funcs *encoder_funcs;
1263 if (encoder) {
1264 encoder_funcs = encoder->helper_private;
1265 ret = encoder_funcs->detect(encoder, connector);
1266 }
1267 }
1268 } 1342 }
1269 1343
1270 radeon_connector_update_scratch_regs(connector, ret); 1344 radeon_connector_update_scratch_regs(connector, ret);
@@ -1406,6 +1480,9 @@ radeon_add_atom_connector(struct drm_device *dev,
1406 radeon_connector->shared_ddc = shared_ddc; 1480 radeon_connector->shared_ddc = shared_ddc;
1407 radeon_connector->connector_object_id = connector_object_id; 1481 radeon_connector->connector_object_id = connector_object_id;
1408 radeon_connector->hpd = *hpd; 1482 radeon_connector->hpd = *hpd;
1483 radeon_connector->requires_extended_probe =
1484 radeon_connector_needs_extended_probe(rdev, supported_device,
1485 connector_type);
1409 radeon_connector->router = *router; 1486 radeon_connector->router = *router;
1410 if (router->ddc_valid || router->cd_valid) { 1487 if (router->ddc_valid || router->cd_valid) {
1411 radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); 1488 radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
@@ -1752,6 +1829,9 @@ radeon_add_legacy_connector(struct drm_device *dev,
1752 radeon_connector->devices = supported_device; 1829 radeon_connector->devices = supported_device;
1753 radeon_connector->connector_object_id = connector_object_id; 1830 radeon_connector->connector_object_id = connector_object_id;
1754 radeon_connector->hpd = *hpd; 1831 radeon_connector->hpd = *hpd;
1832 radeon_connector->requires_extended_probe =
1833 radeon_connector_needs_extended_probe(rdev, supported_device,
1834 connector_type);
1755 switch (connector_type) { 1835 switch (connector_type) {
1756 case DRM_MODE_CONNECTOR_VGA: 1836 case DRM_MODE_CONNECTOR_VGA:
1757 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1837 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7cfaa7e2f3b5..b51e15725c6e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -32,6 +32,7 @@
32#include <drm/radeon_drm.h> 32#include <drm/radeon_drm.h>
33#include <linux/vgaarb.h> 33#include <linux/vgaarb.h>
34#include <linux/vga_switcheroo.h> 34#include <linux/vga_switcheroo.h>
35#include <linux/efi.h>
35#include "radeon_reg.h" 36#include "radeon_reg.h"
36#include "radeon.h" 37#include "radeon.h"
37#include "atom.h" 38#include "atom.h"
@@ -300,6 +301,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
300 mc->mc_vram_size = mc->aper_size; 301 mc->mc_vram_size = mc->aper_size;
301 } 302 }
302 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 303 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
304 if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
305 mc->real_vram_size = radeon_vram_limit;
303 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", 306 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
304 mc->mc_vram_size >> 20, mc->vram_start, 307 mc->mc_vram_size >> 20, mc->vram_start,
305 mc->vram_end, mc->real_vram_size >> 20); 308 mc->vram_end, mc->real_vram_size >> 20);
@@ -348,6 +351,9 @@ bool radeon_card_posted(struct radeon_device *rdev)
348{ 351{
349 uint32_t reg; 352 uint32_t reg;
350 353
354 if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
355 return false;
356
351 /* first check CRTCs */ 357 /* first check CRTCs */
352 if (ASIC_IS_DCE41(rdev)) { 358 if (ASIC_IS_DCE41(rdev)) {
353 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | 359 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
@@ -704,8 +710,9 @@ int radeon_device_init(struct radeon_device *rdev,
704 rdev->gpu_lockup = false; 710 rdev->gpu_lockup = false;
705 rdev->accel_working = false; 711 rdev->accel_working = false;
706 712
707 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n", 713 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
708 radeon_family_name[rdev->family], pdev->vendor, pdev->device); 714 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
715 pdev->subsystem_vendor, pdev->subsystem_device);
709 716
710 /* mutex initialization are all done here so we 717 /* mutex initialization are all done here so we
711 * can recall function without having locking issues */ 718 * can recall function without having locking issues */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 28f4655905bc..6cc17fb96a57 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -707,16 +707,21 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
707 radeon_router_select_ddc_port(radeon_connector); 707 radeon_router_select_ddc_port(radeon_connector);
708 708
709 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 709 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
710 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { 710 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
711 radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) {
711 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; 712 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
713
712 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || 714 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
713 dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) 715 dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
714 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); 716 radeon_connector->edid = drm_get_edid(&radeon_connector->base,
715 } 717 &dig->dp_i2c_bus->adapter);
716 if (!radeon_connector->ddc_bus) 718 else if (radeon_connector->ddc_bus && !radeon_connector->edid)
717 return -1; 719 radeon_connector->edid = drm_get_edid(&radeon_connector->base,
718 if (!radeon_connector->edid) { 720 &radeon_connector->ddc_bus->adapter);
719 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 721 } else {
722 if (radeon_connector->ddc_bus && !radeon_connector->edid)
723 radeon_connector->edid = drm_get_edid(&radeon_connector->base,
724 &radeon_connector->ddc_bus->adapter);
720 } 725 }
721 726
722 if (!radeon_connector->edid) { 727 if (!radeon_connector->edid) {
@@ -751,8 +756,17 @@ static int radeon_ddc_dump(struct drm_connector *connector)
751 if (!radeon_connector->ddc_bus) 756 if (!radeon_connector->ddc_bus)
752 return -1; 757 return -1;
753 edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); 758 edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
759 /* Log EDID retrieval status here. In particular with regard to
760 * connectors with requires_extended_probe flag set, that will prevent
761 * function radeon_dvi_detect() to fetch EDID on this connector,
762 * as long as there is no valid EDID header found */
754 if (edid) { 763 if (edid) {
764 DRM_INFO("Radeon display connector %s: Found valid EDID",
765 drm_get_connector_name(connector));
755 kfree(edid); 766 kfree(edid);
767 } else {
768 DRM_INFO("Radeon display connector %s: No monitor connected or invalid EDID",
769 drm_get_connector_name(connector));
756 } 770 }
757 return ret; 771 return ret;
758} 772}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 85f033f19a8a..e71d2ed7fa11 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -50,8 +50,8 @@
50 * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs 50 * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
51 * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query 51 * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
52 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query 52 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
53 * 2.10.0 - fusion 2D tiling, initial compute support for the CS checker 53 * 2.10.0 - fusion 2D tiling
54 * 2.11.0 - backend map 54 * 2.11.0 - backend map, initial compute support for the CS checker
55 */ 55 */
56#define KMS_DRIVER_MAJOR 2 56#define KMS_DRIVER_MAJOR 2
57#define KMS_DRIVER_MINOR 11 57#define KMS_DRIVER_MINOR 11
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index b293487e5aa3..319d85d7e759 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -2323,6 +2323,9 @@ radeon_add_atom_encoder(struct drm_device *dev,
2323 default: 2323 default:
2324 encoder->possible_crtcs = 0x3; 2324 encoder->possible_crtcs = 0x3;
2325 break; 2325 break;
2326 case 4:
2327 encoder->possible_crtcs = 0xf;
2328 break;
2326 case 6: 2329 case 6:
2327 encoder->possible_crtcs = 0x3f; 2330 encoder->possible_crtcs = 0x3f;
2328 break; 2331 break;
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 781196db792f..6c111c1fa3f9 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -32,17 +32,17 @@
32 * radeon_ddc_probe 32 * radeon_ddc_probe
33 * 33 *
34 */ 34 */
35bool radeon_ddc_probe(struct radeon_connector *radeon_connector) 35bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_extended_probe)
36{ 36{
37 u8 out_buf[] = { 0x0, 0x0}; 37 u8 out = 0x0;
38 u8 buf[2]; 38 u8 buf[8];
39 int ret; 39 int ret;
40 struct i2c_msg msgs[] = { 40 struct i2c_msg msgs[] = {
41 { 41 {
42 .addr = 0x50, 42 .addr = 0x50,
43 .flags = 0, 43 .flags = 0,
44 .len = 1, 44 .len = 1,
45 .buf = out_buf, 45 .buf = &out,
46 }, 46 },
47 { 47 {
48 .addr = 0x50, 48 .addr = 0x50,
@@ -52,15 +52,31 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
52 } 52 }
53 }; 53 };
54 54
55 /* Read 8 bytes from i2c for extended probe of EDID header */
56 if (requires_extended_probe)
57 msgs[1].len = 8;
58
55 /* on hw with routers, select right port */ 59 /* on hw with routers, select right port */
56 if (radeon_connector->router.ddc_valid) 60 if (radeon_connector->router.ddc_valid)
57 radeon_router_select_ddc_port(radeon_connector); 61 radeon_router_select_ddc_port(radeon_connector);
58 62
59 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); 63 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
60 if (ret == 2) 64 if (ret != 2)
61 return true; 65 /* Couldn't find an accessible DDC on this connector */
62 66 return false;
63 return false; 67 if (requires_extended_probe) {
68 /* Probe also for valid EDID header
69 * EDID header starts with:
70 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
71 * Only the first 6 bytes must be valid as
72 * drm_edid_block_valid() can fix the last 2 bytes */
73 if (drm_edid_header_is_valid(buf) < 6) {
74 /* Couldn't find an accessible EDID on this
75 * connector */
76 return false;
77 }
78 }
79 return true;
64} 80}
65 81
66/* bit banging i2c */ 82/* bit banging i2c */
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 6df4e3cec0c2..68820f5f6303 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -438,6 +438,9 @@ struct radeon_connector {
438 struct radeon_i2c_chan *ddc_bus; 438 struct radeon_i2c_chan *ddc_bus;
439 /* some systems have an hdmi and vga port with a shared ddc line */ 439 /* some systems have an hdmi and vga port with a shared ddc line */
440 bool shared_ddc; 440 bool shared_ddc;
441 /* for some Radeon chip families we apply an additional EDID header
442 check as part of the DDC probe */
443 bool requires_extended_probe;
441 bool use_digital; 444 bool use_digital;
442 /* we need to mind the EDID between detect 445 /* we need to mind the EDID between detect
443 and get modes due to analog/digital/tvencoder */ 446 and get modes due to analog/digital/tvencoder */
@@ -476,6 +479,7 @@ extern void radeon_dp_set_link_config(struct drm_connector *connector,
476 struct drm_display_mode *mode); 479 struct drm_display_mode *mode);
477extern void radeon_dp_link_train(struct drm_encoder *encoder, 480extern void radeon_dp_link_train(struct drm_encoder *encoder,
478 struct drm_connector *connector); 481 struct drm_connector *connector);
482extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
479extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); 483extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
480extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); 484extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
481extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode); 485extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
@@ -514,7 +518,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
514 u8 val); 518 u8 val);
515extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); 519extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
516extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); 520extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
517extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); 521extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector,
522 bool requires_extended_probe);
518extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); 523extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
519 524
520extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); 525extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index dee4a0c1b4b2..602fa3541c45 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -40,10 +40,14 @@ void radeon_test_moves(struct radeon_device *rdev)
40 size = 1024 * 1024; 40 size = 1024 * 1024;
41 41
42 /* Number of tests = 42 /* Number of tests =
43 * (Total GTT - IB pool - writeback page - ring buffer) / test size 43 * (Total GTT - IB pool - writeback page - ring buffers) / test size
44 */ 44 */
45 n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - 45 n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size;
46 rdev->cp.ring_size)) / size; 46 if (rdev->wb.wb_obj)
47 n -= RADEON_GPU_PAGE_SIZE;
48 if (rdev->ih.ring_obj)
49 n -= rdev->ih.ring_size;
50 n /= size;
47 51
48 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); 52 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
49 if (!gtt_obj) { 53 if (!gtt_obj) {
@@ -132,9 +136,15 @@ void radeon_test_moves(struct radeon_device *rdev)
132 gtt_start++, vram_start++) { 136 gtt_start++, vram_start++) {
133 if (*vram_start != gtt_start) { 137 if (*vram_start != gtt_start) {
134 DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " 138 DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
135 "expected 0x%p (GTT map 0x%p-0x%p)\n", 139 "expected 0x%p (GTT/VRAM offset "
136 i, *vram_start, gtt_start, gtt_map, 140 "0x%16llx/0x%16llx)\n",
137 gtt_end); 141 i, *vram_start, gtt_start,
142 (unsigned long long)
143 (gtt_addr - rdev->mc.gtt_start +
144 (void*)gtt_start - gtt_map),
145 (unsigned long long)
146 (vram_addr - rdev->mc.vram_start +
147 (void*)gtt_start - gtt_map));
138 radeon_bo_kunmap(vram_obj); 148 radeon_bo_kunmap(vram_obj);
139 goto out_cleanup; 149 goto out_cleanup;
140 } 150 }
@@ -175,9 +185,15 @@ void radeon_test_moves(struct radeon_device *rdev)
175 gtt_start++, vram_start++) { 185 gtt_start++, vram_start++) {
176 if (*gtt_start != vram_start) { 186 if (*gtt_start != vram_start) {
177 DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " 187 DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
178 "expected 0x%p (VRAM map 0x%p-0x%p)\n", 188 "expected 0x%p (VRAM/GTT offset "
179 i, *gtt_start, vram_start, vram_map, 189 "0x%16llx/0x%16llx)\n",
180 vram_end); 190 i, *gtt_start, vram_start,
191 (unsigned long long)
192 (vram_addr - rdev->mc.vram_start +
193 (void*)vram_start - vram_map),
194 (unsigned long long)
195 (gtt_addr - rdev->mc.gtt_start +
196 (void*)vram_start - vram_map));
181 radeon_bo_kunmap(gtt_obj[i]); 197 radeon_bo_kunmap(gtt_obj[i]);
182 goto out_cleanup; 198 goto out_cleanup;
183 } 199 }
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 60125ddba1e9..9b86fb0e4122 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -450,6 +450,29 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
450 return -EINVAL; 450 return -EINVAL;
451 mem->bus.base = rdev->mc.aper_base; 451 mem->bus.base = rdev->mc.aper_base;
452 mem->bus.is_iomem = true; 452 mem->bus.is_iomem = true;
453#ifdef __alpha__
454 /*
455 * Alpha: use bus.addr to hold the ioremap() return,
456 * so we can modify bus.base below.
457 */
458 if (mem->placement & TTM_PL_FLAG_WC)
459 mem->bus.addr =
460 ioremap_wc(mem->bus.base + mem->bus.offset,
461 mem->bus.size);
462 else
463 mem->bus.addr =
464 ioremap_nocache(mem->bus.base + mem->bus.offset,
465 mem->bus.size);
466
467 /*
468 * Alpha: Use just the bus offset plus
469 * the hose/domain memory base for bus.base.
470 * It then can be used to build PTEs for VRAM
471 * access, as done in ttm_bo_vm_fault().
472 */
473 mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
474 rdev->ddev->hose->dense_mem_base;
475#endif
453 break; 476 break;
454 default: 477 default:
455 return -EINVAL; 478 return -EINVAL;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 56619f64b6bf..a4d38d85909a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -353,8 +353,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
353 353
354 ret = ttm_tt_set_user(bo->ttm, current, 354 ret = ttm_tt_set_user(bo->ttm, current,
355 bo->buffer_start, bo->num_pages); 355 bo->buffer_start, bo->num_pages);
356 if (unlikely(ret != 0)) 356 if (unlikely(ret != 0)) {
357 ttm_tt_destroy(bo->ttm); 357 ttm_tt_destroy(bo->ttm);
358 bo->ttm = NULL;
359 }
358 break; 360 break;
359 default: 361 default:
360 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n"); 362 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
@@ -390,10 +392,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
390 * Create and bind a ttm if required. 392 * Create and bind a ttm if required.
391 */ 393 */
392 394
393 if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) { 395 if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
394 ret = ttm_bo_add_ttm(bo, false); 396 if (bo->ttm == NULL) {
395 if (ret) 397 ret = ttm_bo_add_ttm(bo, false);
396 goto out_err; 398 if (ret)
399 goto out_err;
400 }
397 401
398 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); 402 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
399 if (ret) 403 if (ret)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 77dbf408c0d0..ae3c6f5dd2b7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -635,13 +635,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
635 if (ret) 635 if (ret)
636 return ret; 636 return ret;
637 637
638 ttm_bo_free_old_node(bo);
639 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && 638 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
640 (bo->ttm != NULL)) { 639 (bo->ttm != NULL)) {
641 ttm_tt_unbind(bo->ttm); 640 ttm_tt_unbind(bo->ttm);
642 ttm_tt_destroy(bo->ttm); 641 ttm_tt_destroy(bo->ttm);
643 bo->ttm = NULL; 642 bo->ttm = NULL;
644 } 643 }
644 ttm_bo_free_old_node(bo);
645 } else { 645 } else {
646 /** 646 /**
647 * This should help pipeline ordinary buffer moves. 647 * This should help pipeline ordinary buffer moves.
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 306b15f39c9c..1130a8987125 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -589,6 +589,7 @@ config HID_WACOM_POWER_SUPPLY
589config HID_WIIMOTE 589config HID_WIIMOTE
590 tristate "Nintendo Wii Remote support" 590 tristate "Nintendo Wii Remote support"
591 depends on BT_HIDP 591 depends on BT_HIDP
592 depends on LEDS_CLASS
592 ---help--- 593 ---help---
593 Support for the Nintendo Wii Remote bluetooth device. 594 Support for the Nintendo Wii Remote bluetooth device.
594 595
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index b85744fe8464..18b3bc646bf3 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -444,6 +444,12 @@ static const struct hid_device_id apple_devices[] = {
444 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS), 444 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS),
445 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | 445 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
446 APPLE_RDESC_JIS }, 446 APPLE_RDESC_JIS },
447 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI),
448 .driver_data = APPLE_HAS_FN },
449 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO),
450 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
451 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS),
452 .driver_data = APPLE_HAS_FN },
447 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), 453 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
448 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 454 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
449 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO), 455 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 1a5cf0c9cfca..242353df3dc4 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1340,6 +1340,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
1340 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) }, 1340 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
1341 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) }, 1341 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
1342 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, 1342 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
1343 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
1344 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
1345 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
1343 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, 1346 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
1344 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, 1347 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
1345 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, 1348 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index db63ccf21cc8..7484e1b67249 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -109,6 +109,9 @@
109#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 109#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245
110#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 110#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246
111#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 111#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247
112#define USB_DEVICE_ID_APPLE_ALU_REVB_ANSI 0x024f
113#define USB_DEVICE_ID_APPLE_ALU_REVB_ISO 0x0250
114#define USB_DEVICE_ID_APPLE_ALU_REVB_JIS 0x0251
112#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239 115#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
113#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a 116#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
114#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b 117#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
@@ -274,6 +277,7 @@
274#define USB_DEVICE_ID_PENPOWER 0x00f4 277#define USB_DEVICE_ID_PENPOWER 0x00f4
275 278
276#define USB_VENDOR_ID_GREENASIA 0x0e8f 279#define USB_VENDOR_ID_GREENASIA 0x0e8f
280#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013
277 281
278#define USB_VENDOR_ID_GRETAGMACBETH 0x0971 282#define USB_VENDOR_ID_GRETAGMACBETH 0x0971
279#define USB_DEVICE_ID_GRETAGMACBETH_HUEY 0x2005 283#define USB_DEVICE_ID_GRETAGMACBETH_HUEY 0x2005
@@ -576,6 +580,9 @@
576#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 580#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
577#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600 581#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600
578 582
583#define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f
584#define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002
585
579#define USB_VENDOR_ID_SKYCABLE 0x1223 586#define USB_VENDOR_ID_SKYCABLE 0x1223
580#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 587#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
581 588
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 0ec91c18a421..f0fbd7bd239e 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -81,6 +81,28 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
81#define NO_TOUCHES -1 81#define NO_TOUCHES -1
82#define SINGLE_TOUCH_UP -2 82#define SINGLE_TOUCH_UP -2
83 83
84/* Touch surface information. Dimension is in hundredths of a mm, min and max
85 * are in units. */
86#define MOUSE_DIMENSION_X (float)9056
87#define MOUSE_MIN_X -1100
88#define MOUSE_MAX_X 1258
89#define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100))
90#define MOUSE_DIMENSION_Y (float)5152
91#define MOUSE_MIN_Y -1589
92#define MOUSE_MAX_Y 2047
93#define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100))
94
95#define TRACKPAD_DIMENSION_X (float)13000
96#define TRACKPAD_MIN_X -2909
97#define TRACKPAD_MAX_X 3167
98#define TRACKPAD_RES_X \
99 ((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100))
100#define TRACKPAD_DIMENSION_Y (float)11000
101#define TRACKPAD_MIN_Y -2456
102#define TRACKPAD_MAX_Y 2565
103#define TRACKPAD_RES_Y \
104 ((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100))
105
84/** 106/**
85 * struct magicmouse_sc - Tracks Magic Mouse-specific data. 107 * struct magicmouse_sc - Tracks Magic Mouse-specific data.
86 * @input: Input device through which we report events. 108 * @input: Input device through which we report events.
@@ -406,17 +428,31 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
406 * inverse of the reported Y. 428 * inverse of the reported Y.
407 */ 429 */
408 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { 430 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
409 input_set_abs_params(input, ABS_MT_POSITION_X, -1100, 431 input_set_abs_params(input, ABS_MT_POSITION_X,
410 1358, 4, 0); 432 MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
411 input_set_abs_params(input, ABS_MT_POSITION_Y, -1589, 433 input_set_abs_params(input, ABS_MT_POSITION_Y,
412 2047, 4, 0); 434 MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);
435
436 input_abs_set_res(input, ABS_MT_POSITION_X,
437 MOUSE_RES_X);
438 input_abs_set_res(input, ABS_MT_POSITION_Y,
439 MOUSE_RES_Y);
413 } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ 440 } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
414 input_set_abs_params(input, ABS_X, -2909, 3167, 4, 0); 441 input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
415 input_set_abs_params(input, ABS_Y, -2456, 2565, 4, 0); 442 TRACKPAD_MAX_X, 4, 0);
416 input_set_abs_params(input, ABS_MT_POSITION_X, -2909, 443 input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
417 3167, 4, 0); 444 TRACKPAD_MAX_Y, 4, 0);
418 input_set_abs_params(input, ABS_MT_POSITION_Y, -2456, 445 input_set_abs_params(input, ABS_MT_POSITION_X,
419 2565, 4, 0); 446 TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
447 input_set_abs_params(input, ABS_MT_POSITION_Y,
448 TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);
449
450 input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
451 input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
452 input_abs_set_res(input, ABS_MT_POSITION_X,
453 TRACKPAD_RES_X);
454 input_abs_set_res(input, ABS_MT_POSITION_Y,
455 TRACKPAD_RES_Y);
420 } 456 }
421 457
422 input_set_events_per_packet(input, 60); 458 input_set_events_per_packet(input, 60);
@@ -501,9 +537,17 @@ static int magicmouse_probe(struct hid_device *hdev,
501 } 537 }
502 report->size = 6; 538 report->size = 6;
503 539
540 /*
541 * Some devices repond with 'invalid report id' when feature
542 * report switching it into multitouch mode is sent to it.
543 *
544 * This results in -EIO from the _raw low-level transport callback,
545 * but there seems to be no other way of switching the mode.
546 * Thus the super-ugly hacky success check below.
547 */
504 ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature), 548 ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
505 HID_FEATURE_REPORT); 549 HID_FEATURE_REPORT);
506 if (ret != sizeof(feature)) { 550 if (ret != -EIO && ret != sizeof(feature)) {
507 hid_err(hdev, "unable to request touch data (%d)\n", ret); 551 hid_err(hdev, "unable to request touch data (%d)\n", ret);
508 goto err_stop_hw; 552 goto err_stop_hw;
509 } 553 }
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 06888323828c..a597039d0755 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -353,11 +353,7 @@ static int wacom_probe(struct hid_device *hdev,
353 if (ret) { 353 if (ret) {
354 hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n", 354 hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n",
355 ret); 355 ret);
356 /* 356 goto err_battery;
357 * battery attribute is not critical for the tablet, but if it
358 * failed then there is no need to create ac attribute
359 */
360 goto move_on;
361 } 357 }
362 358
363 wdata->ac.properties = wacom_ac_props; 359 wdata->ac.properties = wacom_ac_props;
@@ -371,14 +367,8 @@ static int wacom_probe(struct hid_device *hdev,
371 if (ret) { 367 if (ret) {
372 hid_warn(hdev, 368 hid_warn(hdev,
373 "can't create ac battery attribute, err: %d\n", ret); 369 "can't create ac battery attribute, err: %d\n", ret);
374 /* 370 goto err_ac;
375 * ac attribute is not critical for the tablet, but if it
376 * failed then we don't want to battery attribute to exist
377 */
378 power_supply_unregister(&wdata->battery);
379 } 371 }
380
381move_on:
382#endif 372#endif
383 hidinput = list_entry(hdev->inputs.next, struct hid_input, list); 373 hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
384 input = hidinput->input; 374 input = hidinput->input;
@@ -416,6 +406,13 @@ move_on:
416 406
417 return 0; 407 return 0;
418 408
409#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
410err_ac:
411 power_supply_unregister(&wdata->battery);
412err_battery:
413 device_remove_file(&hdev->dev, &dev_attr_speed);
414 hid_hw_stop(hdev);
415#endif
419err_free: 416err_free:
420 kfree(wdata); 417 kfree(wdata);
421 return ret; 418 return ret;
@@ -426,6 +423,7 @@ static void wacom_remove(struct hid_device *hdev)
426#ifdef CONFIG_HID_WACOM_POWER_SUPPLY 423#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
427 struct wacom_data *wdata = hid_get_drvdata(hdev); 424 struct wacom_data *wdata = hid_get_drvdata(hdev);
428#endif 425#endif
426 device_remove_file(&hdev->dev, &dev_attr_speed);
429 hid_hw_stop(hdev); 427 hid_hw_stop(hdev);
430 428
431#ifdef CONFIG_HID_WACOM_POWER_SUPPLY 429#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c
index a594383ce03d..85a02e5f9fe8 100644
--- a/drivers/hid/hid-wiimote.c
+++ b/drivers/hid/hid-wiimote.c
@@ -10,10 +10,10 @@
10 * any later version. 10 * any later version.
11 */ 11 */
12 12
13#include <linux/atomic.h>
14#include <linux/device.h> 13#include <linux/device.h>
15#include <linux/hid.h> 14#include <linux/hid.h>
16#include <linux/input.h> 15#include <linux/input.h>
16#include <linux/leds.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include "hid-ids.h" 19#include "hid-ids.h"
@@ -33,9 +33,9 @@ struct wiimote_state {
33}; 33};
34 34
35struct wiimote_data { 35struct wiimote_data {
36 atomic_t ready;
37 struct hid_device *hdev; 36 struct hid_device *hdev;
38 struct input_dev *input; 37 struct input_dev *input;
38 struct led_classdev *leds[4];
39 39
40 spinlock_t qlock; 40 spinlock_t qlock;
41 __u8 head; 41 __u8 head;
@@ -53,8 +53,15 @@ struct wiimote_data {
53#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \ 53#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
54 WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4) 54 WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
55 55
56/* return flag for led \num */
57#define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1))
58
56enum wiiproto_reqs { 59enum wiiproto_reqs {
60 WIIPROTO_REQ_NULL = 0x0,
57 WIIPROTO_REQ_LED = 0x11, 61 WIIPROTO_REQ_LED = 0x11,
62 WIIPROTO_REQ_DRM = 0x12,
63 WIIPROTO_REQ_STATUS = 0x20,
64 WIIPROTO_REQ_RETURN = 0x22,
58 WIIPROTO_REQ_DRM_K = 0x30, 65 WIIPROTO_REQ_DRM_K = 0x30,
59}; 66};
60 67
@@ -87,9 +94,6 @@ static __u16 wiiproto_keymap[] = {
87 BTN_MODE, /* WIIPROTO_KEY_HOME */ 94 BTN_MODE, /* WIIPROTO_KEY_HOME */
88}; 95};
89 96
90#define dev_to_wii(pdev) hid_get_drvdata(container_of(pdev, struct hid_device, \
91 dev))
92
93static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer, 97static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,
94 size_t count) 98 size_t count)
95{ 99{
@@ -192,66 +196,96 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
192 wiimote_queue(wdata, cmd, sizeof(cmd)); 196 wiimote_queue(wdata, cmd, sizeof(cmd));
193} 197}
194 198
195#define wiifs_led_show_set(num) \ 199/*
196static ssize_t wiifs_led_show_##num(struct device *dev, \ 200 * Check what peripherals of the wiimote are currently
197 struct device_attribute *attr, char *buf) \ 201 * active and select a proper DRM that supports all of
198{ \ 202 * the requested data inputs.
199 struct wiimote_data *wdata = dev_to_wii(dev); \ 203 */
200 unsigned long flags; \ 204static __u8 select_drm(struct wiimote_data *wdata)
201 int state; \ 205{
202 \ 206 return WIIPROTO_REQ_DRM_K;
203 if (!atomic_read(&wdata->ready)) \ 207}
204 return -EBUSY; \ 208
205 \ 209static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
206 spin_lock_irqsave(&wdata->state.lock, flags); \ 210{
207 state = !!(wdata->state.flags & WIIPROTO_FLAG_LED##num); \ 211 __u8 cmd[3];
208 spin_unlock_irqrestore(&wdata->state.lock, flags); \ 212
209 \ 213 if (drm == WIIPROTO_REQ_NULL)
210 return sprintf(buf, "%d\n", state); \ 214 drm = select_drm(wdata);
211} \ 215
212static ssize_t wiifs_led_set_##num(struct device *dev, \ 216 cmd[0] = WIIPROTO_REQ_DRM;
213 struct device_attribute *attr, const char *buf, size_t count) \ 217 cmd[1] = 0;
214{ \ 218 cmd[2] = drm;
215 struct wiimote_data *wdata = dev_to_wii(dev); \ 219
216 int tmp = simple_strtoul(buf, NULL, 10); \ 220 wiimote_queue(wdata, cmd, sizeof(cmd));
217 unsigned long flags; \ 221}
218 __u8 state; \ 222
219 \ 223static enum led_brightness wiimote_leds_get(struct led_classdev *led_dev)
220 if (!atomic_read(&wdata->ready)) \ 224{
221 return -EBUSY; \ 225 struct wiimote_data *wdata;
222 \ 226 struct device *dev = led_dev->dev->parent;
223 spin_lock_irqsave(&wdata->state.lock, flags); \ 227 int i;
224 \ 228 unsigned long flags;
225 state = wdata->state.flags; \ 229 bool value = false;
226 \ 230
227 if (tmp) \ 231 wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev));
228 wiiproto_req_leds(wdata, state | WIIPROTO_FLAG_LED##num);\ 232
229 else \ 233 for (i = 0; i < 4; ++i) {
230 wiiproto_req_leds(wdata, state & ~WIIPROTO_FLAG_LED##num);\ 234 if (wdata->leds[i] == led_dev) {
231 \ 235 spin_lock_irqsave(&wdata->state.lock, flags);
232 spin_unlock_irqrestore(&wdata->state.lock, flags); \ 236 value = wdata->state.flags & WIIPROTO_FLAG_LED(i + 1);
233 \ 237 spin_unlock_irqrestore(&wdata->state.lock, flags);
234 return count; \ 238 break;
235} \ 239 }
236static DEVICE_ATTR(led##num, S_IRUGO | S_IWUSR, wiifs_led_show_##num, \ 240 }
237 wiifs_led_set_##num) 241
238 242 return value ? LED_FULL : LED_OFF;
239wiifs_led_show_set(1); 243}
240wiifs_led_show_set(2); 244
241wiifs_led_show_set(3); 245static void wiimote_leds_set(struct led_classdev *led_dev,
242wiifs_led_show_set(4); 246 enum led_brightness value)
247{
248 struct wiimote_data *wdata;
249 struct device *dev = led_dev->dev->parent;
250 int i;
251 unsigned long flags;
252 __u8 state, flag;
253
254 wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev));
255
256 for (i = 0; i < 4; ++i) {
257 if (wdata->leds[i] == led_dev) {
258 flag = WIIPROTO_FLAG_LED(i + 1);
259 spin_lock_irqsave(&wdata->state.lock, flags);
260 state = wdata->state.flags;
261 if (value == LED_OFF)
262 wiiproto_req_leds(wdata, state & ~flag);
263 else
264 wiiproto_req_leds(wdata, state | flag);
265 spin_unlock_irqrestore(&wdata->state.lock, flags);
266 break;
267 }
268 }
269}
243 270
244static int wiimote_input_event(struct input_dev *dev, unsigned int type, 271static int wiimote_input_event(struct input_dev *dev, unsigned int type,
245 unsigned int code, int value) 272 unsigned int code, int value)
246{ 273{
274 return 0;
275}
276
277static int wiimote_input_open(struct input_dev *dev)
278{
247 struct wiimote_data *wdata = input_get_drvdata(dev); 279 struct wiimote_data *wdata = input_get_drvdata(dev);
248 280
249 if (!atomic_read(&wdata->ready)) 281 return hid_hw_open(wdata->hdev);
250 return -EBUSY; 282}
251 /* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */
252 smp_rmb();
253 283
254 return 0; 284static void wiimote_input_close(struct input_dev *dev)
285{
286 struct wiimote_data *wdata = input_get_drvdata(dev);
287
288 hid_hw_close(wdata->hdev);
255} 289}
256 290
257static void handler_keys(struct wiimote_data *wdata, const __u8 *payload) 291static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
@@ -281,6 +315,26 @@ static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
281 input_sync(wdata->input); 315 input_sync(wdata->input);
282} 316}
283 317
318static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
319{
320 handler_keys(wdata, payload);
321
322 /* on status reports the drm is reset so we need to resend the drm */
323 wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
324}
325
326static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
327{
328 __u8 err = payload[3];
329 __u8 cmd = payload[2];
330
331 handler_keys(wdata, payload);
332
333 if (err)
334 hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err,
335 cmd);
336}
337
284struct wiiproto_handler { 338struct wiiproto_handler {
285 __u8 id; 339 __u8 id;
286 size_t size; 340 size_t size;
@@ -288,6 +342,8 @@ struct wiiproto_handler {
288}; 342};
289 343
290static struct wiiproto_handler handlers[] = { 344static struct wiiproto_handler handlers[] = {
345 { .id = WIIPROTO_REQ_STATUS, .size = 6, .func = handler_status },
346 { .id = WIIPROTO_REQ_RETURN, .size = 4, .func = handler_return },
291 { .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys }, 347 { .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys },
292 { .id = 0 } 348 { .id = 0 }
293}; 349};
@@ -300,11 +356,6 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
300 int i; 356 int i;
301 unsigned long flags; 357 unsigned long flags;
302 358
303 if (!atomic_read(&wdata->ready))
304 return -EBUSY;
305 /* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */
306 smp_rmb();
307
308 if (size < 1) 359 if (size < 1)
309 return -EINVAL; 360 return -EINVAL;
310 361
@@ -321,6 +372,58 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
321 return 0; 372 return 0;
322} 373}
323 374
375static void wiimote_leds_destroy(struct wiimote_data *wdata)
376{
377 int i;
378 struct led_classdev *led;
379
380 for (i = 0; i < 4; ++i) {
381 if (wdata->leds[i]) {
382 led = wdata->leds[i];
383 wdata->leds[i] = NULL;
384 led_classdev_unregister(led);
385 kfree(led);
386 }
387 }
388}
389
390static int wiimote_leds_create(struct wiimote_data *wdata)
391{
392 int i, ret;
393 struct device *dev = &wdata->hdev->dev;
394 size_t namesz = strlen(dev_name(dev)) + 9;
395 struct led_classdev *led;
396 char *name;
397
398 for (i = 0; i < 4; ++i) {
399 led = kzalloc(sizeof(struct led_classdev) + namesz, GFP_KERNEL);
400 if (!led) {
401 ret = -ENOMEM;
402 goto err;
403 }
404 name = (void*)&led[1];
405 snprintf(name, namesz, "%s:blue:p%d", dev_name(dev), i);
406 led->name = name;
407 led->brightness = 0;
408 led->max_brightness = 1;
409 led->brightness_get = wiimote_leds_get;
410 led->brightness_set = wiimote_leds_set;
411
412 ret = led_classdev_register(dev, led);
413 if (ret) {
414 kfree(led);
415 goto err;
416 }
417 wdata->leds[i] = led;
418 }
419
420 return 0;
421
422err:
423 wiimote_leds_destroy(wdata);
424 return ret;
425}
426
324static struct wiimote_data *wiimote_create(struct hid_device *hdev) 427static struct wiimote_data *wiimote_create(struct hid_device *hdev)
325{ 428{
326 struct wiimote_data *wdata; 429 struct wiimote_data *wdata;
@@ -341,6 +444,8 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
341 444
342 input_set_drvdata(wdata->input, wdata); 445 input_set_drvdata(wdata->input, wdata);
343 wdata->input->event = wiimote_input_event; 446 wdata->input->event = wiimote_input_event;
447 wdata->input->open = wiimote_input_open;
448 wdata->input->close = wiimote_input_close;
344 wdata->input->dev.parent = &wdata->hdev->dev; 449 wdata->input->dev.parent = &wdata->hdev->dev;
345 wdata->input->id.bustype = wdata->hdev->bus; 450 wdata->input->id.bustype = wdata->hdev->bus;
346 wdata->input->id.vendor = wdata->hdev->vendor; 451 wdata->input->id.vendor = wdata->hdev->vendor;
@@ -362,6 +467,12 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
362 467
363static void wiimote_destroy(struct wiimote_data *wdata) 468static void wiimote_destroy(struct wiimote_data *wdata)
364{ 469{
470 wiimote_leds_destroy(wdata);
471
472 input_unregister_device(wdata->input);
473 cancel_work_sync(&wdata->worker);
474 hid_hw_stop(wdata->hdev);
475
365 kfree(wdata); 476 kfree(wdata);
366} 477}
367 478
@@ -377,19 +488,6 @@ static int wiimote_hid_probe(struct hid_device *hdev,
377 return -ENOMEM; 488 return -ENOMEM;
378 } 489 }
379 490
380 ret = device_create_file(&hdev->dev, &dev_attr_led1);
381 if (ret)
382 goto err;
383 ret = device_create_file(&hdev->dev, &dev_attr_led2);
384 if (ret)
385 goto err;
386 ret = device_create_file(&hdev->dev, &dev_attr_led3);
387 if (ret)
388 goto err;
389 ret = device_create_file(&hdev->dev, &dev_attr_led4);
390 if (ret)
391 goto err;
392
393 ret = hid_parse(hdev); 491 ret = hid_parse(hdev);
394 if (ret) { 492 if (ret) {
395 hid_err(hdev, "HID parse failed\n"); 493 hid_err(hdev, "HID parse failed\n");
@@ -408,9 +506,10 @@ static int wiimote_hid_probe(struct hid_device *hdev,
408 goto err_stop; 506 goto err_stop;
409 } 507 }
410 508
411 /* smp_wmb: Write wdata->xy first before wdata->ready is set to 1 */ 509 ret = wiimote_leds_create(wdata);
412 smp_wmb(); 510 if (ret)
413 atomic_set(&wdata->ready, 1); 511 goto err_free;
512
414 hid_info(hdev, "New device registered\n"); 513 hid_info(hdev, "New device registered\n");
415 514
416 /* by default set led1 after device initialization */ 515 /* by default set led1 after device initialization */
@@ -420,15 +519,15 @@ static int wiimote_hid_probe(struct hid_device *hdev,
420 519
421 return 0; 520 return 0;
422 521
522err_free:
523 wiimote_destroy(wdata);
524 return ret;
525
423err_stop: 526err_stop:
424 hid_hw_stop(hdev); 527 hid_hw_stop(hdev);
425err: 528err:
426 input_free_device(wdata->input); 529 input_free_device(wdata->input);
427 device_remove_file(&hdev->dev, &dev_attr_led1); 530 kfree(wdata);
428 device_remove_file(&hdev->dev, &dev_attr_led2);
429 device_remove_file(&hdev->dev, &dev_attr_led3);
430 device_remove_file(&hdev->dev, &dev_attr_led4);
431 wiimote_destroy(wdata);
432 return ret; 531 return ret;
433} 532}
434 533
@@ -437,16 +536,6 @@ static void wiimote_hid_remove(struct hid_device *hdev)
437 struct wiimote_data *wdata = hid_get_drvdata(hdev); 536 struct wiimote_data *wdata = hid_get_drvdata(hdev);
438 537
439 hid_info(hdev, "Device removed\n"); 538 hid_info(hdev, "Device removed\n");
440
441 device_remove_file(&hdev->dev, &dev_attr_led1);
442 device_remove_file(&hdev->dev, &dev_attr_led2);
443 device_remove_file(&hdev->dev, &dev_attr_led3);
444 device_remove_file(&hdev->dev, &dev_attr_led4);
445
446 hid_hw_stop(hdev);
447 input_unregister_device(wdata->input);
448
449 cancel_work_sync(&wdata->worker);
450 wiimote_destroy(wdata); 539 wiimote_destroy(wdata);
451} 540}
452 541
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 621959d5cc42..3146fdcda272 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -47,6 +47,7 @@ static const struct hid_blacklist {
47 { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL }, 47 { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
48 48
49 { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT }, 49 { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
50 { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
50 { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, 51 { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
51 { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, 52 { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
52 { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT }, 53 { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
@@ -89,6 +90,7 @@ static const struct hid_blacklist {
89 90
90 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT }, 91 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
91 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT }, 92 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
93 { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
92 { 0, 0 } 94 { 0, 0 }
93}; 95};
94 96
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 0598cd22edf2..0b62c3c6b7ce 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -623,7 +623,7 @@ config SENSORS_LM90
623 LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A, 623 LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A,
624 Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659, 624 Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659,
625 MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, ON Semiconductor NCT1008, 625 MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, ON Semiconductor NCT1008,
626 and Winbond/Nuvoton W83L771W/G/AWG/ASG sensor chips. 626 Winbond/Nuvoton W83L771W/G/AWG/ASG and Philips SA56004 sensor chips.
627 627
628 This driver can also be built as a module. If so, the module 628 This driver can also be built as a module. If so, the module
629 will be called lm90. 629 will be called lm90.
@@ -694,14 +694,24 @@ config SENSORS_LTC4261
694 be called ltc4261. 694 be called ltc4261.
695 695
696config SENSORS_LM95241 696config SENSORS_LM95241
697 tristate "National Semiconductor LM95241 sensor chip" 697 tristate "National Semiconductor LM95241 and compatibles"
698 depends on I2C 698 depends on I2C
699 help 699 help
700 If you say yes here you get support for LM95241 sensor chip. 700 If you say yes here you get support for LM95231 and LM95241 sensor
701 chips.
701 702
702 This driver can also be built as a module. If so, the module 703 This driver can also be built as a module. If so, the module
703 will be called lm95241. 704 will be called lm95241.
704 705
706config SENSORS_LM95245
707 tristate "National Semiconductor LM95245 sensor chip"
708 depends on I2C && EXPERIMENTAL
709 help
710 If you say yes here you get support for LM95245 sensor chip.
711
712 This driver can also be built as a module. If so, the module
713 will be called lm95245.
714
705config SENSORS_MAX1111 715config SENSORS_MAX1111
706 tristate "Maxim MAX1111 Multichannel, Serial 8-bit ADC chip" 716 tristate "Maxim MAX1111 Multichannel, Serial 8-bit ADC chip"
707 depends on SPI_MASTER 717 depends on SPI_MASTER
@@ -736,6 +746,16 @@ config SENSORS_MAX1619
736 This driver can also be built as a module. If so, the module 746 This driver can also be built as a module. If so, the module
737 will be called max1619. 747 will be called max1619.
738 748
749config SENSORS_MAX1668
750 tristate "Maxim MAX1668 and compatibles"
751 depends on I2C && EXPERIMENTAL
752 help
753 If you say yes here you get support for MAX1668, MAX1989 and
754 MAX1805 chips.
755
756 This driver can also be built as a module. If so, the module
757 will be called max1668.
758
739config SENSORS_MAX6639 759config SENSORS_MAX6639
740 tristate "Maxim MAX6639 sensor chip" 760 tristate "Maxim MAX6639 sensor chip"
741 depends on I2C && EXPERIMENTAL 761 depends on I2C && EXPERIMENTAL
@@ -767,6 +787,20 @@ config SENSORS_MAX6650
767 This driver can also be built as a module. If so, the module 787 This driver can also be built as a module. If so, the module
768 will be called max6650. 788 will be called max6650.
769 789
790config SENSORS_NTC_THERMISTOR
791 tristate "NTC thermistor support"
792 depends on EXPERIMENTAL
793 help
794 This driver supports NTC thermistors sensor reading and its
795 interpretation. The driver can also monitor the temperature and
796 send notifications about the temperature.
797
798 Currently, this driver supports
799 NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, and NCP15WL333.
800
801 This driver can also be built as a module. If so, the module
802 will be called ntc-thermistor.
803
770config SENSORS_PC87360 804config SENSORS_PC87360
771 tristate "National Semiconductor PC87360 family" 805 tristate "National Semiconductor PC87360 family"
772 select HWMON_VID 806 select HWMON_VID
@@ -807,92 +841,7 @@ config SENSORS_PCF8591
807 These devices are hard to detect and rarely found on mainstream 841 These devices are hard to detect and rarely found on mainstream
808 hardware. If unsure, say N. 842 hardware. If unsure, say N.
809 843
810config PMBUS 844source drivers/hwmon/pmbus/Kconfig
811 tristate "PMBus support"
812 depends on I2C && EXPERIMENTAL
813 default n
814 help
815 Say yes here if you want to enable PMBus support.
816
817 This driver can also be built as a module. If so, the module will
818 be called pmbus_core.
819
820if PMBUS
821
822config SENSORS_PMBUS
823 tristate "Generic PMBus devices"
824 default n
825 help
826 If you say yes here you get hardware monitoring support for generic
827 PMBus devices, including but not limited to BMR450, BMR451, BMR453,
828 BMR454, and LTC2978.
829
830 This driver can also be built as a module. If so, the module will
831 be called pmbus.
832
833config SENSORS_ADM1275
834 tristate "Analog Devices ADM1275"
835 default n
836 help
837 If you say yes here you get hardware monitoring support for Analog
838 Devices ADM1275 Hot-Swap Controller and Digital Power Monitor.
839
840 This driver can also be built as a module. If so, the module will
841 be called adm1275.
842
843config SENSORS_MAX16064
844 tristate "Maxim MAX16064"
845 default n
846 help
847 If you say yes here you get hardware monitoring support for Maxim
848 MAX16064.
849
850 This driver can also be built as a module. If so, the module will
851 be called max16064.
852
853config SENSORS_MAX34440
854 tristate "Maxim MAX34440/MAX34441"
855 default n
856 help
857 If you say yes here you get hardware monitoring support for Maxim
858 MAX34440 and MAX34441.
859
860 This driver can also be built as a module. If so, the module will
861 be called max34440.
862
863config SENSORS_MAX8688
864 tristate "Maxim MAX8688"
865 default n
866 help
867 If you say yes here you get hardware monitoring support for Maxim
868 MAX8688.
869
870 This driver can also be built as a module. If so, the module will
871 be called max8688.
872
873config SENSORS_UCD9000
874 tristate "TI UCD90120, UCD90124, UCD9090, UCD90910"
875 default n
876 help
877 If you say yes here you get hardware monitoring support for TI
878 UCD90120, UCD90124, UCD9090, UCD90910 Sequencer and System Health
879 Controllers.
880
881 This driver can also be built as a module. If so, the module will
882 be called ucd9000.
883
884config SENSORS_UCD9200
885 tristate "TI UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, UCD9248"
886 default n
887 help
888 If you say yes here you get hardware monitoring support for TI
889 UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, and UCD9248
890 Digital PWM System Controllers.
891
892 This driver can also be built as a module. If so, the module will
893 be called ucd9200.
894
895endif # PMBUS
896 845
897config SENSORS_SHT15 846config SENSORS_SHT15
898 tristate "Sensiron humidity and temperature sensors. SHT15 and compat." 847 tristate "Sensiron humidity and temperature sensors. SHT15 and compat."
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index d7995a1d0784..3c9ccefea791 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -80,6 +80,7 @@ obj-$(CONFIG_SENSORS_LM90) += lm90.o
80obj-$(CONFIG_SENSORS_LM92) += lm92.o 80obj-$(CONFIG_SENSORS_LM92) += lm92.o
81obj-$(CONFIG_SENSORS_LM93) += lm93.o 81obj-$(CONFIG_SENSORS_LM93) += lm93.o
82obj-$(CONFIG_SENSORS_LM95241) += lm95241.o 82obj-$(CONFIG_SENSORS_LM95241) += lm95241.o
83obj-$(CONFIG_SENSORS_LM95245) += lm95245.o
83obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o 84obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o
84obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o 85obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o
85obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o 86obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o
@@ -87,10 +88,12 @@ obj-$(CONFIG_SENSORS_LTC4261) += ltc4261.o
87obj-$(CONFIG_SENSORS_MAX1111) += max1111.o 88obj-$(CONFIG_SENSORS_MAX1111) += max1111.o
88obj-$(CONFIG_SENSORS_MAX16065) += max16065.o 89obj-$(CONFIG_SENSORS_MAX16065) += max16065.o
89obj-$(CONFIG_SENSORS_MAX1619) += max1619.o 90obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
91obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
90obj-$(CONFIG_SENSORS_MAX6639) += max6639.o 92obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
91obj-$(CONFIG_SENSORS_MAX6642) += max6642.o 93obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
92obj-$(CONFIG_SENSORS_MAX6650) += max6650.o 94obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
93obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o 95obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
96obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
94obj-$(CONFIG_SENSORS_PC87360) += pc87360.o 97obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
95obj-$(CONFIG_SENSORS_PC87427) += pc87427.o 98obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
96obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o 99obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
@@ -121,15 +124,7 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o
121obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o 124obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o
122obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o 125obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o
123 126
124# PMBus drivers 127obj-$(CONFIG_PMBUS) += pmbus/
125obj-$(CONFIG_PMBUS) += pmbus_core.o
126obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
127obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
128obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
129obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
130obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
131obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
132obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
133 128
134ccflags-$(CONFIG_HWMON_DEBUG_CHIP) := -DDEBUG 129ccflags-$(CONFIG_HWMON_DEBUG_CHIP) := -DDEBUG
135 130
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 0070d5476dd0..59d83e83da7f 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -44,7 +44,9 @@
44#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ 44#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
45#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */ 45#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */
46#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ 46#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
47#define MAX_ATTRS 5 /* Maximum no of per-core attrs */ 47#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
48#define MAX_THRESH_ATTRS 3 /* Maximum no of Threshold attrs */
49#define TOTAL_ATTRS (MAX_CORE_ATTRS + MAX_THRESH_ATTRS)
48#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) 50#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
49 51
50#ifdef CONFIG_SMP 52#ifdef CONFIG_SMP
@@ -67,6 +69,9 @@
67 * This value is passed as "id" field to rdmsr/wrmsr functions. 69 * This value is passed as "id" field to rdmsr/wrmsr functions.
68 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS, 70 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
69 * from where the temperature values should be read. 71 * from where the temperature values should be read.
72 * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT,
73 * from where the thresholds are read.
74 * @attr_size: Total number of pre-core attrs displayed in the sysfs.
70 * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data. 75 * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
71 * Otherwise, temp_data holds coretemp data. 76 * Otherwise, temp_data holds coretemp data.
72 * @valid: If this is 1, the current temperature is valid. 77 * @valid: If this is 1, the current temperature is valid.
@@ -74,15 +79,18 @@
74struct temp_data { 79struct temp_data {
75 int temp; 80 int temp;
76 int ttarget; 81 int ttarget;
82 int tmin;
77 int tjmax; 83 int tjmax;
78 unsigned long last_updated; 84 unsigned long last_updated;
79 unsigned int cpu; 85 unsigned int cpu;
80 u32 cpu_core_id; 86 u32 cpu_core_id;
81 u32 status_reg; 87 u32 status_reg;
88 u32 intrpt_reg;
89 int attr_size;
82 bool is_pkg_data; 90 bool is_pkg_data;
83 bool valid; 91 bool valid;
84 struct sensor_device_attribute sd_attrs[MAX_ATTRS]; 92 struct sensor_device_attribute sd_attrs[TOTAL_ATTRS];
85 char attr_name[MAX_ATTRS][CORETEMP_NAME_LENGTH]; 93 char attr_name[TOTAL_ATTRS][CORETEMP_NAME_LENGTH];
86 struct mutex update_lock; 94 struct mutex update_lock;
87}; 95};
88 96
@@ -135,6 +143,19 @@ static ssize_t show_crit_alarm(struct device *dev,
135 return sprintf(buf, "%d\n", (eax >> 5) & 1); 143 return sprintf(buf, "%d\n", (eax >> 5) & 1);
136} 144}
137 145
146static ssize_t show_max_alarm(struct device *dev,
147 struct device_attribute *devattr, char *buf)
148{
149 u32 eax, edx;
150 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
151 struct platform_data *pdata = dev_get_drvdata(dev);
152 struct temp_data *tdata = pdata->core_data[attr->index];
153
154 rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
155
156 return sprintf(buf, "%d\n", !!(eax & THERM_STATUS_THRESHOLD1));
157}
158
138static ssize_t show_tjmax(struct device *dev, 159static ssize_t show_tjmax(struct device *dev,
139 struct device_attribute *devattr, char *buf) 160 struct device_attribute *devattr, char *buf)
140{ 161{
@@ -153,6 +174,83 @@ static ssize_t show_ttarget(struct device *dev,
153 return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget); 174 return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);
154} 175}
155 176
177static ssize_t store_ttarget(struct device *dev,
178 struct device_attribute *devattr,
179 const char *buf, size_t count)
180{
181 struct platform_data *pdata = dev_get_drvdata(dev);
182 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
183 struct temp_data *tdata = pdata->core_data[attr->index];
184 u32 eax, edx;
185 unsigned long val;
186 int diff;
187
188 if (strict_strtoul(buf, 10, &val))
189 return -EINVAL;
190
191 /*
192 * THERM_MASK_THRESHOLD1 is 7 bits wide. Values are entered in terms
193 * of milli degree celsius. Hence don't accept val > (127 * 1000)
194 */
195 if (val > tdata->tjmax || val > 127000)
196 return -EINVAL;
197
198 diff = (tdata->tjmax - val) / 1000;
199
200 mutex_lock(&tdata->update_lock);
201 rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
202 eax = (eax & ~THERM_MASK_THRESHOLD1) |
203 (diff << THERM_SHIFT_THRESHOLD1);
204 wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
205 tdata->ttarget = val;
206 mutex_unlock(&tdata->update_lock);
207
208 return count;
209}
210
211static ssize_t show_tmin(struct device *dev,
212 struct device_attribute *devattr, char *buf)
213{
214 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
215 struct platform_data *pdata = dev_get_drvdata(dev);
216
217 return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tmin);
218}
219
220static ssize_t store_tmin(struct device *dev,
221 struct device_attribute *devattr,
222 const char *buf, size_t count)
223{
224 struct platform_data *pdata = dev_get_drvdata(dev);
225 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
226 struct temp_data *tdata = pdata->core_data[attr->index];
227 u32 eax, edx;
228 unsigned long val;
229 int diff;
230
231 if (strict_strtoul(buf, 10, &val))
232 return -EINVAL;
233
234 /*
235 * THERM_MASK_THRESHOLD0 is 7 bits wide. Values are entered in terms
236 * of milli degree celsius. Hence don't accept val > (127 * 1000)
237 */
238 if (val > tdata->tjmax || val > 127000)
239 return -EINVAL;
240
241 diff = (tdata->tjmax - val) / 1000;
242
243 mutex_lock(&tdata->update_lock);
244 rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
245 eax = (eax & ~THERM_MASK_THRESHOLD0) |
246 (diff << THERM_SHIFT_THRESHOLD0);
247 wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
248 tdata->tmin = val;
249 mutex_unlock(&tdata->update_lock);
250
251 return count;
252}
253
156static ssize_t show_temp(struct device *dev, 254static ssize_t show_temp(struct device *dev,
157 struct device_attribute *devattr, char *buf) 255 struct device_attribute *devattr, char *buf)
158{ 256{
@@ -344,23 +442,31 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
344 int attr_no) 442 int attr_no)
345{ 443{
346 int err, i; 444 int err, i;
347 static ssize_t (*rd_ptr[MAX_ATTRS]) (struct device *dev, 445 static ssize_t (*rd_ptr[TOTAL_ATTRS]) (struct device *dev,
348 struct device_attribute *devattr, char *buf) = { 446 struct device_attribute *devattr, char *buf) = {
349 show_label, show_crit_alarm, show_ttarget, 447 show_label, show_crit_alarm, show_temp, show_tjmax,
350 show_temp, show_tjmax }; 448 show_max_alarm, show_ttarget, show_tmin };
351 static const char *names[MAX_ATTRS] = { 449 static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev,
450 struct device_attribute *devattr, const char *buf,
451 size_t count) = { NULL, NULL, NULL, NULL, NULL,
452 store_ttarget, store_tmin };
453 static const char *names[TOTAL_ATTRS] = {
352 "temp%d_label", "temp%d_crit_alarm", 454 "temp%d_label", "temp%d_crit_alarm",
353 "temp%d_max", "temp%d_input", 455 "temp%d_input", "temp%d_crit",
354 "temp%d_crit" }; 456 "temp%d_max_alarm", "temp%d_max",
457 "temp%d_max_hyst" };
355 458
356 for (i = 0; i < MAX_ATTRS; i++) { 459 for (i = 0; i < tdata->attr_size; i++) {
357 snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i], 460 snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
358 attr_no); 461 attr_no);
359 sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr); 462 sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
360 tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; 463 tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
361 tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO; 464 tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
465 if (rw_ptr[i]) {
466 tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR;
467 tdata->sd_attrs[i].dev_attr.store = rw_ptr[i];
468 }
362 tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; 469 tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
363 tdata->sd_attrs[i].dev_attr.store = NULL;
364 tdata->sd_attrs[i].index = attr_no; 470 tdata->sd_attrs[i].index = attr_no;
365 err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr); 471 err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
366 if (err) 472 if (err)
@@ -374,38 +480,6 @@ exit_free:
374 return err; 480 return err;
375} 481}
376 482
377static void update_ttarget(__u8 cpu_model, struct temp_data *tdata,
378 struct device *dev)
379{
380 int err;
381 u32 eax, edx;
382
383 /*
384 * Initialize ttarget value. Eventually this will be
385 * initialized with the value from MSR_IA32_THERM_INTERRUPT
386 * register. If IA32_TEMPERATURE_TARGET is supported, this
387 * value will be over written below.
388 * To Do: Patch to initialize ttarget from MSR_IA32_THERM_INTERRUPT
389 */
390 tdata->ttarget = tdata->tjmax - 20000;
391
392 /*
393 * Read the still undocumented IA32_TEMPERATURE_TARGET. It exists
394 * on older CPUs but not in this register,
395 * Atoms don't have it either.
396 */
397 if (cpu_model > 0xe && cpu_model != 0x1c) {
398 err = rdmsr_safe_on_cpu(tdata->cpu,
399 MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
400 if (err) {
401 dev_warn(dev,
402 "Unable to read IA32_TEMPERATURE_TARGET MSR\n");
403 } else {
404 tdata->ttarget = tdata->tjmax -
405 ((eax >> 8) & 0xff) * 1000;
406 }
407 }
408}
409 483
410static int __devinit chk_ucode_version(struct platform_device *pdev) 484static int __devinit chk_ucode_version(struct platform_device *pdev)
411{ 485{
@@ -464,9 +538,12 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
464 538
465 tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS : 539 tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
466 MSR_IA32_THERM_STATUS; 540 MSR_IA32_THERM_STATUS;
541 tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT :
542 MSR_IA32_THERM_INTERRUPT;
467 tdata->is_pkg_data = pkg_flag; 543 tdata->is_pkg_data = pkg_flag;
468 tdata->cpu = cpu; 544 tdata->cpu = cpu;
469 tdata->cpu_core_id = TO_CORE_ID(cpu); 545 tdata->cpu_core_id = TO_CORE_ID(cpu);
546 tdata->attr_size = MAX_CORE_ATTRS;
470 mutex_init(&tdata->update_lock); 547 mutex_init(&tdata->update_lock);
471 return tdata; 548 return tdata;
472} 549}
@@ -516,7 +593,17 @@ static int create_core_data(struct platform_data *pdata,
516 else 593 else
517 tdata->tjmax = get_tjmax(c, cpu, &pdev->dev); 594 tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
518 595
519 update_ttarget(c->x86_model, tdata, &pdev->dev); 596 /*
597 * Test if we can access the intrpt register. If so, increase the
598 * 'size' enough to have ttarget/tmin/max_alarm interfaces.
599 * Initialize ttarget with bits 16:22 of MSR_IA32_THERM_INTERRUPT
600 */
601 err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx);
602 if (!err) {
603 tdata->attr_size += MAX_THRESH_ATTRS;
604 tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
605 }
606
520 pdata->core_data[attr_no] = tdata; 607 pdata->core_data[attr_no] = tdata;
521 608
522 /* Create sysfs interfaces */ 609 /* Create sysfs interfaces */
@@ -553,7 +640,7 @@ static void coretemp_remove_core(struct platform_data *pdata,
553 struct temp_data *tdata = pdata->core_data[indx]; 640 struct temp_data *tdata = pdata->core_data[indx];
554 641
555 /* Remove the sysfs attributes */ 642 /* Remove the sysfs attributes */
556 for (i = 0; i < MAX_ATTRS; i++) 643 for (i = 0; i < tdata->attr_size; i++)
557 device_remove_file(dev, &tdata->sd_attrs[i].dev_attr); 644 device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
558 645
559 kfree(pdata->core_data[indx]); 646 kfree(pdata->core_data[indx]);
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index c4c40be0edbf..d22f241b6a67 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -114,7 +114,6 @@ struct i5k_amb_data {
114 void __iomem *amb_mmio; 114 void __iomem *amb_mmio;
115 struct i5k_device_attribute *attrs; 115 struct i5k_device_attribute *attrs;
116 unsigned int num_attrs; 116 unsigned int num_attrs;
117 unsigned long chipset_id;
118}; 117};
119 118
120static ssize_t show_name(struct device *dev, struct device_attribute *devattr, 119static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
@@ -444,8 +443,6 @@ static int __devinit i5k_find_amb_registers(struct i5k_amb_data *data,
444 goto out; 443 goto out;
445 } 444 }
446 445
447 data->chipset_id = devid;
448
449 res = 0; 446 res = 0;
450out: 447out:
451 pci_dev_put(pcidev); 448 pci_dev_put(pcidev);
@@ -478,23 +475,13 @@ out:
478 return res; 475 return res;
479} 476}
480 477
481static unsigned long i5k_channel_pci_id(struct i5k_amb_data *data, 478static struct {
482 unsigned long channel) 479 unsigned long err;
483{ 480 unsigned long fbd0;
484 switch (data->chipset_id) { 481} chipset_ids[] __devinitdata = {
485 case PCI_DEVICE_ID_INTEL_5000_ERR: 482 { PCI_DEVICE_ID_INTEL_5000_ERR, PCI_DEVICE_ID_INTEL_5000_FBD0 },
486 return PCI_DEVICE_ID_INTEL_5000_FBD0 + channel; 483 { PCI_DEVICE_ID_INTEL_5400_ERR, PCI_DEVICE_ID_INTEL_5400_FBD0 },
487 case PCI_DEVICE_ID_INTEL_5400_ERR: 484 { 0, 0 }
488 return PCI_DEVICE_ID_INTEL_5400_FBD0 + channel;
489 default:
490 BUG();
491 }
492}
493
494static unsigned long chipset_ids[] = {
495 PCI_DEVICE_ID_INTEL_5000_ERR,
496 PCI_DEVICE_ID_INTEL_5400_ERR,
497 0
498}; 485};
499 486
500#ifdef MODULE 487#ifdef MODULE
@@ -510,8 +497,7 @@ static int __devinit i5k_amb_probe(struct platform_device *pdev)
510{ 497{
511 struct i5k_amb_data *data; 498 struct i5k_amb_data *data;
512 struct resource *reso; 499 struct resource *reso;
513 int i; 500 int i, res;
514 int res = -ENODEV;
515 501
516 data = kzalloc(sizeof(*data), GFP_KERNEL); 502 data = kzalloc(sizeof(*data), GFP_KERNEL);
517 if (!data) 503 if (!data)
@@ -520,22 +506,22 @@ static int __devinit i5k_amb_probe(struct platform_device *pdev)
520 /* Figure out where the AMB registers live */ 506 /* Figure out where the AMB registers live */
521 i = 0; 507 i = 0;
522 do { 508 do {
523 res = i5k_find_amb_registers(data, chipset_ids[i]); 509 res = i5k_find_amb_registers(data, chipset_ids[i].err);
510 if (res == 0)
511 break;
524 i++; 512 i++;
525 } while (res && chipset_ids[i]); 513 } while (chipset_ids[i].err);
526 514
527 if (res) 515 if (res)
528 goto err; 516 goto err;
529 517
530 /* Copy the DIMM presence map for the first two channels */ 518 /* Copy the DIMM presence map for the first two channels */
531 res = i5k_channel_probe(&data->amb_present[0], 519 res = i5k_channel_probe(&data->amb_present[0], chipset_ids[i].fbd0);
532 i5k_channel_pci_id(data, 0));
533 if (res) 520 if (res)
534 goto err; 521 goto err;
535 522
536 /* Copy the DIMM presence map for the optional second two channels */ 523 /* Copy the DIMM presence map for the optional second two channels */
537 i5k_channel_probe(&data->amb_present[2], 524 i5k_channel_probe(&data->amb_present[2], chipset_ids[i].fbd0 + 1);
538 i5k_channel_pci_id(data, 1));
539 525
540 /* Set up resource regions */ 526 /* Set up resource regions */
541 reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME); 527 reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME);
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 1a409c5bc9bc..c316294c48b4 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -432,13 +432,15 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
432 aem_send_message(ipmi); 432 aem_send_message(ipmi);
433 433
434 res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT); 434 res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT);
435 if (!res) 435 if (!res) {
436 return -ETIMEDOUT; 436 res = -ETIMEDOUT;
437 goto out;
438 }
437 439
438 if (ipmi->rx_result || ipmi->rx_msg_len != rs_size || 440 if (ipmi->rx_result || ipmi->rx_msg_len != rs_size ||
439 memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) { 441 memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) {
440 kfree(rs_resp); 442 res = -ENOENT;
441 return -ENOENT; 443 goto out;
442 } 444 }
443 445
444 switch (size) { 446 switch (size) {
@@ -463,8 +465,11 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
463 break; 465 break;
464 } 466 }
465 } 467 }
468 res = 0;
466 469
467 return 0; 470out:
471 kfree(rs_resp);
472 return res;
468} 473}
469 474
470/* Update AEM energy registers */ 475/* Update AEM energy registers */
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 2f94f9504804..90ddb8774210 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -54,6 +54,9 @@
54 * and extended mode. They are mostly compatible with LM90 except for a data 54 * and extended mode. They are mostly compatible with LM90 except for a data
55 * format difference for the temperature value registers. 55 * format difference for the temperature value registers.
56 * 56 *
57 * This driver also supports the SA56004 from Philips. This device is
58 * pin-compatible with the LM86, the ED/EDP parts are also address-compatible.
59 *
57 * Since the LM90 was the first chipset supported by this driver, most 60 * Since the LM90 was the first chipset supported by this driver, most
58 * comments will refer to this chipset, but are actually general and 61 * comments will refer to this chipset, but are actually general and
59 * concern all supported chipsets, unless mentioned otherwise. 62 * concern all supported chipsets, unless mentioned otherwise.
@@ -96,13 +99,15 @@
96 * MAX6659 can have address 0x4c, 0x4d or 0x4e. 99 * MAX6659 can have address 0x4c, 0x4d or 0x4e.
97 * MAX6680 and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 100 * MAX6680 and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
98 * 0x4c, 0x4d or 0x4e. 101 * 0x4c, 0x4d or 0x4e.
102 * SA56004 can have address 0x48 through 0x4F.
99 */ 103 */
100 104
101static const unsigned short normal_i2c[] = { 105static const unsigned short normal_i2c[] = {
102 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END }; 106 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x48, 0x49, 0x4a, 0x4b, 0x4c,
107 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
103 108
104enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680, 109enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
105 max6646, w83l771, max6696 }; 110 max6646, w83l771, max6696, sa56004 };
106 111
107/* 112/*
108 * The LM90 registers 113 * The LM90 registers
@@ -152,6 +157,10 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
152#define MAX6659_REG_R_LOCAL_EMERG 0x17 157#define MAX6659_REG_R_LOCAL_EMERG 0x17
153#define MAX6659_REG_W_LOCAL_EMERG 0x17 158#define MAX6659_REG_W_LOCAL_EMERG 0x17
154 159
160/* SA56004 registers */
161
162#define SA56004_REG_R_LOCAL_TEMPL 0x22
163
155#define LM90_DEF_CONVRATE_RVAL 6 /* Def conversion rate register value */ 164#define LM90_DEF_CONVRATE_RVAL 6 /* Def conversion rate register value */
156#define LM90_MAX_CONVRATE_MS 16000 /* Maximum conversion rate in ms */ 165#define LM90_MAX_CONVRATE_MS 16000 /* Maximum conversion rate in ms */
157 166
@@ -161,7 +170,6 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
161#define LM90_FLAG_ADT7461_EXT (1 << 0) /* ADT7461 extended mode */ 170#define LM90_FLAG_ADT7461_EXT (1 << 0) /* ADT7461 extended mode */
162/* Device features */ 171/* Device features */
163#define LM90_HAVE_OFFSET (1 << 1) /* temperature offset register */ 172#define LM90_HAVE_OFFSET (1 << 1) /* temperature offset register */
164#define LM90_HAVE_LOCAL_EXT (1 << 2) /* extended local temperature */
165#define LM90_HAVE_REM_LIMIT_EXT (1 << 3) /* extended remote limit */ 173#define LM90_HAVE_REM_LIMIT_EXT (1 << 3) /* extended remote limit */
166#define LM90_HAVE_EMERGENCY (1 << 4) /* 3rd upper (emergency) limit */ 174#define LM90_HAVE_EMERGENCY (1 << 4) /* 3rd upper (emergency) limit */
167#define LM90_HAVE_EMERGENCY_ALARM (1 << 5)/* emergency alarm */ 175#define LM90_HAVE_EMERGENCY_ALARM (1 << 5)/* emergency alarm */
@@ -192,6 +200,7 @@ static const struct i2c_device_id lm90_id[] = {
192 { "max6696", max6696 }, 200 { "max6696", max6696 },
193 { "nct1008", adt7461 }, 201 { "nct1008", adt7461 },
194 { "w83l771", w83l771 }, 202 { "w83l771", w83l771 },
203 { "sa56004", sa56004 },
195 { } 204 { }
196}; 205};
197MODULE_DEVICE_TABLE(i2c, lm90_id); 206MODULE_DEVICE_TABLE(i2c, lm90_id);
@@ -204,6 +213,7 @@ struct lm90_params {
204 u16 alert_alarms; /* Which alarm bits trigger ALERT# */ 213 u16 alert_alarms; /* Which alarm bits trigger ALERT# */
205 /* Upper 8 bits for max6695/96 */ 214 /* Upper 8 bits for max6695/96 */
206 u8 max_convrate; /* Maximum conversion rate register value */ 215 u8 max_convrate; /* Maximum conversion rate register value */
216 u8 reg_local_ext; /* Extended local temp register (optional) */
207}; 217};
208 218
209static const struct lm90_params lm90_params[] = { 219static const struct lm90_params lm90_params[] = {
@@ -235,19 +245,20 @@ static const struct lm90_params lm90_params[] = {
235 .max_convrate = 9, 245 .max_convrate = 9,
236 }, 246 },
237 [max6646] = { 247 [max6646] = {
238 .flags = LM90_HAVE_LOCAL_EXT,
239 .alert_alarms = 0x7c, 248 .alert_alarms = 0x7c,
240 .max_convrate = 6, 249 .max_convrate = 6,
250 .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
241 }, 251 },
242 [max6657] = { 252 [max6657] = {
243 .flags = LM90_HAVE_LOCAL_EXT,
244 .alert_alarms = 0x7c, 253 .alert_alarms = 0x7c,
245 .max_convrate = 8, 254 .max_convrate = 8,
255 .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
246 }, 256 },
247 [max6659] = { 257 [max6659] = {
248 .flags = LM90_HAVE_LOCAL_EXT | LM90_HAVE_EMERGENCY, 258 .flags = LM90_HAVE_EMERGENCY,
249 .alert_alarms = 0x7c, 259 .alert_alarms = 0x7c,
250 .max_convrate = 8, 260 .max_convrate = 8,
261 .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
251 }, 262 },
252 [max6680] = { 263 [max6680] = {
253 .flags = LM90_HAVE_OFFSET, 264 .flags = LM90_HAVE_OFFSET,
@@ -255,16 +266,23 @@ static const struct lm90_params lm90_params[] = {
255 .max_convrate = 7, 266 .max_convrate = 7,
256 }, 267 },
257 [max6696] = { 268 [max6696] = {
258 .flags = LM90_HAVE_LOCAL_EXT | LM90_HAVE_EMERGENCY 269 .flags = LM90_HAVE_EMERGENCY
259 | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3, 270 | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
260 .alert_alarms = 0x187c, 271 .alert_alarms = 0x187c,
261 .max_convrate = 6, 272 .max_convrate = 6,
273 .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
262 }, 274 },
263 [w83l771] = { 275 [w83l771] = {
264 .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT, 276 .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
265 .alert_alarms = 0x7c, 277 .alert_alarms = 0x7c,
266 .max_convrate = 8, 278 .max_convrate = 8,
267 }, 279 },
280 [sa56004] = {
281 .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
282 .alert_alarms = 0x7b,
283 .max_convrate = 9,
284 .reg_local_ext = SA56004_REG_R_LOCAL_TEMPL,
285 },
268}; 286};
269 287
270/* 288/*
@@ -286,6 +304,7 @@ struct lm90_data {
286 u16 alert_alarms; /* Which alarm bits trigger ALERT# */ 304 u16 alert_alarms; /* Which alarm bits trigger ALERT# */
287 /* Upper 8 bits for max6695/96 */ 305 /* Upper 8 bits for max6695/96 */
288 u8 max_convrate; /* Maximum conversion rate */ 306 u8 max_convrate; /* Maximum conversion rate */
307 u8 reg_local_ext; /* local extension register offset */
289 308
290 /* registers values */ 309 /* registers values */
291 s8 temp8[8]; /* 0: local low limit 310 s8 temp8[8]; /* 0: local low limit
@@ -452,9 +471,9 @@ static struct lm90_data *lm90_update_device(struct device *dev)
452 lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT, &data->temp8[3]); 471 lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT, &data->temp8[3]);
453 lm90_read_reg(client, LM90_REG_R_TCRIT_HYST, &data->temp_hyst); 472 lm90_read_reg(client, LM90_REG_R_TCRIT_HYST, &data->temp_hyst);
454 473
455 if (data->flags & LM90_HAVE_LOCAL_EXT) { 474 if (data->reg_local_ext) {
456 lm90_read16(client, LM90_REG_R_LOCAL_TEMP, 475 lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
457 MAX6657_REG_R_LOCAL_TEMPL, 476 data->reg_local_ext,
458 &data->temp11[4]); 477 &data->temp11[4]);
459 } else { 478 } else {
460 if (lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP, 479 if (lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP,
@@ -1092,7 +1111,7 @@ static int lm90_detect(struct i2c_client *new_client,
1092 struct i2c_adapter *adapter = new_client->adapter; 1111 struct i2c_adapter *adapter = new_client->adapter;
1093 int address = new_client->addr; 1112 int address = new_client->addr;
1094 const char *name = NULL; 1113 const char *name = NULL;
1095 int man_id, chip_id, reg_config1, reg_convrate; 1114 int man_id, chip_id, reg_config1, reg_config2, reg_convrate;
1096 1115
1097 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 1116 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
1098 return -ENODEV; 1117 return -ENODEV;
@@ -1108,15 +1127,16 @@ static int lm90_detect(struct i2c_client *new_client,
1108 LM90_REG_R_CONVRATE)) < 0) 1127 LM90_REG_R_CONVRATE)) < 0)
1109 return -ENODEV; 1128 return -ENODEV;
1110 1129
1111 if ((address == 0x4C || address == 0x4D) 1130 if (man_id == 0x01 || man_id == 0x5C || man_id == 0x41) {
1112 && man_id == 0x01) { /* National Semiconductor */
1113 int reg_config2;
1114
1115 reg_config2 = i2c_smbus_read_byte_data(new_client, 1131 reg_config2 = i2c_smbus_read_byte_data(new_client,
1116 LM90_REG_R_CONFIG2); 1132 LM90_REG_R_CONFIG2);
1117 if (reg_config2 < 0) 1133 if (reg_config2 < 0)
1118 return -ENODEV; 1134 return -ENODEV;
1135 } else
1136 reg_config2 = 0; /* Make compiler happy */
1119 1137
1138 if ((address == 0x4C || address == 0x4D)
1139 && man_id == 0x01) { /* National Semiconductor */
1120 if ((reg_config1 & 0x2A) == 0x00 1140 if ((reg_config1 & 0x2A) == 0x00
1121 && (reg_config2 & 0xF8) == 0x00 1141 && (reg_config2 & 0xF8) == 0x00
1122 && reg_convrate <= 0x09) { 1142 && reg_convrate <= 0x09) {
@@ -1245,13 +1265,6 @@ static int lm90_detect(struct i2c_client *new_client,
1245 } else 1265 } else
1246 if (address == 0x4C 1266 if (address == 0x4C
1247 && man_id == 0x5C) { /* Winbond/Nuvoton */ 1267 && man_id == 0x5C) { /* Winbond/Nuvoton */
1248 int reg_config2;
1249
1250 reg_config2 = i2c_smbus_read_byte_data(new_client,
1251 LM90_REG_R_CONFIG2);
1252 if (reg_config2 < 0)
1253 return -ENODEV;
1254
1255 if ((reg_config1 & 0x2A) == 0x00 1268 if ((reg_config1 & 0x2A) == 0x00
1256 && (reg_config2 & 0xF8) == 0x00) { 1269 && (reg_config2 & 0xF8) == 0x00) {
1257 if (chip_id == 0x01 /* W83L771W/G */ 1270 if (chip_id == 0x01 /* W83L771W/G */
@@ -1263,6 +1276,15 @@ static int lm90_detect(struct i2c_client *new_client,
1263 name = "w83l771"; 1276 name = "w83l771";
1264 } 1277 }
1265 } 1278 }
1279 } else
1280 if (address >= 0x48 && address <= 0x4F
1281 && man_id == 0xA1) { /* NXP Semiconductor/Philips */
1282 if (chip_id == 0x00
1283 && (reg_config1 & 0x2A) == 0x00
1284 && (reg_config2 & 0xFE) == 0x00
1285 && reg_convrate <= 0x09) {
1286 name = "sa56004";
1287 }
1266 } 1288 }
1267 1289
1268 if (!name) { /* identification failed */ 1290 if (!name) { /* identification failed */
@@ -1368,6 +1390,7 @@ static int lm90_probe(struct i2c_client *new_client,
1368 1390
1369 /* Set chip capabilities */ 1391 /* Set chip capabilities */
1370 data->flags = lm90_params[data->kind].flags; 1392 data->flags = lm90_params[data->kind].flags;
1393 data->reg_local_ext = lm90_params[data->kind].reg_local_ext;
1371 1394
1372 /* Set maximum conversion rate */ 1395 /* Set maximum conversion rate */
1373 data->max_convrate = lm90_params[data->kind].max_convrate; 1396 data->max_convrate = lm90_params[data->kind].max_convrate;
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index d3b464b74ced..513901d592a9 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -74,8 +74,9 @@ static const unsigned short normal_i2c[] = {
74#define TT_OFF 0 74#define TT_OFF 0
75#define TT_ON 1 75#define TT_ON 1
76#define TT_MASK 7 76#define TT_MASK 7
77#define MANUFACTURER_ID 0x01 77#define NATSEMI_MAN_ID 0x01
78#define DEFAULT_REVISION 0xA4 78#define LM95231_CHIP_ID 0xA1
79#define LM95241_CHIP_ID 0xA4
79 80
80static const u8 lm95241_reg_address[] = { 81static const u8 lm95241_reg_address[] = {
81 LM95241_REG_R_LOCAL_TEMPH, 82 LM95241_REG_R_LOCAL_TEMPH,
@@ -338,20 +339,25 @@ static int lm95241_detect(struct i2c_client *new_client,
338 struct i2c_board_info *info) 339 struct i2c_board_info *info)
339{ 340{
340 struct i2c_adapter *adapter = new_client->adapter; 341 struct i2c_adapter *adapter = new_client->adapter;
341 int address = new_client->addr;
342 const char *name; 342 const char *name;
343 int mfg_id, chip_id;
343 344
344 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 345 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
345 return -ENODEV; 346 return -ENODEV;
346 347
347 if ((i2c_smbus_read_byte_data(new_client, LM95241_REG_R_MAN_ID) 348 mfg_id = i2c_smbus_read_byte_data(new_client, LM95241_REG_R_MAN_ID);
348 == MANUFACTURER_ID) 349 if (mfg_id != NATSEMI_MAN_ID)
349 && (i2c_smbus_read_byte_data(new_client, LM95241_REG_R_CHIP_ID) 350 return -ENODEV;
350 == DEFAULT_REVISION)) { 351
351 name = DEVNAME; 352 chip_id = i2c_smbus_read_byte_data(new_client, LM95241_REG_R_CHIP_ID);
352 } else { 353 switch (chip_id) {
353 dev_dbg(&adapter->dev, "LM95241 detection failed at 0x%02x\n", 354 case LM95231_CHIP_ID:
354 address); 355 name = "lm95231";
356 break;
357 case LM95241_CHIP_ID:
358 name = "lm95241";
359 break;
360 default:
355 return -ENODEV; 361 return -ENODEV;
356 } 362 }
357 363
@@ -431,7 +437,8 @@ static int lm95241_remove(struct i2c_client *client)
431 437
432/* Driver data (common to all clients) */ 438/* Driver data (common to all clients) */
433static const struct i2c_device_id lm95241_id[] = { 439static const struct i2c_device_id lm95241_id[] = {
434 { DEVNAME, 0 }, 440 { "lm95231", 0 },
441 { "lm95241", 0 },
435 { } 442 { }
436}; 443};
437MODULE_DEVICE_TABLE(i2c, lm95241_id); 444MODULE_DEVICE_TABLE(i2c, lm95241_id);
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
new file mode 100644
index 000000000000..dce9e68241e6
--- /dev/null
+++ b/drivers/hwmon/lm95245.c
@@ -0,0 +1,543 @@
1/*
2 * Copyright (C) 2011 Alexander Stein <alexander.stein@systec-electronic.com>
3 *
4 * The LM95245 is a sensor chip made by National Semiconductors.
5 * It reports up to two temperatures (its own plus an external one).
6 * Complete datasheet can be obtained from National's website at:
7 * http://www.national.com/ds.cgi/LM/LM95245.pdf
8 *
9 * This driver is based on lm95241.c
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/slab.h>
29#include <linux/jiffies.h>
30#include <linux/i2c.h>
31#include <linux/hwmon.h>
32#include <linux/hwmon-sysfs.h>
33#include <linux/err.h>
34#include <linux/mutex.h>
35#include <linux/sysfs.h>
36
37#define DEVNAME "lm95245"
38
39static const unsigned short normal_i2c[] = {
40 0x18, 0x19, 0x29, 0x4c, 0x4d, I2C_CLIENT_END };
41
42/* LM95245 registers */
43/* general registers */
44#define LM95245_REG_RW_CONFIG1 0x03
45#define LM95245_REG_RW_CONVERS_RATE 0x04
46#define LM95245_REG_W_ONE_SHOT 0x0F
47
48/* diode configuration */
49#define LM95245_REG_RW_CONFIG2 0xBF
50#define LM95245_REG_RW_REMOTE_OFFH 0x11
51#define LM95245_REG_RW_REMOTE_OFFL 0x12
52
53/* status registers */
54#define LM95245_REG_R_STATUS1 0x02
55#define LM95245_REG_R_STATUS2 0x33
56
57/* limit registers */
58#define LM95245_REG_RW_REMOTE_OS_LIMIT 0x07
59#define LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT 0x20
60#define LM95245_REG_RW_REMOTE_TCRIT_LIMIT 0x19
61#define LM95245_REG_RW_COMMON_HYSTERESIS 0x21
62
63/* temperature signed */
64#define LM95245_REG_R_LOCAL_TEMPH_S 0x00
65#define LM95245_REG_R_LOCAL_TEMPL_S 0x30
66#define LM95245_REG_R_REMOTE_TEMPH_S 0x01
67#define LM95245_REG_R_REMOTE_TEMPL_S 0x10
68/* temperature unsigned */
69#define LM95245_REG_R_REMOTE_TEMPH_U 0x31
70#define LM95245_REG_R_REMOTE_TEMPL_U 0x32
71
72/* id registers */
73#define LM95245_REG_R_MAN_ID 0xFE
74#define LM95245_REG_R_CHIP_ID 0xFF
75
76/* LM95245 specific bitfields */
77#define CFG_STOP 0x40
78#define CFG_REMOTE_TCRIT_MASK 0x10
79#define CFG_REMOTE_OS_MASK 0x08
80#define CFG_LOCAL_TCRIT_MASK 0x04
81#define CFG_LOCAL_OS_MASK 0x02
82
83#define CFG2_OS_A0 0x40
84#define CFG2_DIODE_FAULT_OS 0x20
85#define CFG2_DIODE_FAULT_TCRIT 0x10
86#define CFG2_REMOTE_TT 0x08
87#define CFG2_REMOTE_FILTER_DIS 0x00
88#define CFG2_REMOTE_FILTER_EN 0x06
89
90/* conversation rate in ms */
91#define RATE_CR0063 0x00
92#define RATE_CR0364 0x01
93#define RATE_CR1000 0x02
94#define RATE_CR2500 0x03
95
96#define STATUS1_DIODE_FAULT 0x04
97#define STATUS1_RTCRIT 0x02
98#define STATUS1_LOC 0x01
99
100#define MANUFACTURER_ID 0x01
101#define DEFAULT_REVISION 0xB3
102
103static const u8 lm95245_reg_address[] = {
104 LM95245_REG_R_LOCAL_TEMPH_S,
105 LM95245_REG_R_LOCAL_TEMPL_S,
106 LM95245_REG_R_REMOTE_TEMPH_S,
107 LM95245_REG_R_REMOTE_TEMPL_S,
108 LM95245_REG_R_REMOTE_TEMPH_U,
109 LM95245_REG_R_REMOTE_TEMPL_U,
110 LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT,
111 LM95245_REG_RW_REMOTE_TCRIT_LIMIT,
112 LM95245_REG_RW_COMMON_HYSTERESIS,
113 LM95245_REG_R_STATUS1,
114};
115
116/* Client data (each client gets its own) */
117struct lm95245_data {
118 struct device *hwmon_dev;
119 struct mutex update_lock;
120 unsigned long last_updated; /* in jiffies */
121 unsigned long interval; /* in msecs */
122 bool valid; /* zero until following fields are valid */
123 /* registers values */
124 u8 regs[ARRAY_SIZE(lm95245_reg_address)];
125 u8 config1, config2;
126};
127
128/* Conversions */
129static int temp_from_reg_unsigned(u8 val_h, u8 val_l)
130{
131 return val_h * 1000 + val_l * 1000 / 256;
132}
133
134static int temp_from_reg_signed(u8 val_h, u8 val_l)
135{
136 if (val_h & 0x80)
137 return (val_h - 0x100) * 1000;
138 return temp_from_reg_unsigned(val_h, val_l);
139}
140
141static struct lm95245_data *lm95245_update_device(struct device *dev)
142{
143 struct i2c_client *client = to_i2c_client(dev);
144 struct lm95245_data *data = i2c_get_clientdata(client);
145
146 mutex_lock(&data->update_lock);
147
148 if (time_after(jiffies, data->last_updated
149 + msecs_to_jiffies(data->interval)) || !data->valid) {
150 int i;
151
152 dev_dbg(&client->dev, "Updating lm95245 data.\n");
153 for (i = 0; i < ARRAY_SIZE(lm95245_reg_address); i++)
154 data->regs[i]
155 = i2c_smbus_read_byte_data(client,
156 lm95245_reg_address[i]);
157 data->last_updated = jiffies;
158 data->valid = 1;
159 }
160
161 mutex_unlock(&data->update_lock);
162
163 return data;
164}
165
166static unsigned long lm95245_read_conversion_rate(struct i2c_client *client)
167{
168 int rate;
169 unsigned long interval;
170
171 rate = i2c_smbus_read_byte_data(client, LM95245_REG_RW_CONVERS_RATE);
172
173 switch (rate) {
174 case RATE_CR0063:
175 interval = 63;
176 break;
177 case RATE_CR0364:
178 interval = 364;
179 break;
180 case RATE_CR1000:
181 interval = 1000;
182 break;
183 case RATE_CR2500:
184 default:
185 interval = 2500;
186 break;
187 }
188
189 return interval;
190}
191
192static unsigned long lm95245_set_conversion_rate(struct i2c_client *client,
193 unsigned long interval)
194{
195 int rate;
196
197 if (interval <= 63) {
198 interval = 63;
199 rate = RATE_CR0063;
200 } else if (interval <= 364) {
201 interval = 364;
202 rate = RATE_CR0364;
203 } else if (interval <= 1000) {
204 interval = 1000;
205 rate = RATE_CR1000;
206 } else {
207 interval = 2500;
208 rate = RATE_CR2500;
209 }
210
211 i2c_smbus_write_byte_data(client, LM95245_REG_RW_CONVERS_RATE, rate);
212
213 return interval;
214}
215
216/* Sysfs stuff */
217static ssize_t show_input(struct device *dev, struct device_attribute *attr,
218 char *buf)
219{
220 struct lm95245_data *data = lm95245_update_device(dev);
221 int temp;
222 int index = to_sensor_dev_attr(attr)->index;
223
224 /*
225 * Index 0 (Local temp) is always signed
226 * Index 2 (Remote temp) has both signed and unsigned data
227 * use signed calculation for remote if signed bit is set
228 */
229 if (index == 0 || data->regs[index] & 0x80)
230 temp = temp_from_reg_signed(data->regs[index],
231 data->regs[index + 1]);
232 else
233 temp = temp_from_reg_unsigned(data->regs[index + 2],
234 data->regs[index + 3]);
235
236 return snprintf(buf, PAGE_SIZE - 1, "%d\n", temp);
237}
238
239static ssize_t show_limit(struct device *dev, struct device_attribute *attr,
240 char *buf)
241{
242 struct lm95245_data *data = lm95245_update_device(dev);
243 int index = to_sensor_dev_attr(attr)->index;
244
245 return snprintf(buf, PAGE_SIZE - 1, "%d\n",
246 data->regs[index] * 1000);
247}
248
249static ssize_t set_limit(struct device *dev, struct device_attribute *attr,
250 const char *buf, size_t count)
251{
252 struct i2c_client *client = to_i2c_client(dev);
253 struct lm95245_data *data = i2c_get_clientdata(client);
254 int index = to_sensor_dev_attr(attr)->index;
255 unsigned long val;
256
257 if (strict_strtoul(buf, 10, &val) < 0)
258 return -EINVAL;
259
260 val /= 1000;
261
262 val = SENSORS_LIMIT(val, 0, (index == 6 ? 127 : 255));
263
264 mutex_lock(&data->update_lock);
265
266 data->valid = 0;
267
268 i2c_smbus_write_byte_data(client, lm95245_reg_address[index], val);
269
270 mutex_unlock(&data->update_lock);
271
272 return count;
273}
274
275static ssize_t set_crit_hyst(struct device *dev, struct device_attribute *attr,
276 const char *buf, size_t count)
277{
278 struct i2c_client *client = to_i2c_client(dev);
279 struct lm95245_data *data = i2c_get_clientdata(client);
280 unsigned long val;
281
282 if (strict_strtoul(buf, 10, &val) < 0)
283 return -EINVAL;
284
285 val /= 1000;
286
287 val = SENSORS_LIMIT(val, 0, 31);
288
289 mutex_lock(&data->update_lock);
290
291 data->valid = 0;
292
293 /* shared crit hysteresis */
294 i2c_smbus_write_byte_data(client, LM95245_REG_RW_COMMON_HYSTERESIS,
295 val);
296
297 mutex_unlock(&data->update_lock);
298
299 return count;
300}
301
302static ssize_t show_type(struct device *dev, struct device_attribute *attr,
303 char *buf)
304{
305 struct i2c_client *client = to_i2c_client(dev);
306 struct lm95245_data *data = i2c_get_clientdata(client);
307
308 return snprintf(buf, PAGE_SIZE - 1,
309 data->config2 & CFG2_REMOTE_TT ? "1\n" : "2\n");
310}
311
312static ssize_t set_type(struct device *dev, struct device_attribute *attr,
313 const char *buf, size_t count)
314{
315 struct i2c_client *client = to_i2c_client(dev);
316 struct lm95245_data *data = i2c_get_clientdata(client);
317 unsigned long val;
318
319 if (strict_strtoul(buf, 10, &val) < 0)
320 return -EINVAL;
321 if (val != 1 && val != 2)
322 return -EINVAL;
323
324 mutex_lock(&data->update_lock);
325
326 if (val == 1)
327 data->config2 |= CFG2_REMOTE_TT;
328 else
329 data->config2 &= ~CFG2_REMOTE_TT;
330
331 data->valid = 0;
332
333 i2c_smbus_write_byte_data(client, LM95245_REG_RW_CONFIG2,
334 data->config2);
335
336 mutex_unlock(&data->update_lock);
337
338 return count;
339}
340
341static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
342 char *buf)
343{
344 struct lm95245_data *data = lm95245_update_device(dev);
345 int index = to_sensor_dev_attr(attr)->index;
346
347 return snprintf(buf, PAGE_SIZE - 1, "%d\n",
348 !!(data->regs[9] & index));
349}
350
351static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
352 char *buf)
353{
354 struct lm95245_data *data = lm95245_update_device(dev);
355
356 return snprintf(buf, PAGE_SIZE - 1, "%lu\n", data->interval);
357}
358
359static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
360 const char *buf, size_t count)
361{
362 struct i2c_client *client = to_i2c_client(dev);
363 struct lm95245_data *data = i2c_get_clientdata(client);
364 unsigned long val;
365
366 if (strict_strtoul(buf, 10, &val) < 0)
367 return -EINVAL;
368
369 mutex_lock(&data->update_lock);
370
371 data->interval = lm95245_set_conversion_rate(client, val);
372
373 mutex_unlock(&data->update_lock);
374
375 return count;
376}
377
378static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_input, NULL, 0);
379static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_limit,
380 set_limit, 6);
381static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_limit,
382 set_crit_hyst, 8);
383static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL,
384 STATUS1_LOC);
385
386static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_input, NULL, 2);
387static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_limit,
388 set_limit, 7);
389static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_limit,
390 set_crit_hyst, 8);
391static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL,
392 STATUS1_RTCRIT);
393static SENSOR_DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type,
394 set_type, 0);
395static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL,
396 STATUS1_DIODE_FAULT);
397
398static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
399 set_interval);
400
401static struct attribute *lm95245_attributes[] = {
402 &sensor_dev_attr_temp1_input.dev_attr.attr,
403 &sensor_dev_attr_temp1_crit.dev_attr.attr,
404 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
405 &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
406 &sensor_dev_attr_temp2_input.dev_attr.attr,
407 &sensor_dev_attr_temp2_crit.dev_attr.attr,
408 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
409 &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
410 &sensor_dev_attr_temp2_type.dev_attr.attr,
411 &sensor_dev_attr_temp2_fault.dev_attr.attr,
412 &dev_attr_update_interval.attr,
413 NULL
414};
415
416static const struct attribute_group lm95245_group = {
417 .attrs = lm95245_attributes,
418};
419
420/* Return 0 if detection is successful, -ENODEV otherwise */
421static int lm95245_detect(struct i2c_client *new_client,
422 struct i2c_board_info *info)
423{
424 struct i2c_adapter *adapter = new_client->adapter;
425
426 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
427 return -ENODEV;
428
429 if (i2c_smbus_read_byte_data(new_client, LM95245_REG_R_MAN_ID)
430 != MANUFACTURER_ID
431 || i2c_smbus_read_byte_data(new_client, LM95245_REG_R_CHIP_ID)
432 != DEFAULT_REVISION)
433 return -ENODEV;
434
435 strlcpy(info->type, DEVNAME, I2C_NAME_SIZE);
436 return 0;
437}
438
439static void lm95245_init_client(struct i2c_client *client)
440{
441 struct lm95245_data *data = i2c_get_clientdata(client);
442
443 data->valid = 0;
444 data->interval = lm95245_read_conversion_rate(client);
445
446 data->config1 = i2c_smbus_read_byte_data(client,
447 LM95245_REG_RW_CONFIG1);
448 data->config2 = i2c_smbus_read_byte_data(client,
449 LM95245_REG_RW_CONFIG2);
450
451 if (data->config1 & CFG_STOP) {
452 /* Clear the standby bit */
453 data->config1 &= ~CFG_STOP;
454 i2c_smbus_write_byte_data(client, LM95245_REG_RW_CONFIG1,
455 data->config1);
456 }
457}
458
459static int lm95245_probe(struct i2c_client *new_client,
460 const struct i2c_device_id *id)
461{
462 struct lm95245_data *data;
463 int err;
464
465 data = kzalloc(sizeof(struct lm95245_data), GFP_KERNEL);
466 if (!data) {
467 err = -ENOMEM;
468 goto exit;
469 }
470
471 i2c_set_clientdata(new_client, data);
472 mutex_init(&data->update_lock);
473
474 /* Initialize the LM95245 chip */
475 lm95245_init_client(new_client);
476
477 /* Register sysfs hooks */
478 err = sysfs_create_group(&new_client->dev.kobj, &lm95245_group);
479 if (err)
480 goto exit_free;
481
482 data->hwmon_dev = hwmon_device_register(&new_client->dev);
483 if (IS_ERR(data->hwmon_dev)) {
484 err = PTR_ERR(data->hwmon_dev);
485 goto exit_remove_files;
486 }
487
488 return 0;
489
490exit_remove_files:
491 sysfs_remove_group(&new_client->dev.kobj, &lm95245_group);
492exit_free:
493 kfree(data);
494exit:
495 return err;
496}
497
498static int lm95245_remove(struct i2c_client *client)
499{
500 struct lm95245_data *data = i2c_get_clientdata(client);
501
502 hwmon_device_unregister(data->hwmon_dev);
503 sysfs_remove_group(&client->dev.kobj, &lm95245_group);
504
505 kfree(data);
506 return 0;
507}
508
509/* Driver data (common to all clients) */
510static const struct i2c_device_id lm95245_id[] = {
511 { DEVNAME, 0 },
512 { }
513};
514MODULE_DEVICE_TABLE(i2c, lm95245_id);
515
516static struct i2c_driver lm95245_driver = {
517 .class = I2C_CLASS_HWMON,
518 .driver = {
519 .name = DEVNAME,
520 },
521 .probe = lm95245_probe,
522 .remove = lm95245_remove,
523 .id_table = lm95245_id,
524 .detect = lm95245_detect,
525 .address_list = normal_i2c,
526};
527
528static int __init sensors_lm95245_init(void)
529{
530 return i2c_add_driver(&lm95245_driver);
531}
532
533static void __exit sensors_lm95245_exit(void)
534{
535 i2c_del_driver(&lm95245_driver);
536}
537
538MODULE_AUTHOR("Alexander Stein <alexander.stein@systec-electronic.com>");
539MODULE_DESCRIPTION("LM95245 sensor driver");
540MODULE_LICENSE("GPL");
541
542module_init(sensors_lm95245_init);
543module_exit(sensors_lm95245_exit);
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index d94a24fdf4ba..dd2d7b9620c2 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -124,7 +124,7 @@ static inline int MV_TO_LIMIT(int mv, int range)
124 124
125static inline int ADC_TO_CURR(int adc, int gain) 125static inline int ADC_TO_CURR(int adc, int gain)
126{ 126{
127 return adc * 1400000 / gain * 255; 127 return adc * 1400000 / (gain * 255);
128} 128}
129 129
130/* 130/*
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
new file mode 100644
index 000000000000..20d1b2ddffb6
--- /dev/null
+++ b/drivers/hwmon/max1668.c
@@ -0,0 +1,502 @@
1/*
2 Copyright (c) 2011 David George <david.george@ska.ac.za>
3
4 based on adm1021.c
5 some credit to Christoph Scheurer, but largely a rewrite
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/
21
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/slab.h>
25#include <linux/jiffies.h>
26#include <linux/i2c.h>
27#include <linux/hwmon.h>
28#include <linux/hwmon-sysfs.h>
29#include <linux/err.h>
30#include <linux/mutex.h>
31
32/* Addresses to scan */
33static unsigned short max1668_addr_list[] = {
34 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
35
36/* max1668 registers */
37
38#define MAX1668_REG_TEMP(nr) (nr)
39#define MAX1668_REG_STAT1 0x05
40#define MAX1668_REG_STAT2 0x06
41#define MAX1668_REG_MAN_ID 0xfe
42#define MAX1668_REG_DEV_ID 0xff
43
44/* limits */
45
46/* write high limits */
47#define MAX1668_REG_LIMH_WR(nr) (0x13 + 2 * (nr))
48/* write low limits */
49#define MAX1668_REG_LIML_WR(nr) (0x14 + 2 * (nr))
50/* read high limits */
51#define MAX1668_REG_LIMH_RD(nr) (0x08 + 2 * (nr))
52/* read low limits */
53#define MAX1668_REG_LIML_RD(nr) (0x09 + 2 * (nr))
54
55/* manufacturer and device ID Constants */
56#define MAN_ID_MAXIM 0x4d
57#define DEV_ID_MAX1668 0x3
58#define DEV_ID_MAX1805 0x5
59#define DEV_ID_MAX1989 0xb
60
61/* read only mode module parameter */
62static int read_only;
63module_param(read_only, bool, 0);
64MODULE_PARM_DESC(read_only, "Don't set any values, read only mode");
65
66enum chips { max1668, max1805, max1989 };
67
68struct max1668_data {
69 struct device *hwmon_dev;
70 enum chips type;
71
72 struct mutex update_lock;
73 char valid; /* !=0 if following fields are valid */
74 unsigned long last_updated; /* In jiffies */
75
76 /* 1x local and 4x remote */
77 s8 temp_max[5];
78 s8 temp_min[5];
79 s8 temp[5];
80 u16 alarms;
81};
82
83static struct max1668_data *max1668_update_device(struct device *dev)
84{
85 struct i2c_client *client = to_i2c_client(dev);
86 struct max1668_data *data = i2c_get_clientdata(client);
87 struct max1668_data *ret = data;
88 s32 val;
89 int i;
90
91 mutex_lock(&data->update_lock);
92
93 if (data->valid && !time_after(jiffies,
94 data->last_updated + HZ + HZ / 2))
95 goto abort;
96
97 for (i = 0; i < 5; i++) {
98 val = i2c_smbus_read_byte_data(client, MAX1668_REG_TEMP(i));
99 if (unlikely(val < 0)) {
100 ret = ERR_PTR(val);
101 goto abort;
102 }
103 data->temp[i] = (s8) val;
104
105 val = i2c_smbus_read_byte_data(client, MAX1668_REG_LIMH_RD(i));
106 if (unlikely(val < 0)) {
107 ret = ERR_PTR(val);
108 goto abort;
109 }
110 data->temp_max[i] = (s8) val;
111
112 val = i2c_smbus_read_byte_data(client, MAX1668_REG_LIML_RD(i));
113 if (unlikely(val < 0)) {
114 ret = ERR_PTR(val);
115 goto abort;
116 }
117 data->temp_min[i] = (s8) val;
118 }
119
120 val = i2c_smbus_read_byte_data(client, MAX1668_REG_STAT1);
121 if (unlikely(val < 0)) {
122 ret = ERR_PTR(val);
123 goto abort;
124 }
125 data->alarms = val << 8;
126
127 val = i2c_smbus_read_byte_data(client, MAX1668_REG_STAT2);
128 if (unlikely(val < 0)) {
129 ret = ERR_PTR(val);
130 goto abort;
131 }
132 data->alarms |= val;
133
134 data->last_updated = jiffies;
135 data->valid = 1;
136abort:
137 mutex_unlock(&data->update_lock);
138
139 return ret;
140}
141
142static ssize_t show_temp(struct device *dev,
143 struct device_attribute *devattr, char *buf)
144{
145 int index = to_sensor_dev_attr(devattr)->index;
146 struct max1668_data *data = max1668_update_device(dev);
147
148 if (IS_ERR(data))
149 return PTR_ERR(data);
150
151 return sprintf(buf, "%d\n", data->temp[index] * 1000);
152}
153
154static ssize_t show_temp_max(struct device *dev,
155 struct device_attribute *devattr, char *buf)
156{
157 int index = to_sensor_dev_attr(devattr)->index;
158 struct max1668_data *data = max1668_update_device(dev);
159
160 if (IS_ERR(data))
161 return PTR_ERR(data);
162
163 return sprintf(buf, "%d\n", data->temp_max[index] * 1000);
164}
165
166static ssize_t show_temp_min(struct device *dev,
167 struct device_attribute *devattr, char *buf)
168{
169 int index = to_sensor_dev_attr(devattr)->index;
170 struct max1668_data *data = max1668_update_device(dev);
171
172 if (IS_ERR(data))
173 return PTR_ERR(data);
174
175 return sprintf(buf, "%d\n", data->temp_min[index] * 1000);
176}
177
178static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
179 char *buf)
180{
181 int index = to_sensor_dev_attr(attr)->index;
182 struct max1668_data *data = max1668_update_device(dev);
183
184 if (IS_ERR(data))
185 return PTR_ERR(data);
186
187 return sprintf(buf, "%u\n", (data->alarms >> index) & 0x1);
188}
189
190static ssize_t show_fault(struct device *dev,
191 struct device_attribute *devattr, char *buf)
192{
193 int index = to_sensor_dev_attr(devattr)->index;
194 struct max1668_data *data = max1668_update_device(dev);
195
196 if (IS_ERR(data))
197 return PTR_ERR(data);
198
199 return sprintf(buf, "%u\n",
200 (data->alarms & (1 << 12)) && data->temp[index] == 127);
201}
202
203static ssize_t set_temp_max(struct device *dev,
204 struct device_attribute *devattr,
205 const char *buf, size_t count)
206{
207 int index = to_sensor_dev_attr(devattr)->index;
208 struct i2c_client *client = to_i2c_client(dev);
209 struct max1668_data *data = i2c_get_clientdata(client);
210 long temp;
211 int ret;
212
213 ret = kstrtol(buf, 10, &temp);
214 if (ret < 0)
215 return ret;
216
217 mutex_lock(&data->update_lock);
218 data->temp_max[index] = SENSORS_LIMIT(temp/1000, -128, 127);
219 if (i2c_smbus_write_byte_data(client,
220 MAX1668_REG_LIMH_WR(index),
221 data->temp_max[index]))
222 count = -EIO;
223 mutex_unlock(&data->update_lock);
224
225 return count;
226}
227
228static ssize_t set_temp_min(struct device *dev,
229 struct device_attribute *devattr,
230 const char *buf, size_t count)
231{
232 int index = to_sensor_dev_attr(devattr)->index;
233 struct i2c_client *client = to_i2c_client(dev);
234 struct max1668_data *data = i2c_get_clientdata(client);
235 long temp;
236 int ret;
237
238 ret = kstrtol(buf, 10, &temp);
239 if (ret < 0)
240 return ret;
241
242 mutex_lock(&data->update_lock);
243 data->temp_min[index] = SENSORS_LIMIT(temp/1000, -128, 127);
244 if (i2c_smbus_write_byte_data(client,
245 MAX1668_REG_LIML_WR(index),
246 data->temp_max[index]))
247 count = -EIO;
248 mutex_unlock(&data->update_lock);
249
250 return count;
251}
252
253static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
254static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max,
255 set_temp_max, 0);
256static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO, show_temp_min,
257 set_temp_min, 0);
258static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
259static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO, show_temp_max,
260 set_temp_max, 1);
261static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO, show_temp_min,
262 set_temp_min, 1);
263static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
264static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO, show_temp_max,
265 set_temp_max, 2);
266static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO, show_temp_min,
267 set_temp_min, 2);
268static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
269static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO, show_temp_max,
270 set_temp_max, 3);
271static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO, show_temp_min,
272 set_temp_min, 3);
273static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4);
274static SENSOR_DEVICE_ATTR(temp5_max, S_IRUGO, show_temp_max,
275 set_temp_max, 4);
276static SENSOR_DEVICE_ATTR(temp5_min, S_IRUGO, show_temp_min,
277 set_temp_min, 4);
278
279static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 14);
280static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 13);
281static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 7);
282static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 6);
283static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_alarm, NULL, 5);
284static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 4);
285static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_alarm, NULL, 3);
286static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 2);
287static SENSOR_DEVICE_ATTR(temp5_min_alarm, S_IRUGO, show_alarm, NULL, 1);
288static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_alarm, NULL, 0);
289
290static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_fault, NULL, 1);
291static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_fault, NULL, 2);
292static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_fault, NULL, 3);
293static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_fault, NULL, 4);
294
295/* Attributes common to MAX1668, MAX1989 and MAX1805 */
296static struct attribute *max1668_attribute_common[] = {
297 &sensor_dev_attr_temp1_max.dev_attr.attr,
298 &sensor_dev_attr_temp1_min.dev_attr.attr,
299 &sensor_dev_attr_temp1_input.dev_attr.attr,
300 &sensor_dev_attr_temp2_max.dev_attr.attr,
301 &sensor_dev_attr_temp2_min.dev_attr.attr,
302 &sensor_dev_attr_temp2_input.dev_attr.attr,
303 &sensor_dev_attr_temp3_max.dev_attr.attr,
304 &sensor_dev_attr_temp3_min.dev_attr.attr,
305 &sensor_dev_attr_temp3_input.dev_attr.attr,
306
307 &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
308 &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
309 &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
310 &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
311 &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
312 &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
313
314 &sensor_dev_attr_temp2_fault.dev_attr.attr,
315 &sensor_dev_attr_temp3_fault.dev_attr.attr,
316 NULL
317};
318
319/* Attributes not present on MAX1805 */
320static struct attribute *max1668_attribute_unique[] = {
321 &sensor_dev_attr_temp4_max.dev_attr.attr,
322 &sensor_dev_attr_temp4_min.dev_attr.attr,
323 &sensor_dev_attr_temp4_input.dev_attr.attr,
324 &sensor_dev_attr_temp5_max.dev_attr.attr,
325 &sensor_dev_attr_temp5_min.dev_attr.attr,
326 &sensor_dev_attr_temp5_input.dev_attr.attr,
327
328 &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
329 &sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
330 &sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
331 &sensor_dev_attr_temp5_min_alarm.dev_attr.attr,
332
333 &sensor_dev_attr_temp4_fault.dev_attr.attr,
334 &sensor_dev_attr_temp5_fault.dev_attr.attr,
335 NULL
336};
337
338static mode_t max1668_attribute_mode(struct kobject *kobj,
339 struct attribute *attr, int index)
340{
341 int ret = S_IRUGO;
342 if (read_only)
343 return ret;
344 if (attr == &sensor_dev_attr_temp1_max.dev_attr.attr ||
345 attr == &sensor_dev_attr_temp2_max.dev_attr.attr ||
346 attr == &sensor_dev_attr_temp3_max.dev_attr.attr ||
347 attr == &sensor_dev_attr_temp4_max.dev_attr.attr ||
348 attr == &sensor_dev_attr_temp5_max.dev_attr.attr ||
349 attr == &sensor_dev_attr_temp1_min.dev_attr.attr ||
350 attr == &sensor_dev_attr_temp2_min.dev_attr.attr ||
351 attr == &sensor_dev_attr_temp3_min.dev_attr.attr ||
352 attr == &sensor_dev_attr_temp4_min.dev_attr.attr ||
353 attr == &sensor_dev_attr_temp5_min.dev_attr.attr)
354 ret |= S_IWUSR;
355 return ret;
356}
357
358static const struct attribute_group max1668_group_common = {
359 .attrs = max1668_attribute_common,
360 .is_visible = max1668_attribute_mode
361};
362
363static const struct attribute_group max1668_group_unique = {
364 .attrs = max1668_attribute_unique,
365 .is_visible = max1668_attribute_mode
366};
367
368/* Return 0 if detection is successful, -ENODEV otherwise */
369static int max1668_detect(struct i2c_client *client,
370 struct i2c_board_info *info)
371{
372 struct i2c_adapter *adapter = client->adapter;
373 const char *type_name;
374 int man_id, dev_id;
375
376 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
377 return -ENODEV;
378
379 /* Check for unsupported part */
380 man_id = i2c_smbus_read_byte_data(client, MAX1668_REG_MAN_ID);
381 if (man_id != MAN_ID_MAXIM)
382 return -ENODEV;
383
384 dev_id = i2c_smbus_read_byte_data(client, MAX1668_REG_DEV_ID);
385 if (dev_id < 0)
386 return -ENODEV;
387
388 type_name = NULL;
389 if (dev_id == DEV_ID_MAX1668)
390 type_name = "max1668";
391 else if (dev_id == DEV_ID_MAX1805)
392 type_name = "max1805";
393 else if (dev_id == DEV_ID_MAX1989)
394 type_name = "max1989";
395
396 if (!type_name)
397 return -ENODEV;
398
399 strlcpy(info->type, type_name, I2C_NAME_SIZE);
400
401 return 0;
402}
403
404static int max1668_probe(struct i2c_client *client,
405 const struct i2c_device_id *id)
406{
407 struct i2c_adapter *adapter = client->adapter;
408 struct max1668_data *data;
409 int err;
410
411 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
412 return -ENODEV;
413
414 data = kzalloc(sizeof(struct max1668_data), GFP_KERNEL);
415 if (!data)
416 return -ENOMEM;
417
418 i2c_set_clientdata(client, data);
419 data->type = id->driver_data;
420 mutex_init(&data->update_lock);
421
422 /* Register sysfs hooks */
423 err = sysfs_create_group(&client->dev.kobj, &max1668_group_common);
424 if (err)
425 goto error_free;
426
427 if (data->type == max1668 || data->type == max1989) {
428 err = sysfs_create_group(&client->dev.kobj,
429 &max1668_group_unique);
430 if (err)
431 goto error_sysrem0;
432 }
433
434 data->hwmon_dev = hwmon_device_register(&client->dev);
435 if (IS_ERR(data->hwmon_dev)) {
436 err = PTR_ERR(data->hwmon_dev);
437 goto error_sysrem1;
438 }
439
440 return 0;
441
442error_sysrem1:
443 if (data->type == max1668 || data->type == max1989)
444 sysfs_remove_group(&client->dev.kobj, &max1668_group_unique);
445error_sysrem0:
446 sysfs_remove_group(&client->dev.kobj, &max1668_group_common);
447error_free:
448 kfree(data);
449 return err;
450}
451
452static int max1668_remove(struct i2c_client *client)
453{
454 struct max1668_data *data = i2c_get_clientdata(client);
455
456 hwmon_device_unregister(data->hwmon_dev);
457 if (data->type == max1668 || data->type == max1989)
458 sysfs_remove_group(&client->dev.kobj, &max1668_group_unique);
459
460 sysfs_remove_group(&client->dev.kobj, &max1668_group_common);
461
462 kfree(data);
463 return 0;
464}
465
466static const struct i2c_device_id max1668_id[] = {
467 { "max1668", max1668 },
468 { "max1805", max1805 },
469 { "max1989", max1989 },
470 { }
471};
472MODULE_DEVICE_TABLE(i2c, max1668_id);
473
474/* This is the driver that will be inserted */
475static struct i2c_driver max1668_driver = {
476 .class = I2C_CLASS_HWMON,
477 .driver = {
478 .name = "max1668",
479 },
480 .probe = max1668_probe,
481 .remove = max1668_remove,
482 .id_table = max1668_id,
483 .detect = max1668_detect,
484 .address_list = max1668_addr_list,
485};
486
487static int __init sensors_max1668_init(void)
488{
489 return i2c_add_driver(&max1668_driver);
490}
491
492static void __exit sensors_max1668_exit(void)
493{
494 i2c_del_driver(&max1668_driver);
495}
496
497MODULE_AUTHOR("David George <david.george@ska.ac.za>");
498MODULE_DESCRIPTION("MAX1668 remote temperature sensor driver");
499MODULE_LICENSE("GPL");
500
501module_init(sensors_max1668_init)
502module_exit(sensors_max1668_exit)
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
new file mode 100644
index 000000000000..eab11615dced
--- /dev/null
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -0,0 +1,452 @@
1/*
2 * ntc_thermistor.c - NTC Thermistors
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 * MyungJoo Ham <myungjoo.ham@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/pm_runtime.h>
26#include <linux/math64.h>
27#include <linux/platform_device.h>
28#include <linux/err.h>
29
30#include <linux/platform_data/ntc_thermistor.h>
31
32#include <linux/hwmon.h>
33#include <linux/hwmon-sysfs.h>
34
35struct ntc_compensation {
36 int temp_C;
37 unsigned int ohm;
38};
39
40/*
41 * A compensation table should be sorted by the values of .ohm
42 * in descending order.
43 * The following compensation tables are from the specification of Murata NTC
44 * Thermistors Datasheet
45 */
46const struct ntc_compensation ncpXXwb473[] = {
47 { .temp_C = -40, .ohm = 1747920 },
48 { .temp_C = -35, .ohm = 1245428 },
49 { .temp_C = -30, .ohm = 898485 },
50 { .temp_C = -25, .ohm = 655802 },
51 { .temp_C = -20, .ohm = 483954 },
52 { .temp_C = -15, .ohm = 360850 },
53 { .temp_C = -10, .ohm = 271697 },
54 { .temp_C = -5, .ohm = 206463 },
55 { .temp_C = 0, .ohm = 158214 },
56 { .temp_C = 5, .ohm = 122259 },
57 { .temp_C = 10, .ohm = 95227 },
58 { .temp_C = 15, .ohm = 74730 },
59 { .temp_C = 20, .ohm = 59065 },
60 { .temp_C = 25, .ohm = 47000 },
61 { .temp_C = 30, .ohm = 37643 },
62 { .temp_C = 35, .ohm = 30334 },
63 { .temp_C = 40, .ohm = 24591 },
64 { .temp_C = 45, .ohm = 20048 },
65 { .temp_C = 50, .ohm = 16433 },
66 { .temp_C = 55, .ohm = 13539 },
67 { .temp_C = 60, .ohm = 11209 },
68 { .temp_C = 65, .ohm = 9328 },
69 { .temp_C = 70, .ohm = 7798 },
70 { .temp_C = 75, .ohm = 6544 },
71 { .temp_C = 80, .ohm = 5518 },
72 { .temp_C = 85, .ohm = 4674 },
73 { .temp_C = 90, .ohm = 3972 },
74 { .temp_C = 95, .ohm = 3388 },
75 { .temp_C = 100, .ohm = 2902 },
76 { .temp_C = 105, .ohm = 2494 },
77 { .temp_C = 110, .ohm = 2150 },
78 { .temp_C = 115, .ohm = 1860 },
79 { .temp_C = 120, .ohm = 1615 },
80 { .temp_C = 125, .ohm = 1406 },
81};
82const struct ntc_compensation ncpXXwl333[] = {
83 { .temp_C = -40, .ohm = 1610154 },
84 { .temp_C = -35, .ohm = 1130850 },
85 { .temp_C = -30, .ohm = 802609 },
86 { .temp_C = -25, .ohm = 575385 },
87 { .temp_C = -20, .ohm = 416464 },
88 { .temp_C = -15, .ohm = 304219 },
89 { .temp_C = -10, .ohm = 224193 },
90 { .temp_C = -5, .ohm = 166623 },
91 { .temp_C = 0, .ohm = 124850 },
92 { .temp_C = 5, .ohm = 94287 },
93 { .temp_C = 10, .ohm = 71747 },
94 { .temp_C = 15, .ohm = 54996 },
95 { .temp_C = 20, .ohm = 42455 },
96 { .temp_C = 25, .ohm = 33000 },
97 { .temp_C = 30, .ohm = 25822 },
98 { .temp_C = 35, .ohm = 20335 },
99 { .temp_C = 40, .ohm = 16115 },
100 { .temp_C = 45, .ohm = 12849 },
101 { .temp_C = 50, .ohm = 10306 },
102 { .temp_C = 55, .ohm = 8314 },
103 { .temp_C = 60, .ohm = 6746 },
104 { .temp_C = 65, .ohm = 5503 },
105 { .temp_C = 70, .ohm = 4513 },
106 { .temp_C = 75, .ohm = 3721 },
107 { .temp_C = 80, .ohm = 3084 },
108 { .temp_C = 85, .ohm = 2569 },
109 { .temp_C = 90, .ohm = 2151 },
110 { .temp_C = 95, .ohm = 1809 },
111 { .temp_C = 100, .ohm = 1529 },
112 { .temp_C = 105, .ohm = 1299 },
113 { .temp_C = 110, .ohm = 1108 },
114 { .temp_C = 115, .ohm = 949 },
115 { .temp_C = 120, .ohm = 817 },
116 { .temp_C = 125, .ohm = 707 },
117};
118
119struct ntc_data {
120 struct device *hwmon_dev;
121 struct ntc_thermistor_platform_data *pdata;
122 const struct ntc_compensation *comp;
123 struct device *dev;
124 int n_comp;
125 char name[PLATFORM_NAME_SIZE];
126};
127
128static inline u64 div64_u64_safe(u64 dividend, u64 divisor)
129{
130 if (divisor == 0 && dividend == 0)
131 return 0;
132 if (divisor == 0)
133 return UINT_MAX;
134 return div64_u64(dividend, divisor);
135}
136
137static unsigned int get_ohm_of_thermistor(struct ntc_data *data,
138 unsigned int uV)
139{
140 struct ntc_thermistor_platform_data *pdata = data->pdata;
141 u64 mV = uV / 1000;
142 u64 pmV = pdata->pullup_uV / 1000;
143 u64 N, puO, pdO;
144 puO = pdata->pullup_ohm;
145 pdO = pdata->pulldown_ohm;
146
147 if (mV == 0) {
148 if (pdata->connect == NTC_CONNECTED_POSITIVE)
149 return UINT_MAX;
150 return 0;
151 }
152 if (mV >= pmV)
153 return (pdata->connect == NTC_CONNECTED_POSITIVE) ?
154 0 : UINT_MAX;
155
156 if (pdata->connect == NTC_CONNECTED_POSITIVE && puO == 0)
157 N = div64_u64_safe(pdO * (pmV - mV), mV);
158 else if (pdata->connect == NTC_CONNECTED_GROUND && pdO == 0)
159 N = div64_u64_safe(puO * mV, pmV - mV);
160 else if (pdata->connect == NTC_CONNECTED_POSITIVE)
161 N = div64_u64_safe(pdO * puO * (pmV - mV),
162 puO * mV - pdO * (pmV - mV));
163 else
164 N = div64_u64_safe(pdO * puO * mV, pdO * (pmV - mV) - puO * mV);
165
166 return (unsigned int) N;
167}
168
169static int lookup_comp(struct ntc_data *data,
170 unsigned int ohm, int *i_low, int *i_high)
171{
172 int start, end, mid = -1;
173
174 /* Do a binary search on compensation table */
175 start = 0;
176 end = data->n_comp;
177
178 while (end > start) {
179 mid = start + (end - start) / 2;
180 if (data->comp[mid].ohm < ohm)
181 end = mid;
182 else if (data->comp[mid].ohm > ohm)
183 start = mid + 1;
184 else
185 break;
186 }
187
188 if (mid == 0) {
189 if (data->comp[mid].ohm > ohm) {
190 *i_high = mid;
191 *i_low = mid + 1;
192 return 0;
193 } else {
194 *i_low = mid;
195 *i_high = -1;
196 return -EINVAL;
197 }
198 }
199 if (mid == (data->n_comp - 1)) {
200 if (data->comp[mid].ohm <= ohm) {
201 *i_low = mid;
202 *i_high = mid - 1;
203 return 0;
204 } else {
205 *i_low = -1;
206 *i_high = mid;
207 return -EINVAL;
208 }
209 }
210
211 if (data->comp[mid].ohm <= ohm) {
212 *i_low = mid;
213 *i_high = mid - 1;
214 } else {
215 *i_low = mid + 1;
216 *i_high = mid;
217 }
218
219 return 0;
220}
221
222static int get_temp_mC(struct ntc_data *data, unsigned int ohm, int *temp)
223{
224 int low, high;
225 int ret;
226
227 ret = lookup_comp(data, ohm, &low, &high);
228 if (ret) {
229 /* Unable to use linear approximation */
230 if (low != -1)
231 *temp = data->comp[low].temp_C * 1000;
232 else if (high != -1)
233 *temp = data->comp[high].temp_C * 1000;
234 else
235 return ret;
236 } else {
237 *temp = data->comp[low].temp_C * 1000 +
238 ((data->comp[high].temp_C - data->comp[low].temp_C) *
239 1000 * ((int)ohm - (int)data->comp[low].ohm)) /
240 ((int)data->comp[high].ohm - (int)data->comp[low].ohm);
241 }
242
243 return 0;
244}
245
246static int ntc_thermistor_read(struct ntc_data *data, int *temp)
247{
248 int ret;
249 int read_ohm, read_uV;
250 unsigned int ohm = 0;
251
252 if (data->pdata->read_ohm) {
253 read_ohm = data->pdata->read_ohm();
254 if (read_ohm < 0)
255 return read_ohm;
256 ohm = (unsigned int)read_ohm;
257 }
258
259 if (data->pdata->read_uV) {
260 read_uV = data->pdata->read_uV();
261 if (read_uV < 0)
262 return read_uV;
263 ohm = get_ohm_of_thermistor(data, (unsigned int)read_uV);
264 }
265
266 ret = get_temp_mC(data, ohm, temp);
267 if (ret) {
268 dev_dbg(data->dev, "Sensor reading function not available.\n");
269 return ret;
270 }
271
272 return 0;
273}
274
275static ssize_t ntc_show_name(struct device *dev,
276 struct device_attribute *attr, char *buf)
277{
278 struct ntc_data *data = dev_get_drvdata(dev);
279
280 return sprintf(buf, "%s\n", data->name);
281}
282
283static ssize_t ntc_show_type(struct device *dev,
284 struct device_attribute *attr, char *buf)
285{
286 return sprintf(buf, "4\n");
287}
288
289static ssize_t ntc_show_temp(struct device *dev,
290 struct device_attribute *attr, char *buf)
291{
292 struct ntc_data *data = dev_get_drvdata(dev);
293 int temp, ret;
294
295 ret = ntc_thermistor_read(data, &temp);
296 if (ret)
297 return ret;
298 return sprintf(buf, "%d\n", temp);
299}
300
301static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO, ntc_show_type, NULL, 0);
302static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ntc_show_temp, NULL, 0);
303static DEVICE_ATTR(name, S_IRUGO, ntc_show_name, NULL);
304
305static struct attribute *ntc_attributes[] = {
306 &dev_attr_name.attr,
307 &sensor_dev_attr_temp1_type.dev_attr.attr,
308 &sensor_dev_attr_temp1_input.dev_attr.attr,
309 NULL,
310};
311
312static const struct attribute_group ntc_attr_group = {
313 .attrs = ntc_attributes,
314};
315
316static int __devinit ntc_thermistor_probe(struct platform_device *pdev)
317{
318 struct ntc_data *data;
319 struct ntc_thermistor_platform_data *pdata = pdev->dev.platform_data;
320 int ret = 0;
321
322 if (!pdata) {
323 dev_err(&pdev->dev, "No platform init data supplied.\n");
324 return -ENODEV;
325 }
326
327 /* Either one of the two is required. */
328 if (!pdata->read_uV && !pdata->read_ohm) {
329 dev_err(&pdev->dev, "Both read_uV and read_ohm missing."
330 "Need either one of the two.\n");
331 return -EINVAL;
332 }
333
334 if (pdata->read_uV && pdata->read_ohm) {
335 dev_warn(&pdev->dev, "Only one of read_uV and read_ohm "
336 "is needed; ignoring read_uV.\n");
337 pdata->read_uV = NULL;
338 }
339
340 if (pdata->read_uV && (pdata->pullup_uV == 0 ||
341 (pdata->pullup_ohm == 0 && pdata->connect ==
342 NTC_CONNECTED_GROUND) ||
343 (pdata->pulldown_ohm == 0 && pdata->connect ==
344 NTC_CONNECTED_POSITIVE) ||
345 (pdata->connect != NTC_CONNECTED_POSITIVE &&
346 pdata->connect != NTC_CONNECTED_GROUND))) {
347 dev_err(&pdev->dev, "Required data to use read_uV not "
348 "supplied.\n");
349 return -EINVAL;
350 }
351
352 data = kzalloc(sizeof(struct ntc_data), GFP_KERNEL);
353 if (!data)
354 return -ENOMEM;
355
356 data->dev = &pdev->dev;
357 data->pdata = pdata;
358 strncpy(data->name, pdev->id_entry->name, PLATFORM_NAME_SIZE);
359
360 switch (pdev->id_entry->driver_data) {
361 case TYPE_NCPXXWB473:
362 data->comp = ncpXXwb473;
363 data->n_comp = ARRAY_SIZE(ncpXXwb473);
364 break;
365 case TYPE_NCPXXWL333:
366 data->comp = ncpXXwl333;
367 data->n_comp = ARRAY_SIZE(ncpXXwl333);
368 break;
369 default:
370 dev_err(&pdev->dev, "Unknown device type: %lu(%s)\n",
371 pdev->id_entry->driver_data,
372 pdev->id_entry->name);
373 ret = -EINVAL;
374 goto err;
375 }
376
377 platform_set_drvdata(pdev, data);
378
379 ret = sysfs_create_group(&data->dev->kobj, &ntc_attr_group);
380 if (ret) {
381 dev_err(data->dev, "unable to create sysfs files\n");
382 goto err;
383 }
384
385 data->hwmon_dev = hwmon_device_register(data->dev);
386 if (IS_ERR_OR_NULL(data->hwmon_dev)) {
387 dev_err(data->dev, "unable to register as hwmon device.\n");
388 ret = -EINVAL;
389 goto err_after_sysfs;
390 }
391
392 dev_info(&pdev->dev, "Thermistor %s:%d (type: %s/%lu) successfully probed.\n",
393 pdev->name, pdev->id, pdev->id_entry->name,
394 pdev->id_entry->driver_data);
395 return 0;
396err_after_sysfs:
397 sysfs_remove_group(&data->dev->kobj, &ntc_attr_group);
398err:
399 kfree(data);
400 return ret;
401}
402
403static int __devexit ntc_thermistor_remove(struct platform_device *pdev)
404{
405 struct ntc_data *data = platform_get_drvdata(pdev);
406
407 hwmon_device_unregister(data->hwmon_dev);
408 sysfs_remove_group(&data->dev->kobj, &ntc_attr_group);
409 platform_set_drvdata(pdev, NULL);
410
411 kfree(data);
412
413 return 0;
414}
415
416static const struct platform_device_id ntc_thermistor_id[] = {
417 { "ncp15wb473", TYPE_NCPXXWB473 },
418 { "ncp18wb473", TYPE_NCPXXWB473 },
419 { "ncp21wb473", TYPE_NCPXXWB473 },
420 { "ncp03wb473", TYPE_NCPXXWB473 },
421 { "ncp15wl333", TYPE_NCPXXWL333 },
422 { },
423};
424
425static struct platform_driver ntc_thermistor_driver = {
426 .driver = {
427 .name = "ntc-thermistor",
428 .owner = THIS_MODULE,
429 },
430 .probe = ntc_thermistor_probe,
431 .remove = __devexit_p(ntc_thermistor_remove),
432 .id_table = ntc_thermistor_id,
433};
434
435static int __init ntc_thermistor_init(void)
436{
437 return platform_driver_register(&ntc_thermistor_driver);
438}
439
440module_init(ntc_thermistor_init);
441
442static void __exit ntc_thermistor_cleanup(void)
443{
444 platform_driver_unregister(&ntc_thermistor_driver);
445}
446
447module_exit(ntc_thermistor_cleanup);
448
449MODULE_DESCRIPTION("NTC Thermistor Driver");
450MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
451MODULE_LICENSE("GPL");
452MODULE_ALIAS("platform:ntc-thermistor");
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
new file mode 100644
index 000000000000..c9237b9dcff2
--- /dev/null
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -0,0 +1,100 @@
1#
2# PMBus chip drivers configuration
3#
4
5menuconfig PMBUS
6 tristate "PMBus support"
7 depends on I2C && EXPERIMENTAL
8 default n
9 help
10 Say yes here if you want to enable PMBus support.
11
12 This driver can also be built as a module. If so, the module will
13 be called pmbus_core.
14
15if PMBUS
16
17config SENSORS_PMBUS
18 tristate "Generic PMBus devices"
19 default y
20 help
21 If you say yes here you get hardware monitoring support for generic
22 PMBus devices, including but not limited to ADP4000, BMR450, BMR451,
23 BMR453, BMR454, LTC2978, NCP4200, and NCP4208.
24
25 This driver can also be built as a module. If so, the module will
26 be called pmbus.
27
28config SENSORS_ADM1275
29 tristate "Analog Devices ADM1275"
30 default n
31 help
32 If you say yes here you get hardware monitoring support for Analog
33 Devices ADM1275 Hot-Swap Controller and Digital Power Monitor.
34
35 This driver can also be built as a module. If so, the module will
36 be called adm1275.
37
38config SENSORS_LM25066
39 tristate "National Semiconductor LM25066 and compatibles"
40 default n
41 help
42 If you say yes here you get hardware monitoring support for National
43 Semiconductor LM25066, LM5064, and LM5066.
44
45 This driver can also be built as a module. If so, the module will
46 be called lm25066.
47
48config SENSORS_MAX16064
49 tristate "Maxim MAX16064"
50 default n
51 help
52 If you say yes here you get hardware monitoring support for Maxim
53 MAX16064.
54
55 This driver can also be built as a module. If so, the module will
56 be called max16064.
57
58config SENSORS_MAX34440
59 tristate "Maxim MAX34440/MAX34441"
60 default n
61 help
62 If you say yes here you get hardware monitoring support for Maxim
63 MAX34440 and MAX34441.
64
65 This driver can also be built as a module. If so, the module will
66 be called max34440.
67
68config SENSORS_MAX8688
69 tristate "Maxim MAX8688"
70 default n
71 help
72 If you say yes here you get hardware monitoring support for Maxim
73 MAX8688.
74
75 This driver can also be built as a module. If so, the module will
76 be called max8688.
77
78config SENSORS_UCD9000
79 tristate "TI UCD90120, UCD90124, UCD9090, UCD90910"
80 default n
81 help
82 If you say yes here you get hardware monitoring support for TI
83 UCD90120, UCD90124, UCD9090, UCD90910 Sequencer and System Health
84 Controllers.
85
86 This driver can also be built as a module. If so, the module will
87 be called ucd9000.
88
89config SENSORS_UCD9200
90 tristate "TI UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, UCD9248"
91 default n
92 help
93 If you say yes here you get hardware monitoring support for TI
94 UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, and UCD9248
95 Digital PWM System Controllers.
96
97 This driver can also be built as a module. If so, the module will
98 be called ucd9200.
99
100endif # PMBUS
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
new file mode 100644
index 000000000000..623eedb1ed9a
--- /dev/null
+++ b/drivers/hwmon/pmbus/Makefile
@@ -0,0 +1,13 @@
1#
2# Makefile for PMBus chip drivers.
3#
4
5obj-$(CONFIG_PMBUS) += pmbus_core.o
6obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
7obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
8obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
9obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
10obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
11obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
12obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
13obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
diff --git a/drivers/hwmon/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 8bc1bd663721..c936e2782309 100644
--- a/drivers/hwmon/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -23,11 +23,68 @@
23#include <linux/i2c.h> 23#include <linux/i2c.h>
24#include "pmbus.h" 24#include "pmbus.h"
25 25
26#define ADM1275_PEAK_IOUT 0xd0
27#define ADM1275_PEAK_VIN 0xd1
28#define ADM1275_PEAK_VOUT 0xd2
26#define ADM1275_PMON_CONFIG 0xd4 29#define ADM1275_PMON_CONFIG 0xd4
27 30
28#define ADM1275_VIN_VOUT_SELECT (1 << 6) 31#define ADM1275_VIN_VOUT_SELECT (1 << 6)
29#define ADM1275_VRANGE (1 << 5) 32#define ADM1275_VRANGE (1 << 5)
30 33
34static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
35{
36 int ret;
37
38 if (page)
39 return -EINVAL;
40
41 switch (reg) {
42 case PMBUS_VIRT_READ_IOUT_MAX:
43 ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_IOUT);
44 break;
45 case PMBUS_VIRT_READ_VOUT_MAX:
46 ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_VOUT);
47 break;
48 case PMBUS_VIRT_READ_VIN_MAX:
49 ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_VIN);
50 break;
51 case PMBUS_VIRT_RESET_IOUT_HISTORY:
52 case PMBUS_VIRT_RESET_VOUT_HISTORY:
53 case PMBUS_VIRT_RESET_VIN_HISTORY:
54 ret = 0;
55 break;
56 default:
57 ret = -ENODATA;
58 break;
59 }
60 return ret;
61}
62
63static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
64 u16 word)
65{
66 int ret;
67
68 if (page)
69 return -EINVAL;
70
71 switch (reg) {
72 case PMBUS_VIRT_RESET_IOUT_HISTORY:
73 ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_IOUT, 0);
74 break;
75 case PMBUS_VIRT_RESET_VOUT_HISTORY:
76 ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_VOUT, 0);
77 break;
78 case PMBUS_VIRT_RESET_VIN_HISTORY:
79 ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_VIN, 0);
80 break;
81 default:
82 ret = -ENODATA;
83 break;
84 }
85 return ret;
86}
87
31static int adm1275_probe(struct i2c_client *client, 88static int adm1275_probe(struct i2c_client *client,
32 const struct i2c_device_id *id) 89 const struct i2c_device_id *id)
33{ 90{
@@ -50,14 +107,17 @@ static int adm1275_probe(struct i2c_client *client,
50 } 107 }
51 108
52 info->pages = 1; 109 info->pages = 1;
53 info->direct[PSC_VOLTAGE_IN] = true; 110 info->format[PSC_VOLTAGE_IN] = direct;
54 info->direct[PSC_VOLTAGE_OUT] = true; 111 info->format[PSC_VOLTAGE_OUT] = direct;
55 info->direct[PSC_CURRENT_OUT] = true; 112 info->format[PSC_CURRENT_OUT] = direct;
56 info->m[PSC_CURRENT_OUT] = 807; 113 info->m[PSC_CURRENT_OUT] = 807;
57 info->b[PSC_CURRENT_OUT] = 20475; 114 info->b[PSC_CURRENT_OUT] = 20475;
58 info->R[PSC_CURRENT_OUT] = -1; 115 info->R[PSC_CURRENT_OUT] = -1;
59 info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT; 116 info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
60 117
118 info->read_word_data = adm1275_read_word_data;
119 info->write_word_data = adm1275_write_word_data;
120
61 if (config & ADM1275_VRANGE) { 121 if (config & ADM1275_VRANGE) {
62 info->m[PSC_VOLTAGE_IN] = 19199; 122 info->m[PSC_VOLTAGE_IN] = 19199;
63 info->b[PSC_VOLTAGE_IN] = 0; 123 info->b[PSC_VOLTAGE_IN] = 0;
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
new file mode 100644
index 000000000000..ac254fba551b
--- /dev/null
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -0,0 +1,352 @@
1/*
2 * Hardware monitoring driver for LM25066 / LM5064 / LM5066
3 *
4 * Copyright (c) 2011 Ericsson AB.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/err.h>
25#include <linux/slab.h>
26#include <linux/i2c.h>
27#include "pmbus.h"
28
29enum chips { lm25066, lm5064, lm5066 };
30
31#define LM25066_READ_VAUX 0xd0
32#define LM25066_MFR_READ_IIN 0xd1
33#define LM25066_MFR_READ_PIN 0xd2
34#define LM25066_MFR_IIN_OC_WARN_LIMIT 0xd3
35#define LM25066_MFR_PIN_OP_WARN_LIMIT 0xd4
36#define LM25066_READ_PIN_PEAK 0xd5
37#define LM25066_CLEAR_PIN_PEAK 0xd6
38#define LM25066_DEVICE_SETUP 0xd9
39#define LM25066_READ_AVG_VIN 0xdc
40#define LM25066_READ_AVG_VOUT 0xdd
41#define LM25066_READ_AVG_IIN 0xde
42#define LM25066_READ_AVG_PIN 0xdf
43
44#define LM25066_DEV_SETUP_CL (1 << 4) /* Current limit */
45
46struct lm25066_data {
47 int id;
48 struct pmbus_driver_info info;
49};
50
51#define to_lm25066_data(x) container_of(x, struct lm25066_data, info)
52
53static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
54{
55 const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
56 const struct lm25066_data *data = to_lm25066_data(info);
57 int ret;
58
59 if (page > 1)
60 return -EINVAL;
61
62 /* Map READ_VAUX into READ_VOUT register on page 1 */
63 if (page == 1) {
64 switch (reg) {
65 case PMBUS_READ_VOUT:
66 ret = pmbus_read_word_data(client, 0,
67 LM25066_READ_VAUX);
68 if (ret < 0)
69 break;
70 /* Adjust returned value to match VOUT coefficients */
71 switch (data->id) {
72 case lm25066:
73 /* VOUT: 4.54 mV VAUX: 283.2 uV LSB */
74 ret = DIV_ROUND_CLOSEST(ret * 2832, 45400);
75 break;
76 case lm5064:
77 /* VOUT: 4.53 mV VAUX: 700 uV LSB */
78 ret = DIV_ROUND_CLOSEST(ret * 70, 453);
79 break;
80 case lm5066:
81 /* VOUT: 2.18 mV VAUX: 725 uV LSB */
82 ret = DIV_ROUND_CLOSEST(ret * 725, 2180);
83 break;
84 }
85 break;
86 default:
87 /* No other valid registers on page 1 */
88 ret = -EINVAL;
89 break;
90 }
91 goto done;
92 }
93
94 switch (reg) {
95 case PMBUS_READ_IIN:
96 ret = pmbus_read_word_data(client, 0, LM25066_MFR_READ_IIN);
97 break;
98 case PMBUS_READ_PIN:
99 ret = pmbus_read_word_data(client, 0, LM25066_MFR_READ_PIN);
100 break;
101 case PMBUS_IIN_OC_WARN_LIMIT:
102 ret = pmbus_read_word_data(client, 0,
103 LM25066_MFR_IIN_OC_WARN_LIMIT);
104 break;
105 case PMBUS_PIN_OP_WARN_LIMIT:
106 ret = pmbus_read_word_data(client, 0,
107 LM25066_MFR_PIN_OP_WARN_LIMIT);
108 break;
109 case PMBUS_VIRT_READ_VIN_AVG:
110 ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_VIN);
111 break;
112 case PMBUS_VIRT_READ_VOUT_AVG:
113 ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_VOUT);
114 break;
115 case PMBUS_VIRT_READ_IIN_AVG:
116 ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_IIN);
117 break;
118 case PMBUS_VIRT_READ_PIN_AVG:
119 ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_PIN);
120 break;
121 case PMBUS_VIRT_READ_PIN_MAX:
122 ret = pmbus_read_word_data(client, 0, LM25066_READ_PIN_PEAK);
123 break;
124 case PMBUS_VIRT_RESET_PIN_HISTORY:
125 ret = 0;
126 break;
127 default:
128 ret = -ENODATA;
129 break;
130 }
131done:
132 return ret;
133}
134
135static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
136 u16 word)
137{
138 int ret;
139
140 if (page > 1)
141 return -EINVAL;
142
143 switch (reg) {
144 case PMBUS_IIN_OC_WARN_LIMIT:
145 ret = pmbus_write_word_data(client, 0,
146 LM25066_MFR_IIN_OC_WARN_LIMIT,
147 word);
148 break;
149 case PMBUS_PIN_OP_WARN_LIMIT:
150 ret = pmbus_write_word_data(client, 0,
151 LM25066_MFR_PIN_OP_WARN_LIMIT,
152 word);
153 break;
154 case PMBUS_VIRT_RESET_PIN_HISTORY:
155 ret = pmbus_write_byte(client, 0, LM25066_CLEAR_PIN_PEAK);
156 break;
157 default:
158 ret = -ENODATA;
159 break;
160 }
161 return ret;
162}
163
164static int lm25066_write_byte(struct i2c_client *client, int page, u8 value)
165{
166 if (page > 1)
167 return -EINVAL;
168
169 if (page == 0)
170 return pmbus_write_byte(client, 0, value);
171
172 return 0;
173}
174
175static int lm25066_probe(struct i2c_client *client,
176 const struct i2c_device_id *id)
177{
178 int config;
179 int ret;
180 struct lm25066_data *data;
181 struct pmbus_driver_info *info;
182
183 if (!i2c_check_functionality(client->adapter,
184 I2C_FUNC_SMBUS_READ_BYTE_DATA))
185 return -ENODEV;
186
187 data = kzalloc(sizeof(struct lm25066_data), GFP_KERNEL);
188 if (!data)
189 return -ENOMEM;
190
191 config = i2c_smbus_read_byte_data(client, LM25066_DEVICE_SETUP);
192 if (config < 0) {
193 ret = config;
194 goto err_mem;
195 }
196
197 data->id = id->driver_data;
198 info = &data->info;
199
200 info->pages = 2;
201 info->format[PSC_VOLTAGE_IN] = direct;
202 info->format[PSC_VOLTAGE_OUT] = direct;
203 info->format[PSC_CURRENT_IN] = direct;
204 info->format[PSC_TEMPERATURE] = direct;
205 info->format[PSC_POWER] = direct;
206
207 info->m[PSC_TEMPERATURE] = 16;
208 info->b[PSC_TEMPERATURE] = 0;
209 info->R[PSC_TEMPERATURE] = 0;
210
211 info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT
212 | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_PIN | PMBUS_HAVE_IIN
213 | PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
214 info->func[1] = PMBUS_HAVE_VOUT;
215
216 info->read_word_data = lm25066_read_word_data;
217 info->write_word_data = lm25066_write_word_data;
218 info->write_byte = lm25066_write_byte;
219
220 switch (id->driver_data) {
221 case lm25066:
222 info->m[PSC_VOLTAGE_IN] = 22070;
223 info->b[PSC_VOLTAGE_IN] = 0;
224 info->R[PSC_VOLTAGE_IN] = -2;
225 info->m[PSC_VOLTAGE_OUT] = 22070;
226 info->b[PSC_VOLTAGE_OUT] = 0;
227 info->R[PSC_VOLTAGE_OUT] = -2;
228
229 if (config & LM25066_DEV_SETUP_CL) {
230 info->m[PSC_CURRENT_IN] = 6852;
231 info->b[PSC_CURRENT_IN] = 0;
232 info->R[PSC_CURRENT_IN] = -2;
233 info->m[PSC_POWER] = 369;
234 info->b[PSC_POWER] = 0;
235 info->R[PSC_POWER] = -2;
236 } else {
237 info->m[PSC_CURRENT_IN] = 13661;
238 info->b[PSC_CURRENT_IN] = 0;
239 info->R[PSC_CURRENT_IN] = -2;
240 info->m[PSC_POWER] = 736;
241 info->b[PSC_POWER] = 0;
242 info->R[PSC_POWER] = -2;
243 }
244 break;
245 case lm5064:
246 info->m[PSC_VOLTAGE_IN] = 22075;
247 info->b[PSC_VOLTAGE_IN] = 0;
248 info->R[PSC_VOLTAGE_IN] = -2;
249 info->m[PSC_VOLTAGE_OUT] = 22075;
250 info->b[PSC_VOLTAGE_OUT] = 0;
251 info->R[PSC_VOLTAGE_OUT] = -2;
252
253 if (config & LM25066_DEV_SETUP_CL) {
254 info->m[PSC_CURRENT_IN] = 6713;
255 info->b[PSC_CURRENT_IN] = 0;
256 info->R[PSC_CURRENT_IN] = -2;
257 info->m[PSC_POWER] = 3619;
258 info->b[PSC_POWER] = 0;
259 info->R[PSC_POWER] = -3;
260 } else {
261 info->m[PSC_CURRENT_IN] = 13426;
262 info->b[PSC_CURRENT_IN] = 0;
263 info->R[PSC_CURRENT_IN] = -2;
264 info->m[PSC_POWER] = 7238;
265 info->b[PSC_POWER] = 0;
266 info->R[PSC_POWER] = -3;
267 }
268 break;
269 case lm5066:
270 info->m[PSC_VOLTAGE_IN] = 4587;
271 info->b[PSC_VOLTAGE_IN] = 0;
272 info->R[PSC_VOLTAGE_IN] = -2;
273 info->m[PSC_VOLTAGE_OUT] = 4587;
274 info->b[PSC_VOLTAGE_OUT] = 0;
275 info->R[PSC_VOLTAGE_OUT] = -2;
276
277 if (config & LM25066_DEV_SETUP_CL) {
278 info->m[PSC_CURRENT_IN] = 10753;
279 info->b[PSC_CURRENT_IN] = 0;
280 info->R[PSC_CURRENT_IN] = -2;
281 info->m[PSC_POWER] = 1204;
282 info->b[PSC_POWER] = 0;
283 info->R[PSC_POWER] = -3;
284 } else {
285 info->m[PSC_CURRENT_IN] = 5405;
286 info->b[PSC_CURRENT_IN] = 0;
287 info->R[PSC_CURRENT_IN] = -2;
288 info->m[PSC_POWER] = 605;
289 info->b[PSC_POWER] = 0;
290 info->R[PSC_POWER] = -3;
291 }
292 break;
293 default:
294 ret = -ENODEV;
295 goto err_mem;
296 }
297
298 ret = pmbus_do_probe(client, id, info);
299 if (ret)
300 goto err_mem;
301 return 0;
302
303err_mem:
304 kfree(data);
305 return ret;
306}
307
308static int lm25066_remove(struct i2c_client *client)
309{
310 const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
311 const struct lm25066_data *data = to_lm25066_data(info);
312 int ret;
313
314 ret = pmbus_do_remove(client);
315 kfree(data);
316 return ret;
317}
318
319static const struct i2c_device_id lm25066_id[] = {
320 {"lm25066", lm25066},
321 {"lm5064", lm5064},
322 {"lm5066", lm5066},
323 { }
324};
325
326MODULE_DEVICE_TABLE(i2c, lm25066_id);
327
328/* This is the driver that will be inserted */
329static struct i2c_driver lm25066_driver = {
330 .driver = {
331 .name = "lm25066",
332 },
333 .probe = lm25066_probe,
334 .remove = lm25066_remove,
335 .id_table = lm25066_id,
336};
337
338static int __init lm25066_init(void)
339{
340 return i2c_add_driver(&lm25066_driver);
341}
342
343static void __exit lm25066_exit(void)
344{
345 i2c_del_driver(&lm25066_driver);
346}
347
348MODULE_AUTHOR("Guenter Roeck");
349MODULE_DESCRIPTION("PMBus driver for LM25066/LM5064/LM5066");
350MODULE_LICENSE("GPL");
351module_init(lm25066_init);
352module_exit(lm25066_exit);
diff --git a/drivers/hwmon/max16064.c b/drivers/hwmon/pmbus/max16064.c
index 1d6d717060d3..e50b296e8db4 100644
--- a/drivers/hwmon/max16064.c
+++ b/drivers/hwmon/pmbus/max16064.c
@@ -25,11 +25,60 @@
25#include <linux/i2c.h> 25#include <linux/i2c.h>
26#include "pmbus.h" 26#include "pmbus.h"
27 27
28#define MAX16064_MFR_VOUT_PEAK 0xd4
29#define MAX16064_MFR_TEMPERATURE_PEAK 0xd6
30
31static int max16064_read_word_data(struct i2c_client *client, int page, int reg)
32{
33 int ret;
34
35 switch (reg) {
36 case PMBUS_VIRT_READ_VOUT_MAX:
37 ret = pmbus_read_word_data(client, page,
38 MAX16064_MFR_VOUT_PEAK);
39 break;
40 case PMBUS_VIRT_READ_TEMP_MAX:
41 ret = pmbus_read_word_data(client, page,
42 MAX16064_MFR_TEMPERATURE_PEAK);
43 break;
44 case PMBUS_VIRT_RESET_VOUT_HISTORY:
45 case PMBUS_VIRT_RESET_TEMP_HISTORY:
46 ret = 0;
47 break;
48 default:
49 ret = -ENODATA;
50 break;
51 }
52 return ret;
53}
54
55static int max16064_write_word_data(struct i2c_client *client, int page,
56 int reg, u16 word)
57{
58 int ret;
59
60 switch (reg) {
61 case PMBUS_VIRT_RESET_VOUT_HISTORY:
62 ret = pmbus_write_word_data(client, page,
63 MAX16064_MFR_VOUT_PEAK, 0);
64 break;
65 case PMBUS_VIRT_RESET_TEMP_HISTORY:
66 ret = pmbus_write_word_data(client, page,
67 MAX16064_MFR_TEMPERATURE_PEAK,
68 0xffff);
69 break;
70 default:
71 ret = -ENODATA;
72 break;
73 }
74 return ret;
75}
76
28static struct pmbus_driver_info max16064_info = { 77static struct pmbus_driver_info max16064_info = {
29 .pages = 4, 78 .pages = 4,
30 .direct[PSC_VOLTAGE_IN] = true, 79 .format[PSC_VOLTAGE_IN] = direct,
31 .direct[PSC_VOLTAGE_OUT] = true, 80 .format[PSC_VOLTAGE_OUT] = direct,
32 .direct[PSC_TEMPERATURE] = true, 81 .format[PSC_TEMPERATURE] = direct,
33 .m[PSC_VOLTAGE_IN] = 19995, 82 .m[PSC_VOLTAGE_IN] = 19995,
34 .b[PSC_VOLTAGE_IN] = 0, 83 .b[PSC_VOLTAGE_IN] = 0,
35 .R[PSC_VOLTAGE_IN] = -1, 84 .R[PSC_VOLTAGE_IN] = -1,
@@ -44,6 +93,8 @@ static struct pmbus_driver_info max16064_info = {
44 .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT, 93 .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
45 .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT, 94 .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
46 .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT, 95 .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
96 .read_word_data = max16064_read_word_data,
97 .write_word_data = max16064_write_word_data,
47}; 98};
48 99
49static int max16064_probe(struct i2c_client *client, 100static int max16064_probe(struct i2c_client *client,
diff --git a/drivers/hwmon/max34440.c b/drivers/hwmon/pmbus/max34440.c
index db11e1a175b2..fda621d2e458 100644
--- a/drivers/hwmon/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -27,11 +27,70 @@
27 27
28enum chips { max34440, max34441 }; 28enum chips { max34440, max34441 };
29 29
30#define MAX34440_MFR_VOUT_PEAK 0xd4
31#define MAX34440_MFR_IOUT_PEAK 0xd5
32#define MAX34440_MFR_TEMPERATURE_PEAK 0xd6
33
30#define MAX34440_STATUS_OC_WARN (1 << 0) 34#define MAX34440_STATUS_OC_WARN (1 << 0)
31#define MAX34440_STATUS_OC_FAULT (1 << 1) 35#define MAX34440_STATUS_OC_FAULT (1 << 1)
32#define MAX34440_STATUS_OT_FAULT (1 << 5) 36#define MAX34440_STATUS_OT_FAULT (1 << 5)
33#define MAX34440_STATUS_OT_WARN (1 << 6) 37#define MAX34440_STATUS_OT_WARN (1 << 6)
34 38
39static int max34440_read_word_data(struct i2c_client *client, int page, int reg)
40{
41 int ret;
42
43 switch (reg) {
44 case PMBUS_VIRT_READ_VOUT_MAX:
45 ret = pmbus_read_word_data(client, page,
46 MAX34440_MFR_VOUT_PEAK);
47 break;
48 case PMBUS_VIRT_READ_IOUT_MAX:
49 ret = pmbus_read_word_data(client, page,
50 MAX34440_MFR_IOUT_PEAK);
51 break;
52 case PMBUS_VIRT_READ_TEMP_MAX:
53 ret = pmbus_read_word_data(client, page,
54 MAX34440_MFR_TEMPERATURE_PEAK);
55 break;
56 case PMBUS_VIRT_RESET_VOUT_HISTORY:
57 case PMBUS_VIRT_RESET_IOUT_HISTORY:
58 case PMBUS_VIRT_RESET_TEMP_HISTORY:
59 ret = 0;
60 break;
61 default:
62 ret = -ENODATA;
63 break;
64 }
65 return ret;
66}
67
68static int max34440_write_word_data(struct i2c_client *client, int page,
69 int reg, u16 word)
70{
71 int ret;
72
73 switch (reg) {
74 case PMBUS_VIRT_RESET_VOUT_HISTORY:
75 ret = pmbus_write_word_data(client, page,
76 MAX34440_MFR_VOUT_PEAK, 0);
77 break;
78 case PMBUS_VIRT_RESET_IOUT_HISTORY:
79 ret = pmbus_write_word_data(client, page,
80 MAX34440_MFR_IOUT_PEAK, 0);
81 break;
82 case PMBUS_VIRT_RESET_TEMP_HISTORY:
83 ret = pmbus_write_word_data(client, page,
84 MAX34440_MFR_TEMPERATURE_PEAK,
85 0xffff);
86 break;
87 default:
88 ret = -ENODATA;
89 break;
90 }
91 return ret;
92}
93
35static int max34440_read_byte_data(struct i2c_client *client, int page, int reg) 94static int max34440_read_byte_data(struct i2c_client *client, int page, int reg)
36{ 95{
37 int ret; 96 int ret;
@@ -72,10 +131,10 @@ static int max34440_read_byte_data(struct i2c_client *client, int page, int reg)
72static struct pmbus_driver_info max34440_info[] = { 131static struct pmbus_driver_info max34440_info[] = {
73 [max34440] = { 132 [max34440] = {
74 .pages = 14, 133 .pages = 14,
75 .direct[PSC_VOLTAGE_IN] = true, 134 .format[PSC_VOLTAGE_IN] = direct,
76 .direct[PSC_VOLTAGE_OUT] = true, 135 .format[PSC_VOLTAGE_OUT] = direct,
77 .direct[PSC_TEMPERATURE] = true, 136 .format[PSC_TEMPERATURE] = direct,
78 .direct[PSC_CURRENT_OUT] = true, 137 .format[PSC_CURRENT_OUT] = direct,
79 .m[PSC_VOLTAGE_IN] = 1, 138 .m[PSC_VOLTAGE_IN] = 1,
80 .b[PSC_VOLTAGE_IN] = 0, 139 .b[PSC_VOLTAGE_IN] = 0,
81 .R[PSC_VOLTAGE_IN] = 3, /* R = 0 in datasheet reflects mV */ 140 .R[PSC_VOLTAGE_IN] = 3, /* R = 0 in datasheet reflects mV */
@@ -109,14 +168,16 @@ static struct pmbus_driver_info max34440_info[] = {
109 .func[12] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, 168 .func[12] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
110 .func[13] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, 169 .func[13] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
111 .read_byte_data = max34440_read_byte_data, 170 .read_byte_data = max34440_read_byte_data,
171 .read_word_data = max34440_read_word_data,
172 .write_word_data = max34440_write_word_data,
112 }, 173 },
113 [max34441] = { 174 [max34441] = {
114 .pages = 12, 175 .pages = 12,
115 .direct[PSC_VOLTAGE_IN] = true, 176 .format[PSC_VOLTAGE_IN] = direct,
116 .direct[PSC_VOLTAGE_OUT] = true, 177 .format[PSC_VOLTAGE_OUT] = direct,
117 .direct[PSC_TEMPERATURE] = true, 178 .format[PSC_TEMPERATURE] = direct,
118 .direct[PSC_CURRENT_OUT] = true, 179 .format[PSC_CURRENT_OUT] = direct,
119 .direct[PSC_FAN] = true, 180 .format[PSC_FAN] = direct,
120 .m[PSC_VOLTAGE_IN] = 1, 181 .m[PSC_VOLTAGE_IN] = 1,
121 .b[PSC_VOLTAGE_IN] = 0, 182 .b[PSC_VOLTAGE_IN] = 0,
122 .R[PSC_VOLTAGE_IN] = 3, 183 .R[PSC_VOLTAGE_IN] = 3,
@@ -150,6 +211,8 @@ static struct pmbus_driver_info max34440_info[] = {
150 .func[10] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, 211 .func[10] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
151 .func[11] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, 212 .func[11] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
152 .read_byte_data = max34440_read_byte_data, 213 .read_byte_data = max34440_read_byte_data,
214 .read_word_data = max34440_read_word_data,
215 .write_word_data = max34440_write_word_data,
153 }, 216 },
154}; 217};
155 218
diff --git a/drivers/hwmon/max8688.c b/drivers/hwmon/pmbus/max8688.c
index 7fb93f4e9f21..c3e72f1a3cfb 100644
--- a/drivers/hwmon/max8688.c
+++ b/drivers/hwmon/pmbus/max8688.c
@@ -25,6 +25,9 @@
25#include <linux/i2c.h> 25#include <linux/i2c.h>
26#include "pmbus.h" 26#include "pmbus.h"
27 27
28#define MAX8688_MFR_VOUT_PEAK 0xd4
29#define MAX8688_MFR_IOUT_PEAK 0xd5
30#define MAX8688_MFR_TEMPERATURE_PEAK 0xd6
28#define MAX8688_MFG_STATUS 0xd8 31#define MAX8688_MFG_STATUS 0xd8
29 32
30#define MAX8688_STATUS_OC_FAULT (1 << 4) 33#define MAX8688_STATUS_OC_FAULT (1 << 4)
@@ -37,6 +40,62 @@
37#define MAX8688_STATUS_OT_FAULT (1 << 13) 40#define MAX8688_STATUS_OT_FAULT (1 << 13)
38#define MAX8688_STATUS_OT_WARNING (1 << 14) 41#define MAX8688_STATUS_OT_WARNING (1 << 14)
39 42
43static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
44{
45 int ret;
46
47 if (page)
48 return -EINVAL;
49
50 switch (reg) {
51 case PMBUS_VIRT_READ_VOUT_MAX:
52 ret = pmbus_read_word_data(client, 0, MAX8688_MFR_VOUT_PEAK);
53 break;
54 case PMBUS_VIRT_READ_IOUT_MAX:
55 ret = pmbus_read_word_data(client, 0, MAX8688_MFR_IOUT_PEAK);
56 break;
57 case PMBUS_VIRT_READ_TEMP_MAX:
58 ret = pmbus_read_word_data(client, 0,
59 MAX8688_MFR_TEMPERATURE_PEAK);
60 break;
61 case PMBUS_VIRT_RESET_VOUT_HISTORY:
62 case PMBUS_VIRT_RESET_IOUT_HISTORY:
63 case PMBUS_VIRT_RESET_TEMP_HISTORY:
64 ret = 0;
65 break;
66 default:
67 ret = -ENODATA;
68 break;
69 }
70 return ret;
71}
72
73static int max8688_write_word_data(struct i2c_client *client, int page, int reg,
74 u16 word)
75{
76 int ret;
77
78 switch (reg) {
79 case PMBUS_VIRT_RESET_VOUT_HISTORY:
80 ret = pmbus_write_word_data(client, 0, MAX8688_MFR_VOUT_PEAK,
81 0);
82 break;
83 case PMBUS_VIRT_RESET_IOUT_HISTORY:
84 ret = pmbus_write_word_data(client, 0, MAX8688_MFR_IOUT_PEAK,
85 0);
86 break;
87 case PMBUS_VIRT_RESET_TEMP_HISTORY:
88 ret = pmbus_write_word_data(client, 0,
89 MAX8688_MFR_TEMPERATURE_PEAK,
90 0xffff);
91 break;
92 default:
93 ret = -ENODATA;
94 break;
95 }
96 return ret;
97}
98
40static int max8688_read_byte_data(struct i2c_client *client, int page, int reg) 99static int max8688_read_byte_data(struct i2c_client *client, int page, int reg)
41{ 100{
42 int ret = 0; 101 int ret = 0;
@@ -91,10 +150,10 @@ static int max8688_read_byte_data(struct i2c_client *client, int page, int reg)
91 150
92static struct pmbus_driver_info max8688_info = { 151static struct pmbus_driver_info max8688_info = {
93 .pages = 1, 152 .pages = 1,
94 .direct[PSC_VOLTAGE_IN] = true, 153 .format[PSC_VOLTAGE_IN] = direct,
95 .direct[PSC_VOLTAGE_OUT] = true, 154 .format[PSC_VOLTAGE_OUT] = direct,
96 .direct[PSC_TEMPERATURE] = true, 155 .format[PSC_TEMPERATURE] = direct,
97 .direct[PSC_CURRENT_OUT] = true, 156 .format[PSC_CURRENT_OUT] = direct,
98 .m[PSC_VOLTAGE_IN] = 19995, 157 .m[PSC_VOLTAGE_IN] = 19995,
99 .b[PSC_VOLTAGE_IN] = 0, 158 .b[PSC_VOLTAGE_IN] = 0,
100 .R[PSC_VOLTAGE_IN] = -1, 159 .R[PSC_VOLTAGE_IN] = -1,
@@ -111,6 +170,8 @@ static struct pmbus_driver_info max8688_info = {
111 | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT 170 | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT
112 | PMBUS_HAVE_STATUS_TEMP, 171 | PMBUS_HAVE_STATUS_TEMP,
113 .read_byte_data = max8688_read_byte_data, 172 .read_byte_data = max8688_read_byte_data,
173 .read_word_data = max8688_read_word_data,
174 .write_word_data = max8688_write_word_data,
114}; 175};
115 176
116static int max8688_probe(struct i2c_client *client, 177static int max8688_probe(struct i2c_client *client,
diff --git a/drivers/hwmon/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 9b1f0c37ef77..73de9f1f3194 100644
--- a/drivers/hwmon/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -96,6 +96,8 @@ static void pmbus_find_sensor_groups(struct i2c_client *client,
96static int pmbus_identify(struct i2c_client *client, 96static int pmbus_identify(struct i2c_client *client,
97 struct pmbus_driver_info *info) 97 struct pmbus_driver_info *info)
98{ 98{
99 int ret = 0;
100
99 if (!info->pages) { 101 if (!info->pages) {
100 /* 102 /*
101 * Check if the PAGE command is supported. If it is, 103 * Check if the PAGE command is supported. If it is,
@@ -117,6 +119,27 @@ static int pmbus_identify(struct i2c_client *client,
117 } 119 }
118 } 120 }
119 121
122 if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
123 int vout_mode;
124
125 vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
126 if (vout_mode >= 0 && vout_mode != 0xff) {
127 switch (vout_mode >> 5) {
128 case 0:
129 break;
130 case 1:
131 info->format[PSC_VOLTAGE_OUT] = vid;
132 break;
133 case 2:
134 info->format[PSC_VOLTAGE_OUT] = direct;
135 break;
136 default:
137 ret = -ENODEV;
138 goto abort;
139 }
140 }
141 }
142
120 /* 143 /*
121 * We should check if the COEFFICIENTS register is supported. 144 * We should check if the COEFFICIENTS register is supported.
122 * If it is, and the chip is configured for direct mode, we can read 145 * If it is, and the chip is configured for direct mode, we can read
@@ -125,13 +148,18 @@ static int pmbus_identify(struct i2c_client *client,
125 * 148 *
126 * To do this, we will need access to a chip which actually supports the 149 * To do this, we will need access to a chip which actually supports the
127 * COEFFICIENTS command, since the command is too complex to implement 150 * COEFFICIENTS command, since the command is too complex to implement
128 * without testing it. 151 * without testing it. Until then, abort if a chip configured for direct
152 * mode was detected.
129 */ 153 */
154 if (info->format[PSC_VOLTAGE_OUT] == direct) {
155 ret = -ENODEV;
156 goto abort;
157 }
130 158
131 /* Try to find sensor groups */ 159 /* Try to find sensor groups */
132 pmbus_find_sensor_groups(client, info); 160 pmbus_find_sensor_groups(client, info);
133 161abort:
134 return 0; 162 return ret;
135} 163}
136 164
137static int pmbus_probe(struct i2c_client *client, 165static int pmbus_probe(struct i2c_client *client,
@@ -172,11 +200,14 @@ static int pmbus_remove(struct i2c_client *client)
172 * Use driver_data to set the number of pages supported by the chip. 200 * Use driver_data to set the number of pages supported by the chip.
173 */ 201 */
174static const struct i2c_device_id pmbus_id[] = { 202static const struct i2c_device_id pmbus_id[] = {
203 {"adp4000", 1},
175 {"bmr450", 1}, 204 {"bmr450", 1},
176 {"bmr451", 1}, 205 {"bmr451", 1},
177 {"bmr453", 1}, 206 {"bmr453", 1},
178 {"bmr454", 1}, 207 {"bmr454", 1},
179 {"ltc2978", 8}, 208 {"ltc2978", 8},
209 {"ncp4200", 1},
210 {"ncp4208", 1},
180 {"pmbus", 0}, 211 {"pmbus", 0},
181 {} 212 {}
182}; 213};
diff --git a/drivers/hwmon/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 50647ab7235a..a6ae20ffef6b 100644
--- a/drivers/hwmon/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -126,6 +126,42 @@
126#define PMBUS_MFR_SERIAL 0x9E 126#define PMBUS_MFR_SERIAL 0x9E
127 127
128/* 128/*
129 * Virtual registers.
130 * Useful to support attributes which are not supported by standard PMBus
131 * registers but exist as manufacturer specific registers on individual chips.
132 * Must be mapped to real registers in device specific code.
133 *
134 * Semantics:
135 * Virtual registers are all word size.
136 * READ registers are read-only; writes are either ignored or return an error.
137 * RESET registers are read/write. Reading returns zero (used for detection),
138 * writing any value causes the associated history to be reset.
139 */
140#define PMBUS_VIRT_BASE 0x100
141#define PMBUS_VIRT_READ_TEMP_MIN (PMBUS_VIRT_BASE + 0)
142#define PMBUS_VIRT_READ_TEMP_MAX (PMBUS_VIRT_BASE + 1)
143#define PMBUS_VIRT_RESET_TEMP_HISTORY (PMBUS_VIRT_BASE + 2)
144#define PMBUS_VIRT_READ_VIN_AVG (PMBUS_VIRT_BASE + 3)
145#define PMBUS_VIRT_READ_VIN_MIN (PMBUS_VIRT_BASE + 4)
146#define PMBUS_VIRT_READ_VIN_MAX (PMBUS_VIRT_BASE + 5)
147#define PMBUS_VIRT_RESET_VIN_HISTORY (PMBUS_VIRT_BASE + 6)
148#define PMBUS_VIRT_READ_IIN_AVG (PMBUS_VIRT_BASE + 7)
149#define PMBUS_VIRT_READ_IIN_MIN (PMBUS_VIRT_BASE + 8)
150#define PMBUS_VIRT_READ_IIN_MAX (PMBUS_VIRT_BASE + 9)
151#define PMBUS_VIRT_RESET_IIN_HISTORY (PMBUS_VIRT_BASE + 10)
152#define PMBUS_VIRT_READ_PIN_AVG (PMBUS_VIRT_BASE + 11)
153#define PMBUS_VIRT_READ_PIN_MAX (PMBUS_VIRT_BASE + 12)
154#define PMBUS_VIRT_RESET_PIN_HISTORY (PMBUS_VIRT_BASE + 13)
155#define PMBUS_VIRT_READ_VOUT_AVG (PMBUS_VIRT_BASE + 14)
156#define PMBUS_VIRT_READ_VOUT_MIN (PMBUS_VIRT_BASE + 15)
157#define PMBUS_VIRT_READ_VOUT_MAX (PMBUS_VIRT_BASE + 16)
158#define PMBUS_VIRT_RESET_VOUT_HISTORY (PMBUS_VIRT_BASE + 17)
159#define PMBUS_VIRT_READ_IOUT_AVG (PMBUS_VIRT_BASE + 18)
160#define PMBUS_VIRT_READ_IOUT_MIN (PMBUS_VIRT_BASE + 19)
161#define PMBUS_VIRT_READ_IOUT_MAX (PMBUS_VIRT_BASE + 20)
162#define PMBUS_VIRT_RESET_IOUT_HISTORY (PMBUS_VIRT_BASE + 21)
163
164/*
129 * CAPABILITY 165 * CAPABILITY
130 */ 166 */
131#define PB_CAPABILITY_SMBALERT (1<<4) 167#define PB_CAPABILITY_SMBALERT (1<<4)
@@ -266,11 +302,11 @@ enum pmbus_sensor_classes {
266#define PMBUS_HAVE_STATUS_FAN12 (1 << 16) 302#define PMBUS_HAVE_STATUS_FAN12 (1 << 16)
267#define PMBUS_HAVE_STATUS_FAN34 (1 << 17) 303#define PMBUS_HAVE_STATUS_FAN34 (1 << 17)
268 304
305enum pmbus_data_format { linear = 0, direct, vid };
306
269struct pmbus_driver_info { 307struct pmbus_driver_info {
270 int pages; /* Total number of pages */ 308 int pages; /* Total number of pages */
271 bool direct[PSC_NUM_CLASSES]; 309 enum pmbus_data_format format[PSC_NUM_CLASSES];
272 /* true if device uses direct data format
273 for the given sensor class */
274 /* 310 /*
275 * Support one set of coefficients for each sensor type 311 * Support one set of coefficients for each sensor type
276 * Used for chips providing data in direct mode. 312 * Used for chips providing data in direct mode.
@@ -286,6 +322,10 @@ struct pmbus_driver_info {
286 * necessary. 322 * necessary.
287 */ 323 */
288 int (*read_byte_data)(struct i2c_client *client, int page, int reg); 324 int (*read_byte_data)(struct i2c_client *client, int page, int reg);
325 int (*read_word_data)(struct i2c_client *client, int page, int reg);
326 int (*write_word_data)(struct i2c_client *client, int page, int reg,
327 u16 word);
328 int (*write_byte)(struct i2c_client *client, int page, u8 value);
289 /* 329 /*
290 * The identify function determines supported PMBus functionality. 330 * The identify function determines supported PMBus functionality.
291 * This function is only necessary if a chip driver supports multiple 331 * This function is only necessary if a chip driver supports multiple
@@ -299,6 +339,9 @@ struct pmbus_driver_info {
299 339
300int pmbus_set_page(struct i2c_client *client, u8 page); 340int pmbus_set_page(struct i2c_client *client, u8 page);
301int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg); 341int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
342int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word);
343int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg);
344int pmbus_write_byte(struct i2c_client *client, int page, u8 value);
302void pmbus_clear_faults(struct i2c_client *client); 345void pmbus_clear_faults(struct i2c_client *client);
303bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg); 346bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg);
304bool pmbus_check_word_register(struct i2c_client *client, int page, int reg); 347bool pmbus_check_word_register(struct i2c_client *client, int page, int reg);
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 8e31a8e2c746..a561c3a0e916 100644
--- a/drivers/hwmon/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -33,14 +33,18 @@
33/* 33/*
34 * Constants needed to determine number of sensors, booleans, and labels. 34 * Constants needed to determine number of sensors, booleans, and labels.
35 */ 35 */
36#define PMBUS_MAX_INPUT_SENSORS 11 /* 6*volt, 3*curr, 2*power */ 36#define PMBUS_MAX_INPUT_SENSORS 22 /* 10*volt, 7*curr, 5*power */
37#define PMBUS_VOUT_SENSORS_PER_PAGE 5 /* input, min, max, lcrit, 37#define PMBUS_VOUT_SENSORS_PER_PAGE 9 /* input, min, max, lcrit,
38 crit */ 38 crit, lowest, highest, avg,
39#define PMBUS_IOUT_SENSORS_PER_PAGE 4 /* input, min, max, crit */ 39 reset */
40#define PMBUS_IOUT_SENSORS_PER_PAGE 8 /* input, min, max, crit,
41 lowest, highest, avg,
42 reset */
40#define PMBUS_POUT_SENSORS_PER_PAGE 4 /* input, cap, max, crit */ 43#define PMBUS_POUT_SENSORS_PER_PAGE 4 /* input, cap, max, crit */
41#define PMBUS_MAX_SENSORS_PER_FAN 1 /* input */ 44#define PMBUS_MAX_SENSORS_PER_FAN 1 /* input */
42#define PMBUS_MAX_SENSORS_PER_TEMP 5 /* input, min, max, lcrit, 45#define PMBUS_MAX_SENSORS_PER_TEMP 8 /* input, min, max, lcrit,
43 crit */ 46 crit, lowest, highest,
47 reset */
44 48
45#define PMBUS_MAX_INPUT_BOOLEANS 7 /* v: min_alarm, max_alarm, 49#define PMBUS_MAX_INPUT_BOOLEANS 7 /* v: min_alarm, max_alarm,
46 lcrit_alarm, crit_alarm; 50 lcrit_alarm, crit_alarm;
@@ -74,11 +78,13 @@
74#define PB_STATUS_INPUT_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES) 78#define PB_STATUS_INPUT_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES)
75#define PB_STATUS_TEMP_BASE (PB_STATUS_INPUT_BASE + 1) 79#define PB_STATUS_TEMP_BASE (PB_STATUS_INPUT_BASE + 1)
76 80
81#define PMBUS_NAME_SIZE 24
82
77struct pmbus_sensor { 83struct pmbus_sensor {
78 char name[I2C_NAME_SIZE]; /* sysfs sensor name */ 84 char name[PMBUS_NAME_SIZE]; /* sysfs sensor name */
79 struct sensor_device_attribute attribute; 85 struct sensor_device_attribute attribute;
80 u8 page; /* page number */ 86 u8 page; /* page number */
81 u8 reg; /* register */ 87 u16 reg; /* register */
82 enum pmbus_sensor_classes class; /* sensor class */ 88 enum pmbus_sensor_classes class; /* sensor class */
83 bool update; /* runtime sensor update needed */ 89 bool update; /* runtime sensor update needed */
84 int data; /* Sensor data. 90 int data; /* Sensor data.
@@ -86,14 +92,14 @@ struct pmbus_sensor {
86}; 92};
87 93
88struct pmbus_boolean { 94struct pmbus_boolean {
89 char name[I2C_NAME_SIZE]; /* sysfs boolean name */ 95 char name[PMBUS_NAME_SIZE]; /* sysfs boolean name */
90 struct sensor_device_attribute attribute; 96 struct sensor_device_attribute attribute;
91}; 97};
92 98
93struct pmbus_label { 99struct pmbus_label {
94 char name[I2C_NAME_SIZE]; /* sysfs label name */ 100 char name[PMBUS_NAME_SIZE]; /* sysfs label name */
95 struct sensor_device_attribute attribute; 101 struct sensor_device_attribute attribute;
96 char label[I2C_NAME_SIZE]; /* label */ 102 char label[PMBUS_NAME_SIZE]; /* label */
97}; 103};
98 104
99struct pmbus_data { 105struct pmbus_data {
@@ -162,19 +168,39 @@ int pmbus_set_page(struct i2c_client *client, u8 page)
162} 168}
163EXPORT_SYMBOL_GPL(pmbus_set_page); 169EXPORT_SYMBOL_GPL(pmbus_set_page);
164 170
165static int pmbus_write_byte(struct i2c_client *client, u8 page, u8 value) 171int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
166{ 172{
167 int rv; 173 int rv;
168 174
169 rv = pmbus_set_page(client, page); 175 if (page >= 0) {
170 if (rv < 0) 176 rv = pmbus_set_page(client, page);
171 return rv; 177 if (rv < 0)
178 return rv;
179 }
172 180
173 return i2c_smbus_write_byte(client, value); 181 return i2c_smbus_write_byte(client, value);
174} 182}
183EXPORT_SYMBOL_GPL(pmbus_write_byte);
184
185/*
186 * _pmbus_write_byte() is similar to pmbus_write_byte(), but checks if
187 * a device specific mapping funcion exists and calls it if necessary.
188 */
189static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value)
190{
191 struct pmbus_data *data = i2c_get_clientdata(client);
192 const struct pmbus_driver_info *info = data->info;
193 int status;
194
195 if (info->write_byte) {
196 status = info->write_byte(client, page, value);
197 if (status != -ENODATA)
198 return status;
199 }
200 return pmbus_write_byte(client, page, value);
201}
175 202
176static int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, 203int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word)
177 u16 word)
178{ 204{
179 int rv; 205 int rv;
180 206
@@ -184,6 +210,28 @@ static int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg,
184 210
185 return i2c_smbus_write_word_data(client, reg, word); 211 return i2c_smbus_write_word_data(client, reg, word);
186} 212}
213EXPORT_SYMBOL_GPL(pmbus_write_word_data);
214
215/*
216 * _pmbus_write_word_data() is similar to pmbus_write_word_data(), but checks if
217 * a device specific mapping function exists and calls it if necessary.
218 */
219static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg,
220 u16 word)
221{
222 struct pmbus_data *data = i2c_get_clientdata(client);
223 const struct pmbus_driver_info *info = data->info;
224 int status;
225
226 if (info->write_word_data) {
227 status = info->write_word_data(client, page, reg, word);
228 if (status != -ENODATA)
229 return status;
230 }
231 if (reg >= PMBUS_VIRT_BASE)
232 return -EINVAL;
233 return pmbus_write_word_data(client, page, reg, word);
234}
187 235
188int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg) 236int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg)
189{ 237{
@@ -197,20 +245,61 @@ int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg)
197} 245}
198EXPORT_SYMBOL_GPL(pmbus_read_word_data); 246EXPORT_SYMBOL_GPL(pmbus_read_word_data);
199 247
200static int pmbus_read_byte_data(struct i2c_client *client, u8 page, u8 reg) 248/*
249 * _pmbus_read_word_data() is similar to pmbus_read_word_data(), but checks if
250 * a device specific mapping function exists and calls it if necessary.
251 */
252static int _pmbus_read_word_data(struct i2c_client *client, int page, int reg)
253{
254 struct pmbus_data *data = i2c_get_clientdata(client);
255 const struct pmbus_driver_info *info = data->info;
256 int status;
257
258 if (info->read_word_data) {
259 status = info->read_word_data(client, page, reg);
260 if (status != -ENODATA)
261 return status;
262 }
263 if (reg >= PMBUS_VIRT_BASE)
264 return -EINVAL;
265 return pmbus_read_word_data(client, page, reg);
266}
267
268int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
201{ 269{
202 int rv; 270 int rv;
203 271
204 rv = pmbus_set_page(client, page); 272 if (page >= 0) {
205 if (rv < 0) 273 rv = pmbus_set_page(client, page);
206 return rv; 274 if (rv < 0)
275 return rv;
276 }
207 277
208 return i2c_smbus_read_byte_data(client, reg); 278 return i2c_smbus_read_byte_data(client, reg);
209} 279}
280EXPORT_SYMBOL_GPL(pmbus_read_byte_data);
281
282/*
283 * _pmbus_read_byte_data() is similar to pmbus_read_byte_data(), but checks if
284 * a device specific mapping function exists and calls it if necessary.
285 */
286static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg)
287{
288 struct pmbus_data *data = i2c_get_clientdata(client);
289 const struct pmbus_driver_info *info = data->info;
290 int status;
291
292 if (info->read_byte_data) {
293 status = info->read_byte_data(client, page, reg);
294 if (status != -ENODATA)
295 return status;
296 }
297 return pmbus_read_byte_data(client, page, reg);
298}
210 299
211static void pmbus_clear_fault_page(struct i2c_client *client, int page) 300static void pmbus_clear_fault_page(struct i2c_client *client, int page)
212{ 301{
213 pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS); 302 _pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
214} 303}
215 304
216void pmbus_clear_faults(struct i2c_client *client) 305void pmbus_clear_faults(struct i2c_client *client)
@@ -223,13 +312,13 @@ void pmbus_clear_faults(struct i2c_client *client)
223} 312}
224EXPORT_SYMBOL_GPL(pmbus_clear_faults); 313EXPORT_SYMBOL_GPL(pmbus_clear_faults);
225 314
226static int pmbus_check_status_cml(struct i2c_client *client, int page) 315static int pmbus_check_status_cml(struct i2c_client *client)
227{ 316{
228 int status, status2; 317 int status, status2;
229 318
230 status = pmbus_read_byte_data(client, page, PMBUS_STATUS_BYTE); 319 status = pmbus_read_byte_data(client, -1, PMBUS_STATUS_BYTE);
231 if (status < 0 || (status & PB_STATUS_CML)) { 320 if (status < 0 || (status & PB_STATUS_CML)) {
232 status2 = pmbus_read_byte_data(client, page, PMBUS_STATUS_CML); 321 status2 = pmbus_read_byte_data(client, -1, PMBUS_STATUS_CML);
233 if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND)) 322 if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND))
234 return -EINVAL; 323 return -EINVAL;
235 } 324 }
@@ -241,10 +330,10 @@ bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
241 int rv; 330 int rv;
242 struct pmbus_data *data = i2c_get_clientdata(client); 331 struct pmbus_data *data = i2c_get_clientdata(client);
243 332
244 rv = pmbus_read_byte_data(client, page, reg); 333 rv = _pmbus_read_byte_data(client, page, reg);
245 if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK)) 334 if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
246 rv = pmbus_check_status_cml(client, page); 335 rv = pmbus_check_status_cml(client);
247 pmbus_clear_fault_page(client, page); 336 pmbus_clear_fault_page(client, -1);
248 return rv >= 0; 337 return rv >= 0;
249} 338}
250EXPORT_SYMBOL_GPL(pmbus_check_byte_register); 339EXPORT_SYMBOL_GPL(pmbus_check_byte_register);
@@ -254,10 +343,10 @@ bool pmbus_check_word_register(struct i2c_client *client, int page, int reg)
254 int rv; 343 int rv;
255 struct pmbus_data *data = i2c_get_clientdata(client); 344 struct pmbus_data *data = i2c_get_clientdata(client);
256 345
257 rv = pmbus_read_word_data(client, page, reg); 346 rv = _pmbus_read_word_data(client, page, reg);
258 if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK)) 347 if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
259 rv = pmbus_check_status_cml(client, page); 348 rv = pmbus_check_status_cml(client);
260 pmbus_clear_fault_page(client, page); 349 pmbus_clear_fault_page(client, -1);
261 return rv >= 0; 350 return rv >= 0;
262} 351}
263EXPORT_SYMBOL_GPL(pmbus_check_word_register); 352EXPORT_SYMBOL_GPL(pmbus_check_word_register);
@@ -270,24 +359,6 @@ const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client)
270} 359}
271EXPORT_SYMBOL_GPL(pmbus_get_driver_info); 360EXPORT_SYMBOL_GPL(pmbus_get_driver_info);
272 361
273/*
274 * _pmbus_read_byte_data() is similar to pmbus_read_byte_data(), but checks if
275 * a device specific mapping funcion exists and calls it if necessary.
276 */
277static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg)
278{
279 struct pmbus_data *data = i2c_get_clientdata(client);
280 const struct pmbus_driver_info *info = data->info;
281 int status;
282
283 if (info->read_byte_data) {
284 status = info->read_byte_data(client, page, reg);
285 if (status != -ENODATA)
286 return status;
287 }
288 return pmbus_read_byte_data(client, page, reg);
289}
290
291static struct pmbus_data *pmbus_update_device(struct device *dev) 362static struct pmbus_data *pmbus_update_device(struct device *dev)
292{ 363{
293 struct i2c_client *client = to_i2c_client(dev); 364 struct i2c_client *client = to_i2c_client(dev);
@@ -347,8 +418,9 @@ static struct pmbus_data *pmbus_update_device(struct device *dev)
347 418
348 if (!data->valid || sensor->update) 419 if (!data->valid || sensor->update)
349 sensor->data 420 sensor->data
350 = pmbus_read_word_data(client, sensor->page, 421 = _pmbus_read_word_data(client,
351 sensor->reg); 422 sensor->page,
423 sensor->reg);
352 } 424 }
353 pmbus_clear_faults(client); 425 pmbus_clear_faults(client);
354 data->last_updated = jiffies; 426 data->last_updated = jiffies;
@@ -443,15 +515,37 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
443 return (val - b) / m; 515 return (val - b) / m;
444} 516}
445 517
518/*
519 * Convert VID sensor values to milli- or micro-units
520 * depending on sensor type.
521 * We currently only support VR11.
522 */
523static long pmbus_reg2data_vid(struct pmbus_data *data,
524 struct pmbus_sensor *sensor)
525{
526 long val = sensor->data;
527
528 if (val < 0x02 || val > 0xb2)
529 return 0;
530 return DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100);
531}
532
446static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor) 533static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
447{ 534{
448 long val; 535 long val;
449 536
450 if (data->info->direct[sensor->class]) 537 switch (data->info->format[sensor->class]) {
538 case direct:
451 val = pmbus_reg2data_direct(data, sensor); 539 val = pmbus_reg2data_direct(data, sensor);
452 else 540 break;
541 case vid:
542 val = pmbus_reg2data_vid(data, sensor);
543 break;
544 case linear:
545 default:
453 val = pmbus_reg2data_linear(data, sensor); 546 val = pmbus_reg2data_linear(data, sensor);
454 547 break;
548 }
455 return val; 549 return val;
456} 550}
457 551
@@ -561,16 +655,31 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
561 return val; 655 return val;
562} 656}
563 657
658static u16 pmbus_data2reg_vid(struct pmbus_data *data,
659 enum pmbus_sensor_classes class, long val)
660{
661 val = SENSORS_LIMIT(val, 500, 1600);
662
663 return 2 + DIV_ROUND_CLOSEST((1600 - val) * 100, 625);
664}
665
564static u16 pmbus_data2reg(struct pmbus_data *data, 666static u16 pmbus_data2reg(struct pmbus_data *data,
565 enum pmbus_sensor_classes class, long val) 667 enum pmbus_sensor_classes class, long val)
566{ 668{
567 u16 regval; 669 u16 regval;
568 670
569 if (data->info->direct[class]) 671 switch (data->info->format[class]) {
672 case direct:
570 regval = pmbus_data2reg_direct(data, class, val); 673 regval = pmbus_data2reg_direct(data, class, val);
571 else 674 break;
675 case vid:
676 regval = pmbus_data2reg_vid(data, class, val);
677 break;
678 case linear:
679 default:
572 regval = pmbus_data2reg_linear(data, class, val); 680 regval = pmbus_data2reg_linear(data, class, val);
573 681 break;
682 }
574 return regval; 683 return regval;
575} 684}
576 685
@@ -682,7 +791,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
682 791
683 mutex_lock(&data->update_lock); 792 mutex_lock(&data->update_lock);
684 regval = pmbus_data2reg(data, sensor->class, val); 793 regval = pmbus_data2reg(data, sensor->class, val);
685 ret = pmbus_write_word_data(client, sensor->page, sensor->reg, regval); 794 ret = _pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
686 if (ret < 0) 795 if (ret < 0)
687 rv = ret; 796 rv = ret;
688 else 797 else
@@ -867,7 +976,8 @@ static void pmbus_find_max_attr(struct i2c_client *client,
867 * and its associated alarm attribute. 976 * and its associated alarm attribute.
868 */ 977 */
869struct pmbus_limit_attr { 978struct pmbus_limit_attr {
870 u8 reg; /* Limit register */ 979 u16 reg; /* Limit register */
980 bool update; /* True if register needs updates */
871 const char *attr; /* Attribute name */ 981 const char *attr; /* Attribute name */
872 const char *alarm; /* Alarm attribute name */ 982 const char *alarm; /* Alarm attribute name */
873 u32 sbit; /* Alarm attribute status bit */ 983 u32 sbit; /* Alarm attribute status bit */
@@ -912,9 +1022,10 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client,
912 if (pmbus_check_word_register(client, page, l->reg)) { 1022 if (pmbus_check_word_register(client, page, l->reg)) {
913 cindex = data->num_sensors; 1023 cindex = data->num_sensors;
914 pmbus_add_sensor(data, name, l->attr, index, page, 1024 pmbus_add_sensor(data, name, l->attr, index, page,
915 l->reg, attr->class, attr->update, 1025 l->reg, attr->class,
1026 attr->update || l->update,
916 false); 1027 false);
917 if (info->func[page] & attr->sfunc) { 1028 if (l->sbit && (info->func[page] & attr->sfunc)) {
918 if (attr->compare) { 1029 if (attr->compare) {
919 pmbus_add_boolean_cmp(data, name, 1030 pmbus_add_boolean_cmp(data, name,
920 l->alarm, index, 1031 l->alarm, index,
@@ -953,9 +1064,11 @@ static void pmbus_add_sensor_attrs_one(struct i2c_client *client,
953 index, page, cbase, attr); 1064 index, page, cbase, attr);
954 /* 1065 /*
955 * Add generic alarm attribute only if there are no individual 1066 * Add generic alarm attribute only if there are no individual
956 * alarm attributes, and if there is a global alarm bit. 1067 * alarm attributes, if there is a global alarm bit, and if
1068 * the generic status register for this page is accessible.
957 */ 1069 */
958 if (!have_alarm && attr->gbit) 1070 if (!have_alarm && attr->gbit &&
1071 pmbus_check_byte_register(client, page, PMBUS_STATUS_BYTE))
959 pmbus_add_boolean_reg(data, name, "alarm", index, 1072 pmbus_add_boolean_reg(data, name, "alarm", index,
960 PB_STATUS_BASE + page, 1073 PB_STATUS_BASE + page,
961 attr->gbit); 1074 attr->gbit);
@@ -1008,6 +1121,21 @@ static const struct pmbus_limit_attr vin_limit_attrs[] = {
1008 .attr = "crit", 1121 .attr = "crit",
1009 .alarm = "crit_alarm", 1122 .alarm = "crit_alarm",
1010 .sbit = PB_VOLTAGE_OV_FAULT, 1123 .sbit = PB_VOLTAGE_OV_FAULT,
1124 }, {
1125 .reg = PMBUS_VIRT_READ_VIN_AVG,
1126 .update = true,
1127 .attr = "average",
1128 }, {
1129 .reg = PMBUS_VIRT_READ_VIN_MIN,
1130 .update = true,
1131 .attr = "lowest",
1132 }, {
1133 .reg = PMBUS_VIRT_READ_VIN_MAX,
1134 .update = true,
1135 .attr = "highest",
1136 }, {
1137 .reg = PMBUS_VIRT_RESET_VIN_HISTORY,
1138 .attr = "reset_history",
1011 }, 1139 },
1012}; 1140};
1013 1141
@@ -1032,6 +1160,21 @@ static const struct pmbus_limit_attr vout_limit_attrs[] = {
1032 .attr = "crit", 1160 .attr = "crit",
1033 .alarm = "crit_alarm", 1161 .alarm = "crit_alarm",
1034 .sbit = PB_VOLTAGE_OV_FAULT, 1162 .sbit = PB_VOLTAGE_OV_FAULT,
1163 }, {
1164 .reg = PMBUS_VIRT_READ_VOUT_AVG,
1165 .update = true,
1166 .attr = "average",
1167 }, {
1168 .reg = PMBUS_VIRT_READ_VOUT_MIN,
1169 .update = true,
1170 .attr = "lowest",
1171 }, {
1172 .reg = PMBUS_VIRT_READ_VOUT_MAX,
1173 .update = true,
1174 .attr = "highest",
1175 }, {
1176 .reg = PMBUS_VIRT_RESET_VOUT_HISTORY,
1177 .attr = "reset_history",
1035 } 1178 }
1036}; 1179};
1037 1180
@@ -1078,6 +1221,21 @@ static const struct pmbus_limit_attr iin_limit_attrs[] = {
1078 .attr = "crit", 1221 .attr = "crit",
1079 .alarm = "crit_alarm", 1222 .alarm = "crit_alarm",
1080 .sbit = PB_IIN_OC_FAULT, 1223 .sbit = PB_IIN_OC_FAULT,
1224 }, {
1225 .reg = PMBUS_VIRT_READ_IIN_AVG,
1226 .update = true,
1227 .attr = "average",
1228 }, {
1229 .reg = PMBUS_VIRT_READ_IIN_MIN,
1230 .update = true,
1231 .attr = "lowest",
1232 }, {
1233 .reg = PMBUS_VIRT_READ_IIN_MAX,
1234 .update = true,
1235 .attr = "highest",
1236 }, {
1237 .reg = PMBUS_VIRT_RESET_IIN_HISTORY,
1238 .attr = "reset_history",
1081 } 1239 }
1082}; 1240};
1083 1241
@@ -1097,6 +1255,21 @@ static const struct pmbus_limit_attr iout_limit_attrs[] = {
1097 .attr = "crit", 1255 .attr = "crit",
1098 .alarm = "crit_alarm", 1256 .alarm = "crit_alarm",
1099 .sbit = PB_IOUT_OC_FAULT, 1257 .sbit = PB_IOUT_OC_FAULT,
1258 }, {
1259 .reg = PMBUS_VIRT_READ_IOUT_AVG,
1260 .update = true,
1261 .attr = "average",
1262 }, {
1263 .reg = PMBUS_VIRT_READ_IOUT_MIN,
1264 .update = true,
1265 .attr = "lowest",
1266 }, {
1267 .reg = PMBUS_VIRT_READ_IOUT_MAX,
1268 .update = true,
1269 .attr = "highest",
1270 }, {
1271 .reg = PMBUS_VIRT_RESET_IOUT_HISTORY,
1272 .attr = "reset_history",
1100 } 1273 }
1101}; 1274};
1102 1275
@@ -1132,6 +1305,17 @@ static const struct pmbus_limit_attr pin_limit_attrs[] = {
1132 .attr = "max", 1305 .attr = "max",
1133 .alarm = "alarm", 1306 .alarm = "alarm",
1134 .sbit = PB_PIN_OP_WARNING, 1307 .sbit = PB_PIN_OP_WARNING,
1308 }, {
1309 .reg = PMBUS_VIRT_READ_PIN_AVG,
1310 .update = true,
1311 .attr = "average",
1312 }, {
1313 .reg = PMBUS_VIRT_READ_PIN_MAX,
1314 .update = true,
1315 .attr = "input_highest",
1316 }, {
1317 .reg = PMBUS_VIRT_RESET_PIN_HISTORY,
1318 .attr = "reset_history",
1135 } 1319 }
1136}; 1320};
1137 1321
@@ -1200,6 +1384,39 @@ static const struct pmbus_limit_attr temp_limit_attrs[] = {
1200 .attr = "crit", 1384 .attr = "crit",
1201 .alarm = "crit_alarm", 1385 .alarm = "crit_alarm",
1202 .sbit = PB_TEMP_OT_FAULT, 1386 .sbit = PB_TEMP_OT_FAULT,
1387 }, {
1388 .reg = PMBUS_VIRT_READ_TEMP_MIN,
1389 .attr = "lowest",
1390 }, {
1391 .reg = PMBUS_VIRT_READ_TEMP_MAX,
1392 .attr = "highest",
1393 }, {
1394 .reg = PMBUS_VIRT_RESET_TEMP_HISTORY,
1395 .attr = "reset_history",
1396 }
1397};
1398
1399static const struct pmbus_limit_attr temp_limit_attrs23[] = {
1400 {
1401 .reg = PMBUS_UT_WARN_LIMIT,
1402 .attr = "min",
1403 .alarm = "min_alarm",
1404 .sbit = PB_TEMP_UT_WARNING,
1405 }, {
1406 .reg = PMBUS_UT_FAULT_LIMIT,
1407 .attr = "lcrit",
1408 .alarm = "lcrit_alarm",
1409 .sbit = PB_TEMP_UT_FAULT,
1410 }, {
1411 .reg = PMBUS_OT_WARN_LIMIT,
1412 .attr = "max",
1413 .alarm = "max_alarm",
1414 .sbit = PB_TEMP_OT_WARNING,
1415 }, {
1416 .reg = PMBUS_OT_FAULT_LIMIT,
1417 .attr = "crit",
1418 .alarm = "crit_alarm",
1419 .sbit = PB_TEMP_OT_FAULT,
1203 } 1420 }
1204}; 1421};
1205 1422
@@ -1226,8 +1443,8 @@ static const struct pmbus_sensor_attr temp_attributes[] = {
1226 .sfunc = PMBUS_HAVE_STATUS_TEMP, 1443 .sfunc = PMBUS_HAVE_STATUS_TEMP,
1227 .sbase = PB_STATUS_TEMP_BASE, 1444 .sbase = PB_STATUS_TEMP_BASE,
1228 .gbit = PB_STATUS_TEMPERATURE, 1445 .gbit = PB_STATUS_TEMPERATURE,
1229 .limit = temp_limit_attrs, 1446 .limit = temp_limit_attrs23,
1230 .nlimit = ARRAY_SIZE(temp_limit_attrs), 1447 .nlimit = ARRAY_SIZE(temp_limit_attrs23),
1231 }, { 1448 }, {
1232 .reg = PMBUS_READ_TEMPERATURE_3, 1449 .reg = PMBUS_READ_TEMPERATURE_3,
1233 .class = PSC_TEMPERATURE, 1450 .class = PSC_TEMPERATURE,
@@ -1238,8 +1455,8 @@ static const struct pmbus_sensor_attr temp_attributes[] = {
1238 .sfunc = PMBUS_HAVE_STATUS_TEMP, 1455 .sfunc = PMBUS_HAVE_STATUS_TEMP,
1239 .sbase = PB_STATUS_TEMP_BASE, 1456 .sbase = PB_STATUS_TEMP_BASE,
1240 .gbit = PB_STATUS_TEMPERATURE, 1457 .gbit = PB_STATUS_TEMPERATURE,
1241 .limit = temp_limit_attrs, 1458 .limit = temp_limit_attrs23,
1242 .nlimit = ARRAY_SIZE(temp_limit_attrs), 1459 .nlimit = ARRAY_SIZE(temp_limit_attrs23),
1243 } 1460 }
1244}; 1461};
1245 1462
@@ -1380,7 +1597,7 @@ static int pmbus_identify_common(struct i2c_client *client,
1380 */ 1597 */
1381 switch (vout_mode >> 5) { 1598 switch (vout_mode >> 5) {
1382 case 0: /* linear mode */ 1599 case 0: /* linear mode */
1383 if (data->info->direct[PSC_VOLTAGE_OUT]) 1600 if (data->info->format[PSC_VOLTAGE_OUT] != linear)
1384 return -ENODEV; 1601 return -ENODEV;
1385 1602
1386 exponent = vout_mode & 0x1f; 1603 exponent = vout_mode & 0x1f;
@@ -1389,8 +1606,12 @@ static int pmbus_identify_common(struct i2c_client *client,
1389 exponent |= ~0x1f; 1606 exponent |= ~0x1f;
1390 data->exponent = exponent; 1607 data->exponent = exponent;
1391 break; 1608 break;
1609 case 1: /* VID mode */
1610 if (data->info->format[PSC_VOLTAGE_OUT] != vid)
1611 return -ENODEV;
1612 break;
1392 case 2: /* direct mode */ 1613 case 2: /* direct mode */
1393 if (!data->info->direct[PSC_VOLTAGE_OUT]) 1614 if (data->info->format[PSC_VOLTAGE_OUT] != direct)
1394 return -ENODEV; 1615 return -ENODEV;
1395 break; 1616 break;
1396 default: 1617 default:
@@ -1457,18 +1678,6 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
1457 ret = -EINVAL; 1678 ret = -EINVAL;
1458 goto out_data; 1679 goto out_data;
1459 } 1680 }
1460 /*
1461 * Bail out if more than one page was configured, but we can not
1462 * select the highest page. This is an indication that the wrong
1463 * chip type was selected. Better bail out now than keep
1464 * returning errors later on.
1465 */
1466 if (info->pages > 1 && pmbus_set_page(client, info->pages - 1) < 0) {
1467 dev_err(&client->dev, "Failed to select page %d\n",
1468 info->pages - 1);
1469 ret = -EINVAL;
1470 goto out_data;
1471 }
1472 1681
1473 ret = pmbus_identify_common(client, data); 1682 ret = pmbus_identify_common(client, data);
1474 if (ret < 0) { 1683 if (ret < 0) {
diff --git a/drivers/hwmon/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index ace1c7319734..d0ddb60155c9 100644
--- a/drivers/hwmon/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -141,13 +141,11 @@ static int ucd9000_probe(struct i2c_client *client,
141 block_buffer[ret] = '\0'; 141 block_buffer[ret] = '\0';
142 dev_info(&client->dev, "Device ID %s\n", block_buffer); 142 dev_info(&client->dev, "Device ID %s\n", block_buffer);
143 143
144 mid = NULL; 144 for (mid = ucd9000_id; mid->name[0]; mid++) {
145 for (i = 0; i < ARRAY_SIZE(ucd9000_id); i++) {
146 mid = &ucd9000_id[i];
147 if (!strncasecmp(mid->name, block_buffer, strlen(mid->name))) 145 if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
148 break; 146 break;
149 } 147 }
150 if (!mid || !strlen(mid->name)) { 148 if (!mid->name[0]) {
151 dev_err(&client->dev, "Unsupported device\n"); 149 dev_err(&client->dev, "Unsupported device\n");
152 return -ENODEV; 150 return -ENODEV;
153 } 151 }
diff --git a/drivers/hwmon/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index ffcc1cf3609d..c65e9da707cc 100644
--- a/drivers/hwmon/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
@@ -68,13 +68,11 @@ static int ucd9200_probe(struct i2c_client *client,
68 block_buffer[ret] = '\0'; 68 block_buffer[ret] = '\0';
69 dev_info(&client->dev, "Device ID %s\n", block_buffer); 69 dev_info(&client->dev, "Device ID %s\n", block_buffer);
70 70
71 mid = NULL; 71 for (mid = ucd9200_id; mid->name[0]; mid++) {
72 for (i = 0; i < ARRAY_SIZE(ucd9200_id); i++) {
73 mid = &ucd9200_id[i];
74 if (!strncasecmp(mid->name, block_buffer, strlen(mid->name))) 72 if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
75 break; 73 break;
76 } 74 }
77 if (!mid || !strlen(mid->name)) { 75 if (!mid->name[0]) {
78 dev_err(&client->dev, "Unsupported device\n"); 76 dev_err(&client->dev, "Unsupported device\n");
79 return -ENODEV; 77 return -ENODEV;
80 } 78 }
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 0c731ca69f15..b228e09c5d05 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -146,6 +146,7 @@ struct i2c_nmk_client {
146 * @stop: stop condition 146 * @stop: stop condition
147 * @xfer_complete: acknowledge completion for a I2C message 147 * @xfer_complete: acknowledge completion for a I2C message
148 * @result: controller propogated result 148 * @result: controller propogated result
149 * @regulator: pointer to i2c regulator
149 * @busy: Busy doing transfer 150 * @busy: Busy doing transfer
150 */ 151 */
151struct nmk_i2c_dev { 152struct nmk_i2c_dev {
@@ -417,12 +418,12 @@ static int read_i2c(struct nmk_i2c_dev *dev)
417 writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, 418 writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,
418 dev->virtbase + I2C_IMSCR); 419 dev->virtbase + I2C_IMSCR);
419 420
420 timeout = wait_for_completion_interruptible_timeout( 421 timeout = wait_for_completion_timeout(
421 &dev->xfer_complete, dev->adap.timeout); 422 &dev->xfer_complete, dev->adap.timeout);
422 423
423 if (timeout < 0) { 424 if (timeout < 0) {
424 dev_err(&dev->pdev->dev, 425 dev_err(&dev->pdev->dev,
425 "wait_for_completion_interruptible_timeout" 426 "wait_for_completion_timeout"
426 "returned %d waiting for event\n", timeout); 427 "returned %d waiting for event\n", timeout);
427 status = timeout; 428 status = timeout;
428 } 429 }
@@ -504,12 +505,12 @@ static int write_i2c(struct nmk_i2c_dev *dev)
504 writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, 505 writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,
505 dev->virtbase + I2C_IMSCR); 506 dev->virtbase + I2C_IMSCR);
506 507
507 timeout = wait_for_completion_interruptible_timeout( 508 timeout = wait_for_completion_timeout(
508 &dev->xfer_complete, dev->adap.timeout); 509 &dev->xfer_complete, dev->adap.timeout);
509 510
510 if (timeout < 0) { 511 if (timeout < 0) {
511 dev_err(&dev->pdev->dev, 512 dev_err(&dev->pdev->dev,
512 "wait_for_completion_interruptible_timeout" 513 "wait_for_completion_timeout "
513 "returned %d waiting for event\n", timeout); 514 "returned %d waiting for event\n", timeout);
514 status = timeout; 515 status = timeout;
515 } 516 }
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 1a766cf74f6b..2dfb63176856 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1139,41 +1139,12 @@ omap_i2c_remove(struct platform_device *pdev)
1139 return 0; 1139 return 0;
1140} 1140}
1141 1141
1142#ifdef CONFIG_SUSPEND
1143static int omap_i2c_suspend(struct device *dev)
1144{
1145 if (!pm_runtime_suspended(dev))
1146 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
1147 dev->bus->pm->runtime_suspend(dev);
1148
1149 return 0;
1150}
1151
1152static int omap_i2c_resume(struct device *dev)
1153{
1154 if (!pm_runtime_suspended(dev))
1155 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
1156 dev->bus->pm->runtime_resume(dev);
1157
1158 return 0;
1159}
1160
1161static struct dev_pm_ops omap_i2c_pm_ops = {
1162 .suspend = omap_i2c_suspend,
1163 .resume = omap_i2c_resume,
1164};
1165#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
1166#else
1167#define OMAP_I2C_PM_OPS NULL
1168#endif
1169
1170static struct platform_driver omap_i2c_driver = { 1142static struct platform_driver omap_i2c_driver = {
1171 .probe = omap_i2c_probe, 1143 .probe = omap_i2c_probe,
1172 .remove = omap_i2c_remove, 1144 .remove = omap_i2c_remove,
1173 .driver = { 1145 .driver = {
1174 .name = "omap_i2c", 1146 .name = "omap_i2c",
1175 .owner = THIS_MODULE, 1147 .owner = THIS_MODULE,
1176 .pm = OMAP_I2C_PM_OPS,
1177 }, 1148 },
1178}; 1149};
1179 1150
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 6659d269b841..b73da6cd6f91 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -109,12 +109,15 @@ static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
109 return -EINVAL; 109 return -EINVAL;
110 } 110 }
111 sds = kzalloc(sizeof(*sds), GFP_KERNEL); 111 sds = kzalloc(sizeof(*sds), GFP_KERNEL);
112 if (!sds) 112 if (!sds) {
113 ret = -ENOMEM;
113 goto err_mem; 114 goto err_mem;
115 }
114 116
115 for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) { 117 for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
116 sds->pdev[i] = add_i2c_device(dev, i); 118 sds->pdev[i] = add_i2c_device(dev, i);
117 if (IS_ERR(sds->pdev[i])) { 119 if (IS_ERR(sds->pdev[i])) {
120 ret = PTR_ERR(sds->pdev[i]);
118 while (--i >= 0) 121 while (--i >= 0)
119 platform_device_unregister(sds->pdev[i]); 122 platform_device_unregister(sds->pdev[i]);
120 goto err_dev_add; 123 goto err_dev_add;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 2440b7411978..3c94c4a81a55 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -270,14 +270,30 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
270 270
271 /* Rounds down to not include partial word at the end of buf */ 271 /* Rounds down to not include partial word at the end of buf */
272 words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD; 272 words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
273 if (words_to_transfer > tx_fifo_avail)
274 words_to_transfer = tx_fifo_avail;
275 273
276 i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); 274 /* It's very common to have < 4 bytes, so optimize that case. */
277 275 if (words_to_transfer) {
278 buf += words_to_transfer * BYTES_PER_FIFO_WORD; 276 if (words_to_transfer > tx_fifo_avail)
279 buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD; 277 words_to_transfer = tx_fifo_avail;
280 tx_fifo_avail -= words_to_transfer; 278
279 /*
280 * Update state before writing to FIFO. If this casues us
281 * to finish writing all bytes (AKA buf_remaining goes to 0) we
282 * have a potential for an interrupt (PACKET_XFER_COMPLETE is
283 * not maskable). We need to make sure that the isr sees
284 * buf_remaining as 0 and doesn't call us back re-entrantly.
285 */
286 buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
287 tx_fifo_avail -= words_to_transfer;
288 i2c_dev->msg_buf_remaining = buf_remaining;
289 i2c_dev->msg_buf = buf +
290 words_to_transfer * BYTES_PER_FIFO_WORD;
291 barrier();
292
293 i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
294
295 buf += words_to_transfer * BYTES_PER_FIFO_WORD;
296 }
281 297
282 /* 298 /*
283 * If there is a partial word at the end of buf, handle it manually to 299 * If there is a partial word at the end of buf, handle it manually to
@@ -287,14 +303,15 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
287 if (tx_fifo_avail > 0 && buf_remaining > 0) { 303 if (tx_fifo_avail > 0 && buf_remaining > 0) {
288 BUG_ON(buf_remaining > 3); 304 BUG_ON(buf_remaining > 3);
289 memcpy(&val, buf, buf_remaining); 305 memcpy(&val, buf, buf_remaining);
306
307 /* Again update before writing to FIFO to make sure isr sees. */
308 i2c_dev->msg_buf_remaining = 0;
309 i2c_dev->msg_buf = NULL;
310 barrier();
311
290 i2c_writel(i2c_dev, val, I2C_TX_FIFO); 312 i2c_writel(i2c_dev, val, I2C_TX_FIFO);
291 buf_remaining = 0;
292 tx_fifo_avail--;
293 } 313 }
294 314
295 BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
296 i2c_dev->msg_buf_remaining = buf_remaining;
297 i2c_dev->msg_buf = buf;
298 return 0; 315 return 0;
299} 316}
300 317
@@ -411,9 +428,10 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
411 tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ); 428 tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
412 } 429 }
413 430
414 if ((status & I2C_INT_PACKET_XFER_COMPLETE) && 431 if (status & I2C_INT_PACKET_XFER_COMPLETE) {
415 !i2c_dev->msg_buf_remaining) 432 BUG_ON(i2c_dev->msg_buf_remaining);
416 complete(&i2c_dev->msg_complete); 433 complete(&i2c_dev->msg_complete);
434 }
417 435
418 i2c_writel(i2c_dev, status, I2C_INT_STATUS); 436 i2c_writel(i2c_dev, status, I2C_INT_STATUS);
419 if (i2c_dev->is_dvc) 437 if (i2c_dev->is_dvc)
@@ -531,7 +549,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
531 549
532static u32 tegra_i2c_func(struct i2c_adapter *adap) 550static u32 tegra_i2c_func(struct i2c_adapter *adap)
533{ 551{
534 return I2C_FUNC_I2C; 552 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
535} 553}
536 554
537static const struct i2c_algorithm tegra_i2c_algo = { 555static const struct i2c_algorithm tegra_i2c_algo = {
@@ -719,6 +737,17 @@ static int tegra_i2c_resume(struct platform_device *pdev)
719} 737}
720#endif 738#endif
721 739
740#if defined(CONFIG_OF)
741/* Match table for of_platform binding */
742static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
743 { .compatible = "nvidia,tegra20-i2c", },
744 {},
745};
746MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
747#else
748#define tegra_i2c_of_match NULL
749#endif
750
722static struct platform_driver tegra_i2c_driver = { 751static struct platform_driver tegra_i2c_driver = {
723 .probe = tegra_i2c_probe, 752 .probe = tegra_i2c_probe,
724 .remove = tegra_i2c_remove, 753 .remove = tegra_i2c_remove,
@@ -729,6 +758,7 @@ static struct platform_driver tegra_i2c_driver = {
729 .driver = { 758 .driver = {
730 .name = "tegra-i2c", 759 .name = "tegra-i2c",
731 .owner = THIS_MODULE, 760 .owner = THIS_MODULE,
761 .of_match_table = tegra_i2c_of_match,
732 }, 762 },
733}; 763};
734 764
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
index 3be60da52123..67cbcfa35122 100644
--- a/drivers/ide/cy82c693.c
+++ b/drivers/ide/cy82c693.c
@@ -141,6 +141,8 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
141 pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16); 141 pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16);
142 pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8); 142 pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8);
143 } 143 }
144 if (hwif->index > 0)
145 pci_dev_put(dev);
144} 146}
145 147
146static void __devinit init_iops_cy82c693(ide_hwif_t *hwif) 148static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index 542603b394e4..962693b10a1c 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -19,6 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/ata_platform.h> 20#include <linux/ata_platform.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/interrupt.h>
22#include <linux/io.h> 23#include <linux/io.h>
23 24
24static void __devinit plat_ide_setup_ports(struct ide_hw *hw, 25static void __devinit plat_ide_setup_ports(struct ide_hw *hw,
@@ -95,7 +96,10 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
95 plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start); 96 plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
96 hw.dev = &pdev->dev; 97 hw.dev = &pdev->dev;
97 98
98 d.irq_flags = res_irq->flags; 99 d.irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
100 if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
101 d.irq_flags |= IRQF_SHARED;
102
99 if (mmio) 103 if (mmio)
100 d.host_flags |= IDE_HFLAG_MMIO; 104 d.host_flags |= IDE_HFLAG_MMIO;
101 105
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 43f89ba0a908..fe89c4660d55 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -717,11 +717,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
717{ 717{
718 struct ipoib_dev_priv *priv = netdev_priv(dev); 718 struct ipoib_dev_priv *priv = netdev_priv(dev);
719 struct ipoib_neigh *neigh; 719 struct ipoib_neigh *neigh;
720 struct neighbour *n; 720 struct neighbour *n = NULL;
721 unsigned long flags; 721 unsigned long flags;
722 722
723 n = dst_get_neighbour(skb_dst(skb)); 723 if (likely(skb_dst(skb)))
724 if (likely(skb_dst(skb) && n)) { 724 n = dst_get_neighbour(skb_dst(skb));
725
726 if (likely(n)) {
725 if (unlikely(!*to_ipoib_neigh(n))) { 727 if (unlikely(!*to_ipoib_neigh(n))) {
726 ipoib_path_lookup(skb, dev); 728 ipoib_path_lookup(skb, dev);
727 return NETDEV_TX_OK; 729 return NETDEV_TX_OK;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 8db008de5392..9c61b9c2c597 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -101,13 +101,17 @@ iscsi_iser_recv(struct iscsi_conn *conn,
101 101
102 /* verify PDU length */ 102 /* verify PDU length */
103 datalen = ntoh24(hdr->dlength); 103 datalen = ntoh24(hdr->dlength);
104 if (datalen != rx_data_len) { 104 if (datalen > rx_data_len || (datalen + 4) < rx_data_len) {
105 printk(KERN_ERR "iscsi_iser: datalen %d (hdr) != %d (IB) \n", 105 iser_err("wrong datalen %d (hdr), %d (IB)\n",
106 datalen, rx_data_len); 106 datalen, rx_data_len);
107 rc = ISCSI_ERR_DATALEN; 107 rc = ISCSI_ERR_DATALEN;
108 goto error; 108 goto error;
109 } 109 }
110 110
111 if (datalen != rx_data_len)
112 iser_dbg("aligned datalen (%d) hdr, %d (IB)\n",
113 datalen, rx_data_len);
114
111 /* read AHS */ 115 /* read AHS */
112 ahslen = hdr->hlength * 4; 116 ahslen = hdr->hlength * 4;
113 117
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 342cbc1bdaae..db6f3ce9f3bf 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -89,7 +89,7 @@
89 } while (0) 89 } while (0)
90 90
91#define SHIFT_4K 12 91#define SHIFT_4K 12
92#define SIZE_4K (1UL << SHIFT_4K) 92#define SIZE_4K (1ULL << SHIFT_4K)
93#define MASK_4K (~(SIZE_4K-1)) 93#define MASK_4K (~(SIZE_4K-1))
94 94
95 /* support up to 512KB in one RDMA */ 95 /* support up to 512KB in one RDMA */
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 5745b7fe158c..f299de6b419b 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -412,7 +412,7 @@ int iser_send_control(struct iscsi_conn *conn,
412 memcpy(iser_conn->ib_conn->login_buf, task->data, 412 memcpy(iser_conn->ib_conn->login_buf, task->data,
413 task->data_count); 413 task->data_count);
414 tx_dsg->addr = iser_conn->ib_conn->login_dma; 414 tx_dsg->addr = iser_conn->ib_conn->login_dma;
415 tx_dsg->length = data_seg_len; 415 tx_dsg->length = task->data_count;
416 tx_dsg->lkey = device->mr->lkey; 416 tx_dsg->lkey = device->mr->lkey;
417 mdesc->num_sge = 2; 417 mdesc->num_sge = 2;
418 } 418 }
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 9882971827e6..358cd7ee905b 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -139,7 +139,7 @@ struct analog_port {
139#include <linux/i8253.h> 139#include <linux/i8253.h>
140 140
141#define GET_TIME(x) do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0) 141#define GET_TIME(x) do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0)
142#define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? CLOCK_TICK_RATE / HZ : 0))) 142#define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
143#define TIME_NAME (cpu_has_tsc?"TSC":"PIT") 143#define TIME_NAME (cpu_has_tsc?"TSC":"PIT")
144static unsigned int get_time_pit(void) 144static unsigned int get_time_pit(void)
145{ 145{
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index c8242dd190d0..aa17e024d803 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -20,6 +20,7 @@
20 * flag. 20 * flag.
21 */ 21 */
22 22
23#include <linux/module.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25#include <linux/clk.h> 26#include <linux/clk.h>
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index ce281d152275..67df91af8424 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -483,7 +483,7 @@ static int gpio_keys_get_devtree_pdata(struct device *dev,
483 483
484 buttons = kzalloc(pdata->nbuttons * (sizeof *buttons), GFP_KERNEL); 484 buttons = kzalloc(pdata->nbuttons * (sizeof *buttons), GFP_KERNEL);
485 if (!buttons) 485 if (!buttons)
486 return -ENODEV; 486 return -ENOMEM;
487 487
488 pp = NULL; 488 pp = NULL;
489 i = 0; 489 i = 0;
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index ab0acaf7fe8f..756348a7f93a 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -754,8 +754,11 @@ fail3:
754 device_remove_file(&client->dev, &dev_attr_disable_kp); 754 device_remove_file(&client->dev, &dev_attr_disable_kp);
755fail2: 755fail2:
756 while (--pwm >= 0) 756 while (--pwm >= 0)
757 if (lm->pwm[pwm].enabled) 757 if (lm->pwm[pwm].enabled) {
758 device_remove_file(lm->pwm[pwm].cdev.dev,
759 &dev_attr_time);
758 led_classdev_unregister(&lm->pwm[pwm].cdev); 760 led_classdev_unregister(&lm->pwm[pwm].cdev);
761 }
759fail1: 762fail1:
760 input_free_device(idev); 763 input_free_device(idev);
761 kfree(lm); 764 kfree(lm);
@@ -775,8 +778,10 @@ static int __devexit lm8323_remove(struct i2c_client *client)
775 device_remove_file(&lm->client->dev, &dev_attr_disable_kp); 778 device_remove_file(&lm->client->dev, &dev_attr_disable_kp);
776 779
777 for (i = 0; i < 3; i++) 780 for (i = 0; i < 3; i++)
778 if (lm->pwm[i].enabled) 781 if (lm->pwm[i].enabled) {
782 device_remove_file(lm->pwm[i].cdev.dev, &dev_attr_time);
779 led_classdev_unregister(&lm->pwm[i].cdev); 783 led_classdev_unregister(&lm->pwm[i].cdev);
784 }
780 785
781 kfree(lm); 786 kfree(lm);
782 787
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index da3828fc2c09..a5a77915c650 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -19,6 +19,7 @@
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 */ 20 */
21 21
22#include <linux/kernel.h>
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/input.h> 24#include <linux/input.h>
24#include <linux/platform_device.h> 25#include <linux/platform_device.h>
@@ -37,7 +38,7 @@
37#define KBC_ROW_SCAN_DLY 5 38#define KBC_ROW_SCAN_DLY 5
38 39
39/* KBC uses a 32KHz clock so a cycle = 1/32Khz */ 40/* KBC uses a 32KHz clock so a cycle = 1/32Khz */
40#define KBC_CYCLE_USEC 32 41#define KBC_CYCLE_MS 32
41 42
42/* KBC Registers */ 43/* KBC Registers */
43 44
@@ -647,7 +648,7 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
647 debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT); 648 debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
648 scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows; 649 scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows;
649 kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt; 650 kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt;
650 kbc->repoll_dly = ((kbc->repoll_dly * KBC_CYCLE_USEC) + 999) / 1000; 651 kbc->repoll_dly = DIV_ROUND_UP(kbc->repoll_dly, KBC_CYCLE_MS);
651 652
652 input_dev->name = pdev->name; 653 input_dev->name = pdev->name;
653 input_dev->id.bustype = BUS_HOST; 654 input_dev->id.bustype = BUS_HOST;
@@ -701,7 +702,7 @@ err_iounmap:
701err_free_mem_region: 702err_free_mem_region:
702 release_mem_region(res->start, resource_size(res)); 703 release_mem_region(res->start, resource_size(res));
703err_free_mem: 704err_free_mem:
704 input_free_device(kbc->idev); 705 input_free_device(input_dev);
705 kfree(kbc); 706 kfree(kbc);
706 707
707 return err; 708 return err;
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
index e21deb1baa8a..025417d74ca2 100644
--- a/drivers/input/misc/ad714x-i2c.c
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * AD714X CapTouch Programmable Controller driver (I2C bus) 2 * AD714X CapTouch Programmable Controller driver (I2C bus)
3 * 3 *
4 * Copyright 2009 Analog Devices Inc. 4 * Copyright 2009-2011 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
@@ -27,54 +27,49 @@ static int ad714x_i2c_resume(struct device *dev)
27 27
28static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume); 28static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume);
29 29
30static int ad714x_i2c_write(struct device *dev, unsigned short reg, 30static int ad714x_i2c_write(struct ad714x_chip *chip,
31 unsigned short data) 31 unsigned short reg, unsigned short data)
32{ 32{
33 struct i2c_client *client = to_i2c_client(dev); 33 struct i2c_client *client = to_i2c_client(chip->dev);
34 int ret = 0; 34 int error;
35 u8 *_reg = (u8 *)&reg; 35
36 u8 *_data = (u8 *)&data; 36 chip->xfer_buf[0] = cpu_to_be16(reg);
37 37 chip->xfer_buf[1] = cpu_to_be16(data);
38 u8 tx[4] = { 38
39 _reg[1], 39 error = i2c_master_send(client, (u8 *)chip->xfer_buf,
40 _reg[0], 40 2 * sizeof(*chip->xfer_buf));
41 _data[1], 41 if (unlikely(error < 0)) {
42 _data[0] 42 dev_err(&client->dev, "I2C write error: %d\n", error);
43 }; 43 return error;
44 44 }
45 ret = i2c_master_send(client, tx, 4); 45
46 if (ret < 0) 46 return 0;
47 dev_err(&client->dev, "I2C write error\n");
48
49 return ret;
50} 47}
51 48
52static int ad714x_i2c_read(struct device *dev, unsigned short reg, 49static int ad714x_i2c_read(struct ad714x_chip *chip,
53 unsigned short *data) 50 unsigned short reg, unsigned short *data, size_t len)
54{ 51{
55 struct i2c_client *client = to_i2c_client(dev); 52 struct i2c_client *client = to_i2c_client(chip->dev);
56 int ret = 0; 53 int i;
57 u8 *_reg = (u8 *)&reg; 54 int error;
58 u8 *_data = (u8 *)data; 55
59 56 chip->xfer_buf[0] = cpu_to_be16(reg);
60 u8 tx[2] = { 57
61 _reg[1], 58 error = i2c_master_send(client, (u8 *)chip->xfer_buf,
62 _reg[0] 59 sizeof(*chip->xfer_buf));
63 }; 60 if (error >= 0)
64 u8 rx[2]; 61 error = i2c_master_recv(client, (u8 *)chip->xfer_buf,
65 62 len * sizeof(*chip->xfer_buf));
66 ret = i2c_master_send(client, tx, 2); 63
67 if (ret >= 0) 64 if (unlikely(error < 0)) {
68 ret = i2c_master_recv(client, rx, 2); 65 dev_err(&client->dev, "I2C read error: %d\n", error);
69 66 return error;
70 if (unlikely(ret < 0)) {
71 dev_err(&client->dev, "I2C read error\n");
72 } else {
73 _data[0] = rx[1];
74 _data[1] = rx[0];
75 } 67 }
76 68
77 return ret; 69 for (i = 0; i < len; i++)
70 data[i] = be16_to_cpu(chip->xfer_buf[i]);
71
72 return 0;
78} 73}
79 74
80static int __devinit ad714x_i2c_probe(struct i2c_client *client, 75static int __devinit ad714x_i2c_probe(struct i2c_client *client,
diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c
index 4120dd549305..875b50811361 100644
--- a/drivers/input/misc/ad714x-spi.c
+++ b/drivers/input/misc/ad714x-spi.c
@@ -1,12 +1,12 @@
1/* 1/*
2 * AD714X CapTouch Programmable Controller driver (SPI bus) 2 * AD714X CapTouch Programmable Controller driver (SPI bus)
3 * 3 *
4 * Copyright 2009 Analog Devices Inc. 4 * Copyright 2009-2011 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
9#include <linux/input.h> /* BUS_I2C */ 9#include <linux/input.h> /* BUS_SPI */
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/spi/spi.h> 11#include <linux/spi/spi.h>
12#include <linux/pm.h> 12#include <linux/pm.h>
@@ -30,30 +30,68 @@ static int ad714x_spi_resume(struct device *dev)
30 30
31static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume); 31static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume);
32 32
33static int ad714x_spi_read(struct device *dev, unsigned short reg, 33static int ad714x_spi_read(struct ad714x_chip *chip,
34 unsigned short *data) 34 unsigned short reg, unsigned short *data, size_t len)
35{ 35{
36 struct spi_device *spi = to_spi_device(dev); 36 struct spi_device *spi = to_spi_device(chip->dev);
37 unsigned short tx = AD714x_SPI_CMD_PREFIX | AD714x_SPI_READ | reg; 37 struct spi_message message;
38 struct spi_transfer xfer[2];
39 int i;
40 int error;
41
42 spi_message_init(&message);
43 memset(xfer, 0, sizeof(xfer));
44
45 chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX |
46 AD714x_SPI_READ | reg);
47 xfer[0].tx_buf = &chip->xfer_buf[0];
48 xfer[0].len = sizeof(chip->xfer_buf[0]);
49 spi_message_add_tail(&xfer[0], &message);
50
51 xfer[1].rx_buf = &chip->xfer_buf[1];
52 xfer[1].len = sizeof(chip->xfer_buf[1]) * len;
53 spi_message_add_tail(&xfer[1], &message);
54
55 error = spi_sync(spi, &message);
56 if (unlikely(error)) {
57 dev_err(chip->dev, "SPI read error: %d\n", error);
58 return error;
59 }
60
61 for (i = 0; i < len; i++)
62 data[i] = be16_to_cpu(chip->xfer_buf[i + 1]);
38 63
39 return spi_write_then_read(spi, (u8 *)&tx, 2, (u8 *)data, 2); 64 return 0;
40} 65}
41 66
42static int ad714x_spi_write(struct device *dev, unsigned short reg, 67static int ad714x_spi_write(struct ad714x_chip *chip,
43 unsigned short data) 68 unsigned short reg, unsigned short data)
44{ 69{
45 struct spi_device *spi = to_spi_device(dev); 70 struct spi_device *spi = to_spi_device(chip->dev);
46 unsigned short tx[2] = { 71 int error;
47 AD714x_SPI_CMD_PREFIX | reg, 72
48 data 73 chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX | reg);
49 }; 74 chip->xfer_buf[1] = cpu_to_be16(data);
75
76 error = spi_write(spi, (u8 *)chip->xfer_buf,
77 2 * sizeof(*chip->xfer_buf));
78 if (unlikely(error)) {
79 dev_err(chip->dev, "SPI write error: %d\n", error);
80 return error;
81 }
50 82
51 return spi_write(spi, (u8 *)tx, 4); 83 return 0;
52} 84}
53 85
54static int __devinit ad714x_spi_probe(struct spi_device *spi) 86static int __devinit ad714x_spi_probe(struct spi_device *spi)
55{ 87{
56 struct ad714x_chip *chip; 88 struct ad714x_chip *chip;
89 int err;
90
91 spi->bits_per_word = 8;
92 err = spi_setup(spi);
93 if (err < 0)
94 return err;
57 95
58 chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq, 96 chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq,
59 ad714x_spi_read, ad714x_spi_write); 97 ad714x_spi_read, ad714x_spi_write);
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c
index c3a62c42cd28..ca42c7d2a3c7 100644
--- a/drivers/input/misc/ad714x.c
+++ b/drivers/input/misc/ad714x.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * AD714X CapTouch Programmable Controller driver supporting AD7142/3/7/8/7A 2 * AD714X CapTouch Programmable Controller driver supporting AD7142/3/7/8/7A
3 * 3 *
4 * Copyright 2009 Analog Devices Inc. 4 * Copyright 2009-2011 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
@@ -59,7 +59,6 @@
59#define STAGE11_AMBIENT 0x27D 59#define STAGE11_AMBIENT 0x27D
60 60
61#define PER_STAGE_REG_NUM 36 61#define PER_STAGE_REG_NUM 36
62#define STAGE_NUM 12
63#define STAGE_CFGREG_NUM 8 62#define STAGE_CFGREG_NUM 8
64#define SYS_CFGREG_NUM 8 63#define SYS_CFGREG_NUM 8
65 64
@@ -124,27 +123,6 @@ struct ad714x_driver_data {
124 * information to integrate all things which will be private data 123 * information to integrate all things which will be private data
125 * of spi/i2c device 124 * of spi/i2c device
126 */ 125 */
127struct ad714x_chip {
128 unsigned short h_state;
129 unsigned short l_state;
130 unsigned short c_state;
131 unsigned short adc_reg[STAGE_NUM];
132 unsigned short amb_reg[STAGE_NUM];
133 unsigned short sensor_val[STAGE_NUM];
134
135 struct ad714x_platform_data *hw;
136 struct ad714x_driver_data *sw;
137
138 int irq;
139 struct device *dev;
140 ad714x_read_t read;
141 ad714x_write_t write;
142
143 struct mutex mutex;
144
145 unsigned product;
146 unsigned version;
147};
148 126
149static void ad714x_use_com_int(struct ad714x_chip *ad714x, 127static void ad714x_use_com_int(struct ad714x_chip *ad714x,
150 int start_stage, int end_stage) 128 int start_stage, int end_stage)
@@ -154,13 +132,13 @@ static void ad714x_use_com_int(struct ad714x_chip *ad714x,
154 132
155 mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); 133 mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1);
156 134
157 ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data); 135 ad714x->read(ad714x, STG_COM_INT_EN_REG, &data, 1);
158 data |= 1 << end_stage; 136 data |= 1 << end_stage;
159 ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data); 137 ad714x->write(ad714x, STG_COM_INT_EN_REG, data);
160 138
161 ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data); 139 ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data, 1);
162 data &= ~mask; 140 data &= ~mask;
163 ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data); 141 ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data);
164} 142}
165 143
166static void ad714x_use_thr_int(struct ad714x_chip *ad714x, 144static void ad714x_use_thr_int(struct ad714x_chip *ad714x,
@@ -171,13 +149,13 @@ static void ad714x_use_thr_int(struct ad714x_chip *ad714x,
171 149
172 mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); 150 mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1);
173 151
174 ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data); 152 ad714x->read(ad714x, STG_COM_INT_EN_REG, &data, 1);
175 data &= ~(1 << end_stage); 153 data &= ~(1 << end_stage);
176 ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data); 154 ad714x->write(ad714x, STG_COM_INT_EN_REG, data);
177 155
178 ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data); 156 ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data, 1);
179 data |= mask; 157 data |= mask;
180 ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data); 158 ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data);
181} 159}
182 160
183static int ad714x_cal_highest_stage(struct ad714x_chip *ad714x, 161static int ad714x_cal_highest_stage(struct ad714x_chip *ad714x,
@@ -273,15 +251,16 @@ static void ad714x_slider_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
273 struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx]; 251 struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx];
274 int i; 252 int i;
275 253
254 ad714x->read(ad714x, CDC_RESULT_S0 + hw->start_stage,
255 &ad714x->adc_reg[hw->start_stage],
256 hw->end_stage - hw->start_stage + 1);
257
276 for (i = hw->start_stage; i <= hw->end_stage; i++) { 258 for (i = hw->start_stage; i <= hw->end_stage; i++) {
277 ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, 259 ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
278 &ad714x->adc_reg[i]); 260 &ad714x->amb_reg[i], 1);
279 ad714x->read(ad714x->dev, 261
280 STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, 262 ad714x->sensor_val[i] =
281 &ad714x->amb_reg[i]); 263 abs(ad714x->adc_reg[i] - ad714x->amb_reg[i]);
282
283 ad714x->sensor_val[i] = abs(ad714x->adc_reg[i] -
284 ad714x->amb_reg[i]);
285 } 264 }
286} 265}
287 266
@@ -444,15 +423,16 @@ static void ad714x_wheel_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
444 struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx]; 423 struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx];
445 int i; 424 int i;
446 425
426 ad714x->read(ad714x, CDC_RESULT_S0 + hw->start_stage,
427 &ad714x->adc_reg[hw->start_stage],
428 hw->end_stage - hw->start_stage + 1);
429
447 for (i = hw->start_stage; i <= hw->end_stage; i++) { 430 for (i = hw->start_stage; i <= hw->end_stage; i++) {
448 ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, 431 ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
449 &ad714x->adc_reg[i]); 432 &ad714x->amb_reg[i], 1);
450 ad714x->read(ad714x->dev,
451 STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
452 &ad714x->amb_reg[i]);
453 if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) 433 if (ad714x->adc_reg[i] > ad714x->amb_reg[i])
454 ad714x->sensor_val[i] = ad714x->adc_reg[i] - 434 ad714x->sensor_val[i] =
455 ad714x->amb_reg[i]; 435 ad714x->adc_reg[i] - ad714x->amb_reg[i];
456 else 436 else
457 ad714x->sensor_val[i] = 0; 437 ad714x->sensor_val[i] = 0;
458 } 438 }
@@ -597,15 +577,16 @@ static void touchpad_cal_sensor_val(struct ad714x_chip *ad714x, int idx)
597 struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx]; 577 struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx];
598 int i; 578 int i;
599 579
580 ad714x->read(ad714x, CDC_RESULT_S0 + hw->x_start_stage,
581 &ad714x->adc_reg[hw->x_start_stage],
582 hw->x_end_stage - hw->x_start_stage + 1);
583
600 for (i = hw->x_start_stage; i <= hw->x_end_stage; i++) { 584 for (i = hw->x_start_stage; i <= hw->x_end_stage; i++) {
601 ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, 585 ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
602 &ad714x->adc_reg[i]); 586 &ad714x->amb_reg[i], 1);
603 ad714x->read(ad714x->dev,
604 STAGE0_AMBIENT + i * PER_STAGE_REG_NUM,
605 &ad714x->amb_reg[i]);
606 if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) 587 if (ad714x->adc_reg[i] > ad714x->amb_reg[i])
607 ad714x->sensor_val[i] = ad714x->adc_reg[i] - 588 ad714x->sensor_val[i] =
608 ad714x->amb_reg[i]; 589 ad714x->adc_reg[i] - ad714x->amb_reg[i];
609 else 590 else
610 ad714x->sensor_val[i] = 0; 591 ad714x->sensor_val[i] = 0;
611 } 592 }
@@ -891,7 +872,7 @@ static int ad714x_hw_detect(struct ad714x_chip *ad714x)
891{ 872{
892 unsigned short data; 873 unsigned short data;
893 874
894 ad714x->read(ad714x->dev, AD714X_PARTID_REG, &data); 875 ad714x->read(ad714x, AD714X_PARTID_REG, &data, 1);
895 switch (data & 0xFFF0) { 876 switch (data & 0xFFF0) {
896 case AD7142_PARTID: 877 case AD7142_PARTID:
897 ad714x->product = 0x7142; 878 ad714x->product = 0x7142;
@@ -940,23 +921,20 @@ static void ad714x_hw_init(struct ad714x_chip *ad714x)
940 for (i = 0; i < STAGE_NUM; i++) { 921 for (i = 0; i < STAGE_NUM; i++) {
941 reg_base = AD714X_STAGECFG_REG + i * STAGE_CFGREG_NUM; 922 reg_base = AD714X_STAGECFG_REG + i * STAGE_CFGREG_NUM;
942 for (j = 0; j < STAGE_CFGREG_NUM; j++) 923 for (j = 0; j < STAGE_CFGREG_NUM; j++)
943 ad714x->write(ad714x->dev, reg_base + j, 924 ad714x->write(ad714x, reg_base + j,
944 ad714x->hw->stage_cfg_reg[i][j]); 925 ad714x->hw->stage_cfg_reg[i][j]);
945 } 926 }
946 927
947 for (i = 0; i < SYS_CFGREG_NUM; i++) 928 for (i = 0; i < SYS_CFGREG_NUM; i++)
948 ad714x->write(ad714x->dev, AD714X_SYSCFG_REG + i, 929 ad714x->write(ad714x, AD714X_SYSCFG_REG + i,
949 ad714x->hw->sys_cfg_reg[i]); 930 ad714x->hw->sys_cfg_reg[i]);
950 for (i = 0; i < SYS_CFGREG_NUM; i++) 931 for (i = 0; i < SYS_CFGREG_NUM; i++)
951 ad714x->read(ad714x->dev, AD714X_SYSCFG_REG + i, 932 ad714x->read(ad714x, AD714X_SYSCFG_REG + i, &data, 1);
952 &data);
953 933
954 ad714x->write(ad714x->dev, AD714X_STG_CAL_EN_REG, 0xFFF); 934 ad714x->write(ad714x, AD714X_STG_CAL_EN_REG, 0xFFF);
955 935
956 /* clear all interrupts */ 936 /* clear all interrupts */
957 ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data); 937 ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3);
958 ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data);
959 ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data);
960} 938}
961 939
962static irqreturn_t ad714x_interrupt_thread(int irq, void *data) 940static irqreturn_t ad714x_interrupt_thread(int irq, void *data)
@@ -966,9 +944,7 @@ static irqreturn_t ad714x_interrupt_thread(int irq, void *data)
966 944
967 mutex_lock(&ad714x->mutex); 945 mutex_lock(&ad714x->mutex);
968 946
969 ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &ad714x->l_state); 947 ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3);
970 ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &ad714x->h_state);
971 ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &ad714x->c_state);
972 948
973 for (i = 0; i < ad714x->hw->button_num; i++) 949 for (i = 0; i < ad714x->hw->button_num; i++)
974 ad714x_button_state_machine(ad714x, i); 950 ad714x_button_state_machine(ad714x, i);
@@ -1245,7 +1221,7 @@ int ad714x_disable(struct ad714x_chip *ad714x)
1245 mutex_lock(&ad714x->mutex); 1221 mutex_lock(&ad714x->mutex);
1246 1222
1247 data = ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL] | 0x3; 1223 data = ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL] | 0x3;
1248 ad714x->write(ad714x->dev, AD714X_PWR_CTRL, data); 1224 ad714x->write(ad714x, AD714X_PWR_CTRL, data);
1249 1225
1250 mutex_unlock(&ad714x->mutex); 1226 mutex_unlock(&ad714x->mutex);
1251 1227
@@ -1255,24 +1231,20 @@ EXPORT_SYMBOL(ad714x_disable);
1255 1231
1256int ad714x_enable(struct ad714x_chip *ad714x) 1232int ad714x_enable(struct ad714x_chip *ad714x)
1257{ 1233{
1258 unsigned short data;
1259
1260 dev_dbg(ad714x->dev, "%s enter\n", __func__); 1234 dev_dbg(ad714x->dev, "%s enter\n", __func__);
1261 1235
1262 mutex_lock(&ad714x->mutex); 1236 mutex_lock(&ad714x->mutex);
1263 1237
1264 /* resume to non-shutdown mode */ 1238 /* resume to non-shutdown mode */
1265 1239
1266 ad714x->write(ad714x->dev, AD714X_PWR_CTRL, 1240 ad714x->write(ad714x, AD714X_PWR_CTRL,
1267 ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL]); 1241 ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL]);
1268 1242
1269 /* make sure the interrupt output line is not low level after resume, 1243 /* make sure the interrupt output line is not low level after resume,
1270 * otherwise we will get no chance to enter falling-edge irq again 1244 * otherwise we will get no chance to enter falling-edge irq again
1271 */ 1245 */
1272 1246
1273 ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data); 1247 ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3);
1274 ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data);
1275 ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data);
1276 1248
1277 mutex_unlock(&ad714x->mutex); 1249 mutex_unlock(&ad714x->mutex);
1278 1250
diff --git a/drivers/input/misc/ad714x.h b/drivers/input/misc/ad714x.h
index 45c54fb13f07..3c85455aa66d 100644
--- a/drivers/input/misc/ad714x.h
+++ b/drivers/input/misc/ad714x.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * AD714X CapTouch Programmable Controller driver (bus interfaces) 2 * AD714X CapTouch Programmable Controller driver (bus interfaces)
3 * 3 *
4 * Copyright 2009 Analog Devices Inc. 4 * Copyright 2009-2011 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
@@ -11,11 +11,40 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13 13
14#define STAGE_NUM 12
15
14struct device; 16struct device;
17struct ad714x_platform_data;
18struct ad714x_driver_data;
15struct ad714x_chip; 19struct ad714x_chip;
16 20
17typedef int (*ad714x_read_t)(struct device *, unsigned short, unsigned short *); 21typedef int (*ad714x_read_t)(struct ad714x_chip *, unsigned short, unsigned short *, size_t);
18typedef int (*ad714x_write_t)(struct device *, unsigned short, unsigned short); 22typedef int (*ad714x_write_t)(struct ad714x_chip *, unsigned short, unsigned short);
23
24struct ad714x_chip {
25 unsigned short l_state;
26 unsigned short h_state;
27 unsigned short c_state;
28 unsigned short adc_reg[STAGE_NUM];
29 unsigned short amb_reg[STAGE_NUM];
30 unsigned short sensor_val[STAGE_NUM];
31
32 struct ad714x_platform_data *hw;
33 struct ad714x_driver_data *sw;
34
35 int irq;
36 struct device *dev;
37 ad714x_read_t read;
38 ad714x_write_t write;
39
40 struct mutex mutex;
41
42 unsigned product;
43 unsigned version;
44
45 __be16 xfer_buf[16] ____cacheline_aligned;
46
47};
19 48
20int ad714x_disable(struct ad714x_chip *ad714x); 49int ad714x_disable(struct ad714x_chip *ad714x);
21int ad714x_enable(struct ad714x_chip *ad714x); 50int ad714x_enable(struct ad714x_chip *ad714x);
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index c456f63b6bae..783597a9a64a 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -21,6 +21,7 @@
21#include <linux/i2c.h> 21#include <linux/i2c.h>
22#include <linux/input.h> 22#include <linux/input.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/module.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
25#include <linux/input/kxtj9.h> 26#include <linux/input/kxtj9.h>
26#include <linux/input-polldev.h> 27#include <linux/input-polldev.h>
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 20f8f9284f02..0794778295fc 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -24,6 +24,7 @@
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/i2c.h> 25#include <linux/i2c.h>
26#include <linux/input-polldev.h> 26#include <linux/input-polldev.h>
27#include <linux/of_device.h>
27 28
28#define MMA8450_DRV_NAME "mma8450" 29#define MMA8450_DRV_NAME "mma8450"
29 30
@@ -229,10 +230,17 @@ static const struct i2c_device_id mma8450_id[] = {
229}; 230};
230MODULE_DEVICE_TABLE(i2c, mma8450_id); 231MODULE_DEVICE_TABLE(i2c, mma8450_id);
231 232
233static const struct of_device_id mma8450_dt_ids[] = {
234 { .compatible = "fsl,mma8450", },
235 { /* sentinel */ }
236};
237MODULE_DEVICE_TABLE(of, mma8450_dt_ids);
238
232static struct i2c_driver mma8450_driver = { 239static struct i2c_driver mma8450_driver = {
233 .driver = { 240 .driver = {
234 .name = MMA8450_DRV_NAME, 241 .name = MMA8450_DRV_NAME,
235 .owner = THIS_MODULE, 242 .owner = THIS_MODULE,
243 .of_match_table = mma8450_dt_ids,
236 }, 244 },
237 .probe = mma8450_probe, 245 .probe = mma8450_probe,
238 .remove = __devexit_p(mma8450_remove), 246 .remove = __devexit_p(mma8450_remove),
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index b95fac15b2ea..f71dc728da58 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -282,7 +282,7 @@ err_free_irq:
282err_pm_set_suspended: 282err_pm_set_suspended:
283 pm_runtime_set_suspended(&client->dev); 283 pm_runtime_set_suspended(&client->dev);
284err_free_mem: 284err_free_mem:
285 input_unregister_device(idev); 285 input_free_device(idev);
286 kfree(sensor); 286 kfree(sensor);
287 return error; 287 return error;
288} 288}
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 3126983c004a..da280189ef07 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -67,6 +67,14 @@
67#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 67#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245
68#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 68#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246
69#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 69#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247
70/* MacbookAir4,2 (unibody, July 2011) */
71#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
72#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
73#define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e
74/* Macbook8,2 (unibody) */
75#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
76#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
77#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
70 78
71#define BCM5974_DEVICE(prod) { \ 79#define BCM5974_DEVICE(prod) { \
72 .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ 80 .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
@@ -104,6 +112,14 @@ static const struct usb_device_id bcm5974_table[] = {
104 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), 112 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
105 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), 113 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
106 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), 114 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
115 /* MacbookAir4,2 */
116 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
117 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
118 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_JIS),
119 /* MacbookPro8,2 */
120 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
121 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
122 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
107 /* Terminating entry */ 123 /* Terminating entry */
108 {} 124 {}
109}; 125};
@@ -294,6 +310,30 @@ static const struct bcm5974_config bcm5974_config_table[] = {
294 { DIM_X, DIM_X / SN_COORD, -4415, 5050 }, 310 { DIM_X, DIM_X / SN_COORD, -4415, 5050 },
295 { DIM_Y, DIM_Y / SN_COORD, -55, 6680 } 311 { DIM_Y, DIM_Y / SN_COORD, -55, 6680 }
296 }, 312 },
313 {
314 USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI,
315 USB_DEVICE_ID_APPLE_WELLSPRING6_ISO,
316 USB_DEVICE_ID_APPLE_WELLSPRING6_JIS,
317 HAS_INTEGRATED_BUTTON,
318 0x84, sizeof(struct bt_data),
319 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
320 { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
321 { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
322 { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
323 { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
324 },
325 {
326 USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI,
327 USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO,
328 USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS,
329 HAS_INTEGRATED_BUTTON,
330 0x84, sizeof(struct bt_data),
331 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
332 { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
333 { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
334 { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
335 { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
336 },
297 {} 337 {}
298}; 338};
299 339
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 95577c15ae56..4d17d9f3320b 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -32,6 +32,7 @@
32#define DEBUG 32#define DEBUG
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/input.h> 34#include <linux/input.h>
35#include <linux/module.h>
35#include <linux/serio.h> 36#include <linux/serio.h>
36#include <linux/libps2.h> 37#include <linux/libps2.h>
37#include <linux/delay.h> 38#include <linux/delay.h>
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index 80baa53da5b1..d64c5a43aaad 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -23,7 +23,7 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/io.h> 25#include <linux/io.h>
26 26#include <linux/of_address.h>
27#include <linux/of_device.h> 27#include <linux/of_device.h>
28#include <linux/of_platform.h> 28#include <linux/of_platform.h>
29 29
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 449c0a46dbac..d27c9d91630b 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -49,6 +49,7 @@ struct hid_descriptor {
49#define USB_REQ_GET_REPORT 0x01 49#define USB_REQ_GET_REPORT 0x01
50#define USB_REQ_SET_REPORT 0x09 50#define USB_REQ_SET_REPORT 0x09
51#define WAC_HID_FEATURE_REPORT 0x03 51#define WAC_HID_FEATURE_REPORT 0x03
52#define WAC_MSG_RETRIES 5
52 53
53static int usb_get_report(struct usb_interface *intf, unsigned char type, 54static int usb_get_report(struct usb_interface *intf, unsigned char type,
54 unsigned char id, void *buf, int size) 55 unsigned char id, void *buf, int size)
@@ -165,7 +166,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
165 report, 166 report,
166 hid_desc->wDescriptorLength, 167 hid_desc->wDescriptorLength,
167 5000); /* 5 secs */ 168 5000); /* 5 secs */
168 } while (result < 0 && limit++ < 5); 169 } while (result < 0 && limit++ < WAC_MSG_RETRIES);
169 170
170 /* No need to parse the Descriptor. It isn't an error though */ 171 /* No need to parse the Descriptor. It isn't an error though */
171 if (result < 0) 172 if (result < 0)
@@ -319,24 +320,26 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
319 int limit = 0, report_id = 2; 320 int limit = 0, report_id = 2;
320 int error = -ENOMEM; 321 int error = -ENOMEM;
321 322
322 rep_data = kmalloc(2, GFP_KERNEL); 323 rep_data = kmalloc(4, GFP_KERNEL);
323 if (!rep_data) 324 if (!rep_data)
324 return error; 325 return error;
325 326
326 /* ask to report tablet data if it is 2FGT Tablet PC or 327 /* ask to report tablet data if it is MT Tablet PC or
327 * not a Tablet PC */ 328 * not a Tablet PC */
328 if (features->type == TABLETPC2FG) { 329 if (features->type == TABLETPC2FG) {
329 do { 330 do {
330 rep_data[0] = 3; 331 rep_data[0] = 3;
331 rep_data[1] = 4; 332 rep_data[1] = 4;
333 rep_data[2] = 0;
334 rep_data[3] = 0;
332 report_id = 3; 335 report_id = 3;
333 error = usb_set_report(intf, WAC_HID_FEATURE_REPORT, 336 error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
334 report_id, rep_data, 2); 337 report_id, rep_data, 4);
335 if (error >= 0) 338 if (error >= 0)
336 error = usb_get_report(intf, 339 error = usb_get_report(intf,
337 WAC_HID_FEATURE_REPORT, report_id, 340 WAC_HID_FEATURE_REPORT, report_id,
338 rep_data, 3); 341 rep_data, 4);
339 } while ((error < 0 || rep_data[1] != 4) && limit++ < 5); 342 } while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES);
340 } else if (features->type != TABLETPC) { 343 } else if (features->type != TABLETPC) {
341 do { 344 do {
342 rep_data[0] = 2; 345 rep_data[0] = 2;
@@ -347,7 +350,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
347 error = usb_get_report(intf, 350 error = usb_get_report(intf,
348 WAC_HID_FEATURE_REPORT, report_id, 351 WAC_HID_FEATURE_REPORT, report_id,
349 rep_data, 2); 352 rep_data, 2);
350 } while ((error < 0 || rep_data[1] != 2) && limit++ < 5); 353 } while ((error < 0 || rep_data[1] != 2) && limit++ < WAC_MSG_RETRIES);
351 } 354 }
352 355
353 kfree(rep_data); 356 kfree(rep_data);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 03ebcc8b24b5..c1c2f7b28d89 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1460,6 +1460,9 @@ static const struct wacom_features wacom_features_0xD3 =
1460static const struct wacom_features wacom_features_0xD4 = 1460static const struct wacom_features wacom_features_0xD4 =
1461 { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 1461 { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
1462 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1462 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1463static const struct wacom_features wacom_features_0xD5 =
1464 { "Wacom Bamboo Pen 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023,
1465 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1463static const struct wacom_features wacom_features_0xD6 = 1466static const struct wacom_features wacom_features_0xD6 =
1464 { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 1467 { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
1465 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1468 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1564,6 +1567,7 @@ const struct usb_device_id wacom_ids[] = {
1564 { USB_DEVICE_WACOM(0xD2) }, 1567 { USB_DEVICE_WACOM(0xD2) },
1565 { USB_DEVICE_WACOM(0xD3) }, 1568 { USB_DEVICE_WACOM(0xD3) },
1566 { USB_DEVICE_WACOM(0xD4) }, 1569 { USB_DEVICE_WACOM(0xD4) },
1570 { USB_DEVICE_WACOM(0xD5) },
1567 { USB_DEVICE_WACOM(0xD6) }, 1571 { USB_DEVICE_WACOM(0xD6) },
1568 { USB_DEVICE_WACOM(0xD7) }, 1572 { USB_DEVICE_WACOM(0xD7) },
1569 { USB_DEVICE_WACOM(0xD8) }, 1573 { USB_DEVICE_WACOM(0xD8) },
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index bc3b5187f3a3..131f9d1c921b 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -249,12 +249,14 @@ static void __ad7879_enable(struct ad7879 *ts)
249 249
250static void __ad7879_disable(struct ad7879 *ts) 250static void __ad7879_disable(struct ad7879 *ts)
251{ 251{
252 u16 reg = (ts->cmd_crtl2 & ~AD7879_PM(-1)) |
253 AD7879_PM(AD7879_PM_SHUTDOWN);
252 disable_irq(ts->irq); 254 disable_irq(ts->irq);
253 255
254 if (del_timer_sync(&ts->timer)) 256 if (del_timer_sync(&ts->timer))
255 ad7879_ts_event_release(ts); 257 ad7879_ts_event_release(ts);
256 258
257 ad7879_write(ts, AD7879_REG_CTRL2, AD7879_PM(AD7879_PM_SHUTDOWN)); 259 ad7879_write(ts, AD7879_REG_CTRL2, reg);
258} 260}
259 261
260 262
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index ae00604a6a81..f5d66859f232 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -244,6 +244,7 @@ struct mxt_finger {
244 int x; 244 int x;
245 int y; 245 int y;
246 int area; 246 int area;
247 int pressure;
247}; 248};
248 249
249/* Each client has this additional data */ 250/* Each client has this additional data */
@@ -536,6 +537,8 @@ static void mxt_input_report(struct mxt_data *data, int single_id)
536 finger[id].x); 537 finger[id].x);
537 input_report_abs(input_dev, ABS_MT_POSITION_Y, 538 input_report_abs(input_dev, ABS_MT_POSITION_Y,
538 finger[id].y); 539 finger[id].y);
540 input_report_abs(input_dev, ABS_MT_PRESSURE,
541 finger[id].pressure);
539 } else { 542 } else {
540 finger[id].status = 0; 543 finger[id].status = 0;
541 } 544 }
@@ -546,6 +549,8 @@ static void mxt_input_report(struct mxt_data *data, int single_id)
546 if (status != MXT_RELEASE) { 549 if (status != MXT_RELEASE) {
547 input_report_abs(input_dev, ABS_X, finger[single_id].x); 550 input_report_abs(input_dev, ABS_X, finger[single_id].x);
548 input_report_abs(input_dev, ABS_Y, finger[single_id].y); 551 input_report_abs(input_dev, ABS_Y, finger[single_id].y);
552 input_report_abs(input_dev,
553 ABS_PRESSURE, finger[single_id].pressure);
549 } 554 }
550 555
551 input_sync(input_dev); 556 input_sync(input_dev);
@@ -560,6 +565,7 @@ static void mxt_input_touchevent(struct mxt_data *data,
560 int x; 565 int x;
561 int y; 566 int y;
562 int area; 567 int area;
568 int pressure;
563 569
564 /* Check the touch is present on the screen */ 570 /* Check the touch is present on the screen */
565 if (!(status & MXT_DETECT)) { 571 if (!(status & MXT_DETECT)) {
@@ -584,6 +590,7 @@ static void mxt_input_touchevent(struct mxt_data *data,
584 y = y >> 2; 590 y = y >> 2;
585 591
586 area = message->message[4]; 592 area = message->message[4];
593 pressure = message->message[5];
587 594
588 dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id, 595 dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id,
589 status & MXT_MOVE ? "moved" : "pressed", 596 status & MXT_MOVE ? "moved" : "pressed",
@@ -594,6 +601,7 @@ static void mxt_input_touchevent(struct mxt_data *data,
594 finger[id].x = x; 601 finger[id].x = x;
595 finger[id].y = y; 602 finger[id].y = y;
596 finger[id].area = area; 603 finger[id].area = area;
604 finger[id].pressure = pressure;
597 605
598 mxt_input_report(data, id); 606 mxt_input_report(data, id);
599} 607}
@@ -1116,6 +1124,8 @@ static int __devinit mxt_probe(struct i2c_client *client,
1116 0, data->max_x, 0, 0); 1124 0, data->max_x, 0, 0);
1117 input_set_abs_params(input_dev, ABS_Y, 1125 input_set_abs_params(input_dev, ABS_Y,
1118 0, data->max_y, 0, 0); 1126 0, data->max_y, 0, 0);
1127 input_set_abs_params(input_dev, ABS_PRESSURE,
1128 0, 255, 0, 0);
1119 1129
1120 /* For multi touch */ 1130 /* For multi touch */
1121 input_mt_init_slots(input_dev, MXT_MAX_FINGER); 1131 input_mt_init_slots(input_dev, MXT_MAX_FINGER);
@@ -1125,6 +1135,8 @@ static int __devinit mxt_probe(struct i2c_client *client,
1125 0, data->max_x, 0, 0); 1135 0, data->max_x, 0, 0);
1126 input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 1136 input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
1127 0, data->max_y, 0, 0); 1137 0, data->max_y, 0, 0);
1138 input_set_abs_params(input_dev, ABS_MT_PRESSURE,
1139 0, 255, 0, 0);
1128 1140
1129 input_set_drvdata(input_dev, data); 1141 input_set_drvdata(input_dev, data);
1130 i2c_set_clientdata(client, data); 1142 i2c_set_clientdata(client, data);
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c
index 4f2713d92791..4627fe55b401 100644
--- a/drivers/input/touchscreen/max11801_ts.c
+++ b/drivers/input/touchscreen/max11801_ts.c
@@ -9,7 +9,8 @@
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License. 12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
13 */ 14 */
14 15
15/* 16/*
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index 089b0a0f3d8c..0e8f63e5b36f 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -13,6 +13,7 @@
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15 15
16#include <linux/module.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/err.h> 18#include <linux/err.h>
18#include <linux/errno.h> 19#include <linux/errno.h>
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a14f8dc23462..0e4227f457af 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -605,7 +605,9 @@ static void build_inv_all(struct iommu_cmd *cmd)
605 * Writes the command to the IOMMUs command buffer and informs the 605 * Writes the command to the IOMMUs command buffer and informs the
606 * hardware about the new command. 606 * hardware about the new command.
607 */ 607 */
608static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) 608static int iommu_queue_command_sync(struct amd_iommu *iommu,
609 struct iommu_cmd *cmd,
610 bool sync)
609{ 611{
610 u32 left, tail, head, next_tail; 612 u32 left, tail, head, next_tail;
611 unsigned long flags; 613 unsigned long flags;
@@ -639,13 +641,18 @@ again:
639 copy_cmd_to_buffer(iommu, cmd, tail); 641 copy_cmd_to_buffer(iommu, cmd, tail);
640 642
641 /* We need to sync now to make sure all commands are processed */ 643 /* We need to sync now to make sure all commands are processed */
642 iommu->need_sync = true; 644 iommu->need_sync = sync;
643 645
644 spin_unlock_irqrestore(&iommu->lock, flags); 646 spin_unlock_irqrestore(&iommu->lock, flags);
645 647
646 return 0; 648 return 0;
647} 649}
648 650
651static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
652{
653 return iommu_queue_command_sync(iommu, cmd, true);
654}
655
649/* 656/*
650 * This function queues a completion wait command into the command 657 * This function queues a completion wait command into the command
651 * buffer of an IOMMU 658 * buffer of an IOMMU
@@ -661,7 +668,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
661 668
662 build_completion_wait(&cmd, (u64)&sem); 669 build_completion_wait(&cmd, (u64)&sem);
663 670
664 ret = iommu_queue_command(iommu, &cmd); 671 ret = iommu_queue_command_sync(iommu, &cmd, false);
665 if (ret) 672 if (ret)
666 return ret; 673 return ret;
667 674
@@ -840,14 +847,9 @@ static void domain_flush_complete(struct protection_domain *domain)
840static void domain_flush_devices(struct protection_domain *domain) 847static void domain_flush_devices(struct protection_domain *domain)
841{ 848{
842 struct iommu_dev_data *dev_data; 849 struct iommu_dev_data *dev_data;
843 unsigned long flags;
844
845 spin_lock_irqsave(&domain->lock, flags);
846 850
847 list_for_each_entry(dev_data, &domain->dev_list, list) 851 list_for_each_entry(dev_data, &domain->dev_list, list)
848 device_flush_dte(dev_data); 852 device_flush_dte(dev_data);
849
850 spin_unlock_irqrestore(&domain->lock, flags);
851} 853}
852 854
853/**************************************************************************** 855/****************************************************************************
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c
index b9826032450b..8c00937bf7e7 100644
--- a/drivers/leds/leds-ams-delta.c
+++ b/drivers/leds/leds-ams-delta.c
@@ -8,6 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <linux/module.h>
11#include <linux/kernel.h> 12#include <linux/kernel.h>
12#include <linux/init.h> 13#include <linux/init.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 3ebe3824662d..ea2185531f82 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -662,6 +662,11 @@ failed_unregister_led1_R:
662static void bd2802_unregister_led_classdev(struct bd2802_led *led) 662static void bd2802_unregister_led_classdev(struct bd2802_led *led)
663{ 663{
664 cancel_work_sync(&led->work); 664 cancel_work_sync(&led->work);
665 led_classdev_unregister(&led->cdev_led2b);
666 led_classdev_unregister(&led->cdev_led2g);
667 led_classdev_unregister(&led->cdev_led2r);
668 led_classdev_unregister(&led->cdev_led1b);
669 led_classdev_unregister(&led->cdev_led1g);
665 led_classdev_unregister(&led->cdev_led1r); 670 led_classdev_unregister(&led->cdev_led1r);
666} 671}
667 672
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c
index e4ce1fd46338..bcfbd3a60eab 100644
--- a/drivers/leds/leds-hp6xx.c
+++ b/drivers/leds/leds-hp6xx.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/init.h> 15#include <linux/init.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 8420129fc5ee..f75a66e7d312 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -241,12 +241,13 @@ config DM_MIRROR
241 needed for live data migration tools such as 'pvmove'. 241 needed for live data migration tools such as 'pvmove'.
242 242
243config DM_RAID 243config DM_RAID
244 tristate "RAID 4/5/6 target (EXPERIMENTAL)" 244 tristate "RAID 1/4/5/6 target (EXPERIMENTAL)"
245 depends on BLK_DEV_DM && EXPERIMENTAL 245 depends on BLK_DEV_DM && EXPERIMENTAL
246 select MD_RAID1
246 select MD_RAID456 247 select MD_RAID456
247 select BLK_DEV_MD 248 select BLK_DEV_MD
248 ---help--- 249 ---help---
249 A dm target that supports RAID4, RAID5 and RAID6 mappings 250 A dm target that supports RAID1, RAID4, RAID5 and RAID6 mappings
250 251
251 A RAID-5 set of N drives with a capacity of C MB per drive provides 252 A RAID-5 set of N drives with a capacity of C MB per drive provides
252 the capacity of C * (N - 1) MB, and protects against a failure 253 the capacity of C * (N - 1) MB, and protects against a failure
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index bae6c4e23d3f..49da55c1528a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -30,7 +30,6 @@
30#include <linux/device-mapper.h> 30#include <linux/device-mapper.h>
31 31
32#define DM_MSG_PREFIX "crypt" 32#define DM_MSG_PREFIX "crypt"
33#define MESG_STR(x) x, sizeof(x)
34 33
35/* 34/*
36 * context holding the current state of a multi-part conversion 35 * context holding the current state of a multi-part conversion
@@ -239,7 +238,7 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
239 struct dm_crypt_request *dmreq) 238 struct dm_crypt_request *dmreq)
240{ 239{
241 memset(iv, 0, cc->iv_size); 240 memset(iv, 0, cc->iv_size);
242 *(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff); 241 *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
243 242
244 return 0; 243 return 0;
245} 244}
@@ -248,7 +247,7 @@ static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
248 struct dm_crypt_request *dmreq) 247 struct dm_crypt_request *dmreq)
249{ 248{
250 memset(iv, 0, cc->iv_size); 249 memset(iv, 0, cc->iv_size);
251 *(u64 *)iv = cpu_to_le64(dmreq->iv_sector); 250 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
252 251
253 return 0; 252 return 0;
254} 253}
@@ -415,7 +414,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
415 struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private; 414 struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;
416 415
417 memset(iv, 0, cc->iv_size); 416 memset(iv, 0, cc->iv_size);
418 *(u64 *)iv = cpu_to_le64(dmreq->iv_sector); 417 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
419 crypto_cipher_encrypt_one(essiv_tfm, iv, iv); 418 crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
420 419
421 return 0; 420 return 0;
@@ -1575,11 +1574,17 @@ bad_mem:
1575static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) 1574static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1576{ 1575{
1577 struct crypt_config *cc; 1576 struct crypt_config *cc;
1578 unsigned int key_size; 1577 unsigned int key_size, opt_params;
1579 unsigned long long tmpll; 1578 unsigned long long tmpll;
1580 int ret; 1579 int ret;
1580 struct dm_arg_set as;
1581 const char *opt_string;
1582
1583 static struct dm_arg _args[] = {
1584 {0, 1, "Invalid number of feature args"},
1585 };
1581 1586
1582 if (argc != 5) { 1587 if (argc < 5) {
1583 ti->error = "Not enough arguments"; 1588 ti->error = "Not enough arguments";
1584 return -EINVAL; 1589 return -EINVAL;
1585 } 1590 }
@@ -1648,6 +1653,30 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1648 } 1653 }
1649 cc->start = tmpll; 1654 cc->start = tmpll;
1650 1655
1656 argv += 5;
1657 argc -= 5;
1658
1659 /* Optional parameters */
1660 if (argc) {
1661 as.argc = argc;
1662 as.argv = argv;
1663
1664 ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
1665 if (ret)
1666 goto bad;
1667
1668 opt_string = dm_shift_arg(&as);
1669
1670 if (opt_params == 1 && opt_string &&
1671 !strcasecmp(opt_string, "allow_discards"))
1672 ti->num_discard_requests = 1;
1673 else if (opt_params) {
1674 ret = -EINVAL;
1675 ti->error = "Invalid feature arguments";
1676 goto bad;
1677 }
1678 }
1679
1651 ret = -ENOMEM; 1680 ret = -ENOMEM;
1652 cc->io_queue = alloc_workqueue("kcryptd_io", 1681 cc->io_queue = alloc_workqueue("kcryptd_io",
1653 WQ_NON_REENTRANT| 1682 WQ_NON_REENTRANT|
@@ -1682,9 +1711,16 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
1682 struct dm_crypt_io *io; 1711 struct dm_crypt_io *io;
1683 struct crypt_config *cc; 1712 struct crypt_config *cc;
1684 1713
1685 if (bio->bi_rw & REQ_FLUSH) { 1714 /*
1715 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
1716 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
1717 * - for REQ_DISCARD caller must use flush if IO ordering matters
1718 */
1719 if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
1686 cc = ti->private; 1720 cc = ti->private;
1687 bio->bi_bdev = cc->dev->bdev; 1721 bio->bi_bdev = cc->dev->bdev;
1722 if (bio_sectors(bio))
1723 bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
1688 return DM_MAPIO_REMAPPED; 1724 return DM_MAPIO_REMAPPED;
1689 } 1725 }
1690 1726
@@ -1727,6 +1763,10 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
1727 1763
1728 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, 1764 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
1729 cc->dev->name, (unsigned long long)cc->start); 1765 cc->dev->name, (unsigned long long)cc->start);
1766
1767 if (ti->num_discard_requests)
1768 DMEMIT(" 1 allow_discards");
1769
1730 break; 1770 break;
1731 } 1771 }
1732 return 0; 1772 return 0;
@@ -1770,12 +1810,12 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
1770 if (argc < 2) 1810 if (argc < 2)
1771 goto error; 1811 goto error;
1772 1812
1773 if (!strnicmp(argv[0], MESG_STR("key"))) { 1813 if (!strcasecmp(argv[0], "key")) {
1774 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) { 1814 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
1775 DMWARN("not suspended during key manipulation."); 1815 DMWARN("not suspended during key manipulation.");
1776 return -EINVAL; 1816 return -EINVAL;
1777 } 1817 }
1778 if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) { 1818 if (argc == 3 && !strcasecmp(argv[1], "set")) {
1779 ret = crypt_set_key(cc, argv[2]); 1819 ret = crypt_set_key(cc, argv[2]);
1780 if (ret) 1820 if (ret)
1781 return ret; 1821 return ret;
@@ -1783,7 +1823,7 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
1783 ret = cc->iv_gen_ops->init(cc); 1823 ret = cc->iv_gen_ops->init(cc);
1784 return ret; 1824 return ret;
1785 } 1825 }
1786 if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) { 1826 if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
1787 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { 1827 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
1788 ret = cc->iv_gen_ops->wipe(cc); 1828 ret = cc->iv_gen_ops->wipe(cc);
1789 if (ret) 1829 if (ret)
@@ -1823,7 +1863,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
1823 1863
1824static struct target_type crypt_target = { 1864static struct target_type crypt_target = {
1825 .name = "crypt", 1865 .name = "crypt",
1826 .version = {1, 10, 0}, 1866 .version = {1, 11, 0},
1827 .module = THIS_MODULE, 1867 .module = THIS_MODULE,
1828 .ctr = crypt_ctr, 1868 .ctr = crypt_ctr,
1829 .dtr = crypt_dtr, 1869 .dtr = crypt_dtr,
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index ea790623c30b..89f73ca22cfa 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2003 Sistina Software (UK) Limited. 2 * Copyright (C) 2003 Sistina Software (UK) Limited.
3 * Copyright (C) 2004, 2010 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This file is released under the GPL. 5 * This file is released under the GPL.
6 */ 6 */
@@ -15,6 +15,9 @@
15 15
16#define DM_MSG_PREFIX "flakey" 16#define DM_MSG_PREFIX "flakey"
17 17
18#define all_corrupt_bio_flags_match(bio, fc) \
19 (((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
20
18/* 21/*
19 * Flakey: Used for testing only, simulates intermittent, 22 * Flakey: Used for testing only, simulates intermittent,
20 * catastrophic device failure. 23 * catastrophic device failure.
@@ -25,60 +28,189 @@ struct flakey_c {
25 sector_t start; 28 sector_t start;
26 unsigned up_interval; 29 unsigned up_interval;
27 unsigned down_interval; 30 unsigned down_interval;
31 unsigned long flags;
32 unsigned corrupt_bio_byte;
33 unsigned corrupt_bio_rw;
34 unsigned corrupt_bio_value;
35 unsigned corrupt_bio_flags;
36};
37
38enum feature_flag_bits {
39 DROP_WRITES
28}; 40};
29 41
42static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
43 struct dm_target *ti)
44{
45 int r;
46 unsigned argc;
47 const char *arg_name;
48
49 static struct dm_arg _args[] = {
50 {0, 6, "Invalid number of feature args"},
51 {1, UINT_MAX, "Invalid corrupt bio byte"},
52 {0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
53 {0, UINT_MAX, "Invalid corrupt bio flags mask"},
54 };
55
56 /* No feature arguments supplied. */
57 if (!as->argc)
58 return 0;
59
60 r = dm_read_arg_group(_args, as, &argc, &ti->error);
61 if (r)
62 return r;
63
64 while (argc) {
65 arg_name = dm_shift_arg(as);
66 argc--;
67
68 /*
69 * drop_writes
70 */
71 if (!strcasecmp(arg_name, "drop_writes")) {
72 if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
73 ti->error = "Feature drop_writes duplicated";
74 return -EINVAL;
75 }
76
77 continue;
78 }
79
80 /*
81 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
82 */
83 if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
84 if (!argc)
85 ti->error = "Feature corrupt_bio_byte requires parameters";
86
87 r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
88 if (r)
89 return r;
90 argc--;
91
92 /*
93 * Direction r or w?
94 */
95 arg_name = dm_shift_arg(as);
96 if (!strcasecmp(arg_name, "w"))
97 fc->corrupt_bio_rw = WRITE;
98 else if (!strcasecmp(arg_name, "r"))
99 fc->corrupt_bio_rw = READ;
100 else {
101 ti->error = "Invalid corrupt bio direction (r or w)";
102 return -EINVAL;
103 }
104 argc--;
105
106 /*
107 * Value of byte (0-255) to write in place of correct one.
108 */
109 r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
110 if (r)
111 return r;
112 argc--;
113
114 /*
115 * Only corrupt bios with these flags set.
116 */
117 r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error);
118 if (r)
119 return r;
120 argc--;
121
122 continue;
123 }
124
125 ti->error = "Unrecognised flakey feature requested";
126 return -EINVAL;
127 }
128
129 if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
130 ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
131 return -EINVAL;
132 }
133
134 return 0;
135}
136
30/* 137/*
31 * Construct a flakey mapping: <dev_path> <offset> <up interval> <down interval> 138 * Construct a flakey mapping:
139 * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
140 *
141 * Feature args:
142 * [drop_writes]
143 * [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
144 *
145 * Nth_byte starts from 1 for the first byte.
146 * Direction is r for READ or w for WRITE.
147 * bio_flags is ignored if 0.
32 */ 148 */
33static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) 149static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
34{ 150{
151 static struct dm_arg _args[] = {
152 {0, UINT_MAX, "Invalid up interval"},
153 {0, UINT_MAX, "Invalid down interval"},
154 };
155
156 int r;
35 struct flakey_c *fc; 157 struct flakey_c *fc;
36 unsigned long long tmp; 158 unsigned long long tmpll;
159 struct dm_arg_set as;
160 const char *devname;
37 161
38 if (argc != 4) { 162 as.argc = argc;
39 ti->error = "dm-flakey: Invalid argument count"; 163 as.argv = argv;
164
165 if (argc < 4) {
166 ti->error = "Invalid argument count";
40 return -EINVAL; 167 return -EINVAL;
41 } 168 }
42 169
43 fc = kmalloc(sizeof(*fc), GFP_KERNEL); 170 fc = kzalloc(sizeof(*fc), GFP_KERNEL);
44 if (!fc) { 171 if (!fc) {
45 ti->error = "dm-flakey: Cannot allocate linear context"; 172 ti->error = "Cannot allocate linear context";
46 return -ENOMEM; 173 return -ENOMEM;
47 } 174 }
48 fc->start_time = jiffies; 175 fc->start_time = jiffies;
49 176
50 if (sscanf(argv[1], "%llu", &tmp) != 1) { 177 devname = dm_shift_arg(&as);
51 ti->error = "dm-flakey: Invalid device sector"; 178
179 if (sscanf(dm_shift_arg(&as), "%llu", &tmpll) != 1) {
180 ti->error = "Invalid device sector";
52 goto bad; 181 goto bad;
53 } 182 }
54 fc->start = tmp; 183 fc->start = tmpll;
55 184
56 if (sscanf(argv[2], "%u", &fc->up_interval) != 1) { 185 r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
57 ti->error = "dm-flakey: Invalid up interval"; 186 if (r)
58 goto bad; 187 goto bad;
59 }
60 188
61 if (sscanf(argv[3], "%u", &fc->down_interval) != 1) { 189 r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error);
62 ti->error = "dm-flakey: Invalid down interval"; 190 if (r)
63 goto bad; 191 goto bad;
64 }
65 192
66 if (!(fc->up_interval + fc->down_interval)) { 193 if (!(fc->up_interval + fc->down_interval)) {
67 ti->error = "dm-flakey: Total (up + down) interval is zero"; 194 ti->error = "Total (up + down) interval is zero";
68 goto bad; 195 goto bad;
69 } 196 }
70 197
71 if (fc->up_interval + fc->down_interval < fc->up_interval) { 198 if (fc->up_interval + fc->down_interval < fc->up_interval) {
72 ti->error = "dm-flakey: Interval overflow"; 199 ti->error = "Interval overflow";
73 goto bad; 200 goto bad;
74 } 201 }
75 202
76 if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &fc->dev)) { 203 r = parse_features(&as, fc, ti);
77 ti->error = "dm-flakey: Device lookup failed"; 204 if (r)
205 goto bad;
206
207 if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev)) {
208 ti->error = "Device lookup failed";
78 goto bad; 209 goto bad;
79 } 210 }
80 211
81 ti->num_flush_requests = 1; 212 ti->num_flush_requests = 1;
213 ti->num_discard_requests = 1;
82 ti->private = fc; 214 ti->private = fc;
83 return 0; 215 return 0;
84 216
@@ -99,7 +231,7 @@ static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
99{ 231{
100 struct flakey_c *fc = ti->private; 232 struct flakey_c *fc = ti->private;
101 233
102 return fc->start + (bi_sector - ti->begin); 234 return fc->start + dm_target_offset(ti, bi_sector);
103} 235}
104 236
105static void flakey_map_bio(struct dm_target *ti, struct bio *bio) 237static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
@@ -111,6 +243,25 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
111 bio->bi_sector = flakey_map_sector(ti, bio->bi_sector); 243 bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
112} 244}
113 245
246static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
247{
248 unsigned bio_bytes = bio_cur_bytes(bio);
249 char *data = bio_data(bio);
250
251 /*
252 * Overwrite the Nth byte of the data returned.
253 */
254 if (data && bio_bytes >= fc->corrupt_bio_byte) {
255 data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
256
257 DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
258 "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
259 bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
260 (bio_data_dir(bio) == WRITE) ? 'w' : 'r',
261 bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes);
262 }
263}
264
114static int flakey_map(struct dm_target *ti, struct bio *bio, 265static int flakey_map(struct dm_target *ti, struct bio *bio,
115 union map_info *map_context) 266 union map_info *map_context)
116{ 267{
@@ -119,18 +270,71 @@ static int flakey_map(struct dm_target *ti, struct bio *bio,
119 270
120 /* Are we alive ? */ 271 /* Are we alive ? */
121 elapsed = (jiffies - fc->start_time) / HZ; 272 elapsed = (jiffies - fc->start_time) / HZ;
122 if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) 273 if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
274 /*
275 * Flag this bio as submitted while down.
276 */
277 map_context->ll = 1;
278
279 /*
280 * Map reads as normal.
281 */
282 if (bio_data_dir(bio) == READ)
283 goto map_bio;
284
285 /*
286 * Drop writes?
287 */
288 if (test_bit(DROP_WRITES, &fc->flags)) {
289 bio_endio(bio, 0);
290 return DM_MAPIO_SUBMITTED;
291 }
292
293 /*
294 * Corrupt matching writes.
295 */
296 if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
297 if (all_corrupt_bio_flags_match(bio, fc))
298 corrupt_bio_data(bio, fc);
299 goto map_bio;
300 }
301
302 /*
303 * By default, error all I/O.
304 */
123 return -EIO; 305 return -EIO;
306 }
124 307
308map_bio:
125 flakey_map_bio(ti, bio); 309 flakey_map_bio(ti, bio);
126 310
127 return DM_MAPIO_REMAPPED; 311 return DM_MAPIO_REMAPPED;
128} 312}
129 313
314static int flakey_end_io(struct dm_target *ti, struct bio *bio,
315 int error, union map_info *map_context)
316{
317 struct flakey_c *fc = ti->private;
318 unsigned bio_submitted_while_down = map_context->ll;
319
320 /*
321 * Corrupt successful READs while in down state.
322 * If flags were specified, only corrupt those that match.
323 */
324 if (!error && bio_submitted_while_down &&
325 (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
326 all_corrupt_bio_flags_match(bio, fc))
327 corrupt_bio_data(bio, fc);
328
329 return error;
330}
331
130static int flakey_status(struct dm_target *ti, status_type_t type, 332static int flakey_status(struct dm_target *ti, status_type_t type,
131 char *result, unsigned int maxlen) 333 char *result, unsigned int maxlen)
132{ 334{
335 unsigned sz = 0;
133 struct flakey_c *fc = ti->private; 336 struct flakey_c *fc = ti->private;
337 unsigned drop_writes;
134 338
135 switch (type) { 339 switch (type) {
136 case STATUSTYPE_INFO: 340 case STATUSTYPE_INFO:
@@ -138,9 +342,22 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
138 break; 342 break;
139 343
140 case STATUSTYPE_TABLE: 344 case STATUSTYPE_TABLE:
141 snprintf(result, maxlen, "%s %llu %u %u", fc->dev->name, 345 DMEMIT("%s %llu %u %u ", fc->dev->name,
142 (unsigned long long)fc->start, fc->up_interval, 346 (unsigned long long)fc->start, fc->up_interval,
143 fc->down_interval); 347 fc->down_interval);
348
349 drop_writes = test_bit(DROP_WRITES, &fc->flags);
350 DMEMIT("%u ", drop_writes + (fc->corrupt_bio_byte > 0) * 5);
351
352 if (drop_writes)
353 DMEMIT("drop_writes ");
354
355 if (fc->corrupt_bio_byte)
356 DMEMIT("corrupt_bio_byte %u %c %u %u ",
357 fc->corrupt_bio_byte,
358 (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
359 fc->corrupt_bio_value, fc->corrupt_bio_flags);
360
144 break; 361 break;
145 } 362 }
146 return 0; 363 return 0;
@@ -177,11 +394,12 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
177 394
178static struct target_type flakey_target = { 395static struct target_type flakey_target = {
179 .name = "flakey", 396 .name = "flakey",
180 .version = {1, 1, 0}, 397 .version = {1, 2, 0},
181 .module = THIS_MODULE, 398 .module = THIS_MODULE,
182 .ctr = flakey_ctr, 399 .ctr = flakey_ctr,
183 .dtr = flakey_dtr, 400 .dtr = flakey_dtr,
184 .map = flakey_map, 401 .map = flakey_map,
402 .end_io = flakey_end_io,
185 .status = flakey_status, 403 .status = flakey_status,
186 .ioctl = flakey_ioctl, 404 .ioctl = flakey_ioctl,
187 .merge = flakey_merge, 405 .merge = flakey_merge,
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2067288f61f9..ad2eba40e319 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -38,6 +38,8 @@ struct io {
38 struct dm_io_client *client; 38 struct dm_io_client *client;
39 io_notify_fn callback; 39 io_notify_fn callback;
40 void *context; 40 void *context;
41 void *vma_invalidate_address;
42 unsigned long vma_invalidate_size;
41} __attribute__((aligned(DM_IO_MAX_REGIONS))); 43} __attribute__((aligned(DM_IO_MAX_REGIONS)));
42 44
43static struct kmem_cache *_dm_io_cache; 45static struct kmem_cache *_dm_io_cache;
@@ -116,6 +118,10 @@ static void dec_count(struct io *io, unsigned int region, int error)
116 set_bit(region, &io->error_bits); 118 set_bit(region, &io->error_bits);
117 119
118 if (atomic_dec_and_test(&io->count)) { 120 if (atomic_dec_and_test(&io->count)) {
121 if (io->vma_invalidate_size)
122 invalidate_kernel_vmap_range(io->vma_invalidate_address,
123 io->vma_invalidate_size);
124
119 if (io->sleeper) 125 if (io->sleeper)
120 wake_up_process(io->sleeper); 126 wake_up_process(io->sleeper);
121 127
@@ -159,6 +165,9 @@ struct dpages {
159 165
160 unsigned context_u; 166 unsigned context_u;
161 void *context_ptr; 167 void *context_ptr;
168
169 void *vma_invalidate_address;
170 unsigned long vma_invalidate_size;
162}; 171};
163 172
164/* 173/*
@@ -377,6 +386,9 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
377 io->sleeper = current; 386 io->sleeper = current;
378 io->client = client; 387 io->client = client;
379 388
389 io->vma_invalidate_address = dp->vma_invalidate_address;
390 io->vma_invalidate_size = dp->vma_invalidate_size;
391
380 dispatch_io(rw, num_regions, where, dp, io, 1); 392 dispatch_io(rw, num_regions, where, dp, io, 1);
381 393
382 while (1) { 394 while (1) {
@@ -415,13 +427,21 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
415 io->callback = fn; 427 io->callback = fn;
416 io->context = context; 428 io->context = context;
417 429
430 io->vma_invalidate_address = dp->vma_invalidate_address;
431 io->vma_invalidate_size = dp->vma_invalidate_size;
432
418 dispatch_io(rw, num_regions, where, dp, io, 0); 433 dispatch_io(rw, num_regions, where, dp, io, 0);
419 return 0; 434 return 0;
420} 435}
421 436
422static int dp_init(struct dm_io_request *io_req, struct dpages *dp) 437static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
438 unsigned long size)
423{ 439{
424 /* Set up dpages based on memory type */ 440 /* Set up dpages based on memory type */
441
442 dp->vma_invalidate_address = NULL;
443 dp->vma_invalidate_size = 0;
444
425 switch (io_req->mem.type) { 445 switch (io_req->mem.type) {
426 case DM_IO_PAGE_LIST: 446 case DM_IO_PAGE_LIST:
427 list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset); 447 list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
@@ -432,6 +452,11 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
432 break; 452 break;
433 453
434 case DM_IO_VMA: 454 case DM_IO_VMA:
455 flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
456 if ((io_req->bi_rw & RW_MASK) == READ) {
457 dp->vma_invalidate_address = io_req->mem.ptr.vma;
458 dp->vma_invalidate_size = size;
459 }
435 vm_dp_init(dp, io_req->mem.ptr.vma); 460 vm_dp_init(dp, io_req->mem.ptr.vma);
436 break; 461 break;
437 462
@@ -460,7 +485,7 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
460 int r; 485 int r;
461 struct dpages dp; 486 struct dpages dp;
462 487
463 r = dp_init(io_req, &dp); 488 r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
464 if (r) 489 if (r)
465 return r; 490 return r;
466 491
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 4cacdad2270a..2e9a3ca37bdd 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -128,6 +128,24 @@ static struct hash_cell *__get_uuid_cell(const char *str)
128 return NULL; 128 return NULL;
129} 129}
130 130
131static struct hash_cell *__get_dev_cell(uint64_t dev)
132{
133 struct mapped_device *md;
134 struct hash_cell *hc;
135
136 md = dm_get_md(huge_decode_dev(dev));
137 if (!md)
138 return NULL;
139
140 hc = dm_get_mdptr(md);
141 if (!hc) {
142 dm_put(md);
143 return NULL;
144 }
145
146 return hc;
147}
148
131/*----------------------------------------------------------------- 149/*-----------------------------------------------------------------
132 * Inserting, removing and renaming a device. 150 * Inserting, removing and renaming a device.
133 *---------------------------------------------------------------*/ 151 *---------------------------------------------------------------*/
@@ -718,25 +736,45 @@ static int dev_create(struct dm_ioctl *param, size_t param_size)
718 */ 736 */
719static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param) 737static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
720{ 738{
721 struct mapped_device *md; 739 struct hash_cell *hc = NULL;
722 void *mdptr = NULL;
723 740
724 if (*param->uuid) 741 if (*param->uuid) {
725 return __get_uuid_cell(param->uuid); 742 if (*param->name || param->dev)
743 return NULL;
726 744
727 if (*param->name) 745 hc = __get_uuid_cell(param->uuid);
728 return __get_name_cell(param->name); 746 if (!hc)
747 return NULL;
748 } else if (*param->name) {
749 if (param->dev)
750 return NULL;
729 751
730 md = dm_get_md(huge_decode_dev(param->dev)); 752 hc = __get_name_cell(param->name);
731 if (!md) 753 if (!hc)
732 goto out; 754 return NULL;
755 } else if (param->dev) {
756 hc = __get_dev_cell(param->dev);
757 if (!hc)
758 return NULL;
759 } else
760 return NULL;
733 761
734 mdptr = dm_get_mdptr(md); 762 /*
735 if (!mdptr) 763 * Sneakily write in both the name and the uuid
736 dm_put(md); 764 * while we have the cell.
765 */
766 strlcpy(param->name, hc->name, sizeof(param->name));
767 if (hc->uuid)
768 strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
769 else
770 param->uuid[0] = '\0';
737 771
738out: 772 if (hc->new_map)
739 return mdptr; 773 param->flags |= DM_INACTIVE_PRESENT_FLAG;
774 else
775 param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
776
777 return hc;
740} 778}
741 779
742static struct mapped_device *find_device(struct dm_ioctl *param) 780static struct mapped_device *find_device(struct dm_ioctl *param)
@@ -746,24 +784,8 @@ static struct mapped_device *find_device(struct dm_ioctl *param)
746 784
747 down_read(&_hash_lock); 785 down_read(&_hash_lock);
748 hc = __find_device_hash_cell(param); 786 hc = __find_device_hash_cell(param);
749 if (hc) { 787 if (hc)
750 md = hc->md; 788 md = hc->md;
751
752 /*
753 * Sneakily write in both the name and the uuid
754 * while we have the cell.
755 */
756 strlcpy(param->name, hc->name, sizeof(param->name));
757 if (hc->uuid)
758 strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
759 else
760 param->uuid[0] = '\0';
761
762 if (hc->new_map)
763 param->flags |= DM_INACTIVE_PRESENT_FLAG;
764 else
765 param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
766 }
767 up_read(&_hash_lock); 789 up_read(&_hash_lock);
768 790
769 return md; 791 return md;
@@ -1402,6 +1424,11 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
1402 goto out; 1424 goto out;
1403 } 1425 }
1404 1426
1427 if (!argc) {
1428 DMWARN("Empty message received.");
1429 goto out;
1430 }
1431
1405 table = dm_get_live_table(md); 1432 table = dm_get_live_table(md);
1406 if (!table) 1433 if (!table)
1407 goto out_argv; 1434 goto out_argv;
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 320401dec104..f82147029636 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -224,8 +224,6 @@ struct kcopyd_job {
224 unsigned int num_dests; 224 unsigned int num_dests;
225 struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS]; 225 struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];
226 226
227 sector_t offset;
228 unsigned int nr_pages;
229 struct page_list *pages; 227 struct page_list *pages;
230 228
231 /* 229 /*
@@ -380,7 +378,7 @@ static int run_io_job(struct kcopyd_job *job)
380 .bi_rw = job->rw, 378 .bi_rw = job->rw,
381 .mem.type = DM_IO_PAGE_LIST, 379 .mem.type = DM_IO_PAGE_LIST,
382 .mem.ptr.pl = job->pages, 380 .mem.ptr.pl = job->pages,
383 .mem.offset = job->offset, 381 .mem.offset = 0,
384 .notify.fn = complete_io, 382 .notify.fn = complete_io,
385 .notify.context = job, 383 .notify.context = job,
386 .client = job->kc->io_client, 384 .client = job->kc->io_client,
@@ -397,10 +395,9 @@ static int run_io_job(struct kcopyd_job *job)
397static int run_pages_job(struct kcopyd_job *job) 395static int run_pages_job(struct kcopyd_job *job)
398{ 396{
399 int r; 397 int r;
398 unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
400 399
401 job->nr_pages = dm_div_up(job->dests[0].count + job->offset, 400 r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
402 PAGE_SIZE >> 9);
403 r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
404 if (!r) { 401 if (!r) {
405 /* this job is ready for io */ 402 /* this job is ready for io */
406 push(&job->kc->io_jobs, job); 403 push(&job->kc->io_jobs, job);
@@ -602,8 +599,6 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
602 job->num_dests = num_dests; 599 job->num_dests = num_dests;
603 memcpy(&job->dests, dests, sizeof(*dests) * num_dests); 600 memcpy(&job->dests, dests, sizeof(*dests) * num_dests);
604 601
605 job->offset = 0;
606 job->nr_pages = 0;
607 job->pages = NULL; 602 job->pages = NULL;
608 603
609 job->fn = fn; 604 job->fn = fn;
@@ -622,6 +617,37 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
622} 617}
623EXPORT_SYMBOL(dm_kcopyd_copy); 618EXPORT_SYMBOL(dm_kcopyd_copy);
624 619
620void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
621 dm_kcopyd_notify_fn fn, void *context)
622{
623 struct kcopyd_job *job;
624
625 job = mempool_alloc(kc->job_pool, GFP_NOIO);
626
627 memset(job, 0, sizeof(struct kcopyd_job));
628 job->kc = kc;
629 job->fn = fn;
630 job->context = context;
631
632 atomic_inc(&kc->nr_jobs);
633
634 return job;
635}
636EXPORT_SYMBOL(dm_kcopyd_prepare_callback);
637
638void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
639{
640 struct kcopyd_job *job = j;
641 struct dm_kcopyd_client *kc = job->kc;
642
643 job->read_err = read_err;
644 job->write_err = write_err;
645
646 push(&kc->complete_jobs, job);
647 wake(kc);
648}
649EXPORT_SYMBOL(dm_kcopyd_do_callback);
650
625/* 651/*
626 * Cancels a kcopyd job, eg. someone might be deactivating a 652 * Cancels a kcopyd job, eg. someone might be deactivating a
627 * mirror. 653 * mirror.
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index aa2e0c374ab3..1021c8986011 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -394,8 +394,7 @@ static int flush_by_group(struct log_c *lc, struct list_head *flush_list)
394 group[count] = fe->region; 394 group[count] = fe->region;
395 count++; 395 count++;
396 396
397 list_del(&fe->list); 397 list_move(&fe->list, &tmp_list);
398 list_add(&fe->list, &tmp_list);
399 398
400 type = fe->type; 399 type = fe->type;
401 if (count >= MAX_FLUSH_GROUP_COUNT) 400 if (count >= MAX_FLUSH_GROUP_COUNT)
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 948e3f4925bf..3b52bb72bd1f 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -197,15 +197,21 @@ EXPORT_SYMBOL(dm_dirty_log_destroy);
197#define MIRROR_DISK_VERSION 2 197#define MIRROR_DISK_VERSION 2
198#define LOG_OFFSET 2 198#define LOG_OFFSET 2
199 199
200struct log_header { 200struct log_header_disk {
201 uint32_t magic; 201 __le32 magic;
202 202
203 /* 203 /*
204 * Simple, incrementing version. no backward 204 * Simple, incrementing version. no backward
205 * compatibility. 205 * compatibility.
206 */ 206 */
207 __le32 version;
208 __le64 nr_regions;
209} __packed;
210
211struct log_header_core {
212 uint32_t magic;
207 uint32_t version; 213 uint32_t version;
208 sector_t nr_regions; 214 uint64_t nr_regions;
209}; 215};
210 216
211struct log_c { 217struct log_c {
@@ -239,10 +245,10 @@ struct log_c {
239 int log_dev_failed; 245 int log_dev_failed;
240 int log_dev_flush_failed; 246 int log_dev_flush_failed;
241 struct dm_dev *log_dev; 247 struct dm_dev *log_dev;
242 struct log_header header; 248 struct log_header_core header;
243 249
244 struct dm_io_region header_location; 250 struct dm_io_region header_location;
245 struct log_header *disk_header; 251 struct log_header_disk *disk_header;
246}; 252};
247 253
248/* 254/*
@@ -251,34 +257,34 @@ struct log_c {
251 */ 257 */
252static inline int log_test_bit(uint32_t *bs, unsigned bit) 258static inline int log_test_bit(uint32_t *bs, unsigned bit)
253{ 259{
254 return test_bit_le(bit, (unsigned long *) bs) ? 1 : 0; 260 return test_bit_le(bit, bs) ? 1 : 0;
255} 261}
256 262
257static inline void log_set_bit(struct log_c *l, 263static inline void log_set_bit(struct log_c *l,
258 uint32_t *bs, unsigned bit) 264 uint32_t *bs, unsigned bit)
259{ 265{
260 __test_and_set_bit_le(bit, (unsigned long *) bs); 266 __set_bit_le(bit, bs);
261 l->touched_cleaned = 1; 267 l->touched_cleaned = 1;
262} 268}
263 269
264static inline void log_clear_bit(struct log_c *l, 270static inline void log_clear_bit(struct log_c *l,
265 uint32_t *bs, unsigned bit) 271 uint32_t *bs, unsigned bit)
266{ 272{
267 __test_and_clear_bit_le(bit, (unsigned long *) bs); 273 __clear_bit_le(bit, bs);
268 l->touched_dirtied = 1; 274 l->touched_dirtied = 1;
269} 275}
270 276
271/*---------------------------------------------------------------- 277/*----------------------------------------------------------------
272 * Header IO 278 * Header IO
273 *--------------------------------------------------------------*/ 279 *--------------------------------------------------------------*/
274static void header_to_disk(struct log_header *core, struct log_header *disk) 280static void header_to_disk(struct log_header_core *core, struct log_header_disk *disk)
275{ 281{
276 disk->magic = cpu_to_le32(core->magic); 282 disk->magic = cpu_to_le32(core->magic);
277 disk->version = cpu_to_le32(core->version); 283 disk->version = cpu_to_le32(core->version);
278 disk->nr_regions = cpu_to_le64(core->nr_regions); 284 disk->nr_regions = cpu_to_le64(core->nr_regions);
279} 285}
280 286
281static void header_from_disk(struct log_header *core, struct log_header *disk) 287static void header_from_disk(struct log_header_core *core, struct log_header_disk *disk)
282{ 288{
283 core->magic = le32_to_cpu(disk->magic); 289 core->magic = le32_to_cpu(disk->magic);
284 core->version = le32_to_cpu(disk->version); 290 core->version = le32_to_cpu(disk->version);
@@ -486,7 +492,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
486 memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size); 492 memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size);
487 lc->sync_count = (sync == NOSYNC) ? region_count : 0; 493 lc->sync_count = (sync == NOSYNC) ? region_count : 0;
488 494
489 lc->recovering_bits = vmalloc(bitset_size); 495 lc->recovering_bits = vzalloc(bitset_size);
490 if (!lc->recovering_bits) { 496 if (!lc->recovering_bits) {
491 DMWARN("couldn't allocate sync bitset"); 497 DMWARN("couldn't allocate sync bitset");
492 vfree(lc->sync_bits); 498 vfree(lc->sync_bits);
@@ -498,7 +504,6 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
498 kfree(lc); 504 kfree(lc);
499 return -ENOMEM; 505 return -ENOMEM;
500 } 506 }
501 memset(lc->recovering_bits, 0, bitset_size);
502 lc->sync_search = 0; 507 lc->sync_search = 0;
503 log->context = lc; 508 log->context = lc;
504 509
@@ -739,8 +744,7 @@ static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
739 return 0; 744 return 0;
740 745
741 do { 746 do {
742 *region = find_next_zero_bit_le( 747 *region = find_next_zero_bit_le(lc->sync_bits,
743 (unsigned long *) lc->sync_bits,
744 lc->region_count, 748 lc->region_count,
745 lc->sync_search); 749 lc->sync_search);
746 lc->sync_search = *region + 1; 750 lc->sync_search = *region + 1;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index c3547016f0f1..5e0090ef4182 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -22,7 +22,6 @@
22#include <linux/atomic.h> 22#include <linux/atomic.h>
23 23
24#define DM_MSG_PREFIX "multipath" 24#define DM_MSG_PREFIX "multipath"
25#define MESG_STR(x) x, sizeof(x)
26#define DM_PG_INIT_DELAY_MSECS 2000 25#define DM_PG_INIT_DELAY_MSECS 2000
27#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1) 26#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
28 27
@@ -505,80 +504,29 @@ static void trigger_event(struct work_struct *work)
505 * <#paths> <#per-path selector args> 504 * <#paths> <#per-path selector args>
506 * [<path> [<arg>]* ]+ ]+ 505 * [<path> [<arg>]* ]+ ]+
507 *---------------------------------------------------------------*/ 506 *---------------------------------------------------------------*/
508struct param { 507static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
509 unsigned min;
510 unsigned max;
511 char *error;
512};
513
514static int read_param(struct param *param, char *str, unsigned *v, char **error)
515{
516 if (!str ||
517 (sscanf(str, "%u", v) != 1) ||
518 (*v < param->min) ||
519 (*v > param->max)) {
520 *error = param->error;
521 return -EINVAL;
522 }
523
524 return 0;
525}
526
527struct arg_set {
528 unsigned argc;
529 char **argv;
530};
531
532static char *shift(struct arg_set *as)
533{
534 char *r;
535
536 if (as->argc) {
537 as->argc--;
538 r = *as->argv;
539 as->argv++;
540 return r;
541 }
542
543 return NULL;
544}
545
546static void consume(struct arg_set *as, unsigned n)
547{
548 BUG_ON (as->argc < n);
549 as->argc -= n;
550 as->argv += n;
551}
552
553static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
554 struct dm_target *ti) 508 struct dm_target *ti)
555{ 509{
556 int r; 510 int r;
557 struct path_selector_type *pst; 511 struct path_selector_type *pst;
558 unsigned ps_argc; 512 unsigned ps_argc;
559 513
560 static struct param _params[] = { 514 static struct dm_arg _args[] = {
561 {0, 1024, "invalid number of path selector args"}, 515 {0, 1024, "invalid number of path selector args"},
562 }; 516 };
563 517
564 pst = dm_get_path_selector(shift(as)); 518 pst = dm_get_path_selector(dm_shift_arg(as));
565 if (!pst) { 519 if (!pst) {
566 ti->error = "unknown path selector type"; 520 ti->error = "unknown path selector type";
567 return -EINVAL; 521 return -EINVAL;
568 } 522 }
569 523
570 r = read_param(_params, shift(as), &ps_argc, &ti->error); 524 r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
571 if (r) { 525 if (r) {
572 dm_put_path_selector(pst); 526 dm_put_path_selector(pst);
573 return -EINVAL; 527 return -EINVAL;
574 } 528 }
575 529
576 if (ps_argc > as->argc) {
577 dm_put_path_selector(pst);
578 ti->error = "not enough arguments for path selector";
579 return -EINVAL;
580 }
581
582 r = pst->create(&pg->ps, ps_argc, as->argv); 530 r = pst->create(&pg->ps, ps_argc, as->argv);
583 if (r) { 531 if (r) {
584 dm_put_path_selector(pst); 532 dm_put_path_selector(pst);
@@ -587,12 +535,12 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
587 } 535 }
588 536
589 pg->ps.type = pst; 537 pg->ps.type = pst;
590 consume(as, ps_argc); 538 dm_consume_args(as, ps_argc);
591 539
592 return 0; 540 return 0;
593} 541}
594 542
595static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps, 543static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
596 struct dm_target *ti) 544 struct dm_target *ti)
597{ 545{
598 int r; 546 int r;
@@ -609,7 +557,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
609 if (!p) 557 if (!p)
610 return ERR_PTR(-ENOMEM); 558 return ERR_PTR(-ENOMEM);
611 559
612 r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table), 560 r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
613 &p->path.dev); 561 &p->path.dev);
614 if (r) { 562 if (r) {
615 ti->error = "error getting device"; 563 ti->error = "error getting device";
@@ -660,16 +608,16 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
660 return ERR_PTR(r); 608 return ERR_PTR(r);
661} 609}
662 610
663static struct priority_group *parse_priority_group(struct arg_set *as, 611static struct priority_group *parse_priority_group(struct dm_arg_set *as,
664 struct multipath *m) 612 struct multipath *m)
665{ 613{
666 static struct param _params[] = { 614 static struct dm_arg _args[] = {
667 {1, 1024, "invalid number of paths"}, 615 {1, 1024, "invalid number of paths"},
668 {0, 1024, "invalid number of selector args"} 616 {0, 1024, "invalid number of selector args"}
669 }; 617 };
670 618
671 int r; 619 int r;
672 unsigned i, nr_selector_args, nr_params; 620 unsigned i, nr_selector_args, nr_args;
673 struct priority_group *pg; 621 struct priority_group *pg;
674 struct dm_target *ti = m->ti; 622 struct dm_target *ti = m->ti;
675 623
@@ -693,26 +641,26 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
693 /* 641 /*
694 * read the paths 642 * read the paths
695 */ 643 */
696 r = read_param(_params, shift(as), &pg->nr_pgpaths, &ti->error); 644 r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
697 if (r) 645 if (r)
698 goto bad; 646 goto bad;
699 647
700 r = read_param(_params + 1, shift(as), &nr_selector_args, &ti->error); 648 r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
701 if (r) 649 if (r)
702 goto bad; 650 goto bad;
703 651
704 nr_params = 1 + nr_selector_args; 652 nr_args = 1 + nr_selector_args;
705 for (i = 0; i < pg->nr_pgpaths; i++) { 653 for (i = 0; i < pg->nr_pgpaths; i++) {
706 struct pgpath *pgpath; 654 struct pgpath *pgpath;
707 struct arg_set path_args; 655 struct dm_arg_set path_args;
708 656
709 if (as->argc < nr_params) { 657 if (as->argc < nr_args) {
710 ti->error = "not enough path parameters"; 658 ti->error = "not enough path parameters";
711 r = -EINVAL; 659 r = -EINVAL;
712 goto bad; 660 goto bad;
713 } 661 }
714 662
715 path_args.argc = nr_params; 663 path_args.argc = nr_args;
716 path_args.argv = as->argv; 664 path_args.argv = as->argv;
717 665
718 pgpath = parse_path(&path_args, &pg->ps, ti); 666 pgpath = parse_path(&path_args, &pg->ps, ti);
@@ -723,7 +671,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
723 671
724 pgpath->pg = pg; 672 pgpath->pg = pg;
725 list_add_tail(&pgpath->list, &pg->pgpaths); 673 list_add_tail(&pgpath->list, &pg->pgpaths);
726 consume(as, nr_params); 674 dm_consume_args(as, nr_args);
727 } 675 }
728 676
729 return pg; 677 return pg;
@@ -733,28 +681,23 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
733 return ERR_PTR(r); 681 return ERR_PTR(r);
734} 682}
735 683
736static int parse_hw_handler(struct arg_set *as, struct multipath *m) 684static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
737{ 685{
738 unsigned hw_argc; 686 unsigned hw_argc;
739 int ret; 687 int ret;
740 struct dm_target *ti = m->ti; 688 struct dm_target *ti = m->ti;
741 689
742 static struct param _params[] = { 690 static struct dm_arg _args[] = {
743 {0, 1024, "invalid number of hardware handler args"}, 691 {0, 1024, "invalid number of hardware handler args"},
744 }; 692 };
745 693
746 if (read_param(_params, shift(as), &hw_argc, &ti->error)) 694 if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
747 return -EINVAL; 695 return -EINVAL;
748 696
749 if (!hw_argc) 697 if (!hw_argc)
750 return 0; 698 return 0;
751 699
752 if (hw_argc > as->argc) { 700 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
753 ti->error = "not enough arguments for hardware handler";
754 return -EINVAL;
755 }
756
757 m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
758 request_module("scsi_dh_%s", m->hw_handler_name); 701 request_module("scsi_dh_%s", m->hw_handler_name);
759 if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { 702 if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
760 ti->error = "unknown hardware handler type"; 703 ti->error = "unknown hardware handler type";
@@ -778,7 +721,7 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
778 for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1) 721 for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
779 j = sprintf(p, "%s", as->argv[i]); 722 j = sprintf(p, "%s", as->argv[i]);
780 } 723 }
781 consume(as, hw_argc - 1); 724 dm_consume_args(as, hw_argc - 1);
782 725
783 return 0; 726 return 0;
784fail: 727fail:
@@ -787,20 +730,20 @@ fail:
787 return ret; 730 return ret;
788} 731}
789 732
790static int parse_features(struct arg_set *as, struct multipath *m) 733static int parse_features(struct dm_arg_set *as, struct multipath *m)
791{ 734{
792 int r; 735 int r;
793 unsigned argc; 736 unsigned argc;
794 struct dm_target *ti = m->ti; 737 struct dm_target *ti = m->ti;
795 const char *param_name; 738 const char *arg_name;
796 739
797 static struct param _params[] = { 740 static struct dm_arg _args[] = {
798 {0, 5, "invalid number of feature args"}, 741 {0, 5, "invalid number of feature args"},
799 {1, 50, "pg_init_retries must be between 1 and 50"}, 742 {1, 50, "pg_init_retries must be between 1 and 50"},
800 {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"}, 743 {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
801 }; 744 };
802 745
803 r = read_param(_params, shift(as), &argc, &ti->error); 746 r = dm_read_arg_group(_args, as, &argc, &ti->error);
804 if (r) 747 if (r)
805 return -EINVAL; 748 return -EINVAL;
806 749
@@ -808,26 +751,24 @@ static int parse_features(struct arg_set *as, struct multipath *m)
808 return 0; 751 return 0;
809 752
810 do { 753 do {
811 param_name = shift(as); 754 arg_name = dm_shift_arg(as);
812 argc--; 755 argc--;
813 756
814 if (!strnicmp(param_name, MESG_STR("queue_if_no_path"))) { 757 if (!strcasecmp(arg_name, "queue_if_no_path")) {
815 r = queue_if_no_path(m, 1, 0); 758 r = queue_if_no_path(m, 1, 0);
816 continue; 759 continue;
817 } 760 }
818 761
819 if (!strnicmp(param_name, MESG_STR("pg_init_retries")) && 762 if (!strcasecmp(arg_name, "pg_init_retries") &&
820 (argc >= 1)) { 763 (argc >= 1)) {
821 r = read_param(_params + 1, shift(as), 764 r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
822 &m->pg_init_retries, &ti->error);
823 argc--; 765 argc--;
824 continue; 766 continue;
825 } 767 }
826 768
827 if (!strnicmp(param_name, MESG_STR("pg_init_delay_msecs")) && 769 if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
828 (argc >= 1)) { 770 (argc >= 1)) {
829 r = read_param(_params + 2, shift(as), 771 r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
830 &m->pg_init_delay_msecs, &ti->error);
831 argc--; 772 argc--;
832 continue; 773 continue;
833 } 774 }
@@ -842,15 +783,15 @@ static int parse_features(struct arg_set *as, struct multipath *m)
842static int multipath_ctr(struct dm_target *ti, unsigned int argc, 783static int multipath_ctr(struct dm_target *ti, unsigned int argc,
843 char **argv) 784 char **argv)
844{ 785{
845 /* target parameters */ 786 /* target arguments */
846 static struct param _params[] = { 787 static struct dm_arg _args[] = {
847 {0, 1024, "invalid number of priority groups"}, 788 {0, 1024, "invalid number of priority groups"},
848 {0, 1024, "invalid initial priority group number"}, 789 {0, 1024, "invalid initial priority group number"},
849 }; 790 };
850 791
851 int r; 792 int r;
852 struct multipath *m; 793 struct multipath *m;
853 struct arg_set as; 794 struct dm_arg_set as;
854 unsigned pg_count = 0; 795 unsigned pg_count = 0;
855 unsigned next_pg_num; 796 unsigned next_pg_num;
856 797
@@ -871,11 +812,11 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
871 if (r) 812 if (r)
872 goto bad; 813 goto bad;
873 814
874 r = read_param(_params, shift(&as), &m->nr_priority_groups, &ti->error); 815 r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
875 if (r) 816 if (r)
876 goto bad; 817 goto bad;
877 818
878 r = read_param(_params + 1, shift(&as), &next_pg_num, &ti->error); 819 r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
879 if (r) 820 if (r)
880 goto bad; 821 goto bad;
881 822
@@ -1505,10 +1446,10 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
1505 } 1446 }
1506 1447
1507 if (argc == 1) { 1448 if (argc == 1) {
1508 if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) { 1449 if (!strcasecmp(argv[0], "queue_if_no_path")) {
1509 r = queue_if_no_path(m, 1, 0); 1450 r = queue_if_no_path(m, 1, 0);
1510 goto out; 1451 goto out;
1511 } else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) { 1452 } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1512 r = queue_if_no_path(m, 0, 0); 1453 r = queue_if_no_path(m, 0, 0);
1513 goto out; 1454 goto out;
1514 } 1455 }
@@ -1519,18 +1460,18 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
1519 goto out; 1460 goto out;
1520 } 1461 }
1521 1462
1522 if (!strnicmp(argv[0], MESG_STR("disable_group"))) { 1463 if (!strcasecmp(argv[0], "disable_group")) {
1523 r = bypass_pg_num(m, argv[1], 1); 1464 r = bypass_pg_num(m, argv[1], 1);
1524 goto out; 1465 goto out;
1525 } else if (!strnicmp(argv[0], MESG_STR("enable_group"))) { 1466 } else if (!strcasecmp(argv[0], "enable_group")) {
1526 r = bypass_pg_num(m, argv[1], 0); 1467 r = bypass_pg_num(m, argv[1], 0);
1527 goto out; 1468 goto out;
1528 } else if (!strnicmp(argv[0], MESG_STR("switch_group"))) { 1469 } else if (!strcasecmp(argv[0], "switch_group")) {
1529 r = switch_pg_num(m, argv[1]); 1470 r = switch_pg_num(m, argv[1]);
1530 goto out; 1471 goto out;
1531 } else if (!strnicmp(argv[0], MESG_STR("reinstate_path"))) 1472 } else if (!strcasecmp(argv[0], "reinstate_path"))
1532 action = reinstate_path; 1473 action = reinstate_path;
1533 else if (!strnicmp(argv[0], MESG_STR("fail_path"))) 1474 else if (!strcasecmp(argv[0], "fail_path"))
1534 action = fail_path; 1475 action = fail_path;
1535 else { 1476 else {
1536 DMWARN("Unrecognised multipath message received."); 1477 DMWARN("Unrecognised multipath message received.");
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index e5d8904fc8f6..a002dd85db1e 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -8,19 +8,19 @@
8#include <linux/slab.h> 8#include <linux/slab.h>
9 9
10#include "md.h" 10#include "md.h"
11#include "raid1.h"
11#include "raid5.h" 12#include "raid5.h"
12#include "dm.h"
13#include "bitmap.h" 13#include "bitmap.h"
14 14
15#include <linux/device-mapper.h>
16
15#define DM_MSG_PREFIX "raid" 17#define DM_MSG_PREFIX "raid"
16 18
17/* 19/*
18 * If the MD doesn't support MD_SYNC_STATE_FORCED yet, then 20 * The following flags are used by dm-raid.c to set up the array state.
19 * make it so the flag doesn't set anything. 21 * They must be cleared before md_run is called.
20 */ 22 */
21#ifndef MD_SYNC_STATE_FORCED 23#define FirstUse 10 /* rdev flag */
22#define MD_SYNC_STATE_FORCED 0
23#endif
24 24
25struct raid_dev { 25struct raid_dev {
26 /* 26 /*
@@ -43,14 +43,15 @@ struct raid_dev {
43/* 43/*
44 * Flags for rs->print_flags field. 44 * Flags for rs->print_flags field.
45 */ 45 */
46#define DMPF_DAEMON_SLEEP 0x1 46#define DMPF_SYNC 0x1
47#define DMPF_MAX_WRITE_BEHIND 0x2 47#define DMPF_NOSYNC 0x2
48#define DMPF_SYNC 0x4 48#define DMPF_REBUILD 0x4
49#define DMPF_NOSYNC 0x8 49#define DMPF_DAEMON_SLEEP 0x8
50#define DMPF_STRIPE_CACHE 0x10 50#define DMPF_MIN_RECOVERY_RATE 0x10
51#define DMPF_MIN_RECOVERY_RATE 0x20 51#define DMPF_MAX_RECOVERY_RATE 0x20
52#define DMPF_MAX_RECOVERY_RATE 0x40 52#define DMPF_MAX_WRITE_BEHIND 0x40
53 53#define DMPF_STRIPE_CACHE 0x80
54#define DMPF_REGION_SIZE 0X100
54struct raid_set { 55struct raid_set {
55 struct dm_target *ti; 56 struct dm_target *ti;
56 57
@@ -72,6 +73,7 @@ static struct raid_type {
72 const unsigned level; /* RAID level. */ 73 const unsigned level; /* RAID level. */
73 const unsigned algorithm; /* RAID algorithm. */ 74 const unsigned algorithm; /* RAID algorithm. */
74} raid_types[] = { 75} raid_types[] = {
76 {"raid1", "RAID1 (mirroring)", 0, 2, 1, 0 /* NONE */},
75 {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, 77 {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0},
76 {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC}, 78 {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
77 {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC}, 79 {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
@@ -105,7 +107,8 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
105 } 107 }
106 108
107 sectors_per_dev = ti->len; 109 sectors_per_dev = ti->len;
108 if (sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) { 110 if ((raid_type->level > 1) &&
111 sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
109 ti->error = "Target length not divisible by number of data devices"; 112 ti->error = "Target length not divisible by number of data devices";
110 return ERR_PTR(-EINVAL); 113 return ERR_PTR(-EINVAL);
111 } 114 }
@@ -147,9 +150,16 @@ static void context_free(struct raid_set *rs)
147{ 150{
148 int i; 151 int i;
149 152
150 for (i = 0; i < rs->md.raid_disks; i++) 153 for (i = 0; i < rs->md.raid_disks; i++) {
154 if (rs->dev[i].meta_dev)
155 dm_put_device(rs->ti, rs->dev[i].meta_dev);
156 if (rs->dev[i].rdev.sb_page)
157 put_page(rs->dev[i].rdev.sb_page);
158 rs->dev[i].rdev.sb_page = NULL;
159 rs->dev[i].rdev.sb_loaded = 0;
151 if (rs->dev[i].data_dev) 160 if (rs->dev[i].data_dev)
152 dm_put_device(rs->ti, rs->dev[i].data_dev); 161 dm_put_device(rs->ti, rs->dev[i].data_dev);
162 }
153 163
154 kfree(rs); 164 kfree(rs);
155} 165}
@@ -159,7 +169,16 @@ static void context_free(struct raid_set *rs)
159 * <meta_dev>: meta device name or '-' if missing 169 * <meta_dev>: meta device name or '-' if missing
160 * <data_dev>: data device name or '-' if missing 170 * <data_dev>: data device name or '-' if missing
161 * 171 *
162 * This code parses those words. 172 * The following are permitted:
173 * - -
174 * - <data_dev>
175 * <meta_dev> <data_dev>
176 *
177 * The following is not allowed:
178 * <meta_dev> -
179 *
180 * This code parses those words. If there is a failure,
181 * the caller must use context_free to unwind the operations.
163 */ 182 */
164static int dev_parms(struct raid_set *rs, char **argv) 183static int dev_parms(struct raid_set *rs, char **argv)
165{ 184{
@@ -182,8 +201,16 @@ static int dev_parms(struct raid_set *rs, char **argv)
182 rs->dev[i].rdev.mddev = &rs->md; 201 rs->dev[i].rdev.mddev = &rs->md;
183 202
184 if (strcmp(argv[0], "-")) { 203 if (strcmp(argv[0], "-")) {
185 rs->ti->error = "Metadata devices not supported"; 204 ret = dm_get_device(rs->ti, argv[0],
186 return -EINVAL; 205 dm_table_get_mode(rs->ti->table),
206 &rs->dev[i].meta_dev);
207 rs->ti->error = "RAID metadata device lookup failure";
208 if (ret)
209 return ret;
210
211 rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
212 if (!rs->dev[i].rdev.sb_page)
213 return -ENOMEM;
187 } 214 }
188 215
189 if (!strcmp(argv[1], "-")) { 216 if (!strcmp(argv[1], "-")) {
@@ -193,6 +220,10 @@ static int dev_parms(struct raid_set *rs, char **argv)
193 return -EINVAL; 220 return -EINVAL;
194 } 221 }
195 222
223 rs->ti->error = "No data device supplied with metadata device";
224 if (rs->dev[i].meta_dev)
225 return -EINVAL;
226
196 continue; 227 continue;
197 } 228 }
198 229
@@ -204,6 +235,10 @@ static int dev_parms(struct raid_set *rs, char **argv)
204 return ret; 235 return ret;
205 } 236 }
206 237
238 if (rs->dev[i].meta_dev) {
239 metadata_available = 1;
240 rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
241 }
207 rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev; 242 rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
208 list_add(&rs->dev[i].rdev.same_set, &rs->md.disks); 243 list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
209 if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) 244 if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
@@ -235,33 +270,109 @@ static int dev_parms(struct raid_set *rs, char **argv)
235} 270}
236 271
237/* 272/*
273 * validate_region_size
274 * @rs
275 * @region_size: region size in sectors. If 0, pick a size (4MiB default).
276 *
277 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
278 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
279 *
280 * Returns: 0 on success, -EINVAL on failure.
281 */
282static int validate_region_size(struct raid_set *rs, unsigned long region_size)
283{
284 unsigned long min_region_size = rs->ti->len / (1 << 21);
285
286 if (!region_size) {
287 /*
288 * Choose a reasonable default. All figures in sectors.
289 */
290 if (min_region_size > (1 << 13)) {
291 DMINFO("Choosing default region size of %lu sectors",
292 region_size);
293 region_size = min_region_size;
294 } else {
295 DMINFO("Choosing default region size of 4MiB");
296 region_size = 1 << 13; /* sectors */
297 }
298 } else {
299 /*
300 * Validate user-supplied value.
301 */
302 if (region_size > rs->ti->len) {
303 rs->ti->error = "Supplied region size is too large";
304 return -EINVAL;
305 }
306
307 if (region_size < min_region_size) {
308 DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
309 region_size, min_region_size);
310 rs->ti->error = "Supplied region size is too small";
311 return -EINVAL;
312 }
313
314 if (!is_power_of_2(region_size)) {
315 rs->ti->error = "Region size is not a power of 2";
316 return -EINVAL;
317 }
318
319 if (region_size < rs->md.chunk_sectors) {
320 rs->ti->error = "Region size is smaller than the chunk size";
321 return -EINVAL;
322 }
323 }
324
325 /*
326 * Convert sectors to bytes.
327 */
328 rs->md.bitmap_info.chunksize = (region_size << 9);
329
330 return 0;
331}
332
333/*
238 * Possible arguments are... 334 * Possible arguments are...
239 * RAID456:
240 * <chunk_size> [optional_args] 335 * <chunk_size> [optional_args]
241 * 336 *
242 * Optional args: 337 * Argument definitions
243 * [[no]sync] Force or prevent recovery of the entire array 338 * <chunk_size> The number of sectors per disk that
339 * will form the "stripe"
340 * [[no]sync] Force or prevent recovery of the
341 * entire array
244 * [rebuild <idx>] Rebuild the drive indicated by the index 342 * [rebuild <idx>] Rebuild the drive indicated by the index
245 * [daemon_sleep <ms>] Time between bitmap daemon work to clear bits 343 * [daemon_sleep <ms>] Time between bitmap daemon work to
344 * clear bits
246 * [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization 345 * [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization
247 * [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization 346 * [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization
347 * [write_mostly <idx>] Indicate a write mostly drive via index
248 * [max_write_behind <sectors>] See '-write-behind=' (man mdadm) 348 * [max_write_behind <sectors>] See '-write-behind=' (man mdadm)
249 * [stripe_cache <sectors>] Stripe cache size for higher RAIDs 349 * [stripe_cache <sectors>] Stripe cache size for higher RAIDs
350 * [region_size <sectors>] Defines granularity of bitmap
250 */ 351 */
251static int parse_raid_params(struct raid_set *rs, char **argv, 352static int parse_raid_params(struct raid_set *rs, char **argv,
252 unsigned num_raid_params) 353 unsigned num_raid_params)
253{ 354{
254 unsigned i, rebuild_cnt = 0; 355 unsigned i, rebuild_cnt = 0;
255 unsigned long value; 356 unsigned long value, region_size = 0;
256 char *key; 357 char *key;
257 358
258 /* 359 /*
259 * First, parse the in-order required arguments 360 * First, parse the in-order required arguments
361 * "chunk_size" is the only argument of this type.
260 */ 362 */
261 if ((strict_strtoul(argv[0], 10, &value) < 0) || 363 if ((strict_strtoul(argv[0], 10, &value) < 0)) {
262 !is_power_of_2(value) || (value < 8)) {
263 rs->ti->error = "Bad chunk size"; 364 rs->ti->error = "Bad chunk size";
264 return -EINVAL; 365 return -EINVAL;
366 } else if (rs->raid_type->level == 1) {
367 if (value)
368 DMERR("Ignoring chunk size parameter for RAID 1");
369 value = 0;
370 } else if (!is_power_of_2(value)) {
371 rs->ti->error = "Chunk size must be a power of 2";
372 return -EINVAL;
373 } else if (value < 8) {
374 rs->ti->error = "Chunk size value is too small";
375 return -EINVAL;
265 } 376 }
266 377
267 rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; 378 rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
@@ -269,22 +380,39 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
269 num_raid_params--; 380 num_raid_params--;
270 381
271 /* 382 /*
272 * Second, parse the unordered optional arguments 383 * We set each individual device as In_sync with a completed
384 * 'recovery_offset'. If there has been a device failure or
385 * replacement then one of the following cases applies:
386 *
387 * 1) User specifies 'rebuild'.
388 * - Device is reset when param is read.
389 * 2) A new device is supplied.
390 * - No matching superblock found, resets device.
391 * 3) Device failure was transient and returns on reload.
392 * - Failure noticed, resets device for bitmap replay.
393 * 4) Device hadn't completed recovery after previous failure.
394 * - Superblock is read and overrides recovery_offset.
395 *
396 * What is found in the superblocks of the devices is always
397 * authoritative, unless 'rebuild' or '[no]sync' was specified.
273 */ 398 */
274 for (i = 0; i < rs->md.raid_disks; i++) 399 for (i = 0; i < rs->md.raid_disks; i++) {
275 set_bit(In_sync, &rs->dev[i].rdev.flags); 400 set_bit(In_sync, &rs->dev[i].rdev.flags);
401 rs->dev[i].rdev.recovery_offset = MaxSector;
402 }
276 403
404 /*
405 * Second, parse the unordered optional arguments
406 */
277 for (i = 0; i < num_raid_params; i++) { 407 for (i = 0; i < num_raid_params; i++) {
278 if (!strcmp(argv[i], "nosync")) { 408 if (!strcasecmp(argv[i], "nosync")) {
279 rs->md.recovery_cp = MaxSector; 409 rs->md.recovery_cp = MaxSector;
280 rs->print_flags |= DMPF_NOSYNC; 410 rs->print_flags |= DMPF_NOSYNC;
281 rs->md.flags |= MD_SYNC_STATE_FORCED;
282 continue; 411 continue;
283 } 412 }
284 if (!strcmp(argv[i], "sync")) { 413 if (!strcasecmp(argv[i], "sync")) {
285 rs->md.recovery_cp = 0; 414 rs->md.recovery_cp = 0;
286 rs->print_flags |= DMPF_SYNC; 415 rs->print_flags |= DMPF_SYNC;
287 rs->md.flags |= MD_SYNC_STATE_FORCED;
288 continue; 416 continue;
289 } 417 }
290 418
@@ -300,9 +428,13 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
300 return -EINVAL; 428 return -EINVAL;
301 } 429 }
302 430
303 if (!strcmp(key, "rebuild")) { 431 if (!strcasecmp(key, "rebuild")) {
304 if (++rebuild_cnt > rs->raid_type->parity_devs) { 432 rebuild_cnt++;
305 rs->ti->error = "Too many rebuild drives given"; 433 if (((rs->raid_type->level != 1) &&
434 (rebuild_cnt > rs->raid_type->parity_devs)) ||
435 ((rs->raid_type->level == 1) &&
436 (rebuild_cnt > (rs->md.raid_disks - 1)))) {
437 rs->ti->error = "Too many rebuild devices specified for given RAID type";
306 return -EINVAL; 438 return -EINVAL;
307 } 439 }
308 if (value > rs->md.raid_disks) { 440 if (value > rs->md.raid_disks) {
@@ -311,7 +443,22 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
311 } 443 }
312 clear_bit(In_sync, &rs->dev[value].rdev.flags); 444 clear_bit(In_sync, &rs->dev[value].rdev.flags);
313 rs->dev[value].rdev.recovery_offset = 0; 445 rs->dev[value].rdev.recovery_offset = 0;
314 } else if (!strcmp(key, "max_write_behind")) { 446 rs->print_flags |= DMPF_REBUILD;
447 } else if (!strcasecmp(key, "write_mostly")) {
448 if (rs->raid_type->level != 1) {
449 rs->ti->error = "write_mostly option is only valid for RAID1";
450 return -EINVAL;
451 }
452 if (value > rs->md.raid_disks) {
453 rs->ti->error = "Invalid write_mostly drive index given";
454 return -EINVAL;
455 }
456 set_bit(WriteMostly, &rs->dev[value].rdev.flags);
457 } else if (!strcasecmp(key, "max_write_behind")) {
458 if (rs->raid_type->level != 1) {
459 rs->ti->error = "max_write_behind option is only valid for RAID1";
460 return -EINVAL;
461 }
315 rs->print_flags |= DMPF_MAX_WRITE_BEHIND; 462 rs->print_flags |= DMPF_MAX_WRITE_BEHIND;
316 463
317 /* 464 /*
@@ -324,14 +471,14 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
324 return -EINVAL; 471 return -EINVAL;
325 } 472 }
326 rs->md.bitmap_info.max_write_behind = value; 473 rs->md.bitmap_info.max_write_behind = value;
327 } else if (!strcmp(key, "daemon_sleep")) { 474 } else if (!strcasecmp(key, "daemon_sleep")) {
328 rs->print_flags |= DMPF_DAEMON_SLEEP; 475 rs->print_flags |= DMPF_DAEMON_SLEEP;
329 if (!value || (value > MAX_SCHEDULE_TIMEOUT)) { 476 if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
330 rs->ti->error = "daemon sleep period out of range"; 477 rs->ti->error = "daemon sleep period out of range";
331 return -EINVAL; 478 return -EINVAL;
332 } 479 }
333 rs->md.bitmap_info.daemon_sleep = value; 480 rs->md.bitmap_info.daemon_sleep = value;
334 } else if (!strcmp(key, "stripe_cache")) { 481 } else if (!strcasecmp(key, "stripe_cache")) {
335 rs->print_flags |= DMPF_STRIPE_CACHE; 482 rs->print_flags |= DMPF_STRIPE_CACHE;
336 483
337 /* 484 /*
@@ -348,20 +495,23 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
348 rs->ti->error = "Bad stripe_cache size"; 495 rs->ti->error = "Bad stripe_cache size";
349 return -EINVAL; 496 return -EINVAL;
350 } 497 }
351 } else if (!strcmp(key, "min_recovery_rate")) { 498 } else if (!strcasecmp(key, "min_recovery_rate")) {
352 rs->print_flags |= DMPF_MIN_RECOVERY_RATE; 499 rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
353 if (value > INT_MAX) { 500 if (value > INT_MAX) {
354 rs->ti->error = "min_recovery_rate out of range"; 501 rs->ti->error = "min_recovery_rate out of range";
355 return -EINVAL; 502 return -EINVAL;
356 } 503 }
357 rs->md.sync_speed_min = (int)value; 504 rs->md.sync_speed_min = (int)value;
358 } else if (!strcmp(key, "max_recovery_rate")) { 505 } else if (!strcasecmp(key, "max_recovery_rate")) {
359 rs->print_flags |= DMPF_MAX_RECOVERY_RATE; 506 rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
360 if (value > INT_MAX) { 507 if (value > INT_MAX) {
361 rs->ti->error = "max_recovery_rate out of range"; 508 rs->ti->error = "max_recovery_rate out of range";
362 return -EINVAL; 509 return -EINVAL;
363 } 510 }
364 rs->md.sync_speed_max = (int)value; 511 rs->md.sync_speed_max = (int)value;
512 } else if (!strcasecmp(key, "region_size")) {
513 rs->print_flags |= DMPF_REGION_SIZE;
514 region_size = value;
365 } else { 515 } else {
366 DMERR("Unable to parse RAID parameter: %s", key); 516 DMERR("Unable to parse RAID parameter: %s", key);
367 rs->ti->error = "Unable to parse RAID parameters"; 517 rs->ti->error = "Unable to parse RAID parameters";
@@ -369,6 +519,19 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
369 } 519 }
370 } 520 }
371 521
522 if (validate_region_size(rs, region_size))
523 return -EINVAL;
524
525 if (rs->md.chunk_sectors)
526 rs->ti->split_io = rs->md.chunk_sectors;
527 else
528 rs->ti->split_io = region_size;
529
530 if (rs->md.chunk_sectors)
531 rs->ti->split_io = rs->md.chunk_sectors;
532 else
533 rs->ti->split_io = region_size;
534
372 /* Assume there are no metadata devices until the drives are parsed */ 535 /* Assume there are no metadata devices until the drives are parsed */
373 rs->md.persistent = 0; 536 rs->md.persistent = 0;
374 rs->md.external = 1; 537 rs->md.external = 1;
@@ -387,17 +550,351 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
387{ 550{
388 struct raid_set *rs = container_of(cb, struct raid_set, callbacks); 551 struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
389 552
553 if (rs->raid_type->level == 1)
554 return md_raid1_congested(&rs->md, bits);
555
390 return md_raid5_congested(&rs->md, bits); 556 return md_raid5_congested(&rs->md, bits);
391} 557}
392 558
393/* 559/*
560 * This structure is never routinely used by userspace, unlike md superblocks.
561 * Devices with this superblock should only ever be accessed via device-mapper.
562 */
563#define DM_RAID_MAGIC 0x64526D44
564struct dm_raid_superblock {
565 __le32 magic; /* "DmRd" */
566 __le32 features; /* Used to indicate possible future changes */
567
568 __le32 num_devices; /* Number of devices in this array. (Max 64) */
569 __le32 array_position; /* The position of this drive in the array */
570
571 __le64 events; /* Incremented by md when superblock updated */
572 __le64 failed_devices; /* Bit field of devices to indicate failures */
573
574 /*
575 * This offset tracks the progress of the repair or replacement of
576 * an individual drive.
577 */
578 __le64 disk_recovery_offset;
579
580 /*
581 * This offset tracks the progress of the initial array
582 * synchronisation/parity calculation.
583 */
584 __le64 array_resync_offset;
585
586 /*
587 * RAID characteristics
588 */
589 __le32 level;
590 __le32 layout;
591 __le32 stripe_sectors;
592
593 __u8 pad[452]; /* Round struct to 512 bytes. */
594 /* Always set to 0 when writing. */
595} __packed;
596
597static int read_disk_sb(mdk_rdev_t *rdev, int size)
598{
599 BUG_ON(!rdev->sb_page);
600
601 if (rdev->sb_loaded)
602 return 0;
603
604 if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
605 DMERR("Failed to read device superblock");
606 return -EINVAL;
607 }
608
609 rdev->sb_loaded = 1;
610
611 return 0;
612}
613
614static void super_sync(mddev_t *mddev, mdk_rdev_t *rdev)
615{
616 mdk_rdev_t *r, *t;
617 uint64_t failed_devices;
618 struct dm_raid_superblock *sb;
619
620 sb = page_address(rdev->sb_page);
621 failed_devices = le64_to_cpu(sb->failed_devices);
622
623 rdev_for_each(r, t, mddev)
624 if ((r->raid_disk >= 0) && test_bit(Faulty, &r->flags))
625 failed_devices |= (1ULL << r->raid_disk);
626
627 memset(sb, 0, sizeof(*sb));
628
629 sb->magic = cpu_to_le32(DM_RAID_MAGIC);
630 sb->features = cpu_to_le32(0); /* No features yet */
631
632 sb->num_devices = cpu_to_le32(mddev->raid_disks);
633 sb->array_position = cpu_to_le32(rdev->raid_disk);
634
635 sb->events = cpu_to_le64(mddev->events);
636 sb->failed_devices = cpu_to_le64(failed_devices);
637
638 sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
639 sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
640
641 sb->level = cpu_to_le32(mddev->level);
642 sb->layout = cpu_to_le32(mddev->layout);
643 sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
644}
645
646/*
647 * super_load
648 *
649 * This function creates a superblock if one is not found on the device
650 * and will decide which superblock to use if there's a choice.
651 *
652 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
653 */
654static int super_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev)
655{
656 int ret;
657 struct dm_raid_superblock *sb;
658 struct dm_raid_superblock *refsb;
659 uint64_t events_sb, events_refsb;
660
661 rdev->sb_start = 0;
662 rdev->sb_size = sizeof(*sb);
663
664 ret = read_disk_sb(rdev, rdev->sb_size);
665 if (ret)
666 return ret;
667
668 sb = page_address(rdev->sb_page);
669 if (sb->magic != cpu_to_le32(DM_RAID_MAGIC)) {
670 super_sync(rdev->mddev, rdev);
671
672 set_bit(FirstUse, &rdev->flags);
673
674 /* Force writing of superblocks to disk */
675 set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);
676
677 /* Any superblock is better than none, choose that if given */
678 return refdev ? 0 : 1;
679 }
680
681 if (!refdev)
682 return 1;
683
684 events_sb = le64_to_cpu(sb->events);
685
686 refsb = page_address(refdev->sb_page);
687 events_refsb = le64_to_cpu(refsb->events);
688
689 return (events_sb > events_refsb) ? 1 : 0;
690}
691
692static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev)
693{
694 int role;
695 struct raid_set *rs = container_of(mddev, struct raid_set, md);
696 uint64_t events_sb;
697 uint64_t failed_devices;
698 struct dm_raid_superblock *sb;
699 uint32_t new_devs = 0;
700 uint32_t rebuilds = 0;
701 mdk_rdev_t *r, *t;
702 struct dm_raid_superblock *sb2;
703
704 sb = page_address(rdev->sb_page);
705 events_sb = le64_to_cpu(sb->events);
706 failed_devices = le64_to_cpu(sb->failed_devices);
707
708 /*
709 * Initialise to 1 if this is a new superblock.
710 */
711 mddev->events = events_sb ? : 1;
712
713 /*
714 * Reshaping is not currently allowed
715 */
716 if ((le32_to_cpu(sb->level) != mddev->level) ||
717 (le32_to_cpu(sb->layout) != mddev->layout) ||
718 (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) {
719 DMERR("Reshaping arrays not yet supported.");
720 return -EINVAL;
721 }
722
723 /* We can only change the number of devices in RAID1 right now */
724 if ((rs->raid_type->level != 1) &&
725 (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
726 DMERR("Reshaping arrays not yet supported.");
727 return -EINVAL;
728 }
729
730 if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)))
731 mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
732
733 /*
734 * During load, we set FirstUse if a new superblock was written.
735 * There are two reasons we might not have a superblock:
736 * 1) The array is brand new - in which case, all of the
737 * devices must have their In_sync bit set. Also,
738 * recovery_cp must be 0, unless forced.
739 * 2) This is a new device being added to an old array
740 * and the new device needs to be rebuilt - in which
741 * case the In_sync bit will /not/ be set and
742 * recovery_cp must be MaxSector.
743 */
744 rdev_for_each(r, t, mddev) {
745 if (!test_bit(In_sync, &r->flags)) {
746 if (!test_bit(FirstUse, &r->flags))
747 DMERR("Superblock area of "
748 "rebuild device %d should have been "
749 "cleared.", r->raid_disk);
750 set_bit(FirstUse, &r->flags);
751 rebuilds++;
752 } else if (test_bit(FirstUse, &r->flags))
753 new_devs++;
754 }
755
756 if (!rebuilds) {
757 if (new_devs == mddev->raid_disks) {
758 DMINFO("Superblocks created for new array");
759 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
760 } else if (new_devs) {
761 DMERR("New device injected "
762 "into existing array without 'rebuild' "
763 "parameter specified");
764 return -EINVAL;
765 }
766 } else if (new_devs) {
767 DMERR("'rebuild' devices cannot be "
768 "injected into an array with other first-time devices");
769 return -EINVAL;
770 } else if (mddev->recovery_cp != MaxSector) {
771 DMERR("'rebuild' specified while array is not in-sync");
772 return -EINVAL;
773 }
774
775 /*
776 * Now we set the Faulty bit for those devices that are
777 * recorded in the superblock as failed.
778 */
779 rdev_for_each(r, t, mddev) {
780 if (!r->sb_page)
781 continue;
782 sb2 = page_address(r->sb_page);
783 sb2->failed_devices = 0;
784
785 /*
786 * Check for any device re-ordering.
787 */
788 if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
789 role = le32_to_cpu(sb2->array_position);
790 if (role != r->raid_disk) {
791 if (rs->raid_type->level != 1) {
792 rs->ti->error = "Cannot change device "
793 "positions in RAID array";
794 return -EINVAL;
795 }
796 DMINFO("RAID1 device #%d now at position #%d",
797 role, r->raid_disk);
798 }
799
800 /*
801 * Partial recovery is performed on
802 * returning failed devices.
803 */
804 if (failed_devices & (1 << role))
805 set_bit(Faulty, &r->flags);
806 }
807 }
808
809 return 0;
810}
811
812static int super_validate(mddev_t *mddev, mdk_rdev_t *rdev)
813{
814 struct dm_raid_superblock *sb = page_address(rdev->sb_page);
815
816 /*
817 * If mddev->events is not set, we know we have not yet initialized
818 * the array.
819 */
820 if (!mddev->events && super_init_validation(mddev, rdev))
821 return -EINVAL;
822
823 mddev->bitmap_info.offset = 4096 >> 9; /* Enable bitmap creation */
824 rdev->mddev->bitmap_info.default_offset = 4096 >> 9;
825 if (!test_bit(FirstUse, &rdev->flags)) {
826 rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
827 if (rdev->recovery_offset != MaxSector)
828 clear_bit(In_sync, &rdev->flags);
829 }
830
831 /*
832 * If a device comes back, set it as not In_sync and no longer faulty.
833 */
834 if (test_bit(Faulty, &rdev->flags)) {
835 clear_bit(Faulty, &rdev->flags);
836 clear_bit(In_sync, &rdev->flags);
837 rdev->saved_raid_disk = rdev->raid_disk;
838 rdev->recovery_offset = 0;
839 }
840
841 clear_bit(FirstUse, &rdev->flags);
842
843 return 0;
844}
845
846/*
847 * Analyse superblocks and select the freshest.
848 */
849static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
850{
851 int ret;
852 mdk_rdev_t *rdev, *freshest, *tmp;
853 mddev_t *mddev = &rs->md;
854
855 freshest = NULL;
856 rdev_for_each(rdev, tmp, mddev) {
857 if (!rdev->meta_bdev)
858 continue;
859
860 ret = super_load(rdev, freshest);
861
862 switch (ret) {
863 case 1:
864 freshest = rdev;
865 break;
866 case 0:
867 break;
868 default:
869 ti->error = "Failed to load superblock";
870 return ret;
871 }
872 }
873
874 if (!freshest)
875 return 0;
876
877 /*
878 * Validation of the freshest device provides the source of
879 * validation for the remaining devices.
880 */
881 ti->error = "Unable to assemble array: Invalid superblocks";
882 if (super_validate(mddev, freshest))
883 return -EINVAL;
884
885 rdev_for_each(rdev, tmp, mddev)
886 if ((rdev != freshest) && super_validate(mddev, rdev))
887 return -EINVAL;
888
889 return 0;
890}
891
892/*
394 * Construct a RAID4/5/6 mapping: 893 * Construct a RAID4/5/6 mapping:
395 * Args: 894 * Args:
396 * <raid_type> <#raid_params> <raid_params> \ 895 * <raid_type> <#raid_params> <raid_params> \
397 * <#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> } 896 * <#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
398 * 897 *
399 * ** metadata devices are not supported yet, use '-' instead **
400 *
401 * <raid_params> varies by <raid_type>. See 'parse_raid_params' for 898 * <raid_params> varies by <raid_type>. See 'parse_raid_params' for
402 * details on possible <raid_params>. 899 * details on possible <raid_params>.
403 */ 900 */
@@ -465,8 +962,12 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
465 if (ret) 962 if (ret)
466 goto bad; 963 goto bad;
467 964
965 rs->md.sync_super = super_sync;
966 ret = analyse_superblocks(ti, rs);
967 if (ret)
968 goto bad;
969
468 INIT_WORK(&rs->md.event_work, do_table_event); 970 INIT_WORK(&rs->md.event_work, do_table_event);
469 ti->split_io = rs->md.chunk_sectors;
470 ti->private = rs; 971 ti->private = rs;
471 972
472 mutex_lock(&rs->md.reconfig_mutex); 973 mutex_lock(&rs->md.reconfig_mutex);
@@ -482,6 +983,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
482 rs->callbacks.congested_fn = raid_is_congested; 983 rs->callbacks.congested_fn = raid_is_congested;
483 dm_table_add_target_callbacks(ti->table, &rs->callbacks); 984 dm_table_add_target_callbacks(ti->table, &rs->callbacks);
484 985
986 mddev_suspend(&rs->md);
485 return 0; 987 return 0;
486 988
487bad: 989bad:
@@ -546,12 +1048,17 @@ static int raid_status(struct dm_target *ti, status_type_t type,
546 break; 1048 break;
547 case STATUSTYPE_TABLE: 1049 case STATUSTYPE_TABLE:
548 /* The string you would use to construct this array */ 1050 /* The string you would use to construct this array */
549 for (i = 0; i < rs->md.raid_disks; i++) 1051 for (i = 0; i < rs->md.raid_disks; i++) {
550 if (rs->dev[i].data_dev && 1052 if ((rs->print_flags & DMPF_REBUILD) &&
1053 rs->dev[i].data_dev &&
551 !test_bit(In_sync, &rs->dev[i].rdev.flags)) 1054 !test_bit(In_sync, &rs->dev[i].rdev.flags))
552 raid_param_cnt++; /* for rebuilds */ 1055 raid_param_cnt += 2; /* for rebuilds */
1056 if (rs->dev[i].data_dev &&
1057 test_bit(WriteMostly, &rs->dev[i].rdev.flags))
1058 raid_param_cnt += 2;
1059 }
553 1060
554 raid_param_cnt += (hweight64(rs->print_flags) * 2); 1061 raid_param_cnt += (hweight64(rs->print_flags & ~DMPF_REBUILD) * 2);
555 if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)) 1062 if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
556 raid_param_cnt--; 1063 raid_param_cnt--;
557 1064
@@ -565,7 +1072,8 @@ static int raid_status(struct dm_target *ti, status_type_t type,
565 DMEMIT(" nosync"); 1072 DMEMIT(" nosync");
566 1073
567 for (i = 0; i < rs->md.raid_disks; i++) 1074 for (i = 0; i < rs->md.raid_disks; i++)
568 if (rs->dev[i].data_dev && 1075 if ((rs->print_flags & DMPF_REBUILD) &&
1076 rs->dev[i].data_dev &&
569 !test_bit(In_sync, &rs->dev[i].rdev.flags)) 1077 !test_bit(In_sync, &rs->dev[i].rdev.flags))
570 DMEMIT(" rebuild %u", i); 1078 DMEMIT(" rebuild %u", i);
571 1079
@@ -579,6 +1087,11 @@ static int raid_status(struct dm_target *ti, status_type_t type,
579 if (rs->print_flags & DMPF_MAX_RECOVERY_RATE) 1087 if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
580 DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max); 1088 DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);
581 1089
1090 for (i = 0; i < rs->md.raid_disks; i++)
1091 if (rs->dev[i].data_dev &&
1092 test_bit(WriteMostly, &rs->dev[i].rdev.flags))
1093 DMEMIT(" write_mostly %u", i);
1094
582 if (rs->print_flags & DMPF_MAX_WRITE_BEHIND) 1095 if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
583 DMEMIT(" max_write_behind %lu", 1096 DMEMIT(" max_write_behind %lu",
584 rs->md.bitmap_info.max_write_behind); 1097 rs->md.bitmap_info.max_write_behind);
@@ -591,9 +1104,16 @@ static int raid_status(struct dm_target *ti, status_type_t type,
591 conf ? conf->max_nr_stripes * 2 : 0); 1104 conf ? conf->max_nr_stripes * 2 : 0);
592 } 1105 }
593 1106
1107 if (rs->print_flags & DMPF_REGION_SIZE)
1108 DMEMIT(" region_size %lu",
1109 rs->md.bitmap_info.chunksize >> 9);
1110
594 DMEMIT(" %d", rs->md.raid_disks); 1111 DMEMIT(" %d", rs->md.raid_disks);
595 for (i = 0; i < rs->md.raid_disks; i++) { 1112 for (i = 0; i < rs->md.raid_disks; i++) {
596 DMEMIT(" -"); /* metadata device */ 1113 if (rs->dev[i].meta_dev)
1114 DMEMIT(" %s", rs->dev[i].meta_dev->name);
1115 else
1116 DMEMIT(" -");
597 1117
598 if (rs->dev[i].data_dev) 1118 if (rs->dev[i].data_dev)
599 DMEMIT(" %s", rs->dev[i].data_dev->name); 1119 DMEMIT(" %s", rs->dev[i].data_dev->name);
@@ -650,12 +1170,13 @@ static void raid_resume(struct dm_target *ti)
650{ 1170{
651 struct raid_set *rs = ti->private; 1171 struct raid_set *rs = ti->private;
652 1172
1173 bitmap_load(&rs->md);
653 mddev_resume(&rs->md); 1174 mddev_resume(&rs->md);
654} 1175}
655 1176
656static struct target_type raid_target = { 1177static struct target_type raid_target = {
657 .name = "raid", 1178 .name = "raid",
658 .version = {1, 0, 0}, 1179 .version = {1, 1, 0},
659 .module = THIS_MODULE, 1180 .module = THIS_MODULE,
660 .ctr = raid_ctr, 1181 .ctr = raid_ctr,
661 .dtr = raid_dtr, 1182 .dtr = raid_dtr,
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 135c2f1fdbfc..d1f1d7017103 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -58,25 +58,30 @@
58#define NUM_SNAPSHOT_HDR_CHUNKS 1 58#define NUM_SNAPSHOT_HDR_CHUNKS 1
59 59
60struct disk_header { 60struct disk_header {
61 uint32_t magic; 61 __le32 magic;
62 62
63 /* 63 /*
64 * Is this snapshot valid. There is no way of recovering 64 * Is this snapshot valid. There is no way of recovering
65 * an invalid snapshot. 65 * an invalid snapshot.
66 */ 66 */
67 uint32_t valid; 67 __le32 valid;
68 68
69 /* 69 /*
70 * Simple, incrementing version. no backward 70 * Simple, incrementing version. no backward
71 * compatibility. 71 * compatibility.
72 */ 72 */
73 uint32_t version; 73 __le32 version;
74 74
75 /* In sectors */ 75 /* In sectors */
76 uint32_t chunk_size; 76 __le32 chunk_size;
77}; 77} __packed;
78 78
79struct disk_exception { 79struct disk_exception {
80 __le64 old_chunk;
81 __le64 new_chunk;
82} __packed;
83
84struct core_exception {
80 uint64_t old_chunk; 85 uint64_t old_chunk;
81 uint64_t new_chunk; 86 uint64_t new_chunk;
82}; 87};
@@ -169,10 +174,9 @@ static int alloc_area(struct pstore *ps)
169 if (!ps->area) 174 if (!ps->area)
170 goto err_area; 175 goto err_area;
171 176
172 ps->zero_area = vmalloc(len); 177 ps->zero_area = vzalloc(len);
173 if (!ps->zero_area) 178 if (!ps->zero_area)
174 goto err_zero_area; 179 goto err_zero_area;
175 memset(ps->zero_area, 0, len);
176 180
177 ps->header_area = vmalloc(len); 181 ps->header_area = vmalloc(len);
178 if (!ps->header_area) 182 if (!ps->header_area)
@@ -396,32 +400,32 @@ static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
396} 400}
397 401
398static void read_exception(struct pstore *ps, 402static void read_exception(struct pstore *ps,
399 uint32_t index, struct disk_exception *result) 403 uint32_t index, struct core_exception *result)
400{ 404{
401 struct disk_exception *e = get_exception(ps, index); 405 struct disk_exception *de = get_exception(ps, index);
402 406
403 /* copy it */ 407 /* copy it */
404 result->old_chunk = le64_to_cpu(e->old_chunk); 408 result->old_chunk = le64_to_cpu(de->old_chunk);
405 result->new_chunk = le64_to_cpu(e->new_chunk); 409 result->new_chunk = le64_to_cpu(de->new_chunk);
406} 410}
407 411
408static void write_exception(struct pstore *ps, 412static void write_exception(struct pstore *ps,
409 uint32_t index, struct disk_exception *de) 413 uint32_t index, struct core_exception *e)
410{ 414{
411 struct disk_exception *e = get_exception(ps, index); 415 struct disk_exception *de = get_exception(ps, index);
412 416
413 /* copy it */ 417 /* copy it */
414 e->old_chunk = cpu_to_le64(de->old_chunk); 418 de->old_chunk = cpu_to_le64(e->old_chunk);
415 e->new_chunk = cpu_to_le64(de->new_chunk); 419 de->new_chunk = cpu_to_le64(e->new_chunk);
416} 420}
417 421
418static void clear_exception(struct pstore *ps, uint32_t index) 422static void clear_exception(struct pstore *ps, uint32_t index)
419{ 423{
420 struct disk_exception *e = get_exception(ps, index); 424 struct disk_exception *de = get_exception(ps, index);
421 425
422 /* clear it */ 426 /* clear it */
423 e->old_chunk = 0; 427 de->old_chunk = 0;
424 e->new_chunk = 0; 428 de->new_chunk = 0;
425} 429}
426 430
427/* 431/*
@@ -437,13 +441,13 @@ static int insert_exceptions(struct pstore *ps,
437{ 441{
438 int r; 442 int r;
439 unsigned int i; 443 unsigned int i;
440 struct disk_exception de; 444 struct core_exception e;
441 445
442 /* presume the area is full */ 446 /* presume the area is full */
443 *full = 1; 447 *full = 1;
444 448
445 for (i = 0; i < ps->exceptions_per_area; i++) { 449 for (i = 0; i < ps->exceptions_per_area; i++) {
446 read_exception(ps, i, &de); 450 read_exception(ps, i, &e);
447 451
448 /* 452 /*
449 * If the new_chunk is pointing at the start of 453 * If the new_chunk is pointing at the start of
@@ -451,7 +455,7 @@ static int insert_exceptions(struct pstore *ps,
451 * is we know that we've hit the end of the 455 * is we know that we've hit the end of the
452 * exceptions. Therefore the area is not full. 456 * exceptions. Therefore the area is not full.
453 */ 457 */
454 if (de.new_chunk == 0LL) { 458 if (e.new_chunk == 0LL) {
455 ps->current_committed = i; 459 ps->current_committed = i;
456 *full = 0; 460 *full = 0;
457 break; 461 break;
@@ -460,13 +464,13 @@ static int insert_exceptions(struct pstore *ps,
460 /* 464 /*
461 * Keep track of the start of the free chunks. 465 * Keep track of the start of the free chunks.
462 */ 466 */
463 if (ps->next_free <= de.new_chunk) 467 if (ps->next_free <= e.new_chunk)
464 ps->next_free = de.new_chunk + 1; 468 ps->next_free = e.new_chunk + 1;
465 469
466 /* 470 /*
467 * Otherwise we add the exception to the snapshot. 471 * Otherwise we add the exception to the snapshot.
468 */ 472 */
469 r = callback(callback_context, de.old_chunk, de.new_chunk); 473 r = callback(callback_context, e.old_chunk, e.new_chunk);
470 if (r) 474 if (r)
471 return r; 475 return r;
472 } 476 }
@@ -563,7 +567,7 @@ static int persistent_read_metadata(struct dm_exception_store *store,
563 ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) / 567 ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
564 sizeof(struct disk_exception); 568 sizeof(struct disk_exception);
565 ps->callbacks = dm_vcalloc(ps->exceptions_per_area, 569 ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
566 sizeof(*ps->callbacks)); 570 sizeof(*ps->callbacks));
567 if (!ps->callbacks) 571 if (!ps->callbacks)
568 return -ENOMEM; 572 return -ENOMEM;
569 573
@@ -641,12 +645,12 @@ static void persistent_commit_exception(struct dm_exception_store *store,
641{ 645{
642 unsigned int i; 646 unsigned int i;
643 struct pstore *ps = get_info(store); 647 struct pstore *ps = get_info(store);
644 struct disk_exception de; 648 struct core_exception ce;
645 struct commit_callback *cb; 649 struct commit_callback *cb;
646 650
647 de.old_chunk = e->old_chunk; 651 ce.old_chunk = e->old_chunk;
648 de.new_chunk = e->new_chunk; 652 ce.new_chunk = e->new_chunk;
649 write_exception(ps, ps->current_committed++, &de); 653 write_exception(ps, ps->current_committed++, &ce);
650 654
651 /* 655 /*
652 * Add the callback to the back of the array. This code 656 * Add the callback to the back of the array. This code
@@ -670,7 +674,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
670 * If we completely filled the current area, then wipe the next one. 674 * If we completely filled the current area, then wipe the next one.
671 */ 675 */
672 if ((ps->current_committed == ps->exceptions_per_area) && 676 if ((ps->current_committed == ps->exceptions_per_area) &&
673 zero_disk_area(ps, ps->current_area + 1)) 677 zero_disk_area(ps, ps->current_area + 1))
674 ps->valid = 0; 678 ps->valid = 0;
675 679
676 /* 680 /*
@@ -701,7 +705,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
701 chunk_t *last_new_chunk) 705 chunk_t *last_new_chunk)
702{ 706{
703 struct pstore *ps = get_info(store); 707 struct pstore *ps = get_info(store);
704 struct disk_exception de; 708 struct core_exception ce;
705 int nr_consecutive; 709 int nr_consecutive;
706 int r; 710 int r;
707 711
@@ -722,9 +726,9 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
722 ps->current_committed = ps->exceptions_per_area; 726 ps->current_committed = ps->exceptions_per_area;
723 } 727 }
724 728
725 read_exception(ps, ps->current_committed - 1, &de); 729 read_exception(ps, ps->current_committed - 1, &ce);
726 *last_old_chunk = de.old_chunk; 730 *last_old_chunk = ce.old_chunk;
727 *last_new_chunk = de.new_chunk; 731 *last_new_chunk = ce.new_chunk;
728 732
729 /* 733 /*
730 * Find number of consecutive chunks within the current area, 734 * Find number of consecutive chunks within the current area,
@@ -733,9 +737,9 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
733 for (nr_consecutive = 1; nr_consecutive < ps->current_committed; 737 for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
734 nr_consecutive++) { 738 nr_consecutive++) {
735 read_exception(ps, ps->current_committed - 1 - nr_consecutive, 739 read_exception(ps, ps->current_committed - 1 - nr_consecutive,
736 &de); 740 &ce);
737 if (de.old_chunk != *last_old_chunk - nr_consecutive || 741 if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
738 de.new_chunk != *last_new_chunk - nr_consecutive) 742 ce.new_chunk != *last_new_chunk - nr_consecutive)
739 break; 743 break;
740 } 744 }
741 745
@@ -753,7 +757,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
753 for (i = 0; i < nr_merged; i++) 757 for (i = 0; i < nr_merged; i++)
754 clear_exception(ps, ps->current_committed - 1 - i); 758 clear_exception(ps, ps->current_committed - 1 - i);
755 759
756 r = area_io(ps, WRITE); 760 r = area_io(ps, WRITE_FLUSH_FUA);
757 if (r < 0) 761 if (r < 0)
758 return r; 762 return r;
759 763
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 9ecff5f3023a..6f758870fc19 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -30,16 +30,6 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
30 ((ti)->type->name == dm_snapshot_merge_target_name) 30 ((ti)->type->name == dm_snapshot_merge_target_name)
31 31
32/* 32/*
33 * The percentage increment we will wake up users at
34 */
35#define WAKE_UP_PERCENT 5
36
37/*
38 * kcopyd priority of snapshot operations
39 */
40#define SNAPSHOT_COPY_PRIORITY 2
41
42/*
43 * The size of the mempool used to track chunks in use. 33 * The size of the mempool used to track chunks in use.
44 */ 34 */
45#define MIN_IOS 256 35#define MIN_IOS 256
@@ -180,6 +170,13 @@ struct dm_snap_pending_exception {
180 * kcopyd. 170 * kcopyd.
181 */ 171 */
182 int started; 172 int started;
173
174 /*
175 * For writing a complete chunk, bypassing the copy.
176 */
177 struct bio *full_bio;
178 bio_end_io_t *full_bio_end_io;
179 void *full_bio_private;
183}; 180};
184 181
185/* 182/*
@@ -1055,8 +1052,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1055 1052
1056 s = kmalloc(sizeof(*s), GFP_KERNEL); 1053 s = kmalloc(sizeof(*s), GFP_KERNEL);
1057 if (!s) { 1054 if (!s) {
1058 ti->error = "Cannot allocate snapshot context private " 1055 ti->error = "Cannot allocate private snapshot structure";
1059 "structure";
1060 r = -ENOMEM; 1056 r = -ENOMEM;
1061 goto bad; 1057 goto bad;
1062 } 1058 }
@@ -1380,6 +1376,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
1380 struct dm_snapshot *s = pe->snap; 1376 struct dm_snapshot *s = pe->snap;
1381 struct bio *origin_bios = NULL; 1377 struct bio *origin_bios = NULL;
1382 struct bio *snapshot_bios = NULL; 1378 struct bio *snapshot_bios = NULL;
1379 struct bio *full_bio = NULL;
1383 int error = 0; 1380 int error = 0;
1384 1381
1385 if (!success) { 1382 if (!success) {
@@ -1415,10 +1412,15 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
1415 */ 1412 */
1416 dm_insert_exception(&s->complete, e); 1413 dm_insert_exception(&s->complete, e);
1417 1414
1418 out: 1415out:
1419 dm_remove_exception(&pe->e); 1416 dm_remove_exception(&pe->e);
1420 snapshot_bios = bio_list_get(&pe->snapshot_bios); 1417 snapshot_bios = bio_list_get(&pe->snapshot_bios);
1421 origin_bios = bio_list_get(&pe->origin_bios); 1418 origin_bios = bio_list_get(&pe->origin_bios);
1419 full_bio = pe->full_bio;
1420 if (full_bio) {
1421 full_bio->bi_end_io = pe->full_bio_end_io;
1422 full_bio->bi_private = pe->full_bio_private;
1423 }
1422 free_pending_exception(pe); 1424 free_pending_exception(pe);
1423 1425
1424 increment_pending_exceptions_done_count(); 1426 increment_pending_exceptions_done_count();
@@ -1426,10 +1428,15 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
1426 up_write(&s->lock); 1428 up_write(&s->lock);
1427 1429
1428 /* Submit any pending write bios */ 1430 /* Submit any pending write bios */
1429 if (error) 1431 if (error) {
1432 if (full_bio)
1433 bio_io_error(full_bio);
1430 error_bios(snapshot_bios); 1434 error_bios(snapshot_bios);
1431 else 1435 } else {
1436 if (full_bio)
1437 bio_endio(full_bio, 0);
1432 flush_bios(snapshot_bios); 1438 flush_bios(snapshot_bios);
1439 }
1433 1440
1434 retry_origin_bios(s, origin_bios); 1441 retry_origin_bios(s, origin_bios);
1435} 1442}
@@ -1480,8 +1487,33 @@ static void start_copy(struct dm_snap_pending_exception *pe)
1480 dest.count = src.count; 1487 dest.count = src.count;
1481 1488
1482 /* Hand over to kcopyd */ 1489 /* Hand over to kcopyd */
1483 dm_kcopyd_copy(s->kcopyd_client, 1490 dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
1484 &src, 1, &dest, 0, copy_callback, pe); 1491}
1492
1493static void full_bio_end_io(struct bio *bio, int error)
1494{
1495 void *callback_data = bio->bi_private;
1496
1497 dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
1498}
1499
1500static void start_full_bio(struct dm_snap_pending_exception *pe,
1501 struct bio *bio)
1502{
1503 struct dm_snapshot *s = pe->snap;
1504 void *callback_data;
1505
1506 pe->full_bio = bio;
1507 pe->full_bio_end_io = bio->bi_end_io;
1508 pe->full_bio_private = bio->bi_private;
1509
1510 callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
1511 copy_callback, pe);
1512
1513 bio->bi_end_io = full_bio_end_io;
1514 bio->bi_private = callback_data;
1515
1516 generic_make_request(bio);
1485} 1517}
1486 1518
1487static struct dm_snap_pending_exception * 1519static struct dm_snap_pending_exception *
@@ -1519,6 +1551,7 @@ __find_pending_exception(struct dm_snapshot *s,
1519 bio_list_init(&pe->origin_bios); 1551 bio_list_init(&pe->origin_bios);
1520 bio_list_init(&pe->snapshot_bios); 1552 bio_list_init(&pe->snapshot_bios);
1521 pe->started = 0; 1553 pe->started = 0;
1554 pe->full_bio = NULL;
1522 1555
1523 if (s->store->type->prepare_exception(s->store, &pe->e)) { 1556 if (s->store->type->prepare_exception(s->store, &pe->e)) {
1524 free_pending_exception(pe); 1557 free_pending_exception(pe);
@@ -1612,10 +1645,19 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
1612 } 1645 }
1613 1646
1614 remap_exception(s, &pe->e, bio, chunk); 1647 remap_exception(s, &pe->e, bio, chunk);
1615 bio_list_add(&pe->snapshot_bios, bio);
1616 1648
1617 r = DM_MAPIO_SUBMITTED; 1649 r = DM_MAPIO_SUBMITTED;
1618 1650
1651 if (!pe->started &&
1652 bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
1653 pe->started = 1;
1654 up_write(&s->lock);
1655 start_full_bio(pe, bio);
1656 goto out;
1657 }
1658
1659 bio_list_add(&pe->snapshot_bios, bio);
1660
1619 if (!pe->started) { 1661 if (!pe->started) {
1620 /* this is protected by snap->lock */ 1662 /* this is protected by snap->lock */
1621 pe->started = 1; 1663 pe->started = 1;
@@ -1628,9 +1670,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
1628 map_context->ptr = track_chunk(s, chunk); 1670 map_context->ptr = track_chunk(s, chunk);
1629 } 1671 }
1630 1672
1631 out_unlock: 1673out_unlock:
1632 up_write(&s->lock); 1674 up_write(&s->lock);
1633 out: 1675out:
1634 return r; 1676 return r;
1635} 1677}
1636 1678
@@ -1974,7 +2016,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
1974 pe_to_start_now = pe; 2016 pe_to_start_now = pe;
1975 } 2017 }
1976 2018
1977 next_snapshot: 2019next_snapshot:
1978 up_write(&snap->lock); 2020 up_write(&snap->lock);
1979 2021
1980 if (pe_to_start_now) { 2022 if (pe_to_start_now) {
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index bfe9c2333cea..986b8754bb08 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -54,7 +54,6 @@ struct dm_table {
54 sector_t *highs; 54 sector_t *highs;
55 struct dm_target *targets; 55 struct dm_target *targets;
56 56
57 unsigned discards_supported:1;
58 unsigned integrity_supported:1; 57 unsigned integrity_supported:1;
59 58
60 /* 59 /*
@@ -154,12 +153,11 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
154 return NULL; 153 return NULL;
155 154
156 size = nmemb * elem_size; 155 size = nmemb * elem_size;
157 addr = vmalloc(size); 156 addr = vzalloc(size);
158 if (addr)
159 memset(addr, 0, size);
160 157
161 return addr; 158 return addr;
162} 159}
160EXPORT_SYMBOL(dm_vcalloc);
163 161
164/* 162/*
165 * highs, and targets are managed as dynamic arrays during a 163 * highs, and targets are managed as dynamic arrays during a
@@ -209,7 +207,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
209 INIT_LIST_HEAD(&t->devices); 207 INIT_LIST_HEAD(&t->devices);
210 INIT_LIST_HEAD(&t->target_callbacks); 208 INIT_LIST_HEAD(&t->target_callbacks);
211 atomic_set(&t->holders, 0); 209 atomic_set(&t->holders, 0);
212 t->discards_supported = 1;
213 210
214 if (!num_targets) 211 if (!num_targets)
215 num_targets = KEYS_PER_NODE; 212 num_targets = KEYS_PER_NODE;
@@ -281,6 +278,7 @@ void dm_table_get(struct dm_table *t)
281{ 278{
282 atomic_inc(&t->holders); 279 atomic_inc(&t->holders);
283} 280}
281EXPORT_SYMBOL(dm_table_get);
284 282
285void dm_table_put(struct dm_table *t) 283void dm_table_put(struct dm_table *t)
286{ 284{
@@ -290,6 +288,7 @@ void dm_table_put(struct dm_table *t)
290 smp_mb__before_atomic_dec(); 288 smp_mb__before_atomic_dec();
291 atomic_dec(&t->holders); 289 atomic_dec(&t->holders);
292} 290}
291EXPORT_SYMBOL(dm_table_put);
293 292
294/* 293/*
295 * Checks to see if we need to extend highs or targets. 294 * Checks to see if we need to extend highs or targets.
@@ -455,13 +454,14 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
455 * Add a device to the list, or just increment the usage count if 454 * Add a device to the list, or just increment the usage count if
456 * it's already present. 455 * it's already present.
457 */ 456 */
458static int __table_get_device(struct dm_table *t, struct dm_target *ti, 457int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
459 const char *path, fmode_t mode, struct dm_dev **result) 458 struct dm_dev **result)
460{ 459{
461 int r; 460 int r;
462 dev_t uninitialized_var(dev); 461 dev_t uninitialized_var(dev);
463 struct dm_dev_internal *dd; 462 struct dm_dev_internal *dd;
464 unsigned int major, minor; 463 unsigned int major, minor;
464 struct dm_table *t = ti->table;
465 465
466 BUG_ON(!t); 466 BUG_ON(!t);
467 467
@@ -509,6 +509,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
509 *result = &dd->dm_dev; 509 *result = &dd->dm_dev;
510 return 0; 510 return 0;
511} 511}
512EXPORT_SYMBOL(dm_get_device);
512 513
513int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, 514int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
514 sector_t start, sector_t len, void *data) 515 sector_t start, sector_t len, void *data)
@@ -539,23 +540,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
539 * If not we'll force DM to use PAGE_SIZE or 540 * If not we'll force DM to use PAGE_SIZE or
540 * smaller I/O, just to be safe. 541 * smaller I/O, just to be safe.
541 */ 542 */
542 543 if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
543 if (q->merge_bvec_fn && !ti->type->merge)
544 blk_limits_max_hw_sectors(limits, 544 blk_limits_max_hw_sectors(limits,
545 (unsigned int) (PAGE_SIZE >> 9)); 545 (unsigned int) (PAGE_SIZE >> 9));
546 return 0; 546 return 0;
547} 547}
548EXPORT_SYMBOL_GPL(dm_set_device_limits); 548EXPORT_SYMBOL_GPL(dm_set_device_limits);
549 549
550int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
551 struct dm_dev **result)
552{
553 return __table_get_device(ti->table, ti, path, mode, result);
554}
555
556
557/* 550/*
558 * Decrement a devices use count and remove it if necessary. 551 * Decrement a device's use count and remove it if necessary.
559 */ 552 */
560void dm_put_device(struct dm_target *ti, struct dm_dev *d) 553void dm_put_device(struct dm_target *ti, struct dm_dev *d)
561{ 554{
@@ -568,6 +561,7 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
568 kfree(dd); 561 kfree(dd);
569 } 562 }
570} 563}
564EXPORT_SYMBOL(dm_put_device);
571 565
572/* 566/*
573 * Checks to see if the target joins onto the end of the table. 567 * Checks to see if the target joins onto the end of the table.
@@ -791,8 +785,9 @@ int dm_table_add_target(struct dm_table *t, const char *type,
791 785
792 t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; 786 t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
793 787
794 if (!tgt->num_discard_requests) 788 if (!tgt->num_discard_requests && tgt->discards_supported)
795 t->discards_supported = 0; 789 DMWARN("%s: %s: ignoring discards_supported because num_discard_requests is zero.",
790 dm_device_name(t->md), type);
796 791
797 return 0; 792 return 0;
798 793
@@ -802,6 +797,63 @@ int dm_table_add_target(struct dm_table *t, const char *type,
802 return r; 797 return r;
803} 798}
804 799
800/*
801 * Target argument parsing helpers.
802 */
803static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
804 unsigned *value, char **error, unsigned grouped)
805{
806 const char *arg_str = dm_shift_arg(arg_set);
807
808 if (!arg_str ||
809 (sscanf(arg_str, "%u", value) != 1) ||
810 (*value < arg->min) ||
811 (*value > arg->max) ||
812 (grouped && arg_set->argc < *value)) {
813 *error = arg->error;
814 return -EINVAL;
815 }
816
817 return 0;
818}
819
820int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
821 unsigned *value, char **error)
822{
823 return validate_next_arg(arg, arg_set, value, error, 0);
824}
825EXPORT_SYMBOL(dm_read_arg);
826
827int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
828 unsigned *value, char **error)
829{
830 return validate_next_arg(arg, arg_set, value, error, 1);
831}
832EXPORT_SYMBOL(dm_read_arg_group);
833
834const char *dm_shift_arg(struct dm_arg_set *as)
835{
836 char *r;
837
838 if (as->argc) {
839 as->argc--;
840 r = *as->argv;
841 as->argv++;
842 return r;
843 }
844
845 return NULL;
846}
847EXPORT_SYMBOL(dm_shift_arg);
848
849void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
850{
851 BUG_ON(as->argc < num_args);
852 as->argc -= num_args;
853 as->argv += num_args;
854}
855EXPORT_SYMBOL(dm_consume_args);
856
805static int dm_table_set_type(struct dm_table *t) 857static int dm_table_set_type(struct dm_table *t)
806{ 858{
807 unsigned i; 859 unsigned i;
@@ -1077,11 +1129,13 @@ void dm_table_event(struct dm_table *t)
1077 t->event_fn(t->event_context); 1129 t->event_fn(t->event_context);
1078 mutex_unlock(&_event_lock); 1130 mutex_unlock(&_event_lock);
1079} 1131}
1132EXPORT_SYMBOL(dm_table_event);
1080 1133
1081sector_t dm_table_get_size(struct dm_table *t) 1134sector_t dm_table_get_size(struct dm_table *t)
1082{ 1135{
1083 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; 1136 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1084} 1137}
1138EXPORT_SYMBOL(dm_table_get_size);
1085 1139
1086struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index) 1140struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
1087{ 1141{
@@ -1194,9 +1248,45 @@ static void dm_table_set_integrity(struct dm_table *t)
1194 blk_get_integrity(template_disk)); 1248 blk_get_integrity(template_disk));
1195} 1249}
1196 1250
1251static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
1252 sector_t start, sector_t len, void *data)
1253{
1254 unsigned flush = (*(unsigned *)data);
1255 struct request_queue *q = bdev_get_queue(dev->bdev);
1256
1257 return q && (q->flush_flags & flush);
1258}
1259
1260static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
1261{
1262 struct dm_target *ti;
1263 unsigned i = 0;
1264
1265 /*
1266 * Require at least one underlying device to support flushes.
1267 * t->devices includes internal dm devices such as mirror logs
1268 * so we need to use iterate_devices here, which targets
1269 * supporting flushes must provide.
1270 */
1271 while (i < dm_table_get_num_targets(t)) {
1272 ti = dm_table_get_target(t, i++);
1273
1274 if (!ti->num_flush_requests)
1275 continue;
1276
1277 if (ti->type->iterate_devices &&
1278 ti->type->iterate_devices(ti, device_flush_capable, &flush))
1279 return 1;
1280 }
1281
1282 return 0;
1283}
1284
1197void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, 1285void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1198 struct queue_limits *limits) 1286 struct queue_limits *limits)
1199{ 1287{
1288 unsigned flush = 0;
1289
1200 /* 1290 /*
1201 * Copy table's limits to the DM device's request_queue 1291 * Copy table's limits to the DM device's request_queue
1202 */ 1292 */
@@ -1207,6 +1297,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1207 else 1297 else
1208 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); 1298 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
1209 1299
1300 if (dm_table_supports_flush(t, REQ_FLUSH)) {
1301 flush |= REQ_FLUSH;
1302 if (dm_table_supports_flush(t, REQ_FUA))
1303 flush |= REQ_FUA;
1304 }
1305 blk_queue_flush(q, flush);
1306
1210 dm_table_set_integrity(t); 1307 dm_table_set_integrity(t);
1211 1308
1212 /* 1309 /*
@@ -1237,6 +1334,7 @@ fmode_t dm_table_get_mode(struct dm_table *t)
1237{ 1334{
1238 return t->mode; 1335 return t->mode;
1239} 1336}
1337EXPORT_SYMBOL(dm_table_get_mode);
1240 1338
1241static void suspend_targets(struct dm_table *t, unsigned postsuspend) 1339static void suspend_targets(struct dm_table *t, unsigned postsuspend)
1242{ 1340{
@@ -1345,6 +1443,7 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
1345{ 1443{
1346 return t->md; 1444 return t->md;
1347} 1445}
1446EXPORT_SYMBOL(dm_table_get_md);
1348 1447
1349static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, 1448static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1350 sector_t start, sector_t len, void *data) 1449 sector_t start, sector_t len, void *data)
@@ -1359,19 +1458,19 @@ bool dm_table_supports_discards(struct dm_table *t)
1359 struct dm_target *ti; 1458 struct dm_target *ti;
1360 unsigned i = 0; 1459 unsigned i = 0;
1361 1460
1362 if (!t->discards_supported)
1363 return 0;
1364
1365 /* 1461 /*
1366 * Unless any target used by the table set discards_supported, 1462 * Unless any target used by the table set discards_supported,
1367 * require at least one underlying device to support discards. 1463 * require at least one underlying device to support discards.
1368 * t->devices includes internal dm devices such as mirror logs 1464 * t->devices includes internal dm devices such as mirror logs
1369 * so we need to use iterate_devices here, which targets 1465 * so we need to use iterate_devices here, which targets
1370 * supporting discard must provide. 1466 * supporting discard selectively must provide.
1371 */ 1467 */
1372 while (i < dm_table_get_num_targets(t)) { 1468 while (i < dm_table_get_num_targets(t)) {
1373 ti = dm_table_get_target(t, i++); 1469 ti = dm_table_get_target(t, i++);
1374 1470
1471 if (!ti->num_discard_requests)
1472 continue;
1473
1375 if (ti->discards_supported) 1474 if (ti->discards_supported)
1376 return 1; 1475 return 1;
1377 1476
@@ -1382,13 +1481,3 @@ bool dm_table_supports_discards(struct dm_table *t)
1382 1481
1383 return 0; 1482 return 0;
1384} 1483}
1385
1386EXPORT_SYMBOL(dm_vcalloc);
1387EXPORT_SYMBOL(dm_get_device);
1388EXPORT_SYMBOL(dm_put_device);
1389EXPORT_SYMBOL(dm_table_event);
1390EXPORT_SYMBOL(dm_table_get_size);
1391EXPORT_SYMBOL(dm_table_get_mode);
1392EXPORT_SYMBOL(dm_table_get_md);
1393EXPORT_SYMBOL(dm_table_put);
1394EXPORT_SYMBOL(dm_table_get);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0cf68b478878..52b39f335bb3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -37,6 +37,8 @@ static const char *_name = DM_NAME;
37static unsigned int major = 0; 37static unsigned int major = 0;
38static unsigned int _major = 0; 38static unsigned int _major = 0;
39 39
40static DEFINE_IDR(_minor_idr);
41
40static DEFINE_SPINLOCK(_minor_lock); 42static DEFINE_SPINLOCK(_minor_lock);
41/* 43/*
42 * For bio-based dm. 44 * For bio-based dm.
@@ -109,6 +111,7 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
109#define DMF_FREEING 3 111#define DMF_FREEING 3
110#define DMF_DELETING 4 112#define DMF_DELETING 4
111#define DMF_NOFLUSH_SUSPENDING 5 113#define DMF_NOFLUSH_SUSPENDING 5
114#define DMF_MERGE_IS_OPTIONAL 6
112 115
113/* 116/*
114 * Work processed by per-device workqueue. 117 * Work processed by per-device workqueue.
@@ -313,6 +316,12 @@ static void __exit dm_exit(void)
313 316
314 while (i--) 317 while (i--)
315 _exits[i](); 318 _exits[i]();
319
320 /*
321 * Should be empty by this point.
322 */
323 idr_remove_all(&_minor_idr);
324 idr_destroy(&_minor_idr);
316} 325}
317 326
318/* 327/*
@@ -1171,7 +1180,8 @@ static int __clone_and_map_discard(struct clone_info *ci)
1171 1180
1172 /* 1181 /*
1173 * Even though the device advertised discard support, 1182 * Even though the device advertised discard support,
1174 * reconfiguration might have changed that since the 1183 * that does not mean every target supports it, and
1184 * reconfiguration might also have changed that since the
1175 * check was performed. 1185 * check was performed.
1176 */ 1186 */
1177 if (!ti->num_discard_requests) 1187 if (!ti->num_discard_requests)
@@ -1705,8 +1715,6 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
1705/*----------------------------------------------------------------- 1715/*-----------------------------------------------------------------
1706 * An IDR is used to keep track of allocated minor numbers. 1716 * An IDR is used to keep track of allocated minor numbers.
1707 *---------------------------------------------------------------*/ 1717 *---------------------------------------------------------------*/
1708static DEFINE_IDR(_minor_idr);
1709
1710static void free_minor(int minor) 1718static void free_minor(int minor)
1711{ 1719{
1712 spin_lock(&_minor_lock); 1720 spin_lock(&_minor_lock);
@@ -1800,7 +1808,6 @@ static void dm_init_md_queue(struct mapped_device *md)
1800 blk_queue_make_request(md->queue, dm_request); 1808 blk_queue_make_request(md->queue, dm_request);
1801 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 1809 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1802 blk_queue_merge_bvec(md->queue, dm_merge_bvec); 1810 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
1803 blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
1804} 1811}
1805 1812
1806/* 1813/*
@@ -1986,6 +1993,59 @@ static void __set_size(struct mapped_device *md, sector_t size)
1986} 1993}
1987 1994
1988/* 1995/*
1996 * Return 1 if the queue has a compulsory merge_bvec_fn function.
1997 *
1998 * If this function returns 0, then the device is either a non-dm
1999 * device without a merge_bvec_fn, or it is a dm device that is
2000 * able to split any bios it receives that are too big.
2001 */
2002int dm_queue_merge_is_compulsory(struct request_queue *q)
2003{
2004 struct mapped_device *dev_md;
2005
2006 if (!q->merge_bvec_fn)
2007 return 0;
2008
2009 if (q->make_request_fn == dm_request) {
2010 dev_md = q->queuedata;
2011 if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
2012 return 0;
2013 }
2014
2015 return 1;
2016}
2017
2018static int dm_device_merge_is_compulsory(struct dm_target *ti,
2019 struct dm_dev *dev, sector_t start,
2020 sector_t len, void *data)
2021{
2022 struct block_device *bdev = dev->bdev;
2023 struct request_queue *q = bdev_get_queue(bdev);
2024
2025 return dm_queue_merge_is_compulsory(q);
2026}
2027
2028/*
2029 * Return 1 if it is acceptable to ignore merge_bvec_fn based
2030 * on the properties of the underlying devices.
2031 */
2032static int dm_table_merge_is_optional(struct dm_table *table)
2033{
2034 unsigned i = 0;
2035 struct dm_target *ti;
2036
2037 while (i < dm_table_get_num_targets(table)) {
2038 ti = dm_table_get_target(table, i++);
2039
2040 if (ti->type->iterate_devices &&
2041 ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
2042 return 0;
2043 }
2044
2045 return 1;
2046}
2047
2048/*
1989 * Returns old map, which caller must destroy. 2049 * Returns old map, which caller must destroy.
1990 */ 2050 */
1991static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, 2051static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
@@ -1995,6 +2055,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
1995 struct request_queue *q = md->queue; 2055 struct request_queue *q = md->queue;
1996 sector_t size; 2056 sector_t size;
1997 unsigned long flags; 2057 unsigned long flags;
2058 int merge_is_optional;
1998 2059
1999 size = dm_table_get_size(t); 2060 size = dm_table_get_size(t);
2000 2061
@@ -2020,10 +2081,16 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2020 2081
2021 __bind_mempools(md, t); 2082 __bind_mempools(md, t);
2022 2083
2084 merge_is_optional = dm_table_merge_is_optional(t);
2085
2023 write_lock_irqsave(&md->map_lock, flags); 2086 write_lock_irqsave(&md->map_lock, flags);
2024 old_map = md->map; 2087 old_map = md->map;
2025 md->map = t; 2088 md->map = t;
2026 dm_table_set_restrictions(t, q, limits); 2089 dm_table_set_restrictions(t, q, limits);
2090 if (merge_is_optional)
2091 set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2092 else
2093 clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2027 write_unlock_irqrestore(&md->map_lock, flags); 2094 write_unlock_irqrestore(&md->map_lock, flags);
2028 2095
2029 return old_map; 2096 return old_map;
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 1aaf16746da8..6745dbd278a4 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -66,6 +66,8 @@ int dm_table_alloc_md_mempools(struct dm_table *t);
66void dm_table_free_md_mempools(struct dm_table *t); 66void dm_table_free_md_mempools(struct dm_table *t);
67struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); 67struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
68 68
69int dm_queue_merge_is_compulsory(struct request_queue *q);
70
69void dm_lock_md_type(struct mapped_device *md); 71void dm_lock_md_type(struct mapped_device *md);
70void dm_unlock_md_type(struct mapped_device *md); 72void dm_unlock_md_type(struct mapped_device *md);
71void dm_set_md_type(struct mapped_device *md, unsigned type); 73void dm_set_md_type(struct mapped_device *md, unsigned type);
diff --git a/drivers/md/linear.h b/drivers/md/linear.h
index 0ce29b61605a..2f2da05b2ce9 100644
--- a/drivers/md/linear.h
+++ b/drivers/md/linear.h
@@ -10,9 +10,9 @@ typedef struct dev_info dev_info_t;
10 10
11struct linear_private_data 11struct linear_private_data
12{ 12{
13 struct rcu_head rcu;
13 sector_t array_sectors; 14 sector_t array_sectors;
14 dev_info_t disks[0]; 15 dev_info_t disks[0];
15 struct rcu_head rcu;
16}; 16};
17 17
18 18
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8e221a20f5d9..5404b2295820 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -848,7 +848,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
848 bio->bi_end_io = super_written; 848 bio->bi_end_io = super_written;
849 849
850 atomic_inc(&mddev->pending_writes); 850 atomic_inc(&mddev->pending_writes);
851 submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio); 851 submit_bio(WRITE_FLUSH_FUA, bio);
852} 852}
853 853
854void md_super_wait(mddev_t *mddev) 854void md_super_wait(mddev_t *mddev)
@@ -1138,8 +1138,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
1138 ret = 0; 1138 ret = 0;
1139 } 1139 }
1140 rdev->sectors = rdev->sb_start; 1140 rdev->sectors = rdev->sb_start;
1141 /* Limit to 4TB as metadata cannot record more than that */
1142 if (rdev->sectors >= (2ULL << 32))
1143 rdev->sectors = (2ULL << 32) - 2;
1141 1144
1142 if (rdev->sectors < sb->size * 2 && sb->level > 1) 1145 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1143 /* "this cannot possibly happen" ... */ 1146 /* "this cannot possibly happen" ... */
1144 ret = -EINVAL; 1147 ret = -EINVAL;
1145 1148
@@ -1173,7 +1176,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1173 mddev->clevel[0] = 0; 1176 mddev->clevel[0] = 0;
1174 mddev->layout = sb->layout; 1177 mddev->layout = sb->layout;
1175 mddev->raid_disks = sb->raid_disks; 1178 mddev->raid_disks = sb->raid_disks;
1176 mddev->dev_sectors = sb->size * 2; 1179 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1177 mddev->events = ev1; 1180 mddev->events = ev1;
1178 mddev->bitmap_info.offset = 0; 1181 mddev->bitmap_info.offset = 0;
1179 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 1182 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
@@ -1415,6 +1418,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
1415 rdev->sb_start = calc_dev_sboffset(rdev); 1418 rdev->sb_start = calc_dev_sboffset(rdev);
1416 if (!num_sectors || num_sectors > rdev->sb_start) 1419 if (!num_sectors || num_sectors > rdev->sb_start)
1417 num_sectors = rdev->sb_start; 1420 num_sectors = rdev->sb_start;
1421 /* Limit to 4TB as metadata cannot record more than that.
1422 * 4TB == 2^32 KB, or 2*2^32 sectors.
1423 */
1424 if (num_sectors >= (2ULL << 32))
1425 num_sectors = (2ULL << 32) - 2;
1418 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1426 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1419 rdev->sb_page); 1427 rdev->sb_page);
1420 md_super_wait(rdev->mddev); 1428 md_super_wait(rdev->mddev);
@@ -1738,6 +1746,11 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1738 sb->level = cpu_to_le32(mddev->level); 1746 sb->level = cpu_to_le32(mddev->level);
1739 sb->layout = cpu_to_le32(mddev->layout); 1747 sb->layout = cpu_to_le32(mddev->layout);
1740 1748
1749 if (test_bit(WriteMostly, &rdev->flags))
1750 sb->devflags |= WriteMostly1;
1751 else
1752 sb->devflags &= ~WriteMostly1;
1753
1741 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { 1754 if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1742 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); 1755 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1743 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1756 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
@@ -2561,7 +2574,10 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2561 int err = -EINVAL; 2574 int err = -EINVAL;
2562 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2575 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2563 md_error(rdev->mddev, rdev); 2576 md_error(rdev->mddev, rdev);
2564 err = 0; 2577 if (test_bit(Faulty, &rdev->flags))
2578 err = 0;
2579 else
2580 err = -EBUSY;
2565 } else if (cmd_match(buf, "remove")) { 2581 } else if (cmd_match(buf, "remove")) {
2566 if (rdev->raid_disk >= 0) 2582 if (rdev->raid_disk >= 0)
2567 err = -EBUSY; 2583 err = -EBUSY;
@@ -2584,7 +2600,7 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2584 err = 0; 2600 err = 0;
2585 } else if (cmd_match(buf, "-blocked")) { 2601 } else if (cmd_match(buf, "-blocked")) {
2586 if (!test_bit(Faulty, &rdev->flags) && 2602 if (!test_bit(Faulty, &rdev->flags) &&
2587 test_bit(BlockedBadBlocks, &rdev->flags)) { 2603 rdev->badblocks.unacked_exist) {
2588 /* metadata handler doesn't understand badblocks, 2604 /* metadata handler doesn't understand badblocks,
2589 * so we need to fail the device 2605 * so we need to fail the device
2590 */ 2606 */
@@ -5983,6 +5999,8 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev)
5983 return -ENODEV; 5999 return -ENODEV;
5984 6000
5985 md_error(mddev, rdev); 6001 md_error(mddev, rdev);
6002 if (!test_bit(Faulty, &rdev->flags))
6003 return -EBUSY;
5986 return 0; 6004 return 0;
5987} 6005}
5988 6006
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 32323f0afd89..f4622dd8fc59 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1099,12 +1099,11 @@ read_again:
1099 bio_list_add(&conf->pending_bio_list, mbio); 1099 bio_list_add(&conf->pending_bio_list, mbio);
1100 spin_unlock_irqrestore(&conf->device_lock, flags); 1100 spin_unlock_irqrestore(&conf->device_lock, flags);
1101 } 1101 }
1102 r1_bio_write_done(r1_bio); 1102 /* Mustn't call r1_bio_write_done before this next test,
1103 1103 * as it could result in the bio being freed.
1104 /* In case raid1d snuck in to freeze_array */ 1104 */
1105 wake_up(&conf->wait_barrier);
1106
1107 if (sectors_handled < (bio->bi_size >> 9)) { 1105 if (sectors_handled < (bio->bi_size >> 9)) {
1106 r1_bio_write_done(r1_bio);
1108 /* We need another r1_bio. It has already been counted 1107 /* We need another r1_bio. It has already been counted
1109 * in bio->bi_phys_segments 1108 * in bio->bi_phys_segments
1110 */ 1109 */
@@ -1117,6 +1116,11 @@ read_again:
1117 goto retry_write; 1116 goto retry_write;
1118 } 1117 }
1119 1118
1119 r1_bio_write_done(r1_bio);
1120
1121 /* In case raid1d snuck in to freeze_array */
1122 wake_up(&conf->wait_barrier);
1123
1120 if (do_sync || !bitmap || !plugged) 1124 if (do_sync || !bitmap || !plugged)
1121 md_wakeup_thread(mddev->thread); 1125 md_wakeup_thread(mddev->thread);
1122 1126
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8b29cd4f01c8..d7a8468ddeab 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -337,6 +337,21 @@ static void close_write(r10bio_t *r10_bio)
337 md_write_end(r10_bio->mddev); 337 md_write_end(r10_bio->mddev);
338} 338}
339 339
340static void one_write_done(r10bio_t *r10_bio)
341{
342 if (atomic_dec_and_test(&r10_bio->remaining)) {
343 if (test_bit(R10BIO_WriteError, &r10_bio->state))
344 reschedule_retry(r10_bio);
345 else {
346 close_write(r10_bio);
347 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
348 reschedule_retry(r10_bio);
349 else
350 raid_end_bio_io(r10_bio);
351 }
352 }
353}
354
340static void raid10_end_write_request(struct bio *bio, int error) 355static void raid10_end_write_request(struct bio *bio, int error)
341{ 356{
342 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 357 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -387,17 +402,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
387 * Let's see if all mirrored write operations have finished 402 * Let's see if all mirrored write operations have finished
388 * already. 403 * already.
389 */ 404 */
390 if (atomic_dec_and_test(&r10_bio->remaining)) { 405 one_write_done(r10_bio);
391 if (test_bit(R10BIO_WriteError, &r10_bio->state))
392 reschedule_retry(r10_bio);
393 else {
394 close_write(r10_bio);
395 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
396 reschedule_retry(r10_bio);
397 else
398 raid_end_bio_io(r10_bio);
399 }
400 }
401 if (dec_rdev) 406 if (dec_rdev)
402 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); 407 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
403} 408}
@@ -1127,20 +1132,12 @@ retry_write:
1127 spin_unlock_irqrestore(&conf->device_lock, flags); 1132 spin_unlock_irqrestore(&conf->device_lock, flags);
1128 } 1133 }
1129 1134
1130 if (atomic_dec_and_test(&r10_bio->remaining)) { 1135 /* Don't remove the bias on 'remaining' (one_write_done) until
1131 /* This matches the end of raid10_end_write_request() */ 1136 * after checking if we need to go around again.
1132 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, 1137 */
1133 r10_bio->sectors,
1134 !test_bit(R10BIO_Degraded, &r10_bio->state),
1135 0);
1136 md_write_end(mddev);
1137 raid_end_bio_io(r10_bio);
1138 }
1139
1140 /* In case raid10d snuck in to freeze_array */
1141 wake_up(&conf->wait_barrier);
1142 1138
1143 if (sectors_handled < (bio->bi_size >> 9)) { 1139 if (sectors_handled < (bio->bi_size >> 9)) {
1140 one_write_done(r10_bio);
1144 /* We need another r10_bio. It has already been counted 1141 /* We need another r10_bio. It has already been counted
1145 * in bio->bi_phys_segments. 1142 * in bio->bi_phys_segments.
1146 */ 1143 */
@@ -1154,6 +1151,10 @@ retry_write:
1154 r10_bio->state = 0; 1151 r10_bio->state = 0;
1155 goto retry_write; 1152 goto retry_write;
1156 } 1153 }
1154 one_write_done(r10_bio);
1155
1156 /* In case raid10d snuck in to freeze_array */
1157 wake_up(&conf->wait_barrier);
1157 1158
1158 if (do_sync || !mddev->bitmap || !plugged) 1159 if (do_sync || !mddev->bitmap || !plugged)
1159 md_wakeup_thread(mddev->thread); 1160 md_wakeup_thread(mddev->thread);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dbae459fb02d..43709fa6b6df 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3336,7 +3336,7 @@ static void handle_stripe(struct stripe_head *sh)
3336 3336
3337finish: 3337finish:
3338 /* wait for this device to become unblocked */ 3338 /* wait for this device to become unblocked */
3339 if (unlikely(s.blocked_rdev)) 3339 if (conf->mddev->external && unlikely(s.blocked_rdev))
3340 md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); 3340 md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
3341 3341
3342 if (s.handle_bad_blocks) 3342 if (s.handle_bad_blocks)
diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c
index 498024cec2d3..90873af5682e 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.c
+++ b/drivers/media/dvb/dvb-usb/vp7045.c
@@ -224,26 +224,8 @@ static struct dvb_usb_device_properties vp7045_properties;
224static int vp7045_usb_probe(struct usb_interface *intf, 224static int vp7045_usb_probe(struct usb_interface *intf,
225 const struct usb_device_id *id) 225 const struct usb_device_id *id)
226{ 226{
227 struct dvb_usb_device *d; 227 return dvb_usb_device_init(intf, &vp7045_properties,
228 int ret = dvb_usb_device_init(intf, &vp7045_properties, 228 THIS_MODULE, NULL, adapter_nr);
229 THIS_MODULE, &d, adapter_nr);
230 if (ret)
231 return ret;
232
233 d->priv = kmalloc(20, GFP_KERNEL);
234 if (!d->priv) {
235 dvb_usb_device_exit(intf);
236 return -ENOMEM;
237 }
238
239 return ret;
240}
241
242static void vp7045_usb_disconnect(struct usb_interface *intf)
243{
244 struct dvb_usb_device *d = usb_get_intfdata(intf);
245 kfree(d->priv);
246 dvb_usb_device_exit(intf);
247} 229}
248 230
249static struct usb_device_id vp7045_usb_table [] = { 231static struct usb_device_id vp7045_usb_table [] = {
@@ -258,7 +240,7 @@ MODULE_DEVICE_TABLE(usb, vp7045_usb_table);
258static struct dvb_usb_device_properties vp7045_properties = { 240static struct dvb_usb_device_properties vp7045_properties = {
259 .usb_ctrl = CYPRESS_FX2, 241 .usb_ctrl = CYPRESS_FX2,
260 .firmware = "dvb-usb-vp7045-01.fw", 242 .firmware = "dvb-usb-vp7045-01.fw",
261 .size_of_priv = sizeof(u8 *), 243 .size_of_priv = 20,
262 244
263 .num_adapters = 1, 245 .num_adapters = 1,
264 .adapter = { 246 .adapter = {
@@ -308,7 +290,7 @@ static struct dvb_usb_device_properties vp7045_properties = {
308static struct usb_driver vp7045_usb_driver = { 290static struct usb_driver vp7045_usb_driver = {
309 .name = "dvb_usb_vp7045", 291 .name = "dvb_usb_vp7045",
310 .probe = vp7045_usb_probe, 292 .probe = vp7045_usb_probe,
311 .disconnect = vp7045_usb_disconnect, 293 .disconnect = dvb_usb_device_exit,
312 .id_table = vp7045_usb_table, 294 .id_table = vp7045_usb_table,
313}; 295};
314 296
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index eae05b500476..144f3f55d765 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -618,7 +618,6 @@ static void nvt_dump_rx_buf(struct nvt_dev *nvt)
618static void nvt_process_rx_ir_data(struct nvt_dev *nvt) 618static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
619{ 619{
620 DEFINE_IR_RAW_EVENT(rawir); 620 DEFINE_IR_RAW_EVENT(rawir);
621 unsigned int count;
622 u32 carrier; 621 u32 carrier;
623 u8 sample; 622 u8 sample;
624 int i; 623 int i;
@@ -631,65 +630,38 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
631 if (nvt->carrier_detect_enabled) 630 if (nvt->carrier_detect_enabled)
632 carrier = nvt_rx_carrier_detect(nvt); 631 carrier = nvt_rx_carrier_detect(nvt);
633 632
634 count = nvt->pkts; 633 nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);
635 nvt_dbg_verbose("Processing buffer of len %d", count);
636 634
637 init_ir_raw_event(&rawir); 635 init_ir_raw_event(&rawir);
638 636
639 for (i = 0; i < count; i++) { 637 for (i = 0; i < nvt->pkts; i++) {
640 nvt->pkts--;
641 sample = nvt->buf[i]; 638 sample = nvt->buf[i];
642 639
643 rawir.pulse = ((sample & BUF_PULSE_BIT) != 0); 640 rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
644 rawir.duration = US_TO_NS((sample & BUF_LEN_MASK) 641 rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
645 * SAMPLE_PERIOD); 642 * SAMPLE_PERIOD);
646 643
647 if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) { 644 nvt_dbg("Storing %s with duration %d",
648 if (nvt->rawir.pulse == rawir.pulse) 645 rawir.pulse ? "pulse" : "space", rawir.duration);
649 nvt->rawir.duration += rawir.duration;
650 else {
651 nvt->rawir.duration = rawir.duration;
652 nvt->rawir.pulse = rawir.pulse;
653 }
654 continue;
655 }
656
657 rawir.duration += nvt->rawir.duration;
658 646
659 init_ir_raw_event(&nvt->rawir); 647 ir_raw_event_store_with_filter(nvt->rdev, &rawir);
660 nvt->rawir.duration = 0;
661 nvt->rawir.pulse = rawir.pulse;
662
663 if (sample == BUF_PULSE_BIT)
664 rawir.pulse = false;
665
666 if (rawir.duration) {
667 nvt_dbg("Storing %s with duration %d",
668 rawir.pulse ? "pulse" : "space",
669 rawir.duration);
670
671 ir_raw_event_store_with_filter(nvt->rdev, &rawir);
672 }
673 648
674 /* 649 /*
675 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE 650 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
676 * indicates end of IR signal, but new data incoming. In both 651 * indicates end of IR signal, but new data incoming. In both
677 * cases, it means we're ready to call ir_raw_event_handle 652 * cases, it means we're ready to call ir_raw_event_handle
678 */ 653 */
679 if ((sample == BUF_PULSE_BIT) && nvt->pkts) { 654 if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) {
680 nvt_dbg("Calling ir_raw_event_handle (signal end)\n"); 655 nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
681 ir_raw_event_handle(nvt->rdev); 656 ir_raw_event_handle(nvt->rdev);
682 } 657 }
683 } 658 }
684 659
660 nvt->pkts = 0;
661
685 nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n"); 662 nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
686 ir_raw_event_handle(nvt->rdev); 663 ir_raw_event_handle(nvt->rdev);
687 664
688 if (nvt->pkts) {
689 nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts);
690 nvt->pkts = 0;
691 }
692
693 nvt_dbg_verbose("%s done", __func__); 665 nvt_dbg_verbose("%s done", __func__);
694} 666}
695 667
@@ -1048,7 +1020,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
1048 1020
1049 spin_lock_init(&nvt->nvt_lock); 1021 spin_lock_init(&nvt->nvt_lock);
1050 spin_lock_init(&nvt->tx.lock); 1022 spin_lock_init(&nvt->tx.lock);
1051 init_ir_raw_event(&nvt->rawir);
1052 1023
1053 ret = -EBUSY; 1024 ret = -EBUSY;
1054 /* now claim resources */ 1025 /* now claim resources */
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 1241fc89a36c..0d5e0872a2ea 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -67,7 +67,6 @@ static int debug;
67struct nvt_dev { 67struct nvt_dev {
68 struct pnp_dev *pdev; 68 struct pnp_dev *pdev;
69 struct rc_dev *rdev; 69 struct rc_dev *rdev;
70 struct ir_raw_event rawir;
71 70
72 spinlock_t nvt_lock; 71 spinlock_t nvt_lock;
73 72
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index e9a0e94b9995..8c70e64444e7 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -338,7 +338,7 @@ int pwc_init_controls(struct pwc_device *pdev)
338 if (pdev->restore_factory) 338 if (pdev->restore_factory)
339 pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE; 339 pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE;
340 340
341 if (!pdev->features & FEATURE_MOTOR_PANTILT) 341 if (!(pdev->features & FEATURE_MOTOR_PANTILT))
342 return hdl->error; 342 return hdl->error;
343 343
344 /* Motor pan / tilt / reset */ 344 /* Motor pan / tilt / reset */
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
index 85d3048c1d67..bb7f17f2a33c 100644
--- a/drivers/media/video/via-camera.c
+++ b/drivers/media/video/via-camera.c
@@ -1332,6 +1332,8 @@ static __devinit bool viacam_serial_is_enabled(void)
1332 struct pci_bus *pbus = pci_find_bus(0, 0); 1332 struct pci_bus *pbus = pci_find_bus(0, 0);
1333 u8 cbyte; 1333 u8 cbyte;
1334 1334
1335 if (!pbus)
1336 return false;
1335 pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN, 1337 pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN,
1336 VIACAM_SERIAL_CREG, &cbyte); 1338 VIACAM_SERIAL_CREG, &cbyte);
1337 if ((cbyte & VIACAM_SERIAL_BIT) == 0) 1339 if ((cbyte & VIACAM_SERIAL_BIT) == 0)
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index a1d4ee6671be..ce61a5769765 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -827,7 +827,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
827 * DID_SOFT_ERROR is set. 827 * DID_SOFT_ERROR is set.
828 */ 828 */
829 if (ioc->bus_type == SPI) { 829 if (ioc->bus_type == SPI) {
830 if (pScsiReq->CDB[0] == READ_6 || 830 if ((pScsiReq->CDB[0] == READ_6 && ((pScsiReq->CDB[1] & 0x02) == 0)) ||
831 pScsiReq->CDB[0] == READ_10 || 831 pScsiReq->CDB[0] == READ_10 ||
832 pScsiReq->CDB[0] == READ_12 || 832 pScsiReq->CDB[0] == READ_12 ||
833 pScsiReq->CDB[0] == READ_16 || 833 pScsiReq->CDB[0] == READ_16 ||
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 37b83eb6d703..21574bdf485f 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -171,6 +171,37 @@ config MFD_TPS6586X
171 This driver can also be built as a module. If so, the module 171 This driver can also be built as a module. If so, the module
172 will be called tps6586x. 172 will be called tps6586x.
173 173
174config MFD_TPS65910
175 bool "TPS65910 Power Management chip"
176 depends on I2C=y && GPIOLIB
177 select MFD_CORE
178 select GPIO_TPS65910
179 help
180 if you say yes here you get support for the TPS65910 series of
181 Power Management chips.
182
183config MFD_TPS65912
184 bool
185 depends on GPIOLIB
186
187config MFD_TPS65912_I2C
188 bool "TPS95612 Power Management chip with I2C"
189 select MFD_CORE
190 select MFD_TPS65912
191 depends on I2C=y && GPIOLIB
192 help
193 If you say yes here you get support for the TPS65912 series of
194 PM chips with I2C interface.
195
196config MFD_TPS65912_SPI
197 bool "TPS65912 Power Management chip with SPI"
198 select MFD_CORE
199 select MFD_TPS65912
200 depends on SPI_MASTER && GPIOLIB
201 help
202 If you say yes here you get support for the TPS65912 series of
203 PM chips with SPI interface.
204
174config MENELAUS 205config MENELAUS
175 bool "Texas Instruments TWL92330/Menelaus PM chip" 206 bool "Texas Instruments TWL92330/Menelaus PM chip"
176 depends on I2C=y && ARCH_OMAP2 207 depends on I2C=y && ARCH_OMAP2
@@ -662,8 +693,9 @@ config MFD_JANZ_CMODIO
662 CAN and GPIO controllers. 693 CAN and GPIO controllers.
663 694
664config MFD_JZ4740_ADC 695config MFD_JZ4740_ADC
665 tristate "Support for the JZ4740 SoC ADC core" 696 bool "Support for the JZ4740 SoC ADC core"
666 select MFD_CORE 697 select MFD_CORE
698 select GENERIC_IRQ_CHIP
667 depends on MACH_JZ4740 699 depends on MACH_JZ4740
668 help 700 help
669 Say yes here if you want support for the ADC unit in the JZ4740 SoC. 701 Say yes here if you want support for the ADC unit in the JZ4740 SoC.
@@ -725,18 +757,19 @@ config MFD_PM8XXX_IRQ
725 This is required to use certain other PM 8xxx features, such as GPIO 757 This is required to use certain other PM 8xxx features, such as GPIO
726 and MPP. 758 and MPP.
727 759
728config MFD_TPS65910
729 bool "TPS65910 Power Management chip"
730 depends on I2C=y && GPIOLIB
731 select MFD_CORE
732 select GPIO_TPS65910
733 help
734 if you say yes here you get support for the TPS65910 series of
735 Power Management chips.
736
737config TPS65911_COMPARATOR 760config TPS65911_COMPARATOR
738 tristate 761 tristate
739 762
763config MFD_AAT2870_CORE
764 bool "Support for the AnalogicTech AAT2870"
765 select MFD_CORE
766 depends on I2C=y && GPIOLIB
767 help
768 If you say yes here you get support for the AAT2870.
769 This driver provides common support for accessing the device,
770 additional drivers must be enabled in order to use the
771 functionality of the device.
772
740endif # MFD_SUPPORT 773endif # MFD_SUPPORT
741 774
742menu "Multimedia Capabilities Port drivers" 775menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 22a280fcb705..c58020303d18 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o
23 23
24obj-$(CONFIG_MFD_WM8400) += wm8400-core.o 24obj-$(CONFIG_MFD_WM8400) += wm8400-core.o
25wm831x-objs := wm831x-core.o wm831x-irq.o wm831x-otp.o 25wm831x-objs := wm831x-core.o wm831x-irq.o wm831x-otp.o
26wm831x-objs += wm831x-auxadc.o
26obj-$(CONFIG_MFD_WM831X) += wm831x.o 27obj-$(CONFIG_MFD_WM831X) += wm831x.o
27obj-$(CONFIG_MFD_WM831X_I2C) += wm831x-i2c.o 28obj-$(CONFIG_MFD_WM831X_I2C) += wm831x-i2c.o
28obj-$(CONFIG_MFD_WM831X_SPI) += wm831x-spi.o 29obj-$(CONFIG_MFD_WM831X_SPI) += wm831x-spi.o
@@ -35,6 +36,11 @@ obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o
35obj-$(CONFIG_TPS6105X) += tps6105x.o 36obj-$(CONFIG_TPS6105X) += tps6105x.o
36obj-$(CONFIG_TPS65010) += tps65010.o 37obj-$(CONFIG_TPS65010) += tps65010.o
37obj-$(CONFIG_TPS6507X) += tps6507x.o 38obj-$(CONFIG_TPS6507X) += tps6507x.o
39obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
40tps65912-objs := tps65912-core.o tps65912-irq.o
41obj-$(CONFIG_MFD_TPS65912) += tps65912.o
42obj-$(CONFIG_MFD_TPS65912_I2C) += tps65912-i2c.o
43obj-$(CONFIG_MFD_TPS65912_SPI) += tps65912-spi.o
38obj-$(CONFIG_MENELAUS) += menelaus.o 44obj-$(CONFIG_MENELAUS) += menelaus.o
39 45
40obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o 46obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
@@ -94,5 +100,5 @@ obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o
94obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o 100obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o
95obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o 101obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o
96obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o 102obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o
97obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
98obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o 103obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
104obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
new file mode 100644
index 000000000000..345dc658ef06
--- /dev/null
+++ b/drivers/mfd/aat2870-core.c
@@ -0,0 +1,535 @@
1/*
2 * linux/drivers/mfd/aat2870-core.c
3 *
4 * Copyright (c) 2011, NVIDIA Corporation.
5 * Author: Jin Park <jinyoungp@nvidia.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 */
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/debugfs.h>
26#include <linux/slab.h>
27#include <linux/uaccess.h>
28#include <linux/i2c.h>
29#include <linux/delay.h>
30#include <linux/gpio.h>
31#include <linux/mfd/core.h>
32#include <linux/mfd/aat2870.h>
33#include <linux/regulator/machine.h>
34
35static struct aat2870_register aat2870_regs[AAT2870_REG_NUM] = {
36 /* readable, writeable, value */
37 { 0, 1, 0x00 }, /* 0x00 AAT2870_BL_CH_EN */
38 { 0, 1, 0x16 }, /* 0x01 AAT2870_BLM */
39 { 0, 1, 0x16 }, /* 0x02 AAT2870_BLS */
40 { 0, 1, 0x56 }, /* 0x03 AAT2870_BL1 */
41 { 0, 1, 0x56 }, /* 0x04 AAT2870_BL2 */
42 { 0, 1, 0x56 }, /* 0x05 AAT2870_BL3 */
43 { 0, 1, 0x56 }, /* 0x06 AAT2870_BL4 */
44 { 0, 1, 0x56 }, /* 0x07 AAT2870_BL5 */
45 { 0, 1, 0x56 }, /* 0x08 AAT2870_BL6 */
46 { 0, 1, 0x56 }, /* 0x09 AAT2870_BL7 */
47 { 0, 1, 0x56 }, /* 0x0A AAT2870_BL8 */
48 { 0, 1, 0x00 }, /* 0x0B AAT2870_FLR */
49 { 0, 1, 0x03 }, /* 0x0C AAT2870_FM */
50 { 0, 1, 0x03 }, /* 0x0D AAT2870_FS */
51 { 0, 1, 0x10 }, /* 0x0E AAT2870_ALS_CFG0 */
52 { 0, 1, 0x06 }, /* 0x0F AAT2870_ALS_CFG1 */
53 { 0, 1, 0x00 }, /* 0x10 AAT2870_ALS_CFG2 */
54 { 1, 0, 0x00 }, /* 0x11 AAT2870_AMB */
55 { 0, 1, 0x00 }, /* 0x12 AAT2870_ALS0 */
56 { 0, 1, 0x00 }, /* 0x13 AAT2870_ALS1 */
57 { 0, 1, 0x00 }, /* 0x14 AAT2870_ALS2 */
58 { 0, 1, 0x00 }, /* 0x15 AAT2870_ALS3 */
59 { 0, 1, 0x00 }, /* 0x16 AAT2870_ALS4 */
60 { 0, 1, 0x00 }, /* 0x17 AAT2870_ALS5 */
61 { 0, 1, 0x00 }, /* 0x18 AAT2870_ALS6 */
62 { 0, 1, 0x00 }, /* 0x19 AAT2870_ALS7 */
63 { 0, 1, 0x00 }, /* 0x1A AAT2870_ALS8 */
64 { 0, 1, 0x00 }, /* 0x1B AAT2870_ALS9 */
65 { 0, 1, 0x00 }, /* 0x1C AAT2870_ALSA */
66 { 0, 1, 0x00 }, /* 0x1D AAT2870_ALSB */
67 { 0, 1, 0x00 }, /* 0x1E AAT2870_ALSC */
68 { 0, 1, 0x00 }, /* 0x1F AAT2870_ALSD */
69 { 0, 1, 0x00 }, /* 0x20 AAT2870_ALSE */
70 { 0, 1, 0x00 }, /* 0x21 AAT2870_ALSF */
71 { 0, 1, 0x00 }, /* 0x22 AAT2870_SUB_SET */
72 { 0, 1, 0x00 }, /* 0x23 AAT2870_SUB_CTRL */
73 { 0, 1, 0x00 }, /* 0x24 AAT2870_LDO_AB */
74 { 0, 1, 0x00 }, /* 0x25 AAT2870_LDO_CD */
75 { 0, 1, 0x00 }, /* 0x26 AAT2870_LDO_EN */
76};
77
78static struct mfd_cell aat2870_devs[] = {
79 {
80 .name = "aat2870-backlight",
81 .id = AAT2870_ID_BL,
82 .pdata_size = sizeof(struct aat2870_bl_platform_data),
83 },
84 {
85 .name = "aat2870-regulator",
86 .id = AAT2870_ID_LDOA,
87 .pdata_size = sizeof(struct regulator_init_data),
88 },
89 {
90 .name = "aat2870-regulator",
91 .id = AAT2870_ID_LDOB,
92 .pdata_size = sizeof(struct regulator_init_data),
93 },
94 {
95 .name = "aat2870-regulator",
96 .id = AAT2870_ID_LDOC,
97 .pdata_size = sizeof(struct regulator_init_data),
98 },
99 {
100 .name = "aat2870-regulator",
101 .id = AAT2870_ID_LDOD,
102 .pdata_size = sizeof(struct regulator_init_data),
103 },
104};
105
106static int __aat2870_read(struct aat2870_data *aat2870, u8 addr, u8 *val)
107{
108 int ret;
109
110 if (addr >= AAT2870_REG_NUM) {
111 dev_err(aat2870->dev, "Invalid address, 0x%02x\n", addr);
112 return -EINVAL;
113 }
114
115 if (!aat2870->reg_cache[addr].readable) {
116 *val = aat2870->reg_cache[addr].value;
117 goto out;
118 }
119
120 ret = i2c_master_send(aat2870->client, &addr, 1);
121 if (ret < 0)
122 return ret;
123 if (ret != 1)
124 return -EIO;
125
126 ret = i2c_master_recv(aat2870->client, val, 1);
127 if (ret < 0)
128 return ret;
129 if (ret != 1)
130 return -EIO;
131
132out:
133 dev_dbg(aat2870->dev, "read: addr=0x%02x, val=0x%02x\n", addr, *val);
134 return 0;
135}
136
137static int __aat2870_write(struct aat2870_data *aat2870, u8 addr, u8 val)
138{
139 u8 msg[2];
140 int ret;
141
142 if (addr >= AAT2870_REG_NUM) {
143 dev_err(aat2870->dev, "Invalid address, 0x%02x\n", addr);
144 return -EINVAL;
145 }
146
147 if (!aat2870->reg_cache[addr].writeable) {
148 dev_err(aat2870->dev, "Address 0x%02x is not writeable\n",
149 addr);
150 return -EINVAL;
151 }
152
153 msg[0] = addr;
154 msg[1] = val;
155 ret = i2c_master_send(aat2870->client, msg, 2);
156 if (ret < 0)
157 return ret;
158 if (ret != 2)
159 return -EIO;
160
161 aat2870->reg_cache[addr].value = val;
162
163 dev_dbg(aat2870->dev, "write: addr=0x%02x, val=0x%02x\n", addr, val);
164 return 0;
165}
166
167static int aat2870_read(struct aat2870_data *aat2870, u8 addr, u8 *val)
168{
169 int ret;
170
171 mutex_lock(&aat2870->io_lock);
172 ret = __aat2870_read(aat2870, addr, val);
173 mutex_unlock(&aat2870->io_lock);
174
175 return ret;
176}
177
178static int aat2870_write(struct aat2870_data *aat2870, u8 addr, u8 val)
179{
180 int ret;
181
182 mutex_lock(&aat2870->io_lock);
183 ret = __aat2870_write(aat2870, addr, val);
184 mutex_unlock(&aat2870->io_lock);
185
186 return ret;
187}
188
189static int aat2870_update(struct aat2870_data *aat2870, u8 addr, u8 mask,
190 u8 val)
191{
192 int change;
193 u8 old_val, new_val;
194 int ret;
195
196 mutex_lock(&aat2870->io_lock);
197
198 ret = __aat2870_read(aat2870, addr, &old_val);
199 if (ret)
200 goto out_unlock;
201
202 new_val = (old_val & ~mask) | (val & mask);
203 change = old_val != new_val;
204 if (change)
205 ret = __aat2870_write(aat2870, addr, new_val);
206
207out_unlock:
208 mutex_unlock(&aat2870->io_lock);
209
210 return ret;
211}
212
213static inline void aat2870_enable(struct aat2870_data *aat2870)
214{
215 if (aat2870->en_pin >= 0)
216 gpio_set_value(aat2870->en_pin, 1);
217
218 aat2870->is_enable = 1;
219}
220
221static inline void aat2870_disable(struct aat2870_data *aat2870)
222{
223 if (aat2870->en_pin >= 0)
224 gpio_set_value(aat2870->en_pin, 0);
225
226 aat2870->is_enable = 0;
227}
228
229#ifdef CONFIG_DEBUG_FS
230static ssize_t aat2870_dump_reg(struct aat2870_data *aat2870, char *buf)
231{
232 u8 addr, val;
233 ssize_t count = 0;
234 int ret;
235
236 count += sprintf(buf, "aat2870 registers\n");
237 for (addr = 0; addr < AAT2870_REG_NUM; addr++) {
238 count += sprintf(buf + count, "0x%02x: ", addr);
239 if (count >= PAGE_SIZE - 1)
240 break;
241
242 ret = aat2870->read(aat2870, addr, &val);
243 if (ret == 0)
244 count += snprintf(buf + count, PAGE_SIZE - count,
245 "0x%02x", val);
246 else
247 count += snprintf(buf + count, PAGE_SIZE - count,
248 "<read fail: %d>", ret);
249
250 if (count >= PAGE_SIZE - 1)
251 break;
252
253 count += snprintf(buf + count, PAGE_SIZE - count, "\n");
254 if (count >= PAGE_SIZE - 1)
255 break;
256 }
257
258 /* Truncate count; min() would cause a warning */
259 if (count >= PAGE_SIZE)
260 count = PAGE_SIZE - 1;
261
262 return count;
263}
264
265static int aat2870_reg_open_file(struct inode *inode, struct file *file)
266{
267 file->private_data = inode->i_private;
268
269 return 0;
270}
271
272static ssize_t aat2870_reg_read_file(struct file *file, char __user *user_buf,
273 size_t count, loff_t *ppos)
274{
275 struct aat2870_data *aat2870 = file->private_data;
276 char *buf;
277 ssize_t ret;
278
279 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
280 if (!buf)
281 return -ENOMEM;
282
283 ret = aat2870_dump_reg(aat2870, buf);
284 if (ret >= 0)
285 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
286
287 kfree(buf);
288
289 return ret;
290}
291
292static ssize_t aat2870_reg_write_file(struct file *file,
293 const char __user *user_buf, size_t count,
294 loff_t *ppos)
295{
296 struct aat2870_data *aat2870 = file->private_data;
297 char buf[32];
298 int buf_size;
299 char *start = buf;
300 unsigned long addr, val;
301 int ret;
302
303 buf_size = min(count, (sizeof(buf)-1));
304 if (copy_from_user(buf, user_buf, buf_size)) {
305 dev_err(aat2870->dev, "Failed to copy from user\n");
306 return -EFAULT;
307 }
308 buf[buf_size] = 0;
309
310 while (*start == ' ')
311 start++;
312
313 addr = simple_strtoul(start, &start, 16);
314 if (addr >= AAT2870_REG_NUM) {
315 dev_err(aat2870->dev, "Invalid address, 0x%lx\n", addr);
316 return -EINVAL;
317 }
318
319 while (*start == ' ')
320 start++;
321
322 if (strict_strtoul(start, 16, &val))
323 return -EINVAL;
324
325 ret = aat2870->write(aat2870, (u8)addr, (u8)val);
326 if (ret)
327 return ret;
328
329 return buf_size;
330}
331
332static const struct file_operations aat2870_reg_fops = {
333 .open = aat2870_reg_open_file,
334 .read = aat2870_reg_read_file,
335 .write = aat2870_reg_write_file,
336};
337
338static void aat2870_init_debugfs(struct aat2870_data *aat2870)
339{
340 aat2870->dentry_root = debugfs_create_dir("aat2870", NULL);
341 if (!aat2870->dentry_root) {
342 dev_warn(aat2870->dev,
343 "Failed to create debugfs root directory\n");
344 return;
345 }
346
347 aat2870->dentry_reg = debugfs_create_file("regs", 0644,
348 aat2870->dentry_root,
349 aat2870, &aat2870_reg_fops);
350 if (!aat2870->dentry_reg)
351 dev_warn(aat2870->dev,
352 "Failed to create debugfs register file\n");
353}
354
355static void aat2870_uninit_debugfs(struct aat2870_data *aat2870)
356{
357 debugfs_remove_recursive(aat2870->dentry_root);
358}
359#else
360static inline void aat2870_init_debugfs(struct aat2870_data *aat2870)
361{
362}
363
364static inline void aat2870_uninit_debugfs(struct aat2870_data *aat2870)
365{
366}
367#endif /* CONFIG_DEBUG_FS */
368
369static int aat2870_i2c_probe(struct i2c_client *client,
370 const struct i2c_device_id *id)
371{
372 struct aat2870_platform_data *pdata = client->dev.platform_data;
373 struct aat2870_data *aat2870;
374 int i, j;
375 int ret = 0;
376
377 aat2870 = kzalloc(sizeof(struct aat2870_data), GFP_KERNEL);
378 if (!aat2870) {
379 dev_err(&client->dev,
380 "Failed to allocate memory for aat2870\n");
381 ret = -ENOMEM;
382 goto out;
383 }
384
385 aat2870->dev = &client->dev;
386 dev_set_drvdata(aat2870->dev, aat2870);
387
388 aat2870->client = client;
389 i2c_set_clientdata(client, aat2870);
390
391 aat2870->reg_cache = aat2870_regs;
392
393 if (pdata->en_pin < 0)
394 aat2870->en_pin = -1;
395 else
396 aat2870->en_pin = pdata->en_pin;
397
398 aat2870->init = pdata->init;
399 aat2870->uninit = pdata->uninit;
400 aat2870->read = aat2870_read;
401 aat2870->write = aat2870_write;
402 aat2870->update = aat2870_update;
403
404 mutex_init(&aat2870->io_lock);
405
406 if (aat2870->init)
407 aat2870->init(aat2870);
408
409 if (aat2870->en_pin >= 0) {
410 ret = gpio_request(aat2870->en_pin, "aat2870-en");
411 if (ret < 0) {
412 dev_err(&client->dev,
413 "Failed to request GPIO %d\n", aat2870->en_pin);
414 goto out_kfree;
415 }
416 gpio_direction_output(aat2870->en_pin, 1);
417 }
418
419 aat2870_enable(aat2870);
420
421 for (i = 0; i < pdata->num_subdevs; i++) {
422 for (j = 0; j < ARRAY_SIZE(aat2870_devs); j++) {
423 if ((pdata->subdevs[i].id == aat2870_devs[j].id) &&
424 !strcmp(pdata->subdevs[i].name,
425 aat2870_devs[j].name)) {
426 aat2870_devs[j].platform_data =
427 pdata->subdevs[i].platform_data;
428 break;
429 }
430 }
431 }
432
433 ret = mfd_add_devices(aat2870->dev, 0, aat2870_devs,
434 ARRAY_SIZE(aat2870_devs), NULL, 0);
435 if (ret != 0) {
436 dev_err(aat2870->dev, "Failed to add subdev: %d\n", ret);
437 goto out_disable;
438 }
439
440 aat2870_init_debugfs(aat2870);
441
442 return 0;
443
444out_disable:
445 aat2870_disable(aat2870);
446 if (aat2870->en_pin >= 0)
447 gpio_free(aat2870->en_pin);
448out_kfree:
449 kfree(aat2870);
450out:
451 return ret;
452}
453
454static int aat2870_i2c_remove(struct i2c_client *client)
455{
456 struct aat2870_data *aat2870 = i2c_get_clientdata(client);
457
458 aat2870_uninit_debugfs(aat2870);
459
460 mfd_remove_devices(aat2870->dev);
461 aat2870_disable(aat2870);
462 if (aat2870->en_pin >= 0)
463 gpio_free(aat2870->en_pin);
464 if (aat2870->uninit)
465 aat2870->uninit(aat2870);
466 kfree(aat2870);
467
468 return 0;
469}
470
471#ifdef CONFIG_PM
472static int aat2870_i2c_suspend(struct i2c_client *client, pm_message_t state)
473{
474 struct aat2870_data *aat2870 = i2c_get_clientdata(client);
475
476 aat2870_disable(aat2870);
477
478 return 0;
479}
480
481static int aat2870_i2c_resume(struct i2c_client *client)
482{
483 struct aat2870_data *aat2870 = i2c_get_clientdata(client);
484 struct aat2870_register *reg = NULL;
485 int i;
486
487 aat2870_enable(aat2870);
488
489 /* restore registers */
490 for (i = 0; i < AAT2870_REG_NUM; i++) {
491 reg = &aat2870->reg_cache[i];
492 if (reg->writeable)
493 aat2870->write(aat2870, i, reg->value);
494 }
495
496 return 0;
497}
498#else
499#define aat2870_i2c_suspend NULL
500#define aat2870_i2c_resume NULL
501#endif /* CONFIG_PM */
502
503static struct i2c_device_id aat2870_i2c_id_table[] = {
504 { "aat2870", 0 },
505 { }
506};
507MODULE_DEVICE_TABLE(i2c, aat2870_i2c_id_table);
508
509static struct i2c_driver aat2870_i2c_driver = {
510 .driver = {
511 .name = "aat2870",
512 .owner = THIS_MODULE,
513 },
514 .probe = aat2870_i2c_probe,
515 .remove = aat2870_i2c_remove,
516 .suspend = aat2870_i2c_suspend,
517 .resume = aat2870_i2c_resume,
518 .id_table = aat2870_i2c_id_table,
519};
520
521static int __init aat2870_init(void)
522{
523 return i2c_add_driver(&aat2870_i2c_driver);
524}
525subsys_initcall(aat2870_init);
526
527static void __exit aat2870_exit(void)
528{
529 i2c_del_driver(&aat2870_i2c_driver);
530}
531module_exit(aat2870_exit);
532
533MODULE_DESCRIPTION("Core support for the AnalogicTech AAT2870");
534MODULE_LICENSE("GPL");
535MODULE_AUTHOR("Jin Park <jinyoungp@nvidia.com>");
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
index 3d7dce671b93..56ba1943c91d 100644
--- a/drivers/mfd/ab3550-core.c
+++ b/drivers/mfd/ab3550-core.c
@@ -879,20 +879,13 @@ static ssize_t ab3550_bank_write(struct file *file,
879 size_t count, loff_t *ppos) 879 size_t count, loff_t *ppos)
880{ 880{
881 struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private; 881 struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
882 char buf[32];
883 int buf_size;
884 unsigned long user_bank; 882 unsigned long user_bank;
885 int err; 883 int err;
886 884
887 /* Get userspace string and assure termination */ 885 /* Get userspace string and assure termination */
888 buf_size = min(count, (sizeof(buf) - 1)); 886 err = kstrtoul_from_user(user_buf, count, 0, &user_bank);
889 if (copy_from_user(buf, user_buf, buf_size))
890 return -EFAULT;
891 buf[buf_size] = 0;
892
893 err = strict_strtoul(buf, 0, &user_bank);
894 if (err) 887 if (err)
895 return -EINVAL; 888 return err;
896 889
897 if (user_bank >= AB3550_NUM_BANKS) { 890 if (user_bank >= AB3550_NUM_BANKS) {
898 dev_err(&ab->i2c_client[0]->dev, 891 dev_err(&ab->i2c_client[0]->dev,
@@ -902,7 +895,7 @@ static ssize_t ab3550_bank_write(struct file *file,
902 895
903 ab->debug_bank = user_bank; 896 ab->debug_bank = user_bank;
904 897
905 return buf_size; 898 return count;
906} 899}
907 900
908static int ab3550_address_print(struct seq_file *s, void *p) 901static int ab3550_address_print(struct seq_file *s, void *p)
@@ -923,27 +916,21 @@ static ssize_t ab3550_address_write(struct file *file,
923 size_t count, loff_t *ppos) 916 size_t count, loff_t *ppos)
924{ 917{
925 struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private; 918 struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
926 char buf[32];
927 int buf_size;
928 unsigned long user_address; 919 unsigned long user_address;
929 int err; 920 int err;
930 921
931 /* Get userspace string and assure termination */ 922 /* Get userspace string and assure termination */
932 buf_size = min(count, (sizeof(buf) - 1)); 923 err = kstrtoul_from_user(user_buf, count, 0, &user_address);
933 if (copy_from_user(buf, user_buf, buf_size))
934 return -EFAULT;
935 buf[buf_size] = 0;
936
937 err = strict_strtoul(buf, 0, &user_address);
938 if (err) 924 if (err)
939 return -EINVAL; 925 return err;
926
940 if (user_address > 0xff) { 927 if (user_address > 0xff) {
941 dev_err(&ab->i2c_client[0]->dev, 928 dev_err(&ab->i2c_client[0]->dev,
942 "debugfs error input > 0xff\n"); 929 "debugfs error input > 0xff\n");
943 return -EINVAL; 930 return -EINVAL;
944 } 931 }
945 ab->debug_address = user_address; 932 ab->debug_address = user_address;
946 return buf_size; 933 return count;
947} 934}
948 935
949static int ab3550_val_print(struct seq_file *s, void *p) 936static int ab3550_val_print(struct seq_file *s, void *p)
@@ -971,21 +958,15 @@ static ssize_t ab3550_val_write(struct file *file,
971 size_t count, loff_t *ppos) 958 size_t count, loff_t *ppos)
972{ 959{
973 struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private; 960 struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
974 char buf[32];
975 int buf_size;
976 unsigned long user_val; 961 unsigned long user_val;
977 int err; 962 int err;
978 u8 regvalue; 963 u8 regvalue;
979 964
980 /* Get userspace string and assure termination */ 965 /* Get userspace string and assure termination */
981 buf_size = min(count, (sizeof(buf)-1)); 966 err = kstrtoul_from_user(user_buf, count, 0, &user_val);
982 if (copy_from_user(buf, user_buf, buf_size))
983 return -EFAULT;
984 buf[buf_size] = 0;
985
986 err = strict_strtoul(buf, 0, &user_val);
987 if (err) 967 if (err)
988 return -EINVAL; 968 return err;
969
989 if (user_val > 0xff) { 970 if (user_val > 0xff) {
990 dev_err(&ab->i2c_client[0]->dev, 971 dev_err(&ab->i2c_client[0]->dev,
991 "debugfs error input > 0xff\n"); 972 "debugfs error input > 0xff\n");
@@ -1002,7 +983,7 @@ static ssize_t ab3550_val_write(struct file *file,
1002 if (err) 983 if (err)
1003 return -EINVAL; 984 return -EINVAL;
1004 985
1005 return buf_size; 986 return count;
1006} 987}
1007 988
1008static const struct file_operations ab3550_bank_fops = { 989static const struct file_operations ab3550_bank_fops = {
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index fc0c1af1566e..387705e494b9 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -363,7 +363,7 @@ static void ab8500_irq_remove(struct ab8500 *ab8500)
363 } 363 }
364} 364}
365 365
366static struct resource ab8500_gpio_resources[] = { 366static struct resource __devinitdata ab8500_gpio_resources[] = {
367 { 367 {
368 .name = "GPIO_INT6", 368 .name = "GPIO_INT6",
369 .start = AB8500_INT_GPIO6R, 369 .start = AB8500_INT_GPIO6R,
@@ -372,7 +372,7 @@ static struct resource ab8500_gpio_resources[] = {
372 } 372 }
373}; 373};
374 374
375static struct resource ab8500_gpadc_resources[] = { 375static struct resource __devinitdata ab8500_gpadc_resources[] = {
376 { 376 {
377 .name = "HW_CONV_END", 377 .name = "HW_CONV_END",
378 .start = AB8500_INT_GP_HW_ADC_CONV_END, 378 .start = AB8500_INT_GP_HW_ADC_CONV_END,
@@ -387,7 +387,7 @@ static struct resource ab8500_gpadc_resources[] = {
387 }, 387 },
388}; 388};
389 389
390static struct resource ab8500_rtc_resources[] = { 390static struct resource __devinitdata ab8500_rtc_resources[] = {
391 { 391 {
392 .name = "60S", 392 .name = "60S",
393 .start = AB8500_INT_RTC_60S, 393 .start = AB8500_INT_RTC_60S,
@@ -402,7 +402,7 @@ static struct resource ab8500_rtc_resources[] = {
402 }, 402 },
403}; 403};
404 404
405static struct resource ab8500_poweronkey_db_resources[] = { 405static struct resource __devinitdata ab8500_poweronkey_db_resources[] = {
406 { 406 {
407 .name = "ONKEY_DBF", 407 .name = "ONKEY_DBF",
408 .start = AB8500_INT_PON_KEY1DB_F, 408 .start = AB8500_INT_PON_KEY1DB_F,
@@ -417,20 +417,47 @@ static struct resource ab8500_poweronkey_db_resources[] = {
417 }, 417 },
418}; 418};
419 419
420static struct resource ab8500_bm_resources[] = { 420static struct resource __devinitdata ab8500_av_acc_detect_resources[] = {
421 { 421 {
422 .name = "MAIN_EXT_CH_NOT_OK", 422 .name = "ACC_DETECT_1DB_F",
423 .start = AB8500_INT_MAIN_EXT_CH_NOT_OK, 423 .start = AB8500_INT_ACC_DETECT_1DB_F,
424 .end = AB8500_INT_MAIN_EXT_CH_NOT_OK, 424 .end = AB8500_INT_ACC_DETECT_1DB_F,
425 .flags = IORESOURCE_IRQ, 425 .flags = IORESOURCE_IRQ,
426 }, 426 },
427 { 427 {
428 .name = "BATT_OVV", 428 .name = "ACC_DETECT_1DB_R",
429 .start = AB8500_INT_BATT_OVV, 429 .start = AB8500_INT_ACC_DETECT_1DB_R,
430 .end = AB8500_INT_BATT_OVV, 430 .end = AB8500_INT_ACC_DETECT_1DB_R,
431 .flags = IORESOURCE_IRQ, 431 .flags = IORESOURCE_IRQ,
432 },
433 {
434 .name = "ACC_DETECT_21DB_F",
435 .start = AB8500_INT_ACC_DETECT_21DB_F,
436 .end = AB8500_INT_ACC_DETECT_21DB_F,
437 .flags = IORESOURCE_IRQ,
438 },
439 {
440 .name = "ACC_DETECT_21DB_R",
441 .start = AB8500_INT_ACC_DETECT_21DB_R,
442 .end = AB8500_INT_ACC_DETECT_21DB_R,
443 .flags = IORESOURCE_IRQ,
444 },
445 {
446 .name = "ACC_DETECT_22DB_F",
447 .start = AB8500_INT_ACC_DETECT_22DB_F,
448 .end = AB8500_INT_ACC_DETECT_22DB_F,
449 .flags = IORESOURCE_IRQ,
432 }, 450 },
433 { 451 {
452 .name = "ACC_DETECT_22DB_R",
453 .start = AB8500_INT_ACC_DETECT_22DB_R,
454 .end = AB8500_INT_ACC_DETECT_22DB_R,
455 .flags = IORESOURCE_IRQ,
456 },
457};
458
459static struct resource __devinitdata ab8500_charger_resources[] = {
460 {
434 .name = "MAIN_CH_UNPLUG_DET", 461 .name = "MAIN_CH_UNPLUG_DET",
435 .start = AB8500_INT_MAIN_CH_UNPLUG_DET, 462 .start = AB8500_INT_MAIN_CH_UNPLUG_DET,
436 .end = AB8500_INT_MAIN_CH_UNPLUG_DET, 463 .end = AB8500_INT_MAIN_CH_UNPLUG_DET,
@@ -443,27 +470,27 @@ static struct resource ab8500_bm_resources[] = {
443 .flags = IORESOURCE_IRQ, 470 .flags = IORESOURCE_IRQ,
444 }, 471 },
445 { 472 {
446 .name = "VBUS_DET_F",
447 .start = AB8500_INT_VBUS_DET_F,
448 .end = AB8500_INT_VBUS_DET_F,
449 .flags = IORESOURCE_IRQ,
450 },
451 {
452 .name = "VBUS_DET_R", 473 .name = "VBUS_DET_R",
453 .start = AB8500_INT_VBUS_DET_R, 474 .start = AB8500_INT_VBUS_DET_R,
454 .end = AB8500_INT_VBUS_DET_R, 475 .end = AB8500_INT_VBUS_DET_R,
455 .flags = IORESOURCE_IRQ, 476 .flags = IORESOURCE_IRQ,
456 }, 477 },
457 { 478 {
458 .name = "BAT_CTRL_INDB", 479 .name = "VBUS_DET_F",
459 .start = AB8500_INT_BAT_CTRL_INDB, 480 .start = AB8500_INT_VBUS_DET_F,
460 .end = AB8500_INT_BAT_CTRL_INDB, 481 .end = AB8500_INT_VBUS_DET_F,
461 .flags = IORESOURCE_IRQ, 482 .flags = IORESOURCE_IRQ,
462 }, 483 },
463 { 484 {
464 .name = "CH_WD_EXP", 485 .name = "USB_LINK_STATUS",
465 .start = AB8500_INT_CH_WD_EXP, 486 .start = AB8500_INT_USB_LINK_STATUS,
466 .end = AB8500_INT_CH_WD_EXP, 487 .end = AB8500_INT_USB_LINK_STATUS,
488 .flags = IORESOURCE_IRQ,
489 },
490 {
491 .name = "USB_CHARGE_DET_DONE",
492 .start = AB8500_INT_USB_CHG_DET_DONE,
493 .end = AB8500_INT_USB_CHG_DET_DONE,
467 .flags = IORESOURCE_IRQ, 494 .flags = IORESOURCE_IRQ,
468 }, 495 },
469 { 496 {
@@ -473,21 +500,60 @@ static struct resource ab8500_bm_resources[] = {
473 .flags = IORESOURCE_IRQ, 500 .flags = IORESOURCE_IRQ,
474 }, 501 },
475 { 502 {
476 .name = "NCONV_ACCU", 503 .name = "USB_CH_TH_PROT_R",
477 .start = AB8500_INT_CCN_CONV_ACC, 504 .start = AB8500_INT_USB_CH_TH_PROT_R,
478 .end = AB8500_INT_CCN_CONV_ACC, 505 .end = AB8500_INT_USB_CH_TH_PROT_R,
479 .flags = IORESOURCE_IRQ, 506 .flags = IORESOURCE_IRQ,
480 }, 507 },
481 { 508 {
482 .name = "LOW_BAT_F", 509 .name = "USB_CH_TH_PROT_F",
483 .start = AB8500_INT_LOW_BAT_F, 510 .start = AB8500_INT_USB_CH_TH_PROT_F,
484 .end = AB8500_INT_LOW_BAT_F, 511 .end = AB8500_INT_USB_CH_TH_PROT_F,
485 .flags = IORESOURCE_IRQ, 512 .flags = IORESOURCE_IRQ,
486 }, 513 },
487 { 514 {
488 .name = "LOW_BAT_R", 515 .name = "MAIN_EXT_CH_NOT_OK",
489 .start = AB8500_INT_LOW_BAT_R, 516 .start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
490 .end = AB8500_INT_LOW_BAT_R, 517 .end = AB8500_INT_MAIN_EXT_CH_NOT_OK,
518 .flags = IORESOURCE_IRQ,
519 },
520 {
521 .name = "MAIN_CH_TH_PROT_R",
522 .start = AB8500_INT_MAIN_CH_TH_PROT_R,
523 .end = AB8500_INT_MAIN_CH_TH_PROT_R,
524 .flags = IORESOURCE_IRQ,
525 },
526 {
527 .name = "MAIN_CH_TH_PROT_F",
528 .start = AB8500_INT_MAIN_CH_TH_PROT_F,
529 .end = AB8500_INT_MAIN_CH_TH_PROT_F,
530 .flags = IORESOURCE_IRQ,
531 },
532 {
533 .name = "USB_CHARGER_NOT_OKR",
534 .start = AB8500_INT_USB_CHARGER_NOT_OK,
535 .end = AB8500_INT_USB_CHARGER_NOT_OK,
536 .flags = IORESOURCE_IRQ,
537 },
538 {
539 .name = "USB_CHARGER_NOT_OKF",
540 .start = AB8500_INT_USB_CHARGER_NOT_OKF,
541 .end = AB8500_INT_USB_CHARGER_NOT_OKF,
542 .flags = IORESOURCE_IRQ,
543 },
544 {
545 .name = "CH_WD_EXP",
546 .start = AB8500_INT_CH_WD_EXP,
547 .end = AB8500_INT_CH_WD_EXP,
548 .flags = IORESOURCE_IRQ,
549 },
550};
551
552static struct resource __devinitdata ab8500_btemp_resources[] = {
553 {
554 .name = "BAT_CTRL_INDB",
555 .start = AB8500_INT_BAT_CTRL_INDB,
556 .end = AB8500_INT_BAT_CTRL_INDB,
491 .flags = IORESOURCE_IRQ, 557 .flags = IORESOURCE_IRQ,
492 }, 558 },
493 { 559 {
@@ -503,38 +569,55 @@ static struct resource ab8500_bm_resources[] = {
503 .flags = IORESOURCE_IRQ, 569 .flags = IORESOURCE_IRQ,
504 }, 570 },
505 { 571 {
506 .name = "USB_CHARGER_NOT_OKR", 572 .name = "BTEMP_LOW_MEDIUM",
507 .start = AB8500_INT_USB_CHARGER_NOT_OK, 573 .start = AB8500_INT_BTEMP_LOW_MEDIUM,
508 .end = AB8500_INT_USB_CHARGER_NOT_OK, 574 .end = AB8500_INT_BTEMP_LOW_MEDIUM,
509 .flags = IORESOURCE_IRQ, 575 .flags = IORESOURCE_IRQ,
510 }, 576 },
511 { 577 {
512 .name = "USB_CHARGE_DET_DONE", 578 .name = "BTEMP_MEDIUM_HIGH",
513 .start = AB8500_INT_USB_CHG_DET_DONE, 579 .start = AB8500_INT_BTEMP_MEDIUM_HIGH,
514 .end = AB8500_INT_USB_CHG_DET_DONE, 580 .end = AB8500_INT_BTEMP_MEDIUM_HIGH,
515 .flags = IORESOURCE_IRQ, 581 .flags = IORESOURCE_IRQ,
516 }, 582 },
583};
584
585static struct resource __devinitdata ab8500_fg_resources[] = {
517 { 586 {
518 .name = "USB_CH_TH_PROT_R", 587 .name = "NCONV_ACCU",
519 .start = AB8500_INT_USB_CH_TH_PROT_R, 588 .start = AB8500_INT_CCN_CONV_ACC,
520 .end = AB8500_INT_USB_CH_TH_PROT_R, 589 .end = AB8500_INT_CCN_CONV_ACC,
521 .flags = IORESOURCE_IRQ, 590 .flags = IORESOURCE_IRQ,
522 }, 591 },
523 { 592 {
524 .name = "MAIN_CH_TH_PROT_R", 593 .name = "BATT_OVV",
525 .start = AB8500_INT_MAIN_CH_TH_PROT_R, 594 .start = AB8500_INT_BATT_OVV,
526 .end = AB8500_INT_MAIN_CH_TH_PROT_R, 595 .end = AB8500_INT_BATT_OVV,
527 .flags = IORESOURCE_IRQ, 596 .flags = IORESOURCE_IRQ,
528 }, 597 },
529 { 598 {
530 .name = "USB_CHARGER_NOT_OKF", 599 .name = "LOW_BAT_F",
531 .start = AB8500_INT_USB_CHARGER_NOT_OKF, 600 .start = AB8500_INT_LOW_BAT_F,
532 .end = AB8500_INT_USB_CHARGER_NOT_OKF, 601 .end = AB8500_INT_LOW_BAT_F,
602 .flags = IORESOURCE_IRQ,
603 },
604 {
605 .name = "LOW_BAT_R",
606 .start = AB8500_INT_LOW_BAT_R,
607 .end = AB8500_INT_LOW_BAT_R,
608 .flags = IORESOURCE_IRQ,
609 },
610 {
611 .name = "CC_INT_CALIB",
612 .start = AB8500_INT_CC_INT_CALIB,
613 .end = AB8500_INT_CC_INT_CALIB,
533 .flags = IORESOURCE_IRQ, 614 .flags = IORESOURCE_IRQ,
534 }, 615 },
535}; 616};
536 617
537static struct resource ab8500_debug_resources[] = { 618static struct resource __devinitdata ab8500_chargalg_resources[] = {};
619
620static struct resource __devinitdata ab8500_debug_resources[] = {
538 { 621 {
539 .name = "IRQ_FIRST", 622 .name = "IRQ_FIRST",
540 .start = AB8500_INT_MAIN_EXT_CH_NOT_OK, 623 .start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
@@ -549,7 +632,7 @@ static struct resource ab8500_debug_resources[] = {
549 }, 632 },
550}; 633};
551 634
552static struct resource ab8500_usb_resources[] = { 635static struct resource __devinitdata ab8500_usb_resources[] = {
553 { 636 {
554 .name = "ID_WAKEUP_R", 637 .name = "ID_WAKEUP_R",
555 .start = AB8500_INT_ID_WAKEUP_R, 638 .start = AB8500_INT_ID_WAKEUP_R,
@@ -580,9 +663,21 @@ static struct resource ab8500_usb_resources[] = {
580 .end = AB8500_INT_USB_LINK_STATUS, 663 .end = AB8500_INT_USB_LINK_STATUS,
581 .flags = IORESOURCE_IRQ, 664 .flags = IORESOURCE_IRQ,
582 }, 665 },
666 {
667 .name = "USB_ADP_PROBE_PLUG",
668 .start = AB8500_INT_ADP_PROBE_PLUG,
669 .end = AB8500_INT_ADP_PROBE_PLUG,
670 .flags = IORESOURCE_IRQ,
671 },
672 {
673 .name = "USB_ADP_PROBE_UNPLUG",
674 .start = AB8500_INT_ADP_PROBE_UNPLUG,
675 .end = AB8500_INT_ADP_PROBE_UNPLUG,
676 .flags = IORESOURCE_IRQ,
677 },
583}; 678};
584 679
585static struct resource ab8500_temp_resources[] = { 680static struct resource __devinitdata ab8500_temp_resources[] = {
586 { 681 {
587 .name = "AB8500_TEMP_WARM", 682 .name = "AB8500_TEMP_WARM",
588 .start = AB8500_INT_TEMP_WARM, 683 .start = AB8500_INT_TEMP_WARM,
@@ -591,7 +686,7 @@ static struct resource ab8500_temp_resources[] = {
591 }, 686 },
592}; 687};
593 688
594static struct mfd_cell ab8500_devs[] = { 689static struct mfd_cell __devinitdata ab8500_devs[] = {
595#ifdef CONFIG_DEBUG_FS 690#ifdef CONFIG_DEBUG_FS
596 { 691 {
597 .name = "ab8500-debug", 692 .name = "ab8500-debug",
@@ -621,11 +716,33 @@ static struct mfd_cell ab8500_devs[] = {
621 .resources = ab8500_rtc_resources, 716 .resources = ab8500_rtc_resources,
622 }, 717 },
623 { 718 {
624 .name = "ab8500-bm", 719 .name = "ab8500-charger",
625 .num_resources = ARRAY_SIZE(ab8500_bm_resources), 720 .num_resources = ARRAY_SIZE(ab8500_charger_resources),
626 .resources = ab8500_bm_resources, 721 .resources = ab8500_charger_resources,
722 },
723 {
724 .name = "ab8500-btemp",
725 .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
726 .resources = ab8500_btemp_resources,
727 },
728 {
729 .name = "ab8500-fg",
730 .num_resources = ARRAY_SIZE(ab8500_fg_resources),
731 .resources = ab8500_fg_resources,
732 },
733 {
734 .name = "ab8500-chargalg",
735 .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
736 .resources = ab8500_chargalg_resources,
737 },
738 {
739 .name = "ab8500-acc-det",
740 .num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
741 .resources = ab8500_av_acc_detect_resources,
742 },
743 {
744 .name = "ab8500-codec",
627 }, 745 },
628 { .name = "ab8500-codec", },
629 { 746 {
630 .name = "ab8500-usb", 747 .name = "ab8500-usb",
631 .num_resources = ARRAY_SIZE(ab8500_usb_resources), 748 .num_resources = ARRAY_SIZE(ab8500_usb_resources),
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 64748e42ac03..64bdeeb1c11a 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -419,20 +419,13 @@ static ssize_t ab8500_bank_write(struct file *file,
419 size_t count, loff_t *ppos) 419 size_t count, loff_t *ppos)
420{ 420{
421 struct device *dev = ((struct seq_file *)(file->private_data))->private; 421 struct device *dev = ((struct seq_file *)(file->private_data))->private;
422 char buf[32];
423 int buf_size;
424 unsigned long user_bank; 422 unsigned long user_bank;
425 int err; 423 int err;
426 424
427 /* Get userspace string and assure termination */ 425 /* Get userspace string and assure termination */
428 buf_size = min(count, (sizeof(buf) - 1)); 426 err = kstrtoul_from_user(user_buf, count, 0, &user_bank);
429 if (copy_from_user(buf, user_buf, buf_size))
430 return -EFAULT;
431 buf[buf_size] = 0;
432
433 err = strict_strtoul(buf, 0, &user_bank);
434 if (err) 427 if (err)
435 return -EINVAL; 428 return err;
436 429
437 if (user_bank >= AB8500_NUM_BANKS) { 430 if (user_bank >= AB8500_NUM_BANKS) {
438 dev_err(dev, "debugfs error input > number of banks\n"); 431 dev_err(dev, "debugfs error input > number of banks\n");
@@ -441,7 +434,7 @@ static ssize_t ab8500_bank_write(struct file *file,
441 434
442 debug_bank = user_bank; 435 debug_bank = user_bank;
443 436
444 return buf_size; 437 return count;
445} 438}
446 439
447static int ab8500_address_print(struct seq_file *s, void *p) 440static int ab8500_address_print(struct seq_file *s, void *p)
@@ -459,26 +452,20 @@ static ssize_t ab8500_address_write(struct file *file,
459 size_t count, loff_t *ppos) 452 size_t count, loff_t *ppos)
460{ 453{
461 struct device *dev = ((struct seq_file *)(file->private_data))->private; 454 struct device *dev = ((struct seq_file *)(file->private_data))->private;
462 char buf[32];
463 int buf_size;
464 unsigned long user_address; 455 unsigned long user_address;
465 int err; 456 int err;
466 457
467 /* Get userspace string and assure termination */ 458 /* Get userspace string and assure termination */
468 buf_size = min(count, (sizeof(buf) - 1)); 459 err = kstrtoul_from_user(user_buf, count, 0, &user_address);
469 if (copy_from_user(buf, user_buf, buf_size))
470 return -EFAULT;
471 buf[buf_size] = 0;
472
473 err = strict_strtoul(buf, 0, &user_address);
474 if (err) 460 if (err)
475 return -EINVAL; 461 return err;
462
476 if (user_address > 0xff) { 463 if (user_address > 0xff) {
477 dev_err(dev, "debugfs error input > 0xff\n"); 464 dev_err(dev, "debugfs error input > 0xff\n");
478 return -EINVAL; 465 return -EINVAL;
479 } 466 }
480 debug_address = user_address; 467 debug_address = user_address;
481 return buf_size; 468 return count;
482} 469}
483 470
484static int ab8500_val_print(struct seq_file *s, void *p) 471static int ab8500_val_print(struct seq_file *s, void *p)
@@ -509,20 +496,14 @@ static ssize_t ab8500_val_write(struct file *file,
509 size_t count, loff_t *ppos) 496 size_t count, loff_t *ppos)
510{ 497{
511 struct device *dev = ((struct seq_file *)(file->private_data))->private; 498 struct device *dev = ((struct seq_file *)(file->private_data))->private;
512 char buf[32];
513 int buf_size;
514 unsigned long user_val; 499 unsigned long user_val;
515 int err; 500 int err;
516 501
517 /* Get userspace string and assure termination */ 502 /* Get userspace string and assure termination */
518 buf_size = min(count, (sizeof(buf)-1)); 503 err = kstrtoul_from_user(user_buf, count, 0, &user_val);
519 if (copy_from_user(buf, user_buf, buf_size))
520 return -EFAULT;
521 buf[buf_size] = 0;
522
523 err = strict_strtoul(buf, 0, &user_val);
524 if (err) 504 if (err)
525 return -EINVAL; 505 return err;
506
526 if (user_val > 0xff) { 507 if (user_val > 0xff) {
527 dev_err(dev, "debugfs error input > 0xff\n"); 508 dev_err(dev, "debugfs error input > 0xff\n");
528 return -EINVAL; 509 return -EINVAL;
@@ -534,7 +515,7 @@ static ssize_t ab8500_val_write(struct file *file,
534 return -EINVAL; 515 return -EINVAL;
535 } 516 }
536 517
537 return buf_size; 518 return count;
538} 519}
539 520
540static const struct file_operations ab8500_bank_fops = { 521static const struct file_operations ab8500_bank_fops = {
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index a0bd0cf05af3..21131c7b0f1e 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -56,7 +56,7 @@ struct jz4740_adc {
56 void __iomem *base; 56 void __iomem *base;
57 57
58 int irq; 58 int irq;
59 int irq_base; 59 struct irq_chip_generic *gc;
60 60
61 struct clk *clk; 61 struct clk *clk;
62 atomic_t clk_ref; 62 atomic_t clk_ref;
@@ -64,63 +64,17 @@ struct jz4740_adc {
64 spinlock_t lock; 64 spinlock_t lock;
65}; 65};
66 66
67static inline void jz4740_adc_irq_set_masked(struct jz4740_adc *adc, int irq,
68 bool masked)
69{
70 unsigned long flags;
71 uint8_t val;
72
73 irq -= adc->irq_base;
74
75 spin_lock_irqsave(&adc->lock, flags);
76
77 val = readb(adc->base + JZ_REG_ADC_CTRL);
78 if (masked)
79 val |= BIT(irq);
80 else
81 val &= ~BIT(irq);
82 writeb(val, adc->base + JZ_REG_ADC_CTRL);
83
84 spin_unlock_irqrestore(&adc->lock, flags);
85}
86
87static void jz4740_adc_irq_mask(struct irq_data *data)
88{
89 struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
90 jz4740_adc_irq_set_masked(adc, data->irq, true);
91}
92
93static void jz4740_adc_irq_unmask(struct irq_data *data)
94{
95 struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
96 jz4740_adc_irq_set_masked(adc, data->irq, false);
97}
98
99static void jz4740_adc_irq_ack(struct irq_data *data)
100{
101 struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
102 unsigned int irq = data->irq - adc->irq_base;
103 writeb(BIT(irq), adc->base + JZ_REG_ADC_STATUS);
104}
105
106static struct irq_chip jz4740_adc_irq_chip = {
107 .name = "jz4740-adc",
108 .irq_mask = jz4740_adc_irq_mask,
109 .irq_unmask = jz4740_adc_irq_unmask,
110 .irq_ack = jz4740_adc_irq_ack,
111};
112
113static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc) 67static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc)
114{ 68{
115 struct jz4740_adc *adc = irq_desc_get_handler_data(desc); 69 struct irq_chip_generic *gc = irq_desc_get_handler_data(desc);
116 uint8_t status; 70 uint8_t status;
117 unsigned int i; 71 unsigned int i;
118 72
119 status = readb(adc->base + JZ_REG_ADC_STATUS); 73 status = readb(gc->reg_base + JZ_REG_ADC_STATUS);
120 74
121 for (i = 0; i < 5; ++i) { 75 for (i = 0; i < 5; ++i) {
122 if (status & BIT(i)) 76 if (status & BIT(i))
123 generic_handle_irq(adc->irq_base + i); 77 generic_handle_irq(gc->irq_base + i);
124 } 78 }
125} 79}
126 80
@@ -249,10 +203,12 @@ const struct mfd_cell jz4740_adc_cells[] = {
249 203
250static int __devinit jz4740_adc_probe(struct platform_device *pdev) 204static int __devinit jz4740_adc_probe(struct platform_device *pdev)
251{ 205{
252 int ret; 206 struct irq_chip_generic *gc;
207 struct irq_chip_type *ct;
253 struct jz4740_adc *adc; 208 struct jz4740_adc *adc;
254 struct resource *mem_base; 209 struct resource *mem_base;
255 int irq; 210 int ret;
211 int irq_base;
256 212
257 adc = kmalloc(sizeof(*adc), GFP_KERNEL); 213 adc = kmalloc(sizeof(*adc), GFP_KERNEL);
258 if (!adc) { 214 if (!adc) {
@@ -267,9 +223,9 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
267 goto err_free; 223 goto err_free;
268 } 224 }
269 225
270 adc->irq_base = platform_get_irq(pdev, 1); 226 irq_base = platform_get_irq(pdev, 1);
271 if (adc->irq_base < 0) { 227 if (irq_base < 0) {
272 ret = adc->irq_base; 228 ret = irq_base;
273 dev_err(&pdev->dev, "Failed to get irq base: %d\n", ret); 229 dev_err(&pdev->dev, "Failed to get irq base: %d\n", ret);
274 goto err_free; 230 goto err_free;
275 } 231 }
@@ -309,20 +265,28 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
309 265
310 platform_set_drvdata(pdev, adc); 266 platform_set_drvdata(pdev, adc);
311 267
312 for (irq = adc->irq_base; irq < adc->irq_base + 5; ++irq) { 268 gc = irq_alloc_generic_chip("INTC", 1, irq_base, adc->base,
313 irq_set_chip_data(irq, adc); 269 handle_level_irq);
314 irq_set_chip_and_handler(irq, &jz4740_adc_irq_chip, 270
315 handle_level_irq); 271 ct = gc->chip_types;
316 } 272 ct->regs.mask = JZ_REG_ADC_CTRL;
273 ct->regs.ack = JZ_REG_ADC_STATUS;
274 ct->chip.irq_mask = irq_gc_mask_set_bit;
275 ct->chip.irq_unmask = irq_gc_mask_clr_bit;
276 ct->chip.irq_ack = irq_gc_ack;
277
278 irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
279
280 adc->gc = gc;
317 281
318 irq_set_handler_data(adc->irq, adc); 282 irq_set_handler_data(adc->irq, gc);
319 irq_set_chained_handler(adc->irq, jz4740_adc_irq_demux); 283 irq_set_chained_handler(adc->irq, jz4740_adc_irq_demux);
320 284
321 writeb(0x00, adc->base + JZ_REG_ADC_ENABLE); 285 writeb(0x00, adc->base + JZ_REG_ADC_ENABLE);
322 writeb(0xff, adc->base + JZ_REG_ADC_CTRL); 286 writeb(0xff, adc->base + JZ_REG_ADC_CTRL);
323 287
324 ret = mfd_add_devices(&pdev->dev, 0, jz4740_adc_cells, 288 ret = mfd_add_devices(&pdev->dev, 0, jz4740_adc_cells,
325 ARRAY_SIZE(jz4740_adc_cells), mem_base, adc->irq_base); 289 ARRAY_SIZE(jz4740_adc_cells), mem_base, irq_base);
326 if (ret < 0) 290 if (ret < 0)
327 goto err_clk_put; 291 goto err_clk_put;
328 292
@@ -347,6 +311,8 @@ static int __devexit jz4740_adc_remove(struct platform_device *pdev)
347 311
348 mfd_remove_devices(&pdev->dev); 312 mfd_remove_devices(&pdev->dev);
349 313
314 irq_remove_generic_chip(adc->gc, IRQ_MSK(5), IRQ_NOPROBE | IRQ_LEVEL, 0);
315 kfree(adc->gc);
350 irq_set_handler_data(adc->irq, NULL); 316 irq_set_handler_data(adc->irq, NULL);
351 irq_set_chained_handler(adc->irq, NULL); 317 irq_set_chained_handler(adc->irq, NULL);
352 318
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index ea3f52c07ef7..ea1169b04779 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -37,6 +37,9 @@
37#define GPIOBASE 0x44 37#define GPIOBASE 0x44
38#define GPIO_IO_SIZE 64 38#define GPIO_IO_SIZE 64
39 39
40#define WDTBASE 0x84
41#define WDT_IO_SIZE 64
42
40static struct resource smbus_sch_resource = { 43static struct resource smbus_sch_resource = {
41 .flags = IORESOURCE_IO, 44 .flags = IORESOURCE_IO,
42}; 45};
@@ -59,6 +62,18 @@ static struct mfd_cell lpc_sch_cells[] = {
59 }, 62 },
60}; 63};
61 64
65static struct resource wdt_sch_resource = {
66 .flags = IORESOURCE_IO,
67};
68
69static struct mfd_cell tunnelcreek_cells[] = {
70 {
71 .name = "tunnelcreek_wdt",
72 .num_resources = 1,
73 .resources = &wdt_sch_resource,
74 },
75};
76
62static struct pci_device_id lpc_sch_ids[] = { 77static struct pci_device_id lpc_sch_ids[] = {
63 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) }, 78 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
64 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) }, 79 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) },
@@ -72,6 +87,7 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
72 unsigned int base_addr_cfg; 87 unsigned int base_addr_cfg;
73 unsigned short base_addr; 88 unsigned short base_addr;
74 int i; 89 int i;
90 int ret;
75 91
76 pci_read_config_dword(dev, SMBASE, &base_addr_cfg); 92 pci_read_config_dword(dev, SMBASE, &base_addr_cfg);
77 if (!(base_addr_cfg & (1 << 31))) { 93 if (!(base_addr_cfg & (1 << 31))) {
@@ -104,8 +120,39 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
104 for (i=0; i < ARRAY_SIZE(lpc_sch_cells); i++) 120 for (i=0; i < ARRAY_SIZE(lpc_sch_cells); i++)
105 lpc_sch_cells[i].id = id->device; 121 lpc_sch_cells[i].id = id->device;
106 122
107 return mfd_add_devices(&dev->dev, 0, 123 ret = mfd_add_devices(&dev->dev, 0,
108 lpc_sch_cells, ARRAY_SIZE(lpc_sch_cells), NULL, 0); 124 lpc_sch_cells, ARRAY_SIZE(lpc_sch_cells), NULL, 0);
125 if (ret)
126 goto out_dev;
127
128 if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC) {
129 pci_read_config_dword(dev, WDTBASE, &base_addr_cfg);
130 if (!(base_addr_cfg & (1 << 31))) {
131 dev_err(&dev->dev, "Decode of the WDT I/O range disabled\n");
132 ret = -ENODEV;
133 goto out_dev;
134 }
135 base_addr = (unsigned short)base_addr_cfg;
136 if (base_addr == 0) {
137 dev_err(&dev->dev, "I/O space for WDT uninitialized\n");
138 ret = -ENODEV;
139 goto out_dev;
140 }
141
142 wdt_sch_resource.start = base_addr;
143 wdt_sch_resource.end = base_addr + WDT_IO_SIZE - 1;
144
145 for (i = 0; i < ARRAY_SIZE(tunnelcreek_cells); i++)
146 tunnelcreek_cells[i].id = id->device;
147
148 ret = mfd_add_devices(&dev->dev, 0, tunnelcreek_cells,
149 ARRAY_SIZE(tunnelcreek_cells), NULL, 0);
150 }
151
152 return ret;
153out_dev:
154 mfd_remove_devices(&dev->dev);
155 return ret;
109} 156}
110 157
111static void __devexit lpc_sch_remove(struct pci_dev *dev) 158static void __devexit lpc_sch_remove(struct pci_dev *dev)
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c
index 638bf7e4d3b3..09274cf7c33b 100644
--- a/drivers/mfd/max8997-irq.c
+++ b/drivers/mfd/max8997-irq.c
@@ -58,8 +58,6 @@ static struct i2c_client *get_i2c(struct max8997_dev *max8997,
58 default: 58 default:
59 return ERR_PTR(-EINVAL); 59 return ERR_PTR(-EINVAL);
60 } 60 }
61
62 return ERR_PTR(-EINVAL);
63} 61}
64 62
65struct max8997_irq_data { 63struct max8997_irq_data {
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 9ec7570f5b81..de4096aee248 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -39,6 +39,8 @@ static struct mfd_cell max8998_devs[] = {
39 .name = "max8998-pmic", 39 .name = "max8998-pmic",
40 }, { 40 }, {
41 .name = "max8998-rtc", 41 .name = "max8998-rtc",
42 }, {
43 .name = "max8998-battery",
42 }, 44 },
43}; 45};
44 46
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 1717144fe7f4..29601e7d606d 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -998,9 +998,9 @@ static void usbhs_disable(struct device *dev)
998 998
999 if (is_omap_usbhs_rev2(omap)) { 999 if (is_omap_usbhs_rev2(omap)) {
1000 if (is_ehci_tll_mode(pdata->port_mode[0])) 1000 if (is_ehci_tll_mode(pdata->port_mode[0]))
1001 clk_enable(omap->usbtll_p1_fck); 1001 clk_disable(omap->usbtll_p1_fck);
1002 if (is_ehci_tll_mode(pdata->port_mode[1])) 1002 if (is_ehci_tll_mode(pdata->port_mode[1]))
1003 clk_enable(omap->usbtll_p2_fck); 1003 clk_disable(omap->usbtll_p2_fck);
1004 clk_disable(omap->utmi_p2_fck); 1004 clk_disable(omap->utmi_p2_fck);
1005 clk_disable(omap->utmi_p1_fck); 1005 clk_disable(omap->utmi_p1_fck);
1006 } 1006 }
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 7ab7746631d4..2963689cf45c 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -228,7 +228,7 @@ int stmpe_block_write(struct stmpe *stmpe, u8 reg, u8 length,
228EXPORT_SYMBOL_GPL(stmpe_block_write); 228EXPORT_SYMBOL_GPL(stmpe_block_write);
229 229
230/** 230/**
231 * stmpe_set_altfunc: set the alternate function for STMPE pins 231 * stmpe_set_altfunc()- set the alternate function for STMPE pins
232 * @stmpe: Device to configure 232 * @stmpe: Device to configure
233 * @pins: Bitmask of pins to affect 233 * @pins: Bitmask of pins to affect
234 * @block: block to enable alternate functions for 234 * @block: block to enable alternate functions for
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index 0dbdc4e8cd77..e4ee38956583 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -42,6 +42,7 @@ struct stmpe_variant_block {
42 * @id_mask: bits valid in CHIPID register for comparison with id_val 42 * @id_mask: bits valid in CHIPID register for comparison with id_val
43 * @num_gpios: number of GPIOS 43 * @num_gpios: number of GPIOS
44 * @af_bits: number of bits used to specify the alternate function 44 * @af_bits: number of bits used to specify the alternate function
45 * @regs: variant specific registers.
45 * @blocks: list of blocks present on this device 46 * @blocks: list of blocks present on this device
46 * @num_blocks: number of blocks present on this device 47 * @num_blocks: number of blocks present on this device
47 * @num_irqs: number of internal IRQs available on this device 48 * @num_irqs: number of internal IRQs available on this device
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 2229e66d80db..6f5b8cf2f652 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -147,12 +147,11 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
147 if (init_data == NULL) 147 if (init_data == NULL)
148 return -ENOMEM; 148 return -ENOMEM;
149 149
150 init_data->irq = pmic_plat_data->irq;
151 init_data->irq_base = pmic_plat_data->irq;
152
153 tps65910 = kzalloc(sizeof(struct tps65910), GFP_KERNEL); 150 tps65910 = kzalloc(sizeof(struct tps65910), GFP_KERNEL);
154 if (tps65910 == NULL) 151 if (tps65910 == NULL) {
152 kfree(init_data);
155 return -ENOMEM; 153 return -ENOMEM;
154 }
156 155
157 i2c_set_clientdata(i2c, tps65910); 156 i2c_set_clientdata(i2c, tps65910);
158 tps65910->dev = &i2c->dev; 157 tps65910->dev = &i2c->dev;
@@ -168,17 +167,22 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
168 if (ret < 0) 167 if (ret < 0)
169 goto err; 168 goto err;
170 169
170 init_data->irq = pmic_plat_data->irq;
171 init_data->irq_base = pmic_plat_data->irq;
172
171 tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base); 173 tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
172 174
173 ret = tps65910_irq_init(tps65910, init_data->irq, init_data); 175 ret = tps65910_irq_init(tps65910, init_data->irq, init_data);
174 if (ret < 0) 176 if (ret < 0)
175 goto err; 177 goto err;
176 178
179 kfree(init_data);
177 return ret; 180 return ret;
178 181
179err: 182err:
180 mfd_remove_devices(tps65910->dev); 183 mfd_remove_devices(tps65910->dev);
181 kfree(tps65910); 184 kfree(tps65910);
185 kfree(init_data);
182 return ret; 186 return ret;
183} 187}
184 188
@@ -187,6 +191,7 @@ static int tps65910_i2c_remove(struct i2c_client *i2c)
187 struct tps65910 *tps65910 = i2c_get_clientdata(i2c); 191 struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
188 192
189 mfd_remove_devices(tps65910->dev); 193 mfd_remove_devices(tps65910->dev);
194 tps65910_irq_exit(tps65910);
190 kfree(tps65910); 195 kfree(tps65910);
191 196
192 return 0; 197 return 0;
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c
index 283ac6759757..e7ff783aa31e 100644
--- a/drivers/mfd/tps65911-comparator.c
+++ b/drivers/mfd/tps65911-comparator.c
@@ -157,6 +157,8 @@ static __devexit int tps65911_comparator_remove(struct platform_device *pdev)
157 struct tps65910 *tps65910; 157 struct tps65910 *tps65910;
158 158
159 tps65910 = dev_get_drvdata(pdev->dev.parent); 159 tps65910 = dev_get_drvdata(pdev->dev.parent);
160 device_remove_file(&pdev->dev, &dev_attr_comp2_threshold);
161 device_remove_file(&pdev->dev, &dev_attr_comp1_threshold);
160 162
161 return 0; 163 return 0;
162} 164}
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
new file mode 100644
index 000000000000..955bc00e4b20
--- /dev/null
+++ b/drivers/mfd/tps65912-core.c
@@ -0,0 +1,177 @@
1/*
2 * tps65912-core.c -- TI TPS65912x
3 *
4 * Copyright 2011 Texas Instruments Inc.
5 *
6 * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This driver is based on wm8350 implementation.
14 */
15
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/init.h>
19#include <linux/slab.h>
20#include <linux/gpio.h>
21#include <linux/mfd/core.h>
22#include <linux/mfd/tps65912.h>
23
24static struct mfd_cell tps65912s[] = {
25 {
26 .name = "tps65912-pmic",
27 },
28};
29
30int tps65912_set_bits(struct tps65912 *tps65912, u8 reg, u8 mask)
31{
32 u8 data;
33 int err;
34
35 mutex_lock(&tps65912->io_mutex);
36
37 err = tps65912->read(tps65912, reg, 1, &data);
38 if (err) {
39 dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
40 goto out;
41 }
42
43 data |= mask;
44 err = tps65912->write(tps65912, reg, 1, &data);
45 if (err)
46 dev_err(tps65912->dev, "Write to reg 0x%x failed\n", reg);
47
48out:
49 mutex_unlock(&tps65912->io_mutex);
50 return err;
51}
52EXPORT_SYMBOL_GPL(tps65912_set_bits);
53
54int tps65912_clear_bits(struct tps65912 *tps65912, u8 reg, u8 mask)
55{
56 u8 data;
57 int err;
58
59 mutex_lock(&tps65912->io_mutex);
60 err = tps65912->read(tps65912, reg, 1, &data);
61 if (err) {
62 dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
63 goto out;
64 }
65
66 data &= ~mask;
67 err = tps65912->write(tps65912, reg, 1, &data);
68 if (err)
69 dev_err(tps65912->dev, "Write to reg 0x%x failed\n", reg);
70
71out:
72 mutex_unlock(&tps65912->io_mutex);
73 return err;
74}
75EXPORT_SYMBOL_GPL(tps65912_clear_bits);
76
77static inline int tps65912_read(struct tps65912 *tps65912, u8 reg)
78{
79 u8 val;
80 int err;
81
82 err = tps65912->read(tps65912, reg, 1, &val);
83 if (err < 0)
84 return err;
85
86 return val;
87}
88
89static inline int tps65912_write(struct tps65912 *tps65912, u8 reg, u8 val)
90{
91 return tps65912->write(tps65912, reg, 1, &val);
92}
93
94int tps65912_reg_read(struct tps65912 *tps65912, u8 reg)
95{
96 int data;
97
98 mutex_lock(&tps65912->io_mutex);
99
100 data = tps65912_read(tps65912, reg);
101 if (data < 0)
102 dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
103
104 mutex_unlock(&tps65912->io_mutex);
105 return data;
106}
107EXPORT_SYMBOL_GPL(tps65912_reg_read);
108
109int tps65912_reg_write(struct tps65912 *tps65912, u8 reg, u8 val)
110{
111 int err;
112
113 mutex_lock(&tps65912->io_mutex);
114
115 err = tps65912_write(tps65912, reg, val);
116 if (err < 0)
117 dev_err(tps65912->dev, "Write for reg 0x%x failed\n", reg);
118
119 mutex_unlock(&tps65912->io_mutex);
120 return err;
121}
122EXPORT_SYMBOL_GPL(tps65912_reg_write);
123
124int tps65912_device_init(struct tps65912 *tps65912)
125{
126 struct tps65912_board *pmic_plat_data = tps65912->dev->platform_data;
127 struct tps65912_platform_data *init_data;
128 int ret, dcdc_avs, value;
129
130 init_data = kzalloc(sizeof(struct tps65912_platform_data), GFP_KERNEL);
131 if (init_data == NULL)
132 return -ENOMEM;
133
134 init_data->irq = pmic_plat_data->irq;
135 init_data->irq_base = pmic_plat_data->irq;
136
137 mutex_init(&tps65912->io_mutex);
138 dev_set_drvdata(tps65912->dev, tps65912);
139
140 dcdc_avs = (pmic_plat_data->is_dcdc1_avs << 0 |
141 pmic_plat_data->is_dcdc2_avs << 1 |
142 pmic_plat_data->is_dcdc3_avs << 2 |
143 pmic_plat_data->is_dcdc4_avs << 3);
144 if (dcdc_avs) {
145 tps65912->read(tps65912, TPS65912_I2C_SPI_CFG, 1, &value);
146 dcdc_avs |= value;
147 tps65912->write(tps65912, TPS65912_I2C_SPI_CFG, 1, &dcdc_avs);
148 }
149
150 ret = mfd_add_devices(tps65912->dev, -1,
151 tps65912s, ARRAY_SIZE(tps65912s),
152 NULL, 0);
153 if (ret < 0)
154 goto err;
155
156 ret = tps65912_irq_init(tps65912, init_data->irq, init_data);
157 if (ret < 0)
158 goto err;
159
160 return ret;
161
162err:
163 kfree(init_data);
164 mfd_remove_devices(tps65912->dev);
165 kfree(tps65912);
166 return ret;
167}
168
169void tps65912_device_exit(struct tps65912 *tps65912)
170{
171 mfd_remove_devices(tps65912->dev);
172 kfree(tps65912);
173}
174
175MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
176MODULE_DESCRIPTION("TPS65912x chip family multi-function driver");
177MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c
new file mode 100644
index 000000000000..c041f2c3d2bd
--- /dev/null
+++ b/drivers/mfd/tps65912-i2c.c
@@ -0,0 +1,139 @@
1/*
2 * tps65912-i2c.c -- I2C access for TI TPS65912x PMIC
3 *
4 * Copyright 2011 Texas Instruments Inc.
5 *
6 * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This driver is based on wm8350 implementation.
14 */
15
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/init.h>
19#include <linux/slab.h>
20#include <linux/gpio.h>
21#include <linux/i2c.h>
22#include <linux/mfd/core.h>
23#include <linux/mfd/tps65912.h>
24
25static int tps65912_i2c_read(struct tps65912 *tps65912, u8 reg,
26 int bytes, void *dest)
27{
28 struct i2c_client *i2c = tps65912->control_data;
29 struct i2c_msg xfer[2];
30 int ret;
31
32 /* Write register */
33 xfer[0].addr = i2c->addr;
34 xfer[0].flags = 0;
35 xfer[0].len = 1;
36 xfer[0].buf = &reg;
37
38 /* Read data */
39 xfer[1].addr = i2c->addr;
40 xfer[1].flags = I2C_M_RD;
41 xfer[1].len = bytes;
42 xfer[1].buf = dest;
43
44 ret = i2c_transfer(i2c->adapter, xfer, 2);
45 if (ret == 2)
46 ret = 0;
47 else if (ret >= 0)
48 ret = -EIO;
49 return ret;
50}
51
52static int tps65912_i2c_write(struct tps65912 *tps65912, u8 reg,
53 int bytes, void *src)
54{
55 struct i2c_client *i2c = tps65912->control_data;
56 /* we add 1 byte for device register */
57 u8 msg[TPS6591X_MAX_REGISTER + 1];
58 int ret;
59
60 if (bytes > TPS6591X_MAX_REGISTER)
61 return -EINVAL;
62
63 msg[0] = reg;
64 memcpy(&msg[1], src, bytes);
65
66 ret = i2c_master_send(i2c, msg, bytes + 1);
67 if (ret < 0)
68 return ret;
69 if (ret != bytes + 1)
70 return -EIO;
71
72 return 0;
73}
74
75static int tps65912_i2c_probe(struct i2c_client *i2c,
76 const struct i2c_device_id *id)
77{
78 struct tps65912 *tps65912;
79
80 tps65912 = kzalloc(sizeof(struct tps65912), GFP_KERNEL);
81 if (tps65912 == NULL)
82 return -ENOMEM;
83
84 i2c_set_clientdata(i2c, tps65912);
85 tps65912->dev = &i2c->dev;
86 tps65912->control_data = i2c;
87 tps65912->read = tps65912_i2c_read;
88 tps65912->write = tps65912_i2c_write;
89
90 return tps65912_device_init(tps65912);
91}
92
static int tps65912_i2c_remove(struct i2c_client *i2c)
{
	struct tps65912 *tps65912 = i2c_get_clientdata(i2c);

	/* Removes the MFD children and frees the core structure. */
	tps65912_device_exit(tps65912);

	return 0;
}
101
102static const struct i2c_device_id tps65912_i2c_id[] = {
103 {"tps65912", 0 },
104 { }
105};
106MODULE_DEVICE_TABLE(i2c, tps65912_i2c_id);
107
108static struct i2c_driver tps65912_i2c_driver = {
109 .driver = {
110 .name = "tps65912",
111 .owner = THIS_MODULE,
112 },
113 .probe = tps65912_i2c_probe,
114 .remove = tps65912_i2c_remove,
115 .id_table = tps65912_i2c_id,
116};
117
/* Module init: register the I2C driver with the I2C core. */
static int __init tps65912_i2c_init(void)
{
	int ret;

	ret = i2c_add_driver(&tps65912_i2c_driver);
	if (ret != 0)
		pr_err("Failed to register TPS65912 I2C driver: %d\n", ret);

	return ret;
}
128/* init early so consumer devices can complete system boot */
129subsys_initcall(tps65912_i2c_init);
130
131static void __exit tps65912_i2c_exit(void)
132{
133 i2c_del_driver(&tps65912_i2c_driver);
134}
135module_exit(tps65912_i2c_exit);
136
137MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
138MODULE_DESCRIPTION("TPS6591x chip family multi-function driver");
139MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/tps65912-irq.c b/drivers/mfd/tps65912-irq.c
new file mode 100644
index 000000000000..d360a83a2738
--- /dev/null
+++ b/drivers/mfd/tps65912-irq.c
@@ -0,0 +1,224 @@
1/*
2 * tps65912-irq.c -- TI TPS6591x
3 *
4 * Copyright 2011 Texas Instruments Inc.
5 *
6 * Author: Margarita Olaya <magi@slimlogic.co.uk>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This driver is based on wm8350 implementation.
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/bug.h>
20#include <linux/device.h>
21#include <linux/interrupt.h>
22#include <linux/irq.h>
23#include <linux/gpio.h>
24#include <linux/mfd/tps65912.h>
25
/* Translate a Linux virtual IRQ number into the chip-local IRQ index. */
static inline int irq_to_tps65912_irq(struct tps65912 *tps65912,
							int irq)
{
	return irq - tps65912->irq_base;
}
31
32/*
33 * This is a threaded IRQ handler so can access I2C/SPI. Since the
34 * IRQ handler explicitly clears the IRQ it handles the IRQ line
35 * will be reasserted and the physical IRQ will be handled again if
36 * another interrupt is asserted while we run - in the normal course
37 * of events this is a rare occurrence so we save I2C/SPI reads. We're
38 * also assuming that it's rare to get lots of interrupts firing
39 * simultaneously so try to minimise I/O.
40 */
static irqreturn_t tps65912_irq(int irq, void *irq_data)
{
	struct tps65912 *tps65912 = irq_data;
	u32 irq_sts;
	u32 irq_mask;
	u8 reg;
	int i;

	/* Gather the four 8-bit status registers into one 32-bit word.
	 * NOTE(review): read() return values are ignored — a failed read
	 * leaves 'reg' stale; confirm this is acceptable on this bus. */
	tps65912->read(tps65912, TPS65912_INT_STS, 1, &reg);
	irq_sts = reg;
	tps65912->read(tps65912, TPS65912_INT_STS2, 1, &reg);
	irq_sts |= reg << 8;
	tps65912->read(tps65912, TPS65912_INT_STS3, 1, &reg);
	irq_sts |= reg << 16;
	tps65912->read(tps65912, TPS65912_INT_STS4, 1, &reg);
	irq_sts |= reg << 24;

	/* Same layout for the four mask registers. */
	tps65912->read(tps65912, TPS65912_INT_MSK, 1, &reg);
	irq_mask = reg;
	tps65912->read(tps65912, TPS65912_INT_MSK2, 1, &reg);
	irq_mask |= reg << 8;
	tps65912->read(tps65912, TPS65912_INT_MSK3, 1, &reg);
	irq_mask |= reg << 16;
	tps65912->read(tps65912, TPS65912_INT_MSK4, 1, &reg);
	irq_mask |= reg << 24;

	/* Only act on sources that are both pending and unmasked. */
	irq_sts &= ~irq_mask;
	if (!irq_sts)
		return IRQ_NONE;

	/* Dispatch each pending source to its nested virtual IRQ. */
	for (i = 0; i < tps65912->irq_num; i++) {
		if (!(irq_sts & (1 << i)))
			continue;

		handle_nested_irq(tps65912->irq_base + i);
	}

	/* Write the STS register back to clear IRQs we handled */
	reg = irq_sts & 0xFF;
	irq_sts >>= 8;
	if (reg)
		tps65912->write(tps65912, TPS65912_INT_STS, 1, &reg);
	reg = irq_sts & 0xFF;
	irq_sts >>= 8;
	if (reg)
		tps65912->write(tps65912, TPS65912_INT_STS2, 1, &reg);
	reg = irq_sts & 0xFF;
	irq_sts >>= 8;
	if (reg)
		tps65912->write(tps65912, TPS65912_INT_STS3, 1, &reg);
	reg = irq_sts & 0xFF;
	if (reg)
		tps65912->write(tps65912, TPS65912_INT_STS4, 1, &reg);

	return IRQ_HANDLED;
}
98
/* genirq bus-lock callback: hold the mask mutex during mask updates. */
static void tps65912_irq_lock(struct irq_data *data)
{
	struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);

	mutex_lock(&tps65912->irq_lock);
}
105
/*
 * genirq bus-sync-unlock callback: flush the cached interrupt mask to
 * the four hardware mask registers and release the mask mutex.
 */
static void tps65912_irq_sync_unlock(struct irq_data *data)
{
	struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
	u32 reg_mask;
	u8 reg;

	/* Read back the current hardware mask (four 8-bit registers). */
	tps65912->read(tps65912, TPS65912_INT_MSK, 1, &reg);
	reg_mask = reg;
	tps65912->read(tps65912, TPS65912_INT_MSK2, 1, &reg);
	reg_mask |= reg << 8;
	tps65912->read(tps65912, TPS65912_INT_MSK3, 1, &reg);
	reg_mask |= reg << 16;
	tps65912->read(tps65912, TPS65912_INT_MSK4, 1, &reg);
	reg_mask |= reg << 24;

	/* Only touch the hardware if the cached mask actually changed. */
	if (tps65912->irq_mask != reg_mask) {
		reg = tps65912->irq_mask & 0xFF;
		tps65912->write(tps65912, TPS65912_INT_MSK, 1, &reg);
		reg = tps65912->irq_mask >> 8 & 0xFF;
		tps65912->write(tps65912, TPS65912_INT_MSK2, 1, &reg);
		reg = tps65912->irq_mask >> 16 & 0xFF;
		tps65912->write(tps65912, TPS65912_INT_MSK3, 1, &reg);
		reg = tps65912->irq_mask >> 24 & 0xFF;
		tps65912->write(tps65912, TPS65912_INT_MSK4, 1, &reg);
	}

	mutex_unlock(&tps65912->irq_lock);
}
134
/* Clear the cached mask bit; written to hardware on bus sync unlock. */
static void tps65912_irq_enable(struct irq_data *data)
{
	struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);

	tps65912->irq_mask &= ~(1 << irq_to_tps65912_irq(tps65912, data->irq));
}
141
/* Set the cached mask bit; written to hardware on bus sync unlock. */
static void tps65912_irq_disable(struct irq_data *data)
{
	struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);

	tps65912->irq_mask |= (1 << irq_to_tps65912_irq(tps65912, data->irq));
}
148
/* IRQ chip ops: mask changes are cached and flushed on bus unlock. */
static struct irq_chip tps65912_irq_chip = {
	.name = "tps65912",
	.irq_bus_lock = tps65912_irq_lock,
	.irq_bus_sync_unlock = tps65912_irq_sync_unlock,
	.irq_disable = tps65912_irq_disable,
	.irq_enable = tps65912_irq_enable,
};
156
/*
 * Set up the TPS65912 interrupt controller: clear stale status bits,
 * mask all sources, register the nested virtual IRQs with genirq and
 * request the chip's threaded IRQ line.
 *
 * Returns 0 on success (including the no-IRQ configurations) or the
 * error from request_threaded_irq().
 */
int tps65912_irq_init(struct tps65912 *tps65912, int irq,
			struct tps65912_platform_data *pdata)
{
	int ret, cur_irq;
	int flags = IRQF_ONESHOT;
	u8 reg;

	/* IRQ support is optional: no line or no base means polled-only. */
	if (!irq) {
		dev_warn(tps65912->dev, "No interrupt support, no core IRQ\n");
		return 0;
	}

	if (!pdata || !pdata->irq_base) {
		dev_warn(tps65912->dev, "No interrupt support, no IRQ base\n");
		return 0;
	}

	/* Clear unattended interrupts (write back whatever is pending). */
	tps65912->read(tps65912, TPS65912_INT_STS, 1, &reg);
	tps65912->write(tps65912, TPS65912_INT_STS, 1, &reg);
	tps65912->read(tps65912, TPS65912_INT_STS2, 1, &reg);
	tps65912->write(tps65912, TPS65912_INT_STS2, 1, &reg);
	tps65912->read(tps65912, TPS65912_INT_STS3, 1, &reg);
	tps65912->write(tps65912, TPS65912_INT_STS3, 1, &reg);
	tps65912->read(tps65912, TPS65912_INT_STS4, 1, &reg);
	tps65912->write(tps65912, TPS65912_INT_STS4, 1, &reg);

	/* Mask top level interrupts */
	tps65912->irq_mask = 0xFFFFFFFF;

	mutex_init(&tps65912->irq_lock);
	tps65912->chip_irq = irq;
	tps65912->irq_base = pdata->irq_base;

	tps65912->irq_num = TPS65912_NUM_IRQ;

	/* Register with genirq */
	for (cur_irq = tps65912->irq_base;
	     cur_irq < tps65912->irq_num + tps65912->irq_base;
	     cur_irq++) {
		irq_set_chip_data(cur_irq, tps65912);
		irq_set_chip_and_handler(cur_irq, &tps65912_irq_chip,
					 handle_edge_irq);
		irq_set_nested_thread(cur_irq, 1);
		/* ARM needs us to explicitly flag the IRQ as valid
		 * and will set them noprobe when we do so. */
#ifdef CONFIG_ARM
		set_irq_flags(cur_irq, IRQF_VALID);
#else
		irq_set_noprobe(cur_irq);
#endif
	}

	ret = request_threaded_irq(irq, NULL, tps65912_irq, flags,
				   "tps65912", tps65912);

	/* NOTE(review): the trigger type is set after the IRQ is already
	 * requested — confirm it cannot fire with the wrong polarity in
	 * the window between the two calls. */
	irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
	if (ret != 0)
		dev_err(tps65912->dev, "Failed to request IRQ: %d\n", ret);

	return ret;
}
219
/* Release the chip IRQ requested by tps65912_irq_init(). */
int tps65912_irq_exit(struct tps65912 *tps65912)
{
	free_irq(tps65912->chip_irq, tps65912);
	return 0;
}
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
new file mode 100644
index 000000000000..6d71e0d25744
--- /dev/null
+++ b/drivers/mfd/tps65912-spi.c
@@ -0,0 +1,142 @@
1/*
2 * tps65912-spi.c -- SPI access for TI TPS65912x PMIC
3 *
4 * Copyright 2011 Texas Instruments Inc.
5 *
6 * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This driver is based on wm8350 implementation.
14 */
15
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/init.h>
19#include <linux/slab.h>
20#include <linux/gpio.h>
21#include <linux/spi/spi.h>
22#include <linux/mfd/core.h>
23#include <linux/mfd/tps65912.h>
24
25static int tps65912_spi_write(struct tps65912 *tps65912, u8 addr,
26 int bytes, void *src)
27{
28 struct spi_device *spi = tps65912->control_data;
29 u8 *data = (u8 *) src;
30 int ret;
31 /* bit 23 is the read/write bit */
32 unsigned long spi_data = 1 << 23 | addr << 15 | *data;
33 struct spi_transfer xfer;
34 struct spi_message msg;
35 u32 tx_buf, rx_buf;
36
37 tx_buf = spi_data;
38 rx_buf = 0;
39
40 xfer.tx_buf = &tx_buf;
41 xfer.rx_buf = NULL;
42 xfer.len = sizeof(unsigned long);
43 xfer.bits_per_word = 24;
44
45 spi_message_init(&msg);
46 spi_message_add_tail(&xfer, &msg);
47
48 ret = spi_sync(spi, &msg);
49 return ret;
50}
51
52static int tps65912_spi_read(struct tps65912 *tps65912, u8 addr,
53 int bytes, void *dest)
54{
55 struct spi_device *spi = tps65912->control_data;
56 /* bit 23 is the read/write bit */
57 unsigned long spi_data = 0 << 23 | addr << 15;
58 struct spi_transfer xfer;
59 struct spi_message msg;
60 int ret;
61 u8 *data = (u8 *) dest;
62 u32 tx_buf, rx_buf;
63
64 tx_buf = spi_data;
65 rx_buf = 0;
66
67 xfer.tx_buf = &tx_buf;
68 xfer.rx_buf = &rx_buf;
69 xfer.len = sizeof(unsigned long);
70 xfer.bits_per_word = 24;
71
72 spi_message_init(&msg);
73 spi_message_add_tail(&xfer, &msg);
74
75 if (spi == NULL)
76 return 0;
77
78 ret = spi_sync(spi, &msg);
79 if (ret == 0)
80 *data = (u8) (rx_buf & 0xFF);
81 return ret;
82}
83
/*
 * SPI probe: allocate the shared core structure, wire up the SPI
 * register accessors and hand off to the common device init.
 */
static int __devinit tps65912_spi_probe(struct spi_device *spi)
{
	struct tps65912 *tps65912;

	tps65912 = kzalloc(sizeof(struct tps65912), GFP_KERNEL);
	if (tps65912 == NULL)
		return -ENOMEM;

	tps65912->dev = &spi->dev;
	tps65912->control_data = spi;
	tps65912->read = tps65912_spi_read;
	tps65912->write = tps65912_spi_write;

	spi_set_drvdata(spi, tps65912);

	/* On failure tps65912_device_init() frees tps65912 itself. */
	return tps65912_device_init(tps65912);
}
101
static int __devexit tps65912_spi_remove(struct spi_device *spi)
{
	struct tps65912 *tps65912 = spi_get_drvdata(spi);

	/* Removes the MFD children and frees the core structure. */
	tps65912_device_exit(tps65912);

	return 0;
}
110
111static struct spi_driver tps65912_spi_driver = {
112 .driver = {
113 .name = "tps65912",
114 .bus = &spi_bus_type,
115 .owner = THIS_MODULE,
116 },
117 .probe = tps65912_spi_probe,
118 .remove = __devexit_p(tps65912_spi_remove),
119};
120
121static int __init tps65912_spi_init(void)
122{
123 int ret;
124
125 ret = spi_register_driver(&tps65912_spi_driver);
126 if (ret != 0)
127 pr_err("Failed to register TPS65912 SPI driver: %d\n", ret);
128
129 return 0;
130}
131/* init early so consumer devices can complete system boot */
132subsys_initcall(tps65912_spi_init);
133
/* Module exit: unregister the SPI driver. */
static void __exit tps65912_spi_exit(void)
{
	spi_unregister_driver(&tps65912_spi_driver);
}
138module_exit(tps65912_spi_exit);
139
140MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
141MODULE_DESCRIPTION("SPI support for TPS65912 chip family mfd");
142MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index a2eddc70995c..01ecfeee6524 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1283,6 +1283,8 @@ static const struct i2c_device_id twl_ids[] = {
1283 { "tps65950", 0 }, /* catalog version of twl5030 */ 1283 { "tps65950", 0 }, /* catalog version of twl5030 */
1284 { "tps65930", TPS_SUBSET }, /* fewer LDOs and DACs; no charger */ 1284 { "tps65930", TPS_SUBSET }, /* fewer LDOs and DACs; no charger */
1285 { "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */ 1285 { "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */
1286 { "tps65921", TPS_SUBSET }, /* fewer LDOs; no codec, no LED
1287 and vibrator. Charger in USB module*/
1286 { "twl6030", TWL6030_CLASS }, /* "Phoenix power chip" */ 1288 { "twl6030", TWL6030_CLASS }, /* "Phoenix power chip" */
1287 { "twl6025", TWL6030_CLASS | TWL6025_SUBCLASS }, /* "Phoenix lite" */ 1289 { "twl6025", TWL6030_CLASS | TWL6025_SUBCLASS }, /* "Phoenix lite" */
1288 { /* end of list */ }, 1290 { /* end of list */ },
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
index 3941ddcf15fe..b5d598c3aa71 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/mfd/twl4030-madc.c
@@ -530,13 +530,13 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
530 if (ret) { 530 if (ret) {
531 dev_err(twl4030_madc->dev, 531 dev_err(twl4030_madc->dev,
532 "unable to write sel register 0x%X\n", method->sel + 1); 532 "unable to write sel register 0x%X\n", method->sel + 1);
533 return ret; 533 goto out;
534 } 534 }
535 ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_lsb, method->sel); 535 ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_lsb, method->sel);
536 if (ret) { 536 if (ret) {
537 dev_err(twl4030_madc->dev, 537 dev_err(twl4030_madc->dev,
538 "unable to write sel register 0x%X\n", method->sel + 1); 538 "unable to write sel register 0x%X\n", method->sel + 1);
539 return ret; 539 goto out;
540 } 540 }
541 /* Select averaging for all channels if do_avg is set */ 541 /* Select averaging for all channels if do_avg is set */
542 if (req->do_avg) { 542 if (req->do_avg) {
@@ -546,7 +546,7 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
546 dev_err(twl4030_madc->dev, 546 dev_err(twl4030_madc->dev,
547 "unable to write avg register 0x%X\n", 547 "unable to write avg register 0x%X\n",
548 method->avg + 1); 548 method->avg + 1);
549 return ret; 549 goto out;
550 } 550 }
551 ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, 551 ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
552 ch_lsb, method->avg); 552 ch_lsb, method->avg);
@@ -554,7 +554,7 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
554 dev_err(twl4030_madc->dev, 554 dev_err(twl4030_madc->dev,
555 "unable to write sel reg 0x%X\n", 555 "unable to write sel reg 0x%X\n",
556 method->sel + 1); 556 method->sel + 1);
557 return ret; 557 goto out;
558 } 558 }
559 } 559 }
560 if (req->type == TWL4030_MADC_IRQ_ONESHOT && req->func_cb != NULL) { 560 if (req->type == TWL4030_MADC_IRQ_ONESHOT && req->func_cb != NULL) {
diff --git a/drivers/mfd/twl6030-pwm.c b/drivers/mfd/twl6030-pwm.c
index 5d25bdc78424..e8fee147678d 100644
--- a/drivers/mfd/twl6030-pwm.c
+++ b/drivers/mfd/twl6030-pwm.c
@@ -161,3 +161,5 @@ void pwm_free(struct pwm_device *pwm)
161 kfree(pwm); 161 kfree(pwm);
162} 162}
163EXPORT_SYMBOL(pwm_free); 163EXPORT_SYMBOL(pwm_free);
164
165MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c
new file mode 100644
index 000000000000..87210954a066
--- /dev/null
+++ b/drivers/mfd/wm831x-auxadc.c
@@ -0,0 +1,299 @@
1/*
2 * wm831x-auxadc.c -- AUXADC for Wolfson WM831x PMICs
3 *
4 * Copyright 2009-2011 Wolfson Microelectronics PLC.
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/delay.h>
18#include <linux/mfd/core.h>
19#include <linux/slab.h>
20#include <linux/list.h>
21
22#include <linux/mfd/wm831x/core.h>
23#include <linux/mfd/wm831x/pdata.h>
24#include <linux/mfd/wm831x/irq.h>
25#include <linux/mfd/wm831x/auxadc.h>
26#include <linux/mfd/wm831x/otp.h>
27#include <linux/mfd/wm831x/regulator.h>
28
/* A single in-flight AUXADC conversion request, queued on
 * wm831x->auxadc_pending and completed by the AUXADC interrupt. */
struct wm831x_auxadc_req {
	struct list_head list;		/* link in wm831x->auxadc_pending */
	enum wm831x_auxadc input;	/* source being converted */
	int val;			/* result, or -ETIMEDOUT */
	struct completion done;		/* signalled when val is valid */
};
35
36static int wm831x_auxadc_read_irq(struct wm831x *wm831x,
37 enum wm831x_auxadc input)
38{
39 struct wm831x_auxadc_req *req;
40 int ret;
41 bool ena = false;
42
43 req = kzalloc(sizeof(*req), GFP_KERNEL);
44 if (!req)
45 return -ENOMEM;
46
47 init_completion(&req->done);
48 req->input = input;
49 req->val = -ETIMEDOUT;
50
51 mutex_lock(&wm831x->auxadc_lock);
52
53 /* Enqueue the request */
54 list_add(&req->list, &wm831x->auxadc_pending);
55
56 ena = !wm831x->auxadc_active;
57
58 if (ena) {
59 ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
60 WM831X_AUX_ENA, WM831X_AUX_ENA);
61 if (ret != 0) {
62 dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n",
63 ret);
64 goto out;
65 }
66 }
67
68 /* Enable the conversion if not already running */
69 if (!(wm831x->auxadc_active & (1 << input))) {
70 ret = wm831x_set_bits(wm831x, WM831X_AUXADC_SOURCE,
71 1 << input, 1 << input);
72 if (ret != 0) {
73 dev_err(wm831x->dev,
74 "Failed to set AUXADC source: %d\n", ret);
75 goto out;
76 }
77
78 wm831x->auxadc_active |= 1 << input;
79 }
80
81 /* We convert at the fastest rate possible */
82 if (ena) {
83 ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
84 WM831X_AUX_CVT_ENA |
85 WM831X_AUX_RATE_MASK,
86 WM831X_AUX_CVT_ENA |
87 WM831X_AUX_RATE_MASK);
88 if (ret != 0) {
89 dev_err(wm831x->dev, "Failed to start AUXADC: %d\n",
90 ret);
91 goto out;
92 }
93 }
94
95 mutex_unlock(&wm831x->auxadc_lock);
96
97 /* Wait for an interrupt */
98 wait_for_completion_timeout(&req->done, msecs_to_jiffies(500));
99
100 mutex_lock(&wm831x->auxadc_lock);
101
102 list_del(&req->list);
103 ret = req->val;
104
105out:
106 mutex_unlock(&wm831x->auxadc_lock);
107
108 kfree(req);
109
110 return ret;
111}
112
/*
 * AUXADC conversion-complete interrupt: read the result register,
 * retire the conversion source and complete every pending request
 * waiting on that input.
 */
static irqreturn_t wm831x_auxadc_irq(int irq, void *irq_data)
{
	struct wm831x *wm831x = irq_data;
	struct wm831x_auxadc_req *req;
	int ret, input, val;

	ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
	if (ret < 0) {
		dev_err(wm831x->dev,
			"Failed to read AUXADC data: %d\n", ret);
		return IRQ_NONE;
	}

	/* The source field is 1-based in the data register. */
	input = ((ret & WM831X_AUX_DATA_SRC_MASK)
		 >> WM831X_AUX_DATA_SRC_SHIFT) - 1;

	/* Source 14 reports the calibration input. */
	if (input == 14)
		input = WM831X_AUX_CAL;

	val = ret & WM831X_AUX_DATA_MASK;

	mutex_lock(&wm831x->auxadc_lock);

	/* Disable this conversion, we're about to complete all users */
	wm831x_set_bits(wm831x, WM831X_AUXADC_SOURCE,
			1 << input, 0);
	wm831x->auxadc_active &= ~(1 << input);

	/* Turn off the entire convertor if idle */
	if (!wm831x->auxadc_active)
		wm831x_reg_write(wm831x, WM831X_AUXADC_CONTROL, 0);

	/* Wake up any threads waiting for this request */
	list_for_each_entry(req, &wm831x->auxadc_pending, list) {
		if (req->input == input) {
			req->val = val;
			complete(&req->done);
		}
	}

	mutex_unlock(&wm831x->auxadc_lock);

	return IRQ_HANDLED;
}
157
158static int wm831x_auxadc_read_polled(struct wm831x *wm831x,
159 enum wm831x_auxadc input)
160{
161 int ret, src, timeout;
162
163 mutex_lock(&wm831x->auxadc_lock);
164
165 ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
166 WM831X_AUX_ENA, WM831X_AUX_ENA);
167 if (ret < 0) {
168 dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n", ret);
169 goto out;
170 }
171
172 /* We force a single source at present */
173 src = input;
174 ret = wm831x_reg_write(wm831x, WM831X_AUXADC_SOURCE,
175 1 << src);
176 if (ret < 0) {
177 dev_err(wm831x->dev, "Failed to set AUXADC source: %d\n", ret);
178 goto out;
179 }
180
181 ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
182 WM831X_AUX_CVT_ENA, WM831X_AUX_CVT_ENA);
183 if (ret < 0) {
184 dev_err(wm831x->dev, "Failed to start AUXADC: %d\n", ret);
185 goto disable;
186 }
187
188 /* If we're not using interrupts then poll the
189 * interrupt status register */
190 timeout = 5;
191 while (timeout) {
192 msleep(1);
193
194 ret = wm831x_reg_read(wm831x,
195 WM831X_INTERRUPT_STATUS_1);
196 if (ret < 0) {
197 dev_err(wm831x->dev,
198 "ISR 1 read failed: %d\n", ret);
199 goto disable;
200 }
201
202 /* Did it complete? */
203 if (ret & WM831X_AUXADC_DATA_EINT) {
204 wm831x_reg_write(wm831x,
205 WM831X_INTERRUPT_STATUS_1,
206 WM831X_AUXADC_DATA_EINT);
207 break;
208 } else {
209 dev_err(wm831x->dev,
210 "AUXADC conversion timeout\n");
211 ret = -EBUSY;
212 goto disable;
213 }
214 }
215
216 ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
217 if (ret < 0) {
218 dev_err(wm831x->dev,
219 "Failed to read AUXADC data: %d\n", ret);
220 goto disable;
221 }
222
223 src = ((ret & WM831X_AUX_DATA_SRC_MASK)
224 >> WM831X_AUX_DATA_SRC_SHIFT) - 1;
225
226 if (src == 14)
227 src = WM831X_AUX_CAL;
228
229 if (src != input) {
230 dev_err(wm831x->dev, "Data from source %d not %d\n",
231 src, input);
232 ret = -EINVAL;
233 } else {
234 ret &= WM831X_AUX_DATA_MASK;
235 }
236
237disable:
238 wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, 0);
239out:
240 mutex_unlock(&wm831x->auxadc_lock);
241 return ret;
242}
243
244/**
245 * wm831x_auxadc_read: Read a value from the WM831x AUXADC
246 *
247 * @wm831x: Device to read from.
248 * @input: AUXADC input to read.
249 */
int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
{
	/* Dispatch to the IRQ-driven or polled backend chosen at init. */
	return wm831x->auxadc_read(wm831x, input);
}
254EXPORT_SYMBOL_GPL(wm831x_auxadc_read);
255
256/**
257 * wm831x_auxadc_read_uv: Read a voltage from the WM831x AUXADC
258 *
259 * @wm831x: Device to read from.
260 * @input: AUXADC input to read.
261 */
262int wm831x_auxadc_read_uv(struct wm831x *wm831x, enum wm831x_auxadc input)
263{
264 int ret;
265
266 ret = wm831x_auxadc_read(wm831x, input);
267 if (ret < 0)
268 return ret;
269
270 ret *= 1465;
271
272 return ret;
273}
274EXPORT_SYMBOL_GPL(wm831x_auxadc_read_uv);
275
/*
 * Choose the AUXADC access method: interrupt-driven when both a chip
 * IRQ and an IRQ base are available, otherwise fall back to polling.
 */
void wm831x_auxadc_init(struct wm831x *wm831x)
{
	int ret;

	mutex_init(&wm831x->auxadc_lock);
	INIT_LIST_HEAD(&wm831x->auxadc_pending);

	if (wm831x->irq && wm831x->irq_base) {
		wm831x->auxadc_read = wm831x_auxadc_read_irq;

		ret = request_threaded_irq(wm831x->irq_base +
					   WM831X_IRQ_AUXADC_DATA,
					   NULL, wm831x_auxadc_irq, 0,
					   "auxadc", wm831x);
		if (ret < 0) {
			dev_err(wm831x->dev, "AUXADC IRQ request failed: %d\n",
				ret);
			/* Clearing the hook selects the polled path below. */
			wm831x->auxadc_read = NULL;
		}
	}

	if (!wm831x->auxadc_read)
		wm831x->auxadc_read = wm831x_auxadc_read_polled;
}
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 265f75fc6a25..282e76ab678f 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -295,7 +295,7 @@ int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg,
295 goto out; 295 goto out;
296 296
297 r &= ~mask; 297 r &= ~mask;
298 r |= val; 298 r |= val & mask;
299 299
300 ret = wm831x_write(wm831x, reg, 2, &r); 300 ret = wm831x_write(wm831x, reg, 2, &r);
301 301
@@ -306,146 +306,6 @@ out:
306} 306}
307EXPORT_SYMBOL_GPL(wm831x_set_bits); 307EXPORT_SYMBOL_GPL(wm831x_set_bits);
308 308
309/**
310 * wm831x_auxadc_read: Read a value from the WM831x AUXADC
311 *
312 * @wm831x: Device to read from.
313 * @input: AUXADC input to read.
314 */
315int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
316{
317 int ret, src, irq_masked, timeout;
318
319 /* Are we using the interrupt? */
320 irq_masked = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_1_MASK);
321 irq_masked &= WM831X_AUXADC_DATA_EINT;
322
323 mutex_lock(&wm831x->auxadc_lock);
324
325 ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
326 WM831X_AUX_ENA, WM831X_AUX_ENA);
327 if (ret < 0) {
328 dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n", ret);
329 goto out;
330 }
331
332 /* We force a single source at present */
333 src = input;
334 ret = wm831x_reg_write(wm831x, WM831X_AUXADC_SOURCE,
335 1 << src);
336 if (ret < 0) {
337 dev_err(wm831x->dev, "Failed to set AUXADC source: %d\n", ret);
338 goto out;
339 }
340
341 /* Clear any notification from a very late arriving interrupt */
342 try_wait_for_completion(&wm831x->auxadc_done);
343
344 ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
345 WM831X_AUX_CVT_ENA, WM831X_AUX_CVT_ENA);
346 if (ret < 0) {
347 dev_err(wm831x->dev, "Failed to start AUXADC: %d\n", ret);
348 goto disable;
349 }
350
351 if (irq_masked) {
352 /* If we're not using interrupts then poll the
353 * interrupt status register */
354 timeout = 5;
355 while (timeout) {
356 msleep(1);
357
358 ret = wm831x_reg_read(wm831x,
359 WM831X_INTERRUPT_STATUS_1);
360 if (ret < 0) {
361 dev_err(wm831x->dev,
362 "ISR 1 read failed: %d\n", ret);
363 goto disable;
364 }
365
366 /* Did it complete? */
367 if (ret & WM831X_AUXADC_DATA_EINT) {
368 wm831x_reg_write(wm831x,
369 WM831X_INTERRUPT_STATUS_1,
370 WM831X_AUXADC_DATA_EINT);
371 break;
372 } else {
373 dev_err(wm831x->dev,
374 "AUXADC conversion timeout\n");
375 ret = -EBUSY;
376 goto disable;
377 }
378 }
379 } else {
380 /* If we are using interrupts then wait for the
381 * interrupt to complete. Use an extremely long
382 * timeout to handle situations with heavy load where
383 * the notification of the interrupt may be delayed by
384 * threaded IRQ handling. */
385 if (!wait_for_completion_timeout(&wm831x->auxadc_done,
386 msecs_to_jiffies(500))) {
387 dev_err(wm831x->dev, "Timed out waiting for AUXADC\n");
388 ret = -EBUSY;
389 goto disable;
390 }
391 }
392
393 ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
394 if (ret < 0) {
395 dev_err(wm831x->dev, "Failed to read AUXADC data: %d\n", ret);
396 } else {
397 src = ((ret & WM831X_AUX_DATA_SRC_MASK)
398 >> WM831X_AUX_DATA_SRC_SHIFT) - 1;
399
400 if (src == 14)
401 src = WM831X_AUX_CAL;
402
403 if (src != input) {
404 dev_err(wm831x->dev, "Data from source %d not %d\n",
405 src, input);
406 ret = -EINVAL;
407 } else {
408 ret &= WM831X_AUX_DATA_MASK;
409 }
410 }
411
412disable:
413 wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, 0);
414out:
415 mutex_unlock(&wm831x->auxadc_lock);
416 return ret;
417}
418EXPORT_SYMBOL_GPL(wm831x_auxadc_read);
419
420static irqreturn_t wm831x_auxadc_irq(int irq, void *irq_data)
421{
422 struct wm831x *wm831x = irq_data;
423
424 complete(&wm831x->auxadc_done);
425
426 return IRQ_HANDLED;
427}
428
429/**
430 * wm831x_auxadc_read_uv: Read a voltage from the WM831x AUXADC
431 *
432 * @wm831x: Device to read from.
433 * @input: AUXADC input to read.
434 */
435int wm831x_auxadc_read_uv(struct wm831x *wm831x, enum wm831x_auxadc input)
436{
437 int ret;
438
439 ret = wm831x_auxadc_read(wm831x, input);
440 if (ret < 0)
441 return ret;
442
443 ret *= 1465;
444
445 return ret;
446}
447EXPORT_SYMBOL_GPL(wm831x_auxadc_read_uv);
448
449static struct resource wm831x_dcdc1_resources[] = { 309static struct resource wm831x_dcdc1_resources[] = {
450 { 310 {
451 .start = WM831X_DC1_CONTROL_1, 311 .start = WM831X_DC1_CONTROL_1,
@@ -872,6 +732,9 @@ static struct mfd_cell wm8310_devs[] = {
872 .resources = wm831x_dcdc4_resources, 732 .resources = wm831x_dcdc4_resources,
873 }, 733 },
874 { 734 {
735 .name = "wm831x-clk",
736 },
737 {
875 .name = "wm831x-epe", 738 .name = "wm831x-epe",
876 .id = 1, 739 .id = 1,
877 }, 740 },
@@ -976,11 +839,6 @@ static struct mfd_cell wm8310_devs[] = {
976 .resources = wm831x_power_resources, 839 .resources = wm831x_power_resources,
977 }, 840 },
978 { 841 {
979 .name = "wm831x-rtc",
980 .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
981 .resources = wm831x_rtc_resources,
982 },
983 {
984 .name = "wm831x-status", 842 .name = "wm831x-status",
985 .id = 1, 843 .id = 1,
986 .num_resources = ARRAY_SIZE(wm831x_status1_resources), 844 .num_resources = ARRAY_SIZE(wm831x_status1_resources),
@@ -1028,6 +886,9 @@ static struct mfd_cell wm8311_devs[] = {
1028 .resources = wm831x_dcdc4_resources, 886 .resources = wm831x_dcdc4_resources,
1029 }, 887 },
1030 { 888 {
889 .name = "wm831x-clk",
890 },
891 {
1031 .name = "wm831x-epe", 892 .name = "wm831x-epe",
1032 .id = 1, 893 .id = 1,
1033 }, 894 },
@@ -1108,11 +969,6 @@ static struct mfd_cell wm8311_devs[] = {
1108 .resources = wm831x_power_resources, 969 .resources = wm831x_power_resources,
1109 }, 970 },
1110 { 971 {
1111 .name = "wm831x-rtc",
1112 .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
1113 .resources = wm831x_rtc_resources,
1114 },
1115 {
1116 .name = "wm831x-status", 972 .name = "wm831x-status",
1117 .id = 1, 973 .id = 1,
1118 .num_resources = ARRAY_SIZE(wm831x_status1_resources), 974 .num_resources = ARRAY_SIZE(wm831x_status1_resources),
@@ -1125,11 +981,6 @@ static struct mfd_cell wm8311_devs[] = {
1125 .resources = wm831x_status2_resources, 981 .resources = wm831x_status2_resources,
1126 }, 982 },
1127 { 983 {
1128 .name = "wm831x-touch",
1129 .num_resources = ARRAY_SIZE(wm831x_touch_resources),
1130 .resources = wm831x_touch_resources,
1131 },
1132 {
1133 .name = "wm831x-watchdog", 984 .name = "wm831x-watchdog",
1134 .num_resources = ARRAY_SIZE(wm831x_wdt_resources), 985 .num_resources = ARRAY_SIZE(wm831x_wdt_resources),
1135 .resources = wm831x_wdt_resources, 986 .resources = wm831x_wdt_resources,
@@ -1165,6 +1016,9 @@ static struct mfd_cell wm8312_devs[] = {
1165 .resources = wm831x_dcdc4_resources, 1016 .resources = wm831x_dcdc4_resources,
1166 }, 1017 },
1167 { 1018 {
1019 .name = "wm831x-clk",
1020 },
1021 {
1168 .name = "wm831x-epe", 1022 .name = "wm831x-epe",
1169 .id = 1, 1023 .id = 1,
1170 }, 1024 },
@@ -1269,11 +1123,6 @@ static struct mfd_cell wm8312_devs[] = {
1269 .resources = wm831x_power_resources, 1123 .resources = wm831x_power_resources,
1270 }, 1124 },
1271 { 1125 {
1272 .name = "wm831x-rtc",
1273 .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
1274 .resources = wm831x_rtc_resources,
1275 },
1276 {
1277 .name = "wm831x-status", 1126 .name = "wm831x-status",
1278 .id = 1, 1127 .id = 1,
1279 .num_resources = ARRAY_SIZE(wm831x_status1_resources), 1128 .num_resources = ARRAY_SIZE(wm831x_status1_resources),
@@ -1286,11 +1135,6 @@ static struct mfd_cell wm8312_devs[] = {
1286 .resources = wm831x_status2_resources, 1135 .resources = wm831x_status2_resources,
1287 }, 1136 },
1288 { 1137 {
1289 .name = "wm831x-touch",
1290 .num_resources = ARRAY_SIZE(wm831x_touch_resources),
1291 .resources = wm831x_touch_resources,
1292 },
1293 {
1294 .name = "wm831x-watchdog", 1138 .name = "wm831x-watchdog",
1295 .num_resources = ARRAY_SIZE(wm831x_wdt_resources), 1139 .num_resources = ARRAY_SIZE(wm831x_wdt_resources),
1296 .resources = wm831x_wdt_resources, 1140 .resources = wm831x_wdt_resources,
@@ -1326,6 +1170,9 @@ static struct mfd_cell wm8320_devs[] = {
1326 .resources = wm8320_dcdc4_buck_resources, 1170 .resources = wm8320_dcdc4_buck_resources,
1327 }, 1171 },
1328 { 1172 {
1173 .name = "wm831x-clk",
1174 },
1175 {
1329 .name = "wm831x-gpio", 1176 .name = "wm831x-gpio",
1330 .num_resources = ARRAY_SIZE(wm831x_gpio_resources), 1177 .num_resources = ARRAY_SIZE(wm831x_gpio_resources),
1331 .resources = wm831x_gpio_resources, 1178 .resources = wm831x_gpio_resources,
@@ -1405,11 +1252,6 @@ static struct mfd_cell wm8320_devs[] = {
1405 .resources = wm831x_on_resources, 1252 .resources = wm831x_on_resources,
1406 }, 1253 },
1407 { 1254 {
1408 .name = "wm831x-rtc",
1409 .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
1410 .resources = wm831x_rtc_resources,
1411 },
1412 {
1413 .name = "wm831x-status", 1255 .name = "wm831x-status",
1414 .id = 1, 1256 .id = 1,
1415 .num_resources = ARRAY_SIZE(wm831x_status1_resources), 1257 .num_resources = ARRAY_SIZE(wm831x_status1_resources),
@@ -1428,6 +1270,22 @@ static struct mfd_cell wm8320_devs[] = {
1428 }, 1270 },
1429}; 1271};
1430 1272
1273static struct mfd_cell touch_devs[] = {
1274 {
1275 .name = "wm831x-touch",
1276 .num_resources = ARRAY_SIZE(wm831x_touch_resources),
1277 .resources = wm831x_touch_resources,
1278 },
1279};
1280
1281static struct mfd_cell rtc_devs[] = {
1282 {
1283 .name = "wm831x-rtc",
1284 .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
1285 .resources = wm831x_rtc_resources,
1286 },
1287};
1288
1431static struct mfd_cell backlight_devs[] = { 1289static struct mfd_cell backlight_devs[] = {
1432 { 1290 {
1433 .name = "wm831x-backlight", 1291 .name = "wm831x-backlight",
@@ -1440,14 +1298,12 @@ static struct mfd_cell backlight_devs[] = {
1440int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) 1298int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
1441{ 1299{
1442 struct wm831x_pdata *pdata = wm831x->dev->platform_data; 1300 struct wm831x_pdata *pdata = wm831x->dev->platform_data;
1443 int rev; 1301 int rev, wm831x_num;
1444 enum wm831x_parent parent; 1302 enum wm831x_parent parent;
1445 int ret, i; 1303 int ret, i;
1446 1304
1447 mutex_init(&wm831x->io_lock); 1305 mutex_init(&wm831x->io_lock);
1448 mutex_init(&wm831x->key_lock); 1306 mutex_init(&wm831x->key_lock);
1449 mutex_init(&wm831x->auxadc_lock);
1450 init_completion(&wm831x->auxadc_done);
1451 dev_set_drvdata(wm831x->dev, wm831x); 1307 dev_set_drvdata(wm831x->dev, wm831x);
1452 1308
1453 ret = wm831x_reg_read(wm831x, WM831X_PARENT_ID); 1309 ret = wm831x_reg_read(wm831x, WM831X_PARENT_ID);
@@ -1592,45 +1448,51 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
1592 } 1448 }
1593 } 1449 }
1594 1450
1451 /* Multiply by 10 as we have many subdevices of the same type */
1452 if (pdata && pdata->wm831x_num)
1453 wm831x_num = pdata->wm831x_num * 10;
1454 else
1455 wm831x_num = -1;
1456
1595 ret = wm831x_irq_init(wm831x, irq); 1457 ret = wm831x_irq_init(wm831x, irq);
1596 if (ret != 0) 1458 if (ret != 0)
1597 goto err; 1459 goto err;
1598 1460
1599 if (wm831x->irq_base) { 1461 wm831x_auxadc_init(wm831x);
1600 ret = request_threaded_irq(wm831x->irq_base +
1601 WM831X_IRQ_AUXADC_DATA,
1602 NULL, wm831x_auxadc_irq, 0,
1603 "auxadc", wm831x);
1604 if (ret < 0)
1605 dev_err(wm831x->dev, "AUXADC IRQ request failed: %d\n",
1606 ret);
1607 }
1608 1462
1609 /* The core device is up, instantiate the subdevices. */ 1463 /* The core device is up, instantiate the subdevices. */
1610 switch (parent) { 1464 switch (parent) {
1611 case WM8310: 1465 case WM8310:
1612 ret = mfd_add_devices(wm831x->dev, -1, 1466 ret = mfd_add_devices(wm831x->dev, wm831x_num,
1613 wm8310_devs, ARRAY_SIZE(wm8310_devs), 1467 wm8310_devs, ARRAY_SIZE(wm8310_devs),
1614 NULL, wm831x->irq_base); 1468 NULL, wm831x->irq_base);
1615 break; 1469 break;
1616 1470
1617 case WM8311: 1471 case WM8311:
1618 ret = mfd_add_devices(wm831x->dev, -1, 1472 ret = mfd_add_devices(wm831x->dev, wm831x_num,
1619 wm8311_devs, ARRAY_SIZE(wm8311_devs), 1473 wm8311_devs, ARRAY_SIZE(wm8311_devs),
1620 NULL, wm831x->irq_base); 1474 NULL, wm831x->irq_base);
1475 if (!pdata || !pdata->disable_touch)
1476 mfd_add_devices(wm831x->dev, wm831x_num,
1477 touch_devs, ARRAY_SIZE(touch_devs),
1478 NULL, wm831x->irq_base);
1621 break; 1479 break;
1622 1480
1623 case WM8312: 1481 case WM8312:
1624 ret = mfd_add_devices(wm831x->dev, -1, 1482 ret = mfd_add_devices(wm831x->dev, wm831x_num,
1625 wm8312_devs, ARRAY_SIZE(wm8312_devs), 1483 wm8312_devs, ARRAY_SIZE(wm8312_devs),
1626 NULL, wm831x->irq_base); 1484 NULL, wm831x->irq_base);
1485 if (!pdata || !pdata->disable_touch)
1486 mfd_add_devices(wm831x->dev, wm831x_num,
1487 touch_devs, ARRAY_SIZE(touch_devs),
1488 NULL, wm831x->irq_base);
1627 break; 1489 break;
1628 1490
1629 case WM8320: 1491 case WM8320:
1630 case WM8321: 1492 case WM8321:
1631 case WM8325: 1493 case WM8325:
1632 case WM8326: 1494 case WM8326:
1633 ret = mfd_add_devices(wm831x->dev, -1, 1495 ret = mfd_add_devices(wm831x->dev, wm831x_num,
1634 wm8320_devs, ARRAY_SIZE(wm8320_devs), 1496 wm8320_devs, ARRAY_SIZE(wm8320_devs),
1635 NULL, wm831x->irq_base); 1497 NULL, wm831x->irq_base);
1636 break; 1498 break;
@@ -1645,9 +1507,30 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
1645 goto err_irq; 1507 goto err_irq;
1646 } 1508 }
1647 1509
1510 /* The RTC can only be used if the 32.768kHz crystal is
1511 * enabled; this can't be controlled by software at runtime.
1512 */
1513 ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
1514 if (ret < 0) {
1515 dev_err(wm831x->dev, "Failed to read clock status: %d\n", ret);
1516 goto err_irq;
1517 }
1518
1519 if (ret & WM831X_XTAL_ENA) {
1520 ret = mfd_add_devices(wm831x->dev, wm831x_num,
1521 rtc_devs, ARRAY_SIZE(rtc_devs),
1522 NULL, wm831x->irq_base);
1523 if (ret != 0) {
1524 dev_err(wm831x->dev, "Failed to add RTC: %d\n", ret);
1525 goto err_irq;
1526 }
1527 } else {
1528 dev_info(wm831x->dev, "32.768kHz clock disabled, no RTC\n");
1529 }
1530
1648 if (pdata && pdata->backlight) { 1531 if (pdata && pdata->backlight) {
1649 /* Treat errors as non-critical */ 1532 /* Treat errors as non-critical */
1650 ret = mfd_add_devices(wm831x->dev, -1, backlight_devs, 1533 ret = mfd_add_devices(wm831x->dev, wm831x_num, backlight_devs,
1651 ARRAY_SIZE(backlight_devs), NULL, 1534 ARRAY_SIZE(backlight_devs), NULL,
1652 wm831x->irq_base); 1535 wm831x->irq_base);
1653 if (ret < 0) 1536 if (ret < 0)
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 42b928ec891e..ada1835a5455 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -348,6 +348,15 @@ static void wm831x_irq_sync_unlock(struct irq_data *data)
348 struct wm831x *wm831x = irq_data_get_irq_chip_data(data); 348 struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
349 int i; 349 int i;
350 350
351 for (i = 0; i < ARRAY_SIZE(wm831x->gpio_update); i++) {
352 if (wm831x->gpio_update[i]) {
353 wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + i,
354 WM831X_GPN_INT_MODE | WM831X_GPN_POL,
355 wm831x->gpio_update[i]);
356 wm831x->gpio_update[i] = 0;
357 }
358 }
359
351 for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) { 360 for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
352 /* If there's been a change in the mask write it back 361 /* If there's been a change in the mask write it back
353 * to the hardware. */ 362 * to the hardware. */
@@ -387,7 +396,7 @@ static void wm831x_irq_disable(struct irq_data *data)
387static int wm831x_irq_set_type(struct irq_data *data, unsigned int type) 396static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
388{ 397{
389 struct wm831x *wm831x = irq_data_get_irq_chip_data(data); 398 struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
390 int val, irq; 399 int irq;
391 400
392 irq = data->irq - wm831x->irq_base; 401 irq = data->irq - wm831x->irq_base;
393 402
@@ -399,22 +408,30 @@ static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
399 return -EINVAL; 408 return -EINVAL;
400 } 409 }
401 410
411 /* Rebase the IRQ into the GPIO range so we've got a sensible array
412 * index.
413 */
414 irq -= WM831X_IRQ_GPIO_1;
415
416 /* We set the high bit to flag that we need an update; don't
417 * do the update here as we can be called with the bus lock
418 * held.
419 */
402 switch (type) { 420 switch (type) {
403 case IRQ_TYPE_EDGE_BOTH: 421 case IRQ_TYPE_EDGE_BOTH:
404 val = WM831X_GPN_INT_MODE; 422 wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_INT_MODE;
405 break; 423 break;
406 case IRQ_TYPE_EDGE_RISING: 424 case IRQ_TYPE_EDGE_RISING:
407 val = WM831X_GPN_POL; 425 wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
408 break; 426 break;
409 case IRQ_TYPE_EDGE_FALLING: 427 case IRQ_TYPE_EDGE_FALLING:
410 val = 0; 428 wm831x->gpio_update[irq] = 0x10000;
411 break; 429 break;
412 default: 430 default:
413 return -EINVAL; 431 return -EINVAL;
414 } 432 }
415 433
416 return wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + irq, 434 return 0;
417 WM831X_GPN_INT_MODE | WM831X_GPN_POL, val);
418} 435}
419 436
420static struct irq_chip wm831x_irq_chip = { 437static struct irq_chip wm831x_irq_chip = {
@@ -432,7 +449,7 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
432{ 449{
433 struct wm831x *wm831x = data; 450 struct wm831x *wm831x = data;
434 unsigned int i; 451 unsigned int i;
435 int primary; 452 int primary, status_addr;
436 int status_regs[WM831X_NUM_IRQ_REGS] = { 0 }; 453 int status_regs[WM831X_NUM_IRQ_REGS] = { 0 };
437 int read[WM831X_NUM_IRQ_REGS] = { 0 }; 454 int read[WM831X_NUM_IRQ_REGS] = { 0 };
438 int *status; 455 int *status;
@@ -467,8 +484,9 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
467 /* Hopefully there should only be one register to read 484 /* Hopefully there should only be one register to read
468 * each time otherwise we ought to do a block read. */ 485 * each time otherwise we ought to do a block read. */
469 if (!read[offset]) { 486 if (!read[offset]) {
470 *status = wm831x_reg_read(wm831x, 487 status_addr = irq_data_to_status_reg(&wm831x_irqs[i]);
471 irq_data_to_status_reg(&wm831x_irqs[i])); 488
489 *status = wm831x_reg_read(wm831x, status_addr);
472 if (*status < 0) { 490 if (*status < 0) {
473 dev_err(wm831x->dev, 491 dev_err(wm831x->dev,
474 "Failed to read IRQ status: %d\n", 492 "Failed to read IRQ status: %d\n",
@@ -477,26 +495,21 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
477 } 495 }
478 496
479 read[offset] = 1; 497 read[offset] = 1;
498
499 /* Ignore any bits that we don't think are masked */
500 *status &= ~wm831x->irq_masks_cur[offset];
501
502 /* Acknowledge now so we don't miss
503 * notifications while we handle.
504 */
505 wm831x_reg_write(wm831x, status_addr, *status);
480 } 506 }
481 507
482 /* Report it if it isn't masked, or forget the status. */ 508 if (*status & wm831x_irqs[i].mask)
483 if ((*status & ~wm831x->irq_masks_cur[offset])
484 & wm831x_irqs[i].mask)
485 handle_nested_irq(wm831x->irq_base + i); 509 handle_nested_irq(wm831x->irq_base + i);
486 else
487 *status &= ~wm831x_irqs[i].mask;
488 } 510 }
489 511
490out: 512out:
491 /* Touchscreen interrupts are handled specially in the driver */
492 status_regs[0] &= ~(WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT);
493
494 for (i = 0; i < ARRAY_SIZE(status_regs); i++) {
495 if (status_regs[i])
496 wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1 + i,
497 status_regs[i]);
498 }
499
500 return IRQ_HANDLED; 513 return IRQ_HANDLED;
501} 514}
502 515
@@ -515,13 +528,22 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
515 0xffff); 528 0xffff);
516 } 529 }
517 530
518 if (!pdata || !pdata->irq_base) { 531 /* Try to dynamically allocate IRQs if no base is specified */
519 dev_err(wm831x->dev, 532 if (!pdata || !pdata->irq_base)
520 "No interrupt base specified, no interrupts\n"); 533 wm831x->irq_base = -1;
534 else
535 wm831x->irq_base = pdata->irq_base;
536
537 wm831x->irq_base = irq_alloc_descs(wm831x->irq_base, 0,
538 WM831X_NUM_IRQS, 0);
539 if (wm831x->irq_base < 0) {
540 dev_warn(wm831x->dev, "Failed to allocate IRQs: %d\n",
541 wm831x->irq_base);
542 wm831x->irq_base = 0;
521 return 0; 543 return 0;
522 } 544 }
523 545
524 if (pdata->irq_cmos) 546 if (pdata && pdata->irq_cmos)
525 i = 0; 547 i = 0;
526 else 548 else
527 i = WM831X_IRQ_OD; 549 i = WM831X_IRQ_OD;
@@ -541,7 +563,6 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
541 } 563 }
542 564
543 wm831x->irq = irq; 565 wm831x->irq = irq;
544 wm831x->irq_base = pdata->irq_base;
545 566
546 /* Register them with genirq */ 567 /* Register them with genirq */
547 for (cur_irq = wm831x->irq_base; 568 for (cur_irq = wm831x->irq_base;
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index ed4b22a167b3..8a1fafd0bf7d 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -473,17 +473,13 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq,
473{ 473{
474 int ret, cur_irq, i; 474 int ret, cur_irq, i;
475 int flags = IRQF_ONESHOT; 475 int flags = IRQF_ONESHOT;
476 int irq_base = -1;
476 477
477 if (!irq) { 478 if (!irq) {
478 dev_warn(wm8350->dev, "No interrupt support, no core IRQ\n"); 479 dev_warn(wm8350->dev, "No interrupt support, no core IRQ\n");
479 return 0; 480 return 0;
480 } 481 }
481 482
482 if (!pdata || !pdata->irq_base) {
483 dev_warn(wm8350->dev, "No interrupt support, no IRQ base\n");
484 return 0;
485 }
486
487 /* Mask top level interrupts */ 483 /* Mask top level interrupts */
488 wm8350_reg_write(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK, 0xFFFF); 484 wm8350_reg_write(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK, 0xFFFF);
489 485
@@ -502,7 +498,17 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq,
502 wm8350->chip_irq = irq; 498 wm8350->chip_irq = irq;
503 wm8350->irq_base = pdata->irq_base; 499 wm8350->irq_base = pdata->irq_base;
504 500
505 if (pdata->irq_high) { 501 if (pdata && pdata->irq_base > 0)
502 irq_base = pdata->irq_base;
503
504 wm8350->irq_base = irq_alloc_descs(irq_base, 0, ARRAY_SIZE(wm8350_irqs), 0);
505 if (wm8350->irq_base < 0) {
506 dev_warn(wm8350->dev, "Allocating irqs failed with %d\n",
507 wm8350->irq_base);
508 return 0;
509 }
510
511 if (pdata && pdata->irq_high) {
506 flags |= IRQF_TRIGGER_HIGH; 512 flags |= IRQF_TRIGGER_HIGH;
507 513
508 wm8350_set_bits(wm8350, WM8350_SYSTEM_CONTROL_1, 514 wm8350_set_bits(wm8350, WM8350_SYSTEM_CONTROL_1,
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index e198d40292e7..96479c9b1728 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -316,7 +316,7 @@ static int wm8994_suspend(struct device *dev)
316static int wm8994_resume(struct device *dev) 316static int wm8994_resume(struct device *dev)
317{ 317{
318 struct wm8994 *wm8994 = dev_get_drvdata(dev); 318 struct wm8994 *wm8994 = dev_get_drvdata(dev);
319 int ret; 319 int ret, i;
320 320
321 /* We may have lied to the PM core about suspending */ 321 /* We may have lied to the PM core about suspending */
322 if (!wm8994->suspended) 322 if (!wm8994->suspended)
@@ -329,10 +329,16 @@ static int wm8994_resume(struct device *dev)
329 return ret; 329 return ret;
330 } 330 }
331 331
332 ret = wm8994_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK, 332 /* Write register at a time as we use the cache on the CPU so store
333 WM8994_NUM_IRQ_REGS * 2, &wm8994->irq_masks_cur); 333 * it in native endian.
334 if (ret < 0) 334 */
335 dev_err(dev, "Failed to restore interrupt masks: %d\n", ret); 335 for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
336 ret = wm8994_reg_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK
337 + i, wm8994->irq_masks_cur[i]);
338 if (ret < 0)
339 dev_err(dev, "Failed to restore interrupt masks: %d\n",
340 ret);
341 }
336 342
337 ret = wm8994_write(wm8994, WM8994_LDO_1, WM8994_NUM_LDO_REGS * 2, 343 ret = wm8994_write(wm8994, WM8994_LDO_1, WM8994_NUM_LDO_REGS * 2,
338 &wm8994->ldo_regs); 344 &wm8994->ldo_regs);
@@ -403,7 +409,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
403 break; 409 break;
404 default: 410 default:
405 BUG(); 411 BUG();
406 return -EINVAL; 412 goto err;
407 } 413 }
408 414
409 wm8994->supplies = kzalloc(sizeof(struct regulator_bulk_data) * 415 wm8994->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
@@ -425,7 +431,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
425 break; 431 break;
426 default: 432 default:
427 BUG(); 433 BUG();
428 return -EINVAL; 434 goto err;
429 } 435 }
430 436
431 ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies, 437 ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
@@ -476,13 +482,18 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
476 goto err_enable; 482 goto err_enable;
477 } 483 }
478 484
479 switch (ret) { 485 switch (wm8994->type) {
480 case 0: 486 case WM8994:
481 case 1: 487 switch (ret) {
482 if (wm8994->type == WM8994) 488 case 0:
489 case 1:
483 dev_warn(wm8994->dev, 490 dev_warn(wm8994->dev,
484 "revision %c not fully supported\n", 491 "revision %c not fully supported\n",
485 'A' + ret); 492 'A' + ret);
493 break;
494 default:
495 break;
496 }
486 break; 497 break;
487 default: 498 default:
488 break; 499 break;
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 71c6e8f9aedb..d682f7bd112c 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -231,12 +231,6 @@ static irqreturn_t wm8994_irq_thread(int irq, void *data)
231 status[i] &= ~wm8994->irq_masks_cur[i]; 231 status[i] &= ~wm8994->irq_masks_cur[i];
232 } 232 }
233 233
234 /* Report */
235 for (i = 0; i < ARRAY_SIZE(wm8994_irqs); i++) {
236 if (status[wm8994_irqs[i].reg - 1] & wm8994_irqs[i].mask)
237 handle_nested_irq(wm8994->irq_base + i);
238 }
239
240 /* Ack any unmasked IRQs */ 234 /* Ack any unmasked IRQs */
241 for (i = 0; i < ARRAY_SIZE(status); i++) { 235 for (i = 0; i < ARRAY_SIZE(status); i++) {
242 if (status[i]) 236 if (status[i])
@@ -244,6 +238,12 @@ static irqreturn_t wm8994_irq_thread(int irq, void *data)
244 status[i]); 238 status[i]);
245 } 239 }
246 240
241 /* Report */
242 for (i = 0; i < ARRAY_SIZE(wm8994_irqs); i++) {
243 if (status[wm8994_irqs[i].reg - 1] & wm8994_irqs[i].mask)
244 handle_nested_irq(wm8994->irq_base + i);
245 }
246
247 return IRQ_HANDLED; 247 return IRQ_HANDLED;
248} 248}
249 249
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0a4d86c6c4a4..2d6423c2d193 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -146,6 +146,7 @@ config PHANTOM
146 146
147config INTEL_MID_PTI 147config INTEL_MID_PTI
148 tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard" 148 tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard"
149 depends on PCI
149 default n 150 default n
150 help 151 help
151 The PTI (Parallel Trace Interface) driver directs 152 The PTI (Parallel Trace Interface) driver directs
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
index 54e3d05b63cc..35903154ca2e 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/misc/ab8500-pwm.c
@@ -164,5 +164,5 @@ subsys_initcall(ab8500_pwm_init);
164module_exit(ab8500_pwm_exit); 164module_exit(ab8500_pwm_exit);
165MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>"); 165MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>");
166MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver"); 166MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver");
167MODULE_ALIAS("AB8500 PWM driver"); 167MODULE_ALIAS("platform:ab8500-pwm");
168MODULE_LICENSE("GPL v2"); 168MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
index efec4139c3f6..68cd05b6d829 100644
--- a/drivers/misc/cb710/core.c
+++ b/drivers/misc/cb710/core.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(cb710_pci_update_config_reg);
33static int __devinit cb710_pci_configure(struct pci_dev *pdev) 33static int __devinit cb710_pci_configure(struct pci_dev *pdev)
34{ 34{
35 unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); 35 unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
36 struct pci_dev *pdev0 = pci_get_slot(pdev->bus, devfn); 36 struct pci_dev *pdev0;
37 u32 val; 37 u32 val;
38 38
39 cb710_pci_update_config_reg(pdev, 0x48, 39 cb710_pci_update_config_reg(pdev, 0x48,
@@ -43,6 +43,7 @@ static int __devinit cb710_pci_configure(struct pci_dev *pdev)
43 if (val & 0x80000000) 43 if (val & 0x80000000)
44 return 0; 44 return 0;
45 45
46 pdev0 = pci_get_slot(pdev->bus, devfn);
46 if (!pdev0) 47 if (!pdev0)
47 return -ENODEV; 48 return -ENODEV;
48 49
diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c
index 5325a7e70dcf..27dc0d21aafa 100644
--- a/drivers/misc/fsa9480.c
+++ b/drivers/misc/fsa9480.c
@@ -455,7 +455,7 @@ static int __devinit fsa9480_probe(struct i2c_client *client,
455 455
456fail2: 456fail2:
457 if (client->irq) 457 if (client->irq)
458 free_irq(client->irq, NULL); 458 free_irq(client->irq, usbsw);
459fail1: 459fail1:
460 i2c_set_clientdata(client, NULL); 460 i2c_set_clientdata(client, NULL);
461 kfree(usbsw); 461 kfree(usbsw);
@@ -466,7 +466,7 @@ static int __devexit fsa9480_remove(struct i2c_client *client)
466{ 466{
467 struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); 467 struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client);
468 if (client->irq) 468 if (client->irq)
469 free_irq(client->irq, NULL); 469 free_irq(client->irq, usbsw);
470 i2c_set_clientdata(client, NULL); 470 i2c_set_clientdata(client, NULL);
471 471
472 sysfs_remove_group(&client->dev.kobj, &fsa9480_group); 472 sysfs_remove_group(&client->dev.kobj, &fsa9480_group);
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index 8653bd0b1a33..06df1877ad0f 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -33,6 +33,8 @@
33#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <linux/miscdevice.h> 34#include <linux/miscdevice.h>
35#include <linux/pti.h> 35#include <linux/pti.h>
36#include <linux/slab.h>
37#include <linux/uaccess.h>
36 38
37#define DRIVERNAME "pti" 39#define DRIVERNAME "pti"
38#define PCINAME "pciPTI" 40#define PCINAME "pciPTI"
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 54c91ffe4a91..ba168a7d54d4 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -338,6 +338,12 @@ void st_int_recv(void *disc_data,
338 /* Unknow packet? */ 338 /* Unknow packet? */
339 default: 339 default:
340 type = *ptr; 340 type = *ptr;
341 if (st_gdata->list[type] == NULL) {
342 pr_err("chip/interface misbehavior dropping"
343 " frame starting with 0x%02x", type);
344 goto done;
345
346 }
341 st_gdata->rx_skb = alloc_skb( 347 st_gdata->rx_skb = alloc_skb(
342 st_gdata->list[type]->max_frame_size, 348 st_gdata->list[type]->max_frame_size,
343 GFP_ATOMIC); 349 GFP_ATOMIC);
@@ -354,6 +360,7 @@ void st_int_recv(void *disc_data,
354 ptr++; 360 ptr++;
355 count--; 361 count--;
356 } 362 }
363done:
357 spin_unlock_irqrestore(&st_gdata->lock, flags); 364 spin_unlock_irqrestore(&st_gdata->lock, flags);
358 pr_debug("done %s", __func__); 365 pr_debug("done %s", __func__);
359 return; 366 return;
@@ -717,9 +724,10 @@ static void st_tty_close(struct tty_struct *tty)
717 */ 724 */
718 spin_lock_irqsave(&st_gdata->lock, flags); 725 spin_lock_irqsave(&st_gdata->lock, flags);
719 for (i = ST_BT; i < ST_MAX_CHANNELS; i++) { 726 for (i = ST_BT; i < ST_MAX_CHANNELS; i++) {
720 if (st_gdata->list[i] != NULL) 727 if (st_gdata->is_registered[i] == true)
721 pr_err("%d not un-registered", i); 728 pr_err("%d not un-registered", i);
722 st_gdata->list[i] = NULL; 729 st_gdata->list[i] = NULL;
730 st_gdata->is_registered[i] = false;
723 } 731 }
724 st_gdata->protos_registered = 0; 732 st_gdata->protos_registered = 0;
725 spin_unlock_irqrestore(&st_gdata->lock, flags); 733 spin_unlock_irqrestore(&st_gdata->lock, flags);
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 38fd2f04c07e..3a3580566dfc 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -68,6 +68,7 @@ void validate_firmware_response(struct kim_data_s *kim_gdata)
68 if (unlikely(skb->data[5] != 0)) { 68 if (unlikely(skb->data[5] != 0)) {
69 pr_err("no proper response during fw download"); 69 pr_err("no proper response during fw download");
70 pr_err("data6 %x", skb->data[5]); 70 pr_err("data6 %x", skb->data[5]);
71 kfree_skb(skb);
71 return; /* keep waiting for the proper response */ 72 return; /* keep waiting for the proper response */
72 } 73 }
73 /* becos of all the script being downloaded */ 74 /* becos of all the script being downloaded */
@@ -210,6 +211,7 @@ static long read_local_version(struct kim_data_s *kim_gdata, char *bts_scr_name)
210 pr_err(" waiting for ver info- timed out "); 211 pr_err(" waiting for ver info- timed out ");
211 return -ETIMEDOUT; 212 return -ETIMEDOUT;
212 } 213 }
214 INIT_COMPLETION(kim_gdata->kim_rcvd);
213 215
214 version = 216 version =
215 MAKEWORD(kim_gdata->resp_buffer[13], 217 MAKEWORD(kim_gdata->resp_buffer[13],
@@ -298,6 +300,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)
298 300
299 switch (((struct bts_action *)ptr)->type) { 301 switch (((struct bts_action *)ptr)->type) {
300 case ACTION_SEND_COMMAND: /* action send */ 302 case ACTION_SEND_COMMAND: /* action send */
303 pr_debug("S");
301 action_ptr = &(((struct bts_action *)ptr)->data[0]); 304 action_ptr = &(((struct bts_action *)ptr)->data[0]);
302 if (unlikely 305 if (unlikely
303 (((struct hci_command *)action_ptr)->opcode == 306 (((struct hci_command *)action_ptr)->opcode ==
@@ -335,6 +338,10 @@ static long download_firmware(struct kim_data_s *kim_gdata)
335 release_firmware(kim_gdata->fw_entry); 338 release_firmware(kim_gdata->fw_entry);
336 return -ETIMEDOUT; 339 return -ETIMEDOUT;
337 } 340 }
341 /* reinit completion before sending for the
342 * relevant wait
343 */
344 INIT_COMPLETION(kim_gdata->kim_rcvd);
338 345
339 /* 346 /*
340 * Free space found in uart buffer, call st_int_write 347 * Free space found in uart buffer, call st_int_write
@@ -361,6 +368,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)
361 } 368 }
362 break; 369 break;
363 case ACTION_WAIT_EVENT: /* wait */ 370 case ACTION_WAIT_EVENT: /* wait */
371 pr_debug("W");
364 if (!wait_for_completion_timeout 372 if (!wait_for_completion_timeout
365 (&kim_gdata->kim_rcvd, 373 (&kim_gdata->kim_rcvd,
366 msecs_to_jiffies(CMD_RESP_TIME))) { 374 msecs_to_jiffies(CMD_RESP_TIME))) {
@@ -434,11 +442,17 @@ long st_kim_start(void *kim_data)
434{ 442{
435 long err = 0; 443 long err = 0;
436 long retry = POR_RETRY_COUNT; 444 long retry = POR_RETRY_COUNT;
445 struct ti_st_plat_data *pdata;
437 struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; 446 struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
438 447
439 pr_info(" %s", __func__); 448 pr_info(" %s", __func__);
449 pdata = kim_gdata->kim_pdev->dev.platform_data;
440 450
441 do { 451 do {
452 /* platform specific enabling code here */
453 if (pdata->chip_enable)
454 pdata->chip_enable(kim_gdata);
455
442 /* Configure BT nShutdown to HIGH state */ 456 /* Configure BT nShutdown to HIGH state */
443 gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); 457 gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
444 mdelay(5); /* FIXME: a proper toggle */ 458 mdelay(5); /* FIXME: a proper toggle */
@@ -460,6 +474,12 @@ long st_kim_start(void *kim_data)
460 pr_info("ldisc_install = 0"); 474 pr_info("ldisc_install = 0");
461 sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, 475 sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
462 NULL, "install"); 476 NULL, "install");
477 /* the following wait is never going to be completed,
478 * since the ldisc was never installed, hence serving
479 * as a mdelay of LDISC_TIME msecs */
480 err = wait_for_completion_timeout
481 (&kim_gdata->ldisc_installed,
482 msecs_to_jiffies(LDISC_TIME));
463 err = -ETIMEDOUT; 483 err = -ETIMEDOUT;
464 continue; 484 continue;
465 } else { 485 } else {
@@ -472,6 +492,13 @@ long st_kim_start(void *kim_data)
472 pr_info("ldisc_install = 0"); 492 pr_info("ldisc_install = 0");
473 sysfs_notify(&kim_gdata->kim_pdev->dev.kobj, 493 sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
474 NULL, "install"); 494 NULL, "install");
495 /* this wait might be completed, though in the
496 * tty_close() since the ldisc is already
497 * installed */
498 err = wait_for_completion_timeout
499 (&kim_gdata->ldisc_installed,
500 msecs_to_jiffies(LDISC_TIME));
501 err = -EINVAL;
475 continue; 502 continue;
476 } else { /* on success don't retry */ 503 } else { /* on success don't retry */
477 break; 504 break;
@@ -489,6 +516,8 @@ long st_kim_stop(void *kim_data)
489{ 516{
490 long err = 0; 517 long err = 0;
491 struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data; 518 struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
519 struct ti_st_plat_data *pdata =
520 kim_gdata->kim_pdev->dev.platform_data;
492 521
493 INIT_COMPLETION(kim_gdata->ldisc_installed); 522 INIT_COMPLETION(kim_gdata->ldisc_installed);
494 523
@@ -515,6 +544,10 @@ long st_kim_stop(void *kim_data)
515 gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); 544 gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
516 mdelay(1); 545 mdelay(1);
517 gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); 546 gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
547
548 /* platform specific disable */
549 if (pdata->chip_disable)
550 pdata->chip_disable(kim_gdata);
518 return err; 551 return err;
519} 552}
520 553
diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c
index 3f2495138855..1ff460a8e9c7 100644
--- a/drivers/misc/ti-st/st_ll.c
+++ b/drivers/misc/ti-st/st_ll.c
@@ -22,6 +22,7 @@
22#define pr_fmt(fmt) "(stll) :" fmt 22#define pr_fmt(fmt) "(stll) :" fmt
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/platform_device.h>
25#include <linux/ti_wilink_st.h> 26#include <linux/ti_wilink_st.h>
26 27
27/**********************************************************************/ 28/**********************************************************************/
@@ -37,6 +38,9 @@ static void send_ll_cmd(struct st_data_s *st_data,
37 38
38static void ll_device_want_to_sleep(struct st_data_s *st_data) 39static void ll_device_want_to_sleep(struct st_data_s *st_data)
39{ 40{
41 struct kim_data_s *kim_data;
42 struct ti_st_plat_data *pdata;
43
40 pr_debug("%s", __func__); 44 pr_debug("%s", __func__);
41 /* sanity check */ 45 /* sanity check */
42 if (st_data->ll_state != ST_LL_AWAKE) 46 if (st_data->ll_state != ST_LL_AWAKE)
@@ -46,10 +50,19 @@ static void ll_device_want_to_sleep(struct st_data_s *st_data)
46 send_ll_cmd(st_data, LL_SLEEP_ACK); 50 send_ll_cmd(st_data, LL_SLEEP_ACK);
47 /* update state */ 51 /* update state */
48 st_data->ll_state = ST_LL_ASLEEP; 52 st_data->ll_state = ST_LL_ASLEEP;
53
54 /* communicate to platform about chip asleep */
55 kim_data = st_data->kim_data;
56 pdata = kim_data->kim_pdev->dev.platform_data;
57 if (pdata->chip_asleep)
58 pdata->chip_asleep(NULL);
49} 59}
50 60
51static void ll_device_want_to_wakeup(struct st_data_s *st_data) 61static void ll_device_want_to_wakeup(struct st_data_s *st_data)
52{ 62{
63 struct kim_data_s *kim_data;
64 struct ti_st_plat_data *pdata;
65
53 /* diff actions in diff states */ 66 /* diff actions in diff states */
54 switch (st_data->ll_state) { 67 switch (st_data->ll_state) {
55 case ST_LL_ASLEEP: 68 case ST_LL_ASLEEP:
@@ -70,6 +83,12 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data)
70 } 83 }
71 /* update state */ 84 /* update state */
72 st_data->ll_state = ST_LL_AWAKE; 85 st_data->ll_state = ST_LL_AWAKE;
86
87 /* communicate to platform about chip wakeup */
88 kim_data = st_data->kim_data;
89 pdata = kim_data->kim_pdev->dev.platform_data;
90 if (pdata->chip_asleep)
91 pdata->chip_awake(NULL);
73} 92}
74 93
75/**********************************************************************/ 94/**********************************************************************/
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 006a5e9f8ab8..2bf229acd3b8 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -224,7 +224,7 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
224static int mmc_test_busy(struct mmc_command *cmd) 224static int mmc_test_busy(struct mmc_command *cmd)
225{ 225{
226 return !(cmd->resp[0] & R1_READY_FOR_DATA) || 226 return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
227 (R1_CURRENT_STATE(cmd->resp[0]) == 7); 227 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
228} 228}
229 229
230/* 230/*
@@ -2900,7 +2900,7 @@ static const struct file_operations mmc_test_fops_testlist = {
2900 .release = single_release, 2900 .release = single_release,
2901}; 2901};
2902 2902
2903static void mmc_test_free_file_test(struct mmc_card *card) 2903static void mmc_test_free_dbgfs_file(struct mmc_card *card)
2904{ 2904{
2905 struct mmc_test_dbgfs_file *df, *dfs; 2905 struct mmc_test_dbgfs_file *df, *dfs;
2906 2906
@@ -2917,34 +2917,21 @@ static void mmc_test_free_file_test(struct mmc_card *card)
2917 mutex_unlock(&mmc_test_lock); 2917 mutex_unlock(&mmc_test_lock);
2918} 2918}
2919 2919
2920static int mmc_test_register_file_test(struct mmc_card *card) 2920static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
2921 const char *name, mode_t mode, const struct file_operations *fops)
2921{ 2922{
2922 struct dentry *file = NULL; 2923 struct dentry *file = NULL;
2923 struct mmc_test_dbgfs_file *df; 2924 struct mmc_test_dbgfs_file *df;
2924 int ret = 0;
2925
2926 mutex_lock(&mmc_test_lock);
2927
2928 if (card->debugfs_root)
2929 file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
2930 card->debugfs_root, card, &mmc_test_fops_test);
2931
2932 if (IS_ERR_OR_NULL(file)) {
2933 dev_err(&card->dev,
2934 "Can't create test. Perhaps debugfs is disabled.\n");
2935 ret = -ENODEV;
2936 goto err;
2937 }
2938 2925
2939 if (card->debugfs_root) 2926 if (card->debugfs_root)
2940 file = debugfs_create_file("testlist", S_IRUGO, 2927 file = debugfs_create_file(name, mode, card->debugfs_root,
2941 card->debugfs_root, card, &mmc_test_fops_testlist); 2928 card, fops);
2942 2929
2943 if (IS_ERR_OR_NULL(file)) { 2930 if (IS_ERR_OR_NULL(file)) {
2944 dev_err(&card->dev, 2931 dev_err(&card->dev,
2945 "Can't create testlist. Perhaps debugfs is disabled.\n"); 2932 "Can't create %s. Perhaps debugfs is disabled.\n",
2946 ret = -ENODEV; 2933 name);
2947 goto err; 2934 return -ENODEV;
2948 } 2935 }
2949 2936
2950 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL); 2937 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
@@ -2952,14 +2939,31 @@ static int mmc_test_register_file_test(struct mmc_card *card)
2952 debugfs_remove(file); 2939 debugfs_remove(file);
2953 dev_err(&card->dev, 2940 dev_err(&card->dev,
2954 "Can't allocate memory for internal usage.\n"); 2941 "Can't allocate memory for internal usage.\n");
2955 ret = -ENOMEM; 2942 return -ENOMEM;
2956 goto err;
2957 } 2943 }
2958 2944
2959 df->card = card; 2945 df->card = card;
2960 df->file = file; 2946 df->file = file;
2961 2947
2962 list_add(&df->link, &mmc_test_file_test); 2948 list_add(&df->link, &mmc_test_file_test);
2949 return 0;
2950}
2951
2952static int mmc_test_register_dbgfs_file(struct mmc_card *card)
2953{
2954 int ret;
2955
2956 mutex_lock(&mmc_test_lock);
2957
2958 ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
2959 &mmc_test_fops_test);
2960 if (ret)
2961 goto err;
2962
2963 ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
2964 &mmc_test_fops_testlist);
2965 if (ret)
2966 goto err;
2963 2967
2964err: 2968err:
2965 mutex_unlock(&mmc_test_lock); 2969 mutex_unlock(&mmc_test_lock);
@@ -2974,7 +2978,7 @@ static int mmc_test_probe(struct mmc_card *card)
2974 if (!mmc_card_mmc(card) && !mmc_card_sd(card)) 2978 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
2975 return -ENODEV; 2979 return -ENODEV;
2976 2980
2977 ret = mmc_test_register_file_test(card); 2981 ret = mmc_test_register_dbgfs_file(card);
2978 if (ret) 2982 if (ret)
2979 return ret; 2983 return ret;
2980 2984
@@ -2986,7 +2990,7 @@ static int mmc_test_probe(struct mmc_card *card)
2986static void mmc_test_remove(struct mmc_card *card) 2990static void mmc_test_remove(struct mmc_card *card)
2987{ 2991{
2988 mmc_test_free_result(card); 2992 mmc_test_free_result(card);
2989 mmc_test_free_file_test(card); 2993 mmc_test_free_dbgfs_file(card);
2990} 2994}
2991 2995
2992static struct mmc_driver mmc_driver = { 2996static struct mmc_driver mmc_driver = {
@@ -3006,7 +3010,7 @@ static void __exit mmc_test_exit(void)
3006{ 3010{
3007 /* Clear stalled data if card is still plugged */ 3011 /* Clear stalled data if card is still plugged */
3008 mmc_test_free_result(NULL); 3012 mmc_test_free_result(NULL);
3009 mmc_test_free_file_test(NULL); 3013 mmc_test_free_dbgfs_file(NULL);
3010 3014
3011 mmc_unregister_driver(&mmc_driver); 3015 mmc_unregister_driver(&mmc_driver);
3012} 3016}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 89bdeaec7182..b27b94078c21 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -133,7 +133,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
133 if (mrq->done) 133 if (mrq->done)
134 mrq->done(mrq); 134 mrq->done(mrq);
135 135
136 mmc_host_clk_gate(host); 136 mmc_host_clk_release(host);
137 } 137 }
138} 138}
139 139
@@ -192,7 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
192 mrq->stop->mrq = mrq; 192 mrq->stop->mrq = mrq;
193 } 193 }
194 } 194 }
195 mmc_host_clk_ungate(host); 195 mmc_host_clk_hold(host);
196 led_trigger_event(host->led, LED_FULL); 196 led_trigger_event(host->led, LED_FULL);
197 host->ops->request(host, mrq); 197 host->ops->request(host, mrq);
198} 198}
@@ -728,15 +728,17 @@ static inline void mmc_set_ios(struct mmc_host *host)
728 */ 728 */
729void mmc_set_chip_select(struct mmc_host *host, int mode) 729void mmc_set_chip_select(struct mmc_host *host, int mode)
730{ 730{
731 mmc_host_clk_hold(host);
731 host->ios.chip_select = mode; 732 host->ios.chip_select = mode;
732 mmc_set_ios(host); 733 mmc_set_ios(host);
734 mmc_host_clk_release(host);
733} 735}
734 736
735/* 737/*
736 * Sets the host clock to the highest possible frequency that 738 * Sets the host clock to the highest possible frequency that
737 * is below "hz". 739 * is below "hz".
738 */ 740 */
739void mmc_set_clock(struct mmc_host *host, unsigned int hz) 741static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
740{ 742{
741 WARN_ON(hz < host->f_min); 743 WARN_ON(hz < host->f_min);
742 744
@@ -747,6 +749,13 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
747 mmc_set_ios(host); 749 mmc_set_ios(host);
748} 750}
749 751
752void mmc_set_clock(struct mmc_host *host, unsigned int hz)
753{
754 mmc_host_clk_hold(host);
755 __mmc_set_clock(host, hz);
756 mmc_host_clk_release(host);
757}
758
750#ifdef CONFIG_MMC_CLKGATE 759#ifdef CONFIG_MMC_CLKGATE
751/* 760/*
752 * This gates the clock by setting it to 0 Hz. 761 * This gates the clock by setting it to 0 Hz.
@@ -779,7 +788,7 @@ void mmc_ungate_clock(struct mmc_host *host)
779 if (host->clk_old) { 788 if (host->clk_old) {
780 BUG_ON(host->ios.clock); 789 BUG_ON(host->ios.clock);
781 /* This call will also set host->clk_gated to false */ 790 /* This call will also set host->clk_gated to false */
782 mmc_set_clock(host, host->clk_old); 791 __mmc_set_clock(host, host->clk_old);
783 } 792 }
784} 793}
785 794
@@ -807,8 +816,10 @@ void mmc_set_ungated(struct mmc_host *host)
807 */ 816 */
808void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) 817void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
809{ 818{
819 mmc_host_clk_hold(host);
810 host->ios.bus_mode = mode; 820 host->ios.bus_mode = mode;
811 mmc_set_ios(host); 821 mmc_set_ios(host);
822 mmc_host_clk_release(host);
812} 823}
813 824
814/* 825/*
@@ -816,8 +827,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
816 */ 827 */
817void mmc_set_bus_width(struct mmc_host *host, unsigned int width) 828void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
818{ 829{
830 mmc_host_clk_hold(host);
819 host->ios.bus_width = width; 831 host->ios.bus_width = width;
820 mmc_set_ios(host); 832 mmc_set_ios(host);
833 mmc_host_clk_release(host);
821} 834}
822 835
823/** 836/**
@@ -1015,8 +1028,10 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1015 1028
1016 ocr &= 3 << bit; 1029 ocr &= 3 << bit;
1017 1030
1031 mmc_host_clk_hold(host);
1018 host->ios.vdd = bit; 1032 host->ios.vdd = bit;
1019 mmc_set_ios(host); 1033 mmc_set_ios(host);
1034 mmc_host_clk_release(host);
1020 } else { 1035 } else {
1021 pr_warning("%s: host doesn't support card's voltages\n", 1036 pr_warning("%s: host doesn't support card's voltages\n",
1022 mmc_hostname(host)); 1037 mmc_hostname(host));
@@ -1063,8 +1078,10 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
1063 */ 1078 */
1064void mmc_set_timing(struct mmc_host *host, unsigned int timing) 1079void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1065{ 1080{
1081 mmc_host_clk_hold(host);
1066 host->ios.timing = timing; 1082 host->ios.timing = timing;
1067 mmc_set_ios(host); 1083 mmc_set_ios(host);
1084 mmc_host_clk_release(host);
1068} 1085}
1069 1086
1070/* 1087/*
@@ -1072,8 +1089,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1072 */ 1089 */
1073void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) 1090void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1074{ 1091{
1092 mmc_host_clk_hold(host);
1075 host->ios.drv_type = drv_type; 1093 host->ios.drv_type = drv_type;
1076 mmc_set_ios(host); 1094 mmc_set_ios(host);
1095 mmc_host_clk_release(host);
1077} 1096}
1078 1097
1079/* 1098/*
@@ -1091,6 +1110,8 @@ static void mmc_power_up(struct mmc_host *host)
1091{ 1110{
1092 int bit; 1111 int bit;
1093 1112
1113 mmc_host_clk_hold(host);
1114
1094 /* If ocr is set, we use it */ 1115 /* If ocr is set, we use it */
1095 if (host->ocr) 1116 if (host->ocr)
1096 bit = ffs(host->ocr) - 1; 1117 bit = ffs(host->ocr) - 1;
@@ -1126,10 +1147,14 @@ static void mmc_power_up(struct mmc_host *host)
1126 * time required to reach a stable voltage. 1147 * time required to reach a stable voltage.
1127 */ 1148 */
1128 mmc_delay(10); 1149 mmc_delay(10);
1150
1151 mmc_host_clk_release(host);
1129} 1152}
1130 1153
1131static void mmc_power_off(struct mmc_host *host) 1154static void mmc_power_off(struct mmc_host *host)
1132{ 1155{
1156 mmc_host_clk_hold(host);
1157
1133 host->ios.clock = 0; 1158 host->ios.clock = 0;
1134 host->ios.vdd = 0; 1159 host->ios.vdd = 0;
1135 1160
@@ -1147,6 +1172,8 @@ static void mmc_power_off(struct mmc_host *host)
1147 host->ios.bus_width = MMC_BUS_WIDTH_1; 1172 host->ios.bus_width = MMC_BUS_WIDTH_1;
1148 host->ios.timing = MMC_TIMING_LEGACY; 1173 host->ios.timing = MMC_TIMING_LEGACY;
1149 mmc_set_ios(host); 1174 mmc_set_ios(host);
1175
1176 mmc_host_clk_release(host);
1150} 1177}
1151 1178
1152/* 1179/*
@@ -1502,7 +1529,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1502 goto out; 1529 goto out;
1503 } 1530 }
1504 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || 1531 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
1505 R1_CURRENT_STATE(cmd.resp[0]) == 7); 1532 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
1506out: 1533out:
1507 return err; 1534 return err;
1508} 1535}
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index b29d3e8fd3a2..793d0a0dad8d 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -119,14 +119,14 @@ static void mmc_host_clk_gate_work(struct work_struct *work)
119} 119}
120 120
121/** 121/**
122 * mmc_host_clk_ungate - ungate hardware MCI clocks 122 * mmc_host_clk_hold - ungate hardware MCI clocks
123 * @host: host to ungate. 123 * @host: host to ungate.
124 * 124 *
125 * Makes sure the host ios.clock is restored to a non-zero value 125 * Makes sure the host ios.clock is restored to a non-zero value
126 * past this call. Increase clock reference count and ungate clock 126 * past this call. Increase clock reference count and ungate clock
127 * if we're the first user. 127 * if we're the first user.
128 */ 128 */
129void mmc_host_clk_ungate(struct mmc_host *host) 129void mmc_host_clk_hold(struct mmc_host *host)
130{ 130{
131 unsigned long flags; 131 unsigned long flags;
132 132
@@ -164,14 +164,14 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
164} 164}
165 165
166/** 166/**
167 * mmc_host_clk_gate - gate off hardware MCI clocks 167 * mmc_host_clk_release - gate off hardware MCI clocks
168 * @host: host to gate. 168 * @host: host to gate.
169 * 169 *
170 * Calls the host driver with ios.clock set to zero as often as possible 170 * Calls the host driver with ios.clock set to zero as often as possible
171 * in order to gate off hardware MCI clocks. Decrease clock reference 171 * in order to gate off hardware MCI clocks. Decrease clock reference
172 * count and schedule disabling of clock. 172 * count and schedule disabling of clock.
173 */ 173 */
174void mmc_host_clk_gate(struct mmc_host *host) 174void mmc_host_clk_release(struct mmc_host *host)
175{ 175{
176 unsigned long flags; 176 unsigned long flags;
177 177
@@ -179,7 +179,7 @@ void mmc_host_clk_gate(struct mmc_host *host)
179 host->clk_requests--; 179 host->clk_requests--;
180 if (mmc_host_may_gate_card(host->card) && 180 if (mmc_host_may_gate_card(host->card) &&
181 !host->clk_requests) 181 !host->clk_requests)
182 schedule_work(&host->clk_gate_work); 182 queue_work(system_nrt_wq, &host->clk_gate_work);
183 spin_unlock_irqrestore(&host->clk_lock, flags); 183 spin_unlock_irqrestore(&host->clk_lock, flags);
184} 184}
185 185
@@ -231,7 +231,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
231 if (cancel_work_sync(&host->clk_gate_work)) 231 if (cancel_work_sync(&host->clk_gate_work))
232 mmc_host_clk_gate_delayed(host); 232 mmc_host_clk_gate_delayed(host);
233 if (host->clk_gated) 233 if (host->clk_gated)
234 mmc_host_clk_ungate(host); 234 mmc_host_clk_hold(host);
235 /* There should be only one user now */ 235 /* There should be only one user now */
236 WARN_ON(host->clk_requests > 1); 236 WARN_ON(host->clk_requests > 1);
237} 237}
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index de199f911928..fb8a5cd2e4a1 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -16,16 +16,16 @@ int mmc_register_host_class(void);
16void mmc_unregister_host_class(void); 16void mmc_unregister_host_class(void);
17 17
18#ifdef CONFIG_MMC_CLKGATE 18#ifdef CONFIG_MMC_CLKGATE
19void mmc_host_clk_ungate(struct mmc_host *host); 19void mmc_host_clk_hold(struct mmc_host *host);
20void mmc_host_clk_gate(struct mmc_host *host); 20void mmc_host_clk_release(struct mmc_host *host);
21unsigned int mmc_host_clk_rate(struct mmc_host *host); 21unsigned int mmc_host_clk_rate(struct mmc_host *host);
22 22
23#else 23#else
24static inline void mmc_host_clk_ungate(struct mmc_host *host) 24static inline void mmc_host_clk_hold(struct mmc_host *host)
25{ 25{
26} 26}
27 27
28static inline void mmc_host_clk_gate(struct mmc_host *host) 28static inline void mmc_host_clk_release(struct mmc_host *host)
29{ 29{
30} 30}
31 31
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index aa7d1d79b8c5..5700b1cbdfec 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -259,7 +259,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
259 } 259 }
260 260
261 card->ext_csd.rev = ext_csd[EXT_CSD_REV]; 261 card->ext_csd.rev = ext_csd[EXT_CSD_REV];
262 if (card->ext_csd.rev > 5) { 262 if (card->ext_csd.rev > 6) {
263 printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n", 263 printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
264 mmc_hostname(card->host), card->ext_csd.rev); 264 mmc_hostname(card->host), card->ext_csd.rev);
265 err = -EINVAL; 265 err = -EINVAL;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 845ce7c533b9..770c3d06f5dc 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -407,7 +407,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
407 break; 407 break;
408 if (mmc_host_is_spi(card->host)) 408 if (mmc_host_is_spi(card->host))
409 break; 409 break;
410 } while (R1_CURRENT_STATE(status) == 7); 410 } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
411 411
412 if (mmc_host_is_spi(card->host)) { 412 if (mmc_host_is_spi(card->host)) {
413 if (status & R1_SPI_ILLEGAL_COMMAND) 413 if (status & R1_SPI_ILLEGAL_COMMAND)
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 633975ff2bb3..0370e03e3142 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -469,56 +469,75 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
469 return 0; 469 return 0;
470} 470}
471 471
472static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) 472static void sd_update_bus_speed_mode(struct mmc_card *card)
473{ 473{
474 unsigned int bus_speed = 0, timing = 0;
475 int err;
476
477 /* 474 /*
478 * If the host doesn't support any of the UHS-I modes, fallback on 475 * If the host doesn't support any of the UHS-I modes, fallback on
479 * default speed. 476 * default speed.
480 */ 477 */
481 if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | 478 if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
482 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) 479 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) {
483 return 0; 480 card->sd_bus_speed = 0;
481 return;
482 }
484 483
485 if ((card->host->caps & MMC_CAP_UHS_SDR104) && 484 if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
486 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) { 485 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
487 bus_speed = UHS_SDR104_BUS_SPEED; 486 card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
488 timing = MMC_TIMING_UHS_SDR104;
489 card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
490 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) && 487 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
491 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) { 488 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
492 bus_speed = UHS_DDR50_BUS_SPEED; 489 card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
493 timing = MMC_TIMING_UHS_DDR50;
494 card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
495 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 490 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
496 MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode & 491 MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
497 SD_MODE_UHS_SDR50)) { 492 SD_MODE_UHS_SDR50)) {
498 bus_speed = UHS_SDR50_BUS_SPEED; 493 card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
499 timing = MMC_TIMING_UHS_SDR50;
500 card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
501 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 494 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
502 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) && 495 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
503 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) { 496 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
504 bus_speed = UHS_SDR25_BUS_SPEED; 497 card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
505 timing = MMC_TIMING_UHS_SDR25;
506 card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
507 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 498 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
508 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 | 499 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
509 MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode & 500 MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
510 SD_MODE_UHS_SDR12)) { 501 SD_MODE_UHS_SDR12)) {
511 bus_speed = UHS_SDR12_BUS_SPEED; 502 card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
512 timing = MMC_TIMING_UHS_SDR12; 503 }
513 card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; 504}
505
506static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
507{
508 int err;
509 unsigned int timing = 0;
510
511 switch (card->sd_bus_speed) {
512 case UHS_SDR104_BUS_SPEED:
513 timing = MMC_TIMING_UHS_SDR104;
514 card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
515 break;
516 case UHS_DDR50_BUS_SPEED:
517 timing = MMC_TIMING_UHS_DDR50;
518 card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
519 break;
520 case UHS_SDR50_BUS_SPEED:
521 timing = MMC_TIMING_UHS_SDR50;
522 card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
523 break;
524 case UHS_SDR25_BUS_SPEED:
525 timing = MMC_TIMING_UHS_SDR25;
526 card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
527 break;
528 case UHS_SDR12_BUS_SPEED:
529 timing = MMC_TIMING_UHS_SDR12;
530 card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
531 break;
532 default:
533 return 0;
514 } 534 }
515 535
516 card->sd_bus_speed = bus_speed; 536 err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
517 err = mmc_sd_switch(card, 1, 0, bus_speed, status);
518 if (err) 537 if (err)
519 return err; 538 return err;
520 539
521 if ((status[16] & 0xF) != bus_speed) 540 if ((status[16] & 0xF) != card->sd_bus_speed)
522 printk(KERN_WARNING "%s: Problem setting bus speed mode!\n", 541 printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
523 mmc_hostname(card->host)); 542 mmc_hostname(card->host));
524 else { 543 else {
@@ -618,18 +637,24 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
618 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); 637 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
619 } 638 }
620 639
640 /*
641 * Select the bus speed mode depending on host
642 * and card capability.
643 */
644 sd_update_bus_speed_mode(card);
645
621 /* Set the driver strength for the card */ 646 /* Set the driver strength for the card */
622 err = sd_select_driver_type(card, status); 647 err = sd_select_driver_type(card, status);
623 if (err) 648 if (err)
624 goto out; 649 goto out;
625 650
626 /* Set bus speed mode of the card */ 651 /* Set current limit for the card */
627 err = sd_set_bus_speed_mode(card, status); 652 err = sd_set_current_limit(card, status);
628 if (err) 653 if (err)
629 goto out; 654 goto out;
630 655
631 /* Set current limit for the card */ 656 /* Set bus speed mode of the card */
632 err = sd_set_current_limit(card, status); 657 err = sd_set_bus_speed_mode(card, status);
633 if (err) 658 if (err)
634 goto out; 659 goto out;
635 660
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 77f0b6b1681d..ff0f714b012c 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -62,7 +62,7 @@ struct idmac_desc {
62 62
63 u32 des1; /* Buffer sizes */ 63 u32 des1; /* Buffer sizes */
64#define IDMAC_SET_BUFFER1_SIZE(d, s) \ 64#define IDMAC_SET_BUFFER1_SIZE(d, s) \
65 ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff)) 65 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
66 66
67 u32 des2; /* buffer 1 physical address */ 67 u32 des2; /* buffer 1 physical address */
68 68
@@ -699,7 +699,7 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
699 } 699 }
700 700
701 /* DDR mode set */ 701 /* DDR mode set */
702 if (ios->ddr) { 702 if (ios->timing == MMC_TIMING_UHS_DDR50) {
703 regs = mci_readl(slot->host, UHS_REG); 703 regs = mci_readl(slot->host, UHS_REG);
704 regs |= (0x1 << slot->id) << 16; 704 regs |= (0x1 << slot->id) << 16;
705 mci_writel(slot->host, UHS_REG, regs); 705 mci_writel(slot->host, UHS_REG, regs);
@@ -1646,7 +1646,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1646 mmc->caps |= MMC_CAP_4_BIT_DATA; 1646 mmc->caps |= MMC_CAP_4_BIT_DATA;
1647 1647
1648 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) 1648 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
1649 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 1649 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
1650 1650
1651#ifdef CONFIG_MMC_DW_IDMAC 1651#ifdef CONFIG_MMC_DW_IDMAC
1652 mmc->max_segs = host->ring_size; 1652 mmc->max_segs = host->ring_size;
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 9ebfb4b482f5..4dc0028086a3 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -16,6 +16,7 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/module.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/mmc/host.h> 21#include <linux/mmc/host.h>
21#include <linux/mmc/mmc.h> 22#include <linux/mmc/mmc.h>
@@ -27,6 +28,7 @@
27#include "sdhci-pltfm.h" 28#include "sdhci-pltfm.h"
28#include "sdhci-esdhc.h" 29#include "sdhci-esdhc.h"
29 30
31#define SDHCI_CTRL_D3CD 0x08
30/* VENDOR SPEC register */ 32/* VENDOR SPEC register */
31#define SDHCI_VENDOR_SPEC 0xC0 33#define SDHCI_VENDOR_SPEC 0xC0
32#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 34#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
@@ -141,13 +143,32 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
141 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 143 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
142 struct pltfm_imx_data *imx_data = pltfm_host->priv; 144 struct pltfm_imx_data *imx_data = pltfm_host->priv;
143 struct esdhc_platform_data *boarddata = &imx_data->boarddata; 145 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
144 146 u32 data;
145 if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) 147
146 && (boarddata->cd_type == ESDHC_CD_GPIO))) 148 if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
147 /* 149 if (boarddata->cd_type == ESDHC_CD_GPIO)
148 * these interrupts won't work with a custom card_detect gpio 150 /*
149 */ 151 * These interrupts won't work with a custom
150 val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 152 * card_detect gpio (only applied to mx25/35)
153 */
154 val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
155
156 if (val & SDHCI_INT_CARD_INT) {
157 /*
158 * Clear and then set D3CD bit to avoid missing the
159 * card interrupt. This is a eSDHC controller problem
160 * so we need to apply the following workaround: clear
161 * and set D3CD bit will make eSDHC re-sample the card
162 * interrupt. In case a card interrupt was lost,
163 * re-sample it by the following steps.
164 */
165 data = readl(host->ioaddr + SDHCI_HOST_CONTROL);
166 data &= ~SDHCI_CTRL_D3CD;
167 writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
168 data |= SDHCI_CTRL_D3CD;
169 writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
170 }
171 }
151 172
152 if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) 173 if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
153 && (reg == SDHCI_INT_STATUS) 174 && (reg == SDHCI_INT_STATUS)
@@ -217,8 +238,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
217 */ 238 */
218 return; 239 return;
219 case SDHCI_HOST_CONTROL: 240 case SDHCI_HOST_CONTROL:
220 /* FSL messed up here, so we can just keep those two */ 241 /* FSL messed up here, so we can just keep those three */
221 new_val = val & (SDHCI_CTRL_LED | SDHCI_CTRL_4BITBUS); 242 new_val = val & (SDHCI_CTRL_LED | \
243 SDHCI_CTRL_4BITBUS | \
244 SDHCI_CTRL_D3CD);
222 /* ensure the endianess */ 245 /* ensure the endianess */
223 new_val |= ESDHC_HOST_CONTROL_LE; 246 new_val |= ESDHC_HOST_CONTROL_LE;
224 /* DMA mode bits are shifted */ 247 /* DMA mode bits are shifted */
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 4198dbbc5c20..fc7e4a515629 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -195,7 +195,8 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
195 clk_enable(clk); 195 clk_enable(clk);
196 196
197 host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL 197 host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
198 | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; 198 | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
199 | SDHCI_QUIRK_32BIT_ADMA_SIZE;
199 200
200 /* enable 1/8V DDR capable */ 201 /* enable 1/8V DDR capable */
201 host->mmc->caps |= MMC_CAP_1_8V_DDR; 202 host->mmc->caps |= MMC_CAP_1_8V_DDR;
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 460ffaf0f6d7..fe886d6c474a 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -19,6 +19,7 @@
19#include <linux/clk.h> 19#include <linux/clk.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/gpio.h> 21#include <linux/gpio.h>
22#include <linux/module.h>
22 23
23#include <linux/mmc/host.h> 24#include <linux/mmc/host.h>
24 25
@@ -301,6 +302,8 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
301 ctrl &= ~SDHCI_CTRL_8BITBUS; 302 ctrl &= ~SDHCI_CTRL_8BITBUS;
302 break; 303 break;
303 default: 304 default:
305 ctrl &= ~SDHCI_CTRL_4BITBUS;
306 ctrl &= ~SDHCI_CTRL_8BITBUS;
304 break; 307 break;
305 } 308 }
306 309
@@ -502,6 +505,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
502 /* This host supports the Auto CMD12 */ 505 /* This host supports the Auto CMD12 */
503 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; 506 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
504 507
508 /* Samsung SoCs need BROKEN_ADMA_ZEROLEN_DESC */
509 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC;
510
505 if (pdata->cd_type == S3C_SDHCI_CD_NONE || 511 if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
506 pdata->cd_type == S3C_SDHCI_CD_PERMANENT) 512 pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
507 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; 513 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index c31a3343340d..0e02cc1df12e 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -628,12 +628,11 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
628 /* timeout in us */ 628 /* timeout in us */
629 if (!data) 629 if (!data)
630 target_timeout = cmd->cmd_timeout_ms * 1000; 630 target_timeout = cmd->cmd_timeout_ms * 1000;
631 else 631 else {
632 target_timeout = data->timeout_ns / 1000 + 632 target_timeout = data->timeout_ns / 1000;
633 data->timeout_clks / host->clock; 633 if (host->clock)
634 634 target_timeout += data->timeout_clks / host->clock;
635 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) 635 }
636 host->timeout_clk = host->clock / 1000;
637 636
638 /* 637 /*
639 * Figure out needed cycles. 638 * Figure out needed cycles.
@@ -645,7 +644,6 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
645 * => 644 * =>
646 * (1) / (2) > 2^6 645 * (1) / (2) > 2^6
647 */ 646 */
648 BUG_ON(!host->timeout_clk);
649 count = 0; 647 count = 0;
650 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 648 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
651 while (current_timeout < target_timeout) { 649 while (current_timeout < target_timeout) {
@@ -1867,9 +1865,6 @@ static void sdhci_tasklet_finish(unsigned long param)
1867 1865
1868 del_timer(&host->timer); 1866 del_timer(&host->timer);
1869 1867
1870 if (host->version >= SDHCI_SPEC_300)
1871 del_timer(&host->tuning_timer);
1872
1873 mrq = host->mrq; 1868 mrq = host->mrq;
1874 1869
1875 /* 1870 /*
@@ -2461,22 +2456,6 @@ int sdhci_add_host(struct sdhci_host *host)
2461 host->max_clk = host->ops->get_max_clock(host); 2456 host->max_clk = host->ops->get_max_clock(host);
2462 } 2457 }
2463 2458
2464 host->timeout_clk =
2465 (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
2466 if (host->timeout_clk == 0) {
2467 if (host->ops->get_timeout_clock) {
2468 host->timeout_clk = host->ops->get_timeout_clock(host);
2469 } else if (!(host->quirks &
2470 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
2471 printk(KERN_ERR
2472 "%s: Hardware doesn't specify timeout clock "
2473 "frequency.\n", mmc_hostname(mmc));
2474 return -ENODEV;
2475 }
2476 }
2477 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
2478 host->timeout_clk *= 1000;
2479
2480 /* 2459 /*
2481 * In case of Host Controller v3.00, find out whether clock 2460 * In case of Host Controller v3.00, find out whether clock
2482 * multiplier is supported. 2461 * multiplier is supported.
@@ -2509,10 +2488,26 @@ int sdhci_add_host(struct sdhci_host *host)
2509 } else 2488 } else
2510 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 2489 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
2511 2490
2491 host->timeout_clk =
2492 (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
2493 if (host->timeout_clk == 0) {
2494 if (host->ops->get_timeout_clock) {
2495 host->timeout_clk = host->ops->get_timeout_clock(host);
2496 } else if (!(host->quirks &
2497 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
2498 printk(KERN_ERR
2499 "%s: Hardware doesn't specify timeout clock "
2500 "frequency.\n", mmc_hostname(mmc));
2501 return -ENODEV;
2502 }
2503 }
2504 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
2505 host->timeout_clk *= 1000;
2506
2512 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) 2507 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
2513 mmc->max_discard_to = (1 << 27) / (mmc->f_max / 1000); 2508 host->timeout_clk = mmc->f_max / 1000;
2514 else 2509
2515 mmc->max_discard_to = (1 << 27) / host->timeout_clk; 2510 mmc->max_discard_to = (1 << 27) / host->timeout_clk;
2516 2511
2517 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; 2512 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
2518 2513
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 774f6439d7ce..0c4a672f5db6 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -120,11 +120,11 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
120 mmc_data->hclk = clk_get_rate(priv->clk); 120 mmc_data->hclk = clk_get_rate(priv->clk);
121 mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; 121 mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
122 mmc_data->get_cd = sh_mobile_sdhi_get_cd; 122 mmc_data->get_cd = sh_mobile_sdhi_get_cd;
123 if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
124 mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
125 mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; 123 mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
126 if (p) { 124 if (p) {
127 mmc_data->flags = p->tmio_flags; 125 mmc_data->flags = p->tmio_flags;
126 if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
127 mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
128 mmc_data->ocr_mask = p->tmio_ocr_mask; 128 mmc_data->ocr_mask = p->tmio_ocr_mask;
129 mmc_data->capabilities |= p->tmio_caps; 129 mmc_data->capabilities |= p->tmio_caps;
130 130
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 8d185de90d20..44a9668c4b7a 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -27,7 +27,6 @@
27static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) 27static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
28{ 28{
29 const struct mfd_cell *cell = mfd_get_cell(dev); 29 const struct mfd_cell *cell = mfd_get_cell(dev);
30 struct mmc_host *mmc = platform_get_drvdata(dev);
31 int ret; 30 int ret;
32 31
33 ret = tmio_mmc_host_suspend(&dev->dev); 32 ret = tmio_mmc_host_suspend(&dev->dev);
@@ -42,7 +41,6 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
42static int tmio_mmc_resume(struct platform_device *dev) 41static int tmio_mmc_resume(struct platform_device *dev)
43{ 42{
44 const struct mfd_cell *cell = mfd_get_cell(dev); 43 const struct mfd_cell *cell = mfd_get_cell(dev);
45 struct mmc_host *mmc = platform_get_drvdata(dev);
46 int ret = 0; 44 int ret = 0;
47 45
48 /* Tell the MFD core we are ready to be enabled */ 46 /* Tell the MFD core we are ready to be enabled */
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 65b5b76cc379..64fbb0021825 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -181,7 +181,7 @@ static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
181 181
182#define ubi_dbg_msg(fmt, ...) do { \ 182#define ubi_dbg_msg(fmt, ...) do { \
183 if (0) \ 183 if (0) \
184 pr_debug(fmt "\n", ##__VA_ARGS__); \ 184 printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
185} while (0) 185} while (0)
186 186
187#define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__) 187#define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 52fe21e1e2cd..3b1416e3d217 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -308,8 +308,11 @@ static void am79c961_timer(unsigned long data)
308 struct net_device *dev = (struct net_device *)data; 308 struct net_device *dev = (struct net_device *)data;
309 struct dev_priv *priv = netdev_priv(dev); 309 struct dev_priv *priv = netdev_priv(dev);
310 unsigned int lnkstat, carrier; 310 unsigned int lnkstat, carrier;
311 unsigned long flags;
311 312
313 spin_lock_irqsave(&priv->chip_lock, flags);
312 lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST; 314 lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
315 spin_unlock_irqrestore(&priv->chip_lock, flags);
313 carrier = netif_carrier_ok(dev); 316 carrier = netif_carrier_ok(dev);
314 317
315 if (lnkstat && !carrier) { 318 if (lnkstat && !carrier) {
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 5b0dba6d4efa..37e5790681ad 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -63,8 +63,9 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
63 fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0); 63 fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
64 64
65#ifdef BCM_CNIC 65#ifdef BCM_CNIC
66 /* We don't want TPA on FCoE, FWD and OOO L2 rings */ 66 /* We don't want TPA on an FCoE L2 ring */
67 bnx2x_fcoe(bp, disable_tpa) = 1; 67 if (IS_FCOE_FP(fp))
68 fp->disable_tpa = 1;
68#endif 69#endif
69} 70}
70 71
@@ -1404,10 +1405,9 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1404u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) 1405u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1405{ 1406{
1406 struct bnx2x *bp = netdev_priv(dev); 1407 struct bnx2x *bp = netdev_priv(dev);
1408
1407#ifdef BCM_CNIC 1409#ifdef BCM_CNIC
1408 if (NO_FCOE(bp)) 1410 if (!NO_FCOE(bp)) {
1409 return skb_tx_hash(dev, skb);
1410 else {
1411 struct ethhdr *hdr = (struct ethhdr *)skb->data; 1411 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1412 u16 ether_type = ntohs(hdr->h_proto); 1412 u16 ether_type = ntohs(hdr->h_proto);
1413 1413
@@ -1424,8 +1424,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1424 return bnx2x_fcoe_tx(bp, txq_index); 1424 return bnx2x_fcoe_tx(bp, txq_index);
1425 } 1425 }
1426#endif 1426#endif
1427 /* Select a none-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring 1427 /* select a non-FCoE queue */
1428 */
1429 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); 1428 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1430} 1429}
1431 1430
@@ -1448,6 +1447,28 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
1448 bp->num_queues += NON_ETH_CONTEXT_USE; 1447 bp->num_queues += NON_ETH_CONTEXT_USE;
1449} 1448}
1450 1449
1450/**
1451 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1452 *
1453 * @bp: Driver handle
1454 *
1455 * We currently support for at most 16 Tx queues for each CoS thus we will
1456 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1457 * bp->max_cos.
1458 *
1459 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1460 * index after all ETH L2 indices.
1461 *
1462 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1463 * will be the holes at the end of each group of 16 ETh L2 indices (0..15,
1464 * 16..31,...) with indicies that are not coupled with any real Tx queue.
1465 *
1466 * The proper configuration of skb->queue_mapping is handled by
1467 * bnx2x_select_queue() and __skb_tx_hash().
1468 *
1469 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1470 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1471 */
1451static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) 1472static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1452{ 1473{
1453 int rc, tx, rx; 1474 int rc, tx, rx;
@@ -1989,14 +2010,20 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1989 return -EINVAL; 2010 return -EINVAL;
1990 } 2011 }
1991 2012
2013 /*
2014 * It's important to set the bp->state to the value different from
2015 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2016 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2017 */
2018 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2019 smp_mb();
2020
1992 /* Stop Tx */ 2021 /* Stop Tx */
1993 bnx2x_tx_disable(bp); 2022 bnx2x_tx_disable(bp);
1994 2023
1995#ifdef BCM_CNIC 2024#ifdef BCM_CNIC
1996 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); 2025 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1997#endif 2026#endif
1998 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1999 smp_mb();
2000 2027
2001 bp->rx_mode = BNX2X_RX_MODE_NONE; 2028 bp->rx_mode = BNX2X_RX_MODE_NONE;
2002 2029
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index a4ea35f6a456..a1e004a82f7a 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -920,7 +920,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
920 920
921void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) 921void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
922{ 922{
923 if (!CHIP_IS_E1x(bp)) { 923 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
924 bp->dcb_state = dcb_on; 924 bp->dcb_state = dcb_on;
925 bp->dcbx_enabled = dcbx_enabled; 925 bp->dcbx_enabled = dcbx_enabled;
926 } else { 926 } else {
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 06727f32e505..dc24de40e336 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -1204,6 +1204,8 @@ struct drv_port_mb {
1204 1204
1205 #define LINK_STATUS_PFC_ENABLED 0x20000000 1205 #define LINK_STATUS_PFC_ENABLED 0x20000000
1206 1206
1207 #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000
1208
1207 u32 port_stx; 1209 u32 port_stx;
1208 1210
1209 u32 stat_nig_timer; 1211 u32 stat_nig_timer;
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index bcd8f0038628..d45b1555a602 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1546,6 +1546,12 @@ static void bnx2x_umac_enable(struct link_params *params,
1546 vars->line_speed); 1546 vars->line_speed);
1547 break; 1547 break;
1548 } 1548 }
1549 if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
1550 val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE;
1551
1552 if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
1553 val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE;
1554
1549 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); 1555 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
1550 udelay(50); 1556 udelay(50);
1551 1557
@@ -1661,10 +1667,20 @@ static void bnx2x_xmac_disable(struct link_params *params)
1661{ 1667{
1662 u8 port = params->port; 1668 u8 port = params->port;
1663 struct bnx2x *bp = params->bp; 1669 struct bnx2x *bp = params->bp;
1664 u32 xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; 1670 u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
1665 1671
1666 if (REG_RD(bp, MISC_REG_RESET_REG_2) & 1672 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
1667 MISC_REGISTERS_RESET_REG_2_XMAC) { 1673 MISC_REGISTERS_RESET_REG_2_XMAC) {
1674 /*
1675 * Send an indication to change the state in the NIG back to XON
1676 * Clearing this bit enables the next set of this bit to get
1677 * rising edge
1678 */
1679 pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI);
1680 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
1681 (pfc_ctrl & ~(1<<1)));
1682 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
1683 (pfc_ctrl | (1<<1)));
1668 DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port); 1684 DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
1669 REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0); 1685 REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0);
1670 usleep_range(1000, 1000); 1686 usleep_range(1000, 1000);
@@ -1729,6 +1745,10 @@ static int bnx2x_emac_enable(struct link_params *params,
1729 1745
1730 DP(NETIF_MSG_LINK, "enabling EMAC\n"); 1746 DP(NETIF_MSG_LINK, "enabling EMAC\n");
1731 1747
1748 /* Disable BMAC */
1749 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1750 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
1751
1732 /* enable emac and not bmac */ 1752 /* enable emac and not bmac */
1733 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1); 1753 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
1734 1754
@@ -2583,12 +2603,6 @@ static int bnx2x_bmac1_enable(struct link_params *params,
2583 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, 2603 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
2584 wb_data, 2); 2604 wb_data, 2);
2585 2605
2586 if (vars->phy_flags & PHY_TX_ERROR_CHECK_FLAG) {
2587 REG_RD_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LSS_STATUS,
2588 wb_data, 2);
2589 if (wb_data[0] > 0)
2590 return -ESRCH;
2591 }
2592 return 0; 2606 return 0;
2593} 2607}
2594 2608
@@ -2654,16 +2668,6 @@ static int bnx2x_bmac2_enable(struct link_params *params,
2654 udelay(30); 2668 udelay(30);
2655 bnx2x_update_pfc_bmac2(params, vars, is_lb); 2669 bnx2x_update_pfc_bmac2(params, vars, is_lb);
2656 2670
2657 if (vars->phy_flags & PHY_TX_ERROR_CHECK_FLAG) {
2658 REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LSS_STAT,
2659 wb_data, 2);
2660 if (wb_data[0] > 0) {
2661 DP(NETIF_MSG_LINK, "Got bad LSS status 0x%x\n",
2662 wb_data[0]);
2663 return -ESRCH;
2664 }
2665 }
2666
2667 return 0; 2671 return 0;
2668} 2672}
2669 2673
@@ -2949,7 +2953,9 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
2949 u32 val; 2953 u32 val;
2950 u16 i; 2954 u16 i;
2951 int rc = 0; 2955 int rc = 0;
2952 2956 if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
2957 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
2958 EMAC_MDIO_STATUS_10MB);
2953 /* address */ 2959 /* address */
2954 val = ((phy->addr << 21) | (devad << 16) | reg | 2960 val = ((phy->addr << 21) | (devad << 16) | reg |
2955 EMAC_MDIO_COMM_COMMAND_ADDRESS | 2961 EMAC_MDIO_COMM_COMMAND_ADDRESS |
@@ -3003,6 +3009,9 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
3003 } 3009 }
3004 } 3010 }
3005 3011
3012 if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
3013 bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
3014 EMAC_MDIO_STATUS_10MB);
3006 return rc; 3015 return rc;
3007} 3016}
3008 3017
@@ -3012,6 +3021,9 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
3012 u32 tmp; 3021 u32 tmp;
3013 u8 i; 3022 u8 i;
3014 int rc = 0; 3023 int rc = 0;
3024 if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
3025 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
3026 EMAC_MDIO_STATUS_10MB);
3015 3027
3016 /* address */ 3028 /* address */
3017 3029
@@ -3065,7 +3077,9 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
3065 bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); 3077 bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
3066 } 3078 }
3067 } 3079 }
3068 3080 if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
3081 bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
3082 EMAC_MDIO_STATUS_10MB);
3069 return rc; 3083 return rc;
3070} 3084}
3071 3085
@@ -4353,6 +4367,9 @@ void bnx2x_link_status_update(struct link_params *params,
4353 4367
4354 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); 4368 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
4355 vars->phy_flags = PHY_XGXS_FLAG; 4369 vars->phy_flags = PHY_XGXS_FLAG;
4370 if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
4371 vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
4372
4356 if (vars->link_up) { 4373 if (vars->link_up) {
4357 DP(NETIF_MSG_LINK, "phy link up\n"); 4374 DP(NETIF_MSG_LINK, "phy link up\n");
4358 4375
@@ -4444,6 +4461,8 @@ void bnx2x_link_status_update(struct link_params *params,
4444 4461
4445 /* indicate no mac active */ 4462 /* indicate no mac active */
4446 vars->mac_type = MAC_TYPE_NONE; 4463 vars->mac_type = MAC_TYPE_NONE;
4464 if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
4465 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
4447 } 4466 }
4448 4467
4449 /* Sync media type */ 4468 /* Sync media type */
@@ -5903,20 +5922,30 @@ int bnx2x_set_led(struct link_params *params,
5903 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); 5922 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
5904 EMAC_WR(bp, EMAC_REG_EMAC_LED, 5923 EMAC_WR(bp, EMAC_REG_EMAC_LED,
5905 (tmp | EMAC_LED_OVERRIDE)); 5924 (tmp | EMAC_LED_OVERRIDE));
5906 return rc; 5925 /*
5926 * return here without enabling traffic
5927 * LED blink andsetting rate in ON mode.
5928 * In oper mode, enabling LED blink
5929 * and setting rate is needed.
5930 */
5931 if (mode == LED_MODE_ON)
5932 return rc;
5907 } 5933 }
5908 } else if (SINGLE_MEDIA_DIRECT(params) && 5934 } else if (SINGLE_MEDIA_DIRECT(params)) {
5909 (CHIP_IS_E1x(bp) ||
5910 CHIP_IS_E2(bp))) {
5911 /* 5935 /*
5912 * This is a work-around for HW issue found when link 5936 * This is a work-around for HW issue found when link
5913 * is up in CL73 5937 * is up in CL73
5914 */ 5938 */
5915 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
5916 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); 5939 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
5917 } else { 5940 if (CHIP_IS_E1x(bp) ||
5941 CHIP_IS_E2(bp) ||
5942 (mode == LED_MODE_ON))
5943 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
5944 else
5945 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
5946 hw_led_mode);
5947 } else
5918 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode); 5948 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
5919 }
5920 5949
5921 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0); 5950 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
5922 /* Set blinking rate to ~15.9Hz */ 5951 /* Set blinking rate to ~15.9Hz */
@@ -6160,6 +6189,7 @@ static int bnx2x_update_link_down(struct link_params *params,
6160 /* update shared memory */ 6189 /* update shared memory */
6161 vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK | 6190 vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
6162 LINK_STATUS_LINK_UP | 6191 LINK_STATUS_LINK_UP |
6192 LINK_STATUS_PHYSICAL_LINK_FLAG |
6163 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | 6193 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE |
6164 LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | 6194 LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK |
6165 LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | 6195 LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK |
@@ -6197,7 +6227,8 @@ static int bnx2x_update_link_up(struct link_params *params,
6197 u8 port = params->port; 6227 u8 port = params->port;
6198 int rc = 0; 6228 int rc = 0;
6199 6229
6200 vars->link_status |= LINK_STATUS_LINK_UP; 6230 vars->link_status |= (LINK_STATUS_LINK_UP |
6231 LINK_STATUS_PHYSICAL_LINK_FLAG);
6201 vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; 6232 vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
6202 6233
6203 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) 6234 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
@@ -7998,6 +8029,9 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
7998 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 8029 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
7999 MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); 8030 MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
8000 8031
8032 /* Restart microcode to re-read the new mode */
8033 bnx2x_warpcore_reset_lane(bp, phy, 1);
8034 bnx2x_warpcore_reset_lane(bp, phy, 0);
8001 8035
8002} 8036}
8003 8037
@@ -8116,7 +8150,6 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
8116 offsetof(struct shmem_region, dev_info. 8150 offsetof(struct shmem_region, dev_info.
8117 port_feature_config[params->port]. 8151 port_feature_config[params->port].
8118 config)); 8152 config));
8119
8120 bnx2x_set_gpio_int(bp, gpio_num, 8153 bnx2x_set_gpio_int(bp, gpio_num,
8121 MISC_REGISTERS_GPIO_INT_OUTPUT_SET, 8154 MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
8122 gpio_port); 8155 gpio_port);
@@ -8125,8 +8158,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
8125 * Disable transmit for this module 8158 * Disable transmit for this module
8126 */ 8159 */
8127 phy->media_type = ETH_PHY_NOT_PRESENT; 8160 phy->media_type = ETH_PHY_NOT_PRESENT;
8128 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == 8161 if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
8129 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) 8162 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) ||
8163 CHIP_IS_E3(bp))
8130 bnx2x_sfp_set_transmitter(params, phy, 0); 8164 bnx2x_sfp_set_transmitter(params, phy, 0);
8131 } 8165 }
8132} 8166}
@@ -8228,9 +8262,6 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
8228 u16 cnt, val, tmp1; 8262 u16 cnt, val, tmp1;
8229 struct bnx2x *bp = params->bp; 8263 struct bnx2x *bp = params->bp;
8230 8264
8231 /* SPF+ PHY: Set flag to check for Tx error */
8232 vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG;
8233
8234 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 8265 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
8235 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); 8266 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
8236 /* HW reset */ 8267 /* HW reset */
@@ -8414,9 +8445,6 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
8414 struct bnx2x *bp = params->bp; 8445 struct bnx2x *bp = params->bp;
8415 DP(NETIF_MSG_LINK, "Initializing BCM8726\n"); 8446 DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
8416 8447
8417 /* SPF+ PHY: Set flag to check for Tx error */
8418 vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG;
8419
8420 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); 8448 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
8421 bnx2x_wait_reset_complete(bp, phy, params); 8449 bnx2x_wait_reset_complete(bp, phy, params);
8422 8450
@@ -8585,9 +8613,6 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
8585 struct bnx2x *bp = params->bp; 8613 struct bnx2x *bp = params->bp;
8586 /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ 8614 /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
8587 8615
8588 /* SPF+ PHY: Set flag to check for Tx error */
8589 vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG;
8590
8591 bnx2x_wait_reset_complete(bp, phy, params); 8616 bnx2x_wait_reset_complete(bp, phy, params);
8592 rx_alarm_ctrl_val = (1<<2) | (1<<5) ; 8617 rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
8593 /* Should be 0x6 to enable XS on Tx side. */ 8618 /* Should be 0x6 to enable XS on Tx side. */
@@ -9243,7 +9268,13 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9243 if (phy->req_duplex == DUPLEX_FULL) 9268 if (phy->req_duplex == DUPLEX_FULL)
9244 autoneg_val |= (1<<8); 9269 autoneg_val |= (1<<8);
9245 9270
9246 bnx2x_cl45_write(bp, phy, 9271 /*
9272 * Always write this if this is not 84833.
9273 * For 84833, write it only when it's a forced speed.
9274 */
9275 if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
9276 ((autoneg_val & (1<<12)) == 0))
9277 bnx2x_cl45_write(bp, phy,
9247 MDIO_AN_DEVAD, 9278 MDIO_AN_DEVAD,
9248 MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val); 9279 MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
9249 9280
@@ -9257,13 +9288,12 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9257 bnx2x_cl45_write(bp, phy, 9288 bnx2x_cl45_write(bp, phy,
9258 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 9289 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
9259 0x3200); 9290 0x3200);
9260 } else if (phy->req_line_speed != SPEED_10 && 9291 } else
9261 phy->req_line_speed != SPEED_100) {
9262 bnx2x_cl45_write(bp, phy, 9292 bnx2x_cl45_write(bp, phy,
9263 MDIO_AN_DEVAD, 9293 MDIO_AN_DEVAD,
9264 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, 9294 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
9265 1); 9295 1);
9266 } 9296
9267 /* Save spirom version */ 9297 /* Save spirom version */
9268 bnx2x_save_848xx_spirom_version(phy, params); 9298 bnx2x_save_848xx_spirom_version(phy, params);
9269 9299
@@ -9756,11 +9786,9 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
9756 bnx2x_cl45_read(bp, phy, 9786 bnx2x_cl45_read(bp, phy,
9757 MDIO_CTL_DEVAD, 9787 MDIO_CTL_DEVAD,
9758 0x400f, &val16); 9788 0x400f, &val16);
9759 /* Put to low power mode on newer FW */ 9789 bnx2x_cl45_write(bp, phy,
9760 if ((val16 & 0x303f) > 0x1009) 9790 MDIO_PMA_DEVAD,
9761 bnx2x_cl45_write(bp, phy, 9791 MDIO_PMA_REG_CTRL, 0x800);
9762 MDIO_PMA_DEVAD,
9763 MDIO_PMA_REG_CTRL, 0x800);
9764 } 9792 }
9765} 9793}
9766 9794
@@ -10191,8 +10219,15 @@ static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
10191 u32 cfg_pin; 10219 u32 cfg_pin;
10192 u8 port; 10220 u8 port;
10193 10221
10194 /* This works with E3 only, no need to check the chip 10222 /*
10195 before determining the port. */ 10223 * In case of no EPIO routed to reset the GPHY, put it
10224 * in low power mode.
10225 */
10226 bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800);
10227 /*
10228 * This works with E3 only, no need to check the chip
10229 * before determining the port.
10230 */
10196 port = params->port; 10231 port = params->port;
10197 cfg_pin = (REG_RD(bp, params->shmem_base + 10232 cfg_pin = (REG_RD(bp, params->shmem_base +
10198 offsetof(struct shmem_region, 10233 offsetof(struct shmem_region,
@@ -10603,7 +10638,8 @@ static struct bnx2x_phy phy_warpcore = {
10603 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, 10638 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
10604 .addr = 0xff, 10639 .addr = 0xff,
10605 .def_md_devad = 0, 10640 .def_md_devad = 0,
10606 .flags = FLAGS_HW_LOCK_REQUIRED, 10641 .flags = (FLAGS_HW_LOCK_REQUIRED |
10642 FLAGS_TX_ERROR_CHECK),
10607 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10643 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10608 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10644 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10609 .mdio_ctrl = 0, 10645 .mdio_ctrl = 0,
@@ -10729,7 +10765,8 @@ static struct bnx2x_phy phy_8706 = {
10729 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, 10765 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
10730 .addr = 0xff, 10766 .addr = 0xff,
10731 .def_md_devad = 0, 10767 .def_md_devad = 0,
10732 .flags = FLAGS_INIT_XGXS_FIRST, 10768 .flags = (FLAGS_INIT_XGXS_FIRST |
10769 FLAGS_TX_ERROR_CHECK),
10733 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10770 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10734 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10771 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10735 .mdio_ctrl = 0, 10772 .mdio_ctrl = 0,
@@ -10760,7 +10797,8 @@ static struct bnx2x_phy phy_8726 = {
10760 .addr = 0xff, 10797 .addr = 0xff,
10761 .def_md_devad = 0, 10798 .def_md_devad = 0,
10762 .flags = (FLAGS_HW_LOCK_REQUIRED | 10799 .flags = (FLAGS_HW_LOCK_REQUIRED |
10763 FLAGS_INIT_XGXS_FIRST), 10800 FLAGS_INIT_XGXS_FIRST |
10801 FLAGS_TX_ERROR_CHECK),
10764 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10802 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10765 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10803 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10766 .mdio_ctrl = 0, 10804 .mdio_ctrl = 0,
@@ -10791,7 +10829,8 @@ static struct bnx2x_phy phy_8727 = {
10791 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 10829 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
10792 .addr = 0xff, 10830 .addr = 0xff,
10793 .def_md_devad = 0, 10831 .def_md_devad = 0,
10794 .flags = FLAGS_FAN_FAILURE_DET_REQ, 10832 .flags = (FLAGS_FAN_FAILURE_DET_REQ |
10833 FLAGS_TX_ERROR_CHECK),
10795 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10834 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10796 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10835 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
10797 .mdio_ctrl = 0, 10836 .mdio_ctrl = 0,
@@ -11112,6 +11151,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
11112 */ 11151 */
11113 if (CHIP_REV(bp) == CHIP_REV_Ax) 11152 if (CHIP_REV(bp) == CHIP_REV_Ax)
11114 phy->flags |= FLAGS_MDC_MDIO_WA; 11153 phy->flags |= FLAGS_MDC_MDIO_WA;
11154 else
11155 phy->flags |= FLAGS_MDC_MDIO_WA_B0;
11115 } else { 11156 } else {
11116 switch (switch_cfg) { 11157 switch (switch_cfg) {
11117 case SWITCH_CFG_1G: 11158 case SWITCH_CFG_1G:
@@ -11500,13 +11541,12 @@ void bnx2x_init_xmac_loopback(struct link_params *params,
11500 * Set WC to loopback mode since link is required to provide clock 11541 * Set WC to loopback mode since link is required to provide clock
11501 * to the XMAC in 20G mode 11542 * to the XMAC in 20G mode
11502 */ 11543 */
11503 if (vars->line_speed == SPEED_20000) { 11544 bnx2x_set_aer_mmd(params, &params->phy[0]);
11504 bnx2x_set_aer_mmd(params, &params->phy[0]); 11545 bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0);
11505 bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0); 11546 params->phy[INT_PHY].config_loopback(
11506 params->phy[INT_PHY].config_loopback(
11507 &params->phy[INT_PHY], 11547 &params->phy[INT_PHY],
11508 params); 11548 params);
11509 } 11549
11510 bnx2x_xmac_enable(params, vars, 1); 11550 bnx2x_xmac_enable(params, vars, 1);
11511 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); 11551 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
11512} 11552}
@@ -11684,12 +11724,16 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
11684 bnx2x_set_led(params, vars, LED_MODE_OFF, 0); 11724 bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
11685 11725
11686 if (reset_ext_phy) { 11726 if (reset_ext_phy) {
11727 bnx2x_set_mdio_clk(bp, params->chip_id, port);
11687 for (phy_index = EXT_PHY1; phy_index < params->num_phys; 11728 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
11688 phy_index++) { 11729 phy_index++) {
11689 if (params->phy[phy_index].link_reset) 11730 if (params->phy[phy_index].link_reset) {
11731 bnx2x_set_aer_mmd(params,
11732 &params->phy[phy_index]);
11690 params->phy[phy_index].link_reset( 11733 params->phy[phy_index].link_reset(
11691 &params->phy[phy_index], 11734 &params->phy[phy_index],
11692 params); 11735 params);
11736 }
11693 if (params->phy[phy_index].flags & 11737 if (params->phy[phy_index].flags &
11694 FLAGS_REARM_LATCH_SIGNAL) 11738 FLAGS_REARM_LATCH_SIGNAL)
11695 clear_latch_ind = 1; 11739 clear_latch_ind = 1;
@@ -12178,10 +12222,6 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12178 u8 led_mode; 12222 u8 led_mode;
12179 u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0; 12223 u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0;
12180 12224
12181 /*DP(NETIF_MSG_LINK, "CHECK LINK: %x half_open:%x-> lss:%x\n",
12182 vars->link_up,
12183 half_open_conn, lss_status);*/
12184
12185 if ((lss_status ^ half_open_conn) == 0) 12225 if ((lss_status ^ half_open_conn) == 0)
12186 return; 12226 return;
12187 12227
@@ -12194,6 +12234,7 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12194 * b. Update link_vars->link_up 12234 * b. Update link_vars->link_up
12195 */ 12235 */
12196 if (lss_status) { 12236 if (lss_status) {
12237 DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n");
12197 vars->link_status &= ~LINK_STATUS_LINK_UP; 12238 vars->link_status &= ~LINK_STATUS_LINK_UP;
12198 vars->link_up = 0; 12239 vars->link_up = 0;
12199 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; 12240 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
@@ -12203,6 +12244,7 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12203 */ 12244 */
12204 led_mode = LED_MODE_OFF; 12245 led_mode = LED_MODE_OFF;
12205 } else { 12246 } else {
12247 DP(NETIF_MSG_LINK, "Remote Fault cleared\n");
12206 vars->link_status |= LINK_STATUS_LINK_UP; 12248 vars->link_status |= LINK_STATUS_LINK_UP;
12207 vars->link_up = 1; 12249 vars->link_up = 1;
12208 vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; 12250 vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
@@ -12219,6 +12261,15 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12219 bnx2x_notify_link_changed(bp); 12261 bnx2x_notify_link_changed(bp);
12220} 12262}
12221 12263
12264/******************************************************************************
12265* Description:
12266* This function checks for half opened connection change indication.
12267* When such change occurs, it calls the bnx2x_analyze_link_error
12268* to check if Remote Fault is set or cleared. Reception of remote fault
12269* status message in the MAC indicates that the peer's MAC has detected
12270* a fault, for example, due to break in the TX side of fiber.
12271*
12272******************************************************************************/
12222static void bnx2x_check_half_open_conn(struct link_params *params, 12273static void bnx2x_check_half_open_conn(struct link_params *params,
12223 struct link_vars *vars) 12274 struct link_vars *vars)
12224{ 12275{
@@ -12229,9 +12280,28 @@ static void bnx2x_check_half_open_conn(struct link_params *params,
12229 if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) 12280 if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
12230 return; 12281 return;
12231 12282
12232 if (!CHIP_IS_E3(bp) && 12283 if (CHIP_IS_E3(bp) &&
12233 (REG_RD(bp, MISC_REG_RESET_REG_2) & 12284 (REG_RD(bp, MISC_REG_RESET_REG_2) &
12234 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))) { 12285 (MISC_REGISTERS_RESET_REG_2_XMAC))) {
12286 /* Check E3 XMAC */
12287 /*
12288 * Note that link speed cannot be queried here, since it may be
12289 * zero while link is down. In case UMAC is active, LSS will
12290 * simply not be set
12291 */
12292 mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
12293
12294 /* Clear stick bits (Requires rising edge) */
12295 REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
12296 REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
12297 XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
12298 XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
12299 if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
12300 lss_status = 1;
12301
12302 bnx2x_analyze_link_error(params, vars, lss_status);
12303 } else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
12304 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
12235 /* Check E1X / E2 BMAC */ 12305 /* Check E1X / E2 BMAC */
12236 u32 lss_status_reg; 12306 u32 lss_status_reg;
12237 u32 wb_data[2]; 12307 u32 wb_data[2];
@@ -12253,14 +12323,20 @@ static void bnx2x_check_half_open_conn(struct link_params *params,
12253void bnx2x_period_func(struct link_params *params, struct link_vars *vars) 12323void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
12254{ 12324{
12255 struct bnx2x *bp = params->bp; 12325 struct bnx2x *bp = params->bp;
12326 u16 phy_idx;
12256 if (!params) { 12327 if (!params) {
12257 DP(NETIF_MSG_LINK, "Ininitliazed params !\n"); 12328 DP(NETIF_MSG_LINK, "Uninitialized params !\n");
12258 return; 12329 return;
12259 } 12330 }
12260 /* DP(NETIF_MSG_LINK, "Periodic called vars->phy_flags 0x%x speed 0x%x 12331
12261 RESET_REG_2 0x%x\n", vars->phy_flags, vars->line_speed, 12332 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
12262 REG_RD(bp, MISC_REG_RESET_REG_2)); */ 12333 if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
12263 bnx2x_check_half_open_conn(params, vars); 12334 bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
12335 bnx2x_check_half_open_conn(params, vars);
12336 break;
12337 }
12338 }
12339
12264 if (CHIP_IS_E3(bp)) 12340 if (CHIP_IS_E3(bp))
12265 bnx2x_check_over_curr(params, vars); 12341 bnx2x_check_over_curr(params, vars);
12266} 12342}
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 6a7708d5da37..c12db6da213e 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -145,6 +145,8 @@ struct bnx2x_phy {
145#define FLAGS_SFP_NOT_APPROVED (1<<7) 145#define FLAGS_SFP_NOT_APPROVED (1<<7)
146#define FLAGS_MDC_MDIO_WA (1<<8) 146#define FLAGS_MDC_MDIO_WA (1<<8)
147#define FLAGS_DUMMY_READ (1<<9) 147#define FLAGS_DUMMY_READ (1<<9)
148#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
149#define FLAGS_TX_ERROR_CHECK (1<<12)
148 150
149 /* preemphasis values for the rx side */ 151 /* preemphasis values for the rx side */
150 u16 rx_preemphasis[4]; 152 u16 rx_preemphasis[4];
@@ -276,7 +278,6 @@ struct link_vars {
276#define PHY_PHYSICAL_LINK_FLAG (1<<2) 278#define PHY_PHYSICAL_LINK_FLAG (1<<2)
277#define PHY_HALF_OPEN_CONN_FLAG (1<<3) 279#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
278#define PHY_OVER_CURRENT_FLAG (1<<4) 280#define PHY_OVER_CURRENT_FLAG (1<<4)
279#define PHY_TX_ERROR_CHECK_FLAG (1<<5)
280 281
281 u8 mac_type; 282 u8 mac_type;
282#define MAC_TYPE_NONE 0 283#define MAC_TYPE_NONE 0
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 150709111548..f74582a22c68 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -5798,6 +5798,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
5798 5798
5799 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); 5799 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
5800 5800
5801 /*
5802 * take the UNDI lock to protect undi_unload flow from accessing
5803 * registers while we're resetting the chip
5804 */
5805 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5806
5801 bnx2x_reset_common(bp); 5807 bnx2x_reset_common(bp);
5802 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 5808 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5803 5809
@@ -5808,6 +5814,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
5808 } 5814 }
5809 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); 5815 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
5810 5816
5817 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5818
5811 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); 5819 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
5812 5820
5813 if (!CHIP_IS_E1x(bp)) { 5821 if (!CHIP_IS_E1x(bp)) {
@@ -10251,10 +10259,17 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10251 /* clean indirect addresses */ 10259 /* clean indirect addresses */
10252 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 10260 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10253 PCICFG_VENDOR_ID_OFFSET); 10261 PCICFG_VENDOR_ID_OFFSET);
10254 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0); 10262 /* Clean the following indirect addresses for all functions since it
10255 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0); 10263 * is not used by the driver.
10256 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); 10264 */
10257 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); 10265 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
10266 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
10267 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
10268 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
10269 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
10270 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
10271 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
10272 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
10258 10273
10259 /* 10274 /*
10260 * Enable internal target-read (in case we are probed after PF FLR). 10275 * Enable internal target-read (in case we are probed after PF FLR).
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 02461fef8751..40266c14e6dc 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -3007,11 +3007,27 @@
3007/* [R 6] Debug only: Number of used entries in the data FIFO */ 3007/* [R 6] Debug only: Number of used entries in the data FIFO */
3008#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c 3008#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
3009/* [R 7] Debug only: Number of used entries in the header FIFO */ 3009/* [R 7] Debug only: Number of used entries in the header FIFO */
3010#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478 3010#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
3011#define PXP2_REG_PGL_ADDR_88_F0 0x120534 3011#define PXP2_REG_PGL_ADDR_88_F0 0x120534
3012#define PXP2_REG_PGL_ADDR_8C_F0 0x120538 3012/* [R 32] GRC address for configuration access to PCIE config address 0x88.
3013#define PXP2_REG_PGL_ADDR_90_F0 0x12053c 3013 * any write to this PCIE address will cause a GRC write access to the
3014#define PXP2_REG_PGL_ADDR_94_F0 0x120540 3014 * address that's in t this register */
3015#define PXP2_REG_PGL_ADDR_88_F1 0x120544
3016#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
3017/* [R 32] GRC address for configuration access to PCIE config address 0x8c.
3018 * any write to this PCIE address will cause a GRC write access to the
3019 * address that's in t this register */
3020#define PXP2_REG_PGL_ADDR_8C_F1 0x120548
3021#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
3022/* [R 32] GRC address for configuration access to PCIE config address 0x90.
3023 * any write to this PCIE address will cause a GRC write access to the
3024 * address that's in t this register */
3025#define PXP2_REG_PGL_ADDR_90_F1 0x12054c
3026#define PXP2_REG_PGL_ADDR_94_F0 0x120540
3027/* [R 32] GRC address for configuration access to PCIE config address 0x94.
3028 * any write to this PCIE address will cause a GRC write access to the
3029 * address that's in t this register */
3030#define PXP2_REG_PGL_ADDR_94_F1 0x120550
3015#define PXP2_REG_PGL_CONTROL0 0x120490 3031#define PXP2_REG_PGL_CONTROL0 0x120490
3016#define PXP2_REG_PGL_CONTROL1 0x120514 3032#define PXP2_REG_PGL_CONTROL1 0x120514
3017#define PXP2_REG_PGL_DEBUG 0x120520 3033#define PXP2_REG_PGL_DEBUG 0x120520
@@ -4771,9 +4787,11 @@
4771 The fields are: [4:0] - tail pointer; 10:5] - Link List size; 15:11] - 4787 The fields are: [4:0] - tail pointer; 10:5] - Link List size; 15:11] -
4772 header pointer. */ 4788 header pointer. */
4773#define UCM_REG_XX_TABLE 0xe0300 4789#define UCM_REG_XX_TABLE 0xe0300
4790#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28)
4774#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15) 4791#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15)
4775#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24) 4792#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24)
4776#define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5) 4793#define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5)
4794#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE (0x1<<8)
4777#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4) 4795#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4)
4778#define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1) 4796#define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1)
4779#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13) 4797#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13)
@@ -5622,8 +5640,9 @@
5622#define EMAC_MDIO_COMM_START_BUSY (1L<<29) 5640#define EMAC_MDIO_COMM_START_BUSY (1L<<29)
5623#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4) 5641#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4)
5624#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31) 5642#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31)
5625#define EMAC_MDIO_MODE_CLOCK_CNT (0x3fL<<16) 5643#define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16)
5626#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16 5644#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16
5645#define EMAC_MDIO_STATUS_10MB (1L<<1)
5627#define EMAC_MODE_25G_MODE (1L<<5) 5646#define EMAC_MODE_25G_MODE (1L<<5)
5628#define EMAC_MODE_HALF_DUPLEX (1L<<1) 5647#define EMAC_MODE_HALF_DUPLEX (1L<<1)
5629#define EMAC_MODE_PORT_GMII (2L<<2) 5648#define EMAC_MODE_PORT_GMII (2L<<2)
@@ -5634,6 +5653,7 @@
5634#define EMAC_REG_EMAC_MAC_MATCH 0x10 5653#define EMAC_REG_EMAC_MAC_MATCH 0x10
5635#define EMAC_REG_EMAC_MDIO_COMM 0xac 5654#define EMAC_REG_EMAC_MDIO_COMM 0xac
5636#define EMAC_REG_EMAC_MDIO_MODE 0xb4 5655#define EMAC_REG_EMAC_MDIO_MODE 0xb4
5656#define EMAC_REG_EMAC_MDIO_STATUS 0xb0
5637#define EMAC_REG_EMAC_MODE 0x0 5657#define EMAC_REG_EMAC_MODE 0x0
5638#define EMAC_REG_EMAC_RX_MODE 0xc8 5658#define EMAC_REG_EMAC_RX_MODE 0xc8
5639#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c 5659#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 38a83acd502e..43f2ea541088 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3419,9 +3419,27 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
3419static int bond_open(struct net_device *bond_dev) 3419static int bond_open(struct net_device *bond_dev)
3420{ 3420{
3421 struct bonding *bond = netdev_priv(bond_dev); 3421 struct bonding *bond = netdev_priv(bond_dev);
3422 struct slave *slave;
3423 int i;
3422 3424
3423 bond->kill_timers = 0; 3425 bond->kill_timers = 0;
3424 3426
3427 /* reset slave->backup and slave->inactive */
3428 read_lock(&bond->lock);
3429 if (bond->slave_cnt > 0) {
3430 read_lock(&bond->curr_slave_lock);
3431 bond_for_each_slave(bond, slave, i) {
3432 if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
3433 && (slave != bond->curr_active_slave)) {
3434 bond_set_slave_inactive_flags(slave);
3435 } else {
3436 bond_set_slave_active_flags(slave);
3437 }
3438 }
3439 read_unlock(&bond->curr_slave_lock);
3440 }
3441 read_unlock(&bond->lock);
3442
3425 INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed); 3443 INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
3426 3444
3427 if (bond_is_lb(bond)) { 3445 if (bond_is_lb(bond)) {
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 231385b8e08f..c7f3d4ea1167 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -408,7 +408,7 @@ static void plx_pci_del_card(struct pci_dev *pdev)
408 struct sja1000_priv *priv; 408 struct sja1000_priv *priv;
409 int i = 0; 409 int i = 0;
410 410
411 for (i = 0; i < card->channels; i++) { 411 for (i = 0; i < PLX_PCI_MAX_CHAN; i++) {
412 dev = card->net_dev[i]; 412 dev = card->net_dev[i];
413 if (!dev) 413 if (!dev)
414 continue; 414 continue;
@@ -536,7 +536,6 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
536 if (err) { 536 if (err) {
537 dev_err(&pdev->dev, "Registering device failed " 537 dev_err(&pdev->dev, "Registering device failed "
538 "(err=%d)\n", err); 538 "(err=%d)\n", err);
539 free_sja1000dev(dev);
540 goto failure_cleanup; 539 goto failure_cleanup;
541 } 540 }
542 541
@@ -549,6 +548,7 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
549 dev_err(&pdev->dev, "Channel #%d not detected\n", 548 dev_err(&pdev->dev, "Channel #%d not detected\n",
550 i + 1); 549 i + 1);
551 free_sja1000dev(dev); 550 free_sja1000dev(dev);
551 card->net_dev[i] = NULL;
552 } 552 }
553 } 553 }
554 554
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index f523f1cc5142..4b70b7e8bdeb 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -197,7 +197,7 @@ static void slc_bump(struct slcan *sl)
197 skb->ip_summed = CHECKSUM_UNNECESSARY; 197 skb->ip_summed = CHECKSUM_UNNECESSARY;
198 memcpy(skb_put(skb, sizeof(struct can_frame)), 198 memcpy(skb_put(skb, sizeof(struct can_frame)),
199 &cf, sizeof(struct can_frame)); 199 &cf, sizeof(struct can_frame));
200 netif_rx(skb); 200 netif_rx_ni(skb);
201 201
202 sl->dev->stats.rx_packets++; 202 sl->dev->stats.rx_packets++;
203 sl->dev->stats.rx_bytes += cf.can_dlc; 203 sl->dev->stats.rx_bytes += cf.can_dlc;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index f7bbde9eb2cb..a81249246ece 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -503,9 +503,9 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
503 spin_unlock_irqrestore(&priv->mbx_lock, flags); 503 spin_unlock_irqrestore(&priv->mbx_lock, flags);
504 504
505 /* Prepare mailbox for transmission */ 505 /* Prepare mailbox for transmission */
506 data = cf->can_dlc | (get_tx_head_prio(priv) << 8);
506 if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */ 507 if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
507 data |= HECC_CANMCF_RTR; 508 data |= HECC_CANMCF_RTR;
508 data |= get_tx_head_prio(priv) << 8;
509 hecc_write_mbx(priv, mbxno, HECC_CANMCF, data); 509 hecc_write_mbx(priv, mbxno, HECC_CANMCF, data);
510 510
511 if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */ 511 if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
@@ -923,6 +923,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
923 priv->can.do_get_state = ti_hecc_get_state; 923 priv->can.do_get_state = ti_hecc_get_state;
924 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; 924 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
925 925
926 spin_lock_init(&priv->mbx_lock);
926 ndev->irq = irq->start; 927 ndev->irq = irq->start;
927 ndev->flags |= IFF_ECHO; 928 ndev->flags |= IFF_ECHO;
928 platform_set_drvdata(pdev, ndev); 929 platform_set_drvdata(pdev, ndev);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 646c86bcc545..fdb7a1756409 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2452,14 +2452,13 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id)
2452 struct net_device *dev = dev_id; 2452 struct net_device *dev = dev_id;
2453 struct cas *cp = netdev_priv(dev); 2453 struct cas *cp = netdev_priv(dev);
2454 unsigned long flags; 2454 unsigned long flags;
2455 int ring; 2455 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2456 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); 2456 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2457 2457
2458 /* check for shared irq */ 2458 /* check for shared irq */
2459 if (status == 0) 2459 if (status == 0)
2460 return IRQ_NONE; 2460 return IRQ_NONE;
2461 2461
2462 ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2463 spin_lock_irqsave(&cp->lock, flags); 2462 spin_lock_irqsave(&cp->lock, flags);
2464 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ 2463 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2465#ifdef USE_NAPI 2464#ifdef USE_NAPI
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c5f0f04219f3..5548d464261a 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -838,6 +838,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
838 838
839 /* Disable all the interrupts */ 839 /* Disable all the interrupts */
840 ew32(IMC, 0xFFFFFFFF); 840 ew32(IMC, 0xFFFFFFFF);
841 E1000_WRITE_FLUSH();
841 msleep(10); 842 msleep(10);
842 843
843 /* Test each interrupt */ 844 /* Test each interrupt */
@@ -856,6 +857,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
856 adapter->test_icr = 0; 857 adapter->test_icr = 0;
857 ew32(IMC, mask); 858 ew32(IMC, mask);
858 ew32(ICS, mask); 859 ew32(ICS, mask);
860 E1000_WRITE_FLUSH();
859 msleep(10); 861 msleep(10);
860 862
861 if (adapter->test_icr & mask) { 863 if (adapter->test_icr & mask) {
@@ -873,6 +875,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
873 adapter->test_icr = 0; 875 adapter->test_icr = 0;
874 ew32(IMS, mask); 876 ew32(IMS, mask);
875 ew32(ICS, mask); 877 ew32(ICS, mask);
878 E1000_WRITE_FLUSH();
876 msleep(10); 879 msleep(10);
877 880
878 if (!(adapter->test_icr & mask)) { 881 if (!(adapter->test_icr & mask)) {
@@ -890,6 +893,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
890 adapter->test_icr = 0; 893 adapter->test_icr = 0;
891 ew32(IMC, ~mask & 0x00007FFF); 894 ew32(IMC, ~mask & 0x00007FFF);
892 ew32(ICS, ~mask & 0x00007FFF); 895 ew32(ICS, ~mask & 0x00007FFF);
896 E1000_WRITE_FLUSH();
893 msleep(10); 897 msleep(10);
894 898
895 if (adapter->test_icr) { 899 if (adapter->test_icr) {
@@ -901,6 +905,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
901 905
902 /* Disable all the interrupts */ 906 /* Disable all the interrupts */
903 ew32(IMC, 0xFFFFFFFF); 907 ew32(IMC, 0xFFFFFFFF);
908 E1000_WRITE_FLUSH();
904 msleep(10); 909 msleep(10);
905 910
906 /* Unhook test interrupt handler */ 911 /* Unhook test interrupt handler */
@@ -1394,6 +1399,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1394 if (unlikely(++k == txdr->count)) k = 0; 1399 if (unlikely(++k == txdr->count)) k = 0;
1395 } 1400 }
1396 ew32(TDT, k); 1401 ew32(TDT, k);
1402 E1000_WRITE_FLUSH();
1397 msleep(200); 1403 msleep(200);
1398 time = jiffies; /* set the start time for the receive */ 1404 time = jiffies; /* set the start time for the receive */
1399 good_cnt = 0; 1405 good_cnt = 0;
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 1698622af434..8545c7aa93eb 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -446,6 +446,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
446 /* Must reset the PHY before resetting the MAC */ 446 /* Must reset the PHY before resetting the MAC */
447 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { 447 if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
448 ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); 448 ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
449 E1000_WRITE_FLUSH();
449 msleep(5); 450 msleep(5);
450 } 451 }
451 452
@@ -3752,6 +3753,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
3752 /* Clear SK and CS */ 3753 /* Clear SK and CS */
3753 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); 3754 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
3754 ew32(EECD, eecd); 3755 ew32(EECD, eecd);
3756 E1000_WRITE_FLUSH();
3755 udelay(1); 3757 udelay(1);
3756 } 3758 }
3757 3759
@@ -3824,6 +3826,7 @@ static void e1000_release_eeprom(struct e1000_hw *hw)
3824 eecd &= ~E1000_EECD_SK; /* Lower SCK */ 3826 eecd &= ~E1000_EECD_SK; /* Lower SCK */
3825 3827
3826 ew32(EECD, eecd); 3828 ew32(EECD, eecd);
3829 E1000_WRITE_FLUSH();
3827 3830
3828 udelay(hw->eeprom.delay_usec); 3831 udelay(hw->eeprom.delay_usec);
3829 } else if (hw->eeprom.type == e1000_eeprom_microwire) { 3832 } else if (hw->eeprom.type == e1000_eeprom_microwire) {
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 480f2592f8a5..536b3a55c45f 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -2085,7 +2085,8 @@ struct e1000_info e1000_82574_info = {
2085 | FLAG_HAS_AMT 2085 | FLAG_HAS_AMT
2086 | FLAG_HAS_CTRLEXT_ON_LOAD, 2086 | FLAG_HAS_CTRLEXT_ON_LOAD,
2087 .flags2 = FLAG2_CHECK_PHY_HANG 2087 .flags2 = FLAG2_CHECK_PHY_HANG
2088 | FLAG2_DISABLE_ASPM_L0S, 2088 | FLAG2_DISABLE_ASPM_L0S
2089 | FLAG2_NO_DISABLE_RX,
2089 .pba = 32, 2090 .pba = 32,
2090 .max_hw_frame_size = DEFAULT_JUMBO, 2091 .max_hw_frame_size = DEFAULT_JUMBO,
2091 .get_variants = e1000_get_variants_82571, 2092 .get_variants = e1000_get_variants_82571,
@@ -2104,7 +2105,8 @@ struct e1000_info e1000_82583_info = {
2104 | FLAG_HAS_AMT 2105 | FLAG_HAS_AMT
2105 | FLAG_HAS_JUMBO_FRAMES 2106 | FLAG_HAS_JUMBO_FRAMES
2106 | FLAG_HAS_CTRLEXT_ON_LOAD, 2107 | FLAG_HAS_CTRLEXT_ON_LOAD,
2107 .flags2 = FLAG2_DISABLE_ASPM_L0S, 2108 .flags2 = FLAG2_DISABLE_ASPM_L0S
2109 | FLAG2_NO_DISABLE_RX,
2108 .pba = 32, 2110 .pba = 32,
2109 .max_hw_frame_size = DEFAULT_JUMBO, 2111 .max_hw_frame_size = DEFAULT_JUMBO,
2110 .get_variants = e1000_get_variants_82571, 2112 .get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 638d175792cf..8533ad7f3559 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -155,6 +155,9 @@ struct e1000_info;
155#define HV_M_STATUS_SPEED_1000 0x0200 155#define HV_M_STATUS_SPEED_1000 0x0200
156#define HV_M_STATUS_LINK_UP 0x0040 156#define HV_M_STATUS_LINK_UP 0x0040
157 157
158#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
159#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
160
158/* Time to wait before putting the device into D3 if there's no link (in ms). */ 161/* Time to wait before putting the device into D3 if there's no link (in ms). */
159#define LINK_TIMEOUT 100 162#define LINK_TIMEOUT 100
160 163
@@ -453,6 +456,8 @@ struct e1000_info {
453#define FLAG2_DISABLE_ASPM_L0S (1 << 7) 456#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
454#define FLAG2_DISABLE_AIM (1 << 8) 457#define FLAG2_DISABLE_AIM (1 << 8)
455#define FLAG2_CHECK_PHY_HANG (1 << 9) 458#define FLAG2_CHECK_PHY_HANG (1 << 9)
459#define FLAG2_NO_DISABLE_RX (1 << 10)
460#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
456 461
457#define E1000_RX_DESC_PS(R, i) \ 462#define E1000_RX_DESC_PS(R, i) \
458 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 463 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index c0ecb2d9fdb7..e4f42257c24c 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1313,6 +1313,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
1313 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & 1313 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
1314 E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; 1314 E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
1315 ew32(KMRNCTRLSTA, kmrnctrlsta); 1315 ew32(KMRNCTRLSTA, kmrnctrlsta);
1316 e1e_flush();
1316 1317
1317 udelay(2); 1318 udelay(2);
1318 1319
@@ -1347,6 +1348,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
1347 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & 1348 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
1348 E1000_KMRNCTRLSTA_OFFSET) | data; 1349 E1000_KMRNCTRLSTA_OFFSET) | data;
1349 ew32(KMRNCTRLSTA, kmrnctrlsta); 1350 ew32(KMRNCTRLSTA, kmrnctrlsta);
1351 e1e_flush();
1350 1352
1351 udelay(2); 1353 udelay(2);
1352 1354
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index cb1a3623253e..6a0526a59a8a 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -28,8 +28,8 @@
28 28
29/* ethtool support for e1000 */ 29/* ethtool support for e1000 */
30 30
31#include <linux/interrupt.h>
32#include <linux/netdevice.h> 31#include <linux/netdevice.h>
32#include <linux/interrupt.h>
33#include <linux/ethtool.h> 33#include <linux/ethtool.h>
34#include <linux/pci.h> 34#include <linux/pci.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
@@ -964,6 +964,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
964 964
965 /* Disable all the interrupts */ 965 /* Disable all the interrupts */
966 ew32(IMC, 0xFFFFFFFF); 966 ew32(IMC, 0xFFFFFFFF);
967 e1e_flush();
967 usleep_range(10000, 20000); 968 usleep_range(10000, 20000);
968 969
969 /* Test each interrupt */ 970 /* Test each interrupt */
@@ -996,6 +997,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
996 adapter->test_icr = 0; 997 adapter->test_icr = 0;
997 ew32(IMC, mask); 998 ew32(IMC, mask);
998 ew32(ICS, mask); 999 ew32(ICS, mask);
1000 e1e_flush();
999 usleep_range(10000, 20000); 1001 usleep_range(10000, 20000);
1000 1002
1001 if (adapter->test_icr & mask) { 1003 if (adapter->test_icr & mask) {
@@ -1014,6 +1016,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
1014 adapter->test_icr = 0; 1016 adapter->test_icr = 0;
1015 ew32(IMS, mask); 1017 ew32(IMS, mask);
1016 ew32(ICS, mask); 1018 ew32(ICS, mask);
1019 e1e_flush();
1017 usleep_range(10000, 20000); 1020 usleep_range(10000, 20000);
1018 1021
1019 if (!(adapter->test_icr & mask)) { 1022 if (!(adapter->test_icr & mask)) {
@@ -1032,6 +1035,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
1032 adapter->test_icr = 0; 1035 adapter->test_icr = 0;
1033 ew32(IMC, ~mask & 0x00007FFF); 1036 ew32(IMC, ~mask & 0x00007FFF);
1034 ew32(ICS, ~mask & 0x00007FFF); 1037 ew32(ICS, ~mask & 0x00007FFF);
1038 e1e_flush();
1035 usleep_range(10000, 20000); 1039 usleep_range(10000, 20000);
1036 1040
1037 if (adapter->test_icr) { 1041 if (adapter->test_icr) {
@@ -1043,6 +1047,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
1043 1047
1044 /* Disable all the interrupts */ 1048 /* Disable all the interrupts */
1045 ew32(IMC, 0xFFFFFFFF); 1049 ew32(IMC, 0xFFFFFFFF);
1050 e1e_flush();
1046 usleep_range(10000, 20000); 1051 usleep_range(10000, 20000);
1047 1052
1048 /* Unhook test interrupt handler */ 1053 /* Unhook test interrupt handler */
@@ -1201,7 +1206,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1201 rx_ring->next_to_clean = 0; 1206 rx_ring->next_to_clean = 0;
1202 1207
1203 rctl = er32(RCTL); 1208 rctl = er32(RCTL);
1204 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1209 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
1210 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1205 ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); 1211 ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF));
1206 ew32(RDBAH, ((u64) rx_ring->dma >> 32)); 1212 ew32(RDBAH, ((u64) rx_ring->dma >> 32));
1207 ew32(RDLEN, rx_ring->size); 1213 ew32(RDLEN, rx_ring->size);
@@ -1276,6 +1282,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1276 E1000_CTRL_FD); /* Force Duplex to FULL */ 1282 E1000_CTRL_FD); /* Force Duplex to FULL */
1277 1283
1278 ew32(CTRL, ctrl_reg); 1284 ew32(CTRL, ctrl_reg);
1285 e1e_flush();
1279 udelay(500); 1286 udelay(500);
1280 1287
1281 return 0; 1288 return 0;
@@ -1418,6 +1425,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
1418 */ 1425 */
1419#define E1000_SERDES_LB_ON 0x410 1426#define E1000_SERDES_LB_ON 0x410
1420 ew32(SCTL, E1000_SERDES_LB_ON); 1427 ew32(SCTL, E1000_SERDES_LB_ON);
1428 e1e_flush();
1421 usleep_range(10000, 20000); 1429 usleep_range(10000, 20000);
1422 1430
1423 return 0; 1431 return 0;
@@ -1513,6 +1521,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
1513 hw->phy.media_type == e1000_media_type_internal_serdes) { 1521 hw->phy.media_type == e1000_media_type_internal_serdes) {
1514#define E1000_SERDES_LB_OFF 0x400 1522#define E1000_SERDES_LB_OFF 0x400
1515 ew32(SCTL, E1000_SERDES_LB_OFF); 1523 ew32(SCTL, E1000_SERDES_LB_OFF);
1524 e1e_flush();
1516 usleep_range(10000, 20000); 1525 usleep_range(10000, 20000);
1517 break; 1526 break;
1518 } 1527 }
@@ -1592,6 +1601,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1592 k = 0; 1601 k = 0;
1593 } 1602 }
1594 ew32(TDT, k); 1603 ew32(TDT, k);
1604 e1e_flush();
1595 msleep(200); 1605 msleep(200);
1596 time = jiffies; /* set the start time for the receive */ 1606 time = jiffies; /* set the start time for the receive */
1597 good_cnt = 0; 1607 good_cnt = 0;
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index c1752124f3cd..54add27c8f76 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -137,8 +137,9 @@
137#define HV_PM_CTRL PHY_REG(770, 17) 137#define HV_PM_CTRL PHY_REG(770, 17)
138 138
139/* PHY Low Power Idle Control */ 139/* PHY Low Power Idle Control */
140#define I82579_LPI_CTRL PHY_REG(772, 20) 140#define I82579_LPI_CTRL PHY_REG(772, 20)
141#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 141#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
142#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
142 143
143/* EMI Registers */ 144/* EMI Registers */
144#define I82579_EMI_ADDR 0x10 145#define I82579_EMI_ADDR 0x10
@@ -163,6 +164,11 @@
163#define HV_KMRN_MODE_CTRL PHY_REG(769, 16) 164#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
164#define HV_KMRN_MDIO_SLOW 0x0400 165#define HV_KMRN_MDIO_SLOW 0x0400
165 166
167/* KMRN FIFO Control and Status */
168#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
169#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
170#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
171
166/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ 172/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
167/* Offset 04h HSFSTS */ 173/* Offset 04h HSFSTS */
168union ich8_hws_flash_status { 174union ich8_hws_flash_status {
@@ -283,6 +289,7 @@ static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
283 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; 289 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
284 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; 290 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
285 ew32(CTRL, ctrl); 291 ew32(CTRL, ctrl);
292 e1e_flush();
286 udelay(10); 293 udelay(10);
287 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; 294 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
288 ew32(CTRL, ctrl); 295 ew32(CTRL, ctrl);
@@ -656,6 +663,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
656 struct e1000_mac_info *mac = &hw->mac; 663 struct e1000_mac_info *mac = &hw->mac;
657 s32 ret_val; 664 s32 ret_val;
658 bool link; 665 bool link;
666 u16 phy_reg;
659 667
660 /* 668 /*
661 * We only want to go out to the PHY registers to see if Auto-Neg 669 * We only want to go out to the PHY registers to see if Auto-Neg
@@ -688,16 +696,35 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
688 696
689 mac->get_link_status = false; 697 mac->get_link_status = false;
690 698
691 if (hw->phy.type == e1000_phy_82578) { 699 switch (hw->mac.type) {
692 ret_val = e1000_link_stall_workaround_hv(hw); 700 case e1000_pch2lan:
693 if (ret_val)
694 goto out;
695 }
696
697 if (hw->mac.type == e1000_pch2lan) {
698 ret_val = e1000_k1_workaround_lv(hw); 701 ret_val = e1000_k1_workaround_lv(hw);
699 if (ret_val) 702 if (ret_val)
700 goto out; 703 goto out;
704 /* fall-thru */
705 case e1000_pchlan:
706 if (hw->phy.type == e1000_phy_82578) {
707 ret_val = e1000_link_stall_workaround_hv(hw);
708 if (ret_val)
709 goto out;
710 }
711
712 /*
713 * Workaround for PCHx parts in half-duplex:
714 * Set the number of preambles removed from the packet
715 * when it is passed from the PHY to the MAC to prevent
716 * the MAC from misinterpreting the packet type.
717 */
718 e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
719 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
720
721 if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
722 phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
723
724 e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
725 break;
726 default:
727 break;
701 } 728 }
702 729
703 /* 730 /*
@@ -787,6 +814,11 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
787 (adapter->hw.phy.type == e1000_phy_igp_3)) 814 (adapter->hw.phy.type == e1000_phy_igp_3))
788 adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; 815 adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
789 816
817 /* Enable workaround for 82579 w/ ME enabled */
818 if ((adapter->hw.mac.type == e1000_pch2lan) &&
819 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
820 adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
821
790 /* Disable EEE by default until IEEE802.3az spec is finalized */ 822 /* Disable EEE by default until IEEE802.3az spec is finalized */
791 if (adapter->flags2 & FLAG2_HAS_EEE) 823 if (adapter->flags2 & FLAG2_HAS_EEE)
792 adapter->hw.dev_spec.ich8lan.eee_disable = true; 824 adapter->hw.dev_spec.ich8lan.eee_disable = true;
@@ -1230,9 +1262,11 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1230 ew32(CTRL, reg); 1262 ew32(CTRL, reg);
1231 1263
1232 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); 1264 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1265 e1e_flush();
1233 udelay(20); 1266 udelay(20);
1234 ew32(CTRL, ctrl_reg); 1267 ew32(CTRL, ctrl_reg);
1235 ew32(CTRL_EXT, ctrl_ext); 1268 ew32(CTRL_EXT, ctrl_ext);
1269 e1e_flush();
1236 udelay(20); 1270 udelay(20);
1237 1271
1238out: 1272out:
@@ -1352,7 +1386,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1352 return ret_val; 1386 return ret_val;
1353 1387
1354 /* Preamble tuning for SSC */ 1388 /* Preamble tuning for SSC */
1355 ret_val = e1e_wphy(hw, PHY_REG(770, 16), 0xA204); 1389 ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
1356 if (ret_val) 1390 if (ret_val)
1357 return ret_val; 1391 return ret_val;
1358 } 1392 }
@@ -1642,6 +1676,7 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1642 s32 ret_val = 0; 1676 s32 ret_val = 0;
1643 u16 status_reg = 0; 1677 u16 status_reg = 0;
1644 u32 mac_reg; 1678 u32 mac_reg;
1679 u16 phy_reg;
1645 1680
1646 if (hw->mac.type != e1000_pch2lan) 1681 if (hw->mac.type != e1000_pch2lan)
1647 goto out; 1682 goto out;
@@ -1656,12 +1691,19 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1656 mac_reg = er32(FEXTNVM4); 1691 mac_reg = er32(FEXTNVM4);
1657 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; 1692 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1658 1693
1659 if (status_reg & HV_M_STATUS_SPEED_1000) 1694 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
1695 if (ret_val)
1696 goto out;
1697
1698 if (status_reg & HV_M_STATUS_SPEED_1000) {
1660 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; 1699 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1661 else 1700 phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1701 } else {
1662 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; 1702 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1663 1703 phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1704 }
1664 ew32(FEXTNVM4, mac_reg); 1705 ew32(FEXTNVM4, mac_reg);
1706 ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
1665 } 1707 }
1666 1708
1667out: 1709out:
@@ -2134,8 +2176,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2134 2176
2135 ret_val = 0; 2177 ret_val = 0;
2136 for (i = 0; i < words; i++) { 2178 for (i = 0; i < words; i++) {
2137 if ((dev_spec->shadow_ram) && 2179 if (dev_spec->shadow_ram[offset+i].modified) {
2138 (dev_spec->shadow_ram[offset+i].modified)) {
2139 data[i] = dev_spec->shadow_ram[offset+i].value; 2180 data[i] = dev_spec->shadow_ram[offset+i].value;
2140 } else { 2181 } else {
2141 ret_val = e1000_read_flash_word_ich8lan(hw, 2182 ret_val = e1000_read_flash_word_ich8lan(hw,
@@ -3090,6 +3131,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3090 ret_val = e1000_acquire_swflag_ich8lan(hw); 3131 ret_val = e1000_acquire_swflag_ich8lan(hw);
3091 e_dbg("Issuing a global reset to ich8lan\n"); 3132 e_dbg("Issuing a global reset to ich8lan\n");
3092 ew32(CTRL, (ctrl | E1000_CTRL_RST)); 3133 ew32(CTRL, (ctrl | E1000_CTRL_RST));
3134 /* cannot issue a flush here because it hangs the hardware */
3093 msleep(20); 3135 msleep(20);
3094 3136
3095 if (!ret_val) 3137 if (!ret_val)
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 65580b405942..0893ab107adf 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -190,7 +190,8 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
190 /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */ 190 /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
191 if (!((nvm_data & NVM_COMPAT_LOM) || 191 if (!((nvm_data & NVM_COMPAT_LOM) ||
192 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) || 192 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
193 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD))) 193 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) ||
194 (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES)))
194 goto out; 195 goto out;
195 196
196 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, 197 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
@@ -200,10 +201,10 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
200 goto out; 201 goto out;
201 } 202 }
202 203
203 if (nvm_alt_mac_addr_offset == 0xFFFF) { 204 if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
205 (nvm_alt_mac_addr_offset == 0x0000))
204 /* There is no Alternate MAC Address */ 206 /* There is no Alternate MAC Address */
205 goto out; 207 goto out;
206 }
207 208
208 if (hw->bus.func == E1000_FUNC_1) 209 if (hw->bus.func == E1000_FUNC_1)
209 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; 210 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
@@ -1986,6 +1987,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1986 /* Clear SK and CS */ 1987 /* Clear SK and CS */
1987 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); 1988 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
1988 ew32(EECD, eecd); 1989 ew32(EECD, eecd);
1990 e1e_flush();
1989 udelay(1); 1991 udelay(1);
1990 1992
1991 /* 1993 /*
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 4353ad56cf16..2198e615f241 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -31,12 +31,12 @@
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/types.h> 32#include <linux/types.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/interrupt.h>
35#include <linux/pci.h> 34#include <linux/pci.h>
36#include <linux/vmalloc.h> 35#include <linux/vmalloc.h>
37#include <linux/pagemap.h> 36#include <linux/pagemap.h>
38#include <linux/delay.h> 37#include <linux/delay.h>
39#include <linux/netdevice.h> 38#include <linux/netdevice.h>
39#include <linux/interrupt.h>
40#include <linux/tcp.h> 40#include <linux/tcp.h>
41#include <linux/ipv6.h> 41#include <linux/ipv6.h>
42#include <linux/slab.h> 42#include <linux/slab.h>
@@ -56,7 +56,7 @@
56 56
57#define DRV_EXTRAVERSION "-k" 57#define DRV_EXTRAVERSION "-k"
58 58
59#define DRV_VERSION "1.3.16" DRV_EXTRAVERSION 59#define DRV_VERSION "1.4.4" DRV_EXTRAVERSION
60char e1000e_driver_name[] = "e1000e"; 60char e1000e_driver_name[] = "e1000e";
61const char e1000e_driver_version[] = DRV_VERSION; 61const char e1000e_driver_version[] = DRV_VERSION;
62 62
@@ -519,6 +519,63 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
519} 519}
520 520
521/** 521/**
522 * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
523 * @hw: pointer to the HW structure
524 * @tail: address of tail descriptor register
525 * @i: value to write to tail descriptor register
526 *
527 * When updating the tail register, the ME could be accessing Host CSR
528 * registers at the same time. Normally, this is handled in h/w by an
529 * arbiter but on some parts there is a bug that acknowledges Host accesses
530 * later than it should which could result in the descriptor register to
531 * have an incorrect value. Workaround this by checking the FWSM register
532 * which has bit 24 set while ME is accessing Host CSR registers, wait
533 * if it is set and try again a number of times.
534 **/
535static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail,
536 unsigned int i)
537{
538 unsigned int j = 0;
539
540 while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
541 (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
542 udelay(50);
543
544 writel(i, tail);
545
546 if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
547 return E1000_ERR_SWFW_SYNC;
548
549 return 0;
550}
551
552static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
553{
554 u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail);
555 struct e1000_hw *hw = &adapter->hw;
556
557 if (e1000e_update_tail_wa(hw, tail, i)) {
558 u32 rctl = er32(RCTL);
559 ew32(RCTL, rctl & ~E1000_RCTL_EN);
560 e_err("ME firmware caused invalid RDT - resetting\n");
561 schedule_work(&adapter->reset_task);
562 }
563}
564
565static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
566{
567 u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail);
568 struct e1000_hw *hw = &adapter->hw;
569
570 if (e1000e_update_tail_wa(hw, tail, i)) {
571 u32 tctl = er32(TCTL);
572 ew32(TCTL, tctl & ~E1000_TCTL_EN);
573 e_err("ME firmware caused invalid TDT - resetting\n");
574 schedule_work(&adapter->reset_task);
575 }
576}
577
578/**
522 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended 579 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
523 * @adapter: address of board private structure 580 * @adapter: address of board private structure
524 **/ 581 **/
@@ -573,7 +630,10 @@ map_skb:
573 * such as IA-64). 630 * such as IA-64).
574 */ 631 */
575 wmb(); 632 wmb();
576 writel(i, adapter->hw.hw_addr + rx_ring->tail); 633 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
634 e1000e_update_rdt_wa(adapter, i);
635 else
636 writel(i, adapter->hw.hw_addr + rx_ring->tail);
577 } 637 }
578 i++; 638 i++;
579 if (i == rx_ring->count) 639 if (i == rx_ring->count)
@@ -673,7 +733,11 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
673 * such as IA-64). 733 * such as IA-64).
674 */ 734 */
675 wmb(); 735 wmb();
676 writel(i << 1, adapter->hw.hw_addr + rx_ring->tail); 736 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
737 e1000e_update_rdt_wa(adapter, i << 1);
738 else
739 writel(i << 1,
740 adapter->hw.hw_addr + rx_ring->tail);
677 } 741 }
678 742
679 i++; 743 i++;
@@ -756,7 +820,10 @@ check_page:
756 * applicable for weak-ordered memory model archs, 820 * applicable for weak-ordered memory model archs,
757 * such as IA-64). */ 821 * such as IA-64). */
758 wmb(); 822 wmb();
759 writel(i, adapter->hw.hw_addr + rx_ring->tail); 823 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
824 e1000e_update_rdt_wa(adapter, i);
825 else
826 writel(i, adapter->hw.hw_addr + rx_ring->tail);
760 } 827 }
761} 828}
762 829
@@ -2915,7 +2982,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2915 2982
2916 /* disable receives while setting up the descriptors */ 2983 /* disable receives while setting up the descriptors */
2917 rctl = er32(RCTL); 2984 rctl = er32(RCTL);
2918 ew32(RCTL, rctl & ~E1000_RCTL_EN); 2985 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
2986 ew32(RCTL, rctl & ~E1000_RCTL_EN);
2919 e1e_flush(); 2987 e1e_flush();
2920 usleep_range(10000, 20000); 2988 usleep_range(10000, 20000);
2921 2989
@@ -3394,7 +3462,8 @@ void e1000e_down(struct e1000_adapter *adapter)
3394 3462
3395 /* disable receives in the hardware */ 3463 /* disable receives in the hardware */
3396 rctl = er32(RCTL); 3464 rctl = er32(RCTL);
3397 ew32(RCTL, rctl & ~E1000_RCTL_EN); 3465 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3466 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3398 /* flush and sleep below */ 3467 /* flush and sleep below */
3399 3468
3400 netif_stop_queue(netdev); 3469 netif_stop_queue(netdev);
@@ -3403,6 +3472,7 @@ void e1000e_down(struct e1000_adapter *adapter)
3403 tctl = er32(TCTL); 3472 tctl = er32(TCTL);
3404 tctl &= ~E1000_TCTL_EN; 3473 tctl &= ~E1000_TCTL_EN;
3405 ew32(TCTL, tctl); 3474 ew32(TCTL, tctl);
3475
3406 /* flush both disables and wait for them to finish */ 3476 /* flush both disables and wait for them to finish */
3407 e1e_flush(); 3477 e1e_flush();
3408 usleep_range(10000, 20000); 3478 usleep_range(10000, 20000);
@@ -4686,7 +4756,12 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
4686 wmb(); 4756 wmb();
4687 4757
4688 tx_ring->next_to_use = i; 4758 tx_ring->next_to_use = i;
4689 writel(i, adapter->hw.hw_addr + tx_ring->tail); 4759
4760 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
4761 e1000e_update_tdt_wa(adapter, i);
4762 else
4763 writel(i, adapter->hw.hw_addr + tx_ring->tail);
4764
4690 /* 4765 /*
4691 * we need this if more than one processor can write to our tail 4766 * we need this if more than one processor can write to our tail
4692 * at a time, it synchronizes IO on IA64/Altix systems 4767 * at a time, it synchronizes IO on IA64/Altix systems
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 2a6ee13285b1..8666476cb9be 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -537,6 +537,7 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
537 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & 537 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
538 E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; 538 E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
539 ew32(KMRNCTRLSTA, kmrnctrlsta); 539 ew32(KMRNCTRLSTA, kmrnctrlsta);
540 e1e_flush();
540 541
541 udelay(2); 542 udelay(2);
542 543
@@ -609,6 +610,7 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
609 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & 610 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
610 E1000_KMRNCTRLSTA_OFFSET) | data; 611 E1000_KMRNCTRLSTA_OFFSET) | data;
611 ew32(KMRNCTRLSTA, kmrnctrlsta); 612 ew32(KMRNCTRLSTA, kmrnctrlsta);
613 e1e_flush();
612 614
613 udelay(2); 615 udelay(2);
614 616
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index e55df308a3af..6d5fbd4d4256 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5615,7 +5615,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5615 goto out_error; 5615 goto out_error;
5616 } 5616 }
5617 5617
5618 nv_vlan_mode(dev, dev->features); 5618 if (id->driver_data & DEV_HAS_VLAN)
5619 nv_vlan_mode(dev, dev->features);
5619 5620
5620 netif_carrier_off(dev); 5621 netif_carrier_off(dev);
5621 5622
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 2659daad783d..31d5c574e5a9 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2710,8 +2710,13 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2710 /* Tell the skb what kind of packet this is */ 2710 /* Tell the skb what kind of packet this is */
2711 skb->protocol = eth_type_trans(skb, dev); 2711 skb->protocol = eth_type_trans(skb, dev);
2712 2712
2713 /* Set vlan tag */ 2713 /*
2714 if (fcb->flags & RXFCB_VLN) 2714 * There's need to check for NETIF_F_HW_VLAN_RX here.
2715 * Even if vlan rx accel is disabled, on some chips
2716 * RXFCB_VLN is pseudo randomly set.
2717 */
2718 if (dev->features & NETIF_F_HW_VLAN_RX &&
2719 fcb->flags & RXFCB_VLN)
2715 __vlan_hwaccel_put_tag(skb, fcb->vlctl); 2720 __vlan_hwaccel_put_tag(skb, fcb->vlctl);
2716 2721
2717 /* Send the packet up the stack */ 2722 /* Send the packet up the stack */
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 6e350692d118..25a8c2adb001 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -686,10 +686,21 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
686{ 686{
687 unsigned int last_rule_idx = priv->cur_filer_idx; 687 unsigned int last_rule_idx = priv->cur_filer_idx;
688 unsigned int cmp_rqfpr; 688 unsigned int cmp_rqfpr;
689 unsigned int local_rqfpr[MAX_FILER_IDX + 1]; 689 unsigned int *local_rqfpr;
690 unsigned int local_rqfcr[MAX_FILER_IDX + 1]; 690 unsigned int *local_rqfcr;
691 int i = 0x0, k = 0x0; 691 int i = 0x0, k = 0x0;
692 int j = MAX_FILER_IDX, l = 0x0; 692 int j = MAX_FILER_IDX, l = 0x0;
693 int ret = 1;
694
695 local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
696 GFP_KERNEL);
697 local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
698 GFP_KERNEL);
699 if (!local_rqfpr || !local_rqfcr) {
700 pr_err("Out of memory\n");
701 ret = 0;
702 goto err;
703 }
693 704
694 switch (class) { 705 switch (class) {
695 case TCP_V4_FLOW: 706 case TCP_V4_FLOW:
@@ -706,7 +717,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
706 break; 717 break;
707 default: 718 default:
708 pr_err("Right now this class is not supported\n"); 719 pr_err("Right now this class is not supported\n");
709 return 0; 720 ret = 0;
721 goto err;
710 } 722 }
711 723
712 for (i = 0; i < MAX_FILER_IDX + 1; i++) { 724 for (i = 0; i < MAX_FILER_IDX + 1; i++) {
@@ -721,7 +733,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
721 733
722 if (i == MAX_FILER_IDX + 1) { 734 if (i == MAX_FILER_IDX + 1) {
723 pr_err("No parse rule found, can't create hash rules\n"); 735 pr_err("No parse rule found, can't create hash rules\n");
724 return 0; 736 ret = 0;
737 goto err;
725 } 738 }
726 739
727 /* If a match was found, then it begins the starting of a cluster rule 740 /* If a match was found, then it begins the starting of a cluster rule
@@ -765,7 +778,10 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
765 priv->cur_filer_idx = priv->cur_filer_idx - 1; 778 priv->cur_filer_idx = priv->cur_filer_idx - 1;
766 } 779 }
767 780
768 return 1; 781err:
782 kfree(local_rqfcr);
783 kfree(local_rqfpr);
784 return ret;
769} 785}
770 786
771static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd) 787static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
diff --git a/drivers/net/gianfar_ptp.c b/drivers/net/gianfar_ptp.c
index 1c97861596f0..f67b8aebc89c 100644
--- a/drivers/net/gianfar_ptp.c
+++ b/drivers/net/gianfar_ptp.c
@@ -193,14 +193,9 @@ static void set_alarm(struct etsects *etsects)
193/* Caller must hold etsects->lock. */ 193/* Caller must hold etsects->lock. */
194static void set_fipers(struct etsects *etsects) 194static void set_fipers(struct etsects *etsects)
195{ 195{
196 u32 tmr_ctrl = gfar_read(&etsects->regs->tmr_ctrl); 196 set_alarm(etsects);
197
198 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl & (~TE));
199 gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc);
200 gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); 197 gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
201 gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); 198 gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
202 set_alarm(etsects);
203 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|TE);
204} 199}
205 200
206/* 201/*
@@ -511,7 +506,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
511 gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); 506 gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
512 gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); 507 gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
513 set_alarm(etsects); 508 set_alarm(etsects);
514 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE); 509 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE|FRD);
515 510
516 spin_unlock_irqrestore(&etsects->lock, flags); 511 spin_unlock_irqrestore(&etsects->lock, flags);
517 512
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index ba99af05bf62..3e6679269400 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -395,7 +395,7 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
395} 395}
396 396
397/* recycle the current buffer on the rx queue */ 397/* recycle the current buffer on the rx queue */
398static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) 398static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
399{ 399{
400 u32 q_index = adapter->rx_queue.index; 400 u32 q_index = adapter->rx_queue.index;
401 u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator; 401 u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
@@ -403,6 +403,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
403 unsigned int index = correlator & 0xffffffffUL; 403 unsigned int index = correlator & 0xffffffffUL;
404 union ibmveth_buf_desc desc; 404 union ibmveth_buf_desc desc;
405 unsigned long lpar_rc; 405 unsigned long lpar_rc;
406 int ret = 1;
406 407
407 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); 408 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
408 BUG_ON(index >= adapter->rx_buff_pool[pool].size); 409 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
@@ -410,7 +411,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
410 if (!adapter->rx_buff_pool[pool].active) { 411 if (!adapter->rx_buff_pool[pool].active) {
411 ibmveth_rxq_harvest_buffer(adapter); 412 ibmveth_rxq_harvest_buffer(adapter);
412 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); 413 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
413 return; 414 goto out;
414 } 415 }
415 416
416 desc.fields.flags_len = IBMVETH_BUF_VALID | 417 desc.fields.flags_len = IBMVETH_BUF_VALID |
@@ -423,12 +424,16 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
423 netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed " 424 netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
424 "during recycle rc=%ld", lpar_rc); 425 "during recycle rc=%ld", lpar_rc);
425 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); 426 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
427 ret = 0;
426 } 428 }
427 429
428 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { 430 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
429 adapter->rx_queue.index = 0; 431 adapter->rx_queue.index = 0;
430 adapter->rx_queue.toggle = !adapter->rx_queue.toggle; 432 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
431 } 433 }
434
435out:
436 return ret;
432} 437}
433 438
434static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) 439static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
@@ -1084,8 +1089,9 @@ restart_poll:
1084 if (rx_flush) 1089 if (rx_flush)
1085 ibmveth_flush_buffer(skb->data, 1090 ibmveth_flush_buffer(skb->data,
1086 length + offset); 1091 length + offset);
1092 if (!ibmveth_rxq_recycle_buffer(adapter))
1093 kfree_skb(skb);
1087 skb = new_skb; 1094 skb = new_skb;
1088 ibmveth_rxq_recycle_buffer(adapter);
1089 } else { 1095 } else {
1090 ibmveth_rxq_harvest_buffer(adapter); 1096 ibmveth_rxq_harvest_buffer(adapter);
1091 skb_reserve(skb, offset); 1097 skb_reserve(skb, offset);
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index 7dcd65cede56..40407124e722 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -285,6 +285,7 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
285 /* Clear SK and CS */ 285 /* Clear SK and CS */
286 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); 286 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
287 wr32(E1000_EECD, eecd); 287 wr32(E1000_EECD, eecd);
288 wrfl();
288 udelay(1); 289 udelay(1);
289 timeout = NVM_MAX_RETRY_SPI; 290 timeout = NVM_MAX_RETRY_SPI;
290 291
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index ff244ce803ce..414b0225be89 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1225,6 +1225,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1225 1225
1226 /* Disable all the interrupts */ 1226 /* Disable all the interrupts */
1227 wr32(E1000_IMC, ~0); 1227 wr32(E1000_IMC, ~0);
1228 wrfl();
1228 msleep(10); 1229 msleep(10);
1229 1230
1230 /* Define all writable bits for ICS */ 1231 /* Define all writable bits for ICS */
@@ -1268,6 +1269,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1268 1269
1269 wr32(E1000_IMC, mask); 1270 wr32(E1000_IMC, mask);
1270 wr32(E1000_ICS, mask); 1271 wr32(E1000_ICS, mask);
1272 wrfl();
1271 msleep(10); 1273 msleep(10);
1272 1274
1273 if (adapter->test_icr & mask) { 1275 if (adapter->test_icr & mask) {
@@ -1289,6 +1291,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1289 1291
1290 wr32(E1000_IMS, mask); 1292 wr32(E1000_IMS, mask);
1291 wr32(E1000_ICS, mask); 1293 wr32(E1000_ICS, mask);
1294 wrfl();
1292 msleep(10); 1295 msleep(10);
1293 1296
1294 if (!(adapter->test_icr & mask)) { 1297 if (!(adapter->test_icr & mask)) {
@@ -1310,6 +1313,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1310 1313
1311 wr32(E1000_IMC, ~mask); 1314 wr32(E1000_IMC, ~mask);
1312 wr32(E1000_ICS, ~mask); 1315 wr32(E1000_ICS, ~mask);
1316 wrfl();
1313 msleep(10); 1317 msleep(10);
1314 1318
1315 if (adapter->test_icr & mask) { 1319 if (adapter->test_icr & mask) {
@@ -1321,6 +1325,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1321 1325
1322 /* Disable all the interrupts */ 1326 /* Disable all the interrupts */
1323 wr32(E1000_IMC, ~0); 1327 wr32(E1000_IMC, ~0);
1328 wrfl();
1324 msleep(10); 1329 msleep(10);
1325 1330
1326 /* Unhook test interrupt handler */ 1331 /* Unhook test interrupt handler */
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index dc599059512a..40d4c405fd7e 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1052,6 +1052,7 @@ msi_only:
1052 kfree(adapter->vf_data); 1052 kfree(adapter->vf_data);
1053 adapter->vf_data = NULL; 1053 adapter->vf_data = NULL;
1054 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); 1054 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1055 wrfl();
1055 msleep(100); 1056 msleep(100);
1056 dev_info(&adapter->pdev->dev, "IOV Disabled\n"); 1057 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
1057 } 1058 }
@@ -2022,7 +2023,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2022 2023
2023 if (hw->bus.func == 0) 2024 if (hw->bus.func == 0)
2024 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 2025 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
2025 else if (hw->mac.type == e1000_82580) 2026 else if (hw->mac.type >= e1000_82580)
2026 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + 2027 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2027 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, 2028 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2028 &eeprom_data); 2029 &eeprom_data);
@@ -2198,6 +2199,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
2198 kfree(adapter->vf_data); 2199 kfree(adapter->vf_data);
2199 adapter->vf_data = NULL; 2200 adapter->vf_data = NULL;
2200 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); 2201 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
2202 wrfl();
2201 msleep(100); 2203 msleep(100);
2202 dev_info(&pdev->dev, "IOV Disabled\n"); 2204 dev_info(&pdev->dev, "IOV Disabled\n");
2203 } 2205 }
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 1330c8e932da..40ed066e3ef4 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1226,6 +1226,7 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter)
1226 /* disable transmits */ 1226 /* disable transmits */
1227 txdctl = er32(TXDCTL(0)); 1227 txdctl = er32(TXDCTL(0));
1228 ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); 1228 ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
1229 e1e_flush();
1229 msleep(10); 1230 msleep(10);
1230 1231
1231 /* Setup the HW Tx Head and Tail descriptor pointers */ 1232 /* Setup the HW Tx Head and Tail descriptor pointers */
@@ -1306,6 +1307,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
1306 /* disable receives */ 1307 /* disable receives */
1307 rxdctl = er32(RXDCTL(0)); 1308 rxdctl = er32(RXDCTL(0));
1308 ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); 1309 ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
1310 e1e_flush();
1309 msleep(10); 1311 msleep(10);
1310 1312
1311 rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); 1313 rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 4488bd581eca..82660672dcd9 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -22,6 +22,8 @@
22 * - DMA transfer support 22 * - DMA transfer support
23 * - FIFO mode support 23 * - FIFO mode support
24 */ 24 */
25#include <linux/io.h>
26#include <linux/interrupt.h>
25#include <linux/module.h> 27#include <linux/module.h>
26#include <linux/platform_device.h> 28#include <linux/platform_device.h>
27#include <linux/clk.h> 29#include <linux/clk.h>
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 52a7c86af663..ed7d7d62bf68 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -12,6 +12,8 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14 14
15#include <linux/io.h>
16#include <linux/interrupt.h>
15#include <linux/module.h> 17#include <linux/module.h>
16#include <linux/platform_device.h> 18#include <linux/platform_device.h>
17#include <linux/slab.h> 19#include <linux/slab.h>
@@ -511,7 +513,7 @@ static void sh_sir_tx(struct sh_sir_self *self, int phase)
511 513
512static int sh_sir_read_data(struct sh_sir_self *self) 514static int sh_sir_read_data(struct sh_sir_self *self)
513{ 515{
514 u16 val; 516 u16 val = 0;
515 int timeout = 1024; 517 int timeout = 1024;
516 518
517 while (timeout--) { 519 while (timeout--) {
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 954f6e938fb7..8b1c3484d271 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -2405,8 +2405,6 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
2405 * addresses making a subsystem device table necessary. 2405 * addresses making a subsystem device table necessary.
2406 */ 2406 */
2407#ifdef CONFIG_PCI 2407#ifdef CONFIG_PCI
2408#define PCIID_VENDOR_INTEL 0x8086
2409#define PCIID_VENDOR_ALI 0x10b9
2410static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = { 2408static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = {
2411 /* 2409 /*
2412 * Subsystems needing entries: 2410 * Subsystems needing entries:
@@ -2416,7 +2414,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
2416 */ 2414 */
2417 { 2415 {
2418 /* Guessed entry */ 2416 /* Guessed entry */
2419 .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */ 2417 .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
2420 .device = 0x24cc, 2418 .device = 0x24cc,
2421 .subvendor = 0x103c, 2419 .subvendor = 0x103c,
2422 .subdevice = 0x08bc, 2420 .subdevice = 0x08bc,
@@ -2429,7 +2427,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
2429 .name = "HP nx5000 family", 2427 .name = "HP nx5000 family",
2430 }, 2428 },
2431 { 2429 {
2432 .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */ 2430 .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
2433 .device = 0x24cc, 2431 .device = 0x24cc,
2434 .subvendor = 0x103c, 2432 .subvendor = 0x103c,
2435 .subdevice = 0x088c, 2433 .subdevice = 0x088c,
@@ -2443,7 +2441,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
2443 .name = "HP nc8000 family", 2441 .name = "HP nc8000 family",
2444 }, 2442 },
2445 { 2443 {
2446 .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */ 2444 .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
2447 .device = 0x24cc, 2445 .device = 0x24cc,
2448 .subvendor = 0x103c, 2446 .subvendor = 0x103c,
2449 .subdevice = 0x0890, 2447 .subdevice = 0x0890,
@@ -2456,7 +2454,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
2456 .name = "HP nc6000 family", 2454 .name = "HP nc6000 family",
2457 }, 2455 },
2458 { 2456 {
2459 .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */ 2457 .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
2460 .device = 0x24cc, 2458 .device = 0x24cc,
2461 .subvendor = 0x0e11, 2459 .subvendor = 0x0e11,
2462 .subdevice = 0x0860, 2460 .subdevice = 0x0860,
@@ -2471,7 +2469,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
2471 }, 2469 },
2472 { 2470 {
2473 /* Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge */ 2471 /* Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge */
2474 .vendor = PCIID_VENDOR_INTEL, 2472 .vendor = PCI_VENDOR_ID_INTEL,
2475 .device = 0x24c0, 2473 .device = 0x24c0,
2476 .subvendor = 0x1179, 2474 .subvendor = 0x1179,
2477 .subdevice = 0xffff, /* 0xffff is "any" */ 2475 .subdevice = 0xffff, /* 0xffff is "any" */
@@ -2484,7 +2482,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
2484 .name = "Toshiba laptop with Intel 82801DB/DBL LPC bridge", 2482 .name = "Toshiba laptop with Intel 82801DB/DBL LPC bridge",
2485 }, 2483 },
2486 { 2484 {
2487 .vendor = PCIID_VENDOR_INTEL, /* Intel 82801CAM ISA bridge */ 2485 .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801CAM ISA bridge */
2488 .device = 0x248c, 2486 .device = 0x248c,
2489 .subvendor = 0x1179, 2487 .subvendor = 0x1179,
2490 .subdevice = 0xffff, /* 0xffff is "any" */ 2488 .subdevice = 0xffff, /* 0xffff is "any" */
@@ -2498,7 +2496,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
2498 }, 2496 },
2499 { 2497 {
2500 /* 82801DBM (ICH4-M) LPC Interface Bridge */ 2498 /* 82801DBM (ICH4-M) LPC Interface Bridge */
2501 .vendor = PCIID_VENDOR_INTEL, 2499 .vendor = PCI_VENDOR_ID_INTEL,
2502 .device = 0x24cc, 2500 .device = 0x24cc,
2503 .subvendor = 0x1179, 2501 .subvendor = 0x1179,
2504 .subdevice = 0xffff, /* 0xffff is "any" */ 2502 .subdevice = 0xffff, /* 0xffff is "any" */
@@ -2512,7 +2510,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
2512 }, 2510 },
2513 { 2511 {
2514 /* ALi M1533/M1535 PCI to ISA Bridge [Aladdin IV/V/V+] */ 2512 /* ALi M1533/M1535 PCI to ISA Bridge [Aladdin IV/V/V+] */
2515 .vendor = PCIID_VENDOR_ALI, 2513 .vendor = PCI_VENDOR_ID_AL,
2516 .device = 0x1533, 2514 .device = 0x1533,
2517 .subvendor = 0x1179, 2515 .subvendor = 0x1179,
2518 .subdevice = 0xffff, /* 0xffff is "any" */ 2516 .subdevice = 0xffff, /* 0xffff is "any" */
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index c982ab9f9005..38b362b67857 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -57,6 +57,7 @@ ixgb_raise_clock(struct ixgb_hw *hw,
57 */ 57 */
58 *eecd_reg = *eecd_reg | IXGB_EECD_SK; 58 *eecd_reg = *eecd_reg | IXGB_EECD_SK;
59 IXGB_WRITE_REG(hw, EECD, *eecd_reg); 59 IXGB_WRITE_REG(hw, EECD, *eecd_reg);
60 IXGB_WRITE_FLUSH(hw);
60 udelay(50); 61 udelay(50);
61} 62}
62 63
@@ -75,6 +76,7 @@ ixgb_lower_clock(struct ixgb_hw *hw,
75 */ 76 */
76 *eecd_reg = *eecd_reg & ~IXGB_EECD_SK; 77 *eecd_reg = *eecd_reg & ~IXGB_EECD_SK;
77 IXGB_WRITE_REG(hw, EECD, *eecd_reg); 78 IXGB_WRITE_REG(hw, EECD, *eecd_reg);
79 IXGB_WRITE_FLUSH(hw);
78 udelay(50); 80 udelay(50);
79} 81}
80 82
@@ -112,6 +114,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
112 eecd_reg |= IXGB_EECD_DI; 114 eecd_reg |= IXGB_EECD_DI;
113 115
114 IXGB_WRITE_REG(hw, EECD, eecd_reg); 116 IXGB_WRITE_REG(hw, EECD, eecd_reg);
117 IXGB_WRITE_FLUSH(hw);
115 118
116 udelay(50); 119 udelay(50);
117 120
@@ -206,21 +209,25 @@ ixgb_standby_eeprom(struct ixgb_hw *hw)
206 /* Deselect EEPROM */ 209 /* Deselect EEPROM */
207 eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK); 210 eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK);
208 IXGB_WRITE_REG(hw, EECD, eecd_reg); 211 IXGB_WRITE_REG(hw, EECD, eecd_reg);
212 IXGB_WRITE_FLUSH(hw);
209 udelay(50); 213 udelay(50);
210 214
211 /* Clock high */ 215 /* Clock high */
212 eecd_reg |= IXGB_EECD_SK; 216 eecd_reg |= IXGB_EECD_SK;
213 IXGB_WRITE_REG(hw, EECD, eecd_reg); 217 IXGB_WRITE_REG(hw, EECD, eecd_reg);
218 IXGB_WRITE_FLUSH(hw);
214 udelay(50); 219 udelay(50);
215 220
216 /* Select EEPROM */ 221 /* Select EEPROM */
217 eecd_reg |= IXGB_EECD_CS; 222 eecd_reg |= IXGB_EECD_CS;
218 IXGB_WRITE_REG(hw, EECD, eecd_reg); 223 IXGB_WRITE_REG(hw, EECD, eecd_reg);
224 IXGB_WRITE_FLUSH(hw);
219 udelay(50); 225 udelay(50);
220 226
221 /* Clock low */ 227 /* Clock low */
222 eecd_reg &= ~IXGB_EECD_SK; 228 eecd_reg &= ~IXGB_EECD_SK;
223 IXGB_WRITE_REG(hw, EECD, eecd_reg); 229 IXGB_WRITE_REG(hw, EECD, eecd_reg);
230 IXGB_WRITE_FLUSH(hw);
224 udelay(50); 231 udelay(50);
225} 232}
226 233
@@ -239,11 +246,13 @@ ixgb_clock_eeprom(struct ixgb_hw *hw)
239 /* Rising edge of clock */ 246 /* Rising edge of clock */
240 eecd_reg |= IXGB_EECD_SK; 247 eecd_reg |= IXGB_EECD_SK;
241 IXGB_WRITE_REG(hw, EECD, eecd_reg); 248 IXGB_WRITE_REG(hw, EECD, eecd_reg);
249 IXGB_WRITE_FLUSH(hw);
242 udelay(50); 250 udelay(50);
243 251
244 /* Falling edge of clock */ 252 /* Falling edge of clock */
245 eecd_reg &= ~IXGB_EECD_SK; 253 eecd_reg &= ~IXGB_EECD_SK;
246 IXGB_WRITE_REG(hw, EECD, eecd_reg); 254 IXGB_WRITE_REG(hw, EECD, eecd_reg);
255 IXGB_WRITE_FLUSH(hw);
247 udelay(50); 256 udelay(50);
248} 257}
249 258
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 6cb2e42ff4c1..3d61a9e4faf7 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -149,6 +149,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
149 */ 149 */
150 IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN); 150 IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN);
151 IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN); 151 IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN);
152 IXGB_WRITE_FLUSH(hw);
152 msleep(IXGB_DELAY_BEFORE_RESET); 153 msleep(IXGB_DELAY_BEFORE_RESET);
153 154
154 /* Issue a global reset to the MAC. This will reset the chip's 155 /* Issue a global reset to the MAC. This will reset the chip's
@@ -1220,6 +1221,7 @@ ixgb_optics_reset_bcm(struct ixgb_hw *hw)
1220 ctrl &= ~IXGB_CTRL0_SDP2; 1221 ctrl &= ~IXGB_CTRL0_SDP2;
1221 ctrl |= IXGB_CTRL0_SDP3; 1222 ctrl |= IXGB_CTRL0_SDP3;
1222 IXGB_WRITE_REG(hw, CTRL0, ctrl); 1223 IXGB_WRITE_REG(hw, CTRL0, ctrl);
1224 IXGB_WRITE_FLUSH(hw);
1223 1225
1224 /* SerDes needs extra delay */ 1226 /* SerDes needs extra delay */
1225 msleep(IXGB_SUN_PHY_RESET_DELAY); 1227 msleep(IXGB_SUN_PHY_RESET_DELAY);
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 3b3dd4df4c5c..34f30ec79c2e 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -213,6 +213,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
213 switch (hw->phy.type) { 213 switch (hw->phy.type) {
214 case ixgbe_phy_tn: 214 case ixgbe_phy_tn:
215 phy->ops.check_link = &ixgbe_check_phy_link_tnx; 215 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
216 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
216 phy->ops.get_firmware_version = 217 phy->ops.get_firmware_version =
217 &ixgbe_get_phy_firmware_version_tnx; 218 &ixgbe_get_phy_firmware_version_tnx;
218 break; 219 break;
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 777051f54e53..fc1375f26fe5 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -2632,6 +2632,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2632 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2632 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2633 autoc_reg |= IXGBE_AUTOC_FLU; 2633 autoc_reg |= IXGBE_AUTOC_FLU;
2634 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2634 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2635 IXGBE_WRITE_FLUSH(hw);
2635 usleep_range(10000, 20000); 2636 usleep_range(10000, 20000);
2636 } 2637 }
2637 2638
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index dc649553a0a6..82d4244c6e10 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1378,6 +1378,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1378 1378
1379 /* Disable all the interrupts */ 1379 /* Disable all the interrupts */
1380 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1380 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1381 IXGBE_WRITE_FLUSH(&adapter->hw);
1381 usleep_range(10000, 20000); 1382 usleep_range(10000, 20000);
1382 1383
1383 /* Test each interrupt */ 1384 /* Test each interrupt */
@@ -1398,6 +1399,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1398 ~mask & 0x00007FFF); 1399 ~mask & 0x00007FFF);
1399 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1400 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1400 ~mask & 0x00007FFF); 1401 ~mask & 0x00007FFF);
1402 IXGBE_WRITE_FLUSH(&adapter->hw);
1401 usleep_range(10000, 20000); 1403 usleep_range(10000, 20000);
1402 1404
1403 if (adapter->test_icr & mask) { 1405 if (adapter->test_icr & mask) {
@@ -1415,6 +1417,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1415 adapter->test_icr = 0; 1417 adapter->test_icr = 0;
1416 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1418 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1417 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 1419 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1420 IXGBE_WRITE_FLUSH(&adapter->hw);
1418 usleep_range(10000, 20000); 1421 usleep_range(10000, 20000);
1419 1422
1420 if (!(adapter->test_icr &mask)) { 1423 if (!(adapter->test_icr &mask)) {
@@ -1435,6 +1438,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1435 ~mask & 0x00007FFF); 1438 ~mask & 0x00007FFF);
1436 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1439 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1437 ~mask & 0x00007FFF); 1440 ~mask & 0x00007FFF);
1441 IXGBE_WRITE_FLUSH(&adapter->hw);
1438 usleep_range(10000, 20000); 1442 usleep_range(10000, 20000);
1439 1443
1440 if (adapter->test_icr) { 1444 if (adapter->test_icr) {
@@ -1446,6 +1450,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1446 1450
1447 /* Disable all the interrupts */ 1451 /* Disable all the interrupts */
1448 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1452 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1453 IXGBE_WRITE_FLUSH(&adapter->hw);
1449 usleep_range(10000, 20000); 1454 usleep_range(10000, 20000);
1450 1455
1451 /* Unhook test interrupt handler */ 1456 /* Unhook test interrupt handler */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 1be617545dc9..22790394318a 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -184,6 +184,7 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
184 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 184 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
185 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; 185 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
186 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); 186 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
187 IXGBE_WRITE_FLUSH(hw);
187 188
188 /* take a breather then clean up driver data */ 189 /* take a breather then clean up driver data */
189 msleep(100); 190 msleep(100);
@@ -1005,7 +1006,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
1005 struct ixgbe_adapter *adapter = dev_get_drvdata(dev); 1006 struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
1006 unsigned long event = *(unsigned long *)data; 1007 unsigned long event = *(unsigned long *)data;
1007 1008
1008 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) 1009 if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
1009 return 0; 1010 return 0;
1010 1011
1011 switch (event) { 1012 switch (event) {
@@ -1458,8 +1459,10 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1458 if (ixgbe_rx_is_fcoe(adapter, rx_desc)) { 1459 if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
1459 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb, 1460 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb,
1460 staterr); 1461 staterr);
1461 if (!ddp_bytes) 1462 if (!ddp_bytes) {
1463 dev_kfree_skb_any(skb);
1462 goto next_desc; 1464 goto next_desc;
1465 }
1463 } 1466 }
1464#endif /* IXGBE_FCOE */ 1467#endif /* IXGBE_FCOE */
1465 ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); 1468 ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 735f686c3b36..f7ca3511b9fe 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1585,6 +1585,7 @@ static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
1585 *i2cctl |= IXGBE_I2C_CLK_OUT; 1585 *i2cctl |= IXGBE_I2C_CLK_OUT;
1586 1586
1587 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); 1587 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
1588 IXGBE_WRITE_FLUSH(hw);
1588 1589
1589 /* SCL rise time (1000ns) */ 1590 /* SCL rise time (1000ns) */
1590 udelay(IXGBE_I2C_T_RISE); 1591 udelay(IXGBE_I2C_T_RISE);
@@ -1605,6 +1606,7 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
1605 *i2cctl &= ~IXGBE_I2C_CLK_OUT; 1606 *i2cctl &= ~IXGBE_I2C_CLK_OUT;
1606 1607
1607 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); 1608 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
1609 IXGBE_WRITE_FLUSH(hw);
1608 1610
1609 /* SCL fall time (300ns) */ 1611 /* SCL fall time (300ns) */
1610 udelay(IXGBE_I2C_T_FALL); 1612 udelay(IXGBE_I2C_T_FALL);
@@ -1628,6 +1630,7 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
1628 *i2cctl &= ~IXGBE_I2C_DATA_OUT; 1630 *i2cctl &= ~IXGBE_I2C_DATA_OUT;
1629 1631
1630 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); 1632 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
1633 IXGBE_WRITE_FLUSH(hw);
1631 1634
1632 /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ 1635 /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
1633 udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); 1636 udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index bec30ed91adc..2696c78e9f46 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -162,6 +162,7 @@ mac_reset_top:
162 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 162 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
163 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 163 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
164 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 164 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
165 IXGBE_WRITE_FLUSH(hw);
165 166
166 msleep(50); 167 msleep(50);
167 168
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 0fcdc25699d8..dc4e305a1087 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -322,6 +322,9 @@ static void macb_tx(struct macb *bp)
322 for (i = 0; i < TX_RING_SIZE; i++) 322 for (i = 0; i < TX_RING_SIZE; i++)
323 bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); 323 bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
324 324
325 /* Add wrap bit */
326 bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
327
325 /* free transmit buffer in upper layer*/ 328 /* free transmit buffer in upper layer*/
326 for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { 329 for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
327 struct ring_info *rp = &bp->tx_skb[tail]; 330 struct ring_info *rp = &bp->tx_skb[tail];
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index 5e7109178061..5ada5b469112 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -128,7 +128,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
128 memset(context, 0, sizeof *context); 128 memset(context, 0, sizeof *context);
129 129
130 context->base_qpn = cpu_to_be32(base_qpn); 130 context->base_qpn = cpu_to_be32(base_qpn);
131 context->n_mac = 0x7; 131 context->n_mac = 0x2;
132 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | 132 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
133 base_qpn); 133 base_qpn);
134 context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT | 134 context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index c94b3426d355..f0ee35df4dd7 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1117,6 +1117,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
1117 info->port = port; 1117 info->port = port;
1118 mlx4_init_mac_table(dev, &info->mac_table); 1118 mlx4_init_mac_table(dev, &info->mac_table);
1119 mlx4_init_vlan_table(dev, &info->vlan_table); 1119 mlx4_init_vlan_table(dev, &info->vlan_table);
1120 info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
1121 (port - 1) * (1 << log_num_mac);
1120 1122
1121 sprintf(info->dev_name, "mlx4_port%d", port); 1123 sprintf(info->dev_name, "mlx4_port%d", port);
1122 info->port_attr.attr.name = info->dev_name; 1124 info->port_attr.attr.name = info->dev_name;
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 1f95afda6841..609e0ec14cee 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -258,9 +258,12 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
258 if (validate_index(dev, table, index)) 258 if (validate_index(dev, table, index))
259 goto out; 259 goto out;
260 260
261 table->entries[index] = 0; 261 /* Check whether this address has reference count */
262 mlx4_set_port_mac_table(dev, port, table->entries); 262 if (!(--table->refs[index])) {
263 --table->total; 263 table->entries[index] = 0;
264 mlx4_set_port_mac_table(dev, port, table->entries);
265 --table->total;
266 }
264out: 267out:
265 mutex_unlock(&table->mutex); 268 mutex_unlock(&table->mutex);
266} 269}
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index cd6c2317e29e..ed47585a6862 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -9201,7 +9201,7 @@ static int __devinit niu_ldg_init(struct niu *np)
9201 9201
9202 first_chan = 0; 9202 first_chan = 0;
9203 for (i = 0; i < port; i++) 9203 for (i = 0; i < port; i++)
9204 first_chan += parent->rxchan_per_port[port]; 9204 first_chan += parent->rxchan_per_port[i];
9205 num_chan = parent->rxchan_per_port[port]; 9205 num_chan = parent->rxchan_per_port[port];
9206 9206
9207 for (i = first_chan; i < (first_chan + num_chan); i++) { 9207 for (i = first_chan; i < (first_chan + num_chan); i++) {
@@ -9217,7 +9217,7 @@ static int __devinit niu_ldg_init(struct niu *np)
9217 9217
9218 first_chan = 0; 9218 first_chan = 0;
9219 for (i = 0; i < port; i++) 9219 for (i = 0; i < port; i++)
9220 first_chan += parent->txchan_per_port[port]; 9220 first_chan += parent->txchan_per_port[i];
9221 num_chan = parent->txchan_per_port[port]; 9221 num_chan = parent->txchan_per_port[port];
9222 for (i = first_chan; i < (first_chan + num_chan); i++) { 9222 for (i = first_chan; i < (first_chan + num_chan); i++) {
9223 err = niu_ldg_assign_ldn(np, parent, 9223 err = niu_ldg_assign_ldn(np, parent,
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 1cd9394c3359..cffbc0373fa9 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -809,7 +809,7 @@ static int smc91c92_config(struct pcmcia_device *link)
809 struct net_device *dev = link->priv; 809 struct net_device *dev = link->priv;
810 struct smc_private *smc = netdev_priv(dev); 810 struct smc_private *smc = netdev_priv(dev);
811 char *name; 811 char *name;
812 int i, j, rev; 812 int i, rev, j = 0;
813 unsigned int ioaddr; 813 unsigned int ioaddr;
814 u_long mir; 814 u_long mir;
815 815
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 8b3090dc4bcd..80b6f36a8074 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -82,7 +82,7 @@ static int cards_found;
82/* 82/*
83 * VLB I/O addresses 83 * VLB I/O addresses
84 */ 84 */
85static unsigned int pcnet32_portlist[] __initdata = 85static unsigned int pcnet32_portlist[] =
86 { 0x300, 0x320, 0x340, 0x360, 0 }; 86 { 0x300, 0x320, 0x340, 0x360, 0 };
87 87
88static int pcnet32_debug; 88static int pcnet32_debug;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 2cd8dc5847b4..cb6e0b486b1e 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -34,8 +34,7 @@
34#define PAGESEL 0x13 34#define PAGESEL 0x13
35#define LAYER4 0x02 35#define LAYER4 0x02
36#define LAYER2 0x01 36#define LAYER2 0x01
37#define MAX_RXTS 4 37#define MAX_RXTS 64
38#define MAX_TXTS 4
39#define N_EXT_TS 1 38#define N_EXT_TS 1
40#define PSF_PTPVER 2 39#define PSF_PTPVER 2
41#define PSF_EVNT 0x4000 40#define PSF_EVNT 0x4000
@@ -218,7 +217,7 @@ static void phy2rxts(struct phy_rxts *p, struct rxts *rxts)
218 rxts->seqid = p->seqid; 217 rxts->seqid = p->seqid;
219 rxts->msgtype = (p->msgtype >> 12) & 0xf; 218 rxts->msgtype = (p->msgtype >> 12) & 0xf;
220 rxts->hash = p->msgtype & 0x0fff; 219 rxts->hash = p->msgtype & 0x0fff;
221 rxts->tmo = jiffies + HZ; 220 rxts->tmo = jiffies + 2;
222} 221}
223 222
224static u64 phy2txts(struct phy_txts *p) 223static u64 phy2txts(struct phy_txts *p)
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 0620ba963508..04bb8fcc0cb5 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -25,8 +25,9 @@
25/* DP83865 phy identifier values */ 25/* DP83865 phy identifier values */
26#define DP83865_PHY_ID 0x20005c7a 26#define DP83865_PHY_ID 0x20005c7a
27 27
28#define DP83865_INT_MASK_REG 0x15 28#define DP83865_INT_STATUS 0x14
29#define DP83865_INT_MASK_STATUS 0x14 29#define DP83865_INT_MASK 0x15
30#define DP83865_INT_CLEAR 0x17
30 31
31#define DP83865_INT_REMOTE_FAULT 0x0008 32#define DP83865_INT_REMOTE_FAULT 0x0008
32#define DP83865_INT_ANE_COMPLETED 0x0010 33#define DP83865_INT_ANE_COMPLETED 0x0010
@@ -68,21 +69,25 @@ static int ns_config_intr(struct phy_device *phydev)
68 int err; 69 int err;
69 70
70 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) 71 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
71 err = phy_write(phydev, DP83865_INT_MASK_REG, 72 err = phy_write(phydev, DP83865_INT_MASK,
72 DP83865_INT_MASK_DEFAULT); 73 DP83865_INT_MASK_DEFAULT);
73 else 74 else
74 err = phy_write(phydev, DP83865_INT_MASK_REG, 0); 75 err = phy_write(phydev, DP83865_INT_MASK, 0);
75 76
76 return err; 77 return err;
77} 78}
78 79
79static int ns_ack_interrupt(struct phy_device *phydev) 80static int ns_ack_interrupt(struct phy_device *phydev)
80{ 81{
81 int ret = phy_read(phydev, DP83865_INT_MASK_STATUS); 82 int ret = phy_read(phydev, DP83865_INT_STATUS);
82 if (ret < 0) 83 if (ret < 0)
83 return ret; 84 return ret;
84 85
85 return 0; 86 /* Clear the interrupt status bit by writing a “1”
87 * to the corresponding bit in INT_CLEAR (2:0 are reserved) */
88 ret = phy_write(phydev, DP83865_INT_CLEAR, ret & ~0x7);
89
90 return ret;
86} 91}
87 92
88static void ns_giga_speed_fallback(struct phy_device *phydev, int mode) 93static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 7d9c650f395e..02339b3352e7 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -239,6 +239,7 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
239 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, 239 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
240 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, 240 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
241 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, 241 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
242 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
242 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, 243 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
243 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 }, 244 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
244 { PCI_VENDOR_ID_LINKSYS, 0x1032, 245 { PCI_VENDOR_ID_LINKSYS, 0x1032,
@@ -1091,6 +1092,21 @@ rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type)
1091 rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type); 1092 rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type);
1092} 1093}
1093 1094
1095struct exgmac_reg {
1096 u16 addr;
1097 u16 mask;
1098 u32 val;
1099};
1100
1101static void rtl_write_exgmac_batch(void __iomem *ioaddr,
1102 const struct exgmac_reg *r, int len)
1103{
1104 while (len-- > 0) {
1105 rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1106 r++;
1107 }
1108}
1109
1094static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr) 1110static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
1095{ 1111{
1096 u8 value = 0xff; 1112 u8 value = 0xff;
@@ -3116,6 +3132,18 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
3116 RTL_W32(MAC0, low); 3132 RTL_W32(MAC0, low);
3117 RTL_R32(MAC0); 3133 RTL_R32(MAC0);
3118 3134
3135 if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
3136 const struct exgmac_reg e[] = {
3137 { .addr = 0xe0, ERIAR_MASK_1111, .val = low },
3138 { .addr = 0xe4, ERIAR_MASK_1111, .val = high },
3139 { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
3140 { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
3141 low >> 16 },
3142 };
3143
3144 rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
3145 }
3146
3119 RTL_W8(Cfg9346, Cfg9346_Lock); 3147 RTL_W8(Cfg9346, Cfg9346_Lock);
3120 3148
3121 spin_unlock_irq(&tp->lock); 3149 spin_unlock_irq(&tp->lock);
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 86ac38c96bcf..3bb131137033 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -80,13 +80,13 @@ static int rionet_capable = 1;
80 */ 80 */
81static struct rio_dev **rionet_active; 81static struct rio_dev **rionet_active;
82 82
83#define is_rionet_capable(pef, src_ops, dst_ops) \ 83#define is_rionet_capable(src_ops, dst_ops) \
84 ((pef & RIO_PEF_INB_MBOX) && \ 84 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
85 (pef & RIO_PEF_INB_DOORBELL) && \ 85 (dst_ops & RIO_DST_OPS_DATA_MSG) && \
86 (src_ops & RIO_SRC_OPS_DOORBELL) && \ 86 (src_ops & RIO_SRC_OPS_DOORBELL) && \
87 (dst_ops & RIO_DST_OPS_DOORBELL)) 87 (dst_ops & RIO_DST_OPS_DOORBELL))
88#define dev_rionet_capable(dev) \ 88#define dev_rionet_capable(dev) \
89 is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops) 89 is_rionet_capable(dev->src_ops, dev->dst_ops)
90 90
91#define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) 91#define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001)
92#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) 92#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4))
@@ -282,7 +282,6 @@ static int rionet_open(struct net_device *ndev)
282{ 282{
283 int i, rc = 0; 283 int i, rc = 0;
284 struct rionet_peer *peer, *tmp; 284 struct rionet_peer *peer, *tmp;
285 u32 pwdcsr;
286 struct rionet_private *rnet = netdev_priv(ndev); 285 struct rionet_private *rnet = netdev_priv(ndev);
287 286
288 if (netif_msg_ifup(rnet)) 287 if (netif_msg_ifup(rnet))
@@ -332,13 +331,8 @@ static int rionet_open(struct net_device *ndev)
332 continue; 331 continue;
333 } 332 }
334 333
335 /* 334 /* Send a join message */
336 * If device has initialized inbound doorbells, 335 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
337 * send a join message
338 */
339 rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr);
340 if (pwdcsr & RIO_DOORBELL_AVAIL)
341 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
342 } 336 }
343 337
344 out: 338 out:
@@ -492,7 +486,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
492static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) 486static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
493{ 487{
494 int rc = -ENODEV; 488 int rc = -ENODEV;
495 u32 lpef, lsrc_ops, ldst_ops; 489 u32 lsrc_ops, ldst_ops;
496 struct rionet_peer *peer; 490 struct rionet_peer *peer;
497 struct net_device *ndev = NULL; 491 struct net_device *ndev = NULL;
498 492
@@ -515,12 +509,11 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
515 * on later probes 509 * on later probes
516 */ 510 */
517 if (!rionet_check) { 511 if (!rionet_check) {
518 rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef);
519 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, 512 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
520 &lsrc_ops); 513 &lsrc_ops);
521 rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, 514 rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
522 &ldst_ops); 515 &ldst_ops);
523 if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) { 516 if (!is_rionet_capable(lsrc_ops, ldst_ops)) {
524 printk(KERN_ERR 517 printk(KERN_ERR
525 "%s: local device is not network capable\n", 518 "%s: local device is not network capable\n",
526 DRV_NAME); 519 DRV_NAME);
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index ad35c210b839..1c1666e99106 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -21,6 +21,7 @@
21 */ 21 */
22 22
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/interrupt.h>
24#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
25#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
26#include <linux/delay.h> 27#include <linux/delay.h>
@@ -30,6 +31,7 @@
30#include <linux/phy.h> 31#include <linux/phy.h>
31#include <linux/cache.h> 32#include <linux/cache.h>
32#include <linux/io.h> 33#include <linux/io.h>
34#include <linux/interrupt.h>
33#include <linux/pm_runtime.h> 35#include <linux/pm_runtime.h>
34#include <linux/slab.h> 36#include <linux/slab.h>
35#include <linux/ethtool.h> 37#include <linux/ethtool.h>
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 8ad7bfbaa3af..3c0f1312b391 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1825,6 +1825,16 @@ static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1825 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL); 1825 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1826} 1826}
1827 1827
1828static int sis190_mac_addr(struct net_device *dev, void *p)
1829{
1830 int rc;
1831
1832 rc = eth_mac_addr(dev, p);
1833 if (!rc)
1834 sis190_init_rxfilter(dev);
1835 return rc;
1836}
1837
1828static const struct net_device_ops sis190_netdev_ops = { 1838static const struct net_device_ops sis190_netdev_ops = {
1829 .ndo_open = sis190_open, 1839 .ndo_open = sis190_open,
1830 .ndo_stop = sis190_close, 1840 .ndo_stop = sis190_close,
@@ -1833,7 +1843,7 @@ static const struct net_device_ops sis190_netdev_ops = {
1833 .ndo_tx_timeout = sis190_tx_timeout, 1843 .ndo_tx_timeout = sis190_tx_timeout,
1834 .ndo_set_multicast_list = sis190_set_rx_mode, 1844 .ndo_set_multicast_list = sis190_set_rx_mode,
1835 .ndo_change_mtu = eth_change_mtu, 1845 .ndo_change_mtu = eth_change_mtu,
1836 .ndo_set_mac_address = eth_mac_addr, 1846 .ndo_set_mac_address = sis190_mac_addr,
1837 .ndo_validate_addr = eth_validate_addr, 1847 .ndo_validate_addr = eth_validate_addr,
1838#ifdef CONFIG_NET_POLL_CONTROLLER 1848#ifdef CONFIG_NET_POLL_CONTROLLER
1839 .ndo_poll_controller = sis190_netpoll, 1849 .ndo_poll_controller = sis190_netpoll,
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index f11b3f3df24f..4c617534f937 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -367,7 +367,7 @@ static void sl_bump(struct slip *sl)
367 memcpy(skb_put(skb, count), sl->rbuff, count); 367 memcpy(skb_put(skb, count), sl->rbuff, count);
368 skb_reset_mac_header(skb); 368 skb_reset_mac_header(skb);
369 skb->protocol = htons(ETH_P_IP); 369 skb->protocol = htons(ETH_P_IP);
370 netif_rx(skb); 370 netif_rx_ni(skb);
371 dev->stats.rx_packets++; 371 dev->stats.rx_packets++;
372} 372}
373 373
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index be745ae8f4e3..ade35dde5b51 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -46,14 +46,15 @@
46#include <asm/byteorder.h> 46#include <asm/byteorder.h>
47#include <asm/uaccess.h> 47#include <asm/uaccess.h>
48#include <asm/irq.h> 48#include <asm/irq.h>
49#include <asm/prom.h>
50 49
51#ifdef CONFIG_SPARC 50#ifdef CONFIG_SPARC
52#include <asm/idprom.h> 51#include <asm/idprom.h>
52#include <asm/prom.h>
53#endif 53#endif
54 54
55#ifdef CONFIG_PPC_PMAC 55#ifdef CONFIG_PPC_PMAC
56#include <asm/pci-bridge.h> 56#include <asm/pci-bridge.h>
57#include <asm/prom.h>
57#include <asm/machdep.h> 58#include <asm/machdep.h>
58#include <asm/pmac_feature.h> 59#include <asm/pmac_feature.h>
59#endif 60#endif
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index fd622a66ebbf..f06fb78383a1 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -53,7 +53,7 @@
53#include <linux/usb/usbnet.h> 53#include <linux/usb/usbnet.h>
54#include <linux/usb/cdc.h> 54#include <linux/usb/cdc.h>
55 55
56#define DRIVER_VERSION "01-June-2011" 56#define DRIVER_VERSION "04-Aug-2011"
57 57
58/* CDC NCM subclass 3.2.1 */ 58/* CDC NCM subclass 3.2.1 */
59#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 59#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -163,35 +163,8 @@ cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
163 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); 163 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
164} 164}
165 165
166static int
167cdc_ncm_do_request(struct cdc_ncm_ctx *ctx, struct usb_cdc_notification *req,
168 void *data, u16 flags, u16 *actlen, u16 timeout)
169{
170 int err;
171
172 err = usb_control_msg(ctx->udev, (req->bmRequestType & USB_DIR_IN) ?
173 usb_rcvctrlpipe(ctx->udev, 0) :
174 usb_sndctrlpipe(ctx->udev, 0),
175 req->bNotificationType, req->bmRequestType,
176 req->wValue,
177 req->wIndex, data,
178 req->wLength, timeout);
179
180 if (err < 0) {
181 if (actlen)
182 *actlen = 0;
183 return err;
184 }
185
186 if (actlen)
187 *actlen = err;
188
189 return 0;
190}
191
192static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) 166static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
193{ 167{
194 struct usb_cdc_notification req;
195 u32 val; 168 u32 val;
196 u8 flags; 169 u8 flags;
197 u8 iface_no; 170 u8 iface_no;
@@ -200,14 +173,14 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
200 173
201 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; 174 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
202 175
203 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE; 176 err = usb_control_msg(ctx->udev,
204 req.bNotificationType = USB_CDC_GET_NTB_PARAMETERS; 177 usb_rcvctrlpipe(ctx->udev, 0),
205 req.wValue = 0; 178 USB_CDC_GET_NTB_PARAMETERS,
206 req.wIndex = cpu_to_le16(iface_no); 179 USB_TYPE_CLASS | USB_DIR_IN
207 req.wLength = cpu_to_le16(sizeof(ctx->ncm_parm)); 180 | USB_RECIP_INTERFACE,
208 181 0, iface_no, &ctx->ncm_parm,
209 err = cdc_ncm_do_request(ctx, &req, &ctx->ncm_parm, 0, NULL, 1000); 182 sizeof(ctx->ncm_parm), 10000);
210 if (err) { 183 if (err < 0) {
211 pr_debug("failed GET_NTB_PARAMETERS\n"); 184 pr_debug("failed GET_NTB_PARAMETERS\n");
212 return 1; 185 return 1;
213 } 186 }
@@ -253,31 +226,43 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
253 226
254 /* inform device about NTB input size changes */ 227 /* inform device about NTB input size changes */
255 if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { 228 if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
256 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
257 USB_RECIP_INTERFACE;
258 req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE;
259 req.wValue = 0;
260 req.wIndex = cpu_to_le16(iface_no);
261 229
262 if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) { 230 if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
263 struct usb_cdc_ncm_ndp_input_size ndp_in_sz; 231 struct usb_cdc_ncm_ndp_input_size *ndp_in_sz;
264
265 req.wLength = 8;
266 ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
267 ndp_in_sz.wNtbInMaxDatagrams =
268 cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX);
269 ndp_in_sz.wReserved = 0;
270 err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL,
271 1000);
272 } else {
273 __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
274 232
275 req.wLength = 4; 233 ndp_in_sz = kzalloc(sizeof(*ndp_in_sz), GFP_KERNEL);
276 err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0, 234 if (!ndp_in_sz) {
277 NULL, 1000); 235 err = -ENOMEM;
278 } 236 goto size_err;
237 }
279 238
280 if (err) 239 err = usb_control_msg(ctx->udev,
240 usb_sndctrlpipe(ctx->udev, 0),
241 USB_CDC_SET_NTB_INPUT_SIZE,
242 USB_TYPE_CLASS | USB_DIR_OUT
243 | USB_RECIP_INTERFACE,
244 0, iface_no, ndp_in_sz, 8, 1000);
245 kfree(ndp_in_sz);
246 } else {
247 __le32 *dwNtbInMaxSize;
248 dwNtbInMaxSize = kzalloc(sizeof(*dwNtbInMaxSize),
249 GFP_KERNEL);
250 if (!dwNtbInMaxSize) {
251 err = -ENOMEM;
252 goto size_err;
253 }
254 *dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
255
256 err = usb_control_msg(ctx->udev,
257 usb_sndctrlpipe(ctx->udev, 0),
258 USB_CDC_SET_NTB_INPUT_SIZE,
259 USB_TYPE_CLASS | USB_DIR_OUT
260 | USB_RECIP_INTERFACE,
261 0, iface_no, dwNtbInMaxSize, 4, 1000);
262 kfree(dwNtbInMaxSize);
263 }
264size_err:
265 if (err < 0)
281 pr_debug("Setting NTB Input Size failed\n"); 266 pr_debug("Setting NTB Input Size failed\n");
282 } 267 }
283 268
@@ -332,29 +317,24 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
332 317
333 /* set CRC Mode */ 318 /* set CRC Mode */
334 if (flags & USB_CDC_NCM_NCAP_CRC_MODE) { 319 if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
335 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | 320 err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0),
336 USB_RECIP_INTERFACE; 321 USB_CDC_SET_CRC_MODE,
337 req.bNotificationType = USB_CDC_SET_CRC_MODE; 322 USB_TYPE_CLASS | USB_DIR_OUT
338 req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); 323 | USB_RECIP_INTERFACE,
339 req.wIndex = cpu_to_le16(iface_no); 324 USB_CDC_NCM_CRC_NOT_APPENDED,
340 req.wLength = 0; 325 iface_no, NULL, 0, 1000);
341 326 if (err < 0)
342 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
343 if (err)
344 pr_debug("Setting CRC mode off failed\n"); 327 pr_debug("Setting CRC mode off failed\n");
345 } 328 }
346 329
347 /* set NTB format, if both formats are supported */ 330 /* set NTB format, if both formats are supported */
348 if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) { 331 if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
349 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | 332 err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0),
350 USB_RECIP_INTERFACE; 333 USB_CDC_SET_NTB_FORMAT, USB_TYPE_CLASS
351 req.bNotificationType = USB_CDC_SET_NTB_FORMAT; 334 | USB_DIR_OUT | USB_RECIP_INTERFACE,
352 req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); 335 USB_CDC_NCM_NTB16_FORMAT,
353 req.wIndex = cpu_to_le16(iface_no); 336 iface_no, NULL, 0, 1000);
354 req.wLength = 0; 337 if (err < 0)
355
356 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
357 if (err)
358 pr_debug("Setting NTB format to 16-bit failed\n"); 338 pr_debug("Setting NTB format to 16-bit failed\n");
359 } 339 }
360 340
@@ -362,23 +342,29 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
362 342
363 /* set Max Datagram Size (MTU) */ 343 /* set Max Datagram Size (MTU) */
364 if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) { 344 if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
365 __le16 max_datagram_size; 345 __le16 *max_datagram_size;
366 u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); 346 u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
367 347
368 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | 348 max_datagram_size = kzalloc(sizeof(*max_datagram_size),
369 USB_RECIP_INTERFACE; 349 GFP_KERNEL);
370 req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; 350 if (!max_datagram_size) {
371 req.wValue = 0; 351 err = -ENOMEM;
372 req.wIndex = cpu_to_le16(iface_no); 352 goto max_dgram_err;
373 req.wLength = cpu_to_le16(2); 353 }
374 354
375 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 355 err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0),
376 1000); 356 USB_CDC_GET_MAX_DATAGRAM_SIZE,
377 if (err) { 357 USB_TYPE_CLASS | USB_DIR_IN
358 | USB_RECIP_INTERFACE,
359 0, iface_no, max_datagram_size,
360 2, 1000);
361 if (err < 0) {
378 pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n", 362 pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
379 CDC_NCM_MIN_DATAGRAM_SIZE); 363 CDC_NCM_MIN_DATAGRAM_SIZE);
364 kfree(max_datagram_size);
380 } else { 365 } else {
381 ctx->max_datagram_size = le16_to_cpu(max_datagram_size); 366 ctx->max_datagram_size =
367 le16_to_cpu(*max_datagram_size);
382 /* Check Eth descriptor value */ 368 /* Check Eth descriptor value */
383 if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) { 369 if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
384 if (ctx->max_datagram_size > eth_max_sz) 370 if (ctx->max_datagram_size > eth_max_sz)
@@ -395,17 +381,17 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
395 CDC_NCM_MIN_DATAGRAM_SIZE; 381 CDC_NCM_MIN_DATAGRAM_SIZE;
396 382
397 /* if value changed, update device */ 383 /* if value changed, update device */
398 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | 384 err = usb_control_msg(ctx->udev,
399 USB_RECIP_INTERFACE; 385 usb_sndctrlpipe(ctx->udev, 0),
400 req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE; 386 USB_CDC_SET_MAX_DATAGRAM_SIZE,
401 req.wValue = 0; 387 USB_TYPE_CLASS | USB_DIR_OUT
402 req.wIndex = cpu_to_le16(iface_no); 388 | USB_RECIP_INTERFACE,
403 req.wLength = 2; 389 0,
404 max_datagram_size = cpu_to_le16(ctx->max_datagram_size); 390 iface_no, max_datagram_size,
405 391 2, 1000);
406 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 392 kfree(max_datagram_size);
407 0, NULL, 1000); 393max_dgram_err:
408 if (err) 394 if (err < 0)
409 pr_debug("SET_MAX_DATAGRAM_SIZE failed\n"); 395 pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
410 } 396 }
411 397
@@ -671,7 +657,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
671 u32 rem; 657 u32 rem;
672 u32 offset; 658 u32 offset;
673 u32 last_offset; 659 u32 last_offset;
674 u16 n = 0; 660 u16 n = 0, index;
675 u8 ready2send = 0; 661 u8 ready2send = 0;
676 662
677 /* if there is a remaining skb, it gets priority */ 663 /* if there is a remaining skb, it gets priority */
@@ -859,8 +845,8 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
859 cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); 845 cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
860 ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq); 846 ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
861 ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset); 847 ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
862 ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), 848 index = ALIGN(sizeof(struct usb_cdc_ncm_nth16), ctx->tx_ndp_modulus);
863 ctx->tx_ndp_modulus); 849 ctx->tx_ncm.nth16.wNdpIndex = cpu_to_le16(index);
864 850
865 memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16)); 851 memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
866 ctx->tx_seq++; 852 ctx->tx_seq++;
@@ -873,12 +859,11 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
873 ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem); 859 ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
874 ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */ 860 ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
875 861
876 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex, 862 memcpy(((u8 *)skb_out->data) + index,
877 &(ctx->tx_ncm.ndp16), 863 &(ctx->tx_ncm.ndp16),
878 sizeof(ctx->tx_ncm.ndp16)); 864 sizeof(ctx->tx_ncm.ndp16));
879 865
880 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex + 866 memcpy(((u8 *)skb_out->data) + index + sizeof(ctx->tx_ncm.ndp16),
881 sizeof(ctx->tx_ncm.ndp16),
882 &(ctx->tx_ncm.dpe16), 867 &(ctx->tx_ncm.dpe16),
883 (ctx->tx_curr_frame_num + 1) * 868 (ctx->tx_curr_frame_num + 1) *
884 sizeof(struct usb_cdc_ncm_dpe16)); 869 sizeof(struct usb_cdc_ncm_dpe16));
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 041fb7d43c4f..ef3b236b5145 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -977,7 +977,6 @@ static void rtl8150_disconnect(struct usb_interface *intf)
977 usb_set_intfdata(intf, NULL); 977 usb_set_intfdata(intf, NULL);
978 if (dev) { 978 if (dev) {
979 set_bit(RTL8150_UNPLUG, &dev->flags); 979 set_bit(RTL8150_UNPLUG, &dev->flags);
980 tasklet_disable(&dev->tl);
981 tasklet_kill(&dev->tl); 980 tasklet_kill(&dev->tl);
982 unregister_netdev(dev->netdev); 981 unregister_netdev(dev->netdev);
983 unlink_all_urbs(dev); 982 unlink_all_urbs(dev);
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index deb1eca13c9f..7c5336c5c37f 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -515,10 +515,6 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
515 mac_set_cam_mask(regs, vptr->mCAMmask); 515 mac_set_cam_mask(regs, vptr->mCAMmask);
516 516
517 /* Enable VCAMs */ 517 /* Enable VCAMs */
518
519 if (test_bit(0, vptr->active_vlans))
520 WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
521
522 for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) { 518 for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
523 mac_set_vlan_cam(regs, i, (u8 *) &vid); 519 mac_set_vlan_cam(regs, i, (u8 *) &vid);
524 vptr->vCAMmask[i / 8] |= 0x1 << (i % 8); 520 vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 1cbacb389652..0959583feb27 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1929,14 +1929,17 @@ static void
1929vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1929vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1930{ 1930{
1931 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1931 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1932 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1933 unsigned long flags;
1934 1932
1935 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); 1933 if (!(netdev->flags & IFF_PROMISC)) {
1936 spin_lock_irqsave(&adapter->cmd_lock, flags); 1934 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1937 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1935 unsigned long flags;
1938 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1936
1939 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 1937 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1938 spin_lock_irqsave(&adapter->cmd_lock, flags);
1939 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1940 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1941 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1942 }
1940 1943
1941 set_bit(vid, adapter->active_vlans); 1944 set_bit(vid, adapter->active_vlans);
1942} 1945}
@@ -1946,14 +1949,17 @@ static void
1946vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1949vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1947{ 1950{
1948 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1951 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1949 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1950 unsigned long flags;
1951 1952
1952 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); 1953 if (!(netdev->flags & IFF_PROMISC)) {
1953 spin_lock_irqsave(&adapter->cmd_lock, flags); 1954 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1954 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1955 unsigned long flags;
1955 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1956
1956 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 1957 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
1958 spin_lock_irqsave(&adapter->cmd_lock, flags);
1959 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1960 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1961 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1962 }
1957 1963
1958 clear_bit(vid, adapter->active_vlans); 1964 clear_bit(vid, adapter->active_vlans);
1959} 1965}
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index f54dff44ed50..c3119a6caace 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1735,6 +1735,8 @@ ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
1735 1735
1736 if (dma_mapping_error(ah->dev, bf->skbaddr)) { 1736 if (dma_mapping_error(ah->dev, bf->skbaddr)) {
1737 ATH5K_ERR(ah, "beacon DMA mapping failed\n"); 1737 ATH5K_ERR(ah, "beacon DMA mapping failed\n");
1738 dev_kfree_skb_any(skb);
1739 bf->skb = NULL;
1738 return -EIO; 1740 return -EIO;
1739 } 1741 }
1740 1742
@@ -1819,8 +1821,6 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1819 ath5k_txbuf_free_skb(ah, avf->bbuf); 1821 ath5k_txbuf_free_skb(ah, avf->bbuf);
1820 avf->bbuf->skb = skb; 1822 avf->bbuf->skb = skb;
1821 ret = ath5k_beacon_setup(ah, avf->bbuf); 1823 ret = ath5k_beacon_setup(ah, avf->bbuf);
1822 if (ret)
1823 avf->bbuf->skb = NULL;
1824out: 1824out:
1825 return ret; 1825 return ret;
1826} 1826}
@@ -1840,6 +1840,7 @@ ath5k_beacon_send(struct ath5k_hw *ah)
1840 struct ath5k_vif *avf; 1840 struct ath5k_vif *avf;
1841 struct ath5k_buf *bf; 1841 struct ath5k_buf *bf;
1842 struct sk_buff *skb; 1842 struct sk_buff *skb;
1843 int err;
1843 1844
1844 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n"); 1845 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");
1845 1846
@@ -1888,11 +1889,6 @@ ath5k_beacon_send(struct ath5k_hw *ah)
1888 1889
1889 avf = (void *)vif->drv_priv; 1890 avf = (void *)vif->drv_priv;
1890 bf = avf->bbuf; 1891 bf = avf->bbuf;
1891 if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
1892 ah->opmode == NL80211_IFTYPE_MONITOR)) {
1893 ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
1894 return;
1895 }
1896 1892
1897 /* 1893 /*
1898 * Stop any current dma and put the new frame on the queue. 1894 * Stop any current dma and put the new frame on the queue.
@@ -1906,8 +1902,17 @@ ath5k_beacon_send(struct ath5k_hw *ah)
1906 1902
1907 /* refresh the beacon for AP or MESH mode */ 1903 /* refresh the beacon for AP or MESH mode */
1908 if (ah->opmode == NL80211_IFTYPE_AP || 1904 if (ah->opmode == NL80211_IFTYPE_AP ||
1909 ah->opmode == NL80211_IFTYPE_MESH_POINT) 1905 ah->opmode == NL80211_IFTYPE_MESH_POINT) {
1910 ath5k_beacon_update(ah->hw, vif); 1906 err = ath5k_beacon_update(ah->hw, vif);
1907 if (err)
1908 return;
1909 }
1910
1911 if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
1912 ah->opmode == NL80211_IFTYPE_MONITOR)) {
1913 ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb);
1914 return;
1915 }
1911 1916
1912 trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]); 1917 trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);
1913 1918
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 9ff7c30573b8..44d9d8d56490 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -309,11 +309,7 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
309 u8 i; 309 u8 i;
310 u32 val; 310 u32 val;
311 311
312 if (ah->is_pciexpress != true) 312 if (ah->is_pciexpress != true || ah->aspm_enabled != true)
313 return;
314
315 /* Do not touch SerDes registers */
316 if (ah->config.pcie_powersave_enable == 2)
317 return; 313 return;
318 314
319 /* Nothing to do on restore for 11N */ 315 /* Nothing to do on restore for 11N */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index d109c25417f4..1b9400371eaf 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -69,7 +69,7 @@ static int ar9003_hw_power_interpolate(int32_t x,
69static const struct ar9300_eeprom ar9300_default = { 69static const struct ar9300_eeprom ar9300_default = {
70 .eepromVersion = 2, 70 .eepromVersion = 2,
71 .templateVersion = 2, 71 .templateVersion = 2,
72 .macAddr = {1, 2, 3, 4, 5, 6}, 72 .macAddr = {0, 2, 3, 4, 5, 6},
73 .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 73 .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
74 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 74 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
75 .baseEepHeader = { 75 .baseEepHeader = {
@@ -307,7 +307,7 @@ static const struct ar9300_eeprom ar9300_default = {
307 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 307 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
308 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, 308 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
309 309
310 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } }, 310 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
311 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 311 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
312 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 312 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
313 313
@@ -884,7 +884,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
884 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 884 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
885 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, 885 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
886 886
887 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } }, 887 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
888 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 888 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
889 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 889 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
890 890
@@ -2040,7 +2040,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
2040 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 2040 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2041 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, 2041 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
2042 2042
2043 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } }, 2043 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
2044 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 2044 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2045 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, 2045 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2046 2046
@@ -3734,7 +3734,7 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
3734 } 3734 }
3735 } else { 3735 } else {
3736 reg_pmu_set = (5 << 1) | (7 << 4) | 3736 reg_pmu_set = (5 << 1) | (7 << 4) |
3737 (1 << 8) | (2 << 14) | 3737 (2 << 8) | (2 << 14) |
3738 (6 << 17) | (1 << 20) | 3738 (6 << 17) | (1 << 20) |
3739 (3 << 24) | (1 << 28); 3739 (3 << 24) | (1 << 28);
3740 } 3740 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 8efdec247c02..ad2bb2bf4e8a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -519,11 +519,7 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
519 int restore, 519 int restore,
520 int power_off) 520 int power_off)
521{ 521{
522 if (ah->is_pciexpress != true) 522 if (ah->is_pciexpress != true || ah->aspm_enabled != true)
523 return;
524
525 /* Do not touch SerDes registers */
526 if (ah->config.pcie_powersave_enable == 2)
527 return; 523 return;
528 524
529 /* Nothing to do on restore for 11N */ 525 /* Nothing to do on restore for 11N */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 6de3f0bc18e6..5c590429f120 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -850,7 +850,7 @@
850#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220) 850#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
851#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240) 851#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240)
852#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c) 852#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
853#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM_BASE + 0x450 + ((_i) << 2)) 853#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM1_BASE + 0x450 + ((_i) << 2))
854 854
855/* 855/*
856 * Channel 2 Register Map 856 * Channel 2 Register Map
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8006ce0c7357..8dcefe74f4c3 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -318,6 +318,14 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
318 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); 318 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
319} 319}
320 320
321static void ath9k_hw_aspm_init(struct ath_hw *ah)
322{
323 struct ath_common *common = ath9k_hw_common(ah);
324
325 if (common->bus_ops->aspm_init)
326 common->bus_ops->aspm_init(common);
327}
328
321/* This should work for all families including legacy */ 329/* This should work for all families including legacy */
322static bool ath9k_hw_chip_test(struct ath_hw *ah) 330static bool ath9k_hw_chip_test(struct ath_hw *ah)
323{ 331{
@@ -378,7 +386,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
378 ah->config.additional_swba_backoff = 0; 386 ah->config.additional_swba_backoff = 0;
379 ah->config.ack_6mb = 0x0; 387 ah->config.ack_6mb = 0x0;
380 ah->config.cwm_ignore_extcca = 0; 388 ah->config.cwm_ignore_extcca = 0;
381 ah->config.pcie_powersave_enable = 0;
382 ah->config.pcie_clock_req = 0; 389 ah->config.pcie_clock_req = 0;
383 ah->config.pcie_waen = 0; 390 ah->config.pcie_waen = 0;
384 ah->config.analog_shiftreg = 1; 391 ah->config.analog_shiftreg = 1;
@@ -598,7 +605,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
598 605
599 606
600 if (ah->is_pciexpress) 607 if (ah->is_pciexpress)
601 ath9k_hw_configpcipowersave(ah, 0, 0); 608 ath9k_hw_aspm_init(ah);
602 else 609 else
603 ath9k_hw_disablepcie(ah); 610 ath9k_hw_disablepcie(ah);
604 611
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 6acd0f975ae1..c79889036ec4 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -219,7 +219,6 @@ struct ath9k_ops_config {
219 int additional_swba_backoff; 219 int additional_swba_backoff;
220 int ack_6mb; 220 int ack_6mb;
221 u32 cwm_ignore_extcca; 221 u32 cwm_ignore_extcca;
222 u8 pcie_powersave_enable;
223 bool pcieSerDesWrite; 222 bool pcieSerDesWrite;
224 u8 pcie_clock_req; 223 u8 pcie_clock_req;
225 u32 pcie_waen; 224 u32 pcie_waen;
@@ -673,6 +672,7 @@ struct ath_hw {
673 672
674 bool sw_mgmt_crypto; 673 bool sw_mgmt_crypto;
675 bool is_pciexpress; 674 bool is_pciexpress;
675 bool aspm_enabled;
676 bool is_monitoring; 676 bool is_monitoring;
677 bool need_an_top2_fixup; 677 bool need_an_top2_fixup;
678 u16 tx_trig_level; 678 u16 tx_trig_level;
@@ -874,6 +874,7 @@ struct ath_bus_ops {
874 bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data); 874 bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
875 void (*bt_coex_prep)(struct ath_common *common); 875 void (*bt_coex_prep)(struct ath_common *common);
876 void (*extn_synch_en)(struct ath_common *common); 876 void (*extn_synch_en)(struct ath_common *common);
877 void (*aspm_init)(struct ath_common *common);
877}; 878};
878 879
879static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah) 880static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index ac5107172f94..aa0ff7e2c922 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -670,8 +670,10 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
670static void ath9k_init_txpower_limits(struct ath_softc *sc) 670static void ath9k_init_txpower_limits(struct ath_softc *sc)
671{ 671{
672 struct ath_hw *ah = sc->sc_ah; 672 struct ath_hw *ah = sc->sc_ah;
673 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
673 struct ath9k_channel *curchan = ah->curchan; 674 struct ath9k_channel *curchan = ah->curchan;
674 675
676 ah->txchainmask = common->tx_chainmask;
675 if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) 677 if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
676 ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ); 678 ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
677 if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) 679 if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 9098aaad97a9..6530694a59ae 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2283,7 +2283,11 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
2283 2283
2284 mutex_lock(&sc->mutex); 2284 mutex_lock(&sc->mutex);
2285 ah->coverage_class = coverage_class; 2285 ah->coverage_class = coverage_class;
2286
2287 ath9k_ps_wakeup(sc);
2286 ath9k_hw_init_global_settings(ah); 2288 ath9k_hw_init_global_settings(ah);
2289 ath9k_ps_restore(sc);
2290
2287 mutex_unlock(&sc->mutex); 2291 mutex_unlock(&sc->mutex);
2288} 2292}
2289 2293
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 3bad0b2cf9a3..be4ea1329813 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/nl80211.h> 17#include <linux/nl80211.h>
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/pci-aspm.h>
19#include <linux/ath9k_platform.h> 20#include <linux/ath9k_platform.h>
20#include "ath9k.h" 21#include "ath9k.h"
21 22
@@ -115,12 +116,38 @@ static void ath_pci_extn_synch_enable(struct ath_common *common)
115 pci_write_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, lnkctl); 116 pci_write_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, lnkctl);
116} 117}
117 118
119static void ath_pci_aspm_init(struct ath_common *common)
120{
121 struct ath_softc *sc = (struct ath_softc *) common->priv;
122 struct ath_hw *ah = sc->sc_ah;
123 struct pci_dev *pdev = to_pci_dev(sc->dev);
124 struct pci_dev *parent;
125 int pos;
126 u8 aspm;
127
128 if (!pci_is_pcie(pdev))
129 return;
130
131 parent = pdev->bus->self;
132 if (WARN_ON(!parent))
133 return;
134
135 pos = pci_pcie_cap(parent);
136 pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm);
137 if (aspm & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) {
138 ah->aspm_enabled = true;
139 /* Initialize PCIe PM and SERDES registers. */
140 ath9k_hw_configpcipowersave(ah, 0, 0);
141 }
142}
143
118static const struct ath_bus_ops ath_pci_bus_ops = { 144static const struct ath_bus_ops ath_pci_bus_ops = {
119 .ath_bus_type = ATH_PCI, 145 .ath_bus_type = ATH_PCI,
120 .read_cachesize = ath_pci_read_cachesize, 146 .read_cachesize = ath_pci_read_cachesize,
121 .eeprom_read = ath_pci_eeprom_read, 147 .eeprom_read = ath_pci_eeprom_read,
122 .bt_coex_prep = ath_pci_bt_coex_prep, 148 .bt_coex_prep = ath_pci_bt_coex_prep,
123 .extn_synch_en = ath_pci_extn_synch_enable, 149 .extn_synch_en = ath_pci_extn_synch_enable,
150 .aspm_init = ath_pci_aspm_init,
124}; 151};
125 152
126static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 153static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 0122930b14c7..0474e6638d21 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1066,8 +1066,10 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1066 * the high througput speed in 802.11n networks. 1066 * the high througput speed in 802.11n networks.
1067 */ 1067 */
1068 1068
1069 if (!is_main_vif(ar, vif)) 1069 if (!is_main_vif(ar, vif)) {
1070 mutex_lock(&ar->mutex);
1070 goto err_softw; 1071 goto err_softw;
1072 }
1071 1073
1072 /* 1074 /*
1073 * While the hardware supports *catch-all* key, for offloading 1075 * While the hardware supports *catch-all* key, for offloading
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 83cba22ac6e8..481e534534eb 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -795,9 +795,23 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
795 u32 tmp; 795 u32 tmp;
796 u16 mmio_base; 796 u16 mmio_base;
797 797
798 tmp = b43_read32(dev, SSB_TMSHIGH); 798 switch (dev->dev->bus_type) {
799 if (tmp & SSB_TMSHIGH_DMA64) 799#ifdef CONFIG_B43_BCMA
800 return DMA_BIT_MASK(64); 800 case B43_BUS_BCMA:
801 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
802 if (tmp & BCMA_IOST_DMA64)
803 return DMA_BIT_MASK(64);
804 break;
805#endif
806#ifdef CONFIG_B43_SSB
807 case B43_BUS_SSB:
808 tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
809 if (tmp & SSB_TMSHIGH_DMA64)
810 return DMA_BIT_MASK(64);
811 break;
812#endif
813 }
814
801 mmio_base = b43_dmacontroller_base(0, 0); 815 mmio_base = b43_dmacontroller_base(0, 0);
802 b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK); 816 b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
803 tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL); 817 tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index c052a0d5cbdd..5441ad195119 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -648,6 +648,8 @@ static const struct pcmcia_device_id hostap_cs_ids[] = {
648 0x74c5e40d), 648 0x74c5e40d),
649 PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "Intersil", 649 PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "Intersil",
650 0x4b801a17), 650 0x4b801a17),
651 PCMCIA_DEVICE_MANF_CARD_PROD_ID3(0x0156, 0x0002, "Version 01.02",
652 0x4b74baa0),
651 PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus", 653 PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus",
652 0x7a954bd9, 0x74be00c6), 654 0x7a954bd9, 0x74be00c6),
653 PCMCIA_DEVICE_PROD_ID123( 655 PCMCIA_DEVICE_PROD_ID123(
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
index dab67a12d73b..73fe3cdf796b 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.c
@@ -1746,7 +1746,11 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1746 } 1746 }
1747 1747
1748 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); 1748 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1749 1749 /*
1750 * We do not commit tx power settings while channel changing,
1751 * do it now if tx power changed.
1752 */
1753 iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
1750 return 0; 1754 return 0;
1751 } 1755 }
1752 1756
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index bd4b000733f7..ecdc6e557428 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -1235,7 +1235,12 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1235 1235
1236 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); 1236 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1237 iwl_legacy_print_rx_config_cmd(priv, ctx); 1237 iwl_legacy_print_rx_config_cmd(priv, ctx);
1238 goto set_tx_power; 1238 /*
1239 * We do not commit tx power settings while channel changing,
1240 * do it now if tx power changed.
1241 */
1242 iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
1243 return 0;
1239 } 1244 }
1240 1245
1241 /* If we are currently associated and the new config requires 1246 /* If we are currently associated and the new config requires
@@ -1315,7 +1320,6 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1315 1320
1316 iwl4965_init_sensitivity(priv); 1321 iwl4965_init_sensitivity(priv);
1317 1322
1318set_tx_power:
1319 /* If we issue a new RXON command which required a tune then we must 1323 /* If we issue a new RXON command which required a tune then we must
1320 * send a new TXPOWER command or we won't be able to Tx any frames */ 1324 * send a new TXPOWER command or we won't be able to Tx any frames */
1321 ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true); 1325 ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 3eeb12ebe6e9..c95cefd529dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -365,6 +365,7 @@ static struct iwl_base_params iwl5000_base_params = {
365 .chain_noise_scale = 1000, 365 .chain_noise_scale = 1000,
366 .wd_timeout = IWL_LONG_WD_TIMEOUT, 366 .wd_timeout = IWL_LONG_WD_TIMEOUT,
367 .max_event_log_size = 512, 367 .max_event_log_size = 512,
368 .no_idle_support = true,
368}; 369};
369static struct iwl_ht_params iwl5000_ht_params = { 370static struct iwl_ht_params iwl5000_ht_params = {
370 .ht_greenfield_support = true, 371 .ht_greenfield_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 3e6bb734dcb7..02817a438550 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -135,6 +135,7 @@ struct iwl_mod_params {
135 * @temperature_kelvin: temperature report by uCode in kelvin 135 * @temperature_kelvin: temperature report by uCode in kelvin
136 * @max_event_log_size: size of event log buffer size for ucode event logging 136 * @max_event_log_size: size of event log buffer size for ucode event logging
137 * @shadow_reg_enable: HW shadhow register bit 137 * @shadow_reg_enable: HW shadhow register bit
138 * @no_idle_support: do not support idle mode
138 */ 139 */
139struct iwl_base_params { 140struct iwl_base_params {
140 int eeprom_size; 141 int eeprom_size;
@@ -156,6 +157,7 @@ struct iwl_base_params {
156 bool temperature_kelvin; 157 bool temperature_kelvin;
157 u32 max_event_log_size; 158 u32 max_event_log_size;
158 const bool shadow_reg_enable; 159 const bool shadow_reg_enable;
160 const bool no_idle_support;
159}; 161};
160/* 162/*
161 * @advanced_bt_coexist: support advanced bt coexist 163 * @advanced_bt_coexist: support advanced bt coexist
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index fb7e436b40c7..2fdbffa079c1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -134,6 +134,7 @@ static void iwl_pci_apm_config(struct iwl_bus *bus)
134static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_data) 134static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_data)
135{ 135{
136 bus->drv_data = drv_data; 136 bus->drv_data = drv_data;
137 pci_set_drvdata(IWL_BUS_GET_PCI_DEV(bus), drv_data);
137} 138}
138 139
139static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[], 140static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[],
@@ -454,8 +455,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
454 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); 455 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
455 } 456 }
456 457
457 pci_set_drvdata(pdev, bus);
458
459 bus->dev = &pdev->dev; 458 bus->dev = &pdev->dev;
460 bus->irq = pdev->irq; 459 bus->irq = pdev->irq;
461 bus->ops = &pci_ops; 460 bus->ops = &pci_ops;
@@ -479,26 +478,22 @@ out_no_pci:
479 return err; 478 return err;
480} 479}
481 480
482static void iwl_pci_down(struct iwl_bus *bus)
483{
484 struct iwl_pci_bus *pci_bus = (struct iwl_pci_bus *) bus->bus_specific;
485
486 pci_disable_msi(pci_bus->pci_dev);
487 pci_iounmap(pci_bus->pci_dev, pci_bus->hw_base);
488 pci_release_regions(pci_bus->pci_dev);
489 pci_disable_device(pci_bus->pci_dev);
490 pci_set_drvdata(pci_bus->pci_dev, NULL);
491
492 kfree(bus);
493}
494
495static void __devexit iwl_pci_remove(struct pci_dev *pdev) 481static void __devexit iwl_pci_remove(struct pci_dev *pdev)
496{ 482{
497 struct iwl_bus *bus = pci_get_drvdata(pdev); 483 struct iwl_priv *priv = pci_get_drvdata(pdev);
484 struct iwl_bus *bus = priv->bus;
485 struct iwl_pci_bus *pci_bus = IWL_BUS_GET_PCI_BUS(bus);
486 struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
498 487
499 iwl_remove(bus->drv_data); 488 iwl_remove(priv);
500 489
501 iwl_pci_down(bus); 490 pci_disable_msi(pci_dev);
491 pci_iounmap(pci_dev, pci_bus->hw_base);
492 pci_release_regions(pci_dev);
493 pci_disable_device(pci_dev);
494 pci_set_drvdata(pci_dev, NULL);
495
496 kfree(bus);
502} 497}
503 498
504#ifdef CONFIG_PM 499#ifdef CONFIG_PM
@@ -506,20 +501,20 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
506static int iwl_pci_suspend(struct device *device) 501static int iwl_pci_suspend(struct device *device)
507{ 502{
508 struct pci_dev *pdev = to_pci_dev(device); 503 struct pci_dev *pdev = to_pci_dev(device);
509 struct iwl_bus *bus = pci_get_drvdata(pdev); 504 struct iwl_priv *priv = pci_get_drvdata(pdev);
510 505
511 /* Before you put code here, think about WoWLAN. You cannot check here 506 /* Before you put code here, think about WoWLAN. You cannot check here
512 * whether WoWLAN is enabled or not, and your code will run even if 507 * whether WoWLAN is enabled or not, and your code will run even if
513 * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx. 508 * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
514 */ 509 */
515 510
516 return iwl_suspend(bus->drv_data); 511 return iwl_suspend(priv);
517} 512}
518 513
519static int iwl_pci_resume(struct device *device) 514static int iwl_pci_resume(struct device *device)
520{ 515{
521 struct pci_dev *pdev = to_pci_dev(device); 516 struct pci_dev *pdev = to_pci_dev(device);
522 struct iwl_bus *bus = pci_get_drvdata(pdev); 517 struct iwl_priv *priv = pci_get_drvdata(pdev);
523 518
524 /* Before you put code here, think about WoWLAN. You cannot check here 519 /* Before you put code here, think about WoWLAN. You cannot check here
525 * whether WoWLAN is enabled or not, and your code will run even if 520 * whether WoWLAN is enabled or not, and your code will run even if
@@ -532,7 +527,7 @@ static int iwl_pci_resume(struct device *device)
532 */ 527 */
533 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 528 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
534 529
535 return iwl_resume(bus->drv_data); 530 return iwl_resume(priv);
536} 531}
537 532
538static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume); 533static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 3ec619c6881c..cd64df05f9ed 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -349,7 +349,8 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
349 349
350 if (priv->wowlan) 350 if (priv->wowlan)
351 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper); 351 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
352 else if (priv->hw->conf.flags & IEEE80211_CONF_IDLE) 352 else if (!priv->cfg->base_params->no_idle_support &&
353 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
353 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); 354 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
354 else if (iwl_tt_is_low_power_state(priv)) { 355 else if (iwl_tt_is_low_power_state(priv)) {
355 /* in thermal throttling low power state */ 356 /* in thermal throttling low power state */
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 3f7fc4a0b43d..d7dbc00bcfbe 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -239,7 +239,6 @@ static int orinoco_cs_resume(struct pcmcia_device *link)
239 239
240static const struct pcmcia_device_id orinoco_cs_ids[] = { 240static const struct pcmcia_device_id orinoco_cs_ids[] = {
241 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */ 241 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */
242 PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */
243 PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */ 242 PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */
244 PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */ 243 PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */
245 PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */ 244 PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */
@@ -272,6 +271,7 @@ static const struct pcmcia_device_id orinoco_cs_ids[] = {
272 PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26), 271 PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
273 PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b), 272 PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
274 PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e), 273 PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
274 PCMCIA_DEVICE_MANF_CARD_PROD_ID3(0x0156, 0x0002, "Version 01.01", 0xd27deb1a), /* Lucent Orinoco */
275#ifdef CONFIG_HERMES_PRISM 275#ifdef CONFIG_HERMES_PRISM
276 /* Only entries that certainly identify Prism chipset */ 276 /* Only entries that certainly identify Prism chipset */
277 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */ 277 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
@@ -321,6 +321,9 @@ static const struct pcmcia_device_id orinoco_cs_ids[] = {
321 PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2), 321 PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
322 PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b), 322 PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
323 PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39), 323 PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
324
325 /* This may be Agere or Intersil Firmware */
326 PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002),
324#endif 327#endif
325 PCMCIA_DEVICE_NULL, 328 PCMCIA_DEVICE_NULL,
326}; 329};
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 84ab7d1acb6a..ef67f6786a84 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -703,8 +703,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
703 /* 703 /*
704 * Add space for the TXWI in front of the skb. 704 * Add space for the TXWI in front of the skb.
705 */ 705 */
706 skb_push(entry->skb, TXWI_DESC_SIZE); 706 memset(skb_push(entry->skb, TXWI_DESC_SIZE), 0, TXWI_DESC_SIZE);
707 memset(entry->skb, 0, TXWI_DESC_SIZE);
708 707
709 /* 708 /*
710 * Register descriptor details in skb frame descriptor. 709 * Register descriptor details in skb frame descriptor.
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 507559361d87..dbf501ca317f 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -464,6 +464,15 @@ static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
464 int wcid, ack, pid; 464 int wcid, ack, pid;
465 int tx_wcid, tx_ack, tx_pid; 465 int tx_wcid, tx_ack, tx_pid;
466 466
467 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
468 !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) {
469 WARNING(entry->queue->rt2x00dev,
470 "Data pending for entry %u in queue %u\n",
471 entry->entry_idx, entry->queue->qid);
472 cond_resched();
473 return false;
474 }
475
467 wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID); 476 wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
468 ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED); 477 ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
469 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE); 478 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
@@ -529,12 +538,11 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
529 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 538 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
530 if (rt2800usb_txdone_entry_check(entry, reg)) 539 if (rt2800usb_txdone_entry_check(entry, reg))
531 break; 540 break;
541 entry = NULL;
532 } 542 }
533 543
534 if (!entry || rt2x00queue_empty(queue)) 544 if (entry)
535 break; 545 rt2800_txdone_entry(entry, reg);
536
537 rt2800_txdone_entry(entry, reg);
538 } 546 }
539} 547}
540 548
@@ -558,8 +566,10 @@ static void rt2800usb_work_txdone(struct work_struct *work)
558 while (!rt2x00queue_empty(queue)) { 566 while (!rt2x00queue_empty(queue)) {
559 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 567 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
560 568
561 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 569 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
570 !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
562 break; 571 break;
572
563 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) 573 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
564 rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); 574 rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
565 else if (rt2x00queue_status_timeout(entry)) 575 else if (rt2x00queue_status_timeout(entry))
@@ -921,6 +931,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
921 { USB_DEVICE(0x07d1, 0x3c16) }, 931 { USB_DEVICE(0x07d1, 0x3c16) },
922 /* Draytek */ 932 /* Draytek */
923 { USB_DEVICE(0x07fa, 0x7712) }, 933 { USB_DEVICE(0x07fa, 0x7712) },
934 /* DVICO */
935 { USB_DEVICE(0x0fe9, 0xb307) },
924 /* Edimax */ 936 /* Edimax */
925 { USB_DEVICE(0x7392, 0x7711) }, 937 { USB_DEVICE(0x7392, 0x7711) },
926 { USB_DEVICE(0x7392, 0x7717) }, 938 { USB_DEVICE(0x7392, 0x7717) },
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 15cdc7e57fc4..4cdf247a870d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -355,7 +355,8 @@ static inline enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *
355 return CIPHER_NONE; 355 return CIPHER_NONE;
356} 356}
357 357
358static inline void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry, 358static inline void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
359 struct sk_buff *skb,
359 struct txentry_desc *txdesc) 360 struct txentry_desc *txdesc)
360{ 361{
361} 362}
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 8efab3983528..4ccf23805973 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -113,7 +113,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
113 * due to possible race conditions in mac80211. 113 * due to possible race conditions in mac80211.
114 */ 114 */
115 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) 115 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
116 goto exit_fail; 116 goto exit_free_skb;
117 117
118 /* 118 /*
119 * Use the ATIM queue if appropriate and present. 119 * Use the ATIM queue if appropriate and present.
@@ -127,7 +127,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
127 ERROR(rt2x00dev, 127 ERROR(rt2x00dev,
128 "Attempt to send packet over invalid queue %d.\n" 128 "Attempt to send packet over invalid queue %d.\n"
129 "Please file bug report to %s.\n", qid, DRV_PROJECT); 129 "Please file bug report to %s.\n", qid, DRV_PROJECT);
130 goto exit_fail; 130 goto exit_free_skb;
131 } 131 }
132 132
133 /* 133 /*
@@ -159,6 +159,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
159 159
160 exit_fail: 160 exit_fail:
161 rt2x00queue_pause_queue(queue); 161 rt2x00queue_pause_queue(queue);
162 exit_free_skb:
162 dev_kfree_skb_any(skb); 163 dev_kfree_skb_any(skb);
163} 164}
164EXPORT_SYMBOL_GPL(rt2x00mac_tx); 165EXPORT_SYMBOL_GPL(rt2x00mac_tx);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index b6b4542c2460..1e31050dafc9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -262,23 +262,20 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
262 struct queue_entry *entry = (struct queue_entry *)urb->context; 262 struct queue_entry *entry = (struct queue_entry *)urb->context;
263 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 263 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
264 264
265 if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 265 if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
266 return; 266 return;
267
268 if (rt2x00dev->ops->lib->tx_dma_done)
269 rt2x00dev->ops->lib->tx_dma_done(entry);
270
271 /*
272 * Report the frame as DMA done
273 */
274 rt2x00lib_dmadone(entry);
275
276 /* 267 /*
277 * Check if the frame was correctly uploaded 268 * Check if the frame was correctly uploaded
278 */ 269 */
279 if (urb->status) 270 if (urb->status)
280 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); 271 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
272 /*
273 * Report the frame as DMA done
274 */
275 rt2x00lib_dmadone(entry);
281 276
277 if (rt2x00dev->ops->lib->tx_dma_done)
278 rt2x00dev->ops->lib->tx_dma_done(entry);
282 /* 279 /*
283 * Schedule the delayed work for reading the TX status 280 * Schedule the delayed work for reading the TX status
284 * from the device. 281 * from the device.
@@ -874,18 +871,8 @@ int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
874{ 871{
875 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); 872 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
876 struct rt2x00_dev *rt2x00dev = hw->priv; 873 struct rt2x00_dev *rt2x00dev = hw->priv;
877 int retval;
878
879 retval = rt2x00lib_suspend(rt2x00dev, state);
880 if (retval)
881 return retval;
882 874
883 /* 875 return rt2x00lib_suspend(rt2x00dev, state);
884 * Decrease usbdev refcount.
885 */
886 usb_put_dev(interface_to_usbdev(usb_intf));
887
888 return 0;
889} 876}
890EXPORT_SYMBOL_GPL(rt2x00usb_suspend); 877EXPORT_SYMBOL_GPL(rt2x00usb_suspend);
891 878
@@ -894,8 +881,6 @@ int rt2x00usb_resume(struct usb_interface *usb_intf)
894 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); 881 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
895 struct rt2x00_dev *rt2x00dev = hw->priv; 882 struct rt2x00_dev *rt2x00dev = hw->priv;
896 883
897 usb_get_dev(interface_to_usbdev(usb_intf));
898
899 return rt2x00lib_resume(rt2x00dev); 884 return rt2x00lib_resume(rt2x00dev);
900} 885}
901EXPORT_SYMBOL_GPL(rt2x00usb_resume); 886EXPORT_SYMBOL_GPL(rt2x00usb_resume);
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 6a93939f44e8..0baeb894f093 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2420,6 +2420,7 @@ static struct usb_device_id rt73usb_device_table[] = {
2420 /* Buffalo */ 2420 /* Buffalo */
2421 { USB_DEVICE(0x0411, 0x00d8) }, 2421 { USB_DEVICE(0x0411, 0x00d8) },
2422 { USB_DEVICE(0x0411, 0x00d9) }, 2422 { USB_DEVICE(0x0411, 0x00d9) },
2423 { USB_DEVICE(0x0411, 0x00e6) },
2423 { USB_DEVICE(0x0411, 0x00f4) }, 2424 { USB_DEVICE(0x0411, 0x00f4) },
2424 { USB_DEVICE(0x0411, 0x0116) }, 2425 { USB_DEVICE(0x0411, 0x0116) },
2425 { USB_DEVICE(0x0411, 0x0119) }, 2426 { USB_DEVICE(0x0411, 0x0119) },
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 5efd57833489..56f12358389d 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1696,15 +1696,17 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1696 pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn); 1696 pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
1697 pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn); 1697 pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
1698 1698
1699 /*find bridge info */ 1699 if (bridge_pdev) {
1700 pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor; 1700 /*find bridge info if available */
1701 for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) { 1701 pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
1702 if (bridge_pdev->vendor == pcibridge_vendors[tmp]) { 1702 for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
1703 pcipriv->ndis_adapter.pcibridge_vendor = tmp; 1703 if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
1704 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, 1704 pcipriv->ndis_adapter.pcibridge_vendor = tmp;
1705 ("Pci Bridge Vendor is found index: %d\n", 1705 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1706 tmp)); 1706 ("Pci Bridge Vendor is found index:"
1707 break; 1707 " %d\n", tmp));
1708 break;
1709 }
1708 } 1710 }
1709 } 1711 }
1710 1712
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 942f7a3969a7..ef63c0df006a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -281,6 +281,8 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
281 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)}, 281 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
282 /* 8188CE-VAU USB minCard (b/g mode only) */ 282 /* 8188CE-VAU USB minCard (b/g mode only) */
283 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)}, 283 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
284 /* 8188RU in Alfa AWUS036NHR */
285 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
284 /* 8188 Combo for BC4 */ 286 /* 8188 Combo for BC4 */
285 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)}, 287 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
286 288
@@ -303,20 +305,23 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
303 {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ 305 {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
304 /* HP - Lite-On ,8188CUS Slim Combo */ 306 /* HP - Lite-On ,8188CUS Slim Combo */
305 {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)}, 307 {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
308 {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
306 {RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/ 309 {RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
307 {RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/ 310 {RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
308 {RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/ 311 {RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
309 {RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/ 312 {RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
310 {RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/ 313 {RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
311 {RTL_USB_DEVICE(0x3358, 0x13d3, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/ 314 {RTL_USB_DEVICE(0x13d3, 0x3358, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/
312 /* Russian customer -Azwave (8188CE-VAU b/g mode only) */ 315 /* Russian customer -Azwave (8188CE-VAU b/g mode only) */
313 {RTL_USB_DEVICE(0x3359, 0x13d3, rtl92cu_hal_cfg)}, 316 {RTL_USB_DEVICE(0x13d3, 0x3359, rtl92cu_hal_cfg)},
317 {RTL_USB_DEVICE(0x4855, 0x0090, rtl92cu_hal_cfg)}, /* Feixun */
318 {RTL_USB_DEVICE(0x4855, 0x0091, rtl92cu_hal_cfg)}, /* NetweeN-Feixun */
319 {RTL_USB_DEVICE(0x9846, 0x9041, rtl92cu_hal_cfg)}, /* Netgear Cameo */
314 320
315 /****** 8192CU ********/ 321 /****** 8192CU ********/
316 {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/ 322 {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
317 {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/ 323 {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
318 {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/ 324 {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
319 {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Abocom -Abocom*/
320 {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/ 325 {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
321 {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ 326 {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
322 {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ 327 {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
diff --git a/drivers/net/wireless/wl1251/acx.c b/drivers/net/wireless/wl1251/acx.c
index ef8370edace7..ad87a1ac6462 100644
--- a/drivers/net/wireless/wl1251/acx.c
+++ b/drivers/net/wireless/wl1251/acx.c
@@ -140,8 +140,6 @@ int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth)
140 auth->sleep_auth = sleep_auth; 140 auth->sleep_auth = sleep_auth;
141 141
142 ret = wl1251_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); 142 ret = wl1251_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
143 if (ret < 0)
144 return ret;
145 143
146out: 144out:
147 kfree(auth); 145 kfree(auth);
@@ -681,10 +679,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl)
681 679
682 ret = wl1251_cmd_configure(wl, ACX_CCA_THRESHOLD, 680 ret = wl1251_cmd_configure(wl, ACX_CCA_THRESHOLD,
683 detection, sizeof(*detection)); 681 detection, sizeof(*detection));
684 if (ret < 0) { 682 if (ret < 0)
685 wl1251_warning("failed to set cca threshold: %d", ret); 683 wl1251_warning("failed to set cca threshold: %d", ret);
686 return ret;
687 }
688 684
689out: 685out:
690 kfree(detection); 686 kfree(detection);
diff --git a/drivers/net/wireless/wl1251/cmd.c b/drivers/net/wireless/wl1251/cmd.c
index 81f164bc4888..d14d69d733a0 100644
--- a/drivers/net/wireless/wl1251/cmd.c
+++ b/drivers/net/wireless/wl1251/cmd.c
@@ -241,7 +241,7 @@ int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
241 if (ret < 0) { 241 if (ret < 0) {
242 wl1251_error("tx %s cmd for channel %d failed", 242 wl1251_error("tx %s cmd for channel %d failed",
243 enable ? "start" : "stop", channel); 243 enable ? "start" : "stop", channel);
244 return ret; 244 goto out;
245 } 245 }
246 246
247 wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d", 247 wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index 7e33f1f4f3d4..34f6ab53e519 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -77,8 +77,6 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth)
77 auth->sleep_auth = sleep_auth; 77 auth->sleep_auth = sleep_auth;
78 78
79 ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); 79 ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
80 if (ret < 0)
81 return ret;
82 80
83out: 81out:
84 kfree(auth); 82 kfree(auth);
@@ -624,10 +622,8 @@ int wl1271_acx_cca_threshold(struct wl1271 *wl)
624 622
625 ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, 623 ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD,
626 detection, sizeof(*detection)); 624 detection, sizeof(*detection));
627 if (ret < 0) { 625 if (ret < 0)
628 wl1271_warning("failed to set cca threshold: %d", ret); 626 wl1271_warning("failed to set cca threshold: %d", ret);
629 return ret;
630 }
631 627
632out: 628out:
633 kfree(detection); 629 kfree(detection);
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index e58c22d21e39..b70ae40ad660 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -4283,6 +4283,7 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
4283 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 4283 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
4284 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP); 4284 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
4285 wl->hw->wiphy->max_scan_ssids = 1; 4285 wl->hw->wiphy->max_scan_ssids = 1;
4286 wl->hw->wiphy->max_sched_scan_ssids = 1;
4286 /* 4287 /*
4287 * Maximum length of elements in scanning probe request templates 4288 * Maximum length of elements in scanning probe request templates
4288 * should be the maximum length possible for a template, without 4289 * should be the maximum length possible for a template, without
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 5cf18c2c23f0..fb1fd5af75ea 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -164,7 +164,7 @@ static int wl1271_sdio_power_on(struct wl1271 *wl)
164 /* If enabled, tell runtime PM not to power off the card */ 164 /* If enabled, tell runtime PM not to power off the card */
165 if (pm_runtime_enabled(&func->dev)) { 165 if (pm_runtime_enabled(&func->dev)) {
166 ret = pm_runtime_get_sync(&func->dev); 166 ret = pm_runtime_get_sync(&func->dev);
167 if (ret) 167 if (ret < 0)
168 goto out; 168 goto out;
169 } else { 169 } else {
170 /* Runtime PM is disabled: power up the card manually */ 170 /* Runtime PM is disabled: power up the card manually */
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index 5d5e1ef87206..4ae8effaee22 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -36,7 +36,6 @@ enum wl1271_tm_commands {
36 WL1271_TM_CMD_TEST, 36 WL1271_TM_CMD_TEST,
37 WL1271_TM_CMD_INTERROGATE, 37 WL1271_TM_CMD_INTERROGATE,
38 WL1271_TM_CMD_CONFIGURE, 38 WL1271_TM_CMD_CONFIGURE,
39 WL1271_TM_CMD_NVS_PUSH,
40 WL1271_TM_CMD_SET_PLT_MODE, 39 WL1271_TM_CMD_SET_PLT_MODE,
41 WL1271_TM_CMD_RECOVER, 40 WL1271_TM_CMD_RECOVER,
42 41
@@ -139,12 +138,15 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
139 138
140 if (ret < 0) { 139 if (ret < 0) {
141 wl1271_warning("testmode cmd interrogate failed: %d", ret); 140 wl1271_warning("testmode cmd interrogate failed: %d", ret);
141 kfree(cmd);
142 return ret; 142 return ret;
143 } 143 }
144 144
145 skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd)); 145 skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
146 if (!skb) 146 if (!skb) {
147 kfree(cmd);
147 return -ENOMEM; 148 return -ENOMEM;
149 }
148 150
149 NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); 151 NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
150 152
@@ -187,48 +189,6 @@ static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
187 return 0; 189 return 0;
188} 190}
189 191
190static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
191{
192 int ret = 0;
193 size_t len;
194 void *buf;
195
196 wl1271_debug(DEBUG_TESTMODE, "testmode cmd nvs push");
197
198 if (!tb[WL1271_TM_ATTR_DATA])
199 return -EINVAL;
200
201 buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
202 len = nla_len(tb[WL1271_TM_ATTR_DATA]);
203
204 mutex_lock(&wl->mutex);
205
206 kfree(wl->nvs);
207
208 if ((wl->chip.id == CHIP_ID_1283_PG20) &&
209 (len != sizeof(struct wl128x_nvs_file)))
210 return -EINVAL;
211 else if (len != sizeof(struct wl1271_nvs_file))
212 return -EINVAL;
213
214 wl->nvs = kzalloc(len, GFP_KERNEL);
215 if (!wl->nvs) {
216 wl1271_error("could not allocate memory for the nvs file");
217 ret = -ENOMEM;
218 goto out;
219 }
220
221 memcpy(wl->nvs, buf, len);
222 wl->nvs_len = len;
223
224 wl1271_debug(DEBUG_TESTMODE, "testmode pushed nvs");
225
226out:
227 mutex_unlock(&wl->mutex);
228
229 return ret;
230}
231
232static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[]) 192static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
233{ 193{
234 u32 val; 194 u32 val;
@@ -285,8 +245,6 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
285 return wl1271_tm_cmd_interrogate(wl, tb); 245 return wl1271_tm_cmd_interrogate(wl, tb);
286 case WL1271_TM_CMD_CONFIGURE: 246 case WL1271_TM_CMD_CONFIGURE:
287 return wl1271_tm_cmd_configure(wl, tb); 247 return wl1271_tm_cmd_configure(wl, tb);
288 case WL1271_TM_CMD_NVS_PUSH:
289 return wl1271_tm_cmd_nvs_push(wl, tb);
290 case WL1271_TM_CMD_SET_PLT_MODE: 248 case WL1271_TM_CMD_SET_PLT_MODE:
291 return wl1271_tm_cmd_set_plt_mode(wl, tb); 249 return wl1271_tm_cmd_set_plt_mode(wl, tb);
292 case WL1271_TM_CMD_RECOVER: 250 case WL1271_TM_CMD_RECOVER:
diff --git a/drivers/of/address.c b/drivers/of/address.c
index da1f4b9605df..72c33fbe451d 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -610,6 +610,6 @@ void __iomem *of_iomap(struct device_node *np, int index)
610 if (of_address_to_resource(np, index, &res)) 610 if (of_address_to_resource(np, index, &res))
611 return NULL; 611 return NULL;
612 612
613 return ioremap(res.start, 1 + res.end - res.start); 613 return ioremap(res.start, resource_size(&res));
614} 614}
615EXPORT_SYMBOL(of_iomap); 615EXPORT_SYMBOL(of_iomap);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 02ed36719def..3ff22e32b602 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -610,8 +610,9 @@ EXPORT_SYMBOL(of_find_node_by_phandle);
610 * 610 *
611 * The out_value is modified only if a valid u32 value can be decoded. 611 * The out_value is modified only if a valid u32 value can be decoded.
612 */ 612 */
613int of_property_read_u32_array(const struct device_node *np, char *propname, 613int of_property_read_u32_array(const struct device_node *np,
614 u32 *out_values, size_t sz) 614 const char *propname, u32 *out_values,
615 size_t sz)
615{ 616{
616 struct property *prop = of_find_property(np, propname, NULL); 617 struct property *prop = of_find_property(np, propname, NULL);
617 const __be32 *val; 618 const __be32 *val;
@@ -645,7 +646,7 @@ EXPORT_SYMBOL_GPL(of_property_read_u32_array);
645 * 646 *
646 * The out_string pointer is modified only if a valid string can be decoded. 647 * The out_string pointer is modified only if a valid string can be decoded.
647 */ 648 */
648int of_property_read_string(struct device_node *np, char *propname, 649int of_property_read_string(struct device_node *np, const char *propname,
649 const char **out_string) 650 const char **out_string)
650{ 651{
651 struct property *prop = of_find_property(np, propname, NULL); 652 struct property *prop = of_find_property(np, propname, NULL);
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c
index 3007662ac614..ef0105fa52b1 100644
--- a/drivers/of/gpio.c
+++ b/drivers/of/gpio.c
@@ -127,8 +127,8 @@ EXPORT_SYMBOL(of_gpio_count);
127 * gpio chips. This function performs only one sanity check: whether gpio 127 * gpio chips. This function performs only one sanity check: whether gpio
128 * is less than ngpios (that is specified in the gpio_chip). 128 * is less than ngpios (that is specified in the gpio_chip).
129 */ 129 */
130static int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np, 130int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
131 const void *gpio_spec, u32 *flags) 131 const void *gpio_spec, u32 *flags)
132{ 132{
133 const __be32 *gpio = gpio_spec; 133 const __be32 *gpio = gpio_spec;
134 const u32 n = be32_to_cpup(gpio); 134 const u32 n = be32_to_cpup(gpio);
@@ -152,6 +152,7 @@ static int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
152 152
153 return n; 153 return n;
154} 154}
155EXPORT_SYMBOL(of_gpio_simple_xlate);
155 156
156/** 157/**
157 * of_mm_gpiochip_add - Add memory mapped GPIO chip (bank) 158 * of_mm_gpiochip_add - Add memory mapped GPIO chip (bank)
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a70fa89f76fd..220285760b68 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -110,7 +110,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
110} 110}
111 111
112 112
113static struct acpi_dock_ops acpiphp_dock_ops = { 113static const struct acpi_dock_ops acpiphp_dock_ops = {
114 .handler = handle_hotplug_event_func, 114 .handler = handle_hotplug_event_func,
115}; 115};
116 116
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
index 749fdf070319..3ffd9c1acc0a 100644
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -158,47 +158,6 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
158 */ 158 */
159} 159}
160 160
161/* Program PCIE MaxPayload setting on device: ensure parent maxpayload <= device */
162static int pci_set_payload(struct pci_dev *dev)
163{
164 int pos, ppos;
165 u16 pctl, psz;
166 u16 dctl, dsz, dcap, dmax;
167 struct pci_dev *parent;
168
169 parent = dev->bus->self;
170 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
171 if (!pos)
172 return 0;
173
174 /* Read Device MaxPayload capability and setting */
175 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &dctl);
176 pci_read_config_word(dev, pos + PCI_EXP_DEVCAP, &dcap);
177 dsz = (dctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
178 dmax = (dcap & PCI_EXP_DEVCAP_PAYLOAD);
179
180 /* Read Parent MaxPayload setting */
181 ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
182 if (!ppos)
183 return 0;
184 pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
185 psz = (pctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
186
187 /* If parent payload > device max payload -> error
188 * If parent payload > device payload -> set speed
189 * If parent payload <= device payload -> do nothing
190 */
191 if (psz > dmax)
192 return -1;
193 else if (psz > dsz) {
194 dev_info(&dev->dev, "Setting MaxPayload to %d\n", 128 << psz);
195 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
196 (dctl & ~PCI_EXP_DEVCTL_PAYLOAD) +
197 (psz << 5));
198 }
199 return 0;
200}
201
202void pci_configure_slot(struct pci_dev *dev) 161void pci_configure_slot(struct pci_dev *dev)
203{ 162{
204 struct pci_dev *cdev; 163 struct pci_dev *cdev;
@@ -210,9 +169,9 @@ void pci_configure_slot(struct pci_dev *dev)
210 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) 169 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
211 return; 170 return;
212 171
213 ret = pci_set_payload(dev); 172 if (dev->bus && dev->bus->self)
214 if (ret) 173 pcie_bus_configure_settings(dev->bus,
215 dev_warn(&dev->dev, "could not set device max payload\n"); 174 dev->bus->self->pcie_mpss);
216 175
217 memset(&hpp, 0, sizeof(hpp)); 176 memset(&hpp, 0, sizeof(hpp));
218 ret = pci_get_hp_params(dev, &hpp); 177 ret = pci_get_hp_params(dev, &hpp);
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index c94d37ec55c8..f0929934bb7a 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -55,7 +55,7 @@ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
55 */ 55 */
56 if (bus->bridge->of_node) 56 if (bus->bridge->of_node)
57 return of_node_get(bus->bridge->of_node); 57 return of_node_get(bus->bridge->of_node);
58 if (bus->bridge->parent->of_node) 58 if (bus->bridge->parent && bus->bridge->parent->of_node)
59 return of_node_get(bus->bridge->parent->of_node); 59 return of_node_get(bus->bridge->parent->of_node);
60 return NULL; 60 return NULL;
61} 61}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 08a95b369d85..4e84fd4a4312 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -77,6 +77,8 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
77unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; 77unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
78unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; 78unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
79 79
80enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
81
80/* 82/*
81 * The default CLS is used if arch didn't set CLS explicitly and not 83 * The default CLS is used if arch didn't set CLS explicitly and not
82 * all pci devices agree on the same value. Arch can override either 84 * all pci devices agree on the same value. Arch can override either
@@ -3223,6 +3225,67 @@ out:
3223EXPORT_SYMBOL(pcie_set_readrq); 3225EXPORT_SYMBOL(pcie_set_readrq);
3224 3226
3225/** 3227/**
3228 * pcie_get_mps - get PCI Express maximum payload size
3229 * @dev: PCI device to query
3230 *
3231 * Returns maximum payload size in bytes
3232 * or appropriate error value.
3233 */
3234int pcie_get_mps(struct pci_dev *dev)
3235{
3236 int ret, cap;
3237 u16 ctl;
3238
3239 cap = pci_pcie_cap(dev);
3240 if (!cap)
3241 return -EINVAL;
3242
3243 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3244 if (!ret)
3245 ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3246
3247 return ret;
3248}
3249
3250/**
3251 * pcie_set_mps - set PCI Express maximum payload size
3252 * @dev: PCI device to query
3253 * @mps: maximum payload size in bytes
3254 * valid values are 128, 256, 512, 1024, 2048, 4096
3255 *
3256 * If possible sets maximum payload size
3257 */
3258int pcie_set_mps(struct pci_dev *dev, int mps)
3259{
3260 int cap, err = -EINVAL;
3261 u16 ctl, v;
3262
3263 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3264 goto out;
3265
3266 v = ffs(mps) - 8;
3267 if (v > dev->pcie_mpss)
3268 goto out;
3269 v <<= 5;
3270
3271 cap = pci_pcie_cap(dev);
3272 if (!cap)
3273 goto out;
3274
3275 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3276 if (err)
3277 goto out;
3278
3279 if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3280 ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3281 ctl |= v;
3282 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3283 }
3284out:
3285 return err;
3286}
3287
3288/**
3226 * pci_select_bars - Make BAR mask from the type of resource 3289 * pci_select_bars - Make BAR mask from the type of resource
3227 * @dev: the PCI device for which BAR mask is made 3290 * @dev: the PCI device for which BAR mask is made
3228 * @flags: resource type mask to be selected 3291 * @flags: resource type mask to be selected
@@ -3505,6 +3568,10 @@ static int __init pci_setup(char *str)
3505 pci_hotplug_io_size = memparse(str + 9, &str); 3568 pci_hotplug_io_size = memparse(str + 9, &str);
3506 } else if (!strncmp(str, "hpmemsize=", 10)) { 3569 } else if (!strncmp(str, "hpmemsize=", 10)) {
3507 pci_hotplug_mem_size = memparse(str + 10, &str); 3570 pci_hotplug_mem_size = memparse(str + 10, &str);
3571 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
3572 pcie_bus_config = PCIE_BUS_SAFE;
3573 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
3574 pcie_bus_config = PCIE_BUS_PERFORMANCE;
3508 } else { 3575 } else {
3509 printk(KERN_ERR "PCI: Unknown option `%s'\n", 3576 printk(KERN_ERR "PCI: Unknown option `%s'\n",
3510 str); 3577 str);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index c8cee764b0de..b74084e9ca12 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -283,6 +283,8 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
283 283
284#endif /* CONFIG_PCI_IOV */ 284#endif /* CONFIG_PCI_IOV */
285 285
286extern unsigned long pci_cardbus_resource_alignment(struct resource *);
287
286static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, 288static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
287 struct resource *res) 289 struct resource *res)
288{ 290{
@@ -292,6 +294,8 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
292 if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) 294 if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
293 return pci_sriov_resource_alignment(dev, resno); 295 return pci_sriov_resource_alignment(dev, resno);
294#endif 296#endif
297 if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS)
298 return pci_cardbus_resource_alignment(res);
295 return resource_alignment(res); 299 return resource_alignment(res);
296} 300}
297 301
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 795c9026d55f..b1187ff31d89 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -856,6 +856,8 @@ void set_pcie_port_type(struct pci_dev *pdev)
856 pdev->pcie_cap = pos; 856 pdev->pcie_cap = pos;
857 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); 857 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
858 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; 858 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
859 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
860 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
859} 861}
860 862
861void set_pcie_hotplug_bridge(struct pci_dev *pdev) 863void set_pcie_hotplug_bridge(struct pci_dev *pdev)
@@ -1326,6 +1328,150 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
1326 return nr; 1328 return nr;
1327} 1329}
1328 1330
1331static int pcie_find_smpss(struct pci_dev *dev, void *data)
1332{
1333 u8 *smpss = data;
1334
1335 if (!pci_is_pcie(dev))
1336 return 0;
1337
1338 /* For PCIE hotplug enabled slots not connected directly to a
1339 * PCI-E root port, there can be problems when hotplugging
1340 * devices. This is due to the possibility of hotplugging a
1341 * device into the fabric with a smaller MPS that the devices
1342 * currently running have configured. Modifying the MPS on the
1343 * running devices could cause a fatal bus error due to an
1344 * incoming frame being larger than the newly configured MPS.
1345 * To work around this, the MPS for the entire fabric must be
1346 * set to the minimum size. Any devices hotplugged into this
1347 * fabric will have the minimum MPS set. If the PCI hotplug
1348 * slot is directly connected to the root port and there are not
1349 * other devices on the fabric (which seems to be the most
1350 * common case), then this is not an issue and MPS discovery
1351 * will occur as normal.
1352 */
1353 if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
1354 dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT))
1355 *smpss = 0;
1356
1357 if (*smpss > dev->pcie_mpss)
1358 *smpss = dev->pcie_mpss;
1359
1360 return 0;
1361}
1362
1363static void pcie_write_mps(struct pci_dev *dev, int mps)
1364{
1365 int rc, dev_mpss;
1366
1367 dev_mpss = 128 << dev->pcie_mpss;
1368
1369 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
1370 if (dev->bus->self) {
1371 dev_dbg(&dev->bus->dev, "Bus MPSS %d\n",
1372 128 << dev->bus->self->pcie_mpss);
1373
1374 /* For "MPS Force Max", the assumption is made that
1375 * downstream communication will never be larger than
1376 * the MRRS. So, the MPS only needs to be configured
1377 * for the upstream communication. This being the case,
1378 * walk from the top down and set the MPS of the child
1379 * to that of the parent bus.
1380 */
1381 mps = 128 << dev->bus->self->pcie_mpss;
1382 if (mps > dev_mpss)
1383 dev_warn(&dev->dev, "MPS configured higher than"
1384 " maximum supported by the device. If"
1385 " a bus issue occurs, try running with"
1386 " pci=pcie_bus_safe.\n");
1387 }
1388
1389 dev->pcie_mpss = ffs(mps) - 8;
1390 }
1391
1392 rc = pcie_set_mps(dev, mps);
1393 if (rc)
1394 dev_err(&dev->dev, "Failed attempting to set the MPS\n");
1395}
1396
1397static void pcie_write_mrrs(struct pci_dev *dev, int mps)
1398{
1399 int rc, mrrs, dev_mpss;
1400
1401 /* In the "safe" case, do not configure the MRRS. There appear to be
1402 * issues with setting MRRS to 0 on a number of devices.
1403 */
1404
1405 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1406 return;
1407
1408 dev_mpss = 128 << dev->pcie_mpss;
1409
1410 /* For Max performance, the MRRS must be set to the largest supported
1411 * value. However, it cannot be configured larger than the MPS the
1412 * device or the bus can support. This assumes that the largest MRRS
1413 * available on the device cannot be smaller than the device MPSS.
1414 */
1415 mrrs = min(mps, dev_mpss);
1416
1417 /* MRRS is a R/W register. Invalid values can be written, but a
1418 * subsequent read will verify if the value is acceptable or not.
1419 * If the MRRS value provided is not acceptable (e.g., too large),
1420 * shrink the value until it is acceptable to the HW.
1421 */
1422 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1423 dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value"
1424 " to %d. If any issues are encountered, please try "
1425 "running with pci=pcie_bus_safe\n", mrrs);
1426 rc = pcie_set_readrq(dev, mrrs);
1427 if (rc)
1428 dev_err(&dev->dev,
1429 "Failed attempting to set the MRRS\n");
1430
1431 mrrs /= 2;
1432 }
1433}
1434
1435static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1436{
1437 int mps = 128 << *(u8 *)data;
1438
1439 if (!pci_is_pcie(dev))
1440 return 0;
1441
1442 dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
1443 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
1444
1445 pcie_write_mps(dev, mps);
1446 pcie_write_mrrs(dev, mps);
1447
1448 dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
1449 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
1450
1451 return 0;
1452}
1453
1454/* pcie_bus_configure_mps requires that pci_walk_bus work in a top-down,
1455 * parents then children fashion. If this changes, then this code will not
1456 * work as designed.
1457 */
1458void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
1459{
1460 u8 smpss = mpss;
1461
1462 if (!pci_is_pcie(bus->self))
1463 return;
1464
1465 if (pcie_bus_config == PCIE_BUS_SAFE) {
1466 pcie_find_smpss(bus->self, &smpss);
1467 pci_walk_bus(bus, pcie_find_smpss, &smpss);
1468 }
1469
1470 pcie_bus_configure_set(bus->self, &smpss);
1471 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1472}
1473EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
1474
1329unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) 1475unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1330{ 1476{
1331 unsigned int devfn, pass, max = bus->secondary; 1477 unsigned int devfn, pass, max = bus->secondary;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 8a1d3c7863a8..784da9d36029 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -34,6 +34,7 @@ struct resource_list_x {
34 resource_size_t start; 34 resource_size_t start;
35 resource_size_t end; 35 resource_size_t end;
36 resource_size_t add_size; 36 resource_size_t add_size;
37 resource_size_t min_align;
37 unsigned long flags; 38 unsigned long flags;
38}; 39};
39 40
@@ -65,7 +66,7 @@ void pci_realloc(void)
65 */ 66 */
66static void add_to_list(struct resource_list_x *head, 67static void add_to_list(struct resource_list_x *head,
67 struct pci_dev *dev, struct resource *res, 68 struct pci_dev *dev, struct resource *res,
68 resource_size_t add_size) 69 resource_size_t add_size, resource_size_t min_align)
69{ 70{
70 struct resource_list_x *list = head; 71 struct resource_list_x *list = head;
71 struct resource_list_x *ln = list->next; 72 struct resource_list_x *ln = list->next;
@@ -84,13 +85,16 @@ static void add_to_list(struct resource_list_x *head,
84 tmp->end = res->end; 85 tmp->end = res->end;
85 tmp->flags = res->flags; 86 tmp->flags = res->flags;
86 tmp->add_size = add_size; 87 tmp->add_size = add_size;
88 tmp->min_align = min_align;
87 list->next = tmp; 89 list->next = tmp;
88} 90}
89 91
90static void add_to_failed_list(struct resource_list_x *head, 92static void add_to_failed_list(struct resource_list_x *head,
91 struct pci_dev *dev, struct resource *res) 93 struct pci_dev *dev, struct resource *res)
92{ 94{
93 add_to_list(head, dev, res, 0); 95 add_to_list(head, dev, res,
96 0 /* dont care */,
97 0 /* dont care */);
94} 98}
95 99
96static void __dev_sort_resources(struct pci_dev *dev, 100static void __dev_sort_resources(struct pci_dev *dev,
@@ -121,18 +125,18 @@ static inline void reset_resource(struct resource *res)
121} 125}
122 126
123/** 127/**
124 * adjust_resources_sorted() - satisfy any additional resource requests 128 * reassign_resources_sorted() - satisfy any additional resource requests
125 * 129 *
126 * @add_head : head of the list tracking requests requiring additional 130 * @realloc_head : head of the list tracking requests requiring additional
127 * resources 131 * resources
128 * @head : head of the list tracking requests with allocated 132 * @head : head of the list tracking requests with allocated
129 * resources 133 * resources
130 * 134 *
131 * Walk through each element of the add_head and try to procure 135 * Walk through each element of the realloc_head and try to procure
132 * additional resources for the element, provided the element 136 * additional resources for the element, provided the element
133 * is in the head list. 137 * is in the head list.
134 */ 138 */
135static void adjust_resources_sorted(struct resource_list_x *add_head, 139static void reassign_resources_sorted(struct resource_list_x *realloc_head,
136 struct resource_list *head) 140 struct resource_list *head)
137{ 141{
138 struct resource *res; 142 struct resource *res;
@@ -141,8 +145,8 @@ static void adjust_resources_sorted(struct resource_list_x *add_head,
141 resource_size_t add_size; 145 resource_size_t add_size;
142 int idx; 146 int idx;
143 147
144 prev = add_head; 148 prev = realloc_head;
145 for (list = add_head->next; list;) { 149 for (list = realloc_head->next; list;) {
146 res = list->res; 150 res = list->res;
147 /* skip resource that has been reset */ 151 /* skip resource that has been reset */
148 if (!res->flags) 152 if (!res->flags)
@@ -159,13 +163,17 @@ static void adjust_resources_sorted(struct resource_list_x *add_head,
159 163
160 idx = res - &list->dev->resource[0]; 164 idx = res - &list->dev->resource[0];
161 add_size=list->add_size; 165 add_size=list->add_size;
162 if (!resource_size(res) && add_size) { 166 if (!resource_size(res)) {
163 res->end = res->start + add_size - 1; 167 res->start = list->start;
164 if(pci_assign_resource(list->dev, idx)) 168 res->end = res->start + add_size - 1;
169 if(pci_assign_resource(list->dev, idx))
165 reset_resource(res); 170 reset_resource(res);
166 } else if (add_size) { 171 } else {
167 adjust_resource(res, res->start, 172 resource_size_t align = list->min_align;
168 resource_size(res) + add_size); 173 res->flags |= list->flags & (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
174 if (pci_reassign_resource(list->dev, idx, add_size, align))
175 dev_printk(KERN_DEBUG, &list->dev->dev, "failed to add optional resources res=%pR\n",
176 res);
169 } 177 }
170out: 178out:
171 tmp = list; 179 tmp = list;
@@ -210,16 +218,16 @@ static void assign_requested_resources_sorted(struct resource_list *head,
210} 218}
211 219
212static void __assign_resources_sorted(struct resource_list *head, 220static void __assign_resources_sorted(struct resource_list *head,
213 struct resource_list_x *add_head, 221 struct resource_list_x *realloc_head,
214 struct resource_list_x *fail_head) 222 struct resource_list_x *fail_head)
215{ 223{
216 /* Satisfy the must-have resource requests */ 224 /* Satisfy the must-have resource requests */
217 assign_requested_resources_sorted(head, fail_head); 225 assign_requested_resources_sorted(head, fail_head);
218 226
219 /* Try to satisfy any additional nice-to-have resource 227 /* Try to satisfy any additional optional resource
220 requests */ 228 requests */
221 if (add_head) 229 if (realloc_head)
222 adjust_resources_sorted(add_head, head); 230 reassign_resources_sorted(realloc_head, head);
223 free_list(resource_list, head); 231 free_list(resource_list, head);
224} 232}
225 233
@@ -235,7 +243,7 @@ static void pdev_assign_resources_sorted(struct pci_dev *dev,
235} 243}
236 244
237static void pbus_assign_resources_sorted(const struct pci_bus *bus, 245static void pbus_assign_resources_sorted(const struct pci_bus *bus,
238 struct resource_list_x *add_head, 246 struct resource_list_x *realloc_head,
239 struct resource_list_x *fail_head) 247 struct resource_list_x *fail_head)
240{ 248{
241 struct pci_dev *dev; 249 struct pci_dev *dev;
@@ -245,7 +253,7 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus,
245 list_for_each_entry(dev, &bus->devices, bus_list) 253 list_for_each_entry(dev, &bus->devices, bus_list)
246 __dev_sort_resources(dev, &head); 254 __dev_sort_resources(dev, &head);
247 255
248 __assign_resources_sorted(&head, add_head, fail_head); 256 __assign_resources_sorted(&head, realloc_head, fail_head);
249} 257}
250 258
251void pci_setup_cardbus(struct pci_bus *bus) 259void pci_setup_cardbus(struct pci_bus *bus)
@@ -540,13 +548,27 @@ static resource_size_t calculate_memsize(resource_size_t size,
540 return size; 548 return size;
541} 549}
542 550
551static resource_size_t get_res_add_size(struct resource_list_x *realloc_head,
552 struct resource *res)
553{
554 struct resource_list_x *list;
555
556 /* check if it is in realloc_head list */
557 for (list = realloc_head->next; list && list->res != res;
558 list = list->next);
559 if (list)
560 return list->add_size;
561
562 return 0;
563}
564
543/** 565/**
544 * pbus_size_io() - size the io window of a given bus 566 * pbus_size_io() - size the io window of a given bus
545 * 567 *
546 * @bus : the bus 568 * @bus : the bus
547 * @min_size : the minimum io window that must to be allocated 569 * @min_size : the minimum io window that must to be allocated
548 * @add_size : additional optional io window 570 * @add_size : additional optional io window
549 * @add_head : track the additional io window on this list 571 * @realloc_head : track the additional io window on this list
550 * 572 *
551 * Sizing the IO windows of the PCI-PCI bridge is trivial, 573 * Sizing the IO windows of the PCI-PCI bridge is trivial,
552 * since these windows have 4K granularity and the IO ranges 574 * since these windows have 4K granularity and the IO ranges
@@ -554,11 +576,12 @@ static resource_size_t calculate_memsize(resource_size_t size,
554 * We must be careful with the ISA aliasing though. 576 * We must be careful with the ISA aliasing though.
555 */ 577 */
556static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, 578static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
557 resource_size_t add_size, struct resource_list_x *add_head) 579 resource_size_t add_size, struct resource_list_x *realloc_head)
558{ 580{
559 struct pci_dev *dev; 581 struct pci_dev *dev;
560 struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); 582 struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
561 unsigned long size = 0, size0 = 0, size1 = 0; 583 unsigned long size = 0, size0 = 0, size1 = 0;
584 resource_size_t children_add_size = 0;
562 585
563 if (!b_res) 586 if (!b_res)
564 return; 587 return;
@@ -579,11 +602,16 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
579 size += r_size; 602 size += r_size;
580 else 603 else
581 size1 += r_size; 604 size1 += r_size;
605
606 if (realloc_head)
607 children_add_size += get_res_add_size(realloc_head, r);
582 } 608 }
583 } 609 }
584 size0 = calculate_iosize(size, min_size, size1, 610 size0 = calculate_iosize(size, min_size, size1,
585 resource_size(b_res), 4096); 611 resource_size(b_res), 4096);
586 size1 = (!add_head || (add_head && !add_size)) ? size0 : 612 if (children_add_size > add_size)
613 add_size = children_add_size;
614 size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
587 calculate_iosize(size, min_size+add_size, size1, 615 calculate_iosize(size, min_size+add_size, size1,
588 resource_size(b_res), 4096); 616 resource_size(b_res), 4096);
589 if (!size0 && !size1) { 617 if (!size0 && !size1) {
@@ -598,8 +626,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
598 b_res->start = 4096; 626 b_res->start = 4096;
599 b_res->end = b_res->start + size0 - 1; 627 b_res->end = b_res->start + size0 - 1;
600 b_res->flags |= IORESOURCE_STARTALIGN; 628 b_res->flags |= IORESOURCE_STARTALIGN;
601 if (size1 > size0 && add_head) 629 if (size1 > size0 && realloc_head)
602 add_to_list(add_head, bus->self, b_res, size1-size0); 630 add_to_list(realloc_head, bus->self, b_res, size1-size0, 4096);
603} 631}
604 632
605/** 633/**
@@ -608,7 +636,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
608 * @bus : the bus 636 * @bus : the bus
609 * @min_size : the minimum memory window that must to be allocated 637 * @min_size : the minimum memory window that must to be allocated
610 * @add_size : additional optional memory window 638 * @add_size : additional optional memory window
611 * @add_head : track the additional memory window on this list 639 * @realloc_head : track the additional memory window on this list
612 * 640 *
613 * Calculate the size of the bus and minimal alignment which 641 * Calculate the size of the bus and minimal alignment which
614 * guarantees that all child resources fit in this size. 642 * guarantees that all child resources fit in this size.
@@ -616,7 +644,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
616static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, 644static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
617 unsigned long type, resource_size_t min_size, 645 unsigned long type, resource_size_t min_size,
618 resource_size_t add_size, 646 resource_size_t add_size,
619 struct resource_list_x *add_head) 647 struct resource_list_x *realloc_head)
620{ 648{
621 struct pci_dev *dev; 649 struct pci_dev *dev;
622 resource_size_t min_align, align, size, size0, size1; 650 resource_size_t min_align, align, size, size0, size1;
@@ -624,6 +652,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
624 int order, max_order; 652 int order, max_order;
625 struct resource *b_res = find_free_bus_resource(bus, type); 653 struct resource *b_res = find_free_bus_resource(bus, type);
626 unsigned int mem64_mask = 0; 654 unsigned int mem64_mask = 0;
655 resource_size_t children_add_size = 0;
627 656
628 if (!b_res) 657 if (!b_res)
629 return 0; 658 return 0;
@@ -645,6 +674,16 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
645 if (r->parent || (r->flags & mask) != type) 674 if (r->parent || (r->flags & mask) != type)
646 continue; 675 continue;
647 r_size = resource_size(r); 676 r_size = resource_size(r);
677#ifdef CONFIG_PCI_IOV
678 /* put SRIOV requested res to the optional list */
679 if (realloc_head && i >= PCI_IOV_RESOURCES &&
680 i <= PCI_IOV_RESOURCE_END) {
681 r->end = r->start - 1;
682 add_to_list(realloc_head, dev, r, r_size, 0/* dont' care */);
683 children_add_size += r_size;
684 continue;
685 }
686#endif
648 /* For bridges size != alignment */ 687 /* For bridges size != alignment */
649 align = pci_resource_alignment(dev, r); 688 align = pci_resource_alignment(dev, r);
650 order = __ffs(align) - 20; 689 order = __ffs(align) - 20;
@@ -665,6 +704,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
665 if (order > max_order) 704 if (order > max_order)
666 max_order = order; 705 max_order = order;
667 mem64_mask &= r->flags & IORESOURCE_MEM_64; 706 mem64_mask &= r->flags & IORESOURCE_MEM_64;
707
708 if (realloc_head)
709 children_add_size += get_res_add_size(realloc_head, r);
668 } 710 }
669 } 711 }
670 align = 0; 712 align = 0;
@@ -681,7 +723,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
681 align += aligns[order]; 723 align += aligns[order];
682 } 724 }
683 size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); 725 size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
684 size1 = (!add_head || (add_head && !add_size)) ? size0 : 726 if (children_add_size > add_size)
727 add_size = children_add_size;
728 size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
685 calculate_memsize(size, min_size+add_size, 0, 729 calculate_memsize(size, min_size+add_size, 0,
686 resource_size(b_res), min_align); 730 resource_size(b_res), min_align);
687 if (!size0 && !size1) { 731 if (!size0 && !size1) {
@@ -695,12 +739,22 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
695 b_res->start = min_align; 739 b_res->start = min_align;
696 b_res->end = size0 + min_align - 1; 740 b_res->end = size0 + min_align - 1;
697 b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask; 741 b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask;
698 if (size1 > size0 && add_head) 742 if (size1 > size0 && realloc_head)
699 add_to_list(add_head, bus->self, b_res, size1-size0); 743 add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align);
700 return 1; 744 return 1;
701} 745}
702 746
703static void pci_bus_size_cardbus(struct pci_bus *bus) 747unsigned long pci_cardbus_resource_alignment(struct resource *res)
748{
749 if (res->flags & IORESOURCE_IO)
750 return pci_cardbus_io_size;
751 if (res->flags & IORESOURCE_MEM)
752 return pci_cardbus_mem_size;
753 return 0;
754}
755
756static void pci_bus_size_cardbus(struct pci_bus *bus,
757 struct resource_list_x *realloc_head)
704{ 758{
705 struct pci_dev *bridge = bus->self; 759 struct pci_dev *bridge = bus->self;
706 struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; 760 struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
@@ -711,12 +765,14 @@ static void pci_bus_size_cardbus(struct pci_bus *bus)
711 * a fixed amount of bus space for CardBus bridges. 765 * a fixed amount of bus space for CardBus bridges.
712 */ 766 */
713 b_res[0].start = 0; 767 b_res[0].start = 0;
714 b_res[0].end = pci_cardbus_io_size - 1;
715 b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; 768 b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
769 if (realloc_head)
770 add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size, 0 /* dont care */);
716 771
717 b_res[1].start = 0; 772 b_res[1].start = 0;
718 b_res[1].end = pci_cardbus_io_size - 1;
719 b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; 773 b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
774 if (realloc_head)
775 add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size, 0 /* dont care */);
720 776
721 /* 777 /*
722 * Check whether prefetchable memory is supported 778 * Check whether prefetchable memory is supported
@@ -736,21 +792,31 @@ static void pci_bus_size_cardbus(struct pci_bus *bus)
736 */ 792 */
737 if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { 793 if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
738 b_res[2].start = 0; 794 b_res[2].start = 0;
739 b_res[2].end = pci_cardbus_mem_size - 1;
740 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN; 795 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;
796 if (realloc_head)
797 add_to_list(realloc_head, bridge, b_res+2, pci_cardbus_mem_size, 0 /* dont care */);
741 798
742 b_res[3].start = 0; 799 b_res[3].start = 0;
743 b_res[3].end = pci_cardbus_mem_size - 1;
744 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; 800 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
801 if (realloc_head)
802 add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size, 0 /* dont care */);
745 } else { 803 } else {
746 b_res[3].start = 0; 804 b_res[3].start = 0;
747 b_res[3].end = pci_cardbus_mem_size * 2 - 1;
748 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; 805 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
806 if (realloc_head)
807 add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size * 2, 0 /* dont care */);
749 } 808 }
809
810 /* set the size of the resource to zero, so that the resource does not
811 * get assigned during required-resource allocation cycle but gets assigned
812 * during the optional-resource allocation cycle.
813 */
814 b_res[0].start = b_res[1].start = b_res[2].start = b_res[3].start = 1;
815 b_res[0].end = b_res[1].end = b_res[2].end = b_res[3].end = 0;
750} 816}
751 817
752void __ref __pci_bus_size_bridges(struct pci_bus *bus, 818void __ref __pci_bus_size_bridges(struct pci_bus *bus,
753 struct resource_list_x *add_head) 819 struct resource_list_x *realloc_head)
754{ 820{
755 struct pci_dev *dev; 821 struct pci_dev *dev;
756 unsigned long mask, prefmask; 822 unsigned long mask, prefmask;
@@ -763,12 +829,12 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
763 829
764 switch (dev->class >> 8) { 830 switch (dev->class >> 8) {
765 case PCI_CLASS_BRIDGE_CARDBUS: 831 case PCI_CLASS_BRIDGE_CARDBUS:
766 pci_bus_size_cardbus(b); 832 pci_bus_size_cardbus(b, realloc_head);
767 break; 833 break;
768 834
769 case PCI_CLASS_BRIDGE_PCI: 835 case PCI_CLASS_BRIDGE_PCI:
770 default: 836 default:
771 __pci_bus_size_bridges(b, add_head); 837 __pci_bus_size_bridges(b, realloc_head);
772 break; 838 break;
773 } 839 }
774 } 840 }
@@ -792,7 +858,7 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
792 * Follow thru 858 * Follow thru
793 */ 859 */
794 default: 860 default:
795 pbus_size_io(bus, 0, additional_io_size, add_head); 861 pbus_size_io(bus, 0, additional_io_size, realloc_head);
796 /* If the bridge supports prefetchable range, size it 862 /* If the bridge supports prefetchable range, size it
797 separately. If it doesn't, or its prefetchable window 863 separately. If it doesn't, or its prefetchable window
798 has already been allocated by arch code, try 864 has already been allocated by arch code, try
@@ -800,11 +866,11 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
800 resources. */ 866 resources. */
801 mask = IORESOURCE_MEM; 867 mask = IORESOURCE_MEM;
802 prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; 868 prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
803 if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, add_head)) 869 if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, realloc_head))
804 mask = prefmask; /* Success, size non-prefetch only. */ 870 mask = prefmask; /* Success, size non-prefetch only. */
805 else 871 else
806 additional_mem_size += additional_mem_size; 872 additional_mem_size += additional_mem_size;
807 pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, add_head); 873 pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, realloc_head);
808 break; 874 break;
809 } 875 }
810} 876}
@@ -816,20 +882,20 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
816EXPORT_SYMBOL(pci_bus_size_bridges); 882EXPORT_SYMBOL(pci_bus_size_bridges);
817 883
818static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, 884static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
819 struct resource_list_x *add_head, 885 struct resource_list_x *realloc_head,
820 struct resource_list_x *fail_head) 886 struct resource_list_x *fail_head)
821{ 887{
822 struct pci_bus *b; 888 struct pci_bus *b;
823 struct pci_dev *dev; 889 struct pci_dev *dev;
824 890
825 pbus_assign_resources_sorted(bus, add_head, fail_head); 891 pbus_assign_resources_sorted(bus, realloc_head, fail_head);
826 892
827 list_for_each_entry(dev, &bus->devices, bus_list) { 893 list_for_each_entry(dev, &bus->devices, bus_list) {
828 b = dev->subordinate; 894 b = dev->subordinate;
829 if (!b) 895 if (!b)
830 continue; 896 continue;
831 897
832 __pci_bus_assign_resources(b, add_head, fail_head); 898 __pci_bus_assign_resources(b, realloc_head, fail_head);
833 899
834 switch (dev->class >> 8) { 900 switch (dev->class >> 8) {
835 case PCI_CLASS_BRIDGE_PCI: 901 case PCI_CLASS_BRIDGE_PCI:
@@ -1039,7 +1105,7 @@ void __init
1039pci_assign_unassigned_resources(void) 1105pci_assign_unassigned_resources(void)
1040{ 1106{
1041 struct pci_bus *bus; 1107 struct pci_bus *bus;
1042 struct resource_list_x add_list; /* list of resources that 1108 struct resource_list_x realloc_list; /* list of resources that
1043 want additional resources */ 1109 want additional resources */
1044 int tried_times = 0; 1110 int tried_times = 0;
1045 enum release_type rel_type = leaf_only; 1111 enum release_type rel_type = leaf_only;
@@ -1052,7 +1118,7 @@ pci_assign_unassigned_resources(void)
1052 1118
1053 1119
1054 head.next = NULL; 1120 head.next = NULL;
1055 add_list.next = NULL; 1121 realloc_list.next = NULL;
1056 1122
1057 pci_try_num = max_depth + 1; 1123 pci_try_num = max_depth + 1;
1058 printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n", 1124 printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
@@ -1062,12 +1128,12 @@ again:
1062 /* Depth first, calculate sizes and alignments of all 1128 /* Depth first, calculate sizes and alignments of all
1063 subordinate buses. */ 1129 subordinate buses. */
1064 list_for_each_entry(bus, &pci_root_buses, node) 1130 list_for_each_entry(bus, &pci_root_buses, node)
1065 __pci_bus_size_bridges(bus, &add_list); 1131 __pci_bus_size_bridges(bus, &realloc_list);
1066 1132
1067 /* Depth last, allocate resources and update the hardware. */ 1133 /* Depth last, allocate resources and update the hardware. */
1068 list_for_each_entry(bus, &pci_root_buses, node) 1134 list_for_each_entry(bus, &pci_root_buses, node)
1069 __pci_bus_assign_resources(bus, &add_list, &head); 1135 __pci_bus_assign_resources(bus, &realloc_list, &head);
1070 BUG_ON(add_list.next); 1136 BUG_ON(realloc_list.next);
1071 tried_times++; 1137 tried_times++;
1072 1138
1073 /* any device complain? */ 1139 /* any device complain? */
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 319f359906e8..51a9095c7da4 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -128,16 +128,16 @@ void pci_disable_bridge_window(struct pci_dev *dev)
128} 128}
129#endif /* CONFIG_PCI_QUIRKS */ 129#endif /* CONFIG_PCI_QUIRKS */
130 130
131
132
131static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, 133static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
132 int resno) 134 int resno, resource_size_t size, resource_size_t align)
133{ 135{
134 struct resource *res = dev->resource + resno; 136 struct resource *res = dev->resource + resno;
135 resource_size_t size, min, align; 137 resource_size_t min;
136 int ret; 138 int ret;
137 139
138 size = resource_size(res);
139 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; 140 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
140 align = pci_resource_alignment(dev, res);
141 141
142 /* First, try exact prefetching match.. */ 142 /* First, try exact prefetching match.. */
143 ret = pci_bus_alloc_resource(bus, res, size, align, min, 143 ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -154,56 +154,101 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
154 ret = pci_bus_alloc_resource(bus, res, size, align, min, 0, 154 ret = pci_bus_alloc_resource(bus, res, size, align, min, 0,
155 pcibios_align_resource, dev); 155 pcibios_align_resource, dev);
156 } 156 }
157 return ret;
158}
157 159
158 if (ret < 0 && dev->fw_addr[resno]) { 160static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
159 struct resource *root, *conflict; 161 int resno, resource_size_t size)
160 resource_size_t start, end; 162{
163 struct resource *root, *conflict;
164 resource_size_t start, end;
165 int ret = 0;
161 166
162 /* 167 if (res->flags & IORESOURCE_IO)
163 * If we failed to assign anything, let's try the address 168 root = &ioport_resource;
164 * where firmware left it. That at least has a chance of 169 else
165 * working, which is better than just leaving it disabled. 170 root = &iomem_resource;
166 */ 171
172 start = res->start;
173 end = res->end;
174 res->start = dev->fw_addr[resno];
175 res->end = res->start + size - 1;
176 dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n",
177 resno, res);
178 conflict = request_resource_conflict(root, res);
179 if (conflict) {
180 dev_info(&dev->dev,
181 "BAR %d: %pR conflicts with %s %pR\n", resno,
182 res, conflict->name, conflict);
183 res->start = start;
184 res->end = end;
185 ret = 1;
186 }
187 return ret;
188}
189
190static int _pci_assign_resource(struct pci_dev *dev, int resno, int size, resource_size_t min_align)
191{
192 struct resource *res = dev->resource + resno;
193 struct pci_bus *bus;
194 int ret;
195 char *type;
167 196
168 if (res->flags & IORESOURCE_IO) 197 bus = dev->bus;
169 root = &ioport_resource; 198 while ((ret = __pci_assign_resource(bus, dev, resno, size, min_align))) {
199 if (!bus->parent || !bus->self->transparent)
200 break;
201 bus = bus->parent;
202 }
203
204 if (ret) {
205 if (res->flags & IORESOURCE_MEM)
206 if (res->flags & IORESOURCE_PREFETCH)
207 type = "mem pref";
208 else
209 type = "mem";
210 else if (res->flags & IORESOURCE_IO)
211 type = "io";
170 else 212 else
171 root = &iomem_resource; 213 type = "unknown";
172 214 dev_info(&dev->dev,
173 start = res->start; 215 "BAR %d: can't assign %s (size %#llx)\n",
174 end = res->end; 216 resno, type, (unsigned long long) resource_size(res));
175 res->start = dev->fw_addr[resno];
176 res->end = res->start + size - 1;
177 dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n",
178 resno, res);
179 conflict = request_resource_conflict(root, res);
180 if (conflict) {
181 dev_info(&dev->dev,
182 "BAR %d: %pR conflicts with %s %pR\n", resno,
183 res, conflict->name, conflict);
184 res->start = start;
185 res->end = end;
186 } else
187 ret = 0;
188 } 217 }
189 218
219 return ret;
220}
221
222int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize,
223 resource_size_t min_align)
224{
225 struct resource *res = dev->resource + resno;
226 resource_size_t new_size;
227 int ret;
228
229 if (!res->parent) {
230 dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resouce %pR "
231 "\n", resno, res);
232 return -EINVAL;
233 }
234
235 new_size = resource_size(res) + addsize + min_align;
236 ret = _pci_assign_resource(dev, resno, new_size, min_align);
190 if (!ret) { 237 if (!ret) {
191 res->flags &= ~IORESOURCE_STARTALIGN; 238 res->flags &= ~IORESOURCE_STARTALIGN;
192 dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); 239 dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
193 if (resno < PCI_BRIDGE_RESOURCES) 240 if (resno < PCI_BRIDGE_RESOURCES)
194 pci_update_resource(dev, resno); 241 pci_update_resource(dev, resno);
195 } 242 }
196
197 return ret; 243 return ret;
198} 244}
199 245
200int pci_assign_resource(struct pci_dev *dev, int resno) 246int pci_assign_resource(struct pci_dev *dev, int resno)
201{ 247{
202 struct resource *res = dev->resource + resno; 248 struct resource *res = dev->resource + resno;
203 resource_size_t align; 249 resource_size_t align, size;
204 struct pci_bus *bus; 250 struct pci_bus *bus;
205 int ret; 251 int ret;
206 char *type;
207 252
208 align = pci_resource_alignment(dev, res); 253 align = pci_resource_alignment(dev, res);
209 if (!align) { 254 if (!align) {
@@ -213,34 +258,27 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
213 } 258 }
214 259
215 bus = dev->bus; 260 bus = dev->bus;
216 while ((ret = __pci_assign_resource(bus, dev, resno))) { 261 size = resource_size(res);
217 if (bus->parent && bus->self->transparent) 262 ret = _pci_assign_resource(dev, resno, size, align);
218 bus = bus->parent;
219 else
220 bus = NULL;
221 if (bus)
222 continue;
223 break;
224 }
225 263
226 if (ret) { 264 /*
227 if (res->flags & IORESOURCE_MEM) 265 * If we failed to assign anything, let's try the address
228 if (res->flags & IORESOURCE_PREFETCH) 266 * where firmware left it. That at least has a chance of
229 type = "mem pref"; 267 * working, which is better than just leaving it disabled.
230 else 268 */
231 type = "mem"; 269 if (ret < 0 && dev->fw_addr[resno])
232 else if (res->flags & IORESOURCE_IO) 270 ret = pci_revert_fw_address(res, dev, resno, size);
233 type = "io";
234 else
235 type = "unknown";
236 dev_info(&dev->dev,
237 "BAR %d: can't assign %s (size %#llx)\n",
238 resno, type, (unsigned long long) resource_size(res));
239 }
240 271
272 if (!ret) {
273 res->flags &= ~IORESOURCE_STARTALIGN;
274 dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
275 if (resno < PCI_BRIDGE_RESOURCES)
276 pci_update_resource(dev, resno);
277 }
241 return ret; 278 return ret;
242} 279}
243 280
281
244/* Sort resources by alignment */ 282/* Sort resources by alignment */
245void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) 283void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
246{ 284{
diff --git a/drivers/pcmcia/pxa2xx_balloon3.c b/drivers/pcmcia/pxa2xx_balloon3.c
index 4c3e94c0ae85..f56d7de7c751 100644
--- a/drivers/pcmcia/pxa2xx_balloon3.c
+++ b/drivers/pcmcia/pxa2xx_balloon3.c
@@ -103,22 +103,12 @@ static int balloon3_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
103 return 0; 103 return 0;
104} 104}
105 105
106static void balloon3_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
107{
108}
109
110static void balloon3_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
111{
112}
113
114static struct pcmcia_low_level balloon3_pcmcia_ops = { 106static struct pcmcia_low_level balloon3_pcmcia_ops = {
115 .owner = THIS_MODULE, 107 .owner = THIS_MODULE,
116 .hw_init = balloon3_pcmcia_hw_init, 108 .hw_init = balloon3_pcmcia_hw_init,
117 .hw_shutdown = balloon3_pcmcia_hw_shutdown, 109 .hw_shutdown = balloon3_pcmcia_hw_shutdown,
118 .socket_state = balloon3_pcmcia_socket_state, 110 .socket_state = balloon3_pcmcia_socket_state,
119 .configure_socket = balloon3_pcmcia_configure_socket, 111 .configure_socket = balloon3_pcmcia_configure_socket,
120 .socket_init = balloon3_pcmcia_socket_init,
121 .socket_suspend = balloon3_pcmcia_socket_suspend,
122 .first = 0, 112 .first = 0,
123 .nr = 1, 113 .nr = 1,
124}; 114};
diff --git a/drivers/pcmcia/pxa2xx_cm_x255.c b/drivers/pcmcia/pxa2xx_cm_x255.c
index 05913d0bbdbe..63f4d5211ed2 100644
--- a/drivers/pcmcia/pxa2xx_cm_x255.c
+++ b/drivers/pcmcia/pxa2xx_cm_x255.c
@@ -102,23 +102,12 @@ static int cmx255_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
102 return 0; 102 return 0;
103} 103}
104 104
105static void cmx255_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
106{
107}
108
109static void cmx255_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
110{
111}
112
113
114static struct pcmcia_low_level cmx255_pcmcia_ops __initdata = { 105static struct pcmcia_low_level cmx255_pcmcia_ops __initdata = {
115 .owner = THIS_MODULE, 106 .owner = THIS_MODULE,
116 .hw_init = cmx255_pcmcia_hw_init, 107 .hw_init = cmx255_pcmcia_hw_init,
117 .hw_shutdown = cmx255_pcmcia_shutdown, 108 .hw_shutdown = cmx255_pcmcia_shutdown,
118 .socket_state = cmx255_pcmcia_socket_state, 109 .socket_state = cmx255_pcmcia_socket_state,
119 .configure_socket = cmx255_pcmcia_configure_socket, 110 .configure_socket = cmx255_pcmcia_configure_socket,
120 .socket_init = cmx255_pcmcia_socket_init,
121 .socket_suspend = cmx255_pcmcia_socket_suspend,
122 .nr = 1, 111 .nr = 1,
123}; 112};
124 113
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c
index 5662646b84da..6ee42b4c3e68 100644
--- a/drivers/pcmcia/pxa2xx_cm_x270.c
+++ b/drivers/pcmcia/pxa2xx_cm_x270.c
@@ -82,23 +82,12 @@ static int cmx270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
82 return 0; 82 return 0;
83} 83}
84 84
85static void cmx270_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
86{
87}
88
89static void cmx270_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
90{
91}
92
93
94static struct pcmcia_low_level cmx270_pcmcia_ops __initdata = { 85static struct pcmcia_low_level cmx270_pcmcia_ops __initdata = {
95 .owner = THIS_MODULE, 86 .owner = THIS_MODULE,
96 .hw_init = cmx270_pcmcia_hw_init, 87 .hw_init = cmx270_pcmcia_hw_init,
97 .hw_shutdown = cmx270_pcmcia_shutdown, 88 .hw_shutdown = cmx270_pcmcia_shutdown,
98 .socket_state = cmx270_pcmcia_socket_state, 89 .socket_state = cmx270_pcmcia_socket_state,
99 .configure_socket = cmx270_pcmcia_configure_socket, 90 .configure_socket = cmx270_pcmcia_configure_socket,
100 .socket_init = cmx270_pcmcia_socket_init,
101 .socket_suspend = cmx270_pcmcia_socket_suspend,
102 .nr = 1, 91 .nr = 1,
103}; 92};
104 93
diff --git a/drivers/pcmcia/pxa2xx_colibri.c b/drivers/pcmcia/pxa2xx_colibri.c
index 443cb7fc872d..c6dec572a05d 100644
--- a/drivers/pcmcia/pxa2xx_colibri.c
+++ b/drivers/pcmcia/pxa2xx_colibri.c
@@ -116,14 +116,6 @@ colibri_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
116 return 0; 116 return 0;
117} 117}
118 118
119static void colibri_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
120{
121}
122
123static void colibri_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
124{
125}
126
127static struct pcmcia_low_level colibri_pcmcia_ops = { 119static struct pcmcia_low_level colibri_pcmcia_ops = {
128 .owner = THIS_MODULE, 120 .owner = THIS_MODULE,
129 121
@@ -135,9 +127,6 @@ static struct pcmcia_low_level colibri_pcmcia_ops = {
135 127
136 .socket_state = colibri_pcmcia_socket_state, 128 .socket_state = colibri_pcmcia_socket_state,
137 .configure_socket = colibri_pcmcia_configure_socket, 129 .configure_socket = colibri_pcmcia_configure_socket,
138
139 .socket_init = colibri_pcmcia_socket_init,
140 .socket_suspend = colibri_pcmcia_socket_suspend,
141}; 130};
142 131
143static struct platform_device *colibri_pcmcia_device; 132static struct platform_device *colibri_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_mainstone.c b/drivers/pcmcia/pxa2xx_mainstone.c
index 92016fe932b4..aded706c0b9f 100644
--- a/drivers/pcmcia/pxa2xx_mainstone.c
+++ b/drivers/pcmcia/pxa2xx_mainstone.c
@@ -128,22 +128,12 @@ static int mst_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
128 return ret; 128 return ret;
129} 129}
130 130
131static void mst_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
132{
133}
134
135static void mst_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
136{
137}
138
139static struct pcmcia_low_level mst_pcmcia_ops __initdata = { 131static struct pcmcia_low_level mst_pcmcia_ops __initdata = {
140 .owner = THIS_MODULE, 132 .owner = THIS_MODULE,
141 .hw_init = mst_pcmcia_hw_init, 133 .hw_init = mst_pcmcia_hw_init,
142 .hw_shutdown = mst_pcmcia_hw_shutdown, 134 .hw_shutdown = mst_pcmcia_hw_shutdown,
143 .socket_state = mst_pcmcia_socket_state, 135 .socket_state = mst_pcmcia_socket_state,
144 .configure_socket = mst_pcmcia_configure_socket, 136 .configure_socket = mst_pcmcia_configure_socket,
145 .socket_init = mst_pcmcia_socket_init,
146 .socket_suspend = mst_pcmcia_socket_suspend,
147 .nr = 2, 137 .nr = 2,
148}; 138};
149 139
diff --git a/drivers/pcmcia/pxa2xx_palmld.c b/drivers/pcmcia/pxa2xx_palmld.c
index 69f73670949a..d589ad1dcd4c 100644
--- a/drivers/pcmcia/pxa2xx_palmld.c
+++ b/drivers/pcmcia/pxa2xx_palmld.c
@@ -65,14 +65,6 @@ static int palmld_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
65 return 0; 65 return 0;
66} 66}
67 67
68static void palmld_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
69{
70}
71
72static void palmld_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
73{
74}
75
76static struct pcmcia_low_level palmld_pcmcia_ops = { 68static struct pcmcia_low_level palmld_pcmcia_ops = {
77 .owner = THIS_MODULE, 69 .owner = THIS_MODULE,
78 70
@@ -84,9 +76,6 @@ static struct pcmcia_low_level palmld_pcmcia_ops = {
84 76
85 .socket_state = palmld_pcmcia_socket_state, 77 .socket_state = palmld_pcmcia_socket_state,
86 .configure_socket = palmld_pcmcia_configure_socket, 78 .configure_socket = palmld_pcmcia_configure_socket,
87
88 .socket_init = palmld_pcmcia_socket_init,
89 .socket_suspend = palmld_pcmcia_socket_suspend,
90}; 79};
91 80
92static struct platform_device *palmld_pcmcia_device; 81static struct platform_device *palmld_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_palmtc.c b/drivers/pcmcia/pxa2xx_palmtc.c
index d0ad6a76bbde..9c6a04b2f71b 100644
--- a/drivers/pcmcia/pxa2xx_palmtc.c
+++ b/drivers/pcmcia/pxa2xx_palmtc.c
@@ -117,14 +117,6 @@ static int palmtc_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
117 return ret; 117 return ret;
118} 118}
119 119
120static void palmtc_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
121{
122}
123
124static void palmtc_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
125{
126}
127
128static struct pcmcia_low_level palmtc_pcmcia_ops = { 120static struct pcmcia_low_level palmtc_pcmcia_ops = {
129 .owner = THIS_MODULE, 121 .owner = THIS_MODULE,
130 122
@@ -136,9 +128,6 @@ static struct pcmcia_low_level palmtc_pcmcia_ops = {
136 128
137 .socket_state = palmtc_pcmcia_socket_state, 129 .socket_state = palmtc_pcmcia_socket_state,
138 .configure_socket = palmtc_pcmcia_configure_socket, 130 .configure_socket = palmtc_pcmcia_configure_socket,
139
140 .socket_init = palmtc_pcmcia_socket_init,
141 .socket_suspend = palmtc_pcmcia_socket_suspend,
142}; 131};
143 132
144static struct platform_device *palmtc_pcmcia_device; 133static struct platform_device *palmtc_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c
index 1a2580450402..80645a688ee3 100644
--- a/drivers/pcmcia/pxa2xx_palmtx.c
+++ b/drivers/pcmcia/pxa2xx_palmtx.c
@@ -67,14 +67,6 @@ palmtx_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
67 return 0; 67 return 0;
68} 68}
69 69
70static void palmtx_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
71{
72}
73
74static void palmtx_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
75{
76}
77
78static struct pcmcia_low_level palmtx_pcmcia_ops = { 70static struct pcmcia_low_level palmtx_pcmcia_ops = {
79 .owner = THIS_MODULE, 71 .owner = THIS_MODULE,
80 72
@@ -86,9 +78,6 @@ static struct pcmcia_low_level palmtx_pcmcia_ops = {
86 78
87 .socket_state = palmtx_pcmcia_socket_state, 79 .socket_state = palmtx_pcmcia_socket_state,
88 .configure_socket = palmtx_pcmcia_configure_socket, 80 .configure_socket = palmtx_pcmcia_configure_socket,
89
90 .socket_init = palmtx_pcmcia_socket_init,
91 .socket_suspend = palmtx_pcmcia_socket_suspend,
92}; 81};
93 82
94static struct platform_device *palmtx_pcmcia_device; 83static struct platform_device *palmtx_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_stargate2.c b/drivers/pcmcia/pxa2xx_stargate2.c
index d08802fe35f9..939622251dfb 100644
--- a/drivers/pcmcia/pxa2xx_stargate2.c
+++ b/drivers/pcmcia/pxa2xx_stargate2.c
@@ -28,7 +28,6 @@
28 28
29#include "soc_common.h" 29#include "soc_common.h"
30 30
31#define SG2_S0_BUFF_CTL 120
32#define SG2_S0_POWER_CTL 108 31#define SG2_S0_POWER_CTL 108
33#define SG2_S0_GPIO_RESET 82 32#define SG2_S0_GPIO_RESET 82
34#define SG2_S0_GPIO_DETECT 53 33#define SG2_S0_GPIO_DETECT 53
@@ -38,6 +37,11 @@ static struct pcmcia_irqs irqs[] = {
38 { 0, IRQ_GPIO(SG2_S0_GPIO_DETECT), "PCMCIA0 CD" }, 37 { 0, IRQ_GPIO(SG2_S0_GPIO_DETECT), "PCMCIA0 CD" },
39}; 38};
40 39
40static struct gpio sg2_pcmcia_gpios[] = {
41 { SG2_S0_GPIO_RESET, GPIOF_OUT_INIT_HIGH, "PCMCIA Reset" },
42 { SG2_S0_POWER_CTL, GPIOF_OUT_INIT_HIGH, "PCMCIA Power Ctrl" },
43};
44
41static int sg2_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 45static int sg2_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
42{ 46{
43 skt->socket.pci_irq = IRQ_GPIO(SG2_S0_GPIO_READY); 47 skt->socket.pci_irq = IRQ_GPIO(SG2_S0_GPIO_READY);
@@ -122,37 +126,23 @@ static int __init sg2_pcmcia_init(void)
122 if (!sg2_pcmcia_device) 126 if (!sg2_pcmcia_device)
123 return -ENOMEM; 127 return -ENOMEM;
124 128
125 ret = gpio_request(SG2_S0_BUFF_CTL, "SG2 CF buff ctl"); 129 ret = gpio_request_array(sg2_pcmcia_gpios, ARRAY_SIZE(sg2_pcmcia_gpios));
126 if (ret) 130 if (ret)
127 goto error_put_platform_device; 131 goto error_put_platform_device;
128 ret = gpio_request(SG2_S0_POWER_CTL, "SG2 CF power ctl");
129 if (ret)
130 goto error_free_gpio_buff_ctl;
131 ret = gpio_request(SG2_S0_GPIO_RESET, "SG2 CF reset");
132 if (ret)
133 goto error_free_gpio_power_ctl;
134 /* Set gpio directions */
135 gpio_direction_output(SG2_S0_BUFF_CTL, 0);
136 gpio_direction_output(SG2_S0_POWER_CTL, 1);
137 gpio_direction_output(SG2_S0_GPIO_RESET, 1);
138 132
139 ret = platform_device_add_data(sg2_pcmcia_device, 133 ret = platform_device_add_data(sg2_pcmcia_device,
140 &sg2_pcmcia_ops, 134 &sg2_pcmcia_ops,
141 sizeof(sg2_pcmcia_ops)); 135 sizeof(sg2_pcmcia_ops));
142 if (ret) 136 if (ret)
143 goto error_free_gpio_reset; 137 goto error_free_gpios;
144 138
145 ret = platform_device_add(sg2_pcmcia_device); 139 ret = platform_device_add(sg2_pcmcia_device);
146 if (ret) 140 if (ret)
147 goto error_free_gpio_reset; 141 goto error_free_gpios;
148 142
149 return 0; 143 return 0;
150error_free_gpio_reset: 144error_free_gpios:
151 gpio_free(SG2_S0_GPIO_RESET); 145 gpio_free_array(sg2_pcmcia_gpios, ARRAY_SIZE(sg2_pcmcia_gpios));
152error_free_gpio_power_ctl:
153 gpio_free(SG2_S0_POWER_CTL);
154error_free_gpio_buff_ctl:
155 gpio_free(SG2_S0_BUFF_CTL);
156error_put_platform_device: 146error_put_platform_device:
157 platform_device_put(sg2_pcmcia_device); 147 platform_device_put(sg2_pcmcia_device);
158 148
@@ -162,9 +152,7 @@ error_put_platform_device:
162static void __exit sg2_pcmcia_exit(void) 152static void __exit sg2_pcmcia_exit(void)
163{ 153{
164 platform_device_unregister(sg2_pcmcia_device); 154 platform_device_unregister(sg2_pcmcia_device);
165 gpio_free(SG2_S0_BUFF_CTL); 155 gpio_free_array(sg2_pcmcia_gpios, ARRAY_SIZE(sg2_pcmcia_gpios));
166 gpio_free(SG2_S0_POWER_CTL);
167 gpio_free(SG2_S0_GPIO_RESET);
168} 156}
169 157
170fs_initcall(sg2_pcmcia_init); 158fs_initcall(sg2_pcmcia_init);
diff --git a/drivers/pcmcia/pxa2xx_viper.c b/drivers/pcmcia/pxa2xx_viper.c
index a51f2077644a..1064b1c2869d 100644
--- a/drivers/pcmcia/pxa2xx_viper.c
+++ b/drivers/pcmcia/pxa2xx_viper.c
@@ -136,22 +136,12 @@ static int viper_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
136 return 0; 136 return 0;
137} 137}
138 138
139static void viper_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
140{
141}
142
143static void viper_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
144{
145}
146
147static struct pcmcia_low_level viper_pcmcia_ops = { 139static struct pcmcia_low_level viper_pcmcia_ops = {
148 .owner = THIS_MODULE, 140 .owner = THIS_MODULE,
149 .hw_init = viper_pcmcia_hw_init, 141 .hw_init = viper_pcmcia_hw_init,
150 .hw_shutdown = viper_pcmcia_hw_shutdown, 142 .hw_shutdown = viper_pcmcia_hw_shutdown,
151 .socket_state = viper_pcmcia_socket_state, 143 .socket_state = viper_pcmcia_socket_state,
152 .configure_socket = viper_pcmcia_configure_socket, 144 .configure_socket = viper_pcmcia_configure_socket,
153 .socket_init = viper_pcmcia_socket_init,
154 .socket_suspend = viper_pcmcia_socket_suspend,
155 .nr = 1, 145 .nr = 1,
156}; 146};
157 147
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 768f9572a8c8..a0a9c2aa8d78 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -186,8 +186,8 @@ static int soc_common_pcmcia_sock_init(struct pcmcia_socket *sock)
186 struct soc_pcmcia_socket *skt = to_soc_pcmcia_socket(sock); 186 struct soc_pcmcia_socket *skt = to_soc_pcmcia_socket(sock);
187 187
188 debug(skt, 2, "initializing socket\n"); 188 debug(skt, 2, "initializing socket\n");
189 189 if (skt->ops->socket_init)
190 skt->ops->socket_init(skt); 190 skt->ops->socket_init(skt);
191 return 0; 191 return 0;
192} 192}
193 193
@@ -207,7 +207,8 @@ static int soc_common_pcmcia_suspend(struct pcmcia_socket *sock)
207 207
208 debug(skt, 2, "suspending socket\n"); 208 debug(skt, 2, "suspending socket\n");
209 209
210 skt->ops->socket_suspend(skt); 210 if (skt->ops->socket_suspend)
211 skt->ops->socket_suspend(skt);
211 212
212 return 0; 213 return 0;
213} 214}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 45e0191c35dd..1e88d4785321 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -769,4 +769,12 @@ config INTEL_OAKTRAIL
769 enable/disable the Camera, WiFi, BT etc. devices. If in doubt, say Y 769 enable/disable the Camera, WiFi, BT etc. devices. If in doubt, say Y
770 here; it will only load on supported platforms. 770 here; it will only load on supported platforms.
771 771
772config SAMSUNG_Q10
773 tristate "Samsung Q10 Extras"
774 depends on SERIO_I8042
775 select BACKLIGHT_CLASS_DEVICE
776 ---help---
777 This driver provides support for backlight control on Samsung Q10
778 and related laptops, including Dell Latitude X200.
779
772endif # X86_PLATFORM_DEVICES 780endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index afc1f832aa67..293a320d9faa 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -44,3 +44,4 @@ obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
44obj-$(CONFIG_MXM_WMI) += mxm-wmi.o 44obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
45obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o 45obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o
46obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o 46obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
47obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index e1c4938b301b..af2bb20cb2fb 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -99,6 +99,7 @@ enum acer_wmi_event_ids {
99static const struct key_entry acer_wmi_keymap[] = { 99static const struct key_entry acer_wmi_keymap[] = {
100 {KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */ 100 {KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */
101 {KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */ 101 {KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */
102 {KE_KEY, 0x04, {KEY_WLAN} }, /* WiFi */
102 {KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */ 103 {KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */
103 {KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */ 104 {KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */
104 {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */ 105 {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
@@ -304,6 +305,10 @@ static struct quirk_entry quirk_fujitsu_amilo_li_1718 = {
304 .wireless = 2, 305 .wireless = 2,
305}; 306};
306 307
308static struct quirk_entry quirk_lenovo_ideapad_s205 = {
309 .wireless = 3,
310};
311
307/* The Aspire One has a dummy ACPI-WMI interface - disable it */ 312/* The Aspire One has a dummy ACPI-WMI interface - disable it */
308static struct dmi_system_id __devinitdata acer_blacklist[] = { 313static struct dmi_system_id __devinitdata acer_blacklist[] = {
309 { 314 {
@@ -450,6 +455,15 @@ static struct dmi_system_id acer_quirks[] = {
450 }, 455 },
451 .driver_data = &quirk_medion_md_98300, 456 .driver_data = &quirk_medion_md_98300,
452 }, 457 },
458 {
459 .callback = dmi_matched,
460 .ident = "Lenovo Ideapad S205",
461 .matches = {
462 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
463 DMI_MATCH(DMI_PRODUCT_NAME, "10382LG"),
464 },
465 .driver_data = &quirk_lenovo_ideapad_s205,
466 },
453 {} 467 {}
454}; 468};
455 469
@@ -542,6 +556,12 @@ struct wmi_interface *iface)
542 return AE_ERROR; 556 return AE_ERROR;
543 *value = result & 0x1; 557 *value = result & 0x1;
544 return AE_OK; 558 return AE_OK;
559 case 3:
560 err = ec_read(0x78, &result);
561 if (err)
562 return AE_ERROR;
563 *value = result & 0x1;
564 return AE_OK;
545 default: 565 default:
546 err = ec_read(0xA, &result); 566 err = ec_read(0xA, &result);
547 if (err) 567 if (err)
@@ -1266,8 +1286,13 @@ static void acer_rfkill_update(struct work_struct *ignored)
1266 acpi_status status; 1286 acpi_status status;
1267 1287
1268 status = get_u32(&state, ACER_CAP_WIRELESS); 1288 status = get_u32(&state, ACER_CAP_WIRELESS);
1269 if (ACPI_SUCCESS(status)) 1289 if (ACPI_SUCCESS(status)) {
1270 rfkill_set_sw_state(wireless_rfkill, !state); 1290 if (quirks->wireless == 3) {
1291 rfkill_set_hw_state(wireless_rfkill, !state);
1292 } else {
1293 rfkill_set_sw_state(wireless_rfkill, !state);
1294 }
1295 }
1271 1296
1272 if (has_cap(ACER_CAP_BLUETOOTH)) { 1297 if (has_cap(ACER_CAP_BLUETOOTH)) {
1273 status = get_u32(&state, ACER_CAP_BLUETOOTH); 1298 status = get_u32(&state, ACER_CAP_BLUETOOTH);
@@ -1400,6 +1425,9 @@ static ssize_t show_bool_threeg(struct device *dev,
1400{ 1425{
1401 u32 result; \ 1426 u32 result; \
1402 acpi_status status; 1427 acpi_status status;
1428
1429 pr_info("This threeg sysfs will be removed in 2012"
1430 " - used by: %s\n", current->comm);
1403 if (wmi_has_guid(WMID_GUID3)) 1431 if (wmi_has_guid(WMID_GUID3))
1404 status = wmid3_get_device_status(&result, 1432 status = wmid3_get_device_status(&result,
1405 ACER_WMID3_GDS_THREEG); 1433 ACER_WMID3_GDS_THREEG);
@@ -1415,8 +1443,10 @@ static ssize_t set_bool_threeg(struct device *dev,
1415{ 1443{
1416 u32 tmp = simple_strtoul(buf, NULL, 10); 1444 u32 tmp = simple_strtoul(buf, NULL, 10);
1417 acpi_status status = set_u32(tmp, ACER_CAP_THREEG); 1445 acpi_status status = set_u32(tmp, ACER_CAP_THREEG);
1418 if (ACPI_FAILURE(status)) 1446 pr_info("This threeg sysfs will be removed in 2012"
1419 return -EINVAL; 1447 " - used by: %s\n", current->comm);
1448 if (ACPI_FAILURE(status))
1449 return -EINVAL;
1420 return count; 1450 return count;
1421} 1451}
1422static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg, 1452static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
@@ -1425,6 +1455,8 @@ static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
1425static ssize_t show_interface(struct device *dev, struct device_attribute *attr, 1455static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
1426 char *buf) 1456 char *buf)
1427{ 1457{
1458 pr_info("This interface sysfs will be removed in 2012"
1459 " - used by: %s\n", current->comm);
1428 switch (interface->type) { 1460 switch (interface->type) {
1429 case ACER_AMW0: 1461 case ACER_AMW0:
1430 return sprintf(buf, "AMW0\n"); 1462 return sprintf(buf, "AMW0\n");
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index fca3489218b7..760c6d7624fe 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -182,6 +182,7 @@ static const struct bios_settings_t bios_tbl[] = {
182 {"Acer", "Aspire 1810T", "v1.3308", 0x55, 0x58, {0x9e, 0x00} }, 182 {"Acer", "Aspire 1810T", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
183 {"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, 183 {"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
184 {"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, 184 {"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
185 {"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
185 /* Acer 531 */ 186 /* Acer 531 */
186 {"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} }, 187 {"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} },
187 /* Gateway */ 188 /* Gateway */
@@ -703,15 +704,15 @@ MODULE_LICENSE("GPL");
703MODULE_AUTHOR("Peter Feuerer"); 704MODULE_AUTHOR("Peter Feuerer");
704MODULE_DESCRIPTION("Aspire One temperature and fan driver"); 705MODULE_DESCRIPTION("Aspire One temperature and fan driver");
705MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:"); 706MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
706MODULE_ALIAS("dmi:*:*Acer*:pnAspire 1410*:"); 707MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1410*:");
707MODULE_ALIAS("dmi:*:*Acer*:pnAspire 1810*:"); 708MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1810*:");
708MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:"); 709MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:");
709MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:"); 710MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
710MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:"); 711MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:");
711MODULE_ALIAS("dmi:*:*Packard Bell*:pnAOA*:"); 712MODULE_ALIAS("dmi:*:*Packard*Bell*:pnAOA*:");
712MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOA*:"); 713MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOA*:");
713MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOTMU*:"); 714MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:");
714MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOTMA*:"); 715MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:");
715 716
716module_init(acerhdf_init); 717module_init(acerhdf_init);
717module_exit(acerhdf_exit); 718module_exit(acerhdf_exit);
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index d65df92e2acc..fa6d7ec68b26 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -70,11 +70,10 @@ MODULE_LICENSE("GPL");
70 * WAPF defines the behavior of the Fn+Fx wlan key 70 * WAPF defines the behavior of the Fn+Fx wlan key
71 * The significance of values is yet to be found, but 71 * The significance of values is yet to be found, but
72 * most of the time: 72 * most of the time:
73 * 0x0 will do nothing 73 * Bit | Bluetooth | WLAN
74 * 0x1 will allow to control the device with Fn+Fx key. 74 * 0 | Hardware | Hardware
75 * 0x4 will send an ACPI event (0x88) while pressing the Fn+Fx key 75 * 1 | Hardware | Software
76 * 0x5 like 0x1 or 0x4 76 * 4 | Software | Software
77 * So, if something doesn't work as you want, just try other values =)
78 */ 77 */
79static uint wapf = 1; 78static uint wapf = 1;
80module_param(wapf, uint, 0444); 79module_param(wapf, uint, 0444);
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 0580d99b0798..b0859d4183e8 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -38,6 +38,24 @@ MODULE_LICENSE("GPL");
38 38
39MODULE_ALIAS("wmi:"ASUS_NB_WMI_EVENT_GUID); 39MODULE_ALIAS("wmi:"ASUS_NB_WMI_EVENT_GUID);
40 40
41/*
42 * WAPF defines the behavior of the Fn+Fx wlan key
43 * The significance of values is yet to be found, but
44 * most of the time:
45 * Bit | Bluetooth | WLAN
46 * 0 | Hardware | Hardware
47 * 1 | Hardware | Software
48 * 4 | Software | Software
49 */
50static uint wapf;
51module_param(wapf, uint, 0444);
52MODULE_PARM_DESC(wapf, "WAPF value");
53
54static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
55{
56 driver->wapf = wapf;
57}
58
41static const struct key_entry asus_nb_wmi_keymap[] = { 59static const struct key_entry asus_nb_wmi_keymap[] = {
42 { KE_KEY, 0x30, { KEY_VOLUMEUP } }, 60 { KE_KEY, 0x30, { KEY_VOLUMEUP } },
43 { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, 61 { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
@@ -53,16 +71,16 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
53 { KE_KEY, 0x51, { KEY_WWW } }, 71 { KE_KEY, 0x51, { KEY_WWW } },
54 { KE_KEY, 0x55, { KEY_CALC } }, 72 { KE_KEY, 0x55, { KEY_CALC } },
55 { KE_KEY, 0x5C, { KEY_F15 } }, /* Power Gear key */ 73 { KE_KEY, 0x5C, { KEY_F15 } }, /* Power Gear key */
56 { KE_KEY, 0x5D, { KEY_WLAN } }, 74 { KE_KEY, 0x5D, { KEY_WLAN } }, /* Wireless console Toggle */
57 { KE_KEY, 0x5E, { KEY_WLAN } }, 75 { KE_KEY, 0x5E, { KEY_WLAN } }, /* Wireless console Enable */
58 { KE_KEY, 0x5F, { KEY_WLAN } }, 76 { KE_KEY, 0x5F, { KEY_WLAN } }, /* Wireless console Disable */
59 { KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } }, 77 { KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } },
60 { KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } }, 78 { KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } },
61 { KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } }, 79 { KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } },
62 { KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } }, 80 { KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } },
63 { KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } }, 81 { KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
64 { KE_KEY, 0x7E, { KEY_BLUETOOTH } },
65 { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, 82 { KE_KEY, 0x7D, { KEY_BLUETOOTH } },
83 { KE_KEY, 0x7E, { KEY_BLUETOOTH } },
66 { KE_KEY, 0x82, { KEY_CAMERA } }, 84 { KE_KEY, 0x82, { KEY_CAMERA } },
67 { KE_KEY, 0x88, { KEY_RFKILL } }, 85 { KE_KEY, 0x88, { KEY_RFKILL } },
68 { KE_KEY, 0x8A, { KEY_PROG1 } }, 86 { KE_KEY, 0x8A, { KEY_PROG1 } },
@@ -81,6 +99,7 @@ static struct asus_wmi_driver asus_nb_wmi_driver = {
81 .keymap = asus_nb_wmi_keymap, 99 .keymap = asus_nb_wmi_keymap,
82 .input_name = "Asus WMI hotkeys", 100 .input_name = "Asus WMI hotkeys",
83 .input_phys = ASUS_NB_WMI_FILE "/input0", 101 .input_phys = ASUS_NB_WMI_FILE "/input0",
102 .quirks = asus_nb_wmi_quirks,
84}; 103};
85 104
86 105
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 65b66aa44c78..95cba9ebf6c0 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -44,6 +44,7 @@
44#include <linux/debugfs.h> 44#include <linux/debugfs.h>
45#include <linux/seq_file.h> 45#include <linux/seq_file.h>
46#include <linux/platform_device.h> 46#include <linux/platform_device.h>
47#include <linux/thermal.h>
47#include <acpi/acpi_bus.h> 48#include <acpi/acpi_bus.h>
48#include <acpi/acpi_drivers.h> 49#include <acpi/acpi_drivers.h>
49 50
@@ -66,6 +67,8 @@ MODULE_LICENSE("GPL");
66#define NOTIFY_BRNUP_MAX 0x1f 67#define NOTIFY_BRNUP_MAX 0x1f
67#define NOTIFY_BRNDOWN_MIN 0x20 68#define NOTIFY_BRNDOWN_MIN 0x20
68#define NOTIFY_BRNDOWN_MAX 0x2e 69#define NOTIFY_BRNDOWN_MAX 0x2e
70#define NOTIFY_KBD_BRTUP 0xc4
71#define NOTIFY_KBD_BRTDWN 0xc5
69 72
70/* WMI Methods */ 73/* WMI Methods */
71#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */ 74#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */
@@ -93,6 +96,7 @@ MODULE_LICENSE("GPL");
93/* Wireless */ 96/* Wireless */
94#define ASUS_WMI_DEVID_HW_SWITCH 0x00010001 97#define ASUS_WMI_DEVID_HW_SWITCH 0x00010001
95#define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002 98#define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002
99#define ASUS_WMI_DEVID_CWAP 0x00010003
96#define ASUS_WMI_DEVID_WLAN 0x00010011 100#define ASUS_WMI_DEVID_WLAN 0x00010011
97#define ASUS_WMI_DEVID_BLUETOOTH 0x00010013 101#define ASUS_WMI_DEVID_BLUETOOTH 0x00010013
98#define ASUS_WMI_DEVID_GPS 0x00010015 102#define ASUS_WMI_DEVID_GPS 0x00010015
@@ -102,6 +106,12 @@ MODULE_LICENSE("GPL");
102 106
103/* Leds */ 107/* Leds */
104/* 0x000200XX and 0x000400XX */ 108/* 0x000200XX and 0x000400XX */
109#define ASUS_WMI_DEVID_LED1 0x00020011
110#define ASUS_WMI_DEVID_LED2 0x00020012
111#define ASUS_WMI_DEVID_LED3 0x00020013
112#define ASUS_WMI_DEVID_LED4 0x00020014
113#define ASUS_WMI_DEVID_LED5 0x00020015
114#define ASUS_WMI_DEVID_LED6 0x00020016
105 115
106/* Backlight and Brightness */ 116/* Backlight and Brightness */
107#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011 117#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011
@@ -174,13 +184,18 @@ struct asus_wmi {
174 184
175 struct led_classdev tpd_led; 185 struct led_classdev tpd_led;
176 int tpd_led_wk; 186 int tpd_led_wk;
187 struct led_classdev kbd_led;
188 int kbd_led_wk;
177 struct workqueue_struct *led_workqueue; 189 struct workqueue_struct *led_workqueue;
178 struct work_struct tpd_led_work; 190 struct work_struct tpd_led_work;
191 struct work_struct kbd_led_work;
179 192
180 struct asus_rfkill wlan; 193 struct asus_rfkill wlan;
181 struct asus_rfkill bluetooth; 194 struct asus_rfkill bluetooth;
182 struct asus_rfkill wimax; 195 struct asus_rfkill wimax;
183 struct asus_rfkill wwan3g; 196 struct asus_rfkill wwan3g;
197 struct asus_rfkill gps;
198 struct asus_rfkill uwb;
184 199
185 struct hotplug_slot *hotplug_slot; 200 struct hotplug_slot *hotplug_slot;
186 struct mutex hotplug_lock; 201 struct mutex hotplug_lock;
@@ -205,6 +220,7 @@ static int asus_wmi_input_init(struct asus_wmi *asus)
205 asus->inputdev->phys = asus->driver->input_phys; 220 asus->inputdev->phys = asus->driver->input_phys;
206 asus->inputdev->id.bustype = BUS_HOST; 221 asus->inputdev->id.bustype = BUS_HOST;
207 asus->inputdev->dev.parent = &asus->platform_device->dev; 222 asus->inputdev->dev.parent = &asus->platform_device->dev;
223 set_bit(EV_REP, asus->inputdev->evbit);
208 224
209 err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL); 225 err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL);
210 if (err) 226 if (err)
@@ -359,30 +375,80 @@ static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
359 return read_tpd_led_state(asus); 375 return read_tpd_led_state(asus);
360} 376}
361 377
362static int asus_wmi_led_init(struct asus_wmi *asus) 378static void kbd_led_update(struct work_struct *work)
363{ 379{
364 int rv; 380 int ctrl_param = 0;
381 struct asus_wmi *asus;
365 382
366 if (read_tpd_led_state(asus) < 0) 383 asus = container_of(work, struct asus_wmi, kbd_led_work);
367 return 0;
368 384
369 asus->led_workqueue = create_singlethread_workqueue("led_workqueue"); 385 /*
370 if (!asus->led_workqueue) 386 * bits 0-2: level
371 return -ENOMEM; 387 * bit 7: light on/off
372 INIT_WORK(&asus->tpd_led_work, tpd_led_update); 388 */
389 if (asus->kbd_led_wk > 0)
390 ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
373 391
374 asus->tpd_led.name = "asus::touchpad"; 392 asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL);
375 asus->tpd_led.brightness_set = tpd_led_set; 393}
376 asus->tpd_led.brightness_get = tpd_led_get;
377 asus->tpd_led.max_brightness = 1;
378 394
379 rv = led_classdev_register(&asus->platform_device->dev, &asus->tpd_led); 395static int kbd_led_read(struct asus_wmi *asus, int *level, int *env)
380 if (rv) { 396{
381 destroy_workqueue(asus->led_workqueue); 397 int retval;
382 return rv; 398
399 /*
400 * bits 0-2: level
401 * bit 7: light on/off
402 * bit 8-10: environment (0: dark, 1: normal, 2: light)
403 * bit 17: status unknown
404 */
405 retval = asus_wmi_get_devstate_bits(asus, ASUS_WMI_DEVID_KBD_BACKLIGHT,
406 0xFFFF);
407
408 /* Unknown status is considered as off */
409 if (retval == 0x8000)
410 retval = 0;
411
412 if (retval >= 0) {
413 if (level)
414 *level = retval & 0x80 ? retval & 0x7F : 0;
415 if (env)
416 *env = (retval >> 8) & 0x7F;
417 retval = 0;
383 } 418 }
384 419
385 return 0; 420 return retval;
421}
422
423static void kbd_led_set(struct led_classdev *led_cdev,
424 enum led_brightness value)
425{
426 struct asus_wmi *asus;
427
428 asus = container_of(led_cdev, struct asus_wmi, kbd_led);
429
430 if (value > asus->kbd_led.max_brightness)
431 value = asus->kbd_led.max_brightness;
432 else if (value < 0)
433 value = 0;
434
435 asus->kbd_led_wk = value;
436 queue_work(asus->led_workqueue, &asus->kbd_led_work);
437}
438
439static enum led_brightness kbd_led_get(struct led_classdev *led_cdev)
440{
441 struct asus_wmi *asus;
442 int retval, value;
443
444 asus = container_of(led_cdev, struct asus_wmi, kbd_led);
445
446 retval = kbd_led_read(asus, &value, NULL);
447
448 if (retval < 0)
449 return retval;
450
451 return value;
386} 452}
387 453
388static void asus_wmi_led_exit(struct asus_wmi *asus) 454static void asus_wmi_led_exit(struct asus_wmi *asus)
@@ -393,6 +459,48 @@ static void asus_wmi_led_exit(struct asus_wmi *asus)
393 destroy_workqueue(asus->led_workqueue); 459 destroy_workqueue(asus->led_workqueue);
394} 460}
395 461
462static int asus_wmi_led_init(struct asus_wmi *asus)
463{
464 int rv = 0;
465
466 asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
467 if (!asus->led_workqueue)
468 return -ENOMEM;
469
470 if (read_tpd_led_state(asus) >= 0) {
471 INIT_WORK(&asus->tpd_led_work, tpd_led_update);
472
473 asus->tpd_led.name = "asus::touchpad";
474 asus->tpd_led.brightness_set = tpd_led_set;
475 asus->tpd_led.brightness_get = tpd_led_get;
476 asus->tpd_led.max_brightness = 1;
477
478 rv = led_classdev_register(&asus->platform_device->dev,
479 &asus->tpd_led);
480 if (rv)
481 goto error;
482 }
483
484 if (kbd_led_read(asus, NULL, NULL) >= 0) {
485 INIT_WORK(&asus->kbd_led_work, kbd_led_update);
486
487 asus->kbd_led.name = "asus::kbd_backlight";
488 asus->kbd_led.brightness_set = kbd_led_set;
489 asus->kbd_led.brightness_get = kbd_led_get;
490 asus->kbd_led.max_brightness = 3;
491
492 rv = led_classdev_register(&asus->platform_device->dev,
493 &asus->kbd_led);
494 }
495
496error:
497 if (rv)
498 asus_wmi_led_exit(asus);
499
500 return rv;
501}
502
503
396/* 504/*
397 * PCI hotplug (for wlan rfkill) 505 * PCI hotplug (for wlan rfkill)
398 */ 506 */
@@ -729,6 +837,16 @@ static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
729 rfkill_destroy(asus->wwan3g.rfkill); 837 rfkill_destroy(asus->wwan3g.rfkill);
730 asus->wwan3g.rfkill = NULL; 838 asus->wwan3g.rfkill = NULL;
731 } 839 }
840 if (asus->gps.rfkill) {
841 rfkill_unregister(asus->gps.rfkill);
842 rfkill_destroy(asus->gps.rfkill);
843 asus->gps.rfkill = NULL;
844 }
845 if (asus->uwb.rfkill) {
846 rfkill_unregister(asus->uwb.rfkill);
847 rfkill_destroy(asus->uwb.rfkill);
848 asus->uwb.rfkill = NULL;
849 }
732} 850}
733 851
734static int asus_wmi_rfkill_init(struct asus_wmi *asus) 852static int asus_wmi_rfkill_init(struct asus_wmi *asus)
@@ -763,6 +881,18 @@ static int asus_wmi_rfkill_init(struct asus_wmi *asus)
763 if (result && result != -ENODEV) 881 if (result && result != -ENODEV)
764 goto exit; 882 goto exit;
765 883
884 result = asus_new_rfkill(asus, &asus->gps, "asus-gps",
885 RFKILL_TYPE_GPS, ASUS_WMI_DEVID_GPS);
886
887 if (result && result != -ENODEV)
888 goto exit;
889
890 result = asus_new_rfkill(asus, &asus->uwb, "asus-uwb",
891 RFKILL_TYPE_UWB, ASUS_WMI_DEVID_UWB);
892
893 if (result && result != -ENODEV)
894 goto exit;
895
766 if (!asus->driver->hotplug_wireless) 896 if (!asus->driver->hotplug_wireless)
767 goto exit; 897 goto exit;
768 898
@@ -797,8 +927,8 @@ exit:
797 * Hwmon device 927 * Hwmon device
798 */ 928 */
799static ssize_t asus_hwmon_pwm1(struct device *dev, 929static ssize_t asus_hwmon_pwm1(struct device *dev,
800 struct device_attribute *attr, 930 struct device_attribute *attr,
801 char *buf) 931 char *buf)
802{ 932{
803 struct asus_wmi *asus = dev_get_drvdata(dev); 933 struct asus_wmi *asus = dev_get_drvdata(dev);
804 u32 value; 934 u32 value;
@@ -809,7 +939,7 @@ static ssize_t asus_hwmon_pwm1(struct device *dev,
809 if (err < 0) 939 if (err < 0)
810 return err; 940 return err;
811 941
812 value |= 0xFF; 942 value &= 0xFF;
813 943
814 if (value == 1) /* Low Speed */ 944 if (value == 1) /* Low Speed */
815 value = 85; 945 value = 85;
@@ -825,7 +955,26 @@ static ssize_t asus_hwmon_pwm1(struct device *dev,
825 return sprintf(buf, "%d\n", value); 955 return sprintf(buf, "%d\n", value);
826} 956}
827 957
958static ssize_t asus_hwmon_temp1(struct device *dev,
959 struct device_attribute *attr,
960 char *buf)
961{
962 struct asus_wmi *asus = dev_get_drvdata(dev);
963 u32 value;
964 int err;
965
966 err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_THERMAL_CTRL, &value);
967
968 if (err < 0)
969 return err;
970
971 value = KELVIN_TO_CELSIUS((value & 0xFFFF)) * 1000;
972
973 return sprintf(buf, "%d\n", value);
974}
975
828static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL, 0); 976static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL, 0);
977static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL, 0);
829 978
830static ssize_t 979static ssize_t
831show_name(struct device *dev, struct device_attribute *attr, char *buf) 980show_name(struct device *dev, struct device_attribute *attr, char *buf)
@@ -836,12 +985,13 @@ static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
836 985
837static struct attribute *hwmon_attributes[] = { 986static struct attribute *hwmon_attributes[] = {
838 &sensor_dev_attr_pwm1.dev_attr.attr, 987 &sensor_dev_attr_pwm1.dev_attr.attr,
988 &sensor_dev_attr_temp1_input.dev_attr.attr,
839 &sensor_dev_attr_name.dev_attr.attr, 989 &sensor_dev_attr_name.dev_attr.attr,
840 NULL 990 NULL
841}; 991};
842 992
843static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj, 993static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
844 struct attribute *attr, int idx) 994 struct attribute *attr, int idx)
845{ 995{
846 struct device *dev = container_of(kobj, struct device, kobj); 996 struct device *dev = container_of(kobj, struct device, kobj);
847 struct platform_device *pdev = to_platform_device(dev->parent); 997 struct platform_device *pdev = to_platform_device(dev->parent);
@@ -852,6 +1002,8 @@ static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
852 1002
853 if (attr == &sensor_dev_attr_pwm1.dev_attr.attr) 1003 if (attr == &sensor_dev_attr_pwm1.dev_attr.attr)
854 dev_id = ASUS_WMI_DEVID_FAN_CTRL; 1004 dev_id = ASUS_WMI_DEVID_FAN_CTRL;
1005 else if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr)
1006 dev_id = ASUS_WMI_DEVID_THERMAL_CTRL;
855 1007
856 if (dev_id != -1) { 1008 if (dev_id != -1) {
857 int err = asus_wmi_get_devstate(asus, dev_id, &value); 1009 int err = asus_wmi_get_devstate(asus, dev_id, &value);
@@ -869,9 +1021,13 @@ static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
869 * - reverved bits are non-zero 1021 * - reverved bits are non-zero
870 * - sfun and presence bit are not set 1022 * - sfun and presence bit are not set
871 */ 1023 */
872 if (value != ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000 1024 if (value == ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000
873 || (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT))) 1025 || (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT)))
874 ok = false; 1026 ok = false;
1027 } else if (dev_id == ASUS_WMI_DEVID_THERMAL_CTRL) {
1028 /* If value is zero, something is clearly wrong */
1029 if (value == 0)
1030 ok = false;
875 } 1031 }
876 1032
877 return ok ? attr->mode : 0; 1033 return ok ? attr->mode : 0;
@@ -904,6 +1060,7 @@ static int asus_wmi_hwmon_init(struct asus_wmi *asus)
904 pr_err("Could not register asus hwmon device\n"); 1060 pr_err("Could not register asus hwmon device\n");
905 return PTR_ERR(hwmon); 1061 return PTR_ERR(hwmon);
906 } 1062 }
1063 dev_set_drvdata(hwmon, asus);
907 asus->hwmon_device = hwmon; 1064 asus->hwmon_device = hwmon;
908 result = sysfs_create_group(&hwmon->kobj, &hwmon_attribute_group); 1065 result = sysfs_create_group(&hwmon->kobj, &hwmon_attribute_group);
909 if (result) 1066 if (result)
@@ -1060,6 +1217,8 @@ static void asus_wmi_notify(u32 value, void *context)
1060 acpi_status status; 1217 acpi_status status;
1061 int code; 1218 int code;
1062 int orig_code; 1219 int orig_code;
1220 unsigned int key_value = 1;
1221 bool autorelease = 1;
1063 1222
1064 status = wmi_get_event_data(value, &response); 1223 status = wmi_get_event_data(value, &response);
1065 if (status != AE_OK) { 1224 if (status != AE_OK) {
@@ -1075,6 +1234,13 @@ static void asus_wmi_notify(u32 value, void *context)
1075 code = obj->integer.value; 1234 code = obj->integer.value;
1076 orig_code = code; 1235 orig_code = code;
1077 1236
1237 if (asus->driver->key_filter) {
1238 asus->driver->key_filter(asus->driver, &code, &key_value,
1239 &autorelease);
1240 if (code == ASUS_WMI_KEY_IGNORE)
1241 goto exit;
1242 }
1243
1078 if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) 1244 if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
1079 code = NOTIFY_BRNUP_MIN; 1245 code = NOTIFY_BRNUP_MIN;
1080 else if (code >= NOTIFY_BRNDOWN_MIN && 1246 else if (code >= NOTIFY_BRNDOWN_MIN &&
@@ -1084,7 +1250,8 @@ static void asus_wmi_notify(u32 value, void *context)
1084 if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) { 1250 if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) {
1085 if (!acpi_video_backlight_support()) 1251 if (!acpi_video_backlight_support())
1086 asus_wmi_backlight_notify(asus, orig_code); 1252 asus_wmi_backlight_notify(asus, orig_code);
1087 } else if (!sparse_keymap_report_event(asus->inputdev, code, 1, true)) 1253 } else if (!sparse_keymap_report_event(asus->inputdev, code,
1254 key_value, autorelease))
1088 pr_info("Unknown key %x pressed\n", code); 1255 pr_info("Unknown key %x pressed\n", code);
1089 1256
1090exit: 1257exit:
@@ -1164,14 +1331,18 @@ ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER);
1164static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr, 1331static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
1165 const char *buf, size_t count) 1332 const char *buf, size_t count)
1166{ 1333{
1167 int value; 1334 int value, rv;
1168 1335
1169 if (!count || sscanf(buf, "%i", &value) != 1) 1336 if (!count || sscanf(buf, "%i", &value) != 1)
1170 return -EINVAL; 1337 return -EINVAL;
1171 if (value < 0 || value > 2) 1338 if (value < 0 || value > 2)
1172 return -EINVAL; 1339 return -EINVAL;
1173 1340
1174 return asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL); 1341 rv = asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL);
1342 if (rv < 0)
1343 return rv;
1344
1345 return count;
1175} 1346}
1176 1347
1177static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv); 1348static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv);
@@ -1234,7 +1405,7 @@ static int asus_wmi_platform_init(struct asus_wmi *asus)
1234 1405
1235 /* We don't know yet what to do with this version... */ 1406 /* We don't know yet what to do with this version... */
1236 if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SPEC, 0, 0x9, &rv)) { 1407 if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SPEC, 0, 0x9, &rv)) {
1237 pr_info("BIOS WMI version: %d.%d", rv >> 8, rv & 0xFF); 1408 pr_info("BIOS WMI version: %d.%d", rv >> 16, rv & 0xFF);
1238 asus->spec = rv; 1409 asus->spec = rv;
1239 } 1410 }
1240 1411
@@ -1266,6 +1437,12 @@ static int asus_wmi_platform_init(struct asus_wmi *asus)
1266 return -ENODEV; 1437 return -ENODEV;
1267 } 1438 }
1268 1439
1440 /* CWAP allow to define the behavior of the Fn+F2 key,
1441 * this method doesn't seems to be present on Eee PCs */
1442 if (asus->driver->wapf >= 0)
1443 asus_wmi_set_devstate(ASUS_WMI_DEVID_CWAP,
1444 asus->driver->wapf, NULL);
1445
1269 return asus_wmi_sysfs_init(asus->platform_device); 1446 return asus_wmi_sysfs_init(asus->platform_device);
1270} 1447}
1271 1448
@@ -1568,6 +1745,14 @@ static int asus_hotk_restore(struct device *device)
1568 bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WWAN3G); 1745 bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WWAN3G);
1569 rfkill_set_sw_state(asus->wwan3g.rfkill, bl); 1746 rfkill_set_sw_state(asus->wwan3g.rfkill, bl);
1570 } 1747 }
1748 if (asus->gps.rfkill) {
1749 bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPS);
1750 rfkill_set_sw_state(asus->gps.rfkill, bl);
1751 }
1752 if (asus->uwb.rfkill) {
1753 bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_UWB);
1754 rfkill_set_sw_state(asus->uwb.rfkill, bl);
1755 }
1571 1756
1572 return 0; 1757 return 0;
1573} 1758}
@@ -1604,7 +1789,7 @@ static int asus_wmi_probe(struct platform_device *pdev)
1604 1789
1605static bool used; 1790static bool used;
1606 1791
1607int asus_wmi_register_driver(struct asus_wmi_driver *driver) 1792int __init_or_module asus_wmi_register_driver(struct asus_wmi_driver *driver)
1608{ 1793{
1609 struct platform_driver *platform_driver; 1794 struct platform_driver *platform_driver;
1610 struct platform_device *platform_device; 1795 struct platform_device *platform_device;
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index c044522c8766..8147c10161cc 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -29,12 +29,15 @@
29 29
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31 31
32#define ASUS_WMI_KEY_IGNORE (-1)
33
32struct module; 34struct module;
33struct key_entry; 35struct key_entry;
34struct asus_wmi; 36struct asus_wmi;
35 37
36struct asus_wmi_driver { 38struct asus_wmi_driver {
37 bool hotplug_wireless; 39 bool hotplug_wireless;
40 int wapf;
38 41
39 const char *name; 42 const char *name;
40 struct module *owner; 43 struct module *owner;
@@ -44,6 +47,10 @@ struct asus_wmi_driver {
44 const struct key_entry *keymap; 47 const struct key_entry *keymap;
45 const char *input_name; 48 const char *input_name;
46 const char *input_phys; 49 const char *input_phys;
50 /* Returns new code, value, and autorelease values in arguments.
51 * Return ASUS_WMI_KEY_IGNORE in code if event should be ignored. */
52 void (*key_filter) (struct asus_wmi_driver *driver, int *code,
53 unsigned int *value, bool *autorelease);
47 54
48 int (*probe) (struct platform_device *device); 55 int (*probe) (struct platform_device *device);
49 void (*quirks) (struct asus_wmi_driver *driver); 56 void (*quirks) (struct asus_wmi_driver *driver);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index e39ab1d3ed87..f31fa4efa725 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -612,7 +612,6 @@ static int __init dell_init(void)
612 if (!bufferpage) 612 if (!bufferpage)
613 goto fail_buffer; 613 goto fail_buffer;
614 buffer = page_address(bufferpage); 614 buffer = page_address(bufferpage);
615 mutex_init(&buffer_mutex);
616 615
617 ret = dell_setup_rfkill(); 616 ret = dell_setup_rfkill();
618 617
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index ce790827e199..fa9a2171cc13 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -54,6 +54,8 @@ MODULE_ALIAS("wmi:"DELL_EVENT_GUID);
54 */ 54 */
55 55
56static const struct key_entry dell_wmi_legacy_keymap[] __initconst = { 56static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
57 { KE_IGNORE, 0x003a, { KEY_CAPSLOCK } },
58
57 { KE_KEY, 0xe045, { KEY_PROG1 } }, 59 { KE_KEY, 0xe045, { KEY_PROG1 } },
58 { KE_KEY, 0xe009, { KEY_EJECTCD } }, 60 { KE_KEY, 0xe009, { KEY_EJECTCD } },
59 61
@@ -85,6 +87,11 @@ static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
85 { KE_IGNORE, 0xe013, { KEY_RESERVED } }, 87 { KE_IGNORE, 0xe013, { KEY_RESERVED } },
86 88
87 { KE_IGNORE, 0xe020, { KEY_MUTE } }, 89 { KE_IGNORE, 0xe020, { KEY_MUTE } },
90
91 /* Shortcut and audio panel keys */
92 { KE_IGNORE, 0xe025, { KEY_RESERVED } },
93 { KE_IGNORE, 0xe026, { KEY_RESERVED } },
94
88 { KE_IGNORE, 0xe02e, { KEY_VOLUMEDOWN } }, 95 { KE_IGNORE, 0xe02e, { KEY_VOLUMEDOWN } },
89 { KE_IGNORE, 0xe030, { KEY_VOLUMEUP } }, 96 { KE_IGNORE, 0xe030, { KEY_VOLUMEUP } },
90 { KE_IGNORE, 0xe033, { KEY_KBDILLUMUP } }, 97 { KE_IGNORE, 0xe033, { KEY_KBDILLUMUP } },
@@ -92,6 +99,9 @@ static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
92 { KE_IGNORE, 0xe03a, { KEY_CAPSLOCK } }, 99 { KE_IGNORE, 0xe03a, { KEY_CAPSLOCK } },
93 { KE_IGNORE, 0xe045, { KEY_NUMLOCK } }, 100 { KE_IGNORE, 0xe045, { KEY_NUMLOCK } },
94 { KE_IGNORE, 0xe046, { KEY_SCROLLLOCK } }, 101 { KE_IGNORE, 0xe046, { KEY_SCROLLLOCK } },
102 { KE_IGNORE, 0xe0f7, { KEY_MUTE } },
103 { KE_IGNORE, 0xe0f8, { KEY_VOLUMEDOWN } },
104 { KE_IGNORE, 0xe0f9, { KEY_VOLUMEUP } },
95 { KE_END, 0 } 105 { KE_END, 0 }
96}; 106};
97 107
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 4aa867a9b88b..9f6e64302b45 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -56,6 +56,11 @@ MODULE_PARM_DESC(hotplug_wireless,
56 "If your laptop needs that, please report to " 56 "If your laptop needs that, please report to "
57 "acpi4asus-user@lists.sourceforge.net."); 57 "acpi4asus-user@lists.sourceforge.net.");
58 58
59/* Values for T101MT "Home" key */
60#define HOME_PRESS 0xe4
61#define HOME_HOLD 0xea
62#define HOME_RELEASE 0xe5
63
59static const struct key_entry eeepc_wmi_keymap[] = { 64static const struct key_entry eeepc_wmi_keymap[] = {
60 /* Sleep already handled via generic ACPI code */ 65 /* Sleep already handled via generic ACPI code */
61 { KE_KEY, 0x30, { KEY_VOLUMEUP } }, 66 { KE_KEY, 0x30, { KEY_VOLUMEUP } },
@@ -71,6 +76,7 @@ static const struct key_entry eeepc_wmi_keymap[] = {
71 { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } }, 76 { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } },
72 { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */ 77 { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */
73 { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */ 78 { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */
79 { KE_KEY, HOME_PRESS, { KEY_CONFIG } }, /* Home/Express gate key */
74 { KE_KEY, 0xe8, { KEY_SCREENLOCK } }, 80 { KE_KEY, 0xe8, { KEY_SCREENLOCK } },
75 { KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } }, 81 { KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } },
76 { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } }, 82 { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } },
@@ -81,6 +87,25 @@ static const struct key_entry eeepc_wmi_keymap[] = {
81 { KE_END, 0}, 87 { KE_END, 0},
82}; 88};
83 89
90static void eeepc_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
91 unsigned int *value, bool *autorelease)
92{
93 switch (*code) {
94 case HOME_PRESS:
95 *value = 1;
96 *autorelease = 0;
97 break;
98 case HOME_HOLD:
99 *code = ASUS_WMI_KEY_IGNORE;
100 break;
101 case HOME_RELEASE:
102 *code = HOME_PRESS;
103 *value = 0;
104 *autorelease = 0;
105 break;
106 }
107}
108
84static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level, 109static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level,
85 void *context, void **retval) 110 void *context, void **retval)
86{ 111{
@@ -141,6 +166,7 @@ static void eeepc_dmi_check(struct asus_wmi_driver *driver)
141static void eeepc_wmi_quirks(struct asus_wmi_driver *driver) 166static void eeepc_wmi_quirks(struct asus_wmi_driver *driver)
142{ 167{
143 driver->hotplug_wireless = hotplug_wireless; 168 driver->hotplug_wireless = hotplug_wireless;
169 driver->wapf = -1;
144 eeepc_dmi_check(driver); 170 eeepc_dmi_check(driver);
145} 171}
146 172
@@ -151,6 +177,7 @@ static struct asus_wmi_driver asus_wmi_driver = {
151 .keymap = eeepc_wmi_keymap, 177 .keymap = eeepc_wmi_keymap,
152 .input_name = "Eee PC WMI hotkeys", 178 .input_name = "Eee PC WMI hotkeys",
153 .input_phys = EEEPC_WMI_FILE "/input0", 179 .input_phys = EEEPC_WMI_FILE "/input0",
180 .key_filter = eeepc_wmi_key_filter,
154 .probe = eeepc_wmi_probe, 181 .probe = eeepc_wmi_probe,
155 .quirks = eeepc_wmi_quirks, 182 .quirks = eeepc_wmi_quirks,
156}; 183};
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index bfdda33feb26..0c595410e788 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -32,13 +32,22 @@
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/input.h> 33#include <linux/input.h>
34#include <linux/input/sparse-keymap.h> 34#include <linux/input/sparse-keymap.h>
35#include <linux/backlight.h>
36#include <linux/fb.h>
35 37
36#define IDEAPAD_RFKILL_DEV_NUM (3) 38#define IDEAPAD_RFKILL_DEV_NUM (3)
37 39
40#define CFG_BT_BIT (16)
41#define CFG_3G_BIT (17)
42#define CFG_WIFI_BIT (18)
43#define CFG_CAMERA_BIT (19)
44
38struct ideapad_private { 45struct ideapad_private {
39 struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM]; 46 struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM];
40 struct platform_device *platform_device; 47 struct platform_device *platform_device;
41 struct input_dev *inputdev; 48 struct input_dev *inputdev;
49 struct backlight_device *blightdev;
50 unsigned long cfg;
42}; 51};
43 52
44static acpi_handle ideapad_handle; 53static acpi_handle ideapad_handle;
@@ -155,7 +164,7 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
155} 164}
156 165
157/* 166/*
158 * camera power 167 * sysfs
159 */ 168 */
160static ssize_t show_ideapad_cam(struct device *dev, 169static ssize_t show_ideapad_cam(struct device *dev,
161 struct device_attribute *attr, 170 struct device_attribute *attr,
@@ -186,6 +195,44 @@ static ssize_t store_ideapad_cam(struct device *dev,
186 195
187static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam); 196static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam);
188 197
198static ssize_t show_ideapad_cfg(struct device *dev,
199 struct device_attribute *attr,
200 char *buf)
201{
202 struct ideapad_private *priv = dev_get_drvdata(dev);
203
204 return sprintf(buf, "0x%.8lX\n", priv->cfg);
205}
206
207static DEVICE_ATTR(cfg, 0444, show_ideapad_cfg, NULL);
208
209static struct attribute *ideapad_attributes[] = {
210 &dev_attr_camera_power.attr,
211 &dev_attr_cfg.attr,
212 NULL
213};
214
215static mode_t ideapad_is_visible(struct kobject *kobj,
216 struct attribute *attr,
217 int idx)
218{
219 struct device *dev = container_of(kobj, struct device, kobj);
220 struct ideapad_private *priv = dev_get_drvdata(dev);
221 bool supported;
222
223 if (attr == &dev_attr_camera_power.attr)
224 supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg));
225 else
226 supported = true;
227
228 return supported ? attr->mode : 0;
229}
230
231static struct attribute_group ideapad_attribute_group = {
232 .is_visible = ideapad_is_visible,
233 .attrs = ideapad_attributes
234};
235
189/* 236/*
190 * Rfkill 237 * Rfkill
191 */ 238 */
@@ -197,9 +244,9 @@ struct ideapad_rfk_data {
197}; 244};
198 245
199const struct ideapad_rfk_data ideapad_rfk_data[] = { 246const struct ideapad_rfk_data ideapad_rfk_data[] = {
200 { "ideapad_wlan", 18, 0x15, RFKILL_TYPE_WLAN }, 247 { "ideapad_wlan", CFG_WIFI_BIT, 0x15, RFKILL_TYPE_WLAN },
201 { "ideapad_bluetooth", 16, 0x17, RFKILL_TYPE_BLUETOOTH }, 248 { "ideapad_bluetooth", CFG_BT_BIT, 0x17, RFKILL_TYPE_BLUETOOTH },
202 { "ideapad_3g", 17, 0x20, RFKILL_TYPE_WWAN }, 249 { "ideapad_3g", CFG_3G_BIT, 0x20, RFKILL_TYPE_WWAN },
203}; 250};
204 251
205static int ideapad_rfk_set(void *data, bool blocked) 252static int ideapad_rfk_set(void *data, bool blocked)
@@ -265,8 +312,7 @@ static int __devinit ideapad_register_rfkill(struct acpi_device *adevice,
265 return 0; 312 return 0;
266} 313}
267 314
268static void __devexit ideapad_unregister_rfkill(struct acpi_device *adevice, 315static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
269 int dev)
270{ 316{
271 struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); 317 struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
272 318
@@ -280,15 +326,6 @@ static void __devexit ideapad_unregister_rfkill(struct acpi_device *adevice,
280/* 326/*
281 * Platform device 327 * Platform device
282 */ 328 */
283static struct attribute *ideapad_attributes[] = {
284 &dev_attr_camera_power.attr,
285 NULL
286};
287
288static struct attribute_group ideapad_attribute_group = {
289 .attrs = ideapad_attributes
290};
291
292static int __devinit ideapad_platform_init(struct ideapad_private *priv) 329static int __devinit ideapad_platform_init(struct ideapad_private *priv)
293{ 330{
294 int result; 331 int result;
@@ -369,7 +406,7 @@ err_free_dev:
369 return error; 406 return error;
370} 407}
371 408
372static void __devexit ideapad_input_exit(struct ideapad_private *priv) 409static void ideapad_input_exit(struct ideapad_private *priv)
373{ 410{
374 sparse_keymap_free(priv->inputdev); 411 sparse_keymap_free(priv->inputdev);
375 input_unregister_device(priv->inputdev); 412 input_unregister_device(priv->inputdev);
@@ -383,6 +420,98 @@ static void ideapad_input_report(struct ideapad_private *priv,
383} 420}
384 421
385/* 422/*
423 * backlight
424 */
425static int ideapad_backlight_get_brightness(struct backlight_device *blightdev)
426{
427 unsigned long now;
428
429 if (read_ec_data(ideapad_handle, 0x12, &now))
430 return -EIO;
431 return now;
432}
433
434static int ideapad_backlight_update_status(struct backlight_device *blightdev)
435{
436 if (write_ec_cmd(ideapad_handle, 0x13, blightdev->props.brightness))
437 return -EIO;
438 if (write_ec_cmd(ideapad_handle, 0x33,
439 blightdev->props.power == FB_BLANK_POWERDOWN ? 0 : 1))
440 return -EIO;
441
442 return 0;
443}
444
445static const struct backlight_ops ideapad_backlight_ops = {
446 .get_brightness = ideapad_backlight_get_brightness,
447 .update_status = ideapad_backlight_update_status,
448};
449
450static int ideapad_backlight_init(struct ideapad_private *priv)
451{
452 struct backlight_device *blightdev;
453 struct backlight_properties props;
454 unsigned long max, now, power;
455
456 if (read_ec_data(ideapad_handle, 0x11, &max))
457 return -EIO;
458 if (read_ec_data(ideapad_handle, 0x12, &now))
459 return -EIO;
460 if (read_ec_data(ideapad_handle, 0x18, &power))
461 return -EIO;
462
463 memset(&props, 0, sizeof(struct backlight_properties));
464 props.max_brightness = max;
465 props.type = BACKLIGHT_PLATFORM;
466 blightdev = backlight_device_register("ideapad",
467 &priv->platform_device->dev,
468 priv,
469 &ideapad_backlight_ops,
470 &props);
471 if (IS_ERR(blightdev)) {
472 pr_err("Could not register backlight device\n");
473 return PTR_ERR(blightdev);
474 }
475
476 priv->blightdev = blightdev;
477 blightdev->props.brightness = now;
478 blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
479 backlight_update_status(blightdev);
480
481 return 0;
482}
483
484static void ideapad_backlight_exit(struct ideapad_private *priv)
485{
486 if (priv->blightdev)
487 backlight_device_unregister(priv->blightdev);
488 priv->blightdev = NULL;
489}
490
491static void ideapad_backlight_notify_power(struct ideapad_private *priv)
492{
493 unsigned long power;
494 struct backlight_device *blightdev = priv->blightdev;
495
496 if (read_ec_data(ideapad_handle, 0x18, &power))
497 return;
498 blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
499}
500
501static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
502{
503 unsigned long now;
504
505 /* if we control brightness via acpi video driver */
506 if (priv->blightdev == NULL) {
507 read_ec_data(ideapad_handle, 0x12, &now);
508 return;
509 }
510
511 backlight_force_update(priv->blightdev, BACKLIGHT_UPDATE_HOTKEY);
512}
513
514/*
386 * module init/exit 515 * module init/exit
387 */ 516 */
388static const struct acpi_device_id ideapad_device_ids[] = { 517static const struct acpi_device_id ideapad_device_ids[] = {
@@ -393,10 +522,11 @@ MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
393 522
394static int __devinit ideapad_acpi_add(struct acpi_device *adevice) 523static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
395{ 524{
396 int ret, i, cfg; 525 int ret, i;
526 unsigned long cfg;
397 struct ideapad_private *priv; 527 struct ideapad_private *priv;
398 528
399 if (read_method_int(adevice->handle, "_CFG", &cfg)) 529 if (read_method_int(adevice->handle, "_CFG", (int *)&cfg))
400 return -ENODEV; 530 return -ENODEV;
401 531
402 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 532 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -404,6 +534,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
404 return -ENOMEM; 534 return -ENOMEM;
405 dev_set_drvdata(&adevice->dev, priv); 535 dev_set_drvdata(&adevice->dev, priv);
406 ideapad_handle = adevice->handle; 536 ideapad_handle = adevice->handle;
537 priv->cfg = cfg;
407 538
408 ret = ideapad_platform_init(priv); 539 ret = ideapad_platform_init(priv);
409 if (ret) 540 if (ret)
@@ -414,15 +545,25 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
414 goto input_failed; 545 goto input_failed;
415 546
416 for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) { 547 for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
417 if (test_bit(ideapad_rfk_data[i].cfgbit, (unsigned long *)&cfg)) 548 if (test_bit(ideapad_rfk_data[i].cfgbit, &cfg))
418 ideapad_register_rfkill(adevice, i); 549 ideapad_register_rfkill(adevice, i);
419 else 550 else
420 priv->rfk[i] = NULL; 551 priv->rfk[i] = NULL;
421 } 552 }
422 ideapad_sync_rfk_state(adevice); 553 ideapad_sync_rfk_state(adevice);
423 554
555 if (!acpi_video_backlight_support()) {
556 ret = ideapad_backlight_init(priv);
557 if (ret && ret != -ENODEV)
558 goto backlight_failed;
559 }
560
424 return 0; 561 return 0;
425 562
563backlight_failed:
564 for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
565 ideapad_unregister_rfkill(adevice, i);
566 ideapad_input_exit(priv);
426input_failed: 567input_failed:
427 ideapad_platform_exit(priv); 568 ideapad_platform_exit(priv);
428platform_failed: 569platform_failed:
@@ -435,6 +576,7 @@ static int __devexit ideapad_acpi_remove(struct acpi_device *adevice, int type)
435 struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); 576 struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
436 int i; 577 int i;
437 578
579 ideapad_backlight_exit(priv);
438 for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) 580 for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
439 ideapad_unregister_rfkill(adevice, i); 581 ideapad_unregister_rfkill(adevice, i);
440 ideapad_input_exit(priv); 582 ideapad_input_exit(priv);
@@ -459,12 +601,19 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
459 vpc1 = (vpc2 << 8) | vpc1; 601 vpc1 = (vpc2 << 8) | vpc1;
460 for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) { 602 for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) {
461 if (test_bit(vpc_bit, &vpc1)) { 603 if (test_bit(vpc_bit, &vpc1)) {
462 if (vpc_bit == 9) 604 switch (vpc_bit) {
605 case 9:
463 ideapad_sync_rfk_state(adevice); 606 ideapad_sync_rfk_state(adevice);
464 else if (vpc_bit == 4) 607 break;
465 read_ec_data(handle, 0x12, &vpc2); 608 case 4:
466 else 609 ideapad_backlight_notify_brightness(priv);
610 break;
611 case 2:
612 ideapad_backlight_notify_power(priv);
613 break;
614 default:
467 ideapad_input_report(priv, vpc_bit); 615 ideapad_input_report(priv, vpc_bit);
616 }
468 } 617 }
469 } 618 }
470} 619}
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 5ffe7c398148..809a3ae943c6 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -403,7 +403,7 @@ static void ips_cpu_raise(struct ips_driver *ips)
403 403
404 thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8); 404 thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8);
405 405
406 turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDC_OVR_EN; 406 turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
407 wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); 407 wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
408 408
409 turbo_override &= ~TURBO_TDP_MASK; 409 turbo_override &= ~TURBO_TDP_MASK;
@@ -438,7 +438,7 @@ static void ips_cpu_lower(struct ips_driver *ips)
438 438
439 thm_writew(THM_MPCPC, (new_limit * 10) / 8); 439 thm_writew(THM_MPCPC, (new_limit * 10) / 8);
440 440
441 turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDC_OVR_EN; 441 turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
442 wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); 442 wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
443 443
444 turbo_override &= ~TURBO_TDP_MASK; 444 turbo_override &= ~TURBO_TDP_MASK;
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index 809adea4965f..abddc83e9fd7 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -477,6 +477,8 @@ static acpi_status intel_menlow_register_sensor(acpi_handle handle, u32 lvl,
477 return AE_ERROR; 477 return AE_ERROR;
478 } 478 }
479 479
480 return AE_OK;
481
480 aux1_not_found: 482 aux1_not_found:
481 if (status == AE_NOT_FOUND) 483 if (status == AE_NOT_FOUND)
482 return AE_OK; 484 return AE_OK;
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 3a578323122b..ccd7b1f83519 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -493,20 +493,30 @@ static int mid_thermal_probe(struct platform_device *pdev)
493 493
494 /* Register each sensor with the generic thermal framework*/ 494 /* Register each sensor with the generic thermal framework*/
495 for (i = 0; i < MSIC_THERMAL_SENSORS; i++) { 495 for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
496 struct thermal_device_info *td_info = initialize_sensor(i);
497
498 if (!td_info) {
499 ret = -ENOMEM;
500 goto err;
501 }
496 pinfo->tzd[i] = thermal_zone_device_register(name[i], 502 pinfo->tzd[i] = thermal_zone_device_register(name[i],
497 0, initialize_sensor(i), &tzd_ops, 0, 0, 0, 0); 503 0, td_info, &tzd_ops, 0, 0, 0, 0);
498 if (IS_ERR(pinfo->tzd[i])) 504 if (IS_ERR(pinfo->tzd[i])) {
499 goto reg_fail; 505 kfree(td_info);
506 ret = PTR_ERR(pinfo->tzd[i]);
507 goto err;
508 }
500 } 509 }
501 510
502 pinfo->pdev = pdev; 511 pinfo->pdev = pdev;
503 platform_set_drvdata(pdev, pinfo); 512 platform_set_drvdata(pdev, pinfo);
504 return 0; 513 return 0;
505 514
506reg_fail: 515err:
507 ret = PTR_ERR(pinfo->tzd[i]); 516 while (--i >= 0) {
508 while (--i >= 0) 517 kfree(pinfo->tzd[i]->devdata);
509 thermal_zone_device_unregister(pinfo->tzd[i]); 518 thermal_zone_device_unregister(pinfo->tzd[i]);
519 }
510 configure_adc(0); 520 configure_adc(0);
511 kfree(pinfo); 521 kfree(pinfo);
512 return ret; 522 return ret;
@@ -524,8 +534,10 @@ static int mid_thermal_remove(struct platform_device *pdev)
524 int i; 534 int i;
525 struct platform_info *pinfo = platform_get_drvdata(pdev); 535 struct platform_info *pinfo = platform_get_drvdata(pdev);
526 536
527 for (i = 0; i < MSIC_THERMAL_SENSORS; i++) 537 for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
538 kfree(pinfo->tzd[i]->devdata);
528 thermal_zone_device_unregister(pinfo->tzd[i]); 539 thermal_zone_device_unregister(pinfo->tzd[i]);
540 }
529 541
530 kfree(pinfo); 542 kfree(pinfo);
531 platform_set_drvdata(pdev, NULL); 543 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/platform/x86/intel_rar_register.c b/drivers/platform/x86/intel_rar_register.c
index bde47e9080cd..c8a6aed45277 100644
--- a/drivers/platform/x86/intel_rar_register.c
+++ b/drivers/platform/x86/intel_rar_register.c
@@ -637,15 +637,13 @@ end_function:
637 return error; 637 return error;
638} 638}
639 639
640const struct pci_device_id rar_pci_id_tbl[] = { 640static DEFINE_PCI_DEVICE_TABLE(rar_pci_id_tbl) = {
641 { PCI_VDEVICE(INTEL, 0x4110) }, 641 { PCI_VDEVICE(INTEL, 0x4110) },
642 { 0 } 642 { 0 }
643}; 643};
644 644
645MODULE_DEVICE_TABLE(pci, rar_pci_id_tbl); 645MODULE_DEVICE_TABLE(pci, rar_pci_id_tbl);
646 646
647const struct pci_device_id *my_id_table = rar_pci_id_tbl;
648
649/* field for registering driver to PCI device */ 647/* field for registering driver to PCI device */
650static struct pci_driver rar_pci_driver = { 648static struct pci_driver rar_pci_driver = {
651 .name = "rar_register_driver", 649 .name = "rar_register_driver",
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 940accbe28d3..c86665369a22 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -725,7 +725,7 @@ static void ipc_remove(struct pci_dev *pdev)
725 intel_scu_devices_destroy(); 725 intel_scu_devices_destroy();
726} 726}
727 727
728static const struct pci_device_id pci_ids[] = { 728static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
729 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080e)}, 729 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080e)},
730 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)}, 730 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)},
731 { 0,} 731 { 0,}
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 3ff629df9f01..f204643c5052 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -538,6 +538,15 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
538 }, 538 },
539 .callback = dmi_check_cb 539 .callback = dmi_check_cb
540 }, 540 },
541 {
542 .ident = "MSI U270",
543 .matches = {
544 DMI_MATCH(DMI_SYS_VENDOR,
545 "Micro-Star International Co., Ltd."),
546 DMI_MATCH(DMI_PRODUCT_NAME, "U270 series"),
547 },
548 .callback = dmi_check_cb
549 },
541 { } 550 { }
542}; 551};
543 552
@@ -996,3 +1005,4 @@ MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N034:*");
996MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N051:*"); 1005MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N051:*");
997MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N014:*"); 1006MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N014:*");
998MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnCR620:*"); 1007MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnCR620:*");
1008MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnU270series:*");
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index c832e3356cd6..6f40bf202dc7 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -272,6 +272,7 @@ static int __init msi_wmi_init(void)
272err_free_backlight: 272err_free_backlight:
273 backlight_device_unregister(backlight); 273 backlight_device_unregister(backlight);
274err_free_input: 274err_free_input:
275 sparse_keymap_free(msi_wmi_input_dev);
275 input_unregister_device(msi_wmi_input_dev); 276 input_unregister_device(msi_wmi_input_dev);
276err_uninstall_notifier: 277err_uninstall_notifier:
277 wmi_remove_notify_handler(MSIWMI_EVENT_GUID); 278 wmi_remove_notify_handler(MSIWMI_EVENT_GUID);
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index d347116d150e..359163011044 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -521,6 +521,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
521 .callback = dmi_check_cb, 521 .callback = dmi_check_cb,
522 }, 522 },
523 { 523 {
524 .ident = "N510",
525 .matches = {
526 DMI_MATCH(DMI_SYS_VENDOR,
527 "SAMSUNG ELECTRONICS CO., LTD."),
528 DMI_MATCH(DMI_PRODUCT_NAME, "N510"),
529 DMI_MATCH(DMI_BOARD_NAME, "N510"),
530 },
531 .callback = dmi_check_cb,
532 },
533 {
524 .ident = "X125", 534 .ident = "X125",
525 .matches = { 535 .matches = {
526 DMI_MATCH(DMI_SYS_VENDOR, 536 DMI_MATCH(DMI_SYS_VENDOR,
@@ -601,6 +611,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
601 .callback = dmi_check_cb, 611 .callback = dmi_check_cb,
602 }, 612 },
603 { 613 {
614 .ident = "N150/N210/N220",
615 .matches = {
616 DMI_MATCH(DMI_SYS_VENDOR,
617 "SAMSUNG ELECTRONICS CO., LTD."),
618 DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
619 DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
620 },
621 .callback = dmi_check_cb,
622 },
623 {
604 .ident = "N150/N210/N220/N230", 624 .ident = "N150/N210/N220/N230",
605 .matches = { 625 .matches = {
606 DMI_MATCH(DMI_SYS_VENDOR, 626 DMI_MATCH(DMI_SYS_VENDOR,
diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
new file mode 100644
index 000000000000..1e54ae74274c
--- /dev/null
+++ b/drivers/platform/x86/samsung-q10.c
@@ -0,0 +1,196 @@
1/*
2 * Driver for Samsung Q10 and related laptops: controls the backlight
3 *
4 * Copyright (c) 2011 Frederick van der Wyck <fvanderwyck@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/platform_device.h>
16#include <linux/backlight.h>
17#include <linux/i8042.h>
18#include <linux/dmi.h>
19
20#define SAMSUNGQ10_BL_MAX_INTENSITY 255
21#define SAMSUNGQ10_BL_DEFAULT_INTENSITY 185
22
23#define SAMSUNGQ10_BL_8042_CMD 0xbe
24#define SAMSUNGQ10_BL_8042_DATA { 0x89, 0x91 }
25
26static int samsungq10_bl_brightness;
27
28static bool force;
29module_param(force, bool, 0);
30MODULE_PARM_DESC(force,
31 "Disable the DMI check and force the driver to be loaded");
32
33static int samsungq10_bl_set_intensity(struct backlight_device *bd)
34{
35
36 int brightness = bd->props.brightness;
37 unsigned char c[3] = SAMSUNGQ10_BL_8042_DATA;
38
39 c[2] = (unsigned char)brightness;
40 i8042_lock_chip();
41 i8042_command(c, (0x30 << 8) | SAMSUNGQ10_BL_8042_CMD);
42 i8042_unlock_chip();
43 samsungq10_bl_brightness = brightness;
44
45 return 0;
46}
47
48static int samsungq10_bl_get_intensity(struct backlight_device *bd)
49{
50 return samsungq10_bl_brightness;
51}
52
53static const struct backlight_ops samsungq10_bl_ops = {
54 .get_brightness = samsungq10_bl_get_intensity,
55 .update_status = samsungq10_bl_set_intensity,
56};
57
58#ifdef CONFIG_PM_SLEEP
59static int samsungq10_suspend(struct device *dev)
60{
61 return 0;
62}
63
64static int samsungq10_resume(struct device *dev)
65{
66
67 struct backlight_device *bd = dev_get_drvdata(dev);
68
69 samsungq10_bl_set_intensity(bd);
70 return 0;
71}
72#else
73#define samsungq10_suspend NULL
74#define samsungq10_resume NULL
75#endif
76
77static SIMPLE_DEV_PM_OPS(samsungq10_pm_ops,
78 samsungq10_suspend, samsungq10_resume);
79
80static int __devinit samsungq10_probe(struct platform_device *pdev)
81{
82
83 struct backlight_properties props;
84 struct backlight_device *bd;
85
86 memset(&props, 0, sizeof(struct backlight_properties));
87 props.type = BACKLIGHT_PLATFORM;
88 props.max_brightness = SAMSUNGQ10_BL_MAX_INTENSITY;
89 bd = backlight_device_register("samsung", &pdev->dev, NULL,
90 &samsungq10_bl_ops, &props);
91 if (IS_ERR(bd))
92 return PTR_ERR(bd);
93
94 platform_set_drvdata(pdev, bd);
95
96 bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY;
97 samsungq10_bl_set_intensity(bd);
98
99 return 0;
100}
101
102static int __devexit samsungq10_remove(struct platform_device *pdev)
103{
104
105 struct backlight_device *bd = platform_get_drvdata(pdev);
106
107 bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY;
108 samsungq10_bl_set_intensity(bd);
109
110 backlight_device_unregister(bd);
111
112 return 0;
113}
114
115static struct platform_driver samsungq10_driver = {
116 .driver = {
117 .name = KBUILD_MODNAME,
118 .owner = THIS_MODULE,
119 .pm = &samsungq10_pm_ops,
120 },
121 .probe = samsungq10_probe,
122 .remove = __devexit_p(samsungq10_remove),
123};
124
125static struct platform_device *samsungq10_device;
126
127static int __init dmi_check_callback(const struct dmi_system_id *id)
128{
129 printk(KERN_INFO KBUILD_MODNAME ": found model '%s'\n", id->ident);
130 return 1;
131}
132
133static struct dmi_system_id __initdata samsungq10_dmi_table[] = {
134 {
135 .ident = "Samsung Q10",
136 .matches = {
137 DMI_MATCH(DMI_SYS_VENDOR, "Samsung"),
138 DMI_MATCH(DMI_PRODUCT_NAME, "SQ10"),
139 },
140 .callback = dmi_check_callback,
141 },
142 {
143 .ident = "Samsung Q20",
144 .matches = {
145 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG Electronics"),
146 DMI_MATCH(DMI_PRODUCT_NAME, "SENS Q20"),
147 },
148 .callback = dmi_check_callback,
149 },
150 {
151 .ident = "Samsung Q25",
152 .matches = {
153 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG Electronics"),
154 DMI_MATCH(DMI_PRODUCT_NAME, "NQ25"),
155 },
156 .callback = dmi_check_callback,
157 },
158 {
159 .ident = "Dell Latitude X200",
160 .matches = {
161 DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
162 DMI_MATCH(DMI_PRODUCT_NAME, "X200"),
163 },
164 .callback = dmi_check_callback,
165 },
166 { },
167};
168MODULE_DEVICE_TABLE(dmi, samsungq10_dmi_table);
169
170static int __init samsungq10_init(void)
171{
172 if (!force && !dmi_check_system(samsungq10_dmi_table))
173 return -ENODEV;
174
175 samsungq10_device = platform_create_bundle(&samsungq10_driver,
176 samsungq10_probe,
177 NULL, 0, NULL, 0);
178
179 if (IS_ERR(samsungq10_device))
180 return PTR_ERR(samsungq10_device);
181
182 return 0;
183}
184
185static void __exit samsungq10_exit(void)
186{
187 platform_device_unregister(samsungq10_device);
188 platform_driver_unregister(&samsungq10_driver);
189}
190
191module_init(samsungq10_init);
192module_exit(samsungq10_exit);
193
194MODULE_AUTHOR("Frederick van der Wyck <fvanderwyck@gmail.com>");
195MODULE_DESCRIPTION("Samsung Q10 Driver");
196MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 26c5b117df22..7bd829f247eb 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3186,8 +3186,17 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3186 KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */ 3186 KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */
3187 3187
3188 /* (assignments unknown, please report if found) */ 3188 /* (assignments unknown, please report if found) */
3189 KEY_UNKNOWN, KEY_UNKNOWN,
3190
3191 /*
3192 * The mic mute button only sends 0x1a. It does not
3193 * automatically mute the mic or change the mute light.
3194 */
3195 KEY_MICMUTE, /* 0x1a: Mic mute (since ?400 or so) */
3196
3197 /* (assignments unknown, please report if found) */
3189 KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, 3198 KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
3190 KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, 3199 KEY_UNKNOWN,
3191 }, 3200 },
3192 }; 3201 };
3193 3202
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index e57b50b38565..57de051a74b3 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -235,4 +235,18 @@ config CHARGER_GPIO
235 This driver can be build as a module. If so, the module will be 235 This driver can be build as a module. If so, the module will be
236 called gpio-charger. 236 called gpio-charger.
237 237
238config CHARGER_MAX8997
239 tristate "Maxim MAX8997/MAX8966 PMIC battery charger driver"
240 depends on MFD_MAX8997 && REGULATOR_MAX8997
241 help
242 Say Y to enable support for the battery charger control sysfs and
243 platform data of MAX8997/LP3974 PMICs.
244
245config CHARGER_MAX8998
246 tristate "Maxim MAX8998/LP3974 PMIC battery charger driver"
247 depends on MFD_MAX8998 && REGULATOR_MAX8998
248 help
249 Say Y to enable support for the battery charger control sysfs and
250 platform data of MAX8998/LP3974 PMICs.
251
238endif # POWER_SUPPLY 252endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 009a90fa8ac9..b4af13dd8b66 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -36,3 +36,5 @@ obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
36obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o 36obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
37obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o 37obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
38obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o 38obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
39obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
40obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c
index dc628cb2e762..8a612dec9139 100644
--- a/drivers/power/apm_power.c
+++ b/drivers/power/apm_power.c
@@ -14,11 +14,11 @@
14#include <linux/apm-emulation.h> 14#include <linux/apm-emulation.h>
15 15
16 16
17#define PSY_PROP(psy, prop, val) psy->get_property(psy, \ 17#define PSY_PROP(psy, prop, val) (psy->get_property(psy, \
18 POWER_SUPPLY_PROP_##prop, val) 18 POWER_SUPPLY_PROP_##prop, val))
19 19
20#define _MPSY_PROP(prop, val) main_battery->get_property(main_battery, \ 20#define _MPSY_PROP(prop, val) (main_battery->get_property(main_battery, \
21 prop, val) 21 prop, val))
22 22
23#define MPSY_PROP(prop, val) _MPSY_PROP(POWER_SUPPLY_PROP_##prop, val) 23#define MPSY_PROP(prop, val) _MPSY_PROP(POWER_SUPPLY_PROP_##prop, val)
24 24
diff --git a/drivers/power/bq20z75.c b/drivers/power/bq20z75.c
index 506585e31a5b..9c5e5beda3a8 100644
--- a/drivers/power/bq20z75.c
+++ b/drivers/power/bq20z75.c
@@ -152,6 +152,10 @@ struct bq20z75_info {
152 bool gpio_detect; 152 bool gpio_detect;
153 bool enable_detection; 153 bool enable_detection;
154 int irq; 154 int irq;
155 int last_state;
156 int poll_time;
157 struct delayed_work work;
158 int ignore_changes;
155}; 159};
156 160
157static int bq20z75_read_word_data(struct i2c_client *client, u8 address) 161static int bq20z75_read_word_data(struct i2c_client *client, u8 address)
@@ -279,6 +283,7 @@ static int bq20z75_get_battery_property(struct i2c_client *client,
279 int reg_offset, enum power_supply_property psp, 283 int reg_offset, enum power_supply_property psp,
280 union power_supply_propval *val) 284 union power_supply_propval *val)
281{ 285{
286 struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
282 s32 ret; 287 s32 ret;
283 288
284 ret = bq20z75_read_word_data(client, 289 ret = bq20z75_read_word_data(client,
@@ -293,15 +298,24 @@ static int bq20z75_get_battery_property(struct i2c_client *client,
293 if (ret >= bq20z75_data[reg_offset].min_value && 298 if (ret >= bq20z75_data[reg_offset].min_value &&
294 ret <= bq20z75_data[reg_offset].max_value) { 299 ret <= bq20z75_data[reg_offset].max_value) {
295 val->intval = ret; 300 val->intval = ret;
296 if (psp == POWER_SUPPLY_PROP_STATUS) { 301 if (psp != POWER_SUPPLY_PROP_STATUS)
297 if (ret & BATTERY_FULL_CHARGED) 302 return 0;
298 val->intval = POWER_SUPPLY_STATUS_FULL; 303
299 else if (ret & BATTERY_FULL_DISCHARGED) 304 if (ret & BATTERY_FULL_CHARGED)
300 val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; 305 val->intval = POWER_SUPPLY_STATUS_FULL;
301 else if (ret & BATTERY_DISCHARGING) 306 else if (ret & BATTERY_FULL_DISCHARGED)
302 val->intval = POWER_SUPPLY_STATUS_DISCHARGING; 307 val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
303 else 308 else if (ret & BATTERY_DISCHARGING)
304 val->intval = POWER_SUPPLY_STATUS_CHARGING; 309 val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
310 else
311 val->intval = POWER_SUPPLY_STATUS_CHARGING;
312
313 if (bq20z75_device->poll_time == 0)
314 bq20z75_device->last_state = val->intval;
315 else if (bq20z75_device->last_state != val->intval) {
316 cancel_delayed_work_sync(&bq20z75_device->work);
317 power_supply_changed(&bq20z75_device->power_supply);
318 bq20z75_device->poll_time = 0;
305 } 319 }
306 } else { 320 } else {
307 if (psp == POWER_SUPPLY_PROP_STATUS) 321 if (psp == POWER_SUPPLY_PROP_STATUS)
@@ -545,6 +559,60 @@ static irqreturn_t bq20z75_irq(int irq, void *devid)
545 return IRQ_HANDLED; 559 return IRQ_HANDLED;
546} 560}
547 561
562static void bq20z75_external_power_changed(struct power_supply *psy)
563{
564 struct bq20z75_info *bq20z75_device;
565
566 bq20z75_device = container_of(psy, struct bq20z75_info, power_supply);
567
568 if (bq20z75_device->ignore_changes > 0) {
569 bq20z75_device->ignore_changes--;
570 return;
571 }
572
573 /* cancel outstanding work */
574 cancel_delayed_work_sync(&bq20z75_device->work);
575
576 schedule_delayed_work(&bq20z75_device->work, HZ);
577 bq20z75_device->poll_time = bq20z75_device->pdata->poll_retry_count;
578}
579
580static void bq20z75_delayed_work(struct work_struct *work)
581{
582 struct bq20z75_info *bq20z75_device;
583 s32 ret;
584
585 bq20z75_device = container_of(work, struct bq20z75_info, work.work);
586
587 ret = bq20z75_read_word_data(bq20z75_device->client,
588 bq20z75_data[REG_STATUS].addr);
589 /* if the read failed, give up on this work */
590 if (ret < 0) {
591 bq20z75_device->poll_time = 0;
592 return;
593 }
594
595 if (ret & BATTERY_FULL_CHARGED)
596 ret = POWER_SUPPLY_STATUS_FULL;
597 else if (ret & BATTERY_FULL_DISCHARGED)
598 ret = POWER_SUPPLY_STATUS_NOT_CHARGING;
599 else if (ret & BATTERY_DISCHARGING)
600 ret = POWER_SUPPLY_STATUS_DISCHARGING;
601 else
602 ret = POWER_SUPPLY_STATUS_CHARGING;
603
604 if (bq20z75_device->last_state != ret) {
605 bq20z75_device->poll_time = 0;
606 power_supply_changed(&bq20z75_device->power_supply);
607 return;
608 }
609 if (bq20z75_device->poll_time > 0) {
610 schedule_delayed_work(&bq20z75_device->work, HZ);
611 bq20z75_device->poll_time--;
612 return;
613 }
614}
615
548static int __devinit bq20z75_probe(struct i2c_client *client, 616static int __devinit bq20z75_probe(struct i2c_client *client,
549 const struct i2c_device_id *id) 617 const struct i2c_device_id *id)
550{ 618{
@@ -566,6 +634,13 @@ static int __devinit bq20z75_probe(struct i2c_client *client,
566 bq20z75_device->power_supply.num_properties = 634 bq20z75_device->power_supply.num_properties =
567 ARRAY_SIZE(bq20z75_properties); 635 ARRAY_SIZE(bq20z75_properties);
568 bq20z75_device->power_supply.get_property = bq20z75_get_property; 636 bq20z75_device->power_supply.get_property = bq20z75_get_property;
637 /* ignore first notification of external change, it is generated
638 * from the power_supply_register call back
639 */
640 bq20z75_device->ignore_changes = 1;
641 bq20z75_device->last_state = POWER_SUPPLY_STATUS_UNKNOWN;
642 bq20z75_device->power_supply.external_power_changed =
643 bq20z75_external_power_changed;
569 644
570 if (pdata) { 645 if (pdata) {
571 bq20z75_device->gpio_detect = 646 bq20z75_device->gpio_detect =
@@ -625,6 +700,10 @@ skip_gpio:
625 dev_info(&client->dev, 700 dev_info(&client->dev,
626 "%s: battery gas gauge device registered\n", client->name); 701 "%s: battery gas gauge device registered\n", client->name);
627 702
703 INIT_DELAYED_WORK(&bq20z75_device->work, bq20z75_delayed_work);
704
705 bq20z75_device->enable_detection = true;
706
628 return 0; 707 return 0;
629 708
630exit_psupply: 709exit_psupply:
@@ -648,6 +727,9 @@ static int __devexit bq20z75_remove(struct i2c_client *client)
648 gpio_free(bq20z75_device->pdata->battery_detect); 727 gpio_free(bq20z75_device->pdata->battery_detect);
649 728
650 power_supply_unregister(&bq20z75_device->power_supply); 729 power_supply_unregister(&bq20z75_device->power_supply);
730
731 cancel_delayed_work_sync(&bq20z75_device->work);
732
651 kfree(bq20z75_device); 733 kfree(bq20z75_device);
652 bq20z75_device = NULL; 734 bq20z75_device = NULL;
653 735
@@ -661,6 +743,9 @@ static int bq20z75_suspend(struct i2c_client *client,
661 struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client); 743 struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
662 s32 ret; 744 s32 ret;
663 745
746 if (bq20z75_device->poll_time > 0)
747 cancel_delayed_work_sync(&bq20z75_device->work);
748
664 /* write to manufacturer access with sleep command */ 749 /* write to manufacturer access with sleep command */
665 ret = bq20z75_write_word_data(client, 750 ret = bq20z75_write_word_data(client,
666 bq20z75_data[REG_MANUFACTURER_DATA].addr, 751 bq20z75_data[REG_MANUFACTURER_DATA].addr,
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
index 718f2c537827..a64b8854cfd5 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/gpio-charger.c
@@ -127,7 +127,7 @@ static int __devinit gpio_charger_probe(struct platform_device *pdev)
127 ret = request_any_context_irq(irq, gpio_charger_irq, 127 ret = request_any_context_irq(irq, gpio_charger_irq,
128 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 128 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
129 dev_name(&pdev->dev), charger); 129 dev_name(&pdev->dev), charger);
130 if (ret) 130 if (ret < 0)
131 dev_warn(&pdev->dev, "Failed to request irq: %d\n", ret); 131 dev_warn(&pdev->dev, "Failed to request irq: %d\n", ret);
132 else 132 else
133 gpio_charger->irq = irq; 133 gpio_charger->irq = irq;
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index c5c8805156cb..98bfab35b8e9 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -29,74 +29,6 @@
29#include <linux/power_supply.h> 29#include <linux/power_supply.h>
30#include <linux/power/max17042_battery.h> 30#include <linux/power/max17042_battery.h>
31 31
32enum max17042_register {
33 MAX17042_STATUS = 0x00,
34 MAX17042_VALRT_Th = 0x01,
35 MAX17042_TALRT_Th = 0x02,
36 MAX17042_SALRT_Th = 0x03,
37 MAX17042_AtRate = 0x04,
38 MAX17042_RepCap = 0x05,
39 MAX17042_RepSOC = 0x06,
40 MAX17042_Age = 0x07,
41 MAX17042_TEMP = 0x08,
42 MAX17042_VCELL = 0x09,
43 MAX17042_Current = 0x0A,
44 MAX17042_AvgCurrent = 0x0B,
45 MAX17042_Qresidual = 0x0C,
46 MAX17042_SOC = 0x0D,
47 MAX17042_AvSOC = 0x0E,
48 MAX17042_RemCap = 0x0F,
49 MAX17402_FullCAP = 0x10,
50 MAX17042_TTE = 0x11,
51 MAX17042_V_empty = 0x12,
52
53 MAX17042_RSLOW = 0x14,
54
55 MAX17042_AvgTA = 0x16,
56 MAX17042_Cycles = 0x17,
57 MAX17042_DesignCap = 0x18,
58 MAX17042_AvgVCELL = 0x19,
59 MAX17042_MinMaxTemp = 0x1A,
60 MAX17042_MinMaxVolt = 0x1B,
61 MAX17042_MinMaxCurr = 0x1C,
62 MAX17042_CONFIG = 0x1D,
63 MAX17042_ICHGTerm = 0x1E,
64 MAX17042_AvCap = 0x1F,
65 MAX17042_ManName = 0x20,
66 MAX17042_DevName = 0x21,
67 MAX17042_DevChem = 0x22,
68
69 MAX17042_TempNom = 0x24,
70 MAX17042_TempCold = 0x25,
71 MAX17042_TempHot = 0x26,
72 MAX17042_AIN = 0x27,
73 MAX17042_LearnCFG = 0x28,
74 MAX17042_SHFTCFG = 0x29,
75 MAX17042_RelaxCFG = 0x2A,
76 MAX17042_MiscCFG = 0x2B,
77 MAX17042_TGAIN = 0x2C,
78 MAx17042_TOFF = 0x2D,
79 MAX17042_CGAIN = 0x2E,
80 MAX17042_COFF = 0x2F,
81
82 MAX17042_Q_empty = 0x33,
83 MAX17042_T_empty = 0x34,
84
85 MAX17042_RCOMP0 = 0x38,
86 MAX17042_TempCo = 0x39,
87 MAX17042_Rx = 0x3A,
88 MAX17042_T_empty0 = 0x3B,
89 MAX17042_TaskPeriod = 0x3C,
90 MAX17042_FSTAT = 0x3D,
91
92 MAX17042_SHDNTIMER = 0x3F,
93
94 MAX17042_VFRemCap = 0x4A,
95
96 MAX17042_QH = 0x4D,
97 MAX17042_QL = 0x4E,
98};
99
100struct max17042_chip { 32struct max17042_chip {
101 struct i2c_client *client; 33 struct i2c_client *client;
102 struct power_supply battery; 34 struct power_supply battery;
@@ -123,10 +55,27 @@ static int max17042_read_reg(struct i2c_client *client, u8 reg)
123 return ret; 55 return ret;
124} 56}
125 57
58static void max17042_set_reg(struct i2c_client *client,
59 struct max17042_reg_data *data, int size)
60{
61 int i;
62
63 for (i = 0; i < size; i++)
64 max17042_write_reg(client, data[i].addr, data[i].data);
65}
66
126static enum power_supply_property max17042_battery_props[] = { 67static enum power_supply_property max17042_battery_props[] = {
68 POWER_SUPPLY_PROP_PRESENT,
69 POWER_SUPPLY_PROP_CYCLE_COUNT,
70 POWER_SUPPLY_PROP_VOLTAGE_MAX,
71 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
127 POWER_SUPPLY_PROP_VOLTAGE_NOW, 72 POWER_SUPPLY_PROP_VOLTAGE_NOW,
128 POWER_SUPPLY_PROP_VOLTAGE_AVG, 73 POWER_SUPPLY_PROP_VOLTAGE_AVG,
129 POWER_SUPPLY_PROP_CAPACITY, 74 POWER_SUPPLY_PROP_CAPACITY,
75 POWER_SUPPLY_PROP_CHARGE_FULL,
76 POWER_SUPPLY_PROP_TEMP,
77 POWER_SUPPLY_PROP_CURRENT_NOW,
78 POWER_SUPPLY_PROP_CURRENT_AVG,
130}; 79};
131 80
132static int max17042_get_property(struct power_supply *psy, 81static int max17042_get_property(struct power_supply *psy,
@@ -137,6 +86,30 @@ static int max17042_get_property(struct power_supply *psy,
137 struct max17042_chip, battery); 86 struct max17042_chip, battery);
138 87
139 switch (psp) { 88 switch (psp) {
89 case POWER_SUPPLY_PROP_PRESENT:
90 val->intval = max17042_read_reg(chip->client,
91 MAX17042_STATUS);
92 if (val->intval & MAX17042_STATUS_BattAbsent)
93 val->intval = 0;
94 else
95 val->intval = 1;
96 break;
97 case POWER_SUPPLY_PROP_CYCLE_COUNT:
98 val->intval = max17042_read_reg(chip->client,
99 MAX17042_Cycles);
100 break;
101 case POWER_SUPPLY_PROP_VOLTAGE_MAX:
102 val->intval = max17042_read_reg(chip->client,
103 MAX17042_MinMaxVolt);
104 val->intval >>= 8;
105 val->intval *= 20000; /* Units of LSB = 20mV */
106 break;
107 case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
108 val->intval = max17042_read_reg(chip->client,
109 MAX17042_V_empty);
110 val->intval >>= 7;
111 val->intval *= 10000; /* Units of LSB = 10mV */
112 break;
140 case POWER_SUPPLY_PROP_VOLTAGE_NOW: 113 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
141 val->intval = max17042_read_reg(chip->client, 114 val->intval = max17042_read_reg(chip->client,
142 MAX17042_VCELL) * 83; /* 1000 / 12 = 83 */ 115 MAX17042_VCELL) * 83; /* 1000 / 12 = 83 */
@@ -149,6 +122,57 @@ static int max17042_get_property(struct power_supply *psy,
149 val->intval = max17042_read_reg(chip->client, 122 val->intval = max17042_read_reg(chip->client,
150 MAX17042_SOC) / 256; 123 MAX17042_SOC) / 256;
151 break; 124 break;
125 case POWER_SUPPLY_PROP_CHARGE_FULL:
126 val->intval = max17042_read_reg(chip->client,
127 MAX17042_RepSOC);
128 if ((val->intval / 256) >= MAX17042_BATTERY_FULL)
129 val->intval = 1;
130 else if (val->intval >= 0)
131 val->intval = 0;
132 break;
133 case POWER_SUPPLY_PROP_TEMP:
134 val->intval = max17042_read_reg(chip->client,
135 MAX17042_TEMP);
136 /* The value is signed. */
137 if (val->intval & 0x8000) {
138 val->intval = (0x7fff & ~val->intval) + 1;
139 val->intval *= -1;
140 }
141 /* The value is converted into deci-centigrade scale */
142 /* Units of LSB = 1 / 256 degree Celsius */
143 val->intval = val->intval * 10 / 256;
144 break;
145 case POWER_SUPPLY_PROP_CURRENT_NOW:
146 if (chip->pdata->enable_current_sense) {
147 val->intval = max17042_read_reg(chip->client,
148 MAX17042_Current);
149 if (val->intval & 0x8000) {
150 /* Negative */
151 val->intval = ~val->intval & 0x7fff;
152 val->intval++;
153 val->intval *= -1;
154 }
155 val->intval >>= 4;
156 val->intval *= 1000000 * 25 / chip->pdata->r_sns;
157 } else {
158 return -EINVAL;
159 }
160 break;
161 case POWER_SUPPLY_PROP_CURRENT_AVG:
162 if (chip->pdata->enable_current_sense) {
163 val->intval = max17042_read_reg(chip->client,
164 MAX17042_AvgCurrent);
165 if (val->intval & 0x8000) {
166 /* Negative */
167 val->intval = ~val->intval & 0x7fff;
168 val->intval++;
169 val->intval *= -1;
170 }
171 val->intval *= 1562500 / chip->pdata->r_sns;
172 } else {
173 return -EINVAL;
174 }
175 break;
152 default: 176 default:
153 return -EINVAL; 177 return -EINVAL;
154 } 178 }
@@ -180,18 +204,30 @@ static int __devinit max17042_probe(struct i2c_client *client,
180 chip->battery.properties = max17042_battery_props; 204 chip->battery.properties = max17042_battery_props;
181 chip->battery.num_properties = ARRAY_SIZE(max17042_battery_props); 205 chip->battery.num_properties = ARRAY_SIZE(max17042_battery_props);
182 206
207 /* When current is not measured,
208 * CURRENT_NOW and CURRENT_AVG properties should be invisible. */
209 if (!chip->pdata->enable_current_sense)
210 chip->battery.num_properties -= 2;
211
183 ret = power_supply_register(&client->dev, &chip->battery); 212 ret = power_supply_register(&client->dev, &chip->battery);
184 if (ret) { 213 if (ret) {
185 dev_err(&client->dev, "failed: power supply register\n"); 214 dev_err(&client->dev, "failed: power supply register\n");
186 i2c_set_clientdata(client, NULL);
187 kfree(chip); 215 kfree(chip);
188 return ret; 216 return ret;
189 } 217 }
190 218
219 /* Initialize registers according to values from the platform data */
220 if (chip->pdata->init_data)
221 max17042_set_reg(client, chip->pdata->init_data,
222 chip->pdata->num_init_data);
223
191 if (!chip->pdata->enable_current_sense) { 224 if (!chip->pdata->enable_current_sense) {
192 max17042_write_reg(client, MAX17042_CGAIN, 0x0000); 225 max17042_write_reg(client, MAX17042_CGAIN, 0x0000);
193 max17042_write_reg(client, MAX17042_MiscCFG, 0x0003); 226 max17042_write_reg(client, MAX17042_MiscCFG, 0x0003);
194 max17042_write_reg(client, MAX17042_LearnCFG, 0x0007); 227 max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
228 } else {
229 if (chip->pdata->r_sns == 0)
230 chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR;
195 } 231 }
196 232
197 return 0; 233 return 0;
@@ -202,7 +238,6 @@ static int __devexit max17042_remove(struct i2c_client *client)
202 struct max17042_chip *chip = i2c_get_clientdata(client); 238 struct max17042_chip *chip = i2c_get_clientdata(client);
203 239
204 power_supply_unregister(&chip->battery); 240 power_supply_unregister(&chip->battery);
205 i2c_set_clientdata(client, NULL);
206 kfree(chip); 241 kfree(chip);
207 return 0; 242 return 0;
208} 243}
diff --git a/drivers/power/max8903_charger.c b/drivers/power/max8903_charger.c
index 33ff0e37809e..a9b0209a2f55 100644
--- a/drivers/power/max8903_charger.c
+++ b/drivers/power/max8903_charger.c
@@ -28,7 +28,7 @@
28#include <linux/power/max8903_charger.h> 28#include <linux/power/max8903_charger.h>
29 29
30struct max8903_data { 30struct max8903_data {
31 struct max8903_pdata *pdata; 31 struct max8903_pdata pdata;
32 struct device *dev; 32 struct device *dev;
33 struct power_supply psy; 33 struct power_supply psy;
34 bool fault; 34 bool fault;
@@ -52,8 +52,8 @@ static int max8903_get_property(struct power_supply *psy,
52 switch (psp) { 52 switch (psp) {
53 case POWER_SUPPLY_PROP_STATUS: 53 case POWER_SUPPLY_PROP_STATUS:
54 val->intval = POWER_SUPPLY_STATUS_UNKNOWN; 54 val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
55 if (data->pdata->chg) { 55 if (data->pdata.chg) {
56 if (gpio_get_value(data->pdata->chg) == 0) 56 if (gpio_get_value(data->pdata.chg) == 0)
57 val->intval = POWER_SUPPLY_STATUS_CHARGING; 57 val->intval = POWER_SUPPLY_STATUS_CHARGING;
58 else if (data->usb_in || data->ta_in) 58 else if (data->usb_in || data->ta_in)
59 val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; 59 val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -80,7 +80,7 @@ static int max8903_get_property(struct power_supply *psy,
80static irqreturn_t max8903_dcin(int irq, void *_data) 80static irqreturn_t max8903_dcin(int irq, void *_data)
81{ 81{
82 struct max8903_data *data = _data; 82 struct max8903_data *data = _data;
83 struct max8903_pdata *pdata = data->pdata; 83 struct max8903_pdata *pdata = &data->pdata;
84 bool ta_in; 84 bool ta_in;
85 enum power_supply_type old_type; 85 enum power_supply_type old_type;
86 86
@@ -121,7 +121,7 @@ static irqreturn_t max8903_dcin(int irq, void *_data)
121static irqreturn_t max8903_usbin(int irq, void *_data) 121static irqreturn_t max8903_usbin(int irq, void *_data)
122{ 122{
123 struct max8903_data *data = _data; 123 struct max8903_data *data = _data;
124 struct max8903_pdata *pdata = data->pdata; 124 struct max8903_pdata *pdata = &data->pdata;
125 bool usb_in; 125 bool usb_in;
126 enum power_supply_type old_type; 126 enum power_supply_type old_type;
127 127
@@ -160,7 +160,7 @@ static irqreturn_t max8903_usbin(int irq, void *_data)
160static irqreturn_t max8903_fault(int irq, void *_data) 160static irqreturn_t max8903_fault(int irq, void *_data)
161{ 161{
162 struct max8903_data *data = _data; 162 struct max8903_data *data = _data;
163 struct max8903_pdata *pdata = data->pdata; 163 struct max8903_pdata *pdata = &data->pdata;
164 bool fault; 164 bool fault;
165 165
166 fault = gpio_get_value(pdata->flt) ? false : true; 166 fault = gpio_get_value(pdata->flt) ? false : true;
@@ -193,7 +193,7 @@ static __devinit int max8903_probe(struct platform_device *pdev)
193 dev_err(dev, "Cannot allocate memory.\n"); 193 dev_err(dev, "Cannot allocate memory.\n");
194 return -ENOMEM; 194 return -ENOMEM;
195 } 195 }
196 data->pdata = pdata; 196 memcpy(&data->pdata, pdata, sizeof(struct max8903_pdata));
197 data->dev = dev; 197 data->dev = dev;
198 platform_set_drvdata(pdev, data); 198 platform_set_drvdata(pdev, data);
199 199
@@ -349,7 +349,7 @@ static __devexit int max8903_remove(struct platform_device *pdev)
349 struct max8903_data *data = platform_get_drvdata(pdev); 349 struct max8903_data *data = platform_get_drvdata(pdev);
350 350
351 if (data) { 351 if (data) {
352 struct max8903_pdata *pdata = data->pdata; 352 struct max8903_pdata *pdata = &data->pdata;
353 353
354 if (pdata->flt) 354 if (pdata->flt)
355 free_irq(gpio_to_irq(pdata->flt), data); 355 free_irq(gpio_to_irq(pdata->flt), data);
diff --git a/drivers/power/max8997_charger.c b/drivers/power/max8997_charger.c
new file mode 100644
index 000000000000..ffc5033ea9c9
--- /dev/null
+++ b/drivers/power/max8997_charger.c
@@ -0,0 +1,207 @@
1/*
2 * max8997_charger.c - Power supply consumer driver for the Maxim 8997/8966
3 *
4 * Copyright (C) 2011 Samsung Electronics
5 * MyungJoo Ham <myungjoo.ham@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/err.h>
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/platform_device.h>
26#include <linux/power_supply.h>
27#include <linux/mfd/max8997.h>
28#include <linux/mfd/max8997-private.h>
29
30struct charger_data {
31 struct device *dev;
32 struct max8997_dev *iodev;
33 struct power_supply battery;
34};
35
36static enum power_supply_property max8997_battery_props[] = {
37 POWER_SUPPLY_PROP_STATUS, /* "FULL" or "NOT FULL" only. */
38 POWER_SUPPLY_PROP_PRESENT, /* the presence of battery */
39 POWER_SUPPLY_PROP_ONLINE, /* charger is active or not */
40};
41
42/* Note that the charger control is done by a current regulator "CHARGER" */
43static int max8997_battery_get_property(struct power_supply *psy,
44 enum power_supply_property psp,
45 union power_supply_propval *val)
46{
47 struct charger_data *charger = container_of(psy,
48 struct charger_data, battery);
49 struct i2c_client *i2c = charger->iodev->i2c;
50 int ret;
51 u8 reg;
52
53 switch (psp) {
54 case POWER_SUPPLY_PROP_STATUS:
55 val->intval = 0;
56 ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
57 if (ret)
58 return ret;
59 if ((reg & (1 << 0)) == 0x1)
60 val->intval = POWER_SUPPLY_STATUS_FULL;
61
62 break;
63 case POWER_SUPPLY_PROP_PRESENT:
64 val->intval = 0;
65 ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
66 if (ret)
67 return ret;
68 if ((reg & (1 << 2)) == 0x0)
69 val->intval = 1;
70
71 break;
72 case POWER_SUPPLY_PROP_ONLINE:
73 val->intval = 0;
74 ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
75 if (ret)
76 return ret;
77 /* DCINOK */
78 if (reg & (1 << 1))
79 val->intval = 1;
80
81 break;
82 default:
83 return -EINVAL;
84 }
85
86 return 0;
87}
88
89static __devinit int max8997_battery_probe(struct platform_device *pdev)
90{
91 int ret = 0;
92 struct charger_data *charger;
93 struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
94 struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
95
96 if (!pdata)
97 return -EINVAL;
98
99 if (pdata->eoc_mA) {
100 u8 val = (pdata->eoc_mA - 50) / 10;
101 if (val < 0)
102 val = 0;
103 if (val > 0xf)
104 val = 0xf;
105
106 ret = max8997_update_reg(iodev->i2c,
107 MAX8997_REG_MBCCTRL5, val, 0xf);
108 if (ret < 0) {
109 dev_err(&pdev->dev, "Cannot use i2c bus.\n");
110 return ret;
111 }
112 }
113
114 switch (pdata->timeout) {
115 case 5:
116 ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
117 0x2 << 4, 0x7 << 4);
118 break;
119 case 6:
120 ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
121 0x3 << 4, 0x7 << 4);
122 break;
123 case 7:
124 ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
125 0x4 << 4, 0x7 << 4);
126 break;
127 case 0:
128 ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
129 0x7 << 4, 0x7 << 4);
130 break;
131 default:
132 dev_err(&pdev->dev, "incorrect timeout value (%d)\n",
133 pdata->timeout);
134 return -EINVAL;
135 }
136 if (ret < 0) {
137 dev_err(&pdev->dev, "Cannot use i2c bus.\n");
138 return ret;
139 }
140
141 charger = kzalloc(sizeof(struct charger_data), GFP_KERNEL);
142 if (charger == NULL) {
143 dev_err(&pdev->dev, "Cannot allocate memory.\n");
144 return -ENOMEM;
145 }
146
147 platform_set_drvdata(pdev, charger);
148
149 charger->battery.name = "max8997_pmic";
150 charger->battery.type = POWER_SUPPLY_TYPE_BATTERY;
151 charger->battery.get_property = max8997_battery_get_property;
152 charger->battery.properties = max8997_battery_props;
153 charger->battery.num_properties = ARRAY_SIZE(max8997_battery_props);
154
155 charger->dev = &pdev->dev;
156 charger->iodev = iodev;
157
158 ret = power_supply_register(&pdev->dev, &charger->battery);
159 if (ret) {
160 dev_err(&pdev->dev, "failed: power supply register\n");
161 goto err;
162 }
163
164 return 0;
165err:
166 kfree(charger);
167 return ret;
168}
169
170static int __devexit max8997_battery_remove(struct platform_device *pdev)
171{
172 struct charger_data *charger = platform_get_drvdata(pdev);
173
174 power_supply_unregister(&charger->battery);
175 kfree(charger);
176 return 0;
177}
178
179static const struct platform_device_id max8997_battery_id[] = {
180 { "max8997-battery", 0 },
181};
182
183static struct platform_driver max8997_battery_driver = {
184 .driver = {
185 .name = "max8997-battery",
186 .owner = THIS_MODULE,
187 },
188 .probe = max8997_battery_probe,
189 .remove = __devexit_p(max8997_battery_remove),
190 .id_table = max8997_battery_id,
191};
192
193static int __init max8997_battery_init(void)
194{
195 return platform_driver_register(&max8997_battery_driver);
196}
197subsys_initcall(max8997_battery_init);
198
199static void __exit max8997_battery_cleanup(void)
200{
201 platform_driver_unregister(&max8997_battery_driver);
202}
203module_exit(max8997_battery_cleanup);
204
205MODULE_DESCRIPTION("MAXIM 8997/8966 battery control driver");
206MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
207MODULE_LICENSE("GPL");
diff --git a/drivers/power/max8998_charger.c b/drivers/power/max8998_charger.c
new file mode 100644
index 000000000000..ef8efadb58cb
--- /dev/null
+++ b/drivers/power/max8998_charger.c
@@ -0,0 +1,219 @@
1/*
2 * max8998_charger.c - Power supply consumer driver for the Maxim 8998/LP3974
3 *
4 * Copyright (C) 2009-2010 Samsung Electronics
5 * MyungJoo Ham <myungjoo.ham@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/err.h>
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/platform_device.h>
26#include <linux/power_supply.h>
27#include <linux/mfd/max8998.h>
28#include <linux/mfd/max8998-private.h>
29
30struct max8998_battery_data {
31 struct device *dev;
32 struct max8998_dev *iodev;
33 struct power_supply battery;
34};
35
36static enum power_supply_property max8998_battery_props[] = {
37 POWER_SUPPLY_PROP_PRESENT, /* the presence of battery */
38 POWER_SUPPLY_PROP_ONLINE, /* charger is active or not */
39};
40
41/* Note that the charger control is done by a current regulator "CHARGER" */
42static int max8998_battery_get_property(struct power_supply *psy,
43 enum power_supply_property psp,
44 union power_supply_propval *val)
45{
46 struct max8998_battery_data *max8998 = container_of(psy,
47 struct max8998_battery_data, battery);
48 struct i2c_client *i2c = max8998->iodev->i2c;
49 int ret;
50 u8 reg;
51
52 switch (psp) {
53 case POWER_SUPPLY_PROP_PRESENT:
54 ret = max8998_read_reg(i2c, MAX8998_REG_STATUS2, &reg);
55 if (ret)
56 return ret;
57 if (reg & (1 << 4))
58 val->intval = 0;
59 else
60 val->intval = 1;
61 break;
62 case POWER_SUPPLY_PROP_ONLINE:
63 ret = max8998_read_reg(i2c, MAX8998_REG_STATUS2, &reg);
64 if (ret)
65 return ret;
66 if (reg & (1 << 3))
67 val->intval = 0;
68 else
69 val->intval = 1;
70 break;
71 default:
72 return -EINVAL;
73 }
74
75 return 0;
76}
77
78static __devinit int max8998_battery_probe(struct platform_device *pdev)
79{
80 struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent);
81 struct max8998_platform_data *pdata = dev_get_platdata(iodev->dev);
82 struct max8998_battery_data *max8998;
83 struct i2c_client *i2c;
84 int ret = 0;
85
86 if (!pdata) {
87 dev_err(pdev->dev.parent, "No platform init data supplied\n");
88 return -ENODEV;
89 }
90
91 max8998 = kzalloc(sizeof(struct max8998_battery_data), GFP_KERNEL);
92 if (!max8998)
93 return -ENOMEM;
94
95 max8998->dev = &pdev->dev;
96 max8998->iodev = iodev;
97 platform_set_drvdata(pdev, max8998);
98 i2c = max8998->iodev->i2c;
99
100 /* Setup "End of Charge" */
101 /* If EOC value equals 0,
102 * remain value set from bootloader or default value */
103 if (pdata->eoc >= 10 && pdata->eoc <= 45) {
104 max8998_update_reg(i2c, MAX8998_REG_CHGR1,
105 (pdata->eoc / 5 - 2) << 5, 0x7 << 5);
106 } else if (pdata->eoc == 0) {
107 dev_dbg(max8998->dev,
108 "EOC value not set: leave it unchanged.\n");
109 } else {
110 dev_err(max8998->dev, "Invalid EOC value\n");
111 ret = -EINVAL;
112 goto err;
113 }
114
115 /* Setup Charge Restart Level */
116 switch (pdata->restart) {
117 case 100:
118 max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x1 << 3, 0x3 << 3);
119 break;
120 case 150:
121 max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x0 << 3, 0x3 << 3);
122 break;
123 case 200:
124 max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x2 << 3, 0x3 << 3);
125 break;
126 case -1:
127 max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x3 << 3, 0x3 << 3);
128 break;
129 case 0:
130 dev_dbg(max8998->dev,
131 "Restart Level not set: leave it unchanged.\n");
132 break;
133 default:
134 dev_err(max8998->dev, "Invalid Restart Level\n");
135 ret = -EINVAL;
136 goto err;
137 }
138
139 /* Setup Charge Full Timeout */
140 switch (pdata->timeout) {
141 case 5:
142 max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x0 << 4, 0x3 << 4);
143 break;
144 case 6:
145 max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x1 << 4, 0x3 << 4);
146 break;
147 case 7:
148 max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x2 << 4, 0x3 << 4);
149 break;
150 case -1:
151 max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x3 << 4, 0x3 << 4);
152 break;
153 case 0:
154 dev_dbg(max8998->dev,
155 "Full Timeout not set: leave it unchanged.\n");
156 default:
157 dev_err(max8998->dev, "Invalid Full Timeout value\n");
158 ret = -EINVAL;
159 goto err;
160 }
161
162 max8998->battery.name = "max8998_pmic";
163 max8998->battery.type = POWER_SUPPLY_TYPE_BATTERY;
164 max8998->battery.get_property = max8998_battery_get_property;
165 max8998->battery.properties = max8998_battery_props;
166 max8998->battery.num_properties = ARRAY_SIZE(max8998_battery_props);
167
168 ret = power_supply_register(max8998->dev, &max8998->battery);
169 if (ret) {
170 dev_err(max8998->dev, "failed: power supply register\n");
171 goto err;
172 }
173
174 return 0;
175err:
176 kfree(max8998);
177 return ret;
178}
179
180static int __devexit max8998_battery_remove(struct platform_device *pdev)
181{
182 struct max8998_battery_data *max8998 = platform_get_drvdata(pdev);
183
184 power_supply_unregister(&max8998->battery);
185 kfree(max8998);
186
187 return 0;
188}
189
190static const struct platform_device_id max8998_battery_id[] = {
191 { "max8998-battery", TYPE_MAX8998 },
192};
193
194static struct platform_driver max8998_battery_driver = {
195 .driver = {
196 .name = "max8998-battery",
197 .owner = THIS_MODULE,
198 },
199 .probe = max8998_battery_probe,
200 .remove = __devexit_p(max8998_battery_remove),
201 .id_table = max8998_battery_id,
202};
203
204static int __init max8998_battery_init(void)
205{
206 return platform_driver_register(&max8998_battery_driver);
207}
208module_init(max8998_battery_init);
209
210static void __exit max8998_battery_cleanup(void)
211{
212 platform_driver_unregister(&max8998_battery_driver);
213}
214module_exit(max8998_battery_cleanup);
215
216MODULE_DESCRIPTION("MAXIM 8998 battery control driver");
217MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
218MODULE_LICENSE("GPL");
219MODULE_ALIAS("platform:max8998-battery");
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c
index d36c289aaef5..d32d0d70f9ba 100644
--- a/drivers/power/s3c_adc_battery.c
+++ b/drivers/power/s3c_adc_battery.c
@@ -20,6 +20,7 @@
20#include <linux/s3c_adc_battery.h> 20#include <linux/s3c_adc_battery.h>
21#include <linux/errno.h> 21#include <linux/errno.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/module.h>
23 24
24#include <plat/adc.h> 25#include <plat/adc.h>
25 26
@@ -266,7 +267,7 @@ static irqreturn_t s3c_adc_bat_charged(int irq, void *dev_id)
266 return IRQ_HANDLED; 267 return IRQ_HANDLED;
267} 268}
268 269
269static int __init s3c_adc_bat_probe(struct platform_device *pdev) 270static int __devinit s3c_adc_bat_probe(struct platform_device *pdev)
270{ 271{
271 struct s3c_adc_client *client; 272 struct s3c_adc_client *client;
272 struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data; 273 struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data;
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index 92c16e1677bd..54b9198fa576 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -62,7 +62,7 @@
62#define TWL4030_MSTATEC_COMPLETE4 0x0e 62#define TWL4030_MSTATEC_COMPLETE4 0x0e
63 63
64static bool allow_usb; 64static bool allow_usb;
65module_param(allow_usb, bool, 1); 65module_param(allow_usb, bool, 0644);
66MODULE_PARM_DESC(allow_usb, "Allow USB charge drawing default current"); 66MODULE_PARM_DESC(allow_usb, "Allow USB charge drawing default current");
67 67
68struct twl4030_bci { 68struct twl4030_bci {
@@ -425,7 +425,7 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
425{ 425{
426 struct twl4030_bci *bci; 426 struct twl4030_bci *bci;
427 int ret; 427 int ret;
428 int reg; 428 u32 reg;
429 429
430 bci = kzalloc(sizeof(*bci), GFP_KERNEL); 430 bci = kzalloc(sizeof(*bci), GFP_KERNEL);
431 if (bci == NULL) 431 if (bci == NULL)
@@ -486,7 +486,7 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
486 } 486 }
487 487
488 /* Enable interrupts now. */ 488 /* Enable interrupts now. */
489 reg = ~(TWL4030_ICHGLOW | TWL4030_ICHGEOC | TWL4030_TBATOR2 | 489 reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_ICHGEOC | TWL4030_TBATOR2 |
490 TWL4030_TBATOR1 | TWL4030_BATSTS); 490 TWL4030_TBATOR1 | TWL4030_BATSTS);
491 ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg, 491 ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
492 TWL4030_INTERRUPTS_BCIIMR1A); 492 TWL4030_INTERRUPTS_BCIIMR1A);
@@ -495,7 +495,7 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
495 goto fail_unmask_interrupts; 495 goto fail_unmask_interrupts;
496 } 496 }
497 497
498 reg = ~(TWL4030_VBATOV | TWL4030_VBUSOV | TWL4030_ACCHGOV); 498 reg = ~(u32)(TWL4030_VBATOV | TWL4030_VBUSOV | TWL4030_ACCHGOV);
499 ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg, 499 ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
500 TWL4030_INTERRUPTS_BCIIMR2A); 500 TWL4030_INTERRUPTS_BCIIMR2A);
501 if (ret < 0) 501 if (ret < 0)
@@ -572,7 +572,7 @@ static void __exit twl4030_bci_exit(void)
572} 572}
573module_exit(twl4030_bci_exit); 573module_exit(twl4030_bci_exit);
574 574
575MODULE_AUTHOR("Gražydas Ignotas"); 575MODULE_AUTHOR("Gražvydas Ignotas");
576MODULE_DESCRIPTION("TWL4030 Battery Charger Interface driver"); 576MODULE_DESCRIPTION("TWL4030 Battery Charger Interface driver");
577MODULE_LICENSE("GPL"); 577MODULE_LICENSE("GPL");
578MODULE_ALIAS("platform:twl4030_bci"); 578MODULE_ALIAS("platform:twl4030_bci");
diff --git a/drivers/power/wm831x_backup.c b/drivers/power/wm831x_backup.c
index 0fd130d80f5d..e648cbea1e6a 100644
--- a/drivers/power/wm831x_backup.c
+++ b/drivers/power/wm831x_backup.c
@@ -22,6 +22,7 @@
22struct wm831x_backup { 22struct wm831x_backup {
23 struct wm831x *wm831x; 23 struct wm831x *wm831x;
24 struct power_supply backup; 24 struct power_supply backup;
25 char name[20];
25}; 26};
26 27
27static int wm831x_backup_read_voltage(struct wm831x *wm831x, 28static int wm831x_backup_read_voltage(struct wm831x *wm831x,
@@ -163,6 +164,7 @@ static enum power_supply_property wm831x_backup_props[] = {
163static __devinit int wm831x_backup_probe(struct platform_device *pdev) 164static __devinit int wm831x_backup_probe(struct platform_device *pdev)
164{ 165{
165 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); 166 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
167 struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
166 struct wm831x_backup *devdata; 168 struct wm831x_backup *devdata;
167 struct power_supply *backup; 169 struct power_supply *backup;
168 int ret; 170 int ret;
@@ -182,7 +184,14 @@ static __devinit int wm831x_backup_probe(struct platform_device *pdev)
182 */ 184 */
183 wm831x_config_backup(wm831x); 185 wm831x_config_backup(wm831x);
184 186
185 backup->name = "wm831x-backup"; 187 if (wm831x_pdata && wm831x_pdata->wm831x_num)
188 snprintf(devdata->name, sizeof(devdata->name),
189 "wm831x-backup.%d", wm831x_pdata->wm831x_num);
190 else
191 snprintf(devdata->name, sizeof(devdata->name),
192 "wm831x-backup");
193
194 backup->name = devdata->name;
186 backup->type = POWER_SUPPLY_TYPE_BATTERY; 195 backup->type = POWER_SUPPLY_TYPE_BATTERY;
187 backup->properties = wm831x_backup_props; 196 backup->properties = wm831x_backup_props;
188 backup->num_properties = ARRAY_SIZE(wm831x_backup_props); 197 backup->num_properties = ARRAY_SIZE(wm831x_backup_props);
@@ -203,6 +212,7 @@ static __devexit int wm831x_backup_remove(struct platform_device *pdev)
203 struct wm831x_backup *devdata = platform_get_drvdata(pdev); 212 struct wm831x_backup *devdata = platform_get_drvdata(pdev);
204 213
205 power_supply_unregister(&devdata->backup); 214 power_supply_unregister(&devdata->backup);
215 kfree(devdata->backup.name);
206 kfree(devdata); 216 kfree(devdata);
207 217
208 return 0; 218 return 0;
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index ddf8cf5f3204..6cc2ca6427f3 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -24,6 +24,9 @@ struct wm831x_power {
24 struct power_supply wall; 24 struct power_supply wall;
25 struct power_supply usb; 25 struct power_supply usb;
26 struct power_supply battery; 26 struct power_supply battery;
27 char wall_name[20];
28 char usb_name[20];
29 char battery_name[20];
27}; 30};
28 31
29static int wm831x_power_check_online(struct wm831x *wm831x, int supply, 32static int wm831x_power_check_online(struct wm831x *wm831x, int supply,
@@ -486,6 +489,7 @@ static irqreturn_t wm831x_pwr_src_irq(int irq, void *data)
486static __devinit int wm831x_power_probe(struct platform_device *pdev) 489static __devinit int wm831x_power_probe(struct platform_device *pdev)
487{ 490{
488 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); 491 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
492 struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
489 struct wm831x_power *power; 493 struct wm831x_power *power;
490 struct power_supply *usb; 494 struct power_supply *usb;
491 struct power_supply *battery; 495 struct power_supply *battery;
@@ -503,12 +507,28 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
503 battery = &power->battery; 507 battery = &power->battery;
504 wall = &power->wall; 508 wall = &power->wall;
505 509
510 if (wm831x_pdata && wm831x_pdata->wm831x_num) {
511 snprintf(power->wall_name, sizeof(power->wall_name),
512 "wm831x-wall.%d", wm831x_pdata->wm831x_num);
513 snprintf(power->battery_name, sizeof(power->wall_name),
514 "wm831x-battery.%d", wm831x_pdata->wm831x_num);
515 snprintf(power->usb_name, sizeof(power->wall_name),
516 "wm831x-usb.%d", wm831x_pdata->wm831x_num);
517 } else {
518 snprintf(power->wall_name, sizeof(power->wall_name),
519 "wm831x-wall");
520 snprintf(power->battery_name, sizeof(power->wall_name),
521 "wm831x-battery");
522 snprintf(power->usb_name, sizeof(power->wall_name),
523 "wm831x-usb");
524 }
525
506 /* We ignore configuration failures since we can still read back 526 /* We ignore configuration failures since we can still read back
507 * the status without enabling the charger. 527 * the status without enabling the charger.
508 */ 528 */
509 wm831x_config_battery(wm831x); 529 wm831x_config_battery(wm831x);
510 530
511 wall->name = "wm831x-wall"; 531 wall->name = power->wall_name;
512 wall->type = POWER_SUPPLY_TYPE_MAINS; 532 wall->type = POWER_SUPPLY_TYPE_MAINS;
513 wall->properties = wm831x_wall_props; 533 wall->properties = wm831x_wall_props;
514 wall->num_properties = ARRAY_SIZE(wm831x_wall_props); 534 wall->num_properties = ARRAY_SIZE(wm831x_wall_props);
@@ -517,7 +537,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
517 if (ret) 537 if (ret)
518 goto err_kmalloc; 538 goto err_kmalloc;
519 539
520 battery->name = "wm831x-battery"; 540 battery->name = power->battery_name;
521 battery->properties = wm831x_bat_props; 541 battery->properties = wm831x_bat_props;
522 battery->num_properties = ARRAY_SIZE(wm831x_bat_props); 542 battery->num_properties = ARRAY_SIZE(wm831x_bat_props);
523 battery->get_property = wm831x_bat_get_prop; 543 battery->get_property = wm831x_bat_get_prop;
@@ -526,7 +546,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
526 if (ret) 546 if (ret)
527 goto err_wall; 547 goto err_wall;
528 548
529 usb->name = "wm831x-usb", 549 usb->name = power->usb_name,
530 usb->type = POWER_SUPPLY_TYPE_USB; 550 usb->type = POWER_SUPPLY_TYPE_USB;
531 usb->properties = wm831x_usb_props; 551 usb->properties = wm831x_usb_props;
532 usb->num_properties = ARRAY_SIZE(wm831x_usb_props); 552 usb->num_properties = ARRAY_SIZE(wm831x_usb_props);
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index ee893581d4b7..ebe77dd87daf 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -505,8 +505,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
505 rdev->dev.dma_mask = &rdev->dma_mask; 505 rdev->dev.dma_mask = &rdev->dma_mask;
506 rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 506 rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
507 507
508 if ((rdev->pef & RIO_PEF_INB_DOORBELL) && 508 if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
509 (rdev->dst_ops & RIO_DST_OPS_DOORBELL))
510 rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], 509 rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
511 0, 0xffff); 510 0, 0xffff);
512 511
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 118eb213eb3a..c7fd2c0e3f2b 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -249,6 +249,12 @@ config REGULATOR_TPS6507X
249 three step-down converters and two general-purpose LDO voltage regulators. 249 three step-down converters and two general-purpose LDO voltage regulators.
250 It supports TI's software based Class-2 SmartReflex implementation. 250 It supports TI's software based Class-2 SmartReflex implementation.
251 251
252config REGULATOR_TPS65912
253 tristate "TI TPS65912 Power regulator"
254 depends on (MFD_TPS65912_I2C || MFD_TPS65912_SPI)
255 help
256 This driver supports TPS65912 voltage regulator chip.
257
252config REGULATOR_88PM8607 258config REGULATOR_88PM8607
253 bool "Marvell 88PM8607 Power regulators" 259 bool "Marvell 88PM8607 Power regulators"
254 depends on MFD_88PM860X=y 260 depends on MFD_88PM860X=y
@@ -304,5 +310,12 @@ config REGULATOR_TPS65910
304 help 310 help
305 This driver supports TPS65910 voltage regulator chips. 311 This driver supports TPS65910 voltage regulator chips.
306 312
313config REGULATOR_AAT2870
314 tristate "AnalogicTech AAT2870 Regulators"
315 depends on MFD_AAT2870_CORE
316 help
317 If you have a AnalogicTech AAT2870 say Y to enable the
318 regulator driver.
319
307endif 320endif
308 321
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 3932d2ec38f3..040d5aa63535 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -38,10 +38,12 @@ obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
38obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o 38obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
39obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o 39obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
40obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o 40obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
41obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
41obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o 42obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
42obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o 43obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
43obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o 44obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
44obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o 45obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
45obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o 46obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
47obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
46 48
47ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG 49ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
new file mode 100644
index 000000000000..cd4104542f0d
--- /dev/null
+++ b/drivers/regulator/aat2870-regulator.c
@@ -0,0 +1,232 @@
1/*
2 * linux/drivers/regulator/aat2870-regulator.c
3 *
4 * Copyright (c) 2011, NVIDIA Corporation.
5 * Author: Jin Park <jinyoungp@nvidia.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 */
21
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/err.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
27#include <linux/platform_device.h>
28#include <linux/regulator/driver.h>
29#include <linux/regulator/machine.h>
30#include <linux/mfd/aat2870.h>
31
32struct aat2870_regulator {
33 struct platform_device *pdev;
34 struct regulator_desc desc;
35
36 const int *voltages; /* uV */
37
38 int min_uV;
39 int max_uV;
40
41 u8 enable_addr;
42 u8 enable_shift;
43 u8 enable_mask;
44
45 u8 voltage_addr;
46 u8 voltage_shift;
47 u8 voltage_mask;
48};
49
50static int aat2870_ldo_list_voltage(struct regulator_dev *rdev,
51 unsigned selector)
52{
53 struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
54
55 return ri->voltages[selector];
56}
57
58static int aat2870_ldo_set_voltage_sel(struct regulator_dev *rdev,
59 unsigned selector)
60{
61 struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
62 struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
63
64 return aat2870->update(aat2870, ri->voltage_addr, ri->voltage_mask,
65 (selector << ri->voltage_shift) & ri->voltage_mask);
66}
67
68static int aat2870_ldo_get_voltage_sel(struct regulator_dev *rdev)
69{
70 struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
71 struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
72 u8 val;
73 int ret;
74
75 ret = aat2870->read(aat2870, ri->voltage_addr, &val);
76 if (ret)
77 return ret;
78
79 return (val & ri->voltage_mask) >> ri->voltage_shift;
80}
81
82static int aat2870_ldo_enable(struct regulator_dev *rdev)
83{
84 struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
85 struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
86
87 return aat2870->update(aat2870, ri->enable_addr, ri->enable_mask,
88 ri->enable_mask);
89}
90
91static int aat2870_ldo_disable(struct regulator_dev *rdev)
92{
93 struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
94 struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
95
96 return aat2870->update(aat2870, ri->enable_addr, ri->enable_mask, 0);
97}
98
99static int aat2870_ldo_is_enabled(struct regulator_dev *rdev)
100{
101 struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
102 struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
103 u8 val;
104 int ret;
105
106 ret = aat2870->read(aat2870, ri->enable_addr, &val);
107 if (ret)
108 return ret;
109
110 return val & ri->enable_mask ? 1 : 0;
111}
112
113static struct regulator_ops aat2870_ldo_ops = {
114 .list_voltage = aat2870_ldo_list_voltage,
115 .set_voltage_sel = aat2870_ldo_set_voltage_sel,
116 .get_voltage_sel = aat2870_ldo_get_voltage_sel,
117 .enable = aat2870_ldo_enable,
118 .disable = aat2870_ldo_disable,
119 .is_enabled = aat2870_ldo_is_enabled,
120};
121
122static const int aat2870_ldo_voltages[] = {
123 1200000, 1300000, 1500000, 1600000,
124 1800000, 2000000, 2200000, 2500000,
125 2600000, 2700000, 2800000, 2900000,
126 3000000, 3100000, 3200000, 3300000,
127};
128
129#define AAT2870_LDO(ids) \
130 { \
131 .desc = { \
132 .name = #ids, \
133 .id = AAT2870_ID_##ids, \
134 .n_voltages = ARRAY_SIZE(aat2870_ldo_voltages), \
135 .ops = &aat2870_ldo_ops, \
136 .type = REGULATOR_VOLTAGE, \
137 .owner = THIS_MODULE, \
138 }, \
139 .voltages = aat2870_ldo_voltages, \
140 .min_uV = 1200000, \
141 .max_uV = 3300000, \
142 }
143
144static struct aat2870_regulator aat2870_regulators[] = {
145 AAT2870_LDO(LDOA),
146 AAT2870_LDO(LDOB),
147 AAT2870_LDO(LDOC),
148 AAT2870_LDO(LDOD),
149};
150
151static struct aat2870_regulator *aat2870_get_regulator(int id)
152{
153 struct aat2870_regulator *ri = NULL;
154 int i;
155
156 for (i = 0; i < ARRAY_SIZE(aat2870_regulators); i++) {
157 ri = &aat2870_regulators[i];
158 if (ri->desc.id == id)
159 break;
160 }
161
162 if (!ri)
163 return NULL;
164
165 ri->enable_addr = AAT2870_LDO_EN;
166 ri->enable_shift = id - AAT2870_ID_LDOA;
167 ri->enable_mask = 0x1 << ri->enable_shift;
168
169 ri->voltage_addr = (id - AAT2870_ID_LDOA) / 2 ?
170 AAT2870_LDO_CD : AAT2870_LDO_AB;
171 ri->voltage_shift = (id - AAT2870_ID_LDOA) % 2 ? 0 : 4;
172 ri->voltage_mask = 0xF << ri->voltage_shift;
173
174 return ri;
175}
176
177static int aat2870_regulator_probe(struct platform_device *pdev)
178{
179 struct aat2870_regulator *ri;
180 struct regulator_dev *rdev;
181
182 ri = aat2870_get_regulator(pdev->id);
183 if (!ri) {
184 dev_err(&pdev->dev, "Invalid device ID, %d\n", pdev->id);
185 return -EINVAL;
186 }
187 ri->pdev = pdev;
188
189 rdev = regulator_register(&ri->desc, &pdev->dev,
190 pdev->dev.platform_data, ri);
191 if (IS_ERR(rdev)) {
192 dev_err(&pdev->dev, "Failed to register regulator %s\n",
193 ri->desc.name);
194 return PTR_ERR(rdev);
195 }
196 platform_set_drvdata(pdev, rdev);
197
198 return 0;
199}
200
201static int __devexit aat2870_regulator_remove(struct platform_device *pdev)
202{
203 struct regulator_dev *rdev = platform_get_drvdata(pdev);
204
205 regulator_unregister(rdev);
206 return 0;
207}
208
209static struct platform_driver aat2870_regulator_driver = {
210 .driver = {
211 .name = "aat2870-regulator",
212 .owner = THIS_MODULE,
213 },
214 .probe = aat2870_regulator_probe,
215 .remove = __devexit_p(aat2870_regulator_remove),
216};
217
218static int __init aat2870_regulator_init(void)
219{
220 return platform_driver_register(&aat2870_regulator_driver);
221}
222subsys_initcall(aat2870_regulator_init);
223
224static void __exit aat2870_regulator_exit(void)
225{
226 platform_driver_unregister(&aat2870_regulator_driver);
227}
228module_exit(aat2870_regulator_exit);
229
230MODULE_DESCRIPTION("AnalogicTech AAT2870 Regulator");
231MODULE_LICENSE("GPL");
232MODULE_AUTHOR("Jin Park <jinyoungp@nvidia.com>");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d3e38790906e..d8e6a429e8ba 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -20,6 +20,7 @@
20#include <linux/debugfs.h> 20#include <linux/debugfs.h>
21#include <linux/device.h> 21#include <linux/device.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/async.h>
23#include <linux/err.h> 24#include <linux/err.h>
24#include <linux/mutex.h> 25#include <linux/mutex.h>
25#include <linux/suspend.h> 26#include <linux/suspend.h>
@@ -33,6 +34,8 @@
33 34
34#include "dummy.h" 35#include "dummy.h"
35 36
37#define rdev_crit(rdev, fmt, ...) \
38 pr_crit("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
36#define rdev_err(rdev, fmt, ...) \ 39#define rdev_err(rdev, fmt, ...) \
37 pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) 40 pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
38#define rdev_warn(rdev, fmt, ...) \ 41#define rdev_warn(rdev, fmt, ...) \
@@ -78,11 +81,13 @@ struct regulator {
78 char *supply_name; 81 char *supply_name;
79 struct device_attribute dev_attr; 82 struct device_attribute dev_attr;
80 struct regulator_dev *rdev; 83 struct regulator_dev *rdev;
84#ifdef CONFIG_DEBUG_FS
85 struct dentry *debugfs;
86#endif
81}; 87};
82 88
83static int _regulator_is_enabled(struct regulator_dev *rdev); 89static int _regulator_is_enabled(struct regulator_dev *rdev);
84static int _regulator_disable(struct regulator_dev *rdev, 90static int _regulator_disable(struct regulator_dev *rdev);
85 struct regulator_dev **supply_rdev_ptr);
86static int _regulator_get_voltage(struct regulator_dev *rdev); 91static int _regulator_get_voltage(struct regulator_dev *rdev);
87static int _regulator_get_current_limit(struct regulator_dev *rdev); 92static int _regulator_get_current_limit(struct regulator_dev *rdev);
88static unsigned int _regulator_get_mode(struct regulator_dev *rdev); 93static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
@@ -90,6 +95,9 @@ static void _notifier_call_chain(struct regulator_dev *rdev,
90 unsigned long event, void *data); 95 unsigned long event, void *data);
91static int _regulator_do_set_voltage(struct regulator_dev *rdev, 96static int _regulator_do_set_voltage(struct regulator_dev *rdev,
92 int min_uV, int max_uV); 97 int min_uV, int max_uV);
98static struct regulator *create_regulator(struct regulator_dev *rdev,
99 struct device *dev,
100 const char *supply_name);
93 101
94static const char *rdev_get_name(struct regulator_dev *rdev) 102static const char *rdev_get_name(struct regulator_dev *rdev)
95{ 103{
@@ -143,8 +151,11 @@ static int regulator_check_voltage(struct regulator_dev *rdev,
143 if (*min_uV < rdev->constraints->min_uV) 151 if (*min_uV < rdev->constraints->min_uV)
144 *min_uV = rdev->constraints->min_uV; 152 *min_uV = rdev->constraints->min_uV;
145 153
146 if (*min_uV > *max_uV) 154 if (*min_uV > *max_uV) {
155 rdev_err(rdev, "unsupportable voltage range: %d-%duV\n",
156 *min_uV, *max_uV);
147 return -EINVAL; 157 return -EINVAL;
158 }
148 159
149 return 0; 160 return 0;
150} 161}
@@ -197,8 +208,11 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
197 if (*min_uA < rdev->constraints->min_uA) 208 if (*min_uA < rdev->constraints->min_uA)
198 *min_uA = rdev->constraints->min_uA; 209 *min_uA = rdev->constraints->min_uA;
199 210
200 if (*min_uA > *max_uA) 211 if (*min_uA > *max_uA) {
212 rdev_err(rdev, "unsupportable current range: %d-%duA\n",
213 *min_uA, *max_uA);
201 return -EINVAL; 214 return -EINVAL;
215 }
202 216
203 return 0; 217 return 0;
204} 218}
@@ -213,6 +227,7 @@ static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
213 case REGULATOR_MODE_STANDBY: 227 case REGULATOR_MODE_STANDBY:
214 break; 228 break;
215 default: 229 default:
230 rdev_err(rdev, "invalid mode %x specified\n", *mode);
216 return -EINVAL; 231 return -EINVAL;
217 } 232 }
218 233
@@ -779,7 +794,6 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
779 if (ret < 0) { 794 if (ret < 0) {
780 rdev_err(rdev, "failed to apply %duV constraint\n", 795 rdev_err(rdev, "failed to apply %duV constraint\n",
781 rdev->constraints->min_uV); 796 rdev->constraints->min_uV);
782 rdev->constraints = NULL;
783 return ret; 797 return ret;
784 } 798 }
785 } 799 }
@@ -882,7 +896,6 @@ static int set_machine_constraints(struct regulator_dev *rdev,
882 ret = suspend_prepare(rdev, rdev->constraints->initial_state); 896 ret = suspend_prepare(rdev, rdev->constraints->initial_state);
883 if (ret < 0) { 897 if (ret < 0) {
884 rdev_err(rdev, "failed to set suspend state\n"); 898 rdev_err(rdev, "failed to set suspend state\n");
885 rdev->constraints = NULL;
886 goto out; 899 goto out;
887 } 900 }
888 } 901 }
@@ -909,13 +922,15 @@ static int set_machine_constraints(struct regulator_dev *rdev,
909 ret = ops->enable(rdev); 922 ret = ops->enable(rdev);
910 if (ret < 0) { 923 if (ret < 0) {
911 rdev_err(rdev, "failed to enable\n"); 924 rdev_err(rdev, "failed to enable\n");
912 rdev->constraints = NULL;
913 goto out; 925 goto out;
914 } 926 }
915 } 927 }
916 928
917 print_constraints(rdev); 929 print_constraints(rdev);
930 return 0;
918out: 931out:
932 kfree(rdev->constraints);
933 rdev->constraints = NULL;
919 return ret; 934 return ret;
920} 935}
921 936
@@ -929,21 +944,20 @@ out:
929 * core if it's child is enabled. 944 * core if it's child is enabled.
930 */ 945 */
931static int set_supply(struct regulator_dev *rdev, 946static int set_supply(struct regulator_dev *rdev,
932 struct regulator_dev *supply_rdev) 947 struct regulator_dev *supply_rdev)
933{ 948{
934 int err; 949 int err;
935 950
936 err = sysfs_create_link(&rdev->dev.kobj, &supply_rdev->dev.kobj, 951 rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
937 "supply"); 952
938 if (err) { 953 rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
939 rdev_err(rdev, "could not add device link %s err %d\n", 954 if (IS_ERR(rdev->supply)) {
940 supply_rdev->dev.kobj.name, err); 955 err = PTR_ERR(rdev->supply);
941 goto out; 956 rdev->supply = NULL;
957 return err;
942 } 958 }
943 rdev->supply = supply_rdev; 959
944 list_add(&rdev->slist, &supply_rdev->supply_list); 960 return 0;
945out:
946 return err;
947} 961}
948 962
949/** 963/**
@@ -1032,7 +1046,7 @@ static void unset_regulator_supplies(struct regulator_dev *rdev)
1032 } 1046 }
1033} 1047}
1034 1048
1035#define REG_STR_SIZE 32 1049#define REG_STR_SIZE 64
1036 1050
1037static struct regulator *create_regulator(struct regulator_dev *rdev, 1051static struct regulator *create_regulator(struct regulator_dev *rdev,
1038 struct device *dev, 1052 struct device *dev,
@@ -1052,8 +1066,9 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
1052 1066
1053 if (dev) { 1067 if (dev) {
1054 /* create a 'requested_microamps_name' sysfs entry */ 1068 /* create a 'requested_microamps_name' sysfs entry */
1055 size = scnprintf(buf, REG_STR_SIZE, "microamps_requested_%s", 1069 size = scnprintf(buf, REG_STR_SIZE,
1056 supply_name); 1070 "microamps_requested_%s-%s",
1071 dev_name(dev), supply_name);
1057 if (size >= REG_STR_SIZE) 1072 if (size >= REG_STR_SIZE)
1058 goto overflow_err; 1073 goto overflow_err;
1059 1074
@@ -1088,7 +1103,28 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
1088 dev->kobj.name, err); 1103 dev->kobj.name, err);
1089 goto link_name_err; 1104 goto link_name_err;
1090 } 1105 }
1106 } else {
1107 regulator->supply_name = kstrdup(supply_name, GFP_KERNEL);
1108 if (regulator->supply_name == NULL)
1109 goto attr_err;
1110 }
1111
1112#ifdef CONFIG_DEBUG_FS
1113 regulator->debugfs = debugfs_create_dir(regulator->supply_name,
1114 rdev->debugfs);
1115 if (IS_ERR_OR_NULL(regulator->debugfs)) {
1116 rdev_warn(rdev, "Failed to create debugfs directory\n");
1117 regulator->debugfs = NULL;
1118 } else {
1119 debugfs_create_u32("uA_load", 0444, regulator->debugfs,
1120 &regulator->uA_load);
1121 debugfs_create_u32("min_uV", 0444, regulator->debugfs,
1122 &regulator->min_uV);
1123 debugfs_create_u32("max_uV", 0444, regulator->debugfs,
1124 &regulator->max_uV);
1091 } 1125 }
1126#endif
1127
1092 mutex_unlock(&rdev->mutex); 1128 mutex_unlock(&rdev->mutex);
1093 return regulator; 1129 return regulator;
1094link_name_err: 1130link_name_err:
@@ -1267,13 +1303,17 @@ void regulator_put(struct regulator *regulator)
1267 mutex_lock(&regulator_list_mutex); 1303 mutex_lock(&regulator_list_mutex);
1268 rdev = regulator->rdev; 1304 rdev = regulator->rdev;
1269 1305
1306#ifdef CONFIG_DEBUG_FS
1307 debugfs_remove_recursive(regulator->debugfs);
1308#endif
1309
1270 /* remove any sysfs entries */ 1310 /* remove any sysfs entries */
1271 if (regulator->dev) { 1311 if (regulator->dev) {
1272 sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); 1312 sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
1273 kfree(regulator->supply_name);
1274 device_remove_file(regulator->dev, &regulator->dev_attr); 1313 device_remove_file(regulator->dev, &regulator->dev_attr);
1275 kfree(regulator->dev_attr.attr.name); 1314 kfree(regulator->dev_attr.attr.name);
1276 } 1315 }
1316 kfree(regulator->supply_name);
1277 list_del(&regulator->list); 1317 list_del(&regulator->list);
1278 kfree(regulator); 1318 kfree(regulator);
1279 1319
@@ -1301,19 +1341,6 @@ static int _regulator_enable(struct regulator_dev *rdev)
1301{ 1341{
1302 int ret, delay; 1342 int ret, delay;
1303 1343
1304 if (rdev->use_count == 0) {
1305 /* do we need to enable the supply regulator first */
1306 if (rdev->supply) {
1307 mutex_lock(&rdev->supply->mutex);
1308 ret = _regulator_enable(rdev->supply);
1309 mutex_unlock(&rdev->supply->mutex);
1310 if (ret < 0) {
1311 rdev_err(rdev, "failed to enable: %d\n", ret);
1312 return ret;
1313 }
1314 }
1315 }
1316
1317 /* check voltage and requested load before enabling */ 1344 /* check voltage and requested load before enabling */
1318 if (rdev->constraints && 1345 if (rdev->constraints &&
1319 (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) 1346 (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS))
@@ -1388,19 +1415,27 @@ int regulator_enable(struct regulator *regulator)
1388 struct regulator_dev *rdev = regulator->rdev; 1415 struct regulator_dev *rdev = regulator->rdev;
1389 int ret = 0; 1416 int ret = 0;
1390 1417
1418 if (rdev->supply) {
1419 ret = regulator_enable(rdev->supply);
1420 if (ret != 0)
1421 return ret;
1422 }
1423
1391 mutex_lock(&rdev->mutex); 1424 mutex_lock(&rdev->mutex);
1392 ret = _regulator_enable(rdev); 1425 ret = _regulator_enable(rdev);
1393 mutex_unlock(&rdev->mutex); 1426 mutex_unlock(&rdev->mutex);
1427
1428 if (ret != 0)
1429 regulator_disable(rdev->supply);
1430
1394 return ret; 1431 return ret;
1395} 1432}
1396EXPORT_SYMBOL_GPL(regulator_enable); 1433EXPORT_SYMBOL_GPL(regulator_enable);
1397 1434
1398/* locks held by regulator_disable() */ 1435/* locks held by regulator_disable() */
1399static int _regulator_disable(struct regulator_dev *rdev, 1436static int _regulator_disable(struct regulator_dev *rdev)
1400 struct regulator_dev **supply_rdev_ptr)
1401{ 1437{
1402 int ret = 0; 1438 int ret = 0;
1403 *supply_rdev_ptr = NULL;
1404 1439
1405 if (WARN(rdev->use_count <= 0, 1440 if (WARN(rdev->use_count <= 0,
1406 "unbalanced disables for %s\n", rdev_get_name(rdev))) 1441 "unbalanced disables for %s\n", rdev_get_name(rdev)))
@@ -1427,9 +1462,6 @@ static int _regulator_disable(struct regulator_dev *rdev,
1427 NULL); 1462 NULL);
1428 } 1463 }
1429 1464
1430 /* decrease our supplies ref count and disable if required */
1431 *supply_rdev_ptr = rdev->supply;
1432
1433 rdev->use_count = 0; 1465 rdev->use_count = 0;
1434 } else if (rdev->use_count > 1) { 1466 } else if (rdev->use_count > 1) {
1435 1467
@@ -1440,6 +1472,7 @@ static int _regulator_disable(struct regulator_dev *rdev,
1440 1472
1441 rdev->use_count--; 1473 rdev->use_count--;
1442 } 1474 }
1475
1443 return ret; 1476 return ret;
1444} 1477}
1445 1478
@@ -1458,29 +1491,21 @@ static int _regulator_disable(struct regulator_dev *rdev,
1458int regulator_disable(struct regulator *regulator) 1491int regulator_disable(struct regulator *regulator)
1459{ 1492{
1460 struct regulator_dev *rdev = regulator->rdev; 1493 struct regulator_dev *rdev = regulator->rdev;
1461 struct regulator_dev *supply_rdev = NULL;
1462 int ret = 0; 1494 int ret = 0;
1463 1495
1464 mutex_lock(&rdev->mutex); 1496 mutex_lock(&rdev->mutex);
1465 ret = _regulator_disable(rdev, &supply_rdev); 1497 ret = _regulator_disable(rdev);
1466 mutex_unlock(&rdev->mutex); 1498 mutex_unlock(&rdev->mutex);
1467 1499
1468 /* decrease our supplies ref count and disable if required */ 1500 if (ret == 0 && rdev->supply)
1469 while (supply_rdev != NULL) { 1501 regulator_disable(rdev->supply);
1470 rdev = supply_rdev;
1471
1472 mutex_lock(&rdev->mutex);
1473 _regulator_disable(rdev, &supply_rdev);
1474 mutex_unlock(&rdev->mutex);
1475 }
1476 1502
1477 return ret; 1503 return ret;
1478} 1504}
1479EXPORT_SYMBOL_GPL(regulator_disable); 1505EXPORT_SYMBOL_GPL(regulator_disable);
1480 1506
1481/* locks held by regulator_force_disable() */ 1507/* locks held by regulator_force_disable() */
1482static int _regulator_force_disable(struct regulator_dev *rdev, 1508static int _regulator_force_disable(struct regulator_dev *rdev)
1483 struct regulator_dev **supply_rdev_ptr)
1484{ 1509{
1485 int ret = 0; 1510 int ret = 0;
1486 1511
@@ -1497,10 +1522,6 @@ static int _regulator_force_disable(struct regulator_dev *rdev,
1497 REGULATOR_EVENT_DISABLE, NULL); 1522 REGULATOR_EVENT_DISABLE, NULL);
1498 } 1523 }
1499 1524
1500 /* decrease our supplies ref count and disable if required */
1501 *supply_rdev_ptr = rdev->supply;
1502
1503 rdev->use_count = 0;
1504 return ret; 1525 return ret;
1505} 1526}
1506 1527
@@ -1516,16 +1537,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev,
1516int regulator_force_disable(struct regulator *regulator) 1537int regulator_force_disable(struct regulator *regulator)
1517{ 1538{
1518 struct regulator_dev *rdev = regulator->rdev; 1539 struct regulator_dev *rdev = regulator->rdev;
1519 struct regulator_dev *supply_rdev = NULL;
1520 int ret; 1540 int ret;
1521 1541
1522 mutex_lock(&rdev->mutex); 1542 mutex_lock(&rdev->mutex);
1523 regulator->uA_load = 0; 1543 regulator->uA_load = 0;
1524 ret = _regulator_force_disable(rdev, &supply_rdev); 1544 ret = _regulator_force_disable(regulator->rdev);
1525 mutex_unlock(&rdev->mutex); 1545 mutex_unlock(&rdev->mutex);
1526 1546
1527 if (supply_rdev) 1547 if (rdev->supply)
1528 regulator_disable(get_device_regulator(rdev_get_dev(supply_rdev))); 1548 while (rdev->open_count--)
1549 regulator_disable(rdev->supply);
1529 1550
1530 return ret; 1551 return ret;
1531} 1552}
@@ -2136,7 +2157,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
2136 /* get input voltage */ 2157 /* get input voltage */
2137 input_uV = 0; 2158 input_uV = 0;
2138 if (rdev->supply) 2159 if (rdev->supply)
2139 input_uV = _regulator_get_voltage(rdev->supply); 2160 input_uV = regulator_get_voltage(rdev->supply);
2140 if (input_uV <= 0) 2161 if (input_uV <= 0)
2141 input_uV = rdev->constraints->input_uV; 2162 input_uV = rdev->constraints->input_uV;
2142 if (input_uV <= 0) { 2163 if (input_uV <= 0) {
@@ -2206,17 +2227,8 @@ EXPORT_SYMBOL_GPL(regulator_unregister_notifier);
2206static void _notifier_call_chain(struct regulator_dev *rdev, 2227static void _notifier_call_chain(struct regulator_dev *rdev,
2207 unsigned long event, void *data) 2228 unsigned long event, void *data)
2208{ 2229{
2209 struct regulator_dev *_rdev;
2210
2211 /* call rdev chain first */ 2230 /* call rdev chain first */
2212 blocking_notifier_call_chain(&rdev->notifier, event, NULL); 2231 blocking_notifier_call_chain(&rdev->notifier, event, NULL);
2213
2214 /* now notify regulator we supply */
2215 list_for_each_entry(_rdev, &rdev->supply_list, slist) {
2216 mutex_lock(&_rdev->mutex);
2217 _notifier_call_chain(_rdev, event, data);
2218 mutex_unlock(&_rdev->mutex);
2219 }
2220} 2232}
2221 2233
2222/** 2234/**
@@ -2264,6 +2276,13 @@ err:
2264} 2276}
2265EXPORT_SYMBOL_GPL(regulator_bulk_get); 2277EXPORT_SYMBOL_GPL(regulator_bulk_get);
2266 2278
2279static void regulator_bulk_enable_async(void *data, async_cookie_t cookie)
2280{
2281 struct regulator_bulk_data *bulk = data;
2282
2283 bulk->ret = regulator_enable(bulk->consumer);
2284}
2285
2267/** 2286/**
2268 * regulator_bulk_enable - enable multiple regulator consumers 2287 * regulator_bulk_enable - enable multiple regulator consumers
2269 * 2288 *
@@ -2279,21 +2298,33 @@ EXPORT_SYMBOL_GPL(regulator_bulk_get);
2279int regulator_bulk_enable(int num_consumers, 2298int regulator_bulk_enable(int num_consumers,
2280 struct regulator_bulk_data *consumers) 2299 struct regulator_bulk_data *consumers)
2281{ 2300{
2301 LIST_HEAD(async_domain);
2282 int i; 2302 int i;
2283 int ret; 2303 int ret = 0;
2304
2305 for (i = 0; i < num_consumers; i++)
2306 async_schedule_domain(regulator_bulk_enable_async,
2307 &consumers[i], &async_domain);
2308
2309 async_synchronize_full_domain(&async_domain);
2284 2310
2311 /* If any consumer failed we need to unwind any that succeeded */
2285 for (i = 0; i < num_consumers; i++) { 2312 for (i = 0; i < num_consumers; i++) {
2286 ret = regulator_enable(consumers[i].consumer); 2313 if (consumers[i].ret != 0) {
2287 if (ret != 0) 2314 ret = consumers[i].ret;
2288 goto err; 2315 goto err;
2316 }
2289 } 2317 }
2290 2318
2291 return 0; 2319 return 0;
2292 2320
2293err: 2321err:
2294 pr_err("Failed to enable %s: %d\n", consumers[i].supply, ret); 2322 for (i = 0; i < num_consumers; i++)
2295 for (--i; i >= 0; --i) 2323 if (consumers[i].ret == 0)
2296 regulator_disable(consumers[i].consumer); 2324 regulator_disable(consumers[i].consumer);
2325 else
2326 pr_err("Failed to enable %s: %d\n",
2327 consumers[i].supply, consumers[i].ret);
2297 2328
2298 return ret; 2329 return ret;
2299} 2330}
@@ -2589,9 +2620,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
2589 rdev->owner = regulator_desc->owner; 2620 rdev->owner = regulator_desc->owner;
2590 rdev->desc = regulator_desc; 2621 rdev->desc = regulator_desc;
2591 INIT_LIST_HEAD(&rdev->consumer_list); 2622 INIT_LIST_HEAD(&rdev->consumer_list);
2592 INIT_LIST_HEAD(&rdev->supply_list);
2593 INIT_LIST_HEAD(&rdev->list); 2623 INIT_LIST_HEAD(&rdev->list);
2594 INIT_LIST_HEAD(&rdev->slist);
2595 BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier); 2624 BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier);
2596 2625
2597 /* preform any regulator specific init */ 2626 /* preform any regulator specific init */
@@ -2672,6 +2701,7 @@ unset_supplies:
2672 unset_regulator_supplies(rdev); 2701 unset_regulator_supplies(rdev);
2673 2702
2674scrub: 2703scrub:
2704 kfree(rdev->constraints);
2675 device_unregister(&rdev->dev); 2705 device_unregister(&rdev->dev);
2676 /* device core frees rdev */ 2706 /* device core frees rdev */
2677 rdev = ERR_PTR(ret); 2707 rdev = ERR_PTR(ret);
@@ -2703,7 +2733,7 @@ void regulator_unregister(struct regulator_dev *rdev)
2703 unset_regulator_supplies(rdev); 2733 unset_regulator_supplies(rdev);
2704 list_del(&rdev->list); 2734 list_del(&rdev->list);
2705 if (rdev->supply) 2735 if (rdev->supply)
2706 sysfs_remove_link(&rdev->dev.kobj, "supply"); 2736 regulator_put(rdev->supply);
2707 device_unregister(&rdev->dev); 2737 device_unregister(&rdev->dev);
2708 kfree(rdev->constraints); 2738 kfree(rdev->constraints);
2709 mutex_unlock(&regulator_list_mutex); 2739 mutex_unlock(&regulator_list_mutex);
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index c7410bde7b5d..f6ef6694ab98 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -36,6 +36,29 @@ static struct regulator_desc dummy_desc = {
36 .ops = &dummy_ops, 36 .ops = &dummy_ops,
37}; 37};
38 38
39static int __devinit dummy_regulator_probe(struct platform_device *pdev)
40{
41 int ret;
42
43 dummy_regulator_rdev = regulator_register(&dummy_desc, NULL,
44 &dummy_initdata, NULL);
45 if (IS_ERR(dummy_regulator_rdev)) {
46 ret = PTR_ERR(dummy_regulator_rdev);
47 pr_err("Failed to register regulator: %d\n", ret);
48 return ret;
49 }
50
51 return 0;
52}
53
54static struct platform_driver dummy_regulator_driver = {
55 .probe = dummy_regulator_probe,
56 .driver = {
57 .name = "reg-dummy",
58 .owner = THIS_MODULE,
59 },
60};
61
39static struct platform_device *dummy_pdev; 62static struct platform_device *dummy_pdev;
40 63
41void __init regulator_dummy_init(void) 64void __init regulator_dummy_init(void)
@@ -55,12 +78,9 @@ void __init regulator_dummy_init(void)
55 return; 78 return;
56 } 79 }
57 80
58 dummy_regulator_rdev = regulator_register(&dummy_desc, NULL, 81 ret = platform_driver_register(&dummy_regulator_driver);
59 &dummy_initdata, NULL); 82 if (ret != 0) {
60 if (IS_ERR(dummy_regulator_rdev)) { 83 pr_err("Failed to register dummy regulator driver: %d\n", ret);
61 ret = PTR_ERR(dummy_regulator_rdev);
62 pr_err("Failed to register regulator: %d\n", ret);
63 platform_device_unregister(dummy_pdev); 84 platform_device_unregister(dummy_pdev);
64 return;
65 } 85 }
66} 86}
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 55dd4e6650db..66d2d60b436a 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -49,7 +49,6 @@
49#define TPS65911_REG_LDO7 11 49#define TPS65911_REG_LDO7 11
50#define TPS65911_REG_LDO8 12 50#define TPS65911_REG_LDO8 12
51 51
52#define TPS65910_NUM_REGULATOR 13
53#define TPS65910_SUPPLY_STATE_ENABLED 0x1 52#define TPS65910_SUPPLY_STATE_ENABLED 0x1
54 53
55/* supported VIO voltages in milivolts */ 54/* supported VIO voltages in milivolts */
@@ -264,11 +263,12 @@ static struct tps_info tps65911_regs[] = {
264}; 263};
265 264
266struct tps65910_reg { 265struct tps65910_reg {
267 struct regulator_desc desc[TPS65910_NUM_REGULATOR]; 266 struct regulator_desc *desc;
268 struct tps65910 *mfd; 267 struct tps65910 *mfd;
269 struct regulator_dev *rdev[TPS65910_NUM_REGULATOR]; 268 struct regulator_dev **rdev;
270 struct tps_info *info[TPS65910_NUM_REGULATOR]; 269 struct tps_info **info;
271 struct mutex mutex; 270 struct mutex mutex;
271 int num_regulators;
272 int mode; 272 int mode;
273 int (*get_ctrl_reg)(int); 273 int (*get_ctrl_reg)(int);
274}; 274};
@@ -759,8 +759,13 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
759 mult = (selector / VDD1_2_NUM_VOLTS) + 1; 759 mult = (selector / VDD1_2_NUM_VOLTS) + 1;
760 volt = VDD1_2_MIN_VOLT + 760 volt = VDD1_2_MIN_VOLT +
761 (selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET; 761 (selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET;
762 break;
762 case TPS65911_REG_VDDCTRL: 763 case TPS65911_REG_VDDCTRL:
763 volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET); 764 volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
765 break;
766 default:
767 BUG();
768 return -EINVAL;
764 } 769 }
765 770
766 return volt * 100 * mult; 771 return volt * 100 * mult;
@@ -897,16 +902,42 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
897 switch(tps65910_chip_id(tps65910)) { 902 switch(tps65910_chip_id(tps65910)) {
898 case TPS65910: 903 case TPS65910:
899 pmic->get_ctrl_reg = &tps65910_get_ctrl_register; 904 pmic->get_ctrl_reg = &tps65910_get_ctrl_register;
905 pmic->num_regulators = ARRAY_SIZE(tps65910_regs);
900 info = tps65910_regs; 906 info = tps65910_regs;
907 break;
901 case TPS65911: 908 case TPS65911:
902 pmic->get_ctrl_reg = &tps65911_get_ctrl_register; 909 pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
910 pmic->num_regulators = ARRAY_SIZE(tps65911_regs);
903 info = tps65911_regs; 911 info = tps65911_regs;
912 break;
904 default: 913 default:
905 pr_err("Invalid tps chip version\n"); 914 pr_err("Invalid tps chip version\n");
915 kfree(pmic);
906 return -ENODEV; 916 return -ENODEV;
907 } 917 }
908 918
909 for (i = 0; i < TPS65910_NUM_REGULATOR; i++, info++, reg_data++) { 919 pmic->desc = kcalloc(pmic->num_regulators,
920 sizeof(struct regulator_desc), GFP_KERNEL);
921 if (!pmic->desc) {
922 err = -ENOMEM;
923 goto err_free_pmic;
924 }
925
926 pmic->info = kcalloc(pmic->num_regulators,
927 sizeof(struct tps_info *), GFP_KERNEL);
928 if (!pmic->info) {
929 err = -ENOMEM;
930 goto err_free_desc;
931 }
932
933 pmic->rdev = kcalloc(pmic->num_regulators,
934 sizeof(struct regulator_dev *), GFP_KERNEL);
935 if (!pmic->rdev) {
936 err = -ENOMEM;
937 goto err_free_info;
938 }
939
940 for (i = 0; i < pmic->num_regulators; i++, info++, reg_data++) {
910 /* Register the regulators */ 941 /* Register the regulators */
911 pmic->info[i] = info; 942 pmic->info[i] = info;
912 943
@@ -938,7 +969,7 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
938 "failed to register %s regulator\n", 969 "failed to register %s regulator\n",
939 pdev->name); 970 pdev->name);
940 err = PTR_ERR(rdev); 971 err = PTR_ERR(rdev);
941 goto err; 972 goto err_unregister_regulator;
942 } 973 }
943 974
944 /* Save regulator for cleanup */ 975 /* Save regulator for cleanup */
@@ -946,23 +977,31 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
946 } 977 }
947 return 0; 978 return 0;
948 979
949err: 980err_unregister_regulator:
950 while (--i >= 0) 981 while (--i >= 0)
951 regulator_unregister(pmic->rdev[i]); 982 regulator_unregister(pmic->rdev[i]);
952 983 kfree(pmic->rdev);
984err_free_info:
985 kfree(pmic->info);
986err_free_desc:
987 kfree(pmic->desc);
988err_free_pmic:
953 kfree(pmic); 989 kfree(pmic);
954 return err; 990 return err;
955} 991}
956 992
957static int __devexit tps65910_remove(struct platform_device *pdev) 993static int __devexit tps65910_remove(struct platform_device *pdev)
958{ 994{
959 struct tps65910_reg *tps65910_reg = platform_get_drvdata(pdev); 995 struct tps65910_reg *pmic = platform_get_drvdata(pdev);
960 int i; 996 int i;
961 997
962 for (i = 0; i < TPS65910_NUM_REGULATOR; i++) 998 for (i = 0; i < pmic->num_regulators; i++)
963 regulator_unregister(tps65910_reg->rdev[i]); 999 regulator_unregister(pmic->rdev[i]);
964 1000
965 kfree(tps65910_reg); 1001 kfree(pmic->rdev);
1002 kfree(pmic->info);
1003 kfree(pmic->desc);
1004 kfree(pmic);
966 return 0; 1005 return 0;
967} 1006}
968 1007
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
new file mode 100644
index 000000000000..3a9313e00fac
--- /dev/null
+++ b/drivers/regulator/tps65912-regulator.c
@@ -0,0 +1,800 @@
1/*
2 * tps65912.c -- TI tps65912
3 *
4 * Copyright 2011 Texas Instruments Inc.
5 *
6 * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This driver is based on wm8350 implementation.
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/err.h>
20#include <linux/platform_device.h>
21#include <linux/regulator/driver.h>
22#include <linux/regulator/machine.h>
23#include <linux/delay.h>
24#include <linux/slab.h>
25#include <linux/gpio.h>
26#include <linux/mfd/tps65912.h>
27
/* Step-down (DCDC) converter ids */
#define TPS65912_REG_DCDC1	0
#define TPS65912_REG_DCDC2	1
#define TPS65912_REG_DCDC3	2
#define TPS65912_REG_DCDC4	3

/* LDO ids */
#define TPS65912_REG_LDO1	4
#define TPS65912_REG_LDO2	5
#define TPS65912_REG_LDO3	6
#define TPS65912_REG_LDO4	7
#define TPS65912_REG_LDO5	8
#define TPS65912_REG_LDO6	9
#define TPS65912_REG_LDO7	10
#define TPS65912_REG_LDO8	11
#define TPS65912_REG_LDO9	12
#define TPS65912_REG_LDO10	13

/*
 * Highest valid regulator id.  This used to expand to
 * "TPS65912_REG_LDO_10", which is not a defined macro (the LDO ids above
 * have no underscore before the number) and would break the build at the
 * first point of use.
 */
#define TPS65912_MAX_REG_ID	TPS65912_REG_LDO10

/* Number of step-down converters available */
#define TPS65912_NUM_DCDC	4

/* Number of LDO voltage regulators available */
#define TPS65912_NUM_LDO	10

/* Number of total regulators available */
#define TPS65912_NUM_REGULATOR	(TPS65912_NUM_DCDC + TPS65912_NUM_LDO)

/* ENABLE bit in a regulator control register */
#define TPS65912_REG_ENABLED	0x80
/* OP register bit selecting AVS (set) vs OP (clear) as the active vsel */
#define OP_SELREG_MASK		0x40
#define OP_SELREG_SHIFT		6
60
/* Per-regulator static data: only the human-readable name for now. */
struct tps_info {
	const char *name;
};

/* Indexed by regulator id (TPS65912_REG_DCDC1..TPS65912_REG_LDO10). */
static struct tps_info tps65912_regs[] = {
	{ .name = "DCDC1" },
	{ .name = "DCDC2" },
	{ .name = "DCDC3" },
	{ .name = "DCDC4" },
	{ .name = "LDO1" },
	{ .name = "LDO2" },
	{ .name = "LDO3" },
	{ .name = "LDO4" },
	{ .name = "LDO5" },
	{ .name = "LDO6" },
	{ .name = "LDO7" },
	{ .name = "LDO8" },
	{ .name = "LDO9" },
	{ .name = "LDO10" },
};
109
/* Driver state for one TPS65912 chip instance. */
struct tps65912_reg {
	struct regulator_desc desc[TPS65912_NUM_REGULATOR];
	struct tps65912 *mfd;
	struct regulator_dev *rdev[TPS65912_NUM_REGULATOR];
	struct tps_info *info[TPS65912_NUM_REGULATOR];
	/* for read/write access */
	struct mutex io_lock;
	int mode;
	int (*get_ctrl_reg)(int);
	int dcdc1_range;	/* cached RANGE bits from DCDC1_LIMIT, see tps65912_get_range() */
	int dcdc2_range;	/* cached RANGE bits from DCDC2_LIMIT */
	int dcdc3_range;	/* cached RANGE bits from DCDC3_LIMIT */
	int dcdc4_range;	/* cached RANGE bits from DCDC4_LIMIT */
	int pwm_mode_reg;
	int eco_reg;
};
126
127static int tps65912_get_range(struct tps65912_reg *pmic, int id)
128{
129 struct tps65912 *mfd = pmic->mfd;
130
131 if (id > TPS65912_REG_DCDC4)
132 return 0;
133
134 switch (id) {
135 case TPS65912_REG_DCDC1:
136 pmic->dcdc1_range = tps65912_reg_read(mfd,
137 TPS65912_DCDC1_LIMIT);
138 if (pmic->dcdc1_range < 0)
139 return pmic->dcdc1_range;
140 pmic->dcdc1_range = (pmic->dcdc1_range &
141 DCDC_LIMIT_RANGE_MASK) >> DCDC_LIMIT_RANGE_SHIFT;
142 return pmic->dcdc1_range;
143 case TPS65912_REG_DCDC2:
144 pmic->dcdc2_range = tps65912_reg_read(mfd,
145 TPS65912_DCDC2_LIMIT);
146 if (pmic->dcdc2_range < 0)
147 return pmic->dcdc2_range;
148 pmic->dcdc2_range = (pmic->dcdc2_range &
149 DCDC_LIMIT_RANGE_MASK) >> DCDC_LIMIT_RANGE_SHIFT;
150 return pmic->dcdc2_range;
151 case TPS65912_REG_DCDC3:
152 pmic->dcdc3_range = tps65912_reg_read(mfd,
153 TPS65912_DCDC3_LIMIT);
154 if (pmic->dcdc3_range < 0)
155 return pmic->dcdc3_range;
156 pmic->dcdc3_range = (pmic->dcdc3_range &
157 DCDC_LIMIT_RANGE_MASK) >> DCDC_LIMIT_RANGE_SHIFT;
158 return pmic->dcdc3_range;
159 case TPS65912_REG_DCDC4:
160 pmic->dcdc4_range = tps65912_reg_read(mfd,
161 TPS65912_DCDC4_LIMIT);
162 if (pmic->dcdc4_range < 0)
163 return pmic->dcdc4_range;
164 pmic->dcdc4_range = (pmic->dcdc4_range &
165 DCDC_LIMIT_RANGE_MASK) >> DCDC_LIMIT_RANGE_SHIFT;
166 return pmic->dcdc4_range;
167 default:
168 return 0;
169 }
170}
171
172static unsigned long tps65912_vsel_to_uv_range0(u8 vsel)
173{
174 unsigned long uv;
175
176 uv = ((vsel * 12500) + 500000);
177 return uv;
178}
179
180static unsigned long tps65912_vsel_to_uv_range1(u8 vsel)
181{
182 unsigned long uv;
183
184 uv = ((vsel * 12500) + 700000);
185 return uv;
186}
187
188static unsigned long tps65912_vsel_to_uv_range2(u8 vsel)
189{
190 unsigned long uv;
191
192 uv = ((vsel * 25000) + 500000);
193 return uv;
194}
195
196static unsigned long tps65912_vsel_to_uv_range3(u8 vsel)
197{
198 unsigned long uv;
199
200 if (vsel == 0x3f)
201 uv = 3800000;
202 else
203 uv = ((vsel * 50000) + 500000);
204
205 return uv;
206}
207
208static unsigned long tps65912_vsel_to_uv_ldo(u8 vsel)
209{
210 unsigned long uv = 0;
211
212 if (vsel <= 32)
213 uv = ((vsel * 25000) + 800000);
214 else if (vsel > 32 && vsel <= 60)
215 uv = (((vsel - 32) * 50000) + 1600000);
216 else if (vsel > 60)
217 uv = (((vsel - 60) * 100000) + 3000000);
218
219 return uv;
220}
221
222static int tps65912_get_ctrl_register(int id)
223{
224 switch (id) {
225 case TPS65912_REG_DCDC1:
226 return TPS65912_DCDC1_AVS;
227 case TPS65912_REG_DCDC2:
228 return TPS65912_DCDC2_AVS;
229 case TPS65912_REG_DCDC3:
230 return TPS65912_DCDC3_AVS;
231 case TPS65912_REG_DCDC4:
232 return TPS65912_DCDC4_AVS;
233 case TPS65912_REG_LDO1:
234 return TPS65912_LDO1_AVS;
235 case TPS65912_REG_LDO2:
236 return TPS65912_LDO2_AVS;
237 case TPS65912_REG_LDO3:
238 return TPS65912_LDO3_AVS;
239 case TPS65912_REG_LDO4:
240 return TPS65912_LDO4_AVS;
241 case TPS65912_REG_LDO5:
242 return TPS65912_LDO5;
243 case TPS65912_REG_LDO6:
244 return TPS65912_LDO6;
245 case TPS65912_REG_LDO7:
246 return TPS65912_LDO7;
247 case TPS65912_REG_LDO8:
248 return TPS65912_LDO8;
249 case TPS65912_REG_LDO9:
250 return TPS65912_LDO9;
251 case TPS65912_REG_LDO10:
252 return TPS65912_LDO10;
253 default:
254 return -EINVAL;
255 }
256}
257
258static int tps65912_get_dcdc_sel_register(struct tps65912_reg *pmic, int id)
259{
260 struct tps65912 *mfd = pmic->mfd;
261 int opvsel = 0, sr = 0;
262 u8 reg = 0;
263
264 if (id < TPS65912_REG_DCDC1 || id > TPS65912_REG_DCDC4)
265 return -EINVAL;
266
267 switch (id) {
268 case TPS65912_REG_DCDC1:
269 opvsel = tps65912_reg_read(mfd, TPS65912_DCDC1_OP);
270 sr = ((opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT);
271 if (sr)
272 reg = TPS65912_DCDC1_AVS;
273 else
274 reg = TPS65912_DCDC1_OP;
275 break;
276 case TPS65912_REG_DCDC2:
277 opvsel = tps65912_reg_read(mfd, TPS65912_DCDC2_OP);
278 sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
279 if (sr)
280 reg = TPS65912_DCDC2_AVS;
281 else
282 reg = TPS65912_DCDC2_OP;
283 break;
284 case TPS65912_REG_DCDC3:
285 opvsel = tps65912_reg_read(mfd, TPS65912_DCDC3_OP);
286 sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
287 if (sr)
288 reg = TPS65912_DCDC3_AVS;
289 else
290 reg = TPS65912_DCDC3_OP;
291 break;
292 case TPS65912_REG_DCDC4:
293 opvsel = tps65912_reg_read(mfd, TPS65912_DCDC4_OP);
294 sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
295 if (sr)
296 reg = TPS65912_DCDC4_AVS;
297 else
298 reg = TPS65912_DCDC4_OP;
299 break;
300 }
301 return reg;
302}
303
304static int tps65912_get_ldo_sel_register(struct tps65912_reg *pmic, int id)
305{
306 struct tps65912 *mfd = pmic->mfd;
307 int opvsel = 0, sr = 0;
308 u8 reg = 0;
309
310 if (id < TPS65912_REG_LDO1 || id > TPS65912_REG_LDO10)
311 return -EINVAL;
312
313 switch (id) {
314 case TPS65912_REG_LDO1:
315 opvsel = tps65912_reg_read(mfd, TPS65912_LDO1_OP);
316 sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
317 if (sr)
318 reg = TPS65912_LDO1_AVS;
319 else
320 reg = TPS65912_LDO1_OP;
321 break;
322 case TPS65912_REG_LDO2:
323 opvsel = tps65912_reg_read(mfd, TPS65912_LDO2_OP);
324 sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
325 if (sr)
326 reg = TPS65912_LDO2_AVS;
327 else
328 reg = TPS65912_LDO2_OP;
329 break;
330 case TPS65912_REG_LDO3:
331 opvsel = tps65912_reg_read(mfd, TPS65912_LDO3_OP);
332 sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
333 if (sr)
334 reg = TPS65912_LDO3_AVS;
335 else
336 reg = TPS65912_LDO3_OP;
337 break;
338 case TPS65912_REG_LDO4:
339 opvsel = tps65912_reg_read(mfd, TPS65912_LDO4_OP);
340 sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
341 if (sr)
342 reg = TPS65912_LDO4_AVS;
343 else
344 reg = TPS65912_LDO4_OP;
345 break;
346 case TPS65912_REG_LDO5:
347 reg = TPS65912_LDO5;
348 break;
349 case TPS65912_REG_LDO6:
350 reg = TPS65912_LDO6;
351 break;
352 case TPS65912_REG_LDO7:
353 reg = TPS65912_LDO7;
354 break;
355 case TPS65912_REG_LDO8:
356 reg = TPS65912_LDO8;
357 break;
358 case TPS65912_REG_LDO9:
359 reg = TPS65912_LDO9;
360 break;
361 case TPS65912_REG_LDO10:
362 reg = TPS65912_LDO10;
363 break;
364 }
365
366 return reg;
367}
368
/*
 * tps65912_get_mode_regiters - cache the mode-control register addresses
 * for a DCDC in @pmic (pwm_mode_reg selects PWM vs PFM, eco_reg holds the
 * ECO/standby bit).  The misspelled name ("regiters") is kept as-is: it is
 * this driver's established internal identifier.
 *
 * Returns 0 on success or -EINVAL for non-DCDC ids; on failure the
 * previously cached pwm_mode_reg/eco_reg values are left untouched, so
 * callers must check the return value before using them.
 */
static int tps65912_get_mode_regiters(struct tps65912_reg *pmic, int id)
{
	switch (id) {
	case TPS65912_REG_DCDC1:
		pmic->pwm_mode_reg = TPS65912_DCDC1_CTRL;
		pmic->eco_reg = TPS65912_DCDC1_AVS;
		break;
	case TPS65912_REG_DCDC2:
		pmic->pwm_mode_reg = TPS65912_DCDC2_CTRL;
		pmic->eco_reg = TPS65912_DCDC2_AVS;
		break;
	case TPS65912_REG_DCDC3:
		pmic->pwm_mode_reg = TPS65912_DCDC3_CTRL;
		pmic->eco_reg = TPS65912_DCDC3_AVS;
		break;
	case TPS65912_REG_DCDC4:
		pmic->pwm_mode_reg = TPS65912_DCDC4_CTRL;
		pmic->eco_reg = TPS65912_DCDC4_AVS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
394
395static int tps65912_reg_is_enabled(struct regulator_dev *dev)
396{
397 struct tps65912_reg *pmic = rdev_get_drvdata(dev);
398 struct tps65912 *mfd = pmic->mfd;
399 int reg, value, id = rdev_get_id(dev);
400
401 if (id < TPS65912_REG_DCDC1 || id > TPS65912_REG_LDO10)
402 return -EINVAL;
403
404 reg = pmic->get_ctrl_reg(id);
405 if (reg < 0)
406 return reg;
407
408 value = tps65912_reg_read(mfd, reg);
409 if (value < 0)
410 return value;
411
412 return value & TPS65912_REG_ENABLED;
413}
414
415static int tps65912_reg_enable(struct regulator_dev *dev)
416{
417 struct tps65912_reg *pmic = rdev_get_drvdata(dev);
418 struct tps65912 *mfd = pmic->mfd;
419 int id = rdev_get_id(dev);
420 int reg;
421
422 if (id < TPS65912_REG_DCDC1 || id > TPS65912_REG_LDO10)
423 return -EINVAL;
424
425 reg = pmic->get_ctrl_reg(id);
426 if (reg < 0)
427 return reg;
428
429 return tps65912_set_bits(mfd, reg, TPS65912_REG_ENABLED);
430}
431
432static int tps65912_reg_disable(struct regulator_dev *dev)
433{
434 struct tps65912_reg *pmic = rdev_get_drvdata(dev);
435 struct tps65912 *mfd = pmic->mfd;
436 int id = rdev_get_id(dev), reg;
437
438 reg = pmic->get_ctrl_reg(id);
439 if (reg < 0)
440 return reg;
441
442 return tps65912_clear_bits(mfd, reg, TPS65912_REG_ENABLED);
443}
444
445static int tps65912_set_mode(struct regulator_dev *dev, unsigned int mode)
446{
447 struct tps65912_reg *pmic = rdev_get_drvdata(dev);
448 struct tps65912 *mfd = pmic->mfd;
449 int pwm_mode, eco, id = rdev_get_id(dev);
450
451 tps65912_get_mode_regiters(pmic, id);
452
453 pwm_mode = tps65912_reg_read(mfd, pmic->pwm_mode_reg);
454 eco = tps65912_reg_read(mfd, pmic->eco_reg);
455
456 pwm_mode &= DCDCCTRL_DCDC_MODE_MASK;
457 eco &= DCDC_AVS_ECO_MASK;
458
459 switch (mode) {
460 case REGULATOR_MODE_FAST:
461 /* Verify if mode alredy set */
462 if (pwm_mode && !eco)
463 break;
464 tps65912_set_bits(mfd, pmic->pwm_mode_reg, DCDCCTRL_DCDC_MODE_MASK);
465 tps65912_clear_bits(mfd, pmic->eco_reg, DCDC_AVS_ECO_MASK);
466 break;
467 case REGULATOR_MODE_NORMAL:
468 case REGULATOR_MODE_IDLE:
469 if (!pwm_mode && !eco)
470 break;
471 tps65912_clear_bits(mfd, pmic->pwm_mode_reg, DCDCCTRL_DCDC_MODE_MASK);
472 tps65912_clear_bits(mfd, pmic->eco_reg, DCDC_AVS_ECO_MASK);
473 break;
474 case REGULATOR_MODE_STANDBY:
475 if (!pwm_mode && eco)
476 break;
477 tps65912_clear_bits(mfd, pmic->pwm_mode_reg, DCDCCTRL_DCDC_MODE_MASK);
478 tps65912_set_bits(mfd, pmic->eco_reg, DCDC_AVS_ECO_MASK);
479 break;
480 default:
481 return -EINVAL;
482 }
483
484 return 0;
485}
486
487static unsigned int tps65912_get_mode(struct regulator_dev *dev)
488{
489 struct tps65912_reg *pmic = rdev_get_drvdata(dev);
490 struct tps65912 *mfd = pmic->mfd;
491 int pwm_mode, eco, mode = 0, id = rdev_get_id(dev);
492
493 tps65912_get_mode_regiters(pmic, id);
494
495 pwm_mode = tps65912_reg_read(mfd, pmic->pwm_mode_reg);
496 eco = tps65912_reg_read(mfd, pmic->eco_reg);
497
498 pwm_mode &= DCDCCTRL_DCDC_MODE_MASK;
499 eco &= DCDC_AVS_ECO_MASK;
500
501 if (pwm_mode && !eco)
502 mode = REGULATOR_MODE_FAST;
503 else if (!pwm_mode && !eco)
504 mode = REGULATOR_MODE_NORMAL;
505 else if (!pwm_mode && eco)
506 mode = REGULATOR_MODE_STANDBY;
507
508 return mode;
509}
510
511static int tps65912_get_voltage_dcdc(struct regulator_dev *dev)
512{
513 struct tps65912_reg *pmic = rdev_get_drvdata(dev);
514 struct tps65912 *mfd = pmic->mfd;
515 int id = rdev_get_id(dev), voltage = 0, range;
516 int opvsel = 0, avsel = 0, sr, vsel;
517
518 switch (id) {
519 case TPS65912_REG_DCDC1:
520 opvsel = tps65912_reg_read(mfd, TPS65912_DCDC1_OP);
521 avsel = tps65912_reg_read(mfd, TPS65912_DCDC1_AVS);
522 range = pmic->dcdc1_range;
523 break;
524 case TPS65912_REG_DCDC2:
525 opvsel = tps65912_reg_read(mfd, TPS65912_DCDC2_OP);
526 avsel = tps65912_reg_read(mfd, TPS65912_DCDC2_AVS);
527 range = pmic->dcdc2_range;
528 break;
529 case TPS65912_REG_DCDC3:
530 opvsel = tps65912_reg_read(mfd, TPS65912_DCDC3_OP);
531 avsel = tps65912_reg_read(mfd, TPS65912_DCDC3_AVS);
532 range = pmic->dcdc3_range;
533 break;
534 case TPS65912_REG_DCDC4:
535 opvsel = tps65912_reg_read(mfd, TPS65912_DCDC4_OP);
536 avsel = tps65912_reg_read(mfd, TPS65912_DCDC4_AVS);
537 range = pmic->dcdc4_range;
538 break;
539 default:
540 return -EINVAL;
541 }
542
543 sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
544 if (sr)
545 vsel = avsel;
546 else
547 vsel = opvsel;
548 vsel &= 0x3F;
549
550 switch (range) {
551 case 0:
552 /* 0.5 - 1.2875V in 12.5mV steps */
553 voltage = tps65912_vsel_to_uv_range0(vsel);
554 break;
555 case 1:
556 /* 0.7 - 1.4875V in 12.5mV steps */
557 voltage = tps65912_vsel_to_uv_range1(vsel);
558 break;
559 case 2:
560 /* 0.5 - 2.075V in 25mV steps */
561 voltage = tps65912_vsel_to_uv_range2(vsel);
562 break;
563 case 3:
564 /* 0.5 - 3.8V in 50mV steps */
565 voltage = tps65912_vsel_to_uv_range3(vsel);
566 break;
567 }
568 return voltage;
569}
570
571static int tps65912_set_voltage_dcdc(struct regulator_dev *dev,
572 unsigned selector)
573{
574 struct tps65912_reg *pmic = rdev_get_drvdata(dev);
575 struct tps65912 *mfd = pmic->mfd;
576 int id = rdev_get_id(dev);
577 int value;
578 u8 reg;
579
580 reg = tps65912_get_dcdc_sel_register(pmic, id);
581 value = tps65912_reg_read(mfd, reg);
582 value &= 0xC0;
583 return tps65912_reg_write(mfd, reg, selector | value);
584}
585
586static int tps65912_get_voltage_ldo(struct regulator_dev *dev)
587{
588 struct tps65912_reg *pmic = rdev_get_drvdata(dev);
589 struct tps65912 *mfd = pmic->mfd;
590 int id = rdev_get_id(dev);
591 int vsel = 0;
592 u8 reg;
593
594 reg = tps65912_get_ldo_sel_register(pmic, id);
595 vsel = tps65912_reg_read(mfd, reg);
596 vsel &= 0x3F;
597
598 return tps65912_vsel_to_uv_ldo(vsel);
599}
600
601static int tps65912_set_voltage_ldo(struct regulator_dev *dev,
602 unsigned selector)
603{
604 struct tps65912_reg *pmic = rdev_get_drvdata(dev);
605 struct tps65912 *mfd = pmic->mfd;
606 int id = rdev_get_id(dev), reg, value;
607
608 reg = tps65912_get_ldo_sel_register(pmic, id);
609 value = tps65912_reg_read(mfd, reg);
610 value &= 0xC0;
611 return tps65912_reg_write(mfd, reg, selector | value);
612}
613
614static int tps65912_list_voltage_dcdc(struct regulator_dev *dev,
615 unsigned selector)
616{
617 struct tps65912_reg *pmic = rdev_get_drvdata(dev);
618 int range, voltage = 0, id = rdev_get_id(dev);
619
620 switch (id) {
621 case TPS65912_REG_DCDC1:
622 range = pmic->dcdc1_range;
623 break;
624 case TPS65912_REG_DCDC2:
625 range = pmic->dcdc2_range;
626 break;
627 case TPS65912_REG_DCDC3:
628 range = pmic->dcdc3_range;
629 break;
630 case TPS65912_REG_DCDC4:
631 range = pmic->dcdc4_range;
632 break;
633 default:
634 return -EINVAL;
635 }
636
637 switch (range) {
638 case 0:
639 /* 0.5 - 1.2875V in 12.5mV steps */
640 voltage = tps65912_vsel_to_uv_range0(selector);
641 break;
642 case 1:
643 /* 0.7 - 1.4875V in 12.5mV steps */
644 voltage = tps65912_vsel_to_uv_range1(selector);
645 break;
646 case 2:
647 /* 0.5 - 2.075V in 25mV steps */
648 voltage = tps65912_vsel_to_uv_range2(selector);
649 break;
650 case 3:
651 /* 0.5 - 3.8V in 50mV steps */
652 voltage = tps65912_vsel_to_uv_range3(selector);
653 break;
654 }
655 return voltage;
656}
657
658static int tps65912_list_voltage_ldo(struct regulator_dev *dev,
659 unsigned selector)
660{
661 int ldo = rdev_get_id(dev);
662
663 if (ldo < TPS65912_REG_LDO1 || ldo > TPS65912_REG_LDO10)
664 return -EINVAL;
665
666 return tps65912_vsel_to_uv_ldo(selector);
667}
668
/*
 * Operations permitted on DCDCx.
 * DCDCs additionally support mode control (PWM/auto/ECO) on top of the
 * enable/disable and voltage-selection ops shared with the LDOs.
 */
static struct regulator_ops tps65912_ops_dcdc = {
	.is_enabled = tps65912_reg_is_enabled,
	.enable = tps65912_reg_enable,
	.disable = tps65912_reg_disable,
	.set_mode = tps65912_set_mode,
	.get_mode = tps65912_get_mode,
	.get_voltage = tps65912_get_voltage_dcdc,
	.set_voltage_sel = tps65912_set_voltage_dcdc,
	.list_voltage = tps65912_list_voltage_dcdc,
};
680
/*
 * Operations permitted on LDOx.
 * Same enable/voltage ops as the DCDCs but no mode control: the ECO/PWM
 * mode registers exist only for the DCDCs.
 */
static struct regulator_ops tps65912_ops_ldo = {
	.is_enabled = tps65912_reg_is_enabled,
	.enable = tps65912_reg_enable,
	.disable = tps65912_reg_disable,
	.get_voltage = tps65912_get_voltage_ldo,
	.set_voltage_sel = tps65912_set_voltage_ldo,
	.list_voltage = tps65912_list_voltage_ldo,
};
690
/*
 * tps65912_probe - register one regulator_dev per TPS65912 regulator.
 *
 * Pulls the MFD core device and per-board init data from the parent,
 * allocates the driver state, then registers all TPS65912_NUM_REGULATOR
 * regulators (DCDC1-4 followed by the LDOs, distinguished by index).
 * On any registration failure, everything registered so far is unwound.
 *
 * Returns 0 on success, -EINVAL without platform data, -ENOMEM on
 * allocation failure, or the regulator_register() error.
 */
static __devinit int tps65912_probe(struct platform_device *pdev)
{
	struct tps65912 *tps65912 = dev_get_drvdata(pdev->dev.parent);
	struct tps_info *info;
	struct regulator_init_data *reg_data;
	struct regulator_dev *rdev;
	struct tps65912_reg *pmic;
	struct tps65912_board *pmic_plat_data;
	int i, err;

	pmic_plat_data = dev_get_platdata(tps65912->dev);
	if (!pmic_plat_data)
		return -EINVAL;

	/* Per-regulator init data; advanced in lockstep with the loop. */
	reg_data = pmic_plat_data->tps65912_pmic_init_data;

	pmic = kzalloc(sizeof(*pmic), GFP_KERNEL);
	if (!pmic)
		return -ENOMEM;

	mutex_init(&pmic->io_lock);
	pmic->mfd = tps65912;
	platform_set_drvdata(pdev, pmic);

	pmic->get_ctrl_reg = &tps65912_get_ctrl_register;
	info = tps65912_regs;

	for (i = 0; i < TPS65912_NUM_REGULATOR; i++, info++, reg_data++) {
		int range = 0;
		/* Register the regulators */
		pmic->info[i] = info;

		pmic->desc[i].name = info->name;
		pmic->desc[i].id = i;
		pmic->desc[i].n_voltages = 64;
		/* Indices after DCDC4 are LDOs and get the LDO ops. */
		pmic->desc[i].ops = (i > TPS65912_REG_DCDC4 ?
			&tps65912_ops_ldo : &tps65912_ops_dcdc);
		pmic->desc[i].type = REGULATOR_VOLTAGE;
		pmic->desc[i].owner = THIS_MODULE;
		/*
		 * Called for its side effect: tps65912_get_range() caches
		 * the DCDC range in pmic->dcdcX_range.  The return value
		 * itself is unused here.
		 */
		range = tps65912_get_range(pmic, i);
		rdev = regulator_register(&pmic->desc[i],
					tps65912->dev, reg_data, pmic);
		if (IS_ERR(rdev)) {
			dev_err(tps65912->dev,
				"failed to register %s regulator\n",
				pdev->name);
			err = PTR_ERR(rdev);
			goto err;
		}

		/* Save regulator for cleanup */
		pmic->rdev[i] = rdev;
	}
	return 0;

err:
	/* Unwind only the regulators registered before the failure. */
	while (--i >= 0)
		regulator_unregister(pmic->rdev[i]);

	kfree(pmic);
	return err;
}
753
754static int __devexit tps65912_remove(struct platform_device *pdev)
755{
756 struct tps65912_reg *tps65912_reg = platform_get_drvdata(pdev);
757 int i;
758
759 for (i = 0; i < TPS65912_NUM_REGULATOR; i++)
760 regulator_unregister(tps65912_reg->rdev[i]);
761
762 kfree(tps65912_reg);
763 return 0;
764}
765
/* Platform driver binding for the "tps65912-pmic" MFD cell. */
static struct platform_driver tps65912_driver = {
	.driver = {
		.name = "tps65912-pmic",
		.owner = THIS_MODULE,
	},
	.probe = tps65912_probe,
	.remove = __devexit_p(tps65912_remove),
};
774
/**
 * tps65912_init
 *
 * Module init function.  Registered via subsys_initcall() (rather than
 * module_init()) so the regulators come up early in boot, before the
 * drivers that consume them.
 */
static int __init tps65912_init(void)
{
	return platform_driver_register(&tps65912_driver);
}
subsys_initcall(tps65912_init);
785
/**
 * tps65912_cleanup
 *
 * Module exit function: unregisters the platform driver, which detaches
 * any bound devices and triggers tps65912_remove() for them.
 */
static void __exit tps65912_cleanup(void)
{
	platform_driver_unregister(&tps65912_driver);
}
module_exit(tps65912_cleanup);
796
797MODULE_AUTHOR("Margarita Olaya Cabrera <magi@slimlogic.co.uk>");
798MODULE_DESCRIPTION("TPS65912 voltage regulator driver");
799MODULE_LICENSE("GPL v2");
800MODULE_ALIAS("platform:tps65912-pmic");
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 87fe0f75a56e..ee8747f4fa08 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -835,8 +835,8 @@ static struct regulator_ops twlsmps_ops = {
835 remap_conf) \ 835 remap_conf) \
836 TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ 836 TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
837 remap_conf, TWL4030, twl4030fixed_ops) 837 remap_conf, TWL4030, twl4030fixed_ops)
838#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay) \ 838#define TWL6030_FIXED_LDO(label, offset, mVolts, turnon_delay) \
839 TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ 839 TWL_FIXED_LDO(label, offset, mVolts, 0x0, turnon_delay, \
840 0x0, TWL6030, twl6030fixed_ops) 840 0x0, TWL6030, twl6030fixed_ops)
841 841
842#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \ 842#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \
@@ -856,24 +856,22 @@ static struct regulator_ops twlsmps_ops = {
856 }, \ 856 }, \
857 } 857 }
858 858
859#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \ 859#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
860 .base = offset, \ 860 .base = offset, \
861 .id = num, \
862 .min_mV = min_mVolts, \ 861 .min_mV = min_mVolts, \
863 .max_mV = max_mVolts, \ 862 .max_mV = max_mVolts, \
864 .desc = { \ 863 .desc = { \
865 .name = #label, \ 864 .name = #label, \
866 .id = TWL6030_REG_##label, \ 865 .id = TWL6030_REG_##label, \
867 .n_voltages = (max_mVolts - min_mVolts)/100, \ 866 .n_voltages = (max_mVolts - min_mVolts)/100 + 1, \
868 .ops = &twl6030ldo_ops, \ 867 .ops = &twl6030ldo_ops, \
869 .type = REGULATOR_VOLTAGE, \ 868 .type = REGULATOR_VOLTAGE, \
870 .owner = THIS_MODULE, \ 869 .owner = THIS_MODULE, \
871 }, \ 870 }, \
872 } 871 }
873 872
874#define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \ 873#define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
875 .base = offset, \ 874 .base = offset, \
876 .id = num, \
877 .min_mV = min_mVolts, \ 875 .min_mV = min_mVolts, \
878 .max_mV = max_mVolts, \ 876 .max_mV = max_mVolts, \
879 .desc = { \ 877 .desc = { \
@@ -903,9 +901,8 @@ static struct regulator_ops twlsmps_ops = {
903 }, \ 901 }, \
904 } 902 }
905 903
906#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay) { \ 904#define TWL6030_FIXED_RESOURCE(label, offset, turnon_delay) { \
907 .base = offset, \ 905 .base = offset, \
908 .id = num, \
909 .delay = turnon_delay, \ 906 .delay = turnon_delay, \
910 .desc = { \ 907 .desc = { \
911 .name = #label, \ 908 .name = #label, \
@@ -916,9 +913,8 @@ static struct regulator_ops twlsmps_ops = {
916 }, \ 913 }, \
917 } 914 }
918 915
919#define TWL6025_ADJUSTABLE_SMPS(label, offset, num) { \ 916#define TWL6025_ADJUSTABLE_SMPS(label, offset) { \
920 .base = offset, \ 917 .base = offset, \
921 .id = num, \
922 .min_mV = 600, \ 918 .min_mV = 600, \
923 .max_mV = 2100, \ 919 .max_mV = 2100, \
924 .desc = { \ 920 .desc = { \
@@ -961,32 +957,32 @@ static struct twlreg_info twl_regs[] = {
961 /* 6030 REG with base as PMC Slave Misc : 0x0030 */ 957 /* 6030 REG with base as PMC Slave Misc : 0x0030 */
962 /* Turnon-delay and remap configuration values for 6030 are not 958 /* Turnon-delay and remap configuration values for 6030 are not
963 verified since the specification is not public */ 959 verified since the specification is not public */
964 TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1), 960 TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300),
965 TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2), 961 TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300),
966 TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3), 962 TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300),
967 TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4), 963 TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300),
968 TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5), 964 TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300),
969 TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7), 965 TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300),
970 TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0), 966 TWL6030_FIXED_LDO(VANA, 0x50, 2100, 0),
971 TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0), 967 TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 0),
972 TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0), 968 TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0),
973 TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0), 969 TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0),
974 TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0), 970 TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 0),
975 971
976 /* 6025 are renamed compared to 6030 versions */ 972 /* 6025 are renamed compared to 6030 versions */
977 TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300, 1), 973 TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300),
978 TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300, 2), 974 TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300),
979 TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300, 3), 975 TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300),
980 TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300, 4), 976 TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300),
981 TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300, 5), 977 TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300),
982 TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300, 7), 978 TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300),
983 TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300, 16), 979 TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300),
984 TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300, 17), 980 TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300),
985 TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300, 18), 981 TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300),
986 982
987 TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34, 1), 983 TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34),
988 TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10, 2), 984 TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10),
989 TWL6025_ADJUSTABLE_SMPS(VIO, 0x16, 3), 985 TWL6025_ADJUSTABLE_SMPS(VIO, 0x16),
990}; 986};
991 987
992static u8 twl_get_smps_offset(void) 988static u8 twl_get_smps_offset(void)
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index a0982e809851..bd3531d8b2ac 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -267,23 +267,6 @@ static int wm831x_buckv_select_min_voltage(struct regulator_dev *rdev,
267 return vsel; 267 return vsel;
268} 268}
269 269
270static int wm831x_buckv_select_max_voltage(struct regulator_dev *rdev,
271 int min_uV, int max_uV)
272{
273 u16 vsel;
274
275 if (max_uV < 600000 || max_uV > 1800000)
276 return -EINVAL;
277
278 vsel = ((max_uV - 600000) / 12500) + 8;
279
280 if (wm831x_buckv_list_voltage(rdev, vsel) < min_uV ||
281 wm831x_buckv_list_voltage(rdev, vsel) < max_uV)
282 return -EINVAL;
283
284 return vsel;
285}
286
287static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state) 270static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state)
288{ 271{
289 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); 272 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
@@ -338,28 +321,23 @@ static int wm831x_buckv_set_voltage(struct regulator_dev *rdev,
338 if (ret < 0) 321 if (ret < 0)
339 return ret; 322 return ret;
340 323
341 /* Set the high voltage as the DVS voltage. This is optimised 324 /*
342 * for CPUfreq usage, most processors will keep the maximum 325 * If this VSEL is higher than the last one we've seen then
343 * voltage constant and lower the minimum with the frequency. */ 326 * remember it as the DVS VSEL. This is optimised for CPUfreq
344 vsel = wm831x_buckv_select_max_voltage(rdev, min_uV, max_uV); 327 * usage where we want to get to the highest voltage very
345 if (vsel < 0) { 328 * quickly.
346 /* This should never happen - at worst the same vsel 329 */
347 * should be chosen */ 330 if (vsel > dcdc->dvs_vsel) {
348 WARN_ON(vsel < 0); 331 ret = wm831x_set_bits(wm831x, dvs_reg,
349 return 0; 332 WM831X_DC1_DVS_VSEL_MASK,
333 dcdc->dvs_vsel);
334 if (ret == 0)
335 dcdc->dvs_vsel = vsel;
336 else
337 dev_warn(wm831x->dev,
338 "Failed to set DCDC DVS VSEL: %d\n", ret);
350 } 339 }
351 340
352 /* Don't bother if it's the same VSEL we're already using */
353 if (vsel == dcdc->on_vsel)
354 return 0;
355
356 ret = wm831x_set_bits(wm831x, dvs_reg, WM831X_DC1_DVS_VSEL_MASK, vsel);
357 if (ret == 0)
358 dcdc->dvs_vsel = vsel;
359 else
360 dev_warn(wm831x->dev, "Failed to set DCDC DVS VSEL: %d\n",
361 ret);
362
363 return 0; 341 return 0;
364} 342}
365 343
@@ -456,27 +434,6 @@ static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
456 if (!pdata || !pdata->dvs_gpio) 434 if (!pdata || !pdata->dvs_gpio)
457 return; 435 return;
458 436
459 switch (pdata->dvs_control_src) {
460 case 1:
461 ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT;
462 break;
463 case 2:
464 ctrl = 3 << WM831X_DC1_DVS_SRC_SHIFT;
465 break;
466 default:
467 dev_err(wm831x->dev, "Invalid DVS control source %d for %s\n",
468 pdata->dvs_control_src, dcdc->name);
469 return;
470 }
471
472 ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL,
473 WM831X_DC1_DVS_SRC_MASK, ctrl);
474 if (ret < 0) {
475 dev_err(wm831x->dev, "Failed to set %s DVS source: %d\n",
476 dcdc->name, ret);
477 return;
478 }
479
480 ret = gpio_request(pdata->dvs_gpio, "DCDC DVS"); 437 ret = gpio_request(pdata->dvs_gpio, "DCDC DVS");
481 if (ret < 0) { 438 if (ret < 0) {
482 dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n", 439 dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n",
@@ -498,17 +455,57 @@ static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
498 } 455 }
499 456
500 dcdc->dvs_gpio = pdata->dvs_gpio; 457 dcdc->dvs_gpio = pdata->dvs_gpio;
458
459 switch (pdata->dvs_control_src) {
460 case 1:
461 ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT;
462 break;
463 case 2:
464 ctrl = 3 << WM831X_DC1_DVS_SRC_SHIFT;
465 break;
466 default:
467 dev_err(wm831x->dev, "Invalid DVS control source %d for %s\n",
468 pdata->dvs_control_src, dcdc->name);
469 return;
470 }
471
472 /* If DVS_VSEL is set to the minimum value then raise it to ON_VSEL
473 * to make bootstrapping a bit smoother.
474 */
475 if (!dcdc->dvs_vsel) {
476 ret = wm831x_set_bits(wm831x,
477 dcdc->base + WM831X_DCDC_DVS_CONTROL,
478 WM831X_DC1_DVS_VSEL_MASK, dcdc->on_vsel);
479 if (ret == 0)
480 dcdc->dvs_vsel = dcdc->on_vsel;
481 else
482 dev_warn(wm831x->dev, "Failed to set DVS_VSEL: %d\n",
483 ret);
484 }
485
486 ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL,
487 WM831X_DC1_DVS_SRC_MASK, ctrl);
488 if (ret < 0) {
489 dev_err(wm831x->dev, "Failed to set %s DVS source: %d\n",
490 dcdc->name, ret);
491 }
501} 492}
502 493
503static __devinit int wm831x_buckv_probe(struct platform_device *pdev) 494static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
504{ 495{
505 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); 496 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
506 struct wm831x_pdata *pdata = wm831x->dev->platform_data; 497 struct wm831x_pdata *pdata = wm831x->dev->platform_data;
507 int id = pdev->id % ARRAY_SIZE(pdata->dcdc); 498 int id;
508 struct wm831x_dcdc *dcdc; 499 struct wm831x_dcdc *dcdc;
509 struct resource *res; 500 struct resource *res;
510 int ret, irq; 501 int ret, irq;
511 502
503 if (pdata && pdata->wm831x_num)
504 id = (pdata->wm831x_num * 10) + 1;
505 else
506 id = 0;
507 id = pdev->id - id;
508
512 dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1); 509 dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1);
513 510
514 if (pdata == NULL || pdata->dcdc[id] == NULL) 511 if (pdata == NULL || pdata->dcdc[id] == NULL)
@@ -545,7 +542,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
545 } 542 }
546 dcdc->on_vsel = ret & WM831X_DC1_ON_VSEL_MASK; 543 dcdc->on_vsel = ret & WM831X_DC1_ON_VSEL_MASK;
547 544
548 ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG); 545 ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL);
549 if (ret < 0) { 546 if (ret < 0) {
550 dev_err(wm831x->dev, "Failed to read DVS VSEL: %d\n", ret); 547 dev_err(wm831x->dev, "Failed to read DVS VSEL: %d\n", ret);
551 goto err; 548 goto err;
@@ -709,11 +706,17 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
709{ 706{
710 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); 707 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
711 struct wm831x_pdata *pdata = wm831x->dev->platform_data; 708 struct wm831x_pdata *pdata = wm831x->dev->platform_data;
712 int id = pdev->id % ARRAY_SIZE(pdata->dcdc); 709 int id;
713 struct wm831x_dcdc *dcdc; 710 struct wm831x_dcdc *dcdc;
714 struct resource *res; 711 struct resource *res;
715 int ret, irq; 712 int ret, irq;
716 713
714 if (pdata && pdata->wm831x_num)
715 id = (pdata->wm831x_num * 10) + 1;
716 else
717 id = 0;
718 id = pdev->id - id;
719
717 dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1); 720 dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1);
718 721
719 if (pdata == NULL || pdata->dcdc[id] == NULL) 722 if (pdata == NULL || pdata->dcdc[id] == NULL)
@@ -1046,3 +1049,4 @@ MODULE_DESCRIPTION("WM831x DC-DC convertor driver");
1046MODULE_LICENSE("GPL"); 1049MODULE_LICENSE("GPL");
1047MODULE_ALIAS("platform:wm831x-buckv"); 1050MODULE_ALIAS("platform:wm831x-buckv");
1048MODULE_ALIAS("platform:wm831x-buckp"); 1051MODULE_ALIAS("platform:wm831x-buckp");
1052MODULE_ALIAS("platform:wm831x-epe");
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 2220cf8defb1..6709710a059e 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -310,11 +310,17 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
310{ 310{
311 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); 311 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
312 struct wm831x_pdata *pdata = wm831x->dev->platform_data; 312 struct wm831x_pdata *pdata = wm831x->dev->platform_data;
313 int id = pdev->id % ARRAY_SIZE(pdata->ldo); 313 int id;
314 struct wm831x_ldo *ldo; 314 struct wm831x_ldo *ldo;
315 struct resource *res; 315 struct resource *res;
316 int ret, irq; 316 int ret, irq;
317 317
318 if (pdata && pdata->wm831x_num)
319 id = (pdata->wm831x_num * 10) + 1;
320 else
321 id = 0;
322 id = pdev->id - id;
323
318 dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1); 324 dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
319 325
320 if (pdata == NULL || pdata->ldo[id] == NULL) 326 if (pdata == NULL || pdata->ldo[id] == NULL)
@@ -574,11 +580,17 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
574{ 580{
575 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); 581 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
576 struct wm831x_pdata *pdata = wm831x->dev->platform_data; 582 struct wm831x_pdata *pdata = wm831x->dev->platform_data;
577 int id = pdev->id % ARRAY_SIZE(pdata->ldo); 583 int id;
578 struct wm831x_ldo *ldo; 584 struct wm831x_ldo *ldo;
579 struct resource *res; 585 struct resource *res;
580 int ret, irq; 586 int ret, irq;
581 587
588 if (pdata && pdata->wm831x_num)
589 id = (pdata->wm831x_num * 10) + 1;
590 else
591 id = 0;
592 id = pdev->id - id;
593
582 dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1); 594 dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
583 595
584 if (pdata == NULL || pdata->ldo[id] == NULL) 596 if (pdata == NULL || pdata->ldo[id] == NULL)
@@ -764,11 +776,18 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
764{ 776{
765 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); 777 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
766 struct wm831x_pdata *pdata = wm831x->dev->platform_data; 778 struct wm831x_pdata *pdata = wm831x->dev->platform_data;
767 int id = pdev->id % ARRAY_SIZE(pdata->ldo); 779 int id;
768 struct wm831x_ldo *ldo; 780 struct wm831x_ldo *ldo;
769 struct resource *res; 781 struct resource *res;
770 int ret; 782 int ret;
771 783
784 if (pdata && pdata->wm831x_num)
785 id = (pdata->wm831x_num * 10) + 1;
786 else
787 id = 0;
788 id = pdev->id - id;
789
790
772 dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1); 791 dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
773 792
774 if (pdata == NULL || pdata->ldo[id] == NULL) 793 if (pdata == NULL || pdata->ldo[id] == NULL)
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 35b2958d5106..1a6a690f24db 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -43,7 +43,7 @@ static int wm8994_ldo_enable(struct regulator_dev *rdev)
43 if (!ldo->enable) 43 if (!ldo->enable)
44 return 0; 44 return 0;
45 45
46 gpio_set_value(ldo->enable, 1); 46 gpio_set_value_cansleep(ldo->enable, 1);
47 ldo->is_enabled = true; 47 ldo->is_enabled = true;
48 48
49 return 0; 49 return 0;
@@ -57,7 +57,7 @@ static int wm8994_ldo_disable(struct regulator_dev *rdev)
57 if (!ldo->enable) 57 if (!ldo->enable)
58 return -EINVAL; 58 return -EINVAL;
59 59
60 gpio_set_value(ldo->enable, 0); 60 gpio_set_value_cansleep(ldo->enable, 0);
61 ldo->is_enabled = false; 61 ldo->is_enabled = false;
62 62
63 return 0; 63 return 0;
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 3195dbd3ec34..44e91e598f8d 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -639,7 +639,7 @@ EXPORT_SYMBOL_GPL(rtc_irq_unregister);
639static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled) 639static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
640{ 640{
641 /* 641 /*
642 * We unconditionally cancel the timer here, because otherwise 642 * We always cancel the timer here first, because otherwise
643 * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); 643 * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
644 * when we manage to start the timer before the callback 644 * when we manage to start the timer before the callback
645 * returns HRTIMER_RESTART. 645 * returns HRTIMER_RESTART.
@@ -708,7 +708,7 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
708 int err = 0; 708 int err = 0;
709 unsigned long flags; 709 unsigned long flags;
710 710
711 if (freq <= 0 || freq > 5000) 711 if (freq <= 0 || freq > RTC_MAX_FREQ)
712 return -EINVAL; 712 return -EINVAL;
713retry: 713retry:
714 spin_lock_irqsave(&rtc->irq_task_lock, flags); 714 spin_lock_irqsave(&rtc->irq_task_lock, flags);
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 335551d333b2..14a42a1edc66 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -36,6 +36,7 @@
36 */ 36 */
37struct ep93xx_rtc { 37struct ep93xx_rtc {
38 void __iomem *mmio_base; 38 void __iomem *mmio_base;
39 struct rtc_device *rtc;
39}; 40};
40 41
41static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload, 42static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
@@ -130,7 +131,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
130{ 131{
131 struct ep93xx_rtc *ep93xx_rtc; 132 struct ep93xx_rtc *ep93xx_rtc;
132 struct resource *res; 133 struct resource *res;
133 struct rtc_device *rtc;
134 int err; 134 int err;
135 135
136 ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL); 136 ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL);
@@ -151,12 +151,12 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
151 return -ENXIO; 151 return -ENXIO;
152 152
153 pdev->dev.platform_data = ep93xx_rtc; 153 pdev->dev.platform_data = ep93xx_rtc;
154 platform_set_drvdata(pdev, rtc); 154 platform_set_drvdata(pdev, ep93xx_rtc);
155 155
156 rtc = rtc_device_register(pdev->name, 156 ep93xx_rtc->rtc = rtc_device_register(pdev->name,
157 &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); 157 &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE);
158 if (IS_ERR(rtc)) { 158 if (IS_ERR(ep93xx_rtc->rtc)) {
159 err = PTR_ERR(rtc); 159 err = PTR_ERR(ep93xx_rtc->rtc);
160 goto exit; 160 goto exit;
161 } 161 }
162 162
@@ -167,7 +167,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
167 return 0; 167 return 0;
168 168
169fail: 169fail:
170 rtc_device_unregister(rtc); 170 rtc_device_unregister(ep93xx_rtc->rtc);
171exit: 171exit:
172 platform_set_drvdata(pdev, NULL); 172 platform_set_drvdata(pdev, NULL);
173 pdev->dev.platform_data = NULL; 173 pdev->dev.platform_data = NULL;
@@ -176,11 +176,11 @@ exit:
176 176
177static int __exit ep93xx_rtc_remove(struct platform_device *pdev) 177static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
178{ 178{
179 struct rtc_device *rtc = platform_get_drvdata(pdev); 179 struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
180 180
181 sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); 181 sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
182 platform_set_drvdata(pdev, NULL); 182 platform_set_drvdata(pdev, NULL);
183 rtc_device_unregister(rtc); 183 rtc_device_unregister(ep93xx_rtc->rtc);
184 pdev->dev.platform_data = NULL; 184 pdev->dev.platform_data = NULL;
185 185
186 return 0; 186 return 0;
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index 075f1708deae..c4cf05731118 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -85,6 +85,8 @@ void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
85 time -= tm->tm_hour * 3600; 85 time -= tm->tm_hour * 3600;
86 tm->tm_min = time / 60; 86 tm->tm_min = time / 60;
87 tm->tm_sec = time - tm->tm_min * 60; 87 tm->tm_sec = time - tm->tm_min * 60;
88
89 tm->tm_isdst = 0;
88} 90}
89EXPORT_SYMBOL(rtc_time_to_tm); 91EXPORT_SYMBOL(rtc_time_to_tm);
90 92
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index bcae8dd41496..7789002bdd5c 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -368,7 +368,7 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
368 pr_info("%s: already running\n", pdev->name); 368 pr_info("%s: already running\n", pdev->name);
369 369
370 /* force to 24 hour mode */ 370 /* force to 24 hour mode */
371 new_ctrl = reg & ~(OMAP_RTC_CTRL_SPLIT|OMAP_RTC_CTRL_AUTO_COMP); 371 new_ctrl = reg & (OMAP_RTC_CTRL_SPLIT|OMAP_RTC_CTRL_AUTO_COMP);
372 new_ctrl |= OMAP_RTC_CTRL_STOP; 372 new_ctrl |= OMAP_RTC_CTRL_STOP;
373 373
374 /* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE: 374 /* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE:
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 9329dbb9ebab..4e7c04e773e0 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -152,10 +152,6 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
152 goto retry_get_time; 152 goto retry_get_time;
153 } 153 }
154 154
155 pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n",
156 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
157 rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
158
159 rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); 155 rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
160 rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); 156 rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
161 rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); 157 rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
@@ -164,6 +160,11 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
164 rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); 160 rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
165 161
166 rtc_tm->tm_year += 100; 162 rtc_tm->tm_year += 100;
163
164 pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n",
165 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
166 rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
167
167 rtc_tm->tm_mon -= 1; 168 rtc_tm->tm_mon -= 1;
168 169
169 clk_disable(rtc_clk); 170 clk_disable(rtc_clk);
@@ -269,10 +270,9 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
269 clk_enable(rtc_clk); 270 clk_enable(rtc_clk);
270 pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n", 271 pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
271 alrm->enabled, 272 alrm->enabled,
272 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, 273 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
273 tm->tm_hour, tm->tm_min, tm->tm_sec); 274 tm->tm_hour, tm->tm_min, tm->tm_sec);
274 275
275
276 alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; 276 alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
277 writeb(0x00, base + S3C2410_RTCALM); 277 writeb(0x00, base + S3C2410_RTCALM);
278 278
@@ -319,49 +319,7 @@ static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
319 return 0; 319 return 0;
320} 320}
321 321
322static int s3c_rtc_open(struct device *dev)
323{
324 struct platform_device *pdev = to_platform_device(dev);
325 struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
326 int ret;
327
328 ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq,
329 IRQF_DISABLED, "s3c2410-rtc alarm", rtc_dev);
330
331 if (ret) {
332 dev_err(dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret);
333 return ret;
334 }
335
336 ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq,
337 IRQF_DISABLED, "s3c2410-rtc tick", rtc_dev);
338
339 if (ret) {
340 dev_err(dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret);
341 goto tick_err;
342 }
343
344 return ret;
345
346 tick_err:
347 free_irq(s3c_rtc_alarmno, rtc_dev);
348 return ret;
349}
350
351static void s3c_rtc_release(struct device *dev)
352{
353 struct platform_device *pdev = to_platform_device(dev);
354 struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
355
356 /* do not clear AIE here, it may be needed for wake */
357
358 free_irq(s3c_rtc_alarmno, rtc_dev);
359 free_irq(s3c_rtc_tickno, rtc_dev);
360}
361
362static const struct rtc_class_ops s3c_rtcops = { 322static const struct rtc_class_ops s3c_rtcops = {
363 .open = s3c_rtc_open,
364 .release = s3c_rtc_release,
365 .read_time = s3c_rtc_gettime, 323 .read_time = s3c_rtc_gettime,
366 .set_time = s3c_rtc_settime, 324 .set_time = s3c_rtc_settime,
367 .read_alarm = s3c_rtc_getalarm, 325 .read_alarm = s3c_rtc_getalarm,
@@ -425,6 +383,9 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
425{ 383{
426 struct rtc_device *rtc = platform_get_drvdata(dev); 384 struct rtc_device *rtc = platform_get_drvdata(dev);
427 385
386 free_irq(s3c_rtc_alarmno, rtc);
387 free_irq(s3c_rtc_tickno, rtc);
388
428 platform_set_drvdata(dev, NULL); 389 platform_set_drvdata(dev, NULL);
429 rtc_device_unregister(rtc); 390 rtc_device_unregister(rtc);
430 391
@@ -548,10 +509,32 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
548 509
549 s3c_rtc_setfreq(&pdev->dev, 1); 510 s3c_rtc_setfreq(&pdev->dev, 1);
550 511
512 ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq,
513 IRQF_DISABLED, "s3c2410-rtc alarm", rtc);
514 if (ret) {
515 dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret);
516 goto err_alarm_irq;
517 }
518
519 ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq,
520 IRQF_DISABLED, "s3c2410-rtc tick", rtc);
521 if (ret) {
522 dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret);
523 free_irq(s3c_rtc_alarmno, rtc);
524 goto err_tick_irq;
525 }
526
551 clk_disable(rtc_clk); 527 clk_disable(rtc_clk);
552 528
553 return 0; 529 return 0;
554 530
531 err_tick_irq:
532 free_irq(s3c_rtc_alarmno, rtc);
533
534 err_alarm_irq:
535 platform_set_drvdata(pdev, NULL);
536 rtc_device_unregister(rtc);
537
555 err_nortc: 538 err_nortc:
556 s3c_rtc_enable(pdev, 0); 539 s3c_rtc_enable(pdev, 0);
557 clk_disable(rtc_clk); 540 clk_disable(rtc_clk);
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 9a81f778d6b2..20687d55e7a7 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -362,14 +362,6 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
362 int res; 362 int res;
363 u8 rd_reg; 363 u8 rd_reg;
364 364
365#ifdef CONFIG_LOCKDEP
366 /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
367 * we don't want and can't tolerate. Although it might be
368 * friendlier not to borrow this thread context...
369 */
370 local_irq_enable();
371#endif
372
373 res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); 365 res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
374 if (res) 366 if (res)
375 goto out; 367 goto out;
@@ -428,24 +420,12 @@ static struct rtc_class_ops twl_rtc_ops = {
428static int __devinit twl_rtc_probe(struct platform_device *pdev) 420static int __devinit twl_rtc_probe(struct platform_device *pdev)
429{ 421{
430 struct rtc_device *rtc; 422 struct rtc_device *rtc;
431 int ret = 0; 423 int ret = -EINVAL;
432 int irq = platform_get_irq(pdev, 0); 424 int irq = platform_get_irq(pdev, 0);
433 u8 rd_reg; 425 u8 rd_reg;
434 426
435 if (irq <= 0) 427 if (irq <= 0)
436 return -EINVAL; 428 goto out1;
437
438 rtc = rtc_device_register(pdev->name,
439 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
440 if (IS_ERR(rtc)) {
441 ret = PTR_ERR(rtc);
442 dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
443 PTR_ERR(rtc));
444 goto out0;
445
446 }
447
448 platform_set_drvdata(pdev, rtc);
449 429
450 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); 430 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
451 if (ret < 0) 431 if (ret < 0)
@@ -462,14 +442,6 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
462 if (ret < 0) 442 if (ret < 0)
463 goto out1; 443 goto out1;
464 444
465 ret = request_irq(irq, twl_rtc_interrupt,
466 IRQF_TRIGGER_RISING,
467 dev_name(&rtc->dev), rtc);
468 if (ret < 0) {
469 dev_err(&pdev->dev, "IRQ is not free.\n");
470 goto out1;
471 }
472
473 if (twl_class_is_6030()) { 445 if (twl_class_is_6030()) {
474 twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK, 446 twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
475 REG_INT_MSK_LINE_A); 447 REG_INT_MSK_LINE_A);
@@ -480,28 +452,44 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
480 /* Check RTC module status, Enable if it is off */ 452 /* Check RTC module status, Enable if it is off */
481 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG); 453 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
482 if (ret < 0) 454 if (ret < 0)
483 goto out2; 455 goto out1;
484 456
485 if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) { 457 if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
486 dev_info(&pdev->dev, "Enabling TWL-RTC.\n"); 458 dev_info(&pdev->dev, "Enabling TWL-RTC.\n");
487 rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M; 459 rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
488 ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG); 460 ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
489 if (ret < 0) 461 if (ret < 0)
490 goto out2; 462 goto out1;
491 } 463 }
492 464
493 /* init cached IRQ enable bits */ 465 /* init cached IRQ enable bits */
494 ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); 466 ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
495 if (ret < 0) 467 if (ret < 0)
468 goto out1;
469
470 rtc = rtc_device_register(pdev->name,
471 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
472 if (IS_ERR(rtc)) {
473 ret = PTR_ERR(rtc);
474 dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
475 PTR_ERR(rtc));
476 goto out1;
477 }
478
479 ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
480 IRQF_TRIGGER_RISING,
481 dev_name(&rtc->dev), rtc);
482 if (ret < 0) {
483 dev_err(&pdev->dev, "IRQ is not free.\n");
496 goto out2; 484 goto out2;
485 }
497 486
498 return ret; 487 platform_set_drvdata(pdev, rtc);
488 return 0;
499 489
500out2: 490out2:
501 free_irq(irq, rtc);
502out1:
503 rtc_device_unregister(rtc); 491 rtc_device_unregister(rtc);
504out0: 492out1:
505 return ret; 493 return ret;
506} 494}
507 495
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 432444af7ee4..a1d3ddba99cc 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -24,6 +24,7 @@
24#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <linux/debugfs.h> 25#include <linux/debugfs.h>
26#include <linux/seq_file.h> 26#include <linux/seq_file.h>
27#include <linux/vmalloc.h>
27 28
28#include <asm/ccwdev.h> 29#include <asm/ccwdev.h>
29#include <asm/ebcdic.h> 30#include <asm/ebcdic.h>
@@ -888,11 +889,11 @@ char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
888{ 889{
889 char *buffer; 890 char *buffer;
890 891
891 buffer = kmalloc(user_len + 1, GFP_KERNEL); 892 buffer = vmalloc(user_len + 1);
892 if (buffer == NULL) 893 if (buffer == NULL)
893 return ERR_PTR(-ENOMEM); 894 return ERR_PTR(-ENOMEM);
894 if (copy_from_user(buffer, user_buf, user_len) != 0) { 895 if (copy_from_user(buffer, user_buf, user_len) != 0) {
895 kfree(buffer); 896 vfree(buffer);
896 return ERR_PTR(-EFAULT); 897 return ERR_PTR(-EFAULT);
897 } 898 }
898 /* got the string, now strip linefeed. */ 899 /* got the string, now strip linefeed. */
@@ -930,7 +931,7 @@ static ssize_t dasd_stats_write(struct file *file,
930 dasd_profile_off(prof); 931 dasd_profile_off(prof);
931 } else 932 } else
932 rc = -EINVAL; 933 rc = -EINVAL;
933 kfree(buffer); 934 vfree(buffer);
934 return rc; 935 return rc;
935} 936}
936 937
@@ -1042,7 +1043,7 @@ static ssize_t dasd_stats_global_write(struct file *file,
1042 dasd_global_profile_level = DASD_PROFILE_OFF; 1043 dasd_global_profile_level = DASD_PROFILE_OFF;
1043 } else 1044 } else
1044 rc = -EINVAL; 1045 rc = -EINVAL;
1045 kfree(buffer); 1046 vfree(buffer);
1046 return rc; 1047 return rc;
1047} 1048}
1048 1049
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 30fb979d684d..6e835c9fdfcb 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1461,6 +1461,15 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1461 "Read device characteristic failed, rc=%d", rc); 1461 "Read device characteristic failed, rc=%d", rc);
1462 goto out_err3; 1462 goto out_err3;
1463 } 1463 }
1464
1465 if ((device->features & DASD_FEATURE_USERAW) &&
1466 !(private->rdc_data.facilities.RT_in_LR)) {
1467 dev_err(&device->cdev->dev, "The storage server does not "
1468 "support raw-track access\n");
1469 rc = -EINVAL;
1470 goto out_err3;
1471 }
1472
1464 /* find the valid cylinder size */ 1473 /* find the valid cylinder size */
1465 if (private->rdc_data.no_cyl == LV_COMPAT_CYL && 1474 if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
1466 private->rdc_data.long_no_cyl) 1475 private->rdc_data.long_no_cyl)
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index eb4e034378cd..f1a2016829fc 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -249,6 +249,7 @@ static int dasd_ioctl_reset_profile(struct dasd_block *block)
249static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp) 249static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
250{ 250{
251 struct dasd_profile_info_t *data; 251 struct dasd_profile_info_t *data;
252 int rc = 0;
252 253
253 data = kmalloc(sizeof(*data), GFP_KERNEL); 254 data = kmalloc(sizeof(*data), GFP_KERNEL);
254 if (!data) 255 if (!data)
@@ -279,11 +280,14 @@ static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
279 spin_unlock_bh(&block->profile.lock); 280 spin_unlock_bh(&block->profile.lock);
280 } else { 281 } else {
281 spin_unlock_bh(&block->profile.lock); 282 spin_unlock_bh(&block->profile.lock);
282 return -EIO; 283 rc = -EIO;
284 goto out;
283 } 285 }
284 if (copy_to_user(argp, data, sizeof(*data))) 286 if (copy_to_user(argp, data, sizeof(*data)))
285 return -EFAULT; 287 rc = -EFAULT;
286 return 0; 288out:
289 kfree(data);
290 return rc;
287} 291}
288#else 292#else
289static int dasd_ioctl_reset_profile(struct dasd_block *block) 293static int dasd_ioctl_reset_profile(struct dasd_block *block)
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 6c3c5364d082..e12989fff4ff 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -312,14 +312,14 @@ static ssize_t dasd_stats_proc_write(struct file *file,
312 pr_info("The statistics have been reset\n"); 312 pr_info("The statistics have been reset\n");
313 } else 313 } else
314 goto out_parse_error; 314 goto out_parse_error;
315 kfree(buffer); 315 vfree(buffer);
316 return user_len; 316 return user_len;
317out_parse_error: 317out_parse_error:
318 rc = -EINVAL; 318 rc = -EINVAL;
319 pr_warning("%s is not a supported value for /proc/dasd/statistics\n", 319 pr_warning("%s is not a supported value for /proc/dasd/statistics\n",
320 str); 320 str);
321out_error: 321out_error:
322 kfree(buffer); 322 vfree(buffer);
323 return rc; 323 return rc;
324#else 324#else
325 pr_warning("/proc/dasd/statistics: is not activated in this kernel\n"); 325 pr_warning("/proc/dasd/statistics: is not activated in this kernel\n");
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index 7ad30e72f868..5f9f929e891c 100644
--- a/drivers/s390/char/sclp_async.c
+++ b/drivers/s390/char/sclp_async.c
@@ -82,12 +82,9 @@ static int proc_handler_callhome(struct ctl_table *ctl, int write,
82 return -EFAULT; 82 return -EFAULT;
83 } else { 83 } else {
84 len = *count; 84 len = *count;
85 rc = copy_from_user(buf, buffer, sizeof(buf)); 85 rc = kstrtoul_from_user(buffer, len, 0, &val);
86 if (rc != 0) 86 if (rc)
87 return -EFAULT; 87 return rc;
88 buf[sizeof(buf) - 1] = '\0';
89 if (strict_strtoul(buf, 0, &val) != 0)
90 return -EINVAL;
91 if (val != 0 && val != 1) 88 if (val != 0 && val != 1)
92 return -EINVAL; 89 return -EINVAL;
93 callhome_enabled = val; 90 callhome_enabled = val;
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index be55fb2b1b1c..837e010299a8 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -383,8 +383,10 @@ static int sclp_attach_storage(u8 id)
383 switch (sccb->header.response_code) { 383 switch (sccb->header.response_code) {
384 case 0x0020: 384 case 0x0020:
385 set_bit(id, sclp_storage_ids); 385 set_bit(id, sclp_storage_ids);
386 for (i = 0; i < sccb->assigned; i++) 386 for (i = 0; i < sccb->assigned; i++) {
387 sclp_unassign_storage(sccb->entries[i] >> 16); 387 if (sccb->entries[i])
388 sclp_unassign_storage(sccb->entries[i] >> 16);
389 }
388 break; 390 break;
389 default: 391 default:
390 rc = -EIO; 392 rc = -EIO;
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 7bc643f3f5ab..e5c966462c5a 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -14,6 +14,8 @@
14#include "chsc.h" 14#include "chsc.h"
15 15
16#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */ 16#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */
17#define QDIO_BUSY_BIT_RETRY_DELAY 10 /* 10 milliseconds */
18#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */
17#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */ 19#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */
18 20
19/* 21/*
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index f8b03a636e49..0e615cb912d0 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -188,19 +188,13 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
188 struct qdio_irq *irq_ptr = seq->private; 188 struct qdio_irq *irq_ptr = seq->private;
189 struct qdio_q *q; 189 struct qdio_q *q;
190 unsigned long val; 190 unsigned long val;
191 char buf[8];
192 int ret, i; 191 int ret, i;
193 192
194 if (!irq_ptr) 193 if (!irq_ptr)
195 return 0; 194 return 0;
196 if (count >= sizeof(buf)) 195
197 return -EINVAL; 196 ret = kstrtoul_from_user(ubuf, count, 10, &val);
198 if (copy_from_user(&buf, ubuf, count)) 197 if (ret)
199 return -EFAULT;
200 buf[count] = 0;
201
202 ret = strict_strtoul(buf, 10, &val);
203 if (ret < 0)
204 return ret; 198 return ret;
205 199
206 switch (val) { 200 switch (val) {
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index e58169c32474..288c9140290e 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -313,7 +313,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
313 unsigned long schid = *((u32 *) &q->irq_ptr->schid); 313 unsigned long schid = *((u32 *) &q->irq_ptr->schid);
314 unsigned int fc = QDIO_SIGA_WRITE; 314 unsigned int fc = QDIO_SIGA_WRITE;
315 u64 start_time = 0; 315 u64 start_time = 0;
316 int cc; 316 int retries = 0, cc;
317 317
318 if (is_qebsm(q)) { 318 if (is_qebsm(q)) {
319 schid = q->irq_ptr->sch_token; 319 schid = q->irq_ptr->sch_token;
@@ -325,6 +325,7 @@ again:
325 /* hipersocket busy condition */ 325 /* hipersocket busy condition */
326 if (unlikely(*busy_bit)) { 326 if (unlikely(*busy_bit)) {
327 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2); 327 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
328 retries++;
328 329
329 if (!start_time) { 330 if (!start_time) {
330 start_time = get_clock(); 331 start_time = get_clock();
@@ -333,6 +334,11 @@ again:
333 if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) 334 if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
334 goto again; 335 goto again;
335 } 336 }
337 if (retries) {
338 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
339 "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
340 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
341 }
336 return cc; 342 return cc;
337} 343}
338 344
@@ -728,13 +734,14 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
728 734
729static int qdio_kick_outbound_q(struct qdio_q *q) 735static int qdio_kick_outbound_q(struct qdio_q *q)
730{ 736{
737 int retries = 0, cc;
731 unsigned int busy_bit; 738 unsigned int busy_bit;
732 int cc;
733 739
734 if (!need_siga_out(q)) 740 if (!need_siga_out(q))
735 return 0; 741 return 0;
736 742
737 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); 743 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
744retry:
738 qperf_inc(q, siga_write); 745 qperf_inc(q, siga_write);
739 746
740 cc = qdio_siga_output(q, &busy_bit); 747 cc = qdio_siga_output(q, &busy_bit);
@@ -743,7 +750,11 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
743 break; 750 break;
744 case 2: 751 case 2:
745 if (busy_bit) { 752 if (busy_bit) {
746 DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr); 753 while (++retries < QDIO_BUSY_BIT_RETRIES) {
754 mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
755 goto retry;
756 }
757 DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
747 cc |= QDIO_ERROR_SIGA_BUSY; 758 cc |= QDIO_ERROR_SIGA_BUSY;
748 } else 759 } else
749 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr); 760 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
@@ -753,6 +764,10 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
753 DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); 764 DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
754 break; 765 break;
755 } 766 }
767 if (retries) {
768 DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
769 DBF_ERROR("count:%u", retries);
770 }
756 return cc; 771 return cc;
757} 772}
758 773
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index 3b0af1102bf4..a796de935054 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -27,6 +27,7 @@
27struct bfa_s; 27struct bfa_s;
28 28
29typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m); 29typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
30typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
30 31
31/* 32/*
32 * Interrupt message handlers 33 * Interrupt message handlers
@@ -121,6 +122,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
121#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \ 122#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
122 (__hcb_qe)->cbfn = (__cbfn); \ 123 (__hcb_qe)->cbfn = (__cbfn); \
123 (__hcb_qe)->cbarg = (__cbarg); \ 124 (__hcb_qe)->cbarg = (__cbarg); \
125 (__hcb_qe)->pre_rmv = BFA_FALSE; \
124 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \ 126 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
125 } while (0) 127 } while (0)
126 128
@@ -135,6 +137,11 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
135 } \ 137 } \
136 } while (0) 138 } while (0)
137 139
140#define bfa_cb_queue_status(__bfa, __hcb_qe, __status) do { \
141 (__hcb_qe)->fw_status = (__status); \
142 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
143} while (0)
144
138#define bfa_cb_queue_done(__hcb_qe) do { \ 145#define bfa_cb_queue_done(__hcb_qe) do { \
139 (__hcb_qe)->once = BFA_FALSE; \ 146 (__hcb_qe)->once = BFA_FALSE; \
140 } while (0) 147 } while (0)
@@ -177,7 +184,7 @@ struct bfa_msix_s {
177struct bfa_hwif_s { 184struct bfa_hwif_s {
178 void (*hw_reginit)(struct bfa_s *bfa); 185 void (*hw_reginit)(struct bfa_s *bfa);
179 void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq); 186 void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
180 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq); 187 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq, u32 ci);
181 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs); 188 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
182 void (*hw_msix_ctrl_install)(struct bfa_s *bfa); 189 void (*hw_msix_ctrl_install)(struct bfa_s *bfa);
183 void (*hw_msix_queue_install)(struct bfa_s *bfa); 190 void (*hw_msix_queue_install)(struct bfa_s *bfa);
@@ -268,10 +275,8 @@ struct bfa_iocfc_s {
268 ((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa)) 275 ((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa))
269#define bfa_msix_uninstall(__bfa) \ 276#define bfa_msix_uninstall(__bfa) \
270 ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa)) 277 ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
271#define bfa_isr_rspq_ack(__bfa, __queue) do { \ 278#define bfa_isr_rspq_ack(__bfa, __queue, __ci) \
272 if ((__bfa)->iocfc.hwif.hw_rspq_ack) \ 279 ((__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue, __ci))
273 (__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue); \
274} while (0)
275#define bfa_isr_reqq_ack(__bfa, __queue) do { \ 280#define bfa_isr_reqq_ack(__bfa, __queue) do { \
276 if ((__bfa)->iocfc.hwif.hw_reqq_ack) \ 281 if ((__bfa)->iocfc.hwif.hw_reqq_ack) \
277 (__bfa)->iocfc.hwif.hw_reqq_ack(__bfa, __queue); \ 282 (__bfa)->iocfc.hwif.hw_reqq_ack(__bfa, __queue); \
@@ -311,7 +316,7 @@ void bfa_msix_rspq(struct bfa_s *bfa, int vec);
311void bfa_msix_lpu_err(struct bfa_s *bfa, int vec); 316void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
312 317
313void bfa_hwcb_reginit(struct bfa_s *bfa); 318void bfa_hwcb_reginit(struct bfa_s *bfa);
314void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq); 319void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
315void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs); 320void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
316void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa); 321void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa);
317void bfa_hwcb_msix_queue_install(struct bfa_s *bfa); 322void bfa_hwcb_msix_queue_install(struct bfa_s *bfa);
@@ -324,7 +329,8 @@ void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
324void bfa_hwct_reginit(struct bfa_s *bfa); 329void bfa_hwct_reginit(struct bfa_s *bfa);
325void bfa_hwct2_reginit(struct bfa_s *bfa); 330void bfa_hwct2_reginit(struct bfa_s *bfa);
326void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq); 331void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
327void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq); 332void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
333void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
328void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs); 334void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
329void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa); 335void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa);
330void bfa_hwct_msix_queue_install(struct bfa_s *bfa); 336void bfa_hwct_msix_queue_install(struct bfa_s *bfa);
@@ -376,6 +382,22 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
376#define bfa_get_fw_clock_res(__bfa) \ 382#define bfa_get_fw_clock_res(__bfa) \
377 ((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res) 383 ((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res)
378 384
385/*
386 * lun mask macros return NULL when min cfg is enabled and there is
387 * no memory allocated for lunmask.
388 */
389#define bfa_get_lun_mask(__bfa) \
390 ((&(__bfa)->modules.dconf_mod)->min_cfg) ? NULL : \
391 (&(BFA_DCONF_MOD(__bfa)->dconf->lun_mask))
392
393#define bfa_get_lun_mask_list(_bfa) \
394 ((&(_bfa)->modules.dconf_mod)->min_cfg) ? NULL : \
395 (bfa_get_lun_mask(_bfa)->lun_list)
396
397#define bfa_get_lun_mask_status(_bfa) \
398 (((&(_bfa)->modules.dconf_mod)->min_cfg) \
399 ? BFA_LUNMASK_MINCFG : ((bfa_get_lun_mask(_bfa))->status))
400
379void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids); 401void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
380void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg); 402void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
381void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg); 403void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
@@ -406,7 +428,22 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
406 428
407void bfa_iocfc_enable(struct bfa_s *bfa); 429void bfa_iocfc_enable(struct bfa_s *bfa);
408void bfa_iocfc_disable(struct bfa_s *bfa); 430void bfa_iocfc_disable(struct bfa_s *bfa);
431void bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status);
409#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ 432#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
410 bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout) 433 bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
411 434
435struct bfa_cb_pending_q_s {
436 struct bfa_cb_qe_s hcb_qe;
437 void *data; /* Driver buffer */
438};
439
440/* Common macros to operate on pending stats/attr apis */
441#define bfa_pending_q_init(__qe, __cbfn, __cbarg, __data) do { \
442 bfa_q_qe_init(&((__qe)->hcb_qe.qe)); \
443 (__qe)->hcb_qe.cbfn = (__cbfn); \
444 (__qe)->hcb_qe.cbarg = (__cbarg); \
445 (__qe)->hcb_qe.pre_rmv = BFA_TRUE; \
446 (__qe)->data = (__data); \
447} while (0)
448
412#endif /* __BFA_H__ */ 449#endif /* __BFA_H__ */
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index c38e589105a5..4bd546bcc240 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -33,6 +33,7 @@ static struct bfa_module_s *hal_mods[] = {
33 &hal_mod_uf, 33 &hal_mod_uf,
34 &hal_mod_rport, 34 &hal_mod_rport,
35 &hal_mod_fcp, 35 &hal_mod_fcp,
36 &hal_mod_dconf,
36 NULL 37 NULL
37}; 38};
38 39
@@ -237,8 +238,6 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid)
237 u32 pi, ci; 238 u32 pi, ci;
238 struct list_head *waitq; 239 struct list_head *waitq;
239 240
240 bfa_isr_rspq_ack(bfa, qid);
241
242 ci = bfa_rspq_ci(bfa, qid); 241 ci = bfa_rspq_ci(bfa, qid);
243 pi = bfa_rspq_pi(bfa, qid); 242 pi = bfa_rspq_pi(bfa, qid);
244 243
@@ -251,11 +250,9 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid)
251 } 250 }
252 251
253 /* 252 /*
254 * update CI 253 * acknowledge RME completions and update CI
255 */ 254 */
256 bfa_rspq_ci(bfa, qid) = pi; 255 bfa_isr_rspq_ack(bfa, qid, ci);
257 writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
258 mmiowb();
259 256
260 /* 257 /*
261 * Resume any pending requests in the corresponding reqq. 258 * Resume any pending requests in the corresponding reqq.
@@ -325,23 +322,19 @@ bfa_intx(struct bfa_s *bfa)
325 int queue; 322 int queue;
326 323
327 intr = readl(bfa->iocfc.bfa_regs.intr_status); 324 intr = readl(bfa->iocfc.bfa_regs.intr_status);
328 if (!intr)
329 return BFA_FALSE;
330 325
331 qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK); 326 qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
332 if (qintr) 327 if (qintr)
333 writel(qintr, bfa->iocfc.bfa_regs.intr_status); 328 writel(qintr, bfa->iocfc.bfa_regs.intr_status);
334 329
335 /* 330 /*
336 * RME completion queue interrupt 331 * Unconditional RME completion queue interrupt
337 */ 332 */
338 qintr = intr & __HFN_INT_RME_MASK; 333 if (bfa->queue_process) {
339 if (qintr && bfa->queue_process) {
340 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) 334 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
341 bfa_isr_rspq(bfa, queue); 335 bfa_isr_rspq(bfa, queue);
342 } 336 }
343 337
344 intr &= ~qintr;
345 if (!intr) 338 if (!intr)
346 return BFA_TRUE; 339 return BFA_TRUE;
347 340
@@ -432,7 +425,8 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
432 __HFN_INT_MBOX_LPU1_CT2); 425 __HFN_INT_MBOX_LPU1_CT2);
433 intr &= __HFN_INT_ERR_MASK_CT2; 426 intr &= __HFN_INT_ERR_MASK_CT2;
434 } else { 427 } else {
435 halt_isr = intr & __HFN_INT_LL_HALT; 428 halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
429 (intr & __HFN_INT_LL_HALT) : 0;
436 pss_isr = intr & __HFN_INT_ERR_PSS; 430 pss_isr = intr & __HFN_INT_ERR_PSS;
437 lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1); 431 lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
438 intr &= __HFN_INT_ERR_MASK; 432 intr &= __HFN_INT_ERR_MASK;
@@ -578,7 +572,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
578 } else { 572 } else {
579 iocfc->hwif.hw_reginit = bfa_hwcb_reginit; 573 iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
580 iocfc->hwif.hw_reqq_ack = NULL; 574 iocfc->hwif.hw_reqq_ack = NULL;
581 iocfc->hwif.hw_rspq_ack = NULL; 575 iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
582 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init; 576 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
583 iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install; 577 iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
584 iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install; 578 iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
@@ -595,7 +589,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
595 if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) { 589 if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
596 iocfc->hwif.hw_reginit = bfa_hwct2_reginit; 590 iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
597 iocfc->hwif.hw_isr_mode_set = NULL; 591 iocfc->hwif.hw_isr_mode_set = NULL;
598 iocfc->hwif.hw_rspq_ack = NULL; 592 iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
599 } 593 }
600 594
601 iocfc->hwif.hw_reginit(bfa); 595 iocfc->hwif.hw_reginit(bfa);
@@ -685,7 +679,7 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
685 679
686 bfa->queue_process = BFA_TRUE; 680 bfa->queue_process = BFA_TRUE;
687 for (i = 0; i < BFI_IOC_MAX_CQS; i++) 681 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
688 bfa_isr_rspq_ack(bfa, i); 682 bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));
689 683
690 for (i = 0; hal_mods[i]; i++) 684 for (i = 0; hal_mods[i]; i++)
691 hal_mods[i]->start(bfa); 685 hal_mods[i]->start(bfa);
@@ -709,7 +703,7 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
709 struct bfa_s *bfa = bfa_arg; 703 struct bfa_s *bfa = bfa_arg;
710 704
711 if (complete) { 705 if (complete) {
712 if (bfa->iocfc.cfgdone) 706 if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone)
713 bfa_cb_init(bfa->bfad, BFA_STATUS_OK); 707 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
714 else 708 else
715 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED); 709 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
@@ -822,9 +816,11 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
822 */ 816 */
823 bfa_fcport_init(bfa); 817 bfa_fcport_init(bfa);
824 818
825 if (iocfc->action == BFA_IOCFC_ACT_INIT) 819 if (iocfc->action == BFA_IOCFC_ACT_INIT) {
826 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa); 820 if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
827 else { 821 bfa_cb_queue(bfa, &iocfc->init_hcb_qe,
822 bfa_iocfc_init_cb, bfa);
823 } else {
828 if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE) 824 if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
829 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe, 825 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
830 bfa_iocfc_enable_cb, bfa); 826 bfa_iocfc_enable_cb, bfa);
@@ -1045,6 +1041,7 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
1045 } 1041 }
1046 1042
1047 bfa_iocfc_send_cfg(bfa); 1043 bfa_iocfc_send_cfg(bfa);
1044 bfa_dconf_modinit(bfa);
1048} 1045}
1049 1046
1050/* 1047/*
@@ -1207,7 +1204,9 @@ bfa_iocfc_stop(struct bfa_s *bfa)
1207 bfa->iocfc.action = BFA_IOCFC_ACT_STOP; 1204 bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
1208 1205
1209 bfa->queue_process = BFA_FALSE; 1206 bfa->queue_process = BFA_FALSE;
1210 bfa_ioc_disable(&bfa->ioc); 1207 bfa_dconf_modexit(bfa);
1208 if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
1209 bfa_ioc_disable(&bfa->ioc);
1211} 1210}
1212 1211
1213void 1212void
@@ -1540,10 +1539,17 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
1540 struct list_head *qe; 1539 struct list_head *qe;
1541 struct list_head *qen; 1540 struct list_head *qen;
1542 struct bfa_cb_qe_s *hcb_qe; 1541 struct bfa_cb_qe_s *hcb_qe;
1542 bfa_cb_cbfn_status_t cbfn;
1543 1543
1544 list_for_each_safe(qe, qen, comp_q) { 1544 list_for_each_safe(qe, qen, comp_q) {
1545 hcb_qe = (struct bfa_cb_qe_s *) qe; 1545 hcb_qe = (struct bfa_cb_qe_s *) qe;
1546 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE); 1546 if (hcb_qe->pre_rmv) {
1547 /* qe is invalid after return, dequeue before cbfn() */
1548 list_del(qe);
1549 cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
1550 cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
1551 } else
1552 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
1547 } 1553 }
1548} 1554}
1549 1555
@@ -1556,10 +1562,20 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
1556 while (!list_empty(comp_q)) { 1562 while (!list_empty(comp_q)) {
1557 bfa_q_deq(comp_q, &qe); 1563 bfa_q_deq(comp_q, &qe);
1558 hcb_qe = (struct bfa_cb_qe_s *) qe; 1564 hcb_qe = (struct bfa_cb_qe_s *) qe;
1565 WARN_ON(hcb_qe->pre_rmv);
1559 hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE); 1566 hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
1560 } 1567 }
1561} 1568}
1562 1569
1570void
1571bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status)
1572{
1573 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) {
1574 if (bfa->iocfc.cfgdone == BFA_TRUE)
1575 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
1576 bfa_iocfc_init_cb, bfa);
1577 }
1578}
1563 1579
1564/* 1580/*
1565 * Return the list of PCI vendor/device id lists supported by this 1581 * Return the list of PCI vendor/device id lists supported by this
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index ed8d31b0188b..7b3d235d20b4 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -144,6 +144,7 @@ enum bfa_status {
144 BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */ 144 BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */
145 BFA_STATUS_CMD_NOTSUPP = 26, /* Command/API not supported */ 145 BFA_STATUS_CMD_NOTSUPP = 26, /* Command/API not supported */
146 BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */ 146 BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */
147 BFA_STATUS_UNKNOWN_VWWN = 30, /* VPORT PWWN not found */
147 BFA_STATUS_PORT_OFFLINE = 34, /* Port is not online */ 148 BFA_STATUS_PORT_OFFLINE = 34, /* Port is not online */
148 BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */ 149 BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */
149 BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled disable port */ 150 BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled disable port */
@@ -164,6 +165,8 @@ enum bfa_status {
164 BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */ 165 BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */
165 BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot 166 BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot
166 * configuration */ 167 * configuration */
168 BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */
169 BFA_STATUS_INVALID_VENDOR = 158, /* Invalid switch vendor */
167 BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */ 170 BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */
168 BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on 171 BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on
169 * this adapter */ 172 * this adapter */
@@ -172,11 +175,15 @@ enum bfa_status {
172 BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */ 175 BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */
173 BFA_STATUS_PHY_NOT_PRESENT = 183, /* PHY module not present */ 176 BFA_STATUS_PHY_NOT_PRESENT = 183, /* PHY module not present */
174 BFA_STATUS_FEATURE_NOT_SUPPORTED = 192, /* Feature not supported */ 177 BFA_STATUS_FEATURE_NOT_SUPPORTED = 192, /* Feature not supported */
178 BFA_STATUS_ENTRY_EXISTS = 193, /* Entry already exists */
179 BFA_STATUS_ENTRY_NOT_EXISTS = 194, /* Entry does not exist */
180 BFA_STATUS_NO_CHANGE = 195, /* Feature already in that state */
175 BFA_STATUS_FAA_ENABLED = 197, /* FAA is already enabled */ 181 BFA_STATUS_FAA_ENABLED = 197, /* FAA is already enabled */
176 BFA_STATUS_FAA_DISABLED = 198, /* FAA is already disabled */ 182 BFA_STATUS_FAA_DISABLED = 198, /* FAA is already disabled */
177 BFA_STATUS_FAA_ACQUIRED = 199, /* FAA is already acquired */ 183 BFA_STATUS_FAA_ACQUIRED = 199, /* FAA is already acquired */
178 BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */ 184 BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */
179 BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */ 185 BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */
186 BFA_STATUS_MAX_ENTRY_REACHED = 212, /* MAX entry reached */
180 BFA_STATUS_MAX_VAL /* Unknown error code */ 187 BFA_STATUS_MAX_VAL /* Unknown error code */
181}; 188};
182#define bfa_status_t enum bfa_status 189#define bfa_status_t enum bfa_status
@@ -359,6 +366,139 @@ struct bfa_ioc_attr_s {
359}; 366};
360 367
361/* 368/*
369 * AEN related definitions
370 */
371enum bfa_aen_category {
372 BFA_AEN_CAT_ADAPTER = 1,
373 BFA_AEN_CAT_PORT = 2,
374 BFA_AEN_CAT_LPORT = 3,
375 BFA_AEN_CAT_RPORT = 4,
376 BFA_AEN_CAT_ITNIM = 5,
377 BFA_AEN_CAT_AUDIT = 8,
378 BFA_AEN_CAT_IOC = 9,
379};
380
381/* BFA adapter level events */
382enum bfa_adapter_aen_event {
383 BFA_ADAPTER_AEN_ADD = 1, /* New Adapter found event */
384 BFA_ADAPTER_AEN_REMOVE = 2, /* Adapter removed event */
385};
386
387struct bfa_adapter_aen_data_s {
388 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
389 u32 nports; /* Number of NPorts */
390 wwn_t pwwn; /* WWN of one of its physical port */
391};
392
393/* BFA physical port Level events */
394enum bfa_port_aen_event {
395 BFA_PORT_AEN_ONLINE = 1, /* Physical Port online event */
396 BFA_PORT_AEN_OFFLINE = 2, /* Physical Port offline event */
397 BFA_PORT_AEN_RLIR = 3, /* RLIR event, not supported */
398 BFA_PORT_AEN_SFP_INSERT = 4, /* SFP inserted event */
399 BFA_PORT_AEN_SFP_REMOVE = 5, /* SFP removed event */
400 BFA_PORT_AEN_SFP_POM = 6, /* SFP POM event */
401 BFA_PORT_AEN_ENABLE = 7, /* Physical Port enable event */
402 BFA_PORT_AEN_DISABLE = 8, /* Physical Port disable event */
403 BFA_PORT_AEN_AUTH_ON = 9, /* Physical Port auth success event */
404 BFA_PORT_AEN_AUTH_OFF = 10, /* Physical Port auth fail event */
405 BFA_PORT_AEN_DISCONNECT = 11, /* Physical Port disconnect event */
406 BFA_PORT_AEN_QOS_NEG = 12, /* Base Port QOS negotiation event */
407 BFA_PORT_AEN_FABRIC_NAME_CHANGE = 13, /* Fabric Name/WWN change */
408 BFA_PORT_AEN_SFP_ACCESS_ERROR = 14, /* SFP read error event */
409 BFA_PORT_AEN_SFP_UNSUPPORT = 15, /* Unsupported SFP event */
410};
411
412enum bfa_port_aen_sfp_pom {
413 BFA_PORT_AEN_SFP_POM_GREEN = 1, /* Normal */
414 BFA_PORT_AEN_SFP_POM_AMBER = 2, /* Warning */
415 BFA_PORT_AEN_SFP_POM_RED = 3, /* Critical */
416 BFA_PORT_AEN_SFP_POM_MAX = BFA_PORT_AEN_SFP_POM_RED
417};
418
419struct bfa_port_aen_data_s {
420 wwn_t pwwn; /* WWN of the physical port */
421 wwn_t fwwn; /* WWN of the fabric port */
422 u32 phy_port_num; /* For SFP related events */
423 u16 ioc_type;
424 u16 level; /* Only transitions will be informed */
425 mac_t mac; /* MAC address of the ethernet port */
426 u16 rsvd;
427};
428
429/* BFA AEN logical port events */
430enum bfa_lport_aen_event {
431 BFA_LPORT_AEN_NEW = 1, /* LPort created event */
432 BFA_LPORT_AEN_DELETE = 2, /* LPort deleted event */
433 BFA_LPORT_AEN_ONLINE = 3, /* LPort online event */
434 BFA_LPORT_AEN_OFFLINE = 4, /* LPort offline event */
435 BFA_LPORT_AEN_DISCONNECT = 5, /* LPort disconnect event */
436 BFA_LPORT_AEN_NEW_PROP = 6, /* VPort created event */
437 BFA_LPORT_AEN_DELETE_PROP = 7, /* VPort deleted event */
438 BFA_LPORT_AEN_NEW_STANDARD = 8, /* VPort created event */
439 BFA_LPORT_AEN_DELETE_STANDARD = 9, /* VPort deleted event */
440 BFA_LPORT_AEN_NPIV_DUP_WWN = 10, /* VPort with duplicate WWN */
441 BFA_LPORT_AEN_NPIV_FABRIC_MAX = 11, /* Max NPIV in fabric/fport */
442 BFA_LPORT_AEN_NPIV_UNKNOWN = 12, /* Unknown NPIV Error code */
443};
444
445struct bfa_lport_aen_data_s {
446 u16 vf_id; /* vf_id of this logical port */
447 u16 roles; /* Logical port mode,IM/TM/IP etc */
448 u32 rsvd;
449 wwn_t ppwwn; /* WWN of its physical port */
450 wwn_t lpwwn; /* WWN of this logical port */
451};
452
453/* BFA ITNIM events */
454enum bfa_itnim_aen_event {
455 BFA_ITNIM_AEN_ONLINE = 1, /* Target online */
456 BFA_ITNIM_AEN_OFFLINE = 2, /* Target offline */
457 BFA_ITNIM_AEN_DISCONNECT = 3, /* Target disconnected */
458};
459
460struct bfa_itnim_aen_data_s {
461 u16 vf_id; /* vf_id of the IT nexus */
462 u16 rsvd[3];
463 wwn_t ppwwn; /* WWN of its physical port */
464 wwn_t lpwwn; /* WWN of logical port */
465 wwn_t rpwwn; /* WWN of remote(target) port */
466};
467
468/* BFA audit events */
469enum bfa_audit_aen_event {
470 BFA_AUDIT_AEN_AUTH_ENABLE = 1,
471 BFA_AUDIT_AEN_AUTH_DISABLE = 2,
472 BFA_AUDIT_AEN_FLASH_ERASE = 3,
473 BFA_AUDIT_AEN_FLASH_UPDATE = 4,
474};
475
476struct bfa_audit_aen_data_s {
477 wwn_t pwwn;
478 int partition_inst;
479 int partition_type;
480};
481
482/* BFA IOC level events */
483enum bfa_ioc_aen_event {
484 BFA_IOC_AEN_HBGOOD = 1, /* Heart Beat restore event */
485 BFA_IOC_AEN_HBFAIL = 2, /* Heart Beat failure event */
486 BFA_IOC_AEN_ENABLE = 3, /* IOC enabled event */
487 BFA_IOC_AEN_DISABLE = 4, /* IOC disabled event */
488 BFA_IOC_AEN_FWMISMATCH = 5, /* IOC firmware mismatch */
489 BFA_IOC_AEN_FWCFG_ERROR = 6, /* IOC firmware config error */
490 BFA_IOC_AEN_INVALID_VENDOR = 7,
491 BFA_IOC_AEN_INVALID_NWWN = 8, /* Zero NWWN */
492 BFA_IOC_AEN_INVALID_PWWN = 9 /* Zero PWWN */
493};
494
495struct bfa_ioc_aen_data_s {
496 wwn_t pwwn;
497 u16 ioc_type;
498 mac_t mac;
499};
500
501/*
362 * ---------------------- mfg definitions ------------ 502 * ---------------------- mfg definitions ------------
363 */ 503 */
364 504
@@ -520,6 +660,20 @@ struct bfa_boot_bootlun_s {
520/* 660/*
521 * BOOT boot configuraton 661 * BOOT boot configuraton
522 */ 662 */
663struct bfa_boot_cfg_s {
664 u8 version;
665 u8 rsvd1;
666 u16 chksum;
667 u8 enable; /* enable/disable SAN boot */
668 u8 speed; /* boot speed settings */
669 u8 topology; /* boot topology setting */
670 u8 bootopt; /* bfa_boot_bootopt_t */
671 u32 nbluns; /* number of boot luns */
672 u32 rsvd2;
673 struct bfa_boot_bootlun_s blun[BFA_BOOT_BOOTLUN_MAX];
674 struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX];
675};
676
523struct bfa_boot_pbc_s { 677struct bfa_boot_pbc_s {
524 u8 enable; /* enable/disable SAN boot */ 678 u8 enable; /* enable/disable SAN boot */
525 u8 speed; /* boot speed settings */ 679 u8 speed; /* boot speed settings */
@@ -529,6 +683,15 @@ struct bfa_boot_pbc_s {
529 struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX]; 683 struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
530}; 684};
531 685
686struct bfa_ethboot_cfg_s {
687 u8 version;
688 u8 rsvd1;
689 u16 chksum;
690 u8 enable; /* enable/disable Eth/PXE boot */
691 u8 rsvd2;
692 u16 vlan;
693};
694
532/* 695/*
533 * ASIC block configuration related structures 696 * ASIC block configuration related structures
534 */ 697 */
@@ -587,6 +750,14 @@ struct bfa_ablk_cfg_s {
587 */ 750 */
588#define SFP_DIAGMON_SIZE 10 /* num bytes of diag monitor data */ 751#define SFP_DIAGMON_SIZE 10 /* num bytes of diag monitor data */
589 752
753/* SFP state change notification event */
754#define BFA_SFP_SCN_REMOVED 0
755#define BFA_SFP_SCN_INSERTED 1
756#define BFA_SFP_SCN_POM 2
757#define BFA_SFP_SCN_FAILED 3
758#define BFA_SFP_SCN_UNSUPPORT 4
759#define BFA_SFP_SCN_VALID 5
760
590enum bfa_defs_sfp_media_e { 761enum bfa_defs_sfp_media_e {
591 BFA_SFP_MEDIA_UNKNOWN = 0x00, 762 BFA_SFP_MEDIA_UNKNOWN = 0x00,
592 BFA_SFP_MEDIA_CU = 0x01, 763 BFA_SFP_MEDIA_CU = 0x01,
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 0b97525803fb..863c6ba7d5eb 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -268,6 +268,7 @@ struct bfa_fw_port_snsm_stats_s {
268 u32 error_resets; /* error resets initiated by upsm */ 268 u32 error_resets; /* error resets initiated by upsm */
269 u32 sync_lost; /* Sync loss count */ 269 u32 sync_lost; /* Sync loss count */
270 u32 sig_lost; /* Signal loss count */ 270 u32 sig_lost; /* Signal loss count */
271 u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */
271}; 272};
272 273
273struct bfa_fw_port_physm_stats_s { 274struct bfa_fw_port_physm_stats_s {
@@ -468,6 +469,7 @@ struct bfa_fw_stats_s {
468 * QoS states 469 * QoS states
469 */ 470 */
470enum bfa_qos_state { 471enum bfa_qos_state {
472 BFA_QOS_DISABLED = 0, /* QoS is disabled */
471 BFA_QOS_ONLINE = 1, /* QoS is online */ 473 BFA_QOS_ONLINE = 1, /* QoS is online */
472 BFA_QOS_OFFLINE = 2, /* QoS is offline */ 474 BFA_QOS_OFFLINE = 2, /* QoS is offline */
473}; 475};
@@ -670,6 +672,12 @@ struct bfa_itnim_iostats_s {
670 u32 tm_iocdowns; /* TM cleaned-up due to IOC down */ 672 u32 tm_iocdowns; /* TM cleaned-up due to IOC down */
671 u32 tm_cleanups; /* TM cleanup requests */ 673 u32 tm_cleanups; /* TM cleanup requests */
672 u32 tm_cleanup_comps; /* TM cleanup completions */ 674 u32 tm_cleanup_comps; /* TM cleanup completions */
675 u32 lm_lun_across_sg; /* LM lun is across sg data buf */
676 u32 lm_lun_not_sup; /* LM lun not supported */
677 u32 lm_rpl_data_changed; /* LM report-lun data changed */
678 u32 lm_wire_residue_changed; /* LM report-lun rsp residue changed */
679 u32 lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
680 u32 lm_lun_not_rdy; /* LM lun not ready */
673}; 681};
674 682
675/* Modify char* port_stt[] in bfal_port.c if a new state was added */ 683/* Modify char* port_stt[] in bfal_port.c if a new state was added */
@@ -785,8 +793,51 @@ enum bfa_port_linkstate_rsn {
785 CEE_ISCSI_PRI_PFC_OFF = 42, 793 CEE_ISCSI_PRI_PFC_OFF = 42,
786 CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43 794 CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43
787}; 795};
796
797#define MAX_LUN_MASK_CFG 16
798
799/*
800 * Initially flash content may be fff. On making LUN mask enable and disable
801 * state chnage. when report lun command is being processed it goes from
802 * BFA_LUN_MASK_ACTIVE to BFA_LUN_MASK_FETCH and comes back to
803 * BFA_LUN_MASK_ACTIVE.
804 */
805enum bfa_ioim_lun_mask_state_s {
806 BFA_IOIM_LUN_MASK_INACTIVE = 0,
807 BFA_IOIM_LUN_MASK_ACTIVE = 1,
808 BFA_IOIM_LUN_MASK_FETCHED = 2,
809};
810
811enum bfa_lunmask_state_s {
812 BFA_LUNMASK_DISABLED = 0x00,
813 BFA_LUNMASK_ENABLED = 0x01,
814 BFA_LUNMASK_MINCFG = 0x02,
815 BFA_LUNMASK_UNINITIALIZED = 0xff,
816};
817
788#pragma pack(1) 818#pragma pack(1)
789/* 819/*
820 * LUN mask configuration
821 */
822struct bfa_lun_mask_s {
823 wwn_t lp_wwn;
824 wwn_t rp_wwn;
825 struct scsi_lun lun;
826 u8 ua;
827 u8 rsvd[3];
828 u16 rp_tag;
829 u8 lp_tag;
830 u8 state;
831};
832
833#define MAX_LUN_MASK_CFG 16
834struct bfa_lunmask_cfg_s {
835 u32 status;
836 u32 rsvd;
837 struct bfa_lun_mask_s lun_list[MAX_LUN_MASK_CFG];
838};
839
840/*
790 * Physical port configuration 841 * Physical port configuration
791 */ 842 */
792struct bfa_port_cfg_s { 843struct bfa_port_cfg_s {
@@ -1228,4 +1279,52 @@ struct bfa_cee_stats_s {
1228 1279
1229#pragma pack() 1280#pragma pack()
1230 1281
1282/*
1283 * AEN related definitions
1284 */
1285#define BFAD_NL_VENDOR_ID (((u64)0x01 << SCSI_NL_VID_TYPE_SHIFT) \
1286 | BFA_PCI_VENDOR_ID_BROCADE)
1287
1288/* BFA remote port events */
1289enum bfa_rport_aen_event {
1290 BFA_RPORT_AEN_ONLINE = 1, /* RPort online event */
1291 BFA_RPORT_AEN_OFFLINE = 2, /* RPort offline event */
1292 BFA_RPORT_AEN_DISCONNECT = 3, /* RPort disconnect event */
1293 BFA_RPORT_AEN_QOS_PRIO = 4, /* QOS priority change event */
1294 BFA_RPORT_AEN_QOS_FLOWID = 5, /* QOS flow Id change event */
1295};
1296
1297struct bfa_rport_aen_data_s {
1298 u16 vf_id; /* vf_id of this logical port */
1299 u16 rsvd[3];
1300 wwn_t ppwwn; /* WWN of its physical port */
1301 wwn_t lpwwn; /* WWN of this logical port */
1302 wwn_t rpwwn; /* WWN of this remote port */
1303 union {
1304 struct bfa_rport_qos_attr_s qos;
1305 } priv;
1306};
1307
1308union bfa_aen_data_u {
1309 struct bfa_adapter_aen_data_s adapter;
1310 struct bfa_port_aen_data_s port;
1311 struct bfa_lport_aen_data_s lport;
1312 struct bfa_rport_aen_data_s rport;
1313 struct bfa_itnim_aen_data_s itnim;
1314 struct bfa_audit_aen_data_s audit;
1315 struct bfa_ioc_aen_data_s ioc;
1316};
1317
1318#define BFA_AEN_MAX_ENTRY 512
1319
1320struct bfa_aen_entry_s {
1321 struct list_head qe;
1322 enum bfa_aen_category aen_category;
1323 u32 aen_type;
1324 union bfa_aen_data_u aen_data;
1325 struct timeval aen_tv;
1326 u32 seq_num;
1327 u32 bfad_num;
1328};
1329
1231#endif /* __BFA_DEFS_SVC_H__ */ 1330#endif /* __BFA_DEFS_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 8d0b88f67a38..50b6a1c86195 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -56,6 +56,161 @@ struct scsi_cdb_s {
56 56
57#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */ 57#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */
58 58
59#define SCSI_SENSE_CUR_ERR 0x70
60#define SCSI_SENSE_DEF_ERR 0x71
61
62/*
63 * SCSI additional sense codes
64 */
65#define SCSI_ASC_LUN_NOT_READY 0x04
66#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25
67#define SCSI_ASC_TOCC 0x3F
68
69/*
70 * SCSI additional sense code qualifiers
71 */
72#define SCSI_ASCQ_MAN_INTR_REQ 0x03 /* manual intervention req */
73#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */
74
75/*
76 * Methods of reporting informational exceptions
77 */
78#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attention */
79
80struct scsi_report_luns_data_s {
81 u32 lun_list_length; /* length of LUN list length */
82 u32 reserved;
83 struct scsi_lun lun[1]; /* first LUN in lun list */
84};
85
86struct scsi_inquiry_vendor_s {
87 u8 vendor_id[8];
88};
89
90struct scsi_inquiry_prodid_s {
91 u8 product_id[16];
92};
93
94struct scsi_inquiry_prodrev_s {
95 u8 product_rev[4];
96};
97
98struct scsi_inquiry_data_s {
99#ifdef __BIG_ENDIAN
100 u8 peripheral_qual:3; /* peripheral qualifier */
101 u8 device_type:5; /* peripheral device type */
102 u8 rmb:1; /* removable medium bit */
103 u8 device_type_mod:7; /* device type modifier */
104 u8 version;
105 u8 aenc:1; /* async evt notification capability */
106 u8 trm_iop:1; /* terminate I/O process */
107 u8 norm_aca:1; /* normal ACA supported */
108 u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
109 u8 rsp_data_format:4;
110 u8 additional_len;
111 u8 sccs:1;
112 u8 reserved1:7;
113 u8 reserved2:1;
114 u8 enc_serv:1; /* enclosure service component */
115 u8 reserved3:1;
116 u8 multi_port:1; /* multi-port device */
117 u8 m_chngr:1; /* device in medium transport element */
118 u8 ack_req_q:1; /* SIP specific bit */
119 u8 addr32:1; /* SIP specific bit */
120 u8 addr16:1; /* SIP specific bit */
121 u8 rel_adr:1; /* relative address */
122 u8 w_bus32:1;
123 u8 w_bus16:1;
124 u8 synchronous:1;
125 u8 linked_commands:1;
126 u8 trans_dis:1;
127 u8 cmd_queue:1; /* command queueing supported */
128 u8 soft_reset:1; /* soft reset alternative (VS) */
129#else
130 u8 device_type:5; /* peripheral device type */
131 u8 peripheral_qual:3; /* peripheral qualifier */
132 u8 device_type_mod:7; /* device type modifier */
133 u8 rmb:1; /* removable medium bit */
134 u8 version;
135 u8 rsp_data_format:4;
136 u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
137 u8 norm_aca:1; /* normal ACA supported */
138 u8 terminate_iop:1;/* terminate I/O process */
139 u8 aenc:1; /* async evt notification capability */
140 u8 additional_len;
141 u8 reserved1:7;
142 u8 sccs:1;
143 u8 addr16:1; /* SIP specific bit */
144 u8 addr32:1; /* SIP specific bit */
145 u8 ack_req_q:1; /* SIP specific bit */
146 u8 m_chngr:1; /* device in medium transport element */
147 u8 multi_port:1; /* multi-port device */
148 u8 reserved3:1; /* TBD - Vendor Specific */
149 u8 enc_serv:1; /* enclosure service component */
150 u8 reserved2:1;
151 u8 soft_seset:1; /* soft reset alternative (VS) */
152 u8 cmd_queue:1; /* command queueing supported */
153 u8 trans_dis:1;
154 u8 linked_commands:1;
155 u8 synchronous:1;
156 u8 w_bus16:1;
157 u8 w_bus32:1;
158 u8 rel_adr:1; /* relative address */
159#endif
160 struct scsi_inquiry_vendor_s vendor_id;
161 struct scsi_inquiry_prodid_s product_id;
162 struct scsi_inquiry_prodrev_s product_rev;
163 u8 vendor_specific[20];
164 u8 reserved4[40];
165};
166
167/*
168 * SCSI sense data format
169 */
170struct scsi_sense_s {
171#ifdef __BIG_ENDIAN
172 u8 valid:1;
173 u8 rsp_code:7;
174#else
175 u8 rsp_code:7;
176 u8 valid:1;
177#endif
178 u8 seg_num;
179#ifdef __BIG_ENDIAN
180 u8 file_mark:1;
181 u8 eom:1; /* end of media */
182 u8 ili:1; /* incorrect length indicator */
183 u8 reserved:1;
184 u8 sense_key:4;
185#else
186 u8 sense_key:4;
187 u8 reserved:1;
188 u8 ili:1; /* incorrect length indicator */
189 u8 eom:1; /* end of media */
190 u8 file_mark:1;
191#endif
192 u8 information[4]; /* device-type or cmd specific info */
193 u8 add_sense_length; /* additional sense length */
194 u8 command_info[4];/* command specific information */
195 u8 asc; /* additional sense code */
196 u8 ascq; /* additional sense code qualifier */
197 u8 fru_code; /* field replaceable unit code */
198#ifdef __BIG_ENDIAN
199 u8 sksv:1; /* sense key specific valid */
200 u8 c_d:1; /* command/data bit */
201 u8 res1:2;
202 u8 bpv:1; /* bit pointer valid */
203 u8 bpointer:3; /* bit pointer */
204#else
205 u8 bpointer:3; /* bit pointer */
206 u8 bpv:1; /* bit pointer valid */
207 u8 res1:2;
208 u8 c_d:1; /* command/data bit */
209 u8 sksv:1; /* sense key specific valid */
210#endif
211 u8 fpointer[2]; /* field pointer */
212};
213
59/* 214/*
60 * Fibre Channel Header Structure (FCHS) definition 215 * Fibre Channel Header Structure (FCHS) definition
61 */ 216 */
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index a4e7951c6063..e07bd4745d8b 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -24,6 +24,9 @@ BFA_TRC_FILE(HAL, FCPIM);
24 * BFA ITNIM Related definitions 24 * BFA ITNIM Related definitions
25 */ 25 */
26static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); 26static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
27static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
28static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
29static void bfa_ioim_lm_init(struct bfa_s *bfa);
27 30
28#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \ 31#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
29 (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1)))) 32 (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
@@ -57,6 +60,14 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
57 } \ 60 } \
58} while (0) 61} while (0)
59 62
63#define bfa_ioim_rp_wwn(__ioim) \
64 (((struct bfa_fcs_rport_s *) \
65 (__ioim)->itnim->rport->rport_drv)->pwwn)
66
67#define bfa_ioim_lp_wwn(__ioim) \
68 ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa), \
69 (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn) \
70
60#define bfa_itnim_sler_cb(__itnim) do { \ 71#define bfa_itnim_sler_cb(__itnim) do { \
61 if ((__itnim)->bfa->fcs) \ 72 if ((__itnim)->bfa->fcs) \
62 bfa_cb_itnim_sler((__itnim)->ditn); \ 73 bfa_cb_itnim_sler((__itnim)->ditn); \
@@ -66,6 +77,18 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
66 } \ 77 } \
67} while (0) 78} while (0)
68 79
80enum bfa_ioim_lm_status {
81 BFA_IOIM_LM_PRESENT = 1,
82 BFA_IOIM_LM_LUN_NOT_SUP = 2,
83 BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
84 BFA_IOIM_LM_LUN_NOT_RDY = 4,
85};
86
87enum bfa_ioim_lm_ua_status {
88 BFA_IOIM_LM_UA_RESET = 0,
89 BFA_IOIM_LM_UA_SET = 1,
90};
91
69/* 92/*
70 * itnim state machine event 93 * itnim state machine event
71 */ 94 */
@@ -122,6 +145,9 @@ enum bfa_ioim_event {
122 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */ 145 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
123 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */ 146 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
124 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */ 147 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
148 BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */
149 BFA_IOIM_SM_LM_RPL_DC = 20, /* lunmask report-lun data changed */
150 BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */
125}; 151};
126 152
127 153
@@ -219,6 +245,9 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
219static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete); 245static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
220static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete); 246static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
221static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); 247static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
248static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
249static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
250static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
222 251
223/* 252/*
224 * forward declaration of BFA IO state machine 253 * forward declaration of BFA IO state machine
@@ -416,6 +445,12 @@ bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
416 bfa_fcpim_add_iostats(lstats, rstats, output_reqs); 445 bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
417 bfa_fcpim_add_iostats(lstats, rstats, rd_throughput); 446 bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
418 bfa_fcpim_add_iostats(lstats, rstats, wr_throughput); 447 bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
448 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
449 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
450 bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
451 bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
452 bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
453 bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
419} 454}
420 455
421bfa_status_t 456bfa_status_t
@@ -437,6 +472,59 @@ bfa_fcpim_port_iostats(struct bfa_s *bfa,
437 return BFA_STATUS_OK; 472 return BFA_STATUS_OK;
438} 473}
439 474
475void
476bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
477{
478 struct bfa_itnim_latency_s *io_lat =
479 &(ioim->itnim->ioprofile.io_latency);
480 u32 val, idx;
481
482 val = (u32)(jiffies - ioim->start_time);
483 idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
484 bfa_itnim_ioprofile_update(ioim->itnim, idx);
485
486 io_lat->count[idx]++;
487 io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
488 io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
489 io_lat->avg[idx] += val;
490}
491
492void
493bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
494{
495 ioim->start_time = jiffies;
496}
497
498bfa_status_t
499bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
500{
501 struct bfa_itnim_s *itnim;
502 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
503 struct list_head *qe, *qen;
504
505 /* accumulate IO stats from itnim */
506 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
507 itnim = (struct bfa_itnim_s *) qe;
508 bfa_itnim_clear_stats(itnim);
509 }
510 fcpim->io_profile = BFA_TRUE;
511 fcpim->io_profile_start_time = time;
512 fcpim->profile_comp = bfa_ioim_profile_comp;
513 fcpim->profile_start = bfa_ioim_profile_start;
514 return BFA_STATUS_OK;
515}
516
517bfa_status_t
518bfa_fcpim_profile_off(struct bfa_s *bfa)
519{
520 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
521 fcpim->io_profile = BFA_FALSE;
522 fcpim->io_profile_start_time = 0;
523 fcpim->profile_comp = NULL;
524 fcpim->profile_start = NULL;
525 return BFA_STATUS_OK;
526}
527
440u16 528u16
441bfa_fcpim_qdepth_get(struct bfa_s *bfa) 529bfa_fcpim_qdepth_get(struct bfa_s *bfa)
442{ 530{
@@ -1401,6 +1489,26 @@ bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
1401 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable)); 1489 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
1402} 1490}
1403 1491
1492#define bfa_io_lat_clock_res_div HZ
1493#define bfa_io_lat_clock_res_mul 1000
1494bfa_status_t
1495bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
1496 struct bfa_itnim_ioprofile_s *ioprofile)
1497{
1498 struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
1499 if (!fcpim->io_profile)
1500 return BFA_STATUS_IOPROFILE_OFF;
1501
1502 itnim->ioprofile.index = BFA_IOBUCKET_MAX;
1503 itnim->ioprofile.io_profile_start_time =
1504 bfa_io_profile_start_time(itnim->bfa);
1505 itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
1506 itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
1507 *ioprofile = itnim->ioprofile;
1508
1509 return BFA_STATUS_OK;
1510}
1511
1404void 1512void
1405bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) 1513bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1406{ 1514{
@@ -1469,7 +1577,28 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1469 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); 1577 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1470 WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)); 1578 WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
1471 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, 1579 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1472 __bfa_cb_ioim_abort, ioim); 1580 __bfa_cb_ioim_abort, ioim);
1581 break;
1582
1583 case BFA_IOIM_SM_LM_LUN_NOT_SUP:
1584 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1585 bfa_ioim_move_to_comp_q(ioim);
1586 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1587 __bfa_cb_ioim_lm_lun_not_sup, ioim);
1588 break;
1589
1590 case BFA_IOIM_SM_LM_RPL_DC:
1591 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1592 bfa_ioim_move_to_comp_q(ioim);
1593 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1594 __bfa_cb_ioim_lm_rpl_dc, ioim);
1595 break;
1596
1597 case BFA_IOIM_SM_LM_LUN_NOT_RDY:
1598 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1599 bfa_ioim_move_to_comp_q(ioim);
1600 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1601 __bfa_cb_ioim_lm_lun_not_rdy, ioim);
1473 break; 1602 break;
1474 1603
1475 default: 1604 default:
@@ -2009,6 +2138,264 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2009 } 2138 }
2010} 2139}
2011 2140
2141/*
2142 * This is called from bfa_fcpim_start after the bfa_init() with flash read
2143 * is complete by driver. now invalidate the stale content of lun mask
2144 * like unit attention, rp tag and lp tag.
2145 */
2146static void
2147bfa_ioim_lm_init(struct bfa_s *bfa)
2148{
2149 struct bfa_lun_mask_s *lunm_list;
2150 int i;
2151
2152 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2153 return;
2154
2155 lunm_list = bfa_get_lun_mask_list(bfa);
2156 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2157 lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
2158 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2159 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2160 }
2161}
2162
2163/*
2164 * Validate LUN for LUN masking
2165 */
2166static enum bfa_ioim_lm_status
2167bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
2168 struct bfa_rport_s *rp, struct scsi_lun lun)
2169{
2170 u8 i;
2171 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2172 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2173 struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
2174
2175 if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
2176 (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
2177 ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
2178 return BFA_IOIM_LM_PRESENT;
2179 }
2180
2181 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2182
2183 if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2184 continue;
2185
2186 if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
2187 scsilun_to_int((struct scsi_lun *)&lun))
2188 && (rp->rport_tag == lun_list[i].rp_tag)
2189 && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
2190 lun_list[i].lp_tag)) {
2191 bfa_trc(ioim->bfa, lun_list[i].rp_tag);
2192 bfa_trc(ioim->bfa, lun_list[i].lp_tag);
2193 bfa_trc(ioim->bfa, scsilun_to_int(
2194 (struct scsi_lun *)&lun_list[i].lun));
2195
2196 if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
2197 ((cdb->scsi_cdb[0] != INQUIRY) ||
2198 (cdb->scsi_cdb[0] != REPORT_LUNS))) {
2199 lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
2200 return BFA_IOIM_LM_RPL_DATA_CHANGED;
2201 }
2202
2203 if (cdb->scsi_cdb[0] == REPORT_LUNS)
2204 ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
2205
2206 return BFA_IOIM_LM_PRESENT;
2207 }
2208 }
2209
2210 if ((cdb->scsi_cdb[0] == INQUIRY) &&
2211 (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
2212 ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
2213 return BFA_IOIM_LM_PRESENT;
2214 }
2215
2216 if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
2217 return BFA_IOIM_LM_LUN_NOT_RDY;
2218
2219 return BFA_IOIM_LM_LUN_NOT_SUP;
2220}
2221
2222static bfa_boolean_t
2223bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
2224{
2225 return BFA_TRUE;
2226}
2227
2228static void
2229bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
2230 int buf_lun_cnt)
2231{
2232 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2233 struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
2234 struct scsi_lun lun;
2235 int i, j;
2236
2237 bfa_trc(ioim->bfa, buf_lun_cnt);
2238 for (j = 0; j < buf_lun_cnt; j++) {
2239 lun = *((struct scsi_lun *)(lun_data + j));
2240 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2241 if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2242 continue;
2243 if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
2244 (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
2245 (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
2246 == scsilun_to_int((struct scsi_lun *)&lun))) {
2247 lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
2248 break;
2249 }
2250 } /* next lun in mask DB */
2251 } /* next lun in buf */
2252}
2253
2254static int
2255bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
2256 struct scsi_report_luns_data_s *rl)
2257{
2258 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2259 struct scatterlist *sg = scsi_sglist(cmnd);
2260 struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
2261 struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
2262 int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
2263 int lun_across_sg_bytes, bytes_from_next_buf;
2264 u64 last_lun, temp_last_lun;
2265
2266 /* fetch luns from the first sg element */
2267 bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
2268 (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
2269
2270 /* fetch luns from multiple sg elements */
2271 scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
2272 if (sgeid == 0) {
2273 prev_sg_len = sg_dma_len(sg);
2274 prev_rl_data = (struct scsi_lun *)
2275 phys_to_virt(sg_dma_address(sg));
2276 continue;
2277 }
2278
2279 /* if the buf is having more data */
2280 lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
2281 if (lun_across_sg_bytes) {
2282 bfa_trc(ioim->bfa, lun_across_sg_bytes);
2283 bfa_stats(ioim->itnim, lm_lun_across_sg);
2284 bytes_from_next_buf = sizeof(struct scsi_lun) -
2285 lun_across_sg_bytes;
2286
2287 /* from next buf take higher bytes */
2288 temp_last_lun = *((u64 *)
2289 phys_to_virt(sg_dma_address(sg)));
2290 last_lun |= temp_last_lun >>
2291 (lun_across_sg_bytes * BITS_PER_BYTE);
2292
2293 /* from prev buf take higher bytes */
2294 temp_last_lun = *((u64 *)(prev_rl_data +
2295 (prev_sg_len - lun_across_sg_bytes)));
2296 temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
2297 last_lun = last_lun | (temp_last_lun <<
2298 (bytes_from_next_buf * BITS_PER_BYTE));
2299
2300 bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
2301 } else
2302 bytes_from_next_buf = 0;
2303
2304 *pgdlen += sg_dma_len(sg);
2305 prev_sg_len = sg_dma_len(sg);
2306 prev_rl_data = (struct scsi_lun *)
2307 phys_to_virt(sg_dma_address(sg));
2308 bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
2309 bytes_from_next_buf,
2310 sg_dma_len(sg) / sizeof(struct scsi_lun));
2311 }
2312
2313 /* update the report luns data - based on fetched luns */
2314 sg = scsi_sglist(cmnd);
2315 base_rl_data = (struct scsi_lun *)rl->lun;
2316 base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
2317 for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
2318 if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
2319 base_rl_data[j] = lun_list[i].lun;
2320 lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
2321 j++;
2322 lun_fetched_cnt++;
2323 }
2324
2325 if (j > base_count) {
2326 j = 0;
2327 sg = sg_next(sg);
2328 base_rl_data = (struct scsi_lun *)
2329 phys_to_virt(sg_dma_address(sg));
2330 base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
2331 }
2332 }
2333
2334 bfa_trc(ioim->bfa, lun_fetched_cnt);
2335 return lun_fetched_cnt;
2336}
2337
2338static bfa_boolean_t
2339bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
2340{
2341 struct scsi_inquiry_data_s *inq;
2342 struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
2343
2344 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2345 inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
2346
2347 bfa_trc(ioim->bfa, inq->device_type);
2348 inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
2349 return 0;
2350}
2351
2352static bfa_boolean_t
2353bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
2354{
2355 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
2356 struct scatterlist *sg = scsi_sglist(cmnd);
2357 struct bfi_ioim_rsp_s *m;
2358 struct scsi_report_luns_data_s *rl = NULL;
2359 int lun_count = 0, lun_fetched_cnt = 0;
2360 u32 residue, pgdlen = 0;
2361
2362 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2363 if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
2364 return BFA_TRUE;
2365
2366 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2367 if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
2368 return BFA_TRUE;
2369
2370 pgdlen = sg_dma_len(sg);
2371 bfa_trc(ioim->bfa, pgdlen);
2372 rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
2373 lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun);
2374 lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
2375
2376 if (lun_count == lun_fetched_cnt)
2377 return BFA_TRUE;
2378
2379 bfa_trc(ioim->bfa, lun_count);
2380 bfa_trc(ioim->bfa, lun_fetched_cnt);
2381 bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
2382
2383 if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
2384 rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) *
2385 sizeof(struct scsi_lun);
2386 else
2387 bfa_stats(ioim->itnim, lm_small_buf_addresidue);
2388
2389 bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
2390 bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
2391
2392 residue = be32_to_cpu(m->residue);
2393 residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
2394 bfa_stats(ioim->itnim, lm_wire_residue_changed);
2395 m->residue = be32_to_cpu(residue);
2396 bfa_trc(ioim->bfa, ioim->nsges);
2397 return BFA_FALSE;
2398}
2012 2399
2013static void 2400static void
2014__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete) 2401__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
@@ -2068,6 +2455,299 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2068} 2455}
2069 2456
2070static void 2457static void
2458__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
2459{
2460 struct bfa_ioim_s *ioim = cbarg;
2461 int sns_len = 0xD;
2462 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2463 struct scsi_sense_s *snsinfo;
2464
2465 if (!complete) {
2466 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2467 return;
2468 }
2469
2470 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
2471 ioim->fcpim->fcp, ioim->iotag);
2472 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2473 snsinfo->add_sense_length = 0xa;
2474 snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
2475 snsinfo->sense_key = ILLEGAL_REQUEST;
2476 bfa_trc(ioim->bfa, residue);
2477 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2478 SCSI_STATUS_CHECK_CONDITION, sns_len,
2479 (u8 *)snsinfo, residue);
2480}
2481
2482static void
2483__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
2484{
2485 struct bfa_ioim_s *ioim = cbarg;
2486 int sns_len = 0xD;
2487 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2488 struct scsi_sense_s *snsinfo;
2489
2490 if (!complete) {
2491 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2492 return;
2493 }
2494
2495 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
2496 ioim->iotag);
2497 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2498 snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
2499 snsinfo->asc = SCSI_ASC_TOCC;
2500 snsinfo->add_sense_length = 0x6;
2501 snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
2502 bfa_trc(ioim->bfa, residue);
2503 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2504 SCSI_STATUS_CHECK_CONDITION, sns_len,
2505 (u8 *)snsinfo, residue);
2506}
2507
2508static void
2509__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
2510{
2511 struct bfa_ioim_s *ioim = cbarg;
2512 int sns_len = 0xD;
2513 u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
2514 struct scsi_sense_s *snsinfo;
2515
2516 if (!complete) {
2517 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2518 return;
2519 }
2520
2521 snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
2522 ioim->fcpim->fcp, ioim->iotag);
2523 snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
2524 snsinfo->add_sense_length = 0xa;
2525 snsinfo->sense_key = NOT_READY;
2526 snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
2527 snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
2528 bfa_trc(ioim->bfa, residue);
2529 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
2530 SCSI_STATUS_CHECK_CONDITION, sns_len,
2531 (u8 *)snsinfo, residue);
2532}
2533
2534void
2535bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
2536 u16 rp_tag, u8 lp_tag)
2537{
2538 struct bfa_lun_mask_s *lun_list;
2539 u8 i;
2540
2541 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2542 return;
2543
2544 lun_list = bfa_get_lun_mask_list(bfa);
2545 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2546 if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2547 if ((lun_list[i].lp_wwn == lp_wwn) &&
2548 (lun_list[i].rp_wwn == rp_wwn)) {
2549 lun_list[i].rp_tag = rp_tag;
2550 lun_list[i].lp_tag = lp_tag;
2551 }
2552 }
2553 }
2554}
2555
2556/*
2557 * set UA for all active luns in LM DB
2558 */
2559static void
2560bfa_ioim_lm_set_ua(struct bfa_s *bfa)
2561{
2562 struct bfa_lun_mask_s *lunm_list;
2563 int i;
2564
2565 lunm_list = bfa_get_lun_mask_list(bfa);
2566 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2567 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2568 continue;
2569 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2570 }
2571}
2572
2573bfa_status_t
2574bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
2575{
2576 struct bfa_lunmask_cfg_s *lun_mask;
2577
2578 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2579 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2580 return BFA_STATUS_FAILED;
2581
2582 if (bfa_get_lun_mask_status(bfa) == update)
2583 return BFA_STATUS_NO_CHANGE;
2584
2585 lun_mask = bfa_get_lun_mask(bfa);
2586 lun_mask->status = update;
2587
2588 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
2589 bfa_ioim_lm_set_ua(bfa);
2590
2591 return bfa_dconf_update(bfa);
2592}
2593
2594bfa_status_t
2595bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
2596{
2597 int i;
2598 struct bfa_lun_mask_s *lunm_list;
2599
2600 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2601 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2602 return BFA_STATUS_FAILED;
2603
2604 lunm_list = bfa_get_lun_mask_list(bfa);
2605 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2606 if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
2607 if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
2608 bfa_rport_unset_lunmask(bfa,
2609 BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
2610 }
2611 }
2612
2613 memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
2614 return bfa_dconf_update(bfa);
2615}
2616
2617bfa_status_t
2618bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
2619{
2620 struct bfa_lunmask_cfg_s *lun_mask;
2621
2622 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2623 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2624 return BFA_STATUS_FAILED;
2625
2626 lun_mask = bfa_get_lun_mask(bfa);
2627 memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
2628 return BFA_STATUS_OK;
2629}
2630
2631bfa_status_t
2632bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2633 wwn_t rpwwn, struct scsi_lun lun)
2634{
2635 struct bfa_lun_mask_s *lunm_list;
2636 struct bfa_rport_s *rp = NULL;
2637 int i, free_index = MAX_LUN_MASK_CFG + 1;
2638 struct bfa_fcs_lport_s *port = NULL;
2639 struct bfa_fcs_rport_s *rp_fcs;
2640
2641 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2642 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2643 return BFA_STATUS_FAILED;
2644
2645 port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
2646 vf_id, *pwwn);
2647 if (port) {
2648 *pwwn = port->port_cfg.pwwn;
2649 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2650 rp = rp_fcs->bfa_rport;
2651 }
2652
2653 lunm_list = bfa_get_lun_mask_list(bfa);
2654 /* if entry exists */
2655 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2656 if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
2657 free_index = i;
2658 if ((lunm_list[i].lp_wwn == *pwwn) &&
2659 (lunm_list[i].rp_wwn == rpwwn) &&
2660 (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2661 scsilun_to_int((struct scsi_lun *)&lun)))
2662 return BFA_STATUS_ENTRY_EXISTS;
2663 }
2664
2665 if (free_index > MAX_LUN_MASK_CFG)
2666 return BFA_STATUS_MAX_ENTRY_REACHED;
2667
2668 if (rp) {
2669 lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
2670 rp->rport_info.local_pid);
2671 lunm_list[free_index].rp_tag = rp->rport_tag;
2672 } else {
2673 lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
2674 lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
2675 }
2676
2677 lunm_list[free_index].lp_wwn = *pwwn;
2678 lunm_list[free_index].rp_wwn = rpwwn;
2679 lunm_list[free_index].lun = lun;
2680 lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
2681
2682 /* set for all luns in this rp */
2683 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2684 if ((lunm_list[i].lp_wwn == *pwwn) &&
2685 (lunm_list[i].rp_wwn == rpwwn))
2686 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2687 }
2688
2689 return bfa_dconf_update(bfa);
2690}
2691
2692bfa_status_t
2693bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
2694 wwn_t rpwwn, struct scsi_lun lun)
2695{
2696 struct bfa_lun_mask_s *lunm_list;
2697 struct bfa_rport_s *rp = NULL;
2698 struct bfa_fcs_lport_s *port = NULL;
2699 struct bfa_fcs_rport_s *rp_fcs;
2700 int i;
2701
2702 /* in min cfg lunm_list could be NULL but no commands should run. */
2703 if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
2704 return BFA_STATUS_FAILED;
2705
2706 bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
2707 bfa_trc(bfa, *pwwn);
2708 bfa_trc(bfa, rpwwn);
2709 bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
2710
2711 if (*pwwn == 0) {
2712 port = bfa_fcs_lookup_port(
2713 &((struct bfad_s *)bfa->bfad)->bfa_fcs,
2714 vf_id, *pwwn);
2715 if (port) {
2716 *pwwn = port->port_cfg.pwwn;
2717 rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
2718 rp = rp_fcs->bfa_rport;
2719 }
2720 }
2721
2722 lunm_list = bfa_get_lun_mask_list(bfa);
2723 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2724 if ((lunm_list[i].lp_wwn == *pwwn) &&
2725 (lunm_list[i].rp_wwn == rpwwn) &&
2726 (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
2727 scsilun_to_int((struct scsi_lun *)&lun))) {
2728 lunm_list[i].lp_wwn = 0;
2729 lunm_list[i].rp_wwn = 0;
2730 int_to_scsilun(0, &lunm_list[i].lun);
2731 lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
2732 if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
2733 lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
2734 lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
2735 }
2736 return bfa_dconf_update(bfa);
2737 }
2738 }
2739
2740 /* set for all luns in this rp */
2741 for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
2742 if ((lunm_list[i].lp_wwn == *pwwn) &&
2743 (lunm_list[i].rp_wwn == rpwwn))
2744 lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
2745 }
2746
2747 return BFA_STATUS_ENTRY_NOT_EXISTS;
2748}
2749
2750static void
2071__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete) 2751__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2072{ 2752{
2073 struct bfa_ioim_s *ioim = cbarg; 2753 struct bfa_ioim_s *ioim = cbarg;
@@ -2077,6 +2757,7 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2077 return; 2757 return;
2078 } 2758 }
2079 2759
2760 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2080 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED, 2761 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2081 0, 0, NULL, 0); 2762 0, 0, NULL, 0);
2082} 2763}
@@ -2092,6 +2773,7 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2092 return; 2773 return;
2093 } 2774 }
2094 2775
2776 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2095 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV, 2777 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2096 0, 0, NULL, 0); 2778 0, 0, NULL, 0);
2097} 2779}
@@ -2106,6 +2788,7 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2106 return; 2788 return;
2107 } 2789 }
2108 2790
2791 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2109 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio); 2792 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2110} 2793}
2111 2794
@@ -2449,6 +3132,7 @@ bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
2449 ioim->bfa = fcpim->bfa; 3132 ioim->bfa = fcpim->bfa;
2450 ioim->fcpim = fcpim; 3133 ioim->fcpim = fcpim;
2451 ioim->iosp = iosp; 3134 ioim->iosp = iosp;
3135 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2452 INIT_LIST_HEAD(&ioim->sgpg_q); 3136 INIT_LIST_HEAD(&ioim->sgpg_q);
2453 bfa_reqq_winit(&ioim->iosp->reqq_wait, 3137 bfa_reqq_winit(&ioim->iosp->reqq_wait,
2454 bfa_ioim_qresume, ioim); 3138 bfa_ioim_qresume, ioim);
@@ -2486,6 +3170,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2486 evt = BFA_IOIM_SM_DONE; 3170 evt = BFA_IOIM_SM_DONE;
2487 else 3171 else
2488 evt = BFA_IOIM_SM_COMP; 3172 evt = BFA_IOIM_SM_COMP;
3173 ioim->proc_rsp_data(ioim);
2489 break; 3174 break;
2490 3175
2491 case BFI_IOIM_STS_TIMEDOUT: 3176 case BFI_IOIM_STS_TIMEDOUT:
@@ -2521,6 +3206,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2521 if (rsp->abort_tag != ioim->abort_tag) { 3206 if (rsp->abort_tag != ioim->abort_tag) {
2522 bfa_trc(ioim->bfa, rsp->abort_tag); 3207 bfa_trc(ioim->bfa, rsp->abort_tag);
2523 bfa_trc(ioim->bfa, ioim->abort_tag); 3208 bfa_trc(ioim->bfa, ioim->abort_tag);
3209 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2524 return; 3210 return;
2525 } 3211 }
2526 3212
@@ -2539,6 +3225,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2539 WARN_ON(1); 3225 WARN_ON(1);
2540 } 3226 }
2541 3227
3228 ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
2542 bfa_sm_send_event(ioim, evt); 3229 bfa_sm_send_event(ioim, evt);
2543} 3230}
2544 3231
@@ -2556,7 +3243,16 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2556 WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag); 3243 WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
2557 3244
2558 bfa_ioim_cb_profile_comp(fcpim, ioim); 3245 bfa_ioim_cb_profile_comp(fcpim, ioim);
2559 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); 3246
3247 if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) {
3248 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
3249 return;
3250 }
3251
3252 if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
3253 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
3254 else
3255 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
2560} 3256}
2561 3257
2562/* 3258/*
@@ -2668,6 +3364,35 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
2668void 3364void
2669bfa_ioim_start(struct bfa_ioim_s *ioim) 3365bfa_ioim_start(struct bfa_ioim_s *ioim)
2670{ 3366{
3367 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
3368 struct bfa_lps_s *lps;
3369 enum bfa_ioim_lm_status status;
3370 struct scsi_lun scsilun;
3371
3372 if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
3373 lps = BFA_IOIM_TO_LPS(ioim);
3374 int_to_scsilun(cmnd->device->lun, &scsilun);
3375 status = bfa_ioim_lm_check(ioim, lps,
3376 ioim->itnim->rport, scsilun);
3377 if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
3378 bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
3379 bfa_stats(ioim->itnim, lm_lun_not_rdy);
3380 return;
3381 }
3382
3383 if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
3384 bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
3385 bfa_stats(ioim->itnim, lm_lun_not_sup);
3386 return;
3387 }
3388
3389 if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
3390 bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
3391 bfa_stats(ioim->itnim, lm_rpl_data_changed);
3392 return;
3393 }
3394 }
3395
2671 bfa_ioim_cb_profile_start(ioim->fcpim, ioim); 3396 bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
2672 3397
2673 /* 3398 /*
@@ -3411,6 +4136,13 @@ bfa_fcp_detach(struct bfa_s *bfa)
3411static void 4136static void
3412bfa_fcp_start(struct bfa_s *bfa) 4137bfa_fcp_start(struct bfa_s *bfa)
3413{ 4138{
4139 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
4140
4141 /*
4142 * bfa_init() with flash read is complete. now invalidate the stale
4143 * content of lun mask like unit attention, rp tag and lp tag.
4144 */
4145 bfa_ioim_lm_init(fcp->bfa);
3414} 4146}
3415 4147
3416static void 4148static void
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 57b695ad4ee5..1080bcb81cb7 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -79,14 +79,22 @@ bfa_ioim_get_index(u32 n) {
79 if (n >= (1UL)<<22) 79 if (n >= (1UL)<<22)
80 return BFA_IOBUCKET_MAX - 1; 80 return BFA_IOBUCKET_MAX - 1;
81 n >>= 8; 81 n >>= 8;
82 if (n >= (1UL)<<16) 82 if (n >= (1UL)<<16) {
83 n >>= 16; pos += 16; 83 n >>= 16;
84 if (n >= 1 << 8) 84 pos += 16;
85 n >>= 8; pos += 8; 85 }
86 if (n >= 1 << 4) 86 if (n >= 1 << 8) {
87 n >>= 4; pos += 4; 87 n >>= 8;
88 if (n >= 1 << 2) 88 pos += 8;
89 n >>= 2; pos += 2; 89 }
90 if (n >= 1 << 4) {
91 n >>= 4;
92 pos += 4;
93 }
94 if (n >= 1 << 2) {
95 n >>= 2;
96 pos += 2;
97 }
90 if (n >= 1 << 1) 98 if (n >= 1 << 1)
91 pos += 1; 99 pos += 1;
92 100
@@ -102,6 +110,7 @@ struct bfad_ioim_s;
102struct bfad_tskim_s; 110struct bfad_tskim_s;
103 111
104typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim); 112typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
113typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
105 114
106struct bfa_fcpim_s { 115struct bfa_fcpim_s {
107 struct bfa_s *bfa; 116 struct bfa_s *bfa;
@@ -115,7 +124,7 @@ struct bfa_fcpim_s {
115 u32 path_tov; 124 u32 path_tov;
116 u16 q_depth; 125 u16 q_depth;
117 u8 reqq; /* Request queue to be used */ 126 u8 reqq; /* Request queue to be used */
118 u8 rsvd; 127 u8 lun_masking_pending;
119 struct list_head itnim_q; /* queue of active itnim */ 128 struct list_head itnim_q; /* queue of active itnim */
120 struct list_head ioim_resfree_q; /* IOs waiting for f/w */ 129 struct list_head ioim_resfree_q; /* IOs waiting for f/w */
121 struct list_head ioim_comp_q; /* IO global comp Q */ 130 struct list_head ioim_comp_q; /* IO global comp Q */
@@ -170,7 +179,9 @@ struct bfa_ioim_s {
170 bfa_cb_cbfn_t io_cbfn; /* IO completion handler */ 179 bfa_cb_cbfn_t io_cbfn; /* IO completion handler */
171 struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */ 180 struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */
172 u8 reqq; /* Request queue for I/O */ 181 u8 reqq; /* Request queue for I/O */
182 u8 mode; /* IO is passthrough or not */
173 u64 start_time; /* IO's Profile start val */ 183 u64 start_time; /* IO's Profile start val */
184 bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
174}; 185};
175 186
176struct bfa_ioim_sp_s { 187struct bfa_ioim_sp_s {
@@ -250,6 +261,10 @@ struct bfa_itnim_s {
250 (__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \ 261 (__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \
251} while (0) 262} while (0)
252 263
264#define BFA_IOIM_TO_LPS(__ioim) \
265 BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa), \
266 __ioim->itnim->rport->rport_info.lp_tag)
267
253static inline bfa_boolean_t 268static inline bfa_boolean_t
254bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim) 269bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
255{ 270{
@@ -297,6 +312,8 @@ bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
297 struct bfa_itnim_iostats_s *stats, u8 lp_tag); 312 struct bfa_itnim_iostats_s *stats, u8 lp_tag);
298void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats, 313void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
299 struct bfa_itnim_iostats_s *itnim_stats); 314 struct bfa_itnim_iostats_s *itnim_stats);
315bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
316bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
300 317
301#define bfa_fcpim_ioredirect_enabled(__bfa) \ 318#define bfa_fcpim_ioredirect_enabled(__bfa) \
302 (((struct bfa_fcpim_s *)(BFA_FCPIM(__bfa)))->ioredirect) 319 (((struct bfa_fcpim_s *)(BFA_FCPIM(__bfa)))->ioredirect)
@@ -397,4 +414,14 @@ void bfa_tskim_start(struct bfa_tskim_s *tskim,
397void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk, 414void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
398 enum bfi_tskim_status tsk_status); 415 enum bfi_tskim_status tsk_status);
399 416
417void bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn,
418 wwn_t rp_wwn, u16 rp_tag, u8 lp_tag);
419bfa_status_t bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 on_off);
420bfa_status_t bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf);
421bfa_status_t bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id,
422 wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
423bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id,
424 wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
425bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa);
426
400#endif /* __BFA_FCPIM_H__ */ 427#endif /* __BFA_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index a9b22bc48bc3..eaac57e1ddec 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include "bfad_drv.h" 22#include "bfad_drv.h"
23#include "bfad_im.h"
23#include "bfa_fcs.h" 24#include "bfa_fcs.h"
24#include "bfa_fcbuild.h" 25#include "bfa_fcbuild.h"
25 26
@@ -1327,6 +1328,29 @@ bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
1327 bfa_trc(fabric->fcs, status); 1328 bfa_trc(fabric->fcs, status);
1328} 1329}
1329 1330
1331
1332/*
1333 * Send AEN notification
1334 */
1335static void
1336bfa_fcs_fabric_aen_post(struct bfa_fcs_lport_s *port,
1337 enum bfa_port_aen_event event)
1338{
1339 struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
1340 struct bfa_aen_entry_s *aen_entry;
1341
1342 bfad_get_aen_entry(bfad, aen_entry);
1343 if (!aen_entry)
1344 return;
1345
1346 aen_entry->aen_data.port.pwwn = bfa_fcs_lport_get_pwwn(port);
1347 aen_entry->aen_data.port.fwwn = bfa_fcs_lport_get_fabric_name(port);
1348
1349 /* Send the AEN notification */
1350 bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
1351 BFA_AEN_CAT_PORT, event);
1352}
1353
1330/* 1354/*
1331 * 1355 *
1332 * @param[in] fabric - fabric 1356 * @param[in] fabric - fabric
@@ -1358,6 +1382,8 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
1358 BFA_LOG(KERN_WARNING, bfad, bfa_log_level, 1382 BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
1359 "Base port WWN = %s Fabric WWN = %s\n", 1383 "Base port WWN = %s Fabric WWN = %s\n",
1360 pwwn_ptr, fwwn_ptr); 1384 pwwn_ptr, fwwn_ptr);
1385 bfa_fcs_fabric_aen_post(&fabric->bport,
1386 BFA_PORT_AEN_FABRIC_NAME_CHANGE);
1361 } 1387 }
1362} 1388}
1363 1389
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index a5f1faf335a7..e75e07d25915 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -675,6 +675,7 @@ struct bfa_fcs_s {
675 struct bfa_fcs_fabric_s fabric; /* base fabric state machine */ 675 struct bfa_fcs_fabric_s fabric; /* base fabric state machine */
676 struct bfa_fcs_stats_s stats; /* FCS statistics */ 676 struct bfa_fcs_stats_s stats; /* FCS statistics */
677 struct bfa_wc_s wc; /* waiting counter */ 677 struct bfa_wc_s wc; /* waiting counter */
678 int fcs_aen_seq;
678}; 679};
679 680
680/* 681/*
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index 29b4108be269..9272840a2409 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -37,6 +37,8 @@ static void bfa_fcs_itnim_prli_response(void *fcsarg,
37 struct bfa_fcxp_s *fcxp, void *cbarg, 37 struct bfa_fcxp_s *fcxp, void *cbarg,
38 bfa_status_t req_status, u32 rsp_len, 38 bfa_status_t req_status, u32 rsp_len,
39 u32 resid_len, struct fchs_s *rsp_fchs); 39 u32 resid_len, struct fchs_s *rsp_fchs);
40static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
41 enum bfa_itnim_aen_event event);
40 42
41/* 43/*
42 * fcs_itnim_sm FCS itnim state machine events 44 * fcs_itnim_sm FCS itnim state machine events
@@ -269,6 +271,7 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
269 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 271 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
270 "Target (WWN = %s) is online for initiator (WWN = %s)\n", 272 "Target (WWN = %s) is online for initiator (WWN = %s)\n",
271 rpwwn_buf, lpwwn_buf); 273 rpwwn_buf, lpwwn_buf);
274 bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE);
272 break; 275 break;
273 276
274 case BFA_FCS_ITNIM_SM_OFFLINE: 277 case BFA_FCS_ITNIM_SM_OFFLINE:
@@ -305,14 +308,17 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
305 bfa_itnim_offline(itnim->bfa_itnim); 308 bfa_itnim_offline(itnim->bfa_itnim);
306 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port)); 309 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
307 wwn2str(rpwwn_buf, itnim->rport->pwwn); 310 wwn2str(rpwwn_buf, itnim->rport->pwwn);
308 if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) 311 if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) {
309 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 312 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
310 "Target (WWN = %s) connectivity lost for " 313 "Target (WWN = %s) connectivity lost for "
311 "initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf); 314 "initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf);
312 else 315 bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT);
316 } else {
313 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 317 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
314 "Target (WWN = %s) offlined by initiator (WWN = %s)\n", 318 "Target (WWN = %s) offlined by initiator (WWN = %s)\n",
315 rpwwn_buf, lpwwn_buf); 319 rpwwn_buf, lpwwn_buf);
320 bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE);
321 }
316 break; 322 break;
317 323
318 case BFA_FCS_ITNIM_SM_DELETE: 324 case BFA_FCS_ITNIM_SM_DELETE:
@@ -382,6 +388,33 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
382} 388}
383 389
384static void 390static void
391bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
392 enum bfa_itnim_aen_event event)
393{
394 struct bfa_fcs_rport_s *rport = itnim->rport;
395 struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
396 struct bfa_aen_entry_s *aen_entry;
397
398 /* Don't post events for well known addresses */
399 if (BFA_FCS_PID_IS_WKA(rport->pid))
400 return;
401
402 bfad_get_aen_entry(bfad, aen_entry);
403 if (!aen_entry)
404 return;
405
406 aen_entry->aen_data.itnim.vf_id = rport->port->fabric->vf_id;
407 aen_entry->aen_data.itnim.ppwwn = bfa_fcs_lport_get_pwwn(
408 bfa_fcs_get_base_port(itnim->fcs));
409 aen_entry->aen_data.itnim.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
410 aen_entry->aen_data.itnim.rpwwn = rport->pwwn;
411
412 /* Send the AEN notification */
413 bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
414 BFA_AEN_CAT_ITNIM, event);
415}
416
417static void
385bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced) 418bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
386{ 419{
387 struct bfa_fcs_itnim_s *itnim = itnim_cbarg; 420 struct bfa_fcs_itnim_s *itnim = itnim_cbarg;
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index f8251a91ba91..d4f951fe753e 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include "bfad_drv.h" 18#include "bfad_drv.h"
19#include "bfad_im.h"
19#include "bfa_fcs.h" 20#include "bfa_fcs.h"
20#include "bfa_fcbuild.h" 21#include "bfa_fcbuild.h"
21#include "bfa_fc.h" 22#include "bfa_fc.h"
@@ -300,6 +301,31 @@ bfa_fcs_lport_sm_deleting(
300 */ 301 */
301 302
302/* 303/*
304 * Send AEN notification
305 */
306static void
307bfa_fcs_lport_aen_post(struct bfa_fcs_lport_s *port,
308 enum bfa_lport_aen_event event)
309{
310 struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
311 struct bfa_aen_entry_s *aen_entry;
312
313 bfad_get_aen_entry(bfad, aen_entry);
314 if (!aen_entry)
315 return;
316
317 aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
318 aen_entry->aen_data.lport.roles = port->port_cfg.roles;
319 aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
320 bfa_fcs_get_base_port(port->fcs));
321 aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
322
323 /* Send the AEN notification */
324 bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
325 BFA_AEN_CAT_LPORT, event);
326}
327
328/*
303 * Send a LS reject 329 * Send a LS reject
304 */ 330 */
305static void 331static void
@@ -593,6 +619,7 @@ bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port)
593 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 619 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
594 "Logical port online: WWN = %s Role = %s\n", 620 "Logical port online: WWN = %s Role = %s\n",
595 lpwwn_buf, "Initiator"); 621 lpwwn_buf, "Initiator");
622 bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_ONLINE);
596 623
597 bfad->bfad_flags |= BFAD_PORT_ONLINE; 624 bfad->bfad_flags |= BFAD_PORT_ONLINE;
598} 625}
@@ -611,14 +638,17 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
611 638
612 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); 639 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
613 if (bfa_sm_cmp_state(port->fabric, 640 if (bfa_sm_cmp_state(port->fabric,
614 bfa_fcs_fabric_sm_online) == BFA_TRUE) 641 bfa_fcs_fabric_sm_online) == BFA_TRUE) {
615 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 642 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
616 "Logical port lost fabric connectivity: WWN = %s Role = %s\n", 643 "Logical port lost fabric connectivity: WWN = %s Role = %s\n",
617 lpwwn_buf, "Initiator"); 644 lpwwn_buf, "Initiator");
618 else 645 bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
646 } else {
619 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 647 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
620 "Logical port taken offline: WWN = %s Role = %s\n", 648 "Logical port taken offline: WWN = %s Role = %s\n",
621 lpwwn_buf, "Initiator"); 649 lpwwn_buf, "Initiator");
650 bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_OFFLINE);
651 }
622 652
623 list_for_each_safe(qe, qen, &port->rport_q) { 653 list_for_each_safe(qe, qen, &port->rport_q) {
624 rport = (struct bfa_fcs_rport_s *) qe; 654 rport = (struct bfa_fcs_rport_s *) qe;
@@ -676,6 +706,7 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
676 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 706 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
677 "Logical port deleted: WWN = %s Role = %s\n", 707 "Logical port deleted: WWN = %s Role = %s\n",
678 lpwwn_buf, "Initiator"); 708 lpwwn_buf, "Initiator");
709 bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DELETE);
679 710
680 /* Base port will be deleted by the OS driver */ 711 /* Base port will be deleted by the OS driver */
681 if (port->vport) { 712 if (port->vport) {
@@ -973,6 +1004,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
973 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 1004 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
974 "New logical port created: WWN = %s Role = %s\n", 1005 "New logical port created: WWN = %s Role = %s\n",
975 lpwwn_buf, "Initiator"); 1006 lpwwn_buf, "Initiator");
1007 bfa_fcs_lport_aen_post(lport, BFA_LPORT_AEN_NEW);
976 1008
977 bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit); 1009 bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit);
978 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); 1010 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
@@ -5559,6 +5591,31 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
5559 * fcs_vport_private FCS virtual port private functions 5591 * fcs_vport_private FCS virtual port private functions
5560 */ 5592 */
5561/* 5593/*
5594 * Send AEN notification
5595 */
5596static void
5597bfa_fcs_vport_aen_post(struct bfa_fcs_lport_s *port,
5598 enum bfa_lport_aen_event event)
5599{
5600 struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
5601 struct bfa_aen_entry_s *aen_entry;
5602
5603 bfad_get_aen_entry(bfad, aen_entry);
5604 if (!aen_entry)
5605 return;
5606
5607 aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
5608 aen_entry->aen_data.lport.roles = port->port_cfg.roles;
5609 aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
5610 bfa_fcs_get_base_port(port->fcs));
5611 aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
5612
5613 /* Send the AEN notification */
5614 bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
5615 BFA_AEN_CAT_LPORT, event);
5616}
5617
5618/*
5562 * This routine will be called to send a FDISC command. 5619 * This routine will be called to send a FDISC command.
5563 */ 5620 */
5564static void 5621static void
@@ -5585,8 +5642,11 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
5585 case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */ 5642 case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
5586 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) 5643 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
5587 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); 5644 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
5588 else 5645 else {
5646 bfa_fcs_vport_aen_post(&vport->lport,
5647 BFA_LPORT_AEN_NPIV_DUP_WWN);
5589 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN); 5648 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN);
5649 }
5590 break; 5650 break;
5591 5651
5592 case FC_LS_RJT_EXP_INSUFF_RES: 5652 case FC_LS_RJT_EXP_INSUFF_RES:
@@ -5596,11 +5656,17 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
5596 */ 5656 */
5597 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) 5657 if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
5598 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); 5658 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
5599 else 5659 else {
5660 bfa_fcs_vport_aen_post(&vport->lport,
5661 BFA_LPORT_AEN_NPIV_FABRIC_MAX);
5600 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED); 5662 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
5663 }
5601 break; 5664 break;
5602 5665
5603 default: 5666 default:
5667 if (vport->fdisc_retries == 0)
5668 bfa_fcs_vport_aen_post(&vport->lport,
5669 BFA_LPORT_AEN_NPIV_UNKNOWN);
5604 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); 5670 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
5605 } 5671 }
5606} 5672}
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 2c514458a6b4..52628d5d3c9b 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include "bfad_drv.h" 22#include "bfad_drv.h"
23#include "bfad_im.h"
23#include "bfa_fcs.h" 24#include "bfa_fcs.h"
24#include "bfa_fcbuild.h" 25#include "bfa_fcbuild.h"
25 26
@@ -2041,6 +2042,35 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
2041} 2042}
2042 2043
2043static void 2044static void
2045bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
2046 enum bfa_rport_aen_event event,
2047 struct bfa_rport_aen_data_s *data)
2048{
2049 struct bfa_fcs_lport_s *port = rport->port;
2050 struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
2051 struct bfa_aen_entry_s *aen_entry;
2052
2053 bfad_get_aen_entry(bfad, aen_entry);
2054 if (!aen_entry)
2055 return;
2056
2057 if (event == BFA_RPORT_AEN_QOS_PRIO)
2058 aen_entry->aen_data.rport.priv.qos = data->priv.qos;
2059 else if (event == BFA_RPORT_AEN_QOS_FLOWID)
2060 aen_entry->aen_data.rport.priv.qos = data->priv.qos;
2061
2062 aen_entry->aen_data.rport.vf_id = rport->port->fabric->vf_id;
2063 aen_entry->aen_data.rport.ppwwn = bfa_fcs_lport_get_pwwn(
2064 bfa_fcs_get_base_port(rport->fcs));
2065 aen_entry->aen_data.rport.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
2066 aen_entry->aen_data.rport.rpwwn = rport->pwwn;
2067
2068 /* Send the AEN notification */
2069 bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
2070 BFA_AEN_CAT_RPORT, event);
2071}
2072
2073static void
2044bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport) 2074bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
2045{ 2075{
2046 struct bfa_fcs_lport_s *port = rport->port; 2076 struct bfa_fcs_lport_s *port = rport->port;
@@ -2063,10 +2093,12 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
2063 2093
2064 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); 2094 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
2065 wwn2str(rpwwn_buf, rport->pwwn); 2095 wwn2str(rpwwn_buf, rport->pwwn);
2066 if (!BFA_FCS_PID_IS_WKA(rport->pid)) 2096 if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
2067 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2097 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2068 "Remote port (WWN = %s) online for logical port (WWN = %s)\n", 2098 "Remote port (WWN = %s) online for logical port (WWN = %s)\n",
2069 rpwwn_buf, lpwwn_buf); 2099 rpwwn_buf, lpwwn_buf);
2100 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL);
2101 }
2070} 2102}
2071 2103
2072static void 2104static void
@@ -2083,16 +2115,21 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
2083 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); 2115 wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
2084 wwn2str(rpwwn_buf, rport->pwwn); 2116 wwn2str(rpwwn_buf, rport->pwwn);
2085 if (!BFA_FCS_PID_IS_WKA(rport->pid)) { 2117 if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
2086 if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) 2118 if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) {
2087 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2119 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2088 "Remote port (WWN = %s) connectivity lost for " 2120 "Remote port (WWN = %s) connectivity lost for "
2089 "logical port (WWN = %s)\n", 2121 "logical port (WWN = %s)\n",
2090 rpwwn_buf, lpwwn_buf); 2122 rpwwn_buf, lpwwn_buf);
2091 else 2123 bfa_fcs_rport_aen_post(rport,
2124 BFA_RPORT_AEN_DISCONNECT, NULL);
2125 } else {
2092 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2126 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2093 "Remote port (WWN = %s) offlined by " 2127 "Remote port (WWN = %s) offlined by "
2094 "logical port (WWN = %s)\n", 2128 "logical port (WWN = %s)\n",
2095 rpwwn_buf, lpwwn_buf); 2129 rpwwn_buf, lpwwn_buf);
2130 bfa_fcs_rport_aen_post(rport,
2131 BFA_RPORT_AEN_OFFLINE, NULL);
2132 }
2096 } 2133 }
2097 2134
2098 if (bfa_fcs_lport_is_initiator(port)) { 2135 if (bfa_fcs_lport_is_initiator(port)) {
@@ -2366,8 +2403,11 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg,
2366 struct bfa_rport_qos_attr_s new_qos_attr) 2403 struct bfa_rport_qos_attr_s new_qos_attr)
2367{ 2404{
2368 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; 2405 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
2406 struct bfa_rport_aen_data_s aen_data;
2369 2407
2370 bfa_trc(rport->fcs, rport->pwwn); 2408 bfa_trc(rport->fcs, rport->pwwn);
2409 aen_data.priv.qos = new_qos_attr;
2410 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data);
2371} 2411}
2372 2412
2373/* 2413/*
@@ -2390,8 +2430,11 @@ bfa_cb_rport_qos_scn_prio(void *cbarg,
2390 struct bfa_rport_qos_attr_s new_qos_attr) 2430 struct bfa_rport_qos_attr_s new_qos_attr)
2391{ 2431{
2392 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; 2432 struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
2433 struct bfa_rport_aen_data_s aen_data;
2393 2434
2394 bfa_trc(rport->fcs, rport->pwwn); 2435 bfa_trc(rport->fcs, rport->pwwn);
2436 aen_data.priv.qos = new_qos_attr;
2437 bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data);
2395} 2438}
2396 2439
2397/* 2440/*
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index e7ffd8205dc7..ea24d4c6e67a 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -42,11 +42,36 @@ bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
42 bfa->iocfc.bfa_regs.intr_status); 42 bfa->iocfc.bfa_regs.intr_status);
43} 43}
44 44
45/*
46 * Actions to respond RME Interrupt for Crossbow ASIC:
47 * - Write 1 to Interrupt Status register
48 * INTX - done in bfa_intx()
49 * MSIX - done in bfa_hwcb_rspq_ack_msix()
50 * - Update CI (only if new CI)
51 */
45static void 52static void
46bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq) 53bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci)
47{ 54{
48 writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq), 55 writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
49 bfa->iocfc.bfa_regs.intr_status); 56 bfa->iocfc.bfa_regs.intr_status);
57
58 if (bfa_rspq_ci(bfa, rspq) == ci)
59 return;
60
61 bfa_rspq_ci(bfa, rspq) = ci;
62 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
63 mmiowb();
64}
65
66void
67bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
68{
69 if (bfa_rspq_ci(bfa, rspq) == ci)
70 return;
71
72 bfa_rspq_ci(bfa, rspq) = ci;
73 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
74 mmiowb();
50} 75}
51 76
52void 77void
@@ -149,8 +174,13 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
149void 174void
150bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix) 175bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
151{ 176{
152 bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix; 177 if (msix) {
153 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix; 178 bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
179 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
180 } else {
181 bfa->iocfc.hwif.hw_reqq_ack = NULL;
182 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
183 }
154} 184}
155 185
156void 186void
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index 989bbce9b296..637527f48b40 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -64,13 +64,36 @@ bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
64 writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); 64 writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
65} 65}
66 66
67/*
68 * Actions to respond RME Interrupt for Catapult ASIC:
69 * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx())
70 * - Acknowledge by writing to RME Queue Control register
71 * - Update CI
72 */
67void 73void
68bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq) 74bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
69{ 75{
70 u32 r32; 76 u32 r32;
71 77
72 r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); 78 r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
73 writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); 79 writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
80
81 bfa_rspq_ci(bfa, rspq) = ci;
82 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
83 mmiowb();
84}
85
86/*
87 * Actions to respond RME Interrupt for Catapult2 ASIC:
88 * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx())
89 * - Update CI
90 */
91void
92bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
93{
94 bfa_rspq_ci(bfa, rspq) = ci;
95 writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
96 mmiowb();
74} 97}
75 98
76void 99void
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index d6c2bf3865d2..1ac5aecf25a6 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include "bfad_drv.h" 18#include "bfad_drv.h"
19#include "bfad_im.h"
19#include "bfa_ioc.h" 20#include "bfa_ioc.h"
20#include "bfi_reg.h" 21#include "bfi_reg.h"
21#include "bfa_defs.h" 22#include "bfa_defs.h"
@@ -458,6 +459,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
458 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); 459 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
459 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); 460 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
460 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n"); 461 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
462 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
461} 463}
462 464
463static void 465static void
@@ -502,6 +504,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
502 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; 504 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
503 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); 505 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
504 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n"); 506 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
507 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
505} 508}
506 509
507/* 510/*
@@ -1966,6 +1969,7 @@ bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1966 1969
1967 BFA_LOG(KERN_CRIT, bfad, bfa_log_level, 1970 BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
1968 "Heart Beat of IOC has failed\n"); 1971 "Heart Beat of IOC has failed\n");
1972 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
1969 1973
1970} 1974}
1971 1975
@@ -1980,6 +1984,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1980 BFA_LOG(KERN_WARNING, bfad, bfa_log_level, 1984 BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
1981 "Running firmware version is incompatible " 1985 "Running firmware version is incompatible "
1982 "with the driver version\n"); 1986 "with the driver version\n");
1987 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
1983} 1988}
1984 1989
1985bfa_status_t 1990bfa_status_t
@@ -2679,6 +2684,43 @@ bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2679} 2684}
2680 2685
2681/* 2686/*
2687 * Send AEN notification
2688 */
2689void
2690bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2691{
2692 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2693 struct bfa_aen_entry_s *aen_entry;
2694 enum bfa_ioc_type_e ioc_type;
2695
2696 bfad_get_aen_entry(bfad, aen_entry);
2697 if (!aen_entry)
2698 return;
2699
2700 ioc_type = bfa_ioc_get_type(ioc);
2701 switch (ioc_type) {
2702 case BFA_IOC_TYPE_FC:
2703 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2704 break;
2705 case BFA_IOC_TYPE_FCoE:
2706 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2707 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2708 break;
2709 case BFA_IOC_TYPE_LL:
2710 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2711 break;
2712 default:
2713 WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2714 break;
2715 }
2716
2717 /* Send the AEN notification */
2718 aen_entry->aen_data.ioc.ioc_type = ioc_type;
2719 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2720 BFA_AEN_CAT_IOC, event);
2721}
2722
2723/*
2682 * Retrieve saved firmware trace from a prior IOC failure. 2724 * Retrieve saved firmware trace from a prior IOC failure.
2683 */ 2725 */
2684bfa_status_t 2726bfa_status_t
@@ -2879,6 +2921,10 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
2879{ 2921{
2880 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) 2922 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2881 return; 2923 return;
2924 if (ioc->attr->nwwn == 0)
2925 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
2926 if (ioc->attr->pwwn == 0)
2927 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
2882} 2928}
2883 2929
2884/* 2930/*
@@ -3443,6 +3489,54 @@ bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3443} 3489}
3444 3490
3445/* 3491/*
3492 * SFP's State Change Notification post to AEN
3493 */
3494static void
3495bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3496{
3497 struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3498 struct bfa_aen_entry_s *aen_entry;
3499 enum bfa_port_aen_event aen_evt = 0;
3500
3501 bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3502 ((u64)rsp->event));
3503
3504 bfad_get_aen_entry(bfad, aen_entry);
3505 if (!aen_entry)
3506 return;
3507
3508 aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3509 aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3510 aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3511
3512 switch (rsp->event) {
3513 case BFA_SFP_SCN_INSERTED:
3514 aen_evt = BFA_PORT_AEN_SFP_INSERT;
3515 break;
3516 case BFA_SFP_SCN_REMOVED:
3517 aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3518 break;
3519 case BFA_SFP_SCN_FAILED:
3520 aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3521 break;
3522 case BFA_SFP_SCN_UNSUPPORT:
3523 aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3524 break;
3525 case BFA_SFP_SCN_POM:
3526 aen_evt = BFA_PORT_AEN_SFP_POM;
3527 aen_entry->aen_data.port.level = rsp->pomlvl;
3528 break;
3529 default:
3530 bfa_trc(sfp, rsp->event);
3531 WARN_ON(1);
3532 }
3533
3534 /* Send the AEN notification */
3535 bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3536 BFA_AEN_CAT_PORT, aen_evt);
3537}
3538
3539/*
3446 * SFP get data send 3540 * SFP get data send
3447 */ 3541 */
3448static void 3542static void
@@ -3482,6 +3576,50 @@ bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3482} 3576}
3483 3577
3484/* 3578/*
3579 * SFP scn handler
3580 */
3581static void
3582bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3583{
3584 struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3585
3586 switch (rsp->event) {
3587 case BFA_SFP_SCN_INSERTED:
3588 sfp->state = BFA_SFP_STATE_INSERTED;
3589 sfp->data_valid = 0;
3590 bfa_sfp_scn_aen_post(sfp, rsp);
3591 break;
3592 case BFA_SFP_SCN_REMOVED:
3593 sfp->state = BFA_SFP_STATE_REMOVED;
3594 sfp->data_valid = 0;
3595 bfa_sfp_scn_aen_post(sfp, rsp);
3596 break;
3597 case BFA_SFP_SCN_FAILED:
3598 sfp->state = BFA_SFP_STATE_FAILED;
3599 sfp->data_valid = 0;
3600 bfa_sfp_scn_aen_post(sfp, rsp);
3601 break;
3602 case BFA_SFP_SCN_UNSUPPORT:
3603 sfp->state = BFA_SFP_STATE_UNSUPPORT;
3604 bfa_sfp_scn_aen_post(sfp, rsp);
3605 if (!sfp->lock)
3606 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3607 break;
3608 case BFA_SFP_SCN_POM:
3609 bfa_sfp_scn_aen_post(sfp, rsp);
3610 break;
3611 case BFA_SFP_SCN_VALID:
3612 sfp->state = BFA_SFP_STATE_VALID;
3613 if (!sfp->lock)
3614 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3615 break;
3616 default:
3617 bfa_trc(sfp, rsp->event);
3618 WARN_ON(1);
3619 }
3620}
3621
3622/*
3485 * SFP show complete 3623 * SFP show complete
3486 */ 3624 */
3487static void 3625static void
@@ -3645,7 +3783,7 @@ bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3645 break; 3783 break;
3646 3784
3647 case BFI_SFP_I2H_SCN: 3785 case BFI_SFP_I2H_SCN:
3648 bfa_trc(sfp, msg->mh.msg_id); 3786 bfa_sfp_scn(sfp, msg);
3649 break; 3787 break;
3650 3788
3651 default: 3789 default:
@@ -3838,6 +3976,26 @@ bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
3838 BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ) 3976 BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
3839 3977
3840static void 3978static void
3979bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
3980 int inst, int type)
3981{
3982 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
3983 struct bfa_aen_entry_s *aen_entry;
3984
3985 bfad_get_aen_entry(bfad, aen_entry);
3986 if (!aen_entry)
3987 return;
3988
3989 aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
3990 aen_entry->aen_data.audit.partition_inst = inst;
3991 aen_entry->aen_data.audit.partition_type = type;
3992
3993 /* Send the AEN notification */
3994 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
3995 BFA_AEN_CAT_AUDIT, event);
3996}
3997
3998static void
3841bfa_flash_cb(struct bfa_flash_s *flash) 3999bfa_flash_cb(struct bfa_flash_s *flash)
3842{ 4000{
3843 flash->op_busy = 0; 4001 flash->op_busy = 0;
@@ -3978,6 +4136,7 @@ bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
3978 struct bfi_flash_erase_rsp_s *erase; 4136 struct bfi_flash_erase_rsp_s *erase;
3979 struct bfi_flash_write_rsp_s *write; 4137 struct bfi_flash_write_rsp_s *write;
3980 struct bfi_flash_read_rsp_s *read; 4138 struct bfi_flash_read_rsp_s *read;
4139 struct bfi_flash_event_s *event;
3981 struct bfi_mbmsg_s *msg; 4140 struct bfi_mbmsg_s *msg;
3982 } m; 4141 } m;
3983 4142
@@ -4061,8 +4220,19 @@ bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4061 } 4220 }
4062 break; 4221 break;
4063 case BFI_FLASH_I2H_BOOT_VER_RSP: 4222 case BFI_FLASH_I2H_BOOT_VER_RSP:
4223 break;
4064 case BFI_FLASH_I2H_EVENT: 4224 case BFI_FLASH_I2H_EVENT:
4065 bfa_trc(flash, msg->mh.msg_id); 4225 status = be32_to_cpu(m.event->status);
4226 bfa_trc(flash, status);
4227 if (status == BFA_STATUS_BAD_FWCFG)
4228 bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4229 else if (status == BFA_STATUS_INVALID_VENDOR) {
4230 u32 param;
4231 param = be32_to_cpu(m.event->param);
4232 bfa_trc(flash, param);
4233 bfa_ioc_aen_post(flash->ioc,
4234 BFA_IOC_AEN_INVALID_VENDOR);
4235 }
4066 break; 4236 break;
4067 4237
4068 default: 4238 default:
@@ -4204,6 +4374,8 @@ bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4204 flash->instance = instance; 4374 flash->instance = instance;
4205 4375
4206 bfa_flash_erase_send(flash); 4376 bfa_flash_erase_send(flash);
4377 bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4378 instance, type);
4207 return BFA_STATUS_OK; 4379 return BFA_STATUS_OK;
4208} 4380}
4209 4381
@@ -5416,3 +5588,396 @@ bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5416 WARN_ON(1); 5588 WARN_ON(1);
5417 } 5589 }
5418} 5590}
5591
5592/*
5593 * DCONF module specific
5594 */
5595
5596BFA_MODULE(dconf);
5597
5598/*
5599 * DCONF state machine events
5600 */
5601enum bfa_dconf_event {
5602 BFA_DCONF_SM_INIT = 1, /* dconf Init */
5603 BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
5604 BFA_DCONF_SM_WR = 3, /* binding change, map */
5605 BFA_DCONF_SM_TIMEOUT = 4, /* Start timer */
5606 BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
5607 BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
5608};
5609
5610/* forward declaration of DCONF state machine */
5611static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5612 enum bfa_dconf_event event);
5613static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5614 enum bfa_dconf_event event);
5615static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5616 enum bfa_dconf_event event);
5617static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5618 enum bfa_dconf_event event);
5619static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5620 enum bfa_dconf_event event);
5621static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5622 enum bfa_dconf_event event);
5623static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5624 enum bfa_dconf_event event);
5625
5626static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5627static void bfa_dconf_timer(void *cbarg);
5628static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5629static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5630
5631/*
5632 * Begining state of dconf module. Waiting for an event to start.
5633 */
5634static void
5635bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5636{
5637 bfa_status_t bfa_status;
5638 bfa_trc(dconf->bfa, event);
5639
5640 switch (event) {
5641 case BFA_DCONF_SM_INIT:
5642 if (dconf->min_cfg) {
5643 bfa_trc(dconf->bfa, dconf->min_cfg);
5644 return;
5645 }
5646 bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5647 dconf->flashdone = BFA_FALSE;
5648 bfa_trc(dconf->bfa, dconf->flashdone);
5649 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5650 BFA_FLASH_PART_DRV, dconf->instance,
5651 dconf->dconf,
5652 sizeof(struct bfa_dconf_s), 0,
5653 bfa_dconf_init_cb, dconf->bfa);
5654 if (bfa_status != BFA_STATUS_OK) {
5655 bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5656 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5657 return;
5658 }
5659 break;
5660 case BFA_DCONF_SM_EXIT:
5661 dconf->flashdone = BFA_TRUE;
5662 case BFA_DCONF_SM_IOCDISABLE:
5663 case BFA_DCONF_SM_WR:
5664 case BFA_DCONF_SM_FLASH_COMP:
5665 break;
5666 default:
5667 bfa_sm_fault(dconf->bfa, event);
5668 }
5669}
5670
5671/*
5672 * Read flash for dconf entries and make a call back to the driver once done.
5673 */
5674static void
5675bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5676 enum bfa_dconf_event event)
5677{
5678 bfa_trc(dconf->bfa, event);
5679
5680 switch (event) {
5681 case BFA_DCONF_SM_FLASH_COMP:
5682 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5683 break;
5684 case BFA_DCONF_SM_TIMEOUT:
5685 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5686 break;
5687 case BFA_DCONF_SM_EXIT:
5688 dconf->flashdone = BFA_TRUE;
5689 bfa_trc(dconf->bfa, dconf->flashdone);
5690 case BFA_DCONF_SM_IOCDISABLE:
5691 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5692 break;
5693 default:
5694 bfa_sm_fault(dconf->bfa, event);
5695 }
5696}
5697
5698/*
5699 * DCONF Module is in ready state. Has completed the initialization.
5700 */
5701static void
5702bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5703{
5704 bfa_trc(dconf->bfa, event);
5705
5706 switch (event) {
5707 case BFA_DCONF_SM_WR:
5708 bfa_timer_start(dconf->bfa, &dconf->timer,
5709 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5710 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5711 break;
5712 case BFA_DCONF_SM_EXIT:
5713 dconf->flashdone = BFA_TRUE;
5714 bfa_trc(dconf->bfa, dconf->flashdone);
5715 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5716 break;
5717 case BFA_DCONF_SM_INIT:
5718 case BFA_DCONF_SM_IOCDISABLE:
5719 break;
5720 default:
5721 bfa_sm_fault(dconf->bfa, event);
5722 }
5723}
5724
5725/*
5726 * entries are dirty, write back to the flash.
5727 */
5728
5729static void
5730bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5731{
5732 bfa_trc(dconf->bfa, event);
5733
5734 switch (event) {
5735 case BFA_DCONF_SM_TIMEOUT:
5736 bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5737 bfa_dconf_flash_write(dconf);
5738 break;
5739 case BFA_DCONF_SM_WR:
5740 bfa_timer_stop(&dconf->timer);
5741 bfa_timer_start(dconf->bfa, &dconf->timer,
5742 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5743 break;
5744 case BFA_DCONF_SM_EXIT:
5745 bfa_timer_stop(&dconf->timer);
5746 bfa_timer_start(dconf->bfa, &dconf->timer,
5747 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5748 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5749 bfa_dconf_flash_write(dconf);
5750 break;
5751 case BFA_DCONF_SM_FLASH_COMP:
5752 break;
5753 case BFA_DCONF_SM_IOCDISABLE:
5754 bfa_timer_stop(&dconf->timer);
5755 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5756 break;
5757 default:
5758 bfa_sm_fault(dconf->bfa, event);
5759 }
5760}
5761
5762/*
5763 * Sync the dconf entries to the flash.
5764 */
5765static void
5766bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5767 enum bfa_dconf_event event)
5768{
5769 bfa_trc(dconf->bfa, event);
5770
5771 switch (event) {
5772 case BFA_DCONF_SM_IOCDISABLE:
5773 case BFA_DCONF_SM_FLASH_COMP:
5774 bfa_timer_stop(&dconf->timer);
5775 case BFA_DCONF_SM_TIMEOUT:
5776 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5777 dconf->flashdone = BFA_TRUE;
5778 bfa_trc(dconf->bfa, dconf->flashdone);
5779 bfa_ioc_disable(&dconf->bfa->ioc);
5780 break;
5781 default:
5782 bfa_sm_fault(dconf->bfa, event);
5783 }
5784}
5785
5786static void
5787bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5788{
5789 bfa_trc(dconf->bfa, event);
5790
5791 switch (event) {
5792 case BFA_DCONF_SM_FLASH_COMP:
5793 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5794 break;
5795 case BFA_DCONF_SM_WR:
5796 bfa_timer_start(dconf->bfa, &dconf->timer,
5797 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5798 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5799 break;
5800 case BFA_DCONF_SM_EXIT:
5801 bfa_timer_start(dconf->bfa, &dconf->timer,
5802 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5803 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5804 break;
5805 case BFA_DCONF_SM_IOCDISABLE:
5806 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5807 break;
5808 default:
5809 bfa_sm_fault(dconf->bfa, event);
5810 }
5811}
5812
5813static void
5814bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5815 enum bfa_dconf_event event)
5816{
5817 bfa_trc(dconf->bfa, event);
5818
5819 switch (event) {
5820 case BFA_DCONF_SM_INIT:
5821 bfa_timer_start(dconf->bfa, &dconf->timer,
5822 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5823 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5824 break;
5825 case BFA_DCONF_SM_EXIT:
5826 dconf->flashdone = BFA_TRUE;
5827 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5828 break;
5829 case BFA_DCONF_SM_IOCDISABLE:
5830 break;
5831 default:
5832 bfa_sm_fault(dconf->bfa, event);
5833 }
5834}
5835
5836/*
5837 * Compute and return memory needed by DRV_CFG module.
5838 */
5839static void
5840bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
5841 struct bfa_s *bfa)
5842{
5843 struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
5844
5845 if (cfg->drvcfg.min_cfg)
5846 bfa_mem_kva_setup(meminfo, dconf_kva,
5847 sizeof(struct bfa_dconf_hdr_s));
5848 else
5849 bfa_mem_kva_setup(meminfo, dconf_kva,
5850 sizeof(struct bfa_dconf_s));
5851}
5852
5853static void
5854bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5855 struct bfa_pcidev_s *pcidev)
5856{
5857 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5858
5859 dconf->bfad = bfad;
5860 dconf->bfa = bfa;
5861 dconf->instance = bfa->ioc.port_id;
5862 bfa_trc(bfa, dconf->instance);
5863
5864 dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
5865 if (cfg->drvcfg.min_cfg) {
5866 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
5867 dconf->min_cfg = BFA_TRUE;
5868 /*
5869 * Set the flashdone flag to TRUE explicitly as no flash
5870 * write will happen in min_cfg mode.
5871 */
5872 dconf->flashdone = BFA_TRUE;
5873 } else {
5874 dconf->min_cfg = BFA_FALSE;
5875 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
5876 }
5877
5878 bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
5879 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5880}
5881
5882static void
5883bfa_dconf_init_cb(void *arg, bfa_status_t status)
5884{
5885 struct bfa_s *bfa = arg;
5886 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5887
5888 dconf->flashdone = BFA_TRUE;
5889 bfa_trc(bfa, dconf->flashdone);
5890 bfa_iocfc_cb_dconf_modinit(bfa, status);
5891 if (status == BFA_STATUS_OK) {
5892 bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
5893 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
5894 dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
5895 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
5896 dconf->dconf->hdr.version = BFI_DCONF_VERSION;
5897 }
5898 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5899}
5900
5901void
5902bfa_dconf_modinit(struct bfa_s *bfa)
5903{
5904 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5905 bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
5906}
5907static void
5908bfa_dconf_start(struct bfa_s *bfa)
5909{
5910}
5911
5912static void
5913bfa_dconf_stop(struct bfa_s *bfa)
5914{
5915}
5916
5917static void bfa_dconf_timer(void *cbarg)
5918{
5919 struct bfa_dconf_mod_s *dconf = cbarg;
5920 bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
5921}
5922static void
5923bfa_dconf_iocdisable(struct bfa_s *bfa)
5924{
5925 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5926 bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
5927}
5928
5929static void
5930bfa_dconf_detach(struct bfa_s *bfa)
5931{
5932}
5933
5934static bfa_status_t
5935bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
5936{
5937 bfa_status_t bfa_status;
5938 bfa_trc(dconf->bfa, 0);
5939
5940 bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
5941 BFA_FLASH_PART_DRV, dconf->instance,
5942 dconf->dconf, sizeof(struct bfa_dconf_s), 0,
5943 bfa_dconf_cbfn, dconf);
5944 if (bfa_status != BFA_STATUS_OK)
5945 WARN_ON(bfa_status);
5946 bfa_trc(dconf->bfa, bfa_status);
5947
5948 return bfa_status;
5949}
5950
5951bfa_status_t
5952bfa_dconf_update(struct bfa_s *bfa)
5953{
5954 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5955 bfa_trc(dconf->bfa, 0);
5956 if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
5957 return BFA_STATUS_FAILED;
5958
5959 if (dconf->min_cfg) {
5960 bfa_trc(dconf->bfa, dconf->min_cfg);
5961 return BFA_STATUS_FAILED;
5962 }
5963
5964 bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
5965 return BFA_STATUS_OK;
5966}
5967
5968static void
5969bfa_dconf_cbfn(void *arg, bfa_status_t status)
5970{
5971 struct bfa_dconf_mod_s *dconf = arg;
5972 WARN_ON(status);
5973 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5974}
5975
5976void
5977bfa_dconf_modexit(struct bfa_s *bfa)
5978{
5979 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5980 BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE;
5981 bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone);
5982 bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
5983}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index c5ecd2edc95d..546d46b37101 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -327,6 +327,7 @@ struct bfa_ioc_s {
327 enum bfa_mode_s port_mode; 327 enum bfa_mode_s port_mode;
328 u8 ad_cap_bm; /* adapter cap bit mask */ 328 u8 ad_cap_bm; /* adapter cap bit mask */
329 u8 port_mode_cfg; /* config port mode */ 329 u8 port_mode_cfg; /* config port mode */
330 int ioc_aen_seq;
330}; 331};
331 332
332struct bfa_ioc_hwif_s { 333struct bfa_ioc_hwif_s {
@@ -366,6 +367,8 @@ struct bfa_cb_qe_s {
366 struct list_head qe; 367 struct list_head qe;
367 bfa_cb_cbfn_t cbfn; 368 bfa_cb_cbfn_t cbfn;
368 bfa_boolean_t once; 369 bfa_boolean_t once;
370 bfa_boolean_t pre_rmv; /* set for stack based qe(s) */
371 bfa_status_t fw_status; /* to access fw status in comp proc */
369 void *cbarg; 372 void *cbarg;
370}; 373};
371 374
@@ -658,7 +661,6 @@ struct bfa_phy_s {
658 struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */ 661 struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
659 struct bfa_mem_dma_s phy_dma; 662 struct bfa_mem_dma_s phy_dma;
660}; 663};
661
662#define BFA_PHY(__bfa) (&(__bfa)->modules.phy) 664#define BFA_PHY(__bfa) (&(__bfa)->modules.phy)
663#define BFA_MEM_PHY_DMA(__bfa) (&(BFA_PHY(__bfa)->phy_dma)) 665#define BFA_MEM_PHY_DMA(__bfa) (&(BFA_PHY(__bfa)->phy_dma))
664 666
@@ -684,6 +686,49 @@ void bfa_phy_memclaim(struct bfa_phy_s *phy,
684void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg); 686void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg);
685 687
686/* 688/*
689 * Driver Config( dconf) specific
690 */
691#define BFI_DCONF_SIGNATURE 0xabcdabcd
692#define BFI_DCONF_VERSION 1
693
694#pragma pack(1)
695struct bfa_dconf_hdr_s {
696 u32 signature;
697 u32 version;
698};
699
700struct bfa_dconf_s {
701 struct bfa_dconf_hdr_s hdr;
702 struct bfa_lunmask_cfg_s lun_mask;
703};
704#pragma pack()
705
706struct bfa_dconf_mod_s {
707 bfa_sm_t sm;
708 u8 instance;
709 bfa_boolean_t flashdone;
710 bfa_boolean_t read_data_valid;
711 bfa_boolean_t min_cfg;
712 struct bfa_timer_s timer;
713 struct bfa_s *bfa;
714 void *bfad;
715 void *trcmod;
716 struct bfa_dconf_s *dconf;
717 struct bfa_mem_kva_s kva_seg;
718};
719
720#define BFA_DCONF_MOD(__bfa) \
721 (&(__bfa)->modules.dconf_mod)
722#define BFA_MEM_DCONF_KVA(__bfa) (&(BFA_DCONF_MOD(__bfa)->kva_seg))
723#define bfa_dconf_read_data_valid(__bfa) \
724 (BFA_DCONF_MOD(__bfa)->read_data_valid)
725#define BFA_DCONF_UPDATE_TOV 5000 /* memtest timeout in msec */
726
727void bfa_dconf_modinit(struct bfa_s *bfa);
728void bfa_dconf_modexit(struct bfa_s *bfa);
729bfa_status_t bfa_dconf_update(struct bfa_s *bfa);
730
731/*
687 * IOC specfic macros 732 * IOC specfic macros
688 */ 733 */
689#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) 734#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@@ -803,6 +848,7 @@ void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
803 struct bfi_ioc_image_hdr_s *fwhdr); 848 struct bfi_ioc_image_hdr_s *fwhdr);
804bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, 849bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
805 struct bfi_ioc_image_hdr_s *fwhdr); 850 struct bfi_ioc_image_hdr_s *fwhdr);
851void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
806bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats); 852bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
807bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc); 853bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
808 854
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index 1c6efd40a673..2d36e4823835 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -44,6 +44,7 @@ struct bfa_modules_s {
44 struct bfa_flash_s flash; /* flash module */ 44 struct bfa_flash_s flash; /* flash module */
45 struct bfa_diag_s diag_mod; /* diagnostics module */ 45 struct bfa_diag_s diag_mod; /* diagnostics module */
46 struct bfa_phy_s phy; /* phy module */ 46 struct bfa_phy_s phy; /* phy module */
47 struct bfa_dconf_mod_s dconf_mod; /* DCONF common module */
47}; 48};
48 49
49/* 50/*
@@ -119,6 +120,7 @@ struct bfa_s {
119 struct list_head reqq_waitq[BFI_IOC_MAX_CQS]; 120 struct list_head reqq_waitq[BFI_IOC_MAX_CQS];
120 bfa_boolean_t fcs; /* FCS is attached to BFA */ 121 bfa_boolean_t fcs; /* FCS is attached to BFA */
121 struct bfa_msix_s msix; 122 struct bfa_msix_s msix;
123 int bfa_aen_seq;
122}; 124};
123 125
124extern bfa_boolean_t bfa_auto_recover; 126extern bfa_boolean_t bfa_auto_recover;
@@ -130,5 +132,6 @@ extern struct bfa_module_s hal_mod_lps;
130extern struct bfa_module_s hal_mod_uf; 132extern struct bfa_module_s hal_mod_uf;
131extern struct bfa_module_s hal_mod_rport; 133extern struct bfa_module_s hal_mod_rport;
132extern struct bfa_module_s hal_mod_fcp; 134extern struct bfa_module_s hal_mod_fcp;
135extern struct bfa_module_s hal_mod_dconf;
133 136
134#endif /* __BFA_MODULES_H__ */ 137#endif /* __BFA_MODULES_H__ */
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 21caaefce99f..aa8a0eaf91f9 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include "bfad_drv.h" 18#include "bfad_drv.h"
19#include "bfad_im.h"
19#include "bfa_plog.h" 20#include "bfa_plog.h"
20#include "bfa_cs.h" 21#include "bfa_cs.h"
21#include "bfa_modules.h" 22#include "bfa_modules.h"
@@ -2007,6 +2008,24 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2007 } 2008 }
2008} 2009}
2009 2010
2011static void
2012bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
2013{
2014 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2015 struct bfa_aen_entry_s *aen_entry;
2016
2017 bfad_get_aen_entry(bfad, aen_entry);
2018 if (!aen_entry)
2019 return;
2020
2021 aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
2022 aen_entry->aen_data.port.pwwn = fcport->pwwn;
2023
2024 /* Send the AEN notification */
2025 bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
2026 BFA_AEN_CAT_PORT, event);
2027}
2028
2010/* 2029/*
2011 * FC PORT state machine functions 2030 * FC PORT state machine functions
2012 */ 2031 */
@@ -2095,6 +2114,7 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2095 wwn2str(pwwn_buf, fcport->pwwn); 2114 wwn2str(pwwn_buf, fcport->pwwn);
2096 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2115 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2097 "Base port disabled: WWN = %s\n", pwwn_buf); 2116 "Base port disabled: WWN = %s\n", pwwn_buf);
2117 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2098 break; 2118 break;
2099 2119
2100 case BFA_FCPORT_SM_LINKUP: 2120 case BFA_FCPORT_SM_LINKUP:
@@ -2155,6 +2175,7 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2155 wwn2str(pwwn_buf, fcport->pwwn); 2175 wwn2str(pwwn_buf, fcport->pwwn);
2156 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2176 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2157 "Base port disabled: WWN = %s\n", pwwn_buf); 2177 "Base port disabled: WWN = %s\n", pwwn_buf);
2178 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2158 break; 2179 break;
2159 2180
2160 case BFA_FCPORT_SM_STOP: 2181 case BFA_FCPORT_SM_STOP:
@@ -2208,6 +2229,12 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2208 wwn2str(pwwn_buf, fcport->pwwn); 2229 wwn2str(pwwn_buf, fcport->pwwn);
2209 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2230 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2210 "Base port online: WWN = %s\n", pwwn_buf); 2231 "Base port online: WWN = %s\n", pwwn_buf);
2232 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
2233
2234 /* If QoS is enabled and it is not online, send AEN */
2235 if (fcport->cfg.qos_enabled &&
2236 fcport->qos_attr.state != BFA_QOS_ONLINE)
2237 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
2211 break; 2238 break;
2212 2239
2213 case BFA_FCPORT_SM_LINKDOWN: 2240 case BFA_FCPORT_SM_LINKDOWN:
@@ -2234,6 +2261,7 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2234 wwn2str(pwwn_buf, fcport->pwwn); 2261 wwn2str(pwwn_buf, fcport->pwwn);
2235 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2262 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2236 "Base port disabled: WWN = %s\n", pwwn_buf); 2263 "Base port disabled: WWN = %s\n", pwwn_buf);
2264 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2237 break; 2265 break;
2238 2266
2239 case BFA_FCPORT_SM_STOP: 2267 case BFA_FCPORT_SM_STOP:
@@ -2279,8 +2307,10 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2279 wwn2str(pwwn_buf, fcport->pwwn); 2307 wwn2str(pwwn_buf, fcport->pwwn);
2280 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2308 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2281 "Base port offline: WWN = %s\n", pwwn_buf); 2309 "Base port offline: WWN = %s\n", pwwn_buf);
2310 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2282 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2311 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2283 "Base port disabled: WWN = %s\n", pwwn_buf); 2312 "Base port disabled: WWN = %s\n", pwwn_buf);
2313 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2284 break; 2314 break;
2285 2315
2286 case BFA_FCPORT_SM_LINKDOWN: 2316 case BFA_FCPORT_SM_LINKDOWN:
@@ -2290,26 +2320,32 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2290 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, 2320 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2291 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); 2321 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
2292 wwn2str(pwwn_buf, fcport->pwwn); 2322 wwn2str(pwwn_buf, fcport->pwwn);
2293 if (BFA_PORT_IS_DISABLED(fcport->bfa)) 2323 if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2294 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2324 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2295 "Base port offline: WWN = %s\n", pwwn_buf); 2325 "Base port offline: WWN = %s\n", pwwn_buf);
2296 else 2326 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2327 } else {
2297 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2328 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2298 "Base port (WWN = %s) " 2329 "Base port (WWN = %s) "
2299 "lost fabric connectivity\n", pwwn_buf); 2330 "lost fabric connectivity\n", pwwn_buf);
2331 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2332 }
2300 break; 2333 break;
2301 2334
2302 case BFA_FCPORT_SM_STOP: 2335 case BFA_FCPORT_SM_STOP:
2303 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); 2336 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2304 bfa_fcport_reset_linkinfo(fcport); 2337 bfa_fcport_reset_linkinfo(fcport);
2305 wwn2str(pwwn_buf, fcport->pwwn); 2338 wwn2str(pwwn_buf, fcport->pwwn);
2306 if (BFA_PORT_IS_DISABLED(fcport->bfa)) 2339 if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2307 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2340 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2308 "Base port offline: WWN = %s\n", pwwn_buf); 2341 "Base port offline: WWN = %s\n", pwwn_buf);
2309 else 2342 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2343 } else {
2310 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2344 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2311 "Base port (WWN = %s) " 2345 "Base port (WWN = %s) "
2312 "lost fabric connectivity\n", pwwn_buf); 2346 "lost fabric connectivity\n", pwwn_buf);
2347 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2348 }
2313 break; 2349 break;
2314 2350
2315 case BFA_FCPORT_SM_HWFAIL: 2351 case BFA_FCPORT_SM_HWFAIL:
@@ -2317,13 +2353,16 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2317 bfa_fcport_reset_linkinfo(fcport); 2353 bfa_fcport_reset_linkinfo(fcport);
2318 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); 2354 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2319 wwn2str(pwwn_buf, fcport->pwwn); 2355 wwn2str(pwwn_buf, fcport->pwwn);
2320 if (BFA_PORT_IS_DISABLED(fcport->bfa)) 2356 if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2321 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2357 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2322 "Base port offline: WWN = %s\n", pwwn_buf); 2358 "Base port offline: WWN = %s\n", pwwn_buf);
2323 else 2359 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2360 } else {
2324 BFA_LOG(KERN_ERR, bfad, bfa_log_level, 2361 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2325 "Base port (WWN = %s) " 2362 "Base port (WWN = %s) "
2326 "lost fabric connectivity\n", pwwn_buf); 2363 "lost fabric connectivity\n", pwwn_buf);
2364 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2365 }
2327 break; 2366 break;
2328 2367
2329 default: 2368 default:
@@ -2454,6 +2493,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2454 wwn2str(pwwn_buf, fcport->pwwn); 2493 wwn2str(pwwn_buf, fcport->pwwn);
2455 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2494 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2456 "Base port enabled: WWN = %s\n", pwwn_buf); 2495 "Base port enabled: WWN = %s\n", pwwn_buf);
2496 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
2457 break; 2497 break;
2458 2498
2459 case BFA_FCPORT_SM_STOP: 2499 case BFA_FCPORT_SM_STOP:
@@ -2508,6 +2548,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2508 wwn2str(pwwn_buf, fcport->pwwn); 2548 wwn2str(pwwn_buf, fcport->pwwn);
2509 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 2549 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2510 "Base port enabled: WWN = %s\n", pwwn_buf); 2550 "Base port enabled: WWN = %s\n", pwwn_buf);
2551 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
2511 break; 2552 break;
2512 2553
2513 case BFA_FCPORT_SM_DISABLE: 2554 case BFA_FCPORT_SM_DISABLE:
@@ -2874,6 +2915,9 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2874 2915
2875 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS; 2916 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
2876 2917
2918 INIT_LIST_HEAD(&fcport->stats_pending_q);
2919 INIT_LIST_HEAD(&fcport->statsclr_pending_q);
2920
2877 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport); 2921 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
2878} 2922}
2879 2923
@@ -3102,30 +3146,38 @@ bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
3102static void 3146static void
3103__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete) 3147__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
3104{ 3148{
3105 struct bfa_fcport_s *fcport = cbarg; 3149 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
3150 struct bfa_cb_pending_q_s *cb;
3151 struct list_head *qe, *qen;
3152 union bfa_fcport_stats_u *ret;
3106 3153
3107 if (complete) { 3154 if (complete) {
3108 if (fcport->stats_status == BFA_STATUS_OK) { 3155 struct timeval tv;
3109 struct timeval tv; 3156 if (fcport->stats_status == BFA_STATUS_OK)
3110 3157 do_gettimeofday(&tv);
3111 /* Swap FC QoS or FCoE stats */ 3158
3112 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { 3159 list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
3113 bfa_fcport_qos_stats_swap( 3160 bfa_q_deq(&fcport->stats_pending_q, &qe);
3114 &fcport->stats_ret->fcqos, 3161 cb = (struct bfa_cb_pending_q_s *)qe;
3115 &fcport->stats->fcqos); 3162 if (fcport->stats_status == BFA_STATUS_OK) {
3116 } else { 3163 ret = (union bfa_fcport_stats_u *)cb->data;
3117 bfa_fcport_fcoe_stats_swap( 3164 /* Swap FC QoS or FCoE stats */
3118 &fcport->stats_ret->fcoe, 3165 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
3119 &fcport->stats->fcoe); 3166 bfa_fcport_qos_stats_swap(&ret->fcqos,
3120 3167 &fcport->stats->fcqos);
3121 do_gettimeofday(&tv); 3168 else {
3122 fcport->stats_ret->fcoe.secs_reset = 3169 bfa_fcport_fcoe_stats_swap(&ret->fcoe,
3170 &fcport->stats->fcoe);
3171 ret->fcoe.secs_reset =
3123 tv.tv_sec - fcport->stats_reset_time; 3172 tv.tv_sec - fcport->stats_reset_time;
3173 }
3124 } 3174 }
3175 bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
3176 fcport->stats_status);
3125 } 3177 }
3126 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); 3178 fcport->stats_status = BFA_STATUS_OK;
3127 } else { 3179 } else {
3128 fcport->stats_busy = BFA_FALSE; 3180 INIT_LIST_HEAD(&fcport->stats_pending_q);
3129 fcport->stats_status = BFA_STATUS_OK; 3181 fcport->stats_status = BFA_STATUS_OK;
3130 } 3182 }
3131} 3183}
@@ -3143,8 +3195,7 @@ bfa_fcport_stats_get_timeout(void *cbarg)
3143 } 3195 }
3144 3196
3145 fcport->stats_status = BFA_STATUS_ETIMER; 3197 fcport->stats_status = BFA_STATUS_ETIMER;
3146 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get, 3198 __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3147 fcport);
3148} 3199}
3149 3200
3150static void 3201static void
@@ -3174,7 +3225,9 @@ bfa_fcport_send_stats_get(void *cbarg)
3174static void 3225static void
3175__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete) 3226__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3176{ 3227{
3177 struct bfa_fcport_s *fcport = cbarg; 3228 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3229 struct bfa_cb_pending_q_s *cb;
3230 struct list_head *qe, *qen;
3178 3231
3179 if (complete) { 3232 if (complete) {
3180 struct timeval tv; 3233 struct timeval tv;
@@ -3184,10 +3237,15 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3184 */ 3237 */
3185 do_gettimeofday(&tv); 3238 do_gettimeofday(&tv);
3186 fcport->stats_reset_time = tv.tv_sec; 3239 fcport->stats_reset_time = tv.tv_sec;
3187 3240 list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
3188 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); 3241 bfa_q_deq(&fcport->statsclr_pending_q, &qe);
3242 cb = (struct bfa_cb_pending_q_s *)qe;
3243 bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
3244 fcport->stats_status);
3245 }
3246 fcport->stats_status = BFA_STATUS_OK;
3189 } else { 3247 } else {
3190 fcport->stats_busy = BFA_FALSE; 3248 INIT_LIST_HEAD(&fcport->statsclr_pending_q);
3191 fcport->stats_status = BFA_STATUS_OK; 3249 fcport->stats_status = BFA_STATUS_OK;
3192 } 3250 }
3193} 3251}
@@ -3205,8 +3263,7 @@ bfa_fcport_stats_clr_timeout(void *cbarg)
3205 } 3263 }
3206 3264
3207 fcport->stats_status = BFA_STATUS_ETIMER; 3265 fcport->stats_status = BFA_STATUS_ETIMER;
3208 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, 3266 __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3209 __bfa_cb_fcport_stats_clr, fcport);
3210} 3267}
3211 3268
3212static void 3269static void
@@ -3402,6 +3459,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3402 fcport->use_flash_cfg = BFA_FALSE; 3459 fcport->use_flash_cfg = BFA_FALSE;
3403 } 3460 }
3404 3461
3462 if (fcport->cfg.qos_enabled)
3463 fcport->qos_attr.state = BFA_QOS_OFFLINE;
3464 else
3465 fcport->qos_attr.state = BFA_QOS_DISABLED;
3466
3405 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); 3467 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3406 } 3468 }
3407 break; 3469 break;
@@ -3426,28 +3488,26 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3426 /* 3488 /*
3427 * check for timer pop before processing the rsp 3489 * check for timer pop before processing the rsp
3428 */ 3490 */
3429 if (fcport->stats_busy == BFA_FALSE || 3491 if (list_empty(&fcport->stats_pending_q) ||
3430 fcport->stats_status == BFA_STATUS_ETIMER) 3492 (fcport->stats_status == BFA_STATUS_ETIMER))
3431 break; 3493 break;
3432 3494
3433 bfa_timer_stop(&fcport->timer); 3495 bfa_timer_stop(&fcport->timer);
3434 fcport->stats_status = i2hmsg.pstatsget_rsp->status; 3496 fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3435 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, 3497 __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3436 __bfa_cb_fcport_stats_get, fcport);
3437 break; 3498 break;
3438 3499
3439 case BFI_FCPORT_I2H_STATS_CLEAR_RSP: 3500 case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3440 /* 3501 /*
3441 * check for timer pop before processing the rsp 3502 * check for timer pop before processing the rsp
3442 */ 3503 */
3443 if (fcport->stats_busy == BFA_FALSE || 3504 if (list_empty(&fcport->statsclr_pending_q) ||
3444 fcport->stats_status == BFA_STATUS_ETIMER) 3505 (fcport->stats_status == BFA_STATUS_ETIMER))
3445 break; 3506 break;
3446 3507
3447 bfa_timer_stop(&fcport->timer); 3508 bfa_timer_stop(&fcport->timer);
3448 fcport->stats_status = BFA_STATUS_OK; 3509 fcport->stats_status = BFA_STATUS_OK;
3449 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, 3510 __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3450 __bfa_cb_fcport_stats_clr, fcport);
3451 break; 3511 break;
3452 3512
3453 case BFI_FCPORT_I2H_ENABLE_AEN: 3513 case BFI_FCPORT_I2H_ENABLE_AEN:
@@ -3779,25 +3839,25 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3779 * Fetch port statistics (FCQoS or FCoE). 3839 * Fetch port statistics (FCQoS or FCoE).
3780 */ 3840 */
3781bfa_status_t 3841bfa_status_t
3782bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, 3842bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3783 bfa_cb_port_t cbfn, void *cbarg)
3784{ 3843{
3785 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3844 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3786 3845
3787 if (fcport->stats_busy) { 3846 if (bfa_ioc_is_disabled(&bfa->ioc))
3788 bfa_trc(bfa, fcport->stats_busy); 3847 return BFA_STATUS_IOC_DISABLED;
3789 return BFA_STATUS_DEVBUSY;
3790 }
3791 3848
3792 fcport->stats_busy = BFA_TRUE; 3849 if (!list_empty(&fcport->statsclr_pending_q))
3793 fcport->stats_ret = stats; 3850 return BFA_STATUS_DEVBUSY;
3794 fcport->stats_cbfn = cbfn;
3795 fcport->stats_cbarg = cbarg;
3796 3851
3797 bfa_fcport_send_stats_get(fcport); 3852 if (list_empty(&fcport->stats_pending_q)) {
3853 list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
3854 bfa_fcport_send_stats_get(fcport);
3855 bfa_timer_start(bfa, &fcport->timer,
3856 bfa_fcport_stats_get_timeout,
3857 fcport, BFA_FCPORT_STATS_TOV);
3858 } else
3859 list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
3798 3860
3799 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
3800 fcport, BFA_FCPORT_STATS_TOV);
3801 return BFA_STATUS_OK; 3861 return BFA_STATUS_OK;
3802} 3862}
3803 3863
@@ -3805,27 +3865,25 @@ bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3805 * Reset port statistics (FCQoS or FCoE). 3865 * Reset port statistics (FCQoS or FCoE).
3806 */ 3866 */
3807bfa_status_t 3867bfa_status_t
3808bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg) 3868bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3809{ 3869{
3810 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3870 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3811 3871
3812 if (fcport->stats_busy) { 3872 if (!list_empty(&fcport->stats_pending_q))
3813 bfa_trc(bfa, fcport->stats_busy);
3814 return BFA_STATUS_DEVBUSY; 3873 return BFA_STATUS_DEVBUSY;
3815 }
3816
3817 fcport->stats_busy = BFA_TRUE;
3818 fcport->stats_cbfn = cbfn;
3819 fcport->stats_cbarg = cbarg;
3820 3874
3821 bfa_fcport_send_stats_clear(fcport); 3875 if (list_empty(&fcport->statsclr_pending_q)) {
3876 list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
3877 bfa_fcport_send_stats_clear(fcport);
3878 bfa_timer_start(bfa, &fcport->timer,
3879 bfa_fcport_stats_clr_timeout,
3880 fcport, BFA_FCPORT_STATS_TOV);
3881 } else
3882 list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
3822 3883
3823 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
3824 fcport, BFA_FCPORT_STATS_TOV);
3825 return BFA_STATUS_OK; 3884 return BFA_STATUS_OK;
3826} 3885}
3827 3886
3828
3829/* 3887/*
3830 * Fetch port attributes. 3888 * Fetch port attributes.
3831 */ 3889 */
@@ -4619,6 +4677,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4619 rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle); 4677 rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
4620 rp->fw_handle = msg.create_rsp->fw_handle; 4678 rp->fw_handle = msg.create_rsp->fw_handle;
4621 rp->qos_attr = msg.create_rsp->qos_attr; 4679 rp->qos_attr = msg.create_rsp->qos_attr;
4680 bfa_rport_set_lunmask(bfa, rp);
4622 WARN_ON(msg.create_rsp->status != BFA_STATUS_OK); 4681 WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
4623 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); 4682 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4624 break; 4683 break;
@@ -4626,6 +4685,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4626 case BFI_RPORT_I2H_DELETE_RSP: 4685 case BFI_RPORT_I2H_DELETE_RSP:
4627 rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle); 4686 rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
4628 WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK); 4687 WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
4688 bfa_rport_unset_lunmask(bfa, rp);
4629 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); 4689 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4630 break; 4690 break;
4631 4691
@@ -4706,6 +4766,37 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
4706 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED); 4766 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
4707} 4767}
4708 4768
4769/* Set Rport LUN Mask */
4770void
4771bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
4772{
4773 struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
4774 wwn_t lp_wwn, rp_wwn;
4775 u8 lp_tag = (u8)rp->rport_info.lp_tag;
4776
4777 rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
4778 lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
4779
4780 BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
4781 rp->lun_mask = BFA_TRUE;
4782 bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
4783}
4784
4785/* Unset Rport LUN mask */
4786void
4787bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
4788{
4789 struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
4790 wwn_t lp_wwn, rp_wwn;
4791
4792 rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
4793 lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
4794
4795 BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
4796 rp->lun_mask = BFA_FALSE;
4797 bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
4798 BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
4799}
4709 4800
4710/* 4801/*
4711 * SGPG related functions 4802 * SGPG related functions
@@ -5517,11 +5608,29 @@ bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
5517 return BFA_STATUS_PORT_NOT_DISABLED; 5608 return BFA_STATUS_PORT_NOT_DISABLED;
5518 } 5609 }
5519 5610
5520 /* Check if the speed is supported */ 5611 /*
5521 bfa_fcport_get_attr(bfa, &attr); 5612 * Check if input speed is supported by the port mode
5522 bfa_trc(fcdiag, attr.speed_supported); 5613 */
5523 if (speed > attr.speed_supported) 5614 if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5524 return BFA_STATUS_UNSUPP_SPEED; 5615 if (!(speed == BFA_PORT_SPEED_1GBPS ||
5616 speed == BFA_PORT_SPEED_2GBPS ||
5617 speed == BFA_PORT_SPEED_4GBPS ||
5618 speed == BFA_PORT_SPEED_8GBPS ||
5619 speed == BFA_PORT_SPEED_16GBPS ||
5620 speed == BFA_PORT_SPEED_AUTO)) {
5621 bfa_trc(fcdiag, speed);
5622 return BFA_STATUS_UNSUPP_SPEED;
5623 }
5624 bfa_fcport_get_attr(bfa, &attr);
5625 bfa_trc(fcdiag, attr.speed_supported);
5626 if (speed > attr.speed_supported)
5627 return BFA_STATUS_UNSUPP_SPEED;
5628 } else {
5629 if (speed != BFA_PORT_SPEED_10GBPS) {
5630 bfa_trc(fcdiag, speed);
5631 return BFA_STATUS_UNSUPP_SPEED;
5632 }
5633 }
5525 5634
5526 /* For Mezz card, port speed entered needs to be checked */ 5635 /* For Mezz card, port speed entered needs to be checked */
5527 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) { 5636 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index fbe513a671b5..95adb86d3769 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -297,6 +297,7 @@ struct bfa_rport_s {
297 void *rport_drv; /* fcs/driver rport object */ 297 void *rport_drv; /* fcs/driver rport object */
298 u16 fw_handle; /* firmware rport handle */ 298 u16 fw_handle; /* firmware rport handle */
299 u16 rport_tag; /* BFA rport tag */ 299 u16 rport_tag; /* BFA rport tag */
300 u8 lun_mask; /* LUN mask flag */
300 struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */ 301 struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */
301 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ 302 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
302 struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */ 303 struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */
@@ -404,6 +405,7 @@ struct bfa_lps_s {
404 u8 bb_scn; /* local BB_SCN */ 405 u8 bb_scn; /* local BB_SCN */
405 u8 lsrjt_rsn; /* LSRJT reason */ 406 u8 lsrjt_rsn; /* LSRJT reason */
406 u8 lsrjt_expl; /* LSRJT explanation */ 407 u8 lsrjt_expl; /* LSRJT explanation */
408 u8 lun_mask; /* LUN mask flag */
407 wwn_t pwwn; /* port wwn of lport */ 409 wwn_t pwwn; /* port wwn of lport */
408 wwn_t nwwn; /* node wwn of lport */ 410 wwn_t nwwn; /* node wwn of lport */
409 wwn_t pr_pwwn; /* port wwn of lport peer */ 411 wwn_t pr_pwwn; /* port wwn of lport peer */
@@ -441,7 +443,6 @@ void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
441 */ 443 */
442 444
443#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port)) 445#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
444typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status);
445 446
446/* 447/*
447 * Link notification data structure 448 * Link notification data structure
@@ -495,13 +496,11 @@ struct bfa_fcport_s {
495 u8 *stats_kva; 496 u8 *stats_kva;
496 u64 stats_pa; 497 u64 stats_pa;
497 union bfa_fcport_stats_u *stats; 498 union bfa_fcport_stats_u *stats;
498 union bfa_fcport_stats_u *stats_ret; /* driver stats location */
499 bfa_status_t stats_status; /* stats/statsclr status */ 499 bfa_status_t stats_status; /* stats/statsclr status */
500 bfa_boolean_t stats_busy; /* outstanding stats/statsclr */ 500 struct list_head stats_pending_q;
501 struct list_head statsclr_pending_q;
501 bfa_boolean_t stats_qfull; 502 bfa_boolean_t stats_qfull;
502 u32 stats_reset_time; /* stats reset time stamp */ 503 u32 stats_reset_time; /* stats reset time stamp */
503 bfa_cb_port_t stats_cbfn; /* driver callback function */
504 void *stats_cbarg; /* *!< user callback arg */
505 bfa_boolean_t diag_busy; /* diag busy status */ 504 bfa_boolean_t diag_busy; /* diag busy status */
506 bfa_boolean_t beacon; /* port beacon status */ 505 bfa_boolean_t beacon; /* port beacon status */
507 bfa_boolean_t link_e2e_beacon; /* link beacon status */ 506 bfa_boolean_t link_e2e_beacon; /* link beacon status */
@@ -552,10 +551,9 @@ void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
552 bfa_boolean_t link_e2e_beacon); 551 bfa_boolean_t link_e2e_beacon);
553bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa); 552bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa);
554bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa, 553bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
555 union bfa_fcport_stats_u *stats, 554 struct bfa_cb_pending_q_s *cb);
556 bfa_cb_port_t cbfn, void *cbarg); 555bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa,
557bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, 556 struct bfa_cb_pending_q_s *cb);
558 void *cbarg);
559bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa); 557bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
560bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa); 558bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa);
561bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa); 559bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa);
@@ -578,6 +576,19 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
578 struct bfa_rport_qos_attr_s new_qos_attr); 576 struct bfa_rport_qos_attr_s new_qos_attr);
579 577
580/* 578/*
579 * Rport LUN masking related
580 */
581#define BFA_RPORT_TAG_INVALID 0xffff
582#define BFA_LP_TAG_INVALID 0xff
583void bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
584void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
585bfa_boolean_t bfa_rport_lunmask_active(struct bfa_rport_s *rp);
586wwn_t bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
587struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
588 wwn_t *lpwwn, wwn_t rpwwn);
589void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
590
591/*
581 * bfa fcxp API functions 592 * bfa fcxp API functions
582 */ 593 */
583struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa, 594struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index beb30a748ea5..66fb72531b34 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1348,7 +1348,7 @@ int
1348bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) 1348bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1349{ 1349{
1350 struct bfad_s *bfad; 1350 struct bfad_s *bfad;
1351 int error = -ENODEV, retval; 1351 int error = -ENODEV, retval, i;
1352 1352
1353 /* For single port cards - only claim function 0 */ 1353 /* For single port cards - only claim function 0 */
1354 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) && 1354 if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
@@ -1372,6 +1372,12 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1372 bfa_trc_init(bfad->trcmod); 1372 bfa_trc_init(bfad->trcmod);
1373 bfa_trc(bfad, bfad_inst); 1373 bfa_trc(bfad, bfad_inst);
1374 1374
1375 /* AEN INIT */
1376 INIT_LIST_HEAD(&bfad->free_aen_q);
1377 INIT_LIST_HEAD(&bfad->active_aen_q);
1378 for (i = 0; i < BFA_AEN_MAX_ENTRY; i++)
1379 list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q);
1380
1375 if (!(bfad_load_fwimg(pdev))) { 1381 if (!(bfad_load_fwimg(pdev))) {
1376 kfree(bfad->trcmod); 1382 kfree(bfad->trcmod);
1377 goto out_alloc_trace_failure; 1383 goto out_alloc_trace_failure;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 89f863ed2334..06fc00caeb41 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -56,7 +56,7 @@ bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
56 spin_lock_irqsave(&bfad->bfad_lock, flags); 56 spin_lock_irqsave(&bfad->bfad_lock, flags);
57 if (bfad->disable_active) { 57 if (bfad->disable_active) {
58 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 58 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
59 return EBUSY; 59 return -EBUSY;
60 } 60 }
61 61
62 bfad->disable_active = BFA_TRUE; 62 bfad->disable_active = BFA_TRUE;
@@ -90,6 +90,7 @@ bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
90 bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum); 90 bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
91 iocmd->factorynwwn = pattr.factorynwwn; 91 iocmd->factorynwwn = pattr.factorynwwn;
92 iocmd->factorypwwn = pattr.factorypwwn; 92 iocmd->factorypwwn = pattr.factorypwwn;
93 iocmd->bfad_num = bfad->inst_no;
93 im_port = bfad->pport.im_port; 94 im_port = bfad->pport.im_port;
94 iocmd->host = im_port->shost->host_no; 95 iocmd->host = im_port->shost->host_no;
95 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 96 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -178,6 +179,38 @@ out:
178} 179}
179 180
180int 181int
182bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
183{
184 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
185 unsigned long flags;
186
187 if (v_cmd == IOCMD_IOC_RESET_STATS) {
188 bfa_ioc_clear_stats(&bfad->bfa);
189 iocmd->status = BFA_STATUS_OK;
190 } else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
191 spin_lock_irqsave(&bfad->bfad_lock, flags);
192 iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
193 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
194 }
195
196 return 0;
197}
198
199int
200bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
201{
202 struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
203
204 if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
205 strcpy(bfad->adapter_name, iocmd->name);
206 else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
207 strcpy(bfad->port_name, iocmd->name);
208
209 iocmd->status = BFA_STATUS_OK;
210 return 0;
211}
212
213int
181bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd) 214bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
182{ 215{
183 struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd; 216 struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
@@ -306,6 +339,81 @@ out:
306 return 0; 339 return 0;
307} 340}
308 341
342int
343bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
344{
345 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
346 struct bfad_hal_comp fcomp;
347 unsigned long flags;
348
349 init_completion(&fcomp.comp);
350 spin_lock_irqsave(&bfad->bfad_lock, flags);
351 iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
352 bfad_hcb_comp, &fcomp);
353 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
354 if (iocmd->status != BFA_STATUS_OK) {
355 bfa_trc(bfad, iocmd->status);
356 return 0;
357 }
358 wait_for_completion(&fcomp.comp);
359 iocmd->status = fcomp.status;
360 return 0;
361}
362
363int
364bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
365{
366 struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
367 unsigned long flags;
368
369 spin_lock_irqsave(&bfad->bfad_lock, flags);
370 if (v_cmd == IOCMD_PORT_CFG_TOPO)
371 cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
372 else if (v_cmd == IOCMD_PORT_CFG_SPEED)
373 cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
374 else if (v_cmd == IOCMD_PORT_CFG_ALPA)
375 cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
376 else if (v_cmd == IOCMD_PORT_CLR_ALPA)
377 cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
378 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
379
380 return 0;
381}
382
383int
384bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
385{
386 struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
387 (struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
388 unsigned long flags;
389
390 spin_lock_irqsave(&bfad->bfad_lock, flags);
391 iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
392 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
393
394 return 0;
395}
396
397int
398bfad_iocmd_port_cfg_bbsc(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
399{
400 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
401 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
402 unsigned long flags;
403
404 spin_lock_irqsave(&bfad->bfad_lock, flags);
405 if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
406 if (v_cmd == IOCMD_PORT_BBSC_ENABLE)
407 fcport->cfg.bb_scn_state = BFA_TRUE;
408 else if (v_cmd == IOCMD_PORT_BBSC_DISABLE)
409 fcport->cfg.bb_scn_state = BFA_FALSE;
410 }
411 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
412
413 iocmd->status = BFA_STATUS_OK;
414 return 0;
415}
416
309static int 417static int
310bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd) 418bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
311{ 419{
@@ -354,6 +462,40 @@ out:
354} 462}
355 463
356int 464int
465bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
466{
467 struct bfa_fcs_lport_s *fcs_port;
468 struct bfa_bsg_reset_stats_s *iocmd =
469 (struct bfa_bsg_reset_stats_s *)cmd;
470 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
471 struct list_head *qe, *qen;
472 struct bfa_itnim_s *itnim;
473 unsigned long flags;
474
475 spin_lock_irqsave(&bfad->bfad_lock, flags);
476 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
477 iocmd->vf_id, iocmd->vpwwn);
478 if (fcs_port == NULL) {
479 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
480 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
481 goto out;
482 }
483
484 bfa_fcs_lport_clear_stats(fcs_port);
485 /* clear IO stats from all active itnims */
486 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
487 itnim = (struct bfa_itnim_s *) qe;
488 if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
489 continue;
490 bfa_itnim_clear_stats(itnim);
491 }
492 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
493 iocmd->status = BFA_STATUS_OK;
494out:
495 return 0;
496}
497
498int
357bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd) 499bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
358{ 500{
359 struct bfa_fcs_lport_s *fcs_port; 501 struct bfa_fcs_lport_s *fcs_port;
@@ -389,7 +531,7 @@ bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
389 void *iocmd_bufptr; 531 void *iocmd_bufptr;
390 532
391 if (iocmd->nrports == 0) 533 if (iocmd->nrports == 0)
392 return EINVAL; 534 return -EINVAL;
393 535
394 if (bfad_chk_iocmd_sz(payload_len, 536 if (bfad_chk_iocmd_sz(payload_len,
395 sizeof(struct bfa_bsg_lport_get_rports_s), 537 sizeof(struct bfa_bsg_lport_get_rports_s),
@@ -539,6 +681,152 @@ out:
539 return 0; 681 return 0;
540} 682}
541 683
684int
685bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
686{
687 struct bfa_bsg_rport_reset_stats_s *iocmd =
688 (struct bfa_bsg_rport_reset_stats_s *)cmd;
689 struct bfa_fcs_lport_s *fcs_port;
690 struct bfa_fcs_rport_s *fcs_rport;
691 struct bfa_rport_s *rport;
692 unsigned long flags;
693
694 spin_lock_irqsave(&bfad->bfad_lock, flags);
695 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
696 iocmd->vf_id, iocmd->pwwn);
697 if (fcs_port == NULL) {
698 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
699 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
700 goto out;
701 }
702
703 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
704 if (fcs_rport == NULL) {
705 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
706 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
707 goto out;
708 }
709
710 memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
711 rport = bfa_fcs_rport_get_halrport(fcs_rport);
712 memset(&rport->stats, 0, sizeof(rport->stats));
713 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
714 iocmd->status = BFA_STATUS_OK;
715out:
716 return 0;
717}
718
719int
720bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
721{
722 struct bfa_bsg_rport_set_speed_s *iocmd =
723 (struct bfa_bsg_rport_set_speed_s *)cmd;
724 struct bfa_fcs_lport_s *fcs_port;
725 struct bfa_fcs_rport_s *fcs_rport;
726 unsigned long flags;
727
728 spin_lock_irqsave(&bfad->bfad_lock, flags);
729 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
730 iocmd->vf_id, iocmd->pwwn);
731 if (fcs_port == NULL) {
732 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
733 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
734 goto out;
735 }
736
737 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
738 if (fcs_rport == NULL) {
739 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
740 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
741 goto out;
742 }
743
744 fcs_rport->rpf.assigned_speed = iocmd->speed;
745 /* Set this speed in f/w only if the RPSC speed is not available */
746 if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
747 bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
748 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
749 iocmd->status = BFA_STATUS_OK;
750out:
751 return 0;
752}
753
754int
755bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
756{
757 struct bfa_fcs_vport_s *fcs_vport;
758 struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
759 unsigned long flags;
760
761 spin_lock_irqsave(&bfad->bfad_lock, flags);
762 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
763 iocmd->vf_id, iocmd->vpwwn);
764 if (fcs_vport == NULL) {
765 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
766 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
767 goto out;
768 }
769
770 bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
771 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
772 iocmd->status = BFA_STATUS_OK;
773out:
774 return 0;
775}
776
777int
778bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
779{
780 struct bfa_fcs_vport_s *fcs_vport;
781 struct bfa_bsg_vport_stats_s *iocmd =
782 (struct bfa_bsg_vport_stats_s *)cmd;
783 unsigned long flags;
784
785 spin_lock_irqsave(&bfad->bfad_lock, flags);
786 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
787 iocmd->vf_id, iocmd->vpwwn);
788 if (fcs_vport == NULL) {
789 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
790 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
791 goto out;
792 }
793
794 memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
795 sizeof(struct bfa_vport_stats_s));
796 memcpy((void *)&iocmd->vport_stats.port_stats,
797 (void *)&fcs_vport->lport.stats,
798 sizeof(struct bfa_lport_stats_s));
799 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
800 iocmd->status = BFA_STATUS_OK;
801out:
802 return 0;
803}
804
805int
806bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
807{
808 struct bfa_fcs_vport_s *fcs_vport;
809 struct bfa_bsg_reset_stats_s *iocmd =
810 (struct bfa_bsg_reset_stats_s *)cmd;
811 unsigned long flags;
812
813 spin_lock_irqsave(&bfad->bfad_lock, flags);
814 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
815 iocmd->vf_id, iocmd->vpwwn);
816 if (fcs_vport == NULL) {
817 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
818 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
819 goto out;
820 }
821
822 memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
823 memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
824 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
825 iocmd->status = BFA_STATUS_OK;
826out:
827 return 0;
828}
829
542static int 830static int
543bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd, 831bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
544 unsigned int payload_len) 832 unsigned int payload_len)
@@ -582,6 +870,66 @@ out:
582} 870}
583 871
584int 872int
873bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
874{
875 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
876 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
877 unsigned long flags;
878
879 spin_lock_irqsave(&bfad->bfad_lock, flags);
880
881 if (cmd == IOCMD_RATELIM_ENABLE)
882 fcport->cfg.ratelimit = BFA_TRUE;
883 else if (cmd == IOCMD_RATELIM_DISABLE)
884 fcport->cfg.ratelimit = BFA_FALSE;
885
886 if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
887 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
888
889 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
890 iocmd->status = BFA_STATUS_OK;
891
892 return 0;
893}
894
895int
896bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
897{
898 struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
899 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
900 unsigned long flags;
901
902 spin_lock_irqsave(&bfad->bfad_lock, flags);
903
904 /* Auto and speeds greater than the supported speed, are invalid */
905 if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
906 (iocmd->speed > fcport->speed_sup)) {
907 iocmd->status = BFA_STATUS_UNSUPP_SPEED;
908 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
909 return 0;
910 }
911
912 fcport->cfg.trl_def_speed = iocmd->speed;
913 iocmd->status = BFA_STATUS_OK;
914 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
915
916 return 0;
917}
918
919int
920bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
921{
922 struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
923 unsigned long flags;
924
925 spin_lock_irqsave(&bfad->bfad_lock, flags);
926 bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
927 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
928 iocmd->status = BFA_STATUS_OK;
929 return 0;
930}
931
932int
585bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd) 933bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
586{ 934{
587 struct bfa_bsg_fcpim_modstats_s *iocmd = 935 struct bfa_bsg_fcpim_modstats_s *iocmd =
@@ -604,6 +952,28 @@ bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
604} 952}
605 953
606int 954int
955bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
956{
957 struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
958 (struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
959 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
960 struct list_head *qe, *qen;
961 struct bfa_itnim_s *itnim;
962 unsigned long flags;
963
964 spin_lock_irqsave(&bfad->bfad_lock, flags);
965 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
966 itnim = (struct bfa_itnim_s *) qe;
967 bfa_itnim_clear_stats(itnim);
968 }
969 memset(&fcpim->del_itn_stats, 0,
970 sizeof(struct bfa_fcpim_del_itn_stats_s));
971 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
972 iocmd->status = BFA_STATUS_OK;
973 return 0;
974}
975
976int
607bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd) 977bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
608{ 978{
609 struct bfa_bsg_fcpim_del_itn_stats_s *iocmd = 979 struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
@@ -670,6 +1040,35 @@ bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
670} 1040}
671 1041
672static int 1042static int
1043bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
1044{
1045 struct bfa_bsg_rport_reset_stats_s *iocmd =
1046 (struct bfa_bsg_rport_reset_stats_s *)cmd;
1047 struct bfa_fcs_lport_s *fcs_port;
1048 struct bfa_fcs_itnim_s *itnim;
1049 unsigned long flags;
1050
1051 spin_lock_irqsave(&bfad->bfad_lock, flags);
1052 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1053 iocmd->vf_id, iocmd->pwwn);
1054 if (!fcs_port)
1055 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1056 else {
1057 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1058 if (itnim == NULL)
1059 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1060 else {
1061 iocmd->status = BFA_STATUS_OK;
1062 bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
1063 bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
1064 }
1065 }
1066 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1067
1068 return 0;
1069}
1070
1071static int
673bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd) 1072bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
674{ 1073{
675 struct bfa_bsg_itnim_itnstats_s *iocmd = 1074 struct bfa_bsg_itnim_itnstats_s *iocmd =
@@ -1511,11 +1910,545 @@ out:
1511 return 0; 1910 return 0;
1512} 1911}
1513 1912
/* Chunk size (16KB) in which the firmware core is copied to user space */
#define BFA_DEBUG_FW_CORE_CHUNK_SZ	0x4000U	/* 16K chunks for FW dump */

/*
 * Copy one BFA_DEBUG_FW_CORE_CHUNK_SZ chunk of the firmware core image
 * into the payload area that follows the bfa_bsg_debug_s header.
 * iocmd->offset is passed by pointer so the read can advance it —
 * repeated calls walk through the whole dump.  Always returns 0; the
 * real outcome is carried in iocmd->status.
 */
int
bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
	void *iocmd_bufptr;
	unsigned long flags;

	/* Payload must hold the bsg header plus one full chunk */
	if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
			BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	/* Buffer size must cover a chunk; size u16-aligned, offset u32-aligned */
	if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
			!IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
			!IS_ALIGNED(iocmd->offset, sizeof(u32))) {
		bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	/* Data area starts right after the fixed bsg header */
	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
				(u32 *)&iocmd->offset, &iocmd->bufsz);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
out:
	return 0;
}
1944
/*
 * Debug control sub-commands: re-arm the one-shot firmware state save,
 * clear the port log, or start/stop the driver trace facility.
 * Always reports BFA_STATUS_OK in iocmd->status.
 */
int
bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
		/* Allow the firmware state to be saved again on next event */
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	} else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
		/* NOTE(review): plog_buf is reset without bfad_lock here,
		 * unlike the FW_STATE_CLR branch above — confirm this is
		 * safe against concurrent port-log writers.
		 */
		bfad->plog_buf.head = bfad->plog_buf.tail = 0;
	else if (v_cmd == IOCMD_DEBUG_START_DTRC)
		bfa_trc_init(bfad->trcmod);
	else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
		bfa_trc_stop(bfad->trcmod);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
1965
1966int
1967bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
1968{
1969 struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
1970
1971 if (iocmd->ctl == BFA_TRUE)
1972 bfad->plog_buf.plog_enabled = 1;
1973 else
1974 bfad->plog_buf.plog_enabled = 0;
1975
1976 iocmd->status = BFA_STATUS_OK;
1977 return 0;
1978}
1979
/*
 * Turn FCP-IM I/O profiling on or off.  The current wall-clock time
 * (seconds) is handed to the profiling-on call as the start stamp.
 * Any other v_cmd value leaves iocmd->status untouched.
 */
int
bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_fcpim_profile_s *iocmd =
			(struct bfa_bsg_fcpim_profile_s *)cmd;
	struct timeval tv;
	unsigned long flags;

	/* Sample the time before taking the spinlock */
	do_gettimeofday(&tv);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
		iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
	else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
		iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
1998
1999static int
2000bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
2001{
2002 struct bfa_bsg_itnim_ioprofile_s *iocmd =
2003 (struct bfa_bsg_itnim_ioprofile_s *)cmd;
2004 struct bfa_fcs_lport_s *fcs_port;
2005 struct bfa_fcs_itnim_s *itnim;
2006 unsigned long flags;
2007
2008 spin_lock_irqsave(&bfad->bfad_lock, flags);
2009 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
2010 iocmd->vf_id, iocmd->lpwwn);
2011 if (!fcs_port)
2012 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
2013 else {
2014 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
2015 if (itnim == NULL)
2016 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
2017 else
2018 iocmd->status = bfa_itnim_get_ioprofile(
2019 bfa_fcs_itnim_get_halitn(itnim),
2020 &iocmd->ioprofile);
2021 }
2022 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2023 return 0;
2024}
2025
/*
 * Read the FC port statistics.  The request completes asynchronously:
 * a pending-queue element carrying &iocmd->stats as the destination is
 * queued with bfad_hcb_comp as callback, and we sleep on the completion
 * until it fires.  May block; the status is returned in iocmd->status.
 */
int
bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
			(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			&fcomp, &iocmd->stats);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	/* Wait for bfad_hcb_comp() to post the completion */
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
2050
/*
 * Clear the FC port statistics.  The clear completes asynchronously
 * through bfad_hcb_comp (no data buffer needed), so we queue a
 * pending-q element and block on the completion.  Always returns 0;
 * the operation status is carried in iocmd->status.
 */
int
bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	/* Sleep until the firmware acknowledges the clear */
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
2074
/*
 * Write the boot configuration into the BOOT flash partition of this
 * PCI function.  bfa_flash_update_part() completes asynchronously via
 * bfad_hcb_comp, so we block on the completion.  Always returns 0;
 * the operation status is carried in iocmd->status.
 */
int
bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	/* Sleep until the flash write finishes */
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
2096
/*
 * Read the boot configuration back from the BOOT flash partition of
 * this PCI function.  The flash read completes asynchronously, so we
 * block on the completion after queueing the request.
 */
int
bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
			BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
			bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	/* Sleep until the flash read finishes */
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
2118
2119int
2120bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
2121{
2122 struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
2123 struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
2124 struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
2125 unsigned long flags;
2126
2127 spin_lock_irqsave(&bfad->bfad_lock, flags);
2128 pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
2129 pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
2130 pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
2131 memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
2132 iocmd->status = BFA_STATUS_OK;
2133 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2134
2135 return 0;
2136}
2137
/*
 * Write the ethernet-boot (PXE) configuration into the PXECFG flash
 * partition of this port.  Blocks until the asynchronous flash update
 * completes; the outcome is reported in iocmd->status.
 */
int
bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	/* Sleep until the flash write finishes */
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
2160
/*
 * Read the ethernet-boot (PXE) configuration back from the PXECFG
 * flash partition of this port.  Blocks until the asynchronous flash
 * read completes; the outcome is reported in iocmd->status.
 */
int
bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
				BFA_FLASH_PART_PXECFG,
				bfad->bfa.ioc.port_id, &iocmd->cfg,
				sizeof(struct bfa_ethboot_cfg_s), 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	/* Sleep until the flash read finishes */
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
2183
/*
 * Enable or disable trunking on the FC port.  The port is disabled
 * first so the new trunk configuration takes effect on the subsequent
 * enable.  Always reports BFA_STATUS_OK.
 */
int
bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	if (v_cmd == IOCMD_TRUNK_ENABLE) {
		trunk->attr.state = BFA_TRUNK_OFFLINE;
		bfa_fcport_disable(&bfad->bfa);
		fcport->cfg.trunked = BFA_TRUE;
	} else if (v_cmd == IOCMD_TRUNK_DISABLE) {
		trunk->attr.state = BFA_TRUNK_DISABLED;
		bfa_fcport_disable(&bfad->bfa);
		fcport->cfg.trunked = BFA_FALSE;
	}

	/* NOTE(review): bfa_fcport_disable() was just issued above; this
	 * re-enable depends on what bfa_fcport_is_disabled() reports at
	 * this point — confirm the intended "restore admin state" semantics.
	 */
	if (!bfa_fcport_is_disabled(&bfad->bfa))
		bfa_fcport_enable(&bfad->bfa);

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}
2212
2213int
2214bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
2215{
2216 struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
2217 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2218 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2219 unsigned long flags;
2220
2221 spin_lock_irqsave(&bfad->bfad_lock, flags);
2222 memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
2223 sizeof(struct bfa_trunk_attr_s));
2224 iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
2225 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2226
2227 iocmd->status = BFA_STATUS_OK;
2228 return 0;
2229}
2230
2231int
2232bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2233{
2234 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2235 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2236 unsigned long flags;
2237
2238 spin_lock_irqsave(&bfad->bfad_lock, flags);
2239 if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
2240 if (v_cmd == IOCMD_QOS_ENABLE)
2241 fcport->cfg.qos_enabled = BFA_TRUE;
2242 else if (v_cmd == IOCMD_QOS_DISABLE)
2243 fcport->cfg.qos_enabled = BFA_FALSE;
2244 }
2245 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2246
2247 iocmd->status = BFA_STATUS_OK;
2248 return 0;
2249}
2250
2251int
2252bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
2253{
2254 struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
2255 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2256 unsigned long flags;
2257
2258 spin_lock_irqsave(&bfad->bfad_lock, flags);
2259 iocmd->attr.state = fcport->qos_attr.state;
2260 iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
2261 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2262
2263 iocmd->status = BFA_STATUS_OK;
2264 return 0;
2265}
2266
2267int
2268bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
2269{
2270 struct bfa_bsg_qos_vc_attr_s *iocmd =
2271 (struct bfa_bsg_qos_vc_attr_s *)cmd;
2272 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2273 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
2274 unsigned long flags;
2275 u32 i = 0;
2276
2277 spin_lock_irqsave(&bfad->bfad_lock, flags);
2278 iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
2279 iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
2280 iocmd->attr.elp_opmode_flags =
2281 be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
2282
2283 /* Individual VC info */
2284 while (i < iocmd->attr.total_vc_count) {
2285 iocmd->attr.vc_info[i].vc_credit =
2286 bfa_vc_attr->vc_info[i].vc_credit;
2287 iocmd->attr.vc_info[i].borrow_credit =
2288 bfa_vc_attr->vc_info[i].borrow_credit;
2289 iocmd->attr.vc_info[i].priority =
2290 bfa_vc_attr->vc_info[i].priority;
2291 i++;
2292 }
2293 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2294
2295 iocmd->status = BFA_STATUS_OK;
2296 return 0;
2297}
2298
/*
 * Read the QoS (FC port) statistics.  Only valid on FC-mode IOCs —
 * the WARN_ON fires otherwise.  The request completes asynchronously
 * through bfad_hcb_comp, so we block on a completion after queueing
 * it; firmware fills &iocmd->stats handed to the pending-q element.
 */
int
bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
			(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			&fcomp, &iocmd->stats);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	/* Wait for bfad_hcb_comp() to post the completion */
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
2325
/*
 * Clear the QoS (FC port) statistics.  Only valid on FC-mode IOCs —
 * the WARN_ON fires otherwise.  The clear completes asynchronously
 * (no data buffer needed), so we block on the completion.
 */
int
bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			&fcomp, NULL);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	/* Sleep until the firmware acknowledges the clear */
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
2351
2352int
2353bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
2354{
2355 struct bfa_bsg_vf_stats_s *iocmd =
2356 (struct bfa_bsg_vf_stats_s *)cmd;
2357 struct bfa_fcs_fabric_s *fcs_vf;
2358 unsigned long flags;
2359
2360 spin_lock_irqsave(&bfad->bfad_lock, flags);
2361 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2362 if (fcs_vf == NULL) {
2363 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2364 iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2365 goto out;
2366 }
2367 memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
2368 sizeof(struct bfa_vf_stats_s));
2369 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2370 iocmd->status = BFA_STATUS_OK;
2371out:
2372 return 0;
2373}
2374
2375int
2376bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
2377{
2378 struct bfa_bsg_vf_reset_stats_s *iocmd =
2379 (struct bfa_bsg_vf_reset_stats_s *)cmd;
2380 struct bfa_fcs_fabric_s *fcs_vf;
2381 unsigned long flags;
2382
2383 spin_lock_irqsave(&bfad->bfad_lock, flags);
2384 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2385 if (fcs_vf == NULL) {
2386 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2387 iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2388 goto out;
2389 }
2390 memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
2391 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2392 iocmd->status = BFA_STATUS_OK;
2393out:
2394 return 0;
2395}
2396
2397int
2398bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
2399{
2400 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
2401 unsigned long flags;
2402
2403 spin_lock_irqsave(&bfad->bfad_lock, flags);
2404 if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
2405 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
2406 else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
2407 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
2408 else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
2409 iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
2410 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2411 return 0;
2412}
2413
2414int
2415bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
2416{
2417 struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
2418 (struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
2419 struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
2420 unsigned long flags;
2421
2422 spin_lock_irqsave(&bfad->bfad_lock, flags);
2423 iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
2424 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2425 return 0;
2426}
2427
2428int
2429bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2430{
2431 struct bfa_bsg_fcpim_lunmask_s *iocmd =
2432 (struct bfa_bsg_fcpim_lunmask_s *)cmd;
2433 unsigned long flags;
2434
2435 spin_lock_irqsave(&bfad->bfad_lock, flags);
2436 if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
2437 iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
2438 &iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
2439 else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
2440 iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
2441 iocmd->vf_id, &iocmd->pwwn,
2442 iocmd->rpwwn, iocmd->lun);
2443 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2444 return 0;
2445}
2446
1514static int 2447static int
1515bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, 2448bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1516 unsigned int payload_len) 2449 unsigned int payload_len)
1517{ 2450{
1518 int rc = EINVAL; 2451 int rc = -EINVAL;
1519 2452
1520 switch (cmd) { 2453 switch (cmd) {
1521 case IOCMD_IOC_ENABLE: 2454 case IOCMD_IOC_ENABLE:
@@ -1536,6 +2469,14 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1536 case IOCMD_IOC_GET_FWSTATS: 2469 case IOCMD_IOC_GET_FWSTATS:
1537 rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len); 2470 rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
1538 break; 2471 break;
2472 case IOCMD_IOC_RESET_STATS:
2473 case IOCMD_IOC_RESET_FWSTATS:
2474 rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
2475 break;
2476 case IOCMD_IOC_SET_ADAPTER_NAME:
2477 case IOCMD_IOC_SET_PORT_NAME:
2478 rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
2479 break;
1539 case IOCMD_IOCFC_GET_ATTR: 2480 case IOCMD_IOCFC_GET_ATTR:
1540 rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd); 2481 rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
1541 break; 2482 break;
@@ -1554,12 +2495,31 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1554 case IOCMD_PORT_GET_STATS: 2495 case IOCMD_PORT_GET_STATS:
1555 rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len); 2496 rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
1556 break; 2497 break;
2498 case IOCMD_PORT_RESET_STATS:
2499 rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
2500 break;
2501 case IOCMD_PORT_CFG_TOPO:
2502 case IOCMD_PORT_CFG_SPEED:
2503 case IOCMD_PORT_CFG_ALPA:
2504 case IOCMD_PORT_CLR_ALPA:
2505 rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
2506 break;
2507 case IOCMD_PORT_CFG_MAXFRSZ:
2508 rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
2509 break;
2510 case IOCMD_PORT_BBSC_ENABLE:
2511 case IOCMD_PORT_BBSC_DISABLE:
2512 rc = bfad_iocmd_port_cfg_bbsc(bfad, iocmd, cmd);
2513 break;
1557 case IOCMD_LPORT_GET_ATTR: 2514 case IOCMD_LPORT_GET_ATTR:
1558 rc = bfad_iocmd_lport_get_attr(bfad, iocmd); 2515 rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
1559 break; 2516 break;
1560 case IOCMD_LPORT_GET_STATS: 2517 case IOCMD_LPORT_GET_STATS:
1561 rc = bfad_iocmd_lport_get_stats(bfad, iocmd); 2518 rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
1562 break; 2519 break;
2520 case IOCMD_LPORT_RESET_STATS:
2521 rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
2522 break;
1563 case IOCMD_LPORT_GET_IOSTATS: 2523 case IOCMD_LPORT_GET_IOSTATS:
1564 rc = bfad_iocmd_lport_get_iostats(bfad, iocmd); 2524 rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
1565 break; 2525 break;
@@ -1575,12 +2535,40 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1575 case IOCMD_RPORT_GET_STATS: 2535 case IOCMD_RPORT_GET_STATS:
1576 rc = bfad_iocmd_rport_get_stats(bfad, iocmd); 2536 rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
1577 break; 2537 break;
2538 case IOCMD_RPORT_RESET_STATS:
2539 rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
2540 break;
2541 case IOCMD_RPORT_SET_SPEED:
2542 rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
2543 break;
2544 case IOCMD_VPORT_GET_ATTR:
2545 rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
2546 break;
2547 case IOCMD_VPORT_GET_STATS:
2548 rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
2549 break;
2550 case IOCMD_VPORT_RESET_STATS:
2551 rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
2552 break;
1578 case IOCMD_FABRIC_GET_LPORTS: 2553 case IOCMD_FABRIC_GET_LPORTS:
1579 rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len); 2554 rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
1580 break; 2555 break;
2556 case IOCMD_RATELIM_ENABLE:
2557 case IOCMD_RATELIM_DISABLE:
2558 rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
2559 break;
2560 case IOCMD_RATELIM_DEF_SPEED:
2561 rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
2562 break;
2563 case IOCMD_FCPIM_FAILOVER:
2564 rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
2565 break;
1581 case IOCMD_FCPIM_MODSTATS: 2566 case IOCMD_FCPIM_MODSTATS:
1582 rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd); 2567 rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
1583 break; 2568 break;
2569 case IOCMD_FCPIM_MODSTATSCLR:
2570 rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
2571 break;
1584 case IOCMD_FCPIM_DEL_ITN_STATS: 2572 case IOCMD_FCPIM_DEL_ITN_STATS:
1585 rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd); 2573 rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
1586 break; 2574 break;
@@ -1590,6 +2578,9 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1590 case IOCMD_ITNIM_GET_IOSTATS: 2578 case IOCMD_ITNIM_GET_IOSTATS:
1591 rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd); 2579 rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
1592 break; 2580 break;
2581 case IOCMD_ITNIM_RESET_STATS:
2582 rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
2583 break;
1593 case IOCMD_ITNIM_GET_ITNSTATS: 2584 case IOCMD_ITNIM_GET_ITNSTATS:
1594 rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd); 2585 rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
1595 break; 2586 break;
@@ -1702,11 +2693,92 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1702 case IOCMD_DEBUG_PORTLOG: 2693 case IOCMD_DEBUG_PORTLOG:
1703 rc = bfad_iocmd_porglog_get(bfad, iocmd); 2694 rc = bfad_iocmd_porglog_get(bfad, iocmd);
1704 break; 2695 break;
2696 case IOCMD_DEBUG_FW_CORE:
2697 rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
2698 break;
2699 case IOCMD_DEBUG_FW_STATE_CLR:
2700 case IOCMD_DEBUG_PORTLOG_CLR:
2701 case IOCMD_DEBUG_START_DTRC:
2702 case IOCMD_DEBUG_STOP_DTRC:
2703 rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
2704 break;
2705 case IOCMD_DEBUG_PORTLOG_CTL:
2706 rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
2707 break;
2708 case IOCMD_FCPIM_PROFILE_ON:
2709 case IOCMD_FCPIM_PROFILE_OFF:
2710 rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
2711 break;
2712 case IOCMD_ITNIM_GET_IOPROFILE:
2713 rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
2714 break;
2715 case IOCMD_FCPORT_GET_STATS:
2716 rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
2717 break;
2718 case IOCMD_FCPORT_RESET_STATS:
2719 rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
2720 break;
2721 case IOCMD_BOOT_CFG:
2722 rc = bfad_iocmd_boot_cfg(bfad, iocmd);
2723 break;
2724 case IOCMD_BOOT_QUERY:
2725 rc = bfad_iocmd_boot_query(bfad, iocmd);
2726 break;
2727 case IOCMD_PREBOOT_QUERY:
2728 rc = bfad_iocmd_preboot_query(bfad, iocmd);
2729 break;
2730 case IOCMD_ETHBOOT_CFG:
2731 rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
2732 break;
2733 case IOCMD_ETHBOOT_QUERY:
2734 rc = bfad_iocmd_ethboot_query(bfad, iocmd);
2735 break;
2736 case IOCMD_TRUNK_ENABLE:
2737 case IOCMD_TRUNK_DISABLE:
2738 rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
2739 break;
2740 case IOCMD_TRUNK_GET_ATTR:
2741 rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
2742 break;
2743 case IOCMD_QOS_ENABLE:
2744 case IOCMD_QOS_DISABLE:
2745 rc = bfad_iocmd_qos(bfad, iocmd, cmd);
2746 break;
2747 case IOCMD_QOS_GET_ATTR:
2748 rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
2749 break;
2750 case IOCMD_QOS_GET_VC_ATTR:
2751 rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
2752 break;
2753 case IOCMD_QOS_GET_STATS:
2754 rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
2755 break;
2756 case IOCMD_QOS_RESET_STATS:
2757 rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
2758 break;
2759 case IOCMD_VF_GET_STATS:
2760 rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
2761 break;
2762 case IOCMD_VF_RESET_STATS:
2763 rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
2764 break;
2765 case IOCMD_FCPIM_LUNMASK_ENABLE:
2766 case IOCMD_FCPIM_LUNMASK_DISABLE:
2767 case IOCMD_FCPIM_LUNMASK_CLEAR:
2768 rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
2769 break;
2770 case IOCMD_FCPIM_LUNMASK_QUERY:
2771 rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
2772 break;
2773 case IOCMD_FCPIM_LUNMASK_ADD:
2774 case IOCMD_FCPIM_LUNMASK_DELETE:
2775 rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
2776 break;
1705 default: 2777 default:
1706 rc = EINVAL; 2778 rc = -EINVAL;
1707 break; 2779 break;
1708 } 2780 }
1709 return -rc; 2781 return rc;
1710} 2782}
1711 2783
1712static int 2784static int
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
index 99b0e8a70c89..e859adb9aa9e 100644
--- a/drivers/scsi/bfa/bfad_bsg.h
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -30,24 +30,48 @@ enum {
30 IOCMD_IOC_GET_INFO, 30 IOCMD_IOC_GET_INFO,
31 IOCMD_IOC_GET_STATS, 31 IOCMD_IOC_GET_STATS,
32 IOCMD_IOC_GET_FWSTATS, 32 IOCMD_IOC_GET_FWSTATS,
33 IOCMD_IOC_RESET_STATS,
34 IOCMD_IOC_RESET_FWSTATS,
35 IOCMD_IOC_SET_ADAPTER_NAME,
36 IOCMD_IOC_SET_PORT_NAME,
33 IOCMD_IOCFC_GET_ATTR, 37 IOCMD_IOCFC_GET_ATTR,
34 IOCMD_IOCFC_SET_INTR, 38 IOCMD_IOCFC_SET_INTR,
35 IOCMD_PORT_ENABLE, 39 IOCMD_PORT_ENABLE,
36 IOCMD_PORT_DISABLE, 40 IOCMD_PORT_DISABLE,
37 IOCMD_PORT_GET_ATTR, 41 IOCMD_PORT_GET_ATTR,
38 IOCMD_PORT_GET_STATS, 42 IOCMD_PORT_GET_STATS,
43 IOCMD_PORT_RESET_STATS,
44 IOCMD_PORT_CFG_TOPO,
45 IOCMD_PORT_CFG_SPEED,
46 IOCMD_PORT_CFG_ALPA,
47 IOCMD_PORT_CFG_MAXFRSZ,
48 IOCMD_PORT_CLR_ALPA,
49 IOCMD_PORT_BBSC_ENABLE,
50 IOCMD_PORT_BBSC_DISABLE,
39 IOCMD_LPORT_GET_ATTR, 51 IOCMD_LPORT_GET_ATTR,
40 IOCMD_LPORT_GET_RPORTS, 52 IOCMD_LPORT_GET_RPORTS,
41 IOCMD_LPORT_GET_STATS, 53 IOCMD_LPORT_GET_STATS,
54 IOCMD_LPORT_RESET_STATS,
42 IOCMD_LPORT_GET_IOSTATS, 55 IOCMD_LPORT_GET_IOSTATS,
43 IOCMD_RPORT_GET_ATTR, 56 IOCMD_RPORT_GET_ATTR,
44 IOCMD_RPORT_GET_ADDR, 57 IOCMD_RPORT_GET_ADDR,
45 IOCMD_RPORT_GET_STATS, 58 IOCMD_RPORT_GET_STATS,
59 IOCMD_RPORT_RESET_STATS,
60 IOCMD_RPORT_SET_SPEED,
61 IOCMD_VPORT_GET_ATTR,
62 IOCMD_VPORT_GET_STATS,
63 IOCMD_VPORT_RESET_STATS,
46 IOCMD_FABRIC_GET_LPORTS, 64 IOCMD_FABRIC_GET_LPORTS,
65 IOCMD_RATELIM_ENABLE,
66 IOCMD_RATELIM_DISABLE,
67 IOCMD_RATELIM_DEF_SPEED,
68 IOCMD_FCPIM_FAILOVER,
47 IOCMD_FCPIM_MODSTATS, 69 IOCMD_FCPIM_MODSTATS,
70 IOCMD_FCPIM_MODSTATSCLR,
48 IOCMD_FCPIM_DEL_ITN_STATS, 71 IOCMD_FCPIM_DEL_ITN_STATS,
49 IOCMD_ITNIM_GET_ATTR, 72 IOCMD_ITNIM_GET_ATTR,
50 IOCMD_ITNIM_GET_IOSTATS, 73 IOCMD_ITNIM_GET_IOSTATS,
74 IOCMD_ITNIM_RESET_STATS,
51 IOCMD_ITNIM_GET_ITNSTATS, 75 IOCMD_ITNIM_GET_ITNSTATS,
52 IOCMD_IOC_PCIFN_CFG, 76 IOCMD_IOC_PCIFN_CFG,
53 IOCMD_FCPORT_ENABLE, 77 IOCMD_FCPORT_ENABLE,
@@ -86,6 +110,39 @@ enum {
86 IOCMD_PHY_READ_FW, 110 IOCMD_PHY_READ_FW,
87 IOCMD_VHBA_QUERY, 111 IOCMD_VHBA_QUERY,
88 IOCMD_DEBUG_PORTLOG, 112 IOCMD_DEBUG_PORTLOG,
113 IOCMD_DEBUG_FW_CORE,
114 IOCMD_DEBUG_FW_STATE_CLR,
115 IOCMD_DEBUG_PORTLOG_CLR,
116 IOCMD_DEBUG_START_DTRC,
117 IOCMD_DEBUG_STOP_DTRC,
118 IOCMD_DEBUG_PORTLOG_CTL,
119 IOCMD_FCPIM_PROFILE_ON,
120 IOCMD_FCPIM_PROFILE_OFF,
121 IOCMD_ITNIM_GET_IOPROFILE,
122 IOCMD_FCPORT_GET_STATS,
123 IOCMD_FCPORT_RESET_STATS,
124 IOCMD_BOOT_CFG,
125 IOCMD_BOOT_QUERY,
126 IOCMD_PREBOOT_QUERY,
127 IOCMD_ETHBOOT_CFG,
128 IOCMD_ETHBOOT_QUERY,
129 IOCMD_TRUNK_ENABLE,
130 IOCMD_TRUNK_DISABLE,
131 IOCMD_TRUNK_GET_ATTR,
132 IOCMD_QOS_ENABLE,
133 IOCMD_QOS_DISABLE,
134 IOCMD_QOS_GET_ATTR,
135 IOCMD_QOS_GET_VC_ATTR,
136 IOCMD_QOS_GET_STATS,
137 IOCMD_QOS_RESET_STATS,
138 IOCMD_VF_GET_STATS,
139 IOCMD_VF_RESET_STATS,
140 IOCMD_FCPIM_LUNMASK_ENABLE,
141 IOCMD_FCPIM_LUNMASK_DISABLE,
142 IOCMD_FCPIM_LUNMASK_CLEAR,
143 IOCMD_FCPIM_LUNMASK_QUERY,
144 IOCMD_FCPIM_LUNMASK_ADD,
145 IOCMD_FCPIM_LUNMASK_DELETE,
89}; 146};
90 147
91struct bfa_bsg_gen_s { 148struct bfa_bsg_gen_s {
@@ -94,6 +151,43 @@ struct bfa_bsg_gen_s {
94 u16 rsvd; 151 u16 rsvd;
95}; 152};
96 153
/* IOCMD_DEBUG_PORTLOG_CTL request: enable/disable the port log buffer */
struct bfa_bsg_portlogctl_s {
	bfa_status_t	status;		/* in-band operation status */
	u16		bfad_num;	/* bfad instance number */
	u16		rsvd;
	bfa_boolean_t	ctl;		/* BFA_TRUE = enable the port log */
	int		inst_no;
};

/* IOCMD_FCPIM_PROFILE_ON/OFF request: toggle FCP-IM I/O profiling */
struct bfa_bsg_fcpim_profile_s {
	bfa_status_t	status;
	u16		bfad_num;
	u16		rsvd;
};

/* IOCMD_ITNIM_GET_IOPROFILE: fetch the I/O profile of one itnim */
struct bfa_bsg_itnim_ioprofile_s {
	bfa_status_t	status;
	u16		bfad_num;
	u16		vf_id;
	wwn_t		lpwwn;		/* local port wwn */
	wwn_t		rpwwn;		/* remote port wwn */
	struct bfa_itnim_ioprofile_s ioprofile;	/* filled by the driver */
};

/* IOCMD_FCPORT_GET_STATS / IOCMD_QOS_GET_STATS reply buffer */
struct bfa_bsg_fcport_stats_s {
	bfa_status_t	status;
	u16		bfad_num;
	u16		rsvd;
	union bfa_fcport_stats_u stats;	/* filled by firmware */
};

/* IOCMD_IOC_SET_ADAPTER_NAME / IOCMD_IOC_SET_PORT_NAME request */
struct bfa_bsg_ioc_name_s {
	bfa_status_t	status;
	u16		bfad_num;
	u16		rsvd;
	char		name[BFA_ADAPTER_SYM_NAME_LEN];
};
190
97struct bfa_bsg_ioc_info_s { 191struct bfa_bsg_ioc_info_s {
98 bfa_status_t status; 192 bfa_status_t status;
99 u16 bfad_num; 193 u16 bfad_num;
@@ -164,6 +258,20 @@ struct bfa_bsg_port_attr_s {
164 struct bfa_port_attr_s attr; 258 struct bfa_port_attr_s attr;
165}; 259};
166 260
/* IOCMD_PORT_CFG_TOPO/SPEED/ALPA/CLR_ALPA: generic port config value */
struct bfa_bsg_port_cfg_s {
	bfa_status_t	status;
	u16		bfad_num;
	u16		rsvd;
	u32		param;		/* topology / speed / alpa value */
	u32		rsvd1;
};

/* IOCMD_PORT_CFG_MAXFRSZ: configure the port's maximum frame size */
struct bfa_bsg_port_cfg_maxfrsize_s {
	bfa_status_t	status;
	u16		bfad_num;
	u16		maxfrsize;
};
274
167struct bfa_bsg_port_stats_s { 275struct bfa_bsg_port_stats_s {
168 bfa_status_t status; 276 bfa_status_t status;
169 u16 bfad_num; 277 u16 bfad_num;
@@ -237,6 +345,47 @@ struct bfa_bsg_rport_scsi_addr_s {
237 u32 lun; 345 u32 lun;
238}; 346};
239 347
/* IOCMD_RPORT_RESET_STATS / IOCMD_ITNIM_RESET_STATS request */
struct bfa_bsg_rport_reset_stats_s {
	bfa_status_t	status;
	u16		bfad_num;
	u16		vf_id;
	wwn_t		pwwn;		/* local port wwn */
	wwn_t		rpwwn;		/* remote port wwn */
};

/* IOCMD_RPORT_SET_SPEED: set the operating speed of a remote port */
struct bfa_bsg_rport_set_speed_s {
	bfa_status_t	status;
	u16		bfad_num;
	u16		vf_id;
	enum bfa_port_speed speed;
	u32		rsvd;
	wwn_t		pwwn;
	wwn_t		rpwwn;
};

/* IOCMD_VPORT_GET_ATTR: virtual port attributes, keyed by vpwwn */
struct bfa_bsg_vport_attr_s {
	bfa_status_t	status;
	u16		bfad_num;
	u16		vf_id;
	wwn_t		vpwwn;
	struct bfa_vport_attr_s vport_attr;	/* filled by the driver */
};

/* IOCMD_VPORT_GET_STATS: virtual port statistics, keyed by vpwwn */
struct bfa_bsg_vport_stats_s {
	bfa_status_t	status;
	u16		bfad_num;
	u16		vf_id;
	wwn_t		vpwwn;
	struct bfa_vport_stats_s vport_stats;	/* filled by the driver */
};

/* IOCMD_VPORT_RESET_STATS and similar per-vport reset requests */
struct bfa_bsg_reset_stats_s {
	bfa_status_t	status;
	u16		bfad_num;
	u16		vf_id;
	wwn_t		vpwwn;
};
388
240struct bfa_bsg_fabric_get_lports_s { 389struct bfa_bsg_fabric_get_lports_s {
241 bfa_status_t status; 390 bfa_status_t status;
242 u16 bfad_num; 391 u16 bfad_num;
@@ -246,6 +395,19 @@ struct bfa_bsg_fabric_get_lports_s {
246 u32 rsvd; 395 u32 rsvd;
247}; 396};
248 397
/* IOCMD_RATELIM_DEF_SPEED: default trunk rate-limit speed */
struct bfa_bsg_trl_speed_s {
	bfa_status_t	status;
	u16		bfad_num;
	u16		rsvd;
	enum bfa_port_speed speed;
};

/* IOCMD_FCPIM_FAILOVER and similar simple FCP-IM controls */
struct bfa_bsg_fcpim_s {
	bfa_status_t	status;
	u16		bfad_num;
	u16		param;
};
409};
410
249struct bfa_bsg_fcpim_modstats_s { 411struct bfa_bsg_fcpim_modstats_s {
250 bfa_status_t status; 412 bfa_status_t status;
251 u16 bfad_num; 413 u16 bfad_num;
@@ -258,6 +420,11 @@ struct bfa_bsg_fcpim_del_itn_stats_s {
258 struct bfa_fcpim_del_itn_stats_s modstats; 420 struct bfa_fcpim_del_itn_stats_s modstats;
259}; 421};
260 422
/* IOCMD_FCPIM_MODSTATSCLR: clear the FCP-IM module statistics */
struct bfa_bsg_fcpim_modstatsclr_s {
	bfa_status_t	status;
	u16		bfad_num;
};
427
261struct bfa_bsg_itnim_attr_s { 428struct bfa_bsg_itnim_attr_s {
262 bfa_status_t status; 429 bfa_status_t status;
263 u16 bfad_num; 430 u16 bfad_num;
@@ -485,6 +652,76 @@ struct bfa_bsg_vhba_attr_s {
485 struct bfa_vhba_attr_s attr; 652 struct bfa_vhba_attr_s attr;
486}; 653};
487 654
655struct bfa_bsg_boot_s {
656 bfa_status_t status;
657 u16 bfad_num;
658 u16 rsvd;
659 struct bfa_boot_cfg_s cfg;
660};
661
662struct bfa_bsg_preboot_s {
663 bfa_status_t status;
664 u16 bfad_num;
665 u16 rsvd;
666 struct bfa_boot_pbc_s cfg;
667};
668
669struct bfa_bsg_ethboot_s {
670 bfa_status_t status;
671 u16 bfad_num;
672 u16 rsvd;
673 struct bfa_ethboot_cfg_s cfg;
674};
675
676struct bfa_bsg_trunk_attr_s {
677 bfa_status_t status;
678 u16 bfad_num;
679 u16 rsvd;
680 struct bfa_trunk_attr_s attr;
681};
682
683struct bfa_bsg_qos_attr_s {
684 bfa_status_t status;
685 u16 bfad_num;
686 u16 rsvd;
687 struct bfa_qos_attr_s attr;
688};
689
690struct bfa_bsg_qos_vc_attr_s {
691 bfa_status_t status;
692 u16 bfad_num;
693 u16 rsvd;
694 struct bfa_qos_vc_attr_s attr;
695};
696
697struct bfa_bsg_vf_stats_s {
698 bfa_status_t status;
699 u16 bfad_num;
700 u16 vf_id;
701 struct bfa_vf_stats_s stats;
702};
703
704struct bfa_bsg_vf_reset_stats_s {
705 bfa_status_t status;
706 u16 bfad_num;
707 u16 vf_id;
708};
709
710struct bfa_bsg_fcpim_lunmask_query_s {
711 bfa_status_t status;
712 u16 bfad_num;
713 struct bfa_lunmask_cfg_s lun_mask;
714};
715
716struct bfa_bsg_fcpim_lunmask_s {
717 bfa_status_t status;
718 u16 bfad_num;
719 u16 vf_id;
720 wwn_t pwwn;
721 wwn_t rpwwn;
722 struct scsi_lun lun;
723};
724
488struct bfa_bsg_fcpt_s { 725struct bfa_bsg_fcpt_s {
489 bfa_status_t status; 726 bfa_status_t status;
490 u16 vf_id; 727 u16 vf_id;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 48661a2726d7..bda999ad9f52 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -56,7 +56,7 @@
56#ifdef BFA_DRIVER_VERSION 56#ifdef BFA_DRIVER_VERSION
57#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION 57#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
58#else 58#else
59#define BFAD_DRIVER_VERSION "3.0.2.1" 59#define BFAD_DRIVER_VERSION "3.0.2.2"
60#endif 60#endif
61 61
62#define BFAD_PROTO_NAME FCPI_NAME 62#define BFAD_PROTO_NAME FCPI_NAME
@@ -224,6 +224,10 @@ struct bfad_s {
224 char *regdata; 224 char *regdata;
225 u32 reglen; 225 u32 reglen;
226 struct dentry *bfad_dentry_files[5]; 226 struct dentry *bfad_dentry_files[5];
227 struct list_head free_aen_q;
228 struct list_head active_aen_q;
229 struct bfa_aen_entry_s aen_list[BFA_AEN_MAX_ENTRY];
230 spinlock_t bfad_aen_spinlock;
227}; 231};
228 232
229/* BFAD state machine events */ 233/* BFAD state machine events */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index f2bf81265ae5..01312381639f 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -656,6 +656,31 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port)
656 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 656 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
657} 657}
658 658
659static void bfad_aen_im_notify_handler(struct work_struct *work)
660{
661 struct bfad_im_s *im =
662 container_of(work, struct bfad_im_s, aen_im_notify_work);
663 struct bfa_aen_entry_s *aen_entry;
664 struct bfad_s *bfad = im->bfad;
665 struct Scsi_Host *shost = bfad->pport.im_port->shost;
666 void *event_data;
667 unsigned long flags;
668
669 while (!list_empty(&bfad->active_aen_q)) {
670 spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
671 bfa_q_deq(&bfad->active_aen_q, &aen_entry);
672 spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
673 event_data = (char *)aen_entry + sizeof(struct list_head);
674 fc_host_post_vendor_event(shost, fc_get_event_number(),
675 sizeof(struct bfa_aen_entry_s) -
676 sizeof(struct list_head),
677 (char *)event_data, BFAD_NL_VENDOR_ID);
678 spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
679 list_add_tail(&aen_entry->qe, &bfad->free_aen_q);
680 spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
681 }
682}
683
659bfa_status_t 684bfa_status_t
660bfad_im_probe(struct bfad_s *bfad) 685bfad_im_probe(struct bfad_s *bfad)
661{ 686{
@@ -676,6 +701,7 @@ bfad_im_probe(struct bfad_s *bfad)
676 rc = BFA_STATUS_FAILED; 701 rc = BFA_STATUS_FAILED;
677 } 702 }
678 703
704 INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler);
679ext: 705ext:
680 return rc; 706 return rc;
681} 707}
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 4fe34d576b05..004b6cf848d9 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -115,8 +115,30 @@ struct bfad_im_s {
115 struct bfad_s *bfad; 115 struct bfad_s *bfad;
116 struct workqueue_struct *drv_workq; 116 struct workqueue_struct *drv_workq;
117 char drv_workq_name[KOBJ_NAME_LEN]; 117 char drv_workq_name[KOBJ_NAME_LEN];
118 struct work_struct aen_im_notify_work;
118}; 119};
119 120
121#define bfad_get_aen_entry(_drv, _entry) do { \
122 unsigned long _flags; \
123 spin_lock_irqsave(&(_drv)->bfad_aen_spinlock, _flags); \
124 bfa_q_deq(&(_drv)->free_aen_q, &(_entry)); \
125 if (_entry) \
126 list_add_tail(&(_entry)->qe, &(_drv)->active_aen_q); \
127 spin_unlock_irqrestore(&(_drv)->bfad_aen_spinlock, _flags); \
128} while (0)
129
130/* post fc_host vendor event */
131#define bfad_im_post_vendor_event(_entry, _drv, _cnt, _cat, _evt) do { \
132 do_gettimeofday(&(_entry)->aen_tv); \
133 (_entry)->bfad_num = (_drv)->inst_no; \
134 (_entry)->seq_num = (_cnt); \
135 (_entry)->aen_category = (_cat); \
136 (_entry)->aen_type = (_evt); \
137 if ((_drv)->bfad_flags & BFAD_FC4_PROBE_DONE) \
138 queue_work((_drv)->im->drv_workq, \
139 &(_drv)->im->aen_im_notify_work); \
140} while (0)
141
120struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, 142struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
121 struct bfad_s *); 143 struct bfad_s *);
122bfa_status_t bfad_thread_workq(struct bfad_s *bfad); 144bfa_status_t bfad_thread_workq(struct bfad_s *bfad);
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index 1e258d5f8aec..b2ba0b2e91b2 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -784,6 +784,17 @@ enum bfi_sfp_i2h_e {
784}; 784};
785 785
786/* 786/*
787 * SFP state change notification
788 */
789struct bfi_sfp_scn_s {
790 struct bfi_mhdr_s mhr; /* host msg header */
791 u8 event;
792 u8 sfpid;
793 u8 pomlvl; /* pom level: normal/warning/alarm */
794 u8 is_elb; /* e-loopback */
795};
796
797/*
787 * SFP state 798 * SFP state
788 */ 799 */
789enum bfa_sfp_stat_e { 800enum bfa_sfp_stat_e {
@@ -926,6 +937,15 @@ struct bfi_flash_erase_rsp_s {
926}; 937};
927 938
928/* 939/*
940 * Flash event notification
941 */
942struct bfi_flash_event_s {
943 struct bfi_mhdr_s mh; /* Common msg header */
944 bfa_status_t status;
945 u32 param;
946};
947
948/*
929 *---------------------------------------------------------------------- 949 *----------------------------------------------------------------------
930 * DIAG 950 * DIAG
931 *---------------------------------------------------------------------- 951 *----------------------------------------------------------------------
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index d924236e1b91..42228ca5a9d2 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -2,7 +2,7 @@
2#define _BNX2FC_H_ 2#define _BNX2FC_H_
3/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver. 3/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
4 * 4 *
5 * Copyright (c) 2008 - 2010 Broadcom Corporation 5 * Copyright (c) 2008 - 2011 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -62,7 +62,7 @@
62#include "bnx2fc_constants.h" 62#include "bnx2fc_constants.h"
63 63
64#define BNX2FC_NAME "bnx2fc" 64#define BNX2FC_NAME "bnx2fc"
65#define BNX2FC_VERSION "1.0.3" 65#define BNX2FC_VERSION "1.0.4"
66 66
67#define PFX "bnx2fc: " 67#define PFX "bnx2fc: "
68 68
@@ -141,6 +141,10 @@
141 141
142#define BNX2FC_RNID_HBA 0x7 142#define BNX2FC_RNID_HBA 0x7
143 143
144#define SRR_RETRY_COUNT 5
145#define REC_RETRY_COUNT 1
146#define BNX2FC_NUM_ERR_BITS 63
147
144/* bnx2fc driver uses only one instance of fcoe_percpu_s */ 148/* bnx2fc driver uses only one instance of fcoe_percpu_s */
145extern struct fcoe_percpu_s bnx2fc_global; 149extern struct fcoe_percpu_s bnx2fc_global;
146 150
@@ -153,18 +157,13 @@ struct bnx2fc_percpu_s {
153}; 157};
154 158
155struct bnx2fc_hba { 159struct bnx2fc_hba {
156 struct list_head link; 160 struct list_head list;
157 struct cnic_dev *cnic; 161 struct cnic_dev *cnic;
158 struct pci_dev *pcidev; 162 struct pci_dev *pcidev;
159 struct net_device *netdev;
160 struct net_device *phys_dev; 163 struct net_device *phys_dev;
161 unsigned long reg_with_cnic; 164 unsigned long reg_with_cnic;
162 #define BNX2FC_CNIC_REGISTERED 1 165 #define BNX2FC_CNIC_REGISTERED 1
163 struct packet_type fcoe_packet_type;
164 struct packet_type fip_packet_type;
165 struct bnx2fc_cmd_mgr *cmd_mgr; 166 struct bnx2fc_cmd_mgr *cmd_mgr;
166 struct workqueue_struct *timer_work_queue;
167 struct kref kref;
168 spinlock_t hba_lock; 167 spinlock_t hba_lock;
169 struct mutex hba_mutex; 168 struct mutex hba_mutex;
170 unsigned long adapter_state; 169 unsigned long adapter_state;
@@ -172,15 +171,9 @@ struct bnx2fc_hba {
172 #define ADAPTER_STATE_GOING_DOWN 1 171 #define ADAPTER_STATE_GOING_DOWN 1
173 #define ADAPTER_STATE_LINK_DOWN 2 172 #define ADAPTER_STATE_LINK_DOWN 2
174 #define ADAPTER_STATE_READY 3 173 #define ADAPTER_STATE_READY 3
175 u32 flags; 174 unsigned long flags;
176 unsigned long init_done; 175 #define BNX2FC_FLAG_FW_INIT_DONE 0
177 #define BNX2FC_FW_INIT_DONE 0 176 #define BNX2FC_FLAG_DESTROY_CMPL 1
178 #define BNX2FC_CTLR_INIT_DONE 1
179 #define BNX2FC_CREATE_DONE 2
180 struct fcoe_ctlr ctlr;
181 struct list_head vports;
182 u8 vlan_enabled;
183 int vlan_id;
184 u32 next_conn_id; 177 u32 next_conn_id;
185 struct fcoe_task_ctx_entry **task_ctx; 178 struct fcoe_task_ctx_entry **task_ctx;
186 dma_addr_t *task_ctx_dma; 179 dma_addr_t *task_ctx_dma;
@@ -199,38 +192,41 @@ struct bnx2fc_hba {
199 char *dummy_buffer; 192 char *dummy_buffer;
200 dma_addr_t dummy_buf_dma; 193 dma_addr_t dummy_buf_dma;
201 194
195 /* Active list of offloaded sessions */
196 struct bnx2fc_rport **tgt_ofld_list;
197
198 /* statistics */
202 struct fcoe_statistics_params *stats_buffer; 199 struct fcoe_statistics_params *stats_buffer;
203 dma_addr_t stats_buf_dma; 200 dma_addr_t stats_buf_dma;
204 201 struct completion stat_req_done;
205 /*
206 * PCI related info.
207 */
208 u16 pci_did;
209 u16 pci_vid;
210 u16 pci_sdid;
211 u16 pci_svid;
212 u16 pci_func;
213 u16 pci_devno;
214
215 struct task_struct *l2_thread;
216
217 /* linkdown handling */
218 wait_queue_head_t shutdown_wait;
219 int wait_for_link_down;
220 202
221 /*destroy handling */ 203 /*destroy handling */
222 struct timer_list destroy_timer; 204 struct timer_list destroy_timer;
223 wait_queue_head_t destroy_wait; 205 wait_queue_head_t destroy_wait;
224 206
225 /* Active list of offloaded sessions */ 207 /* linkdown handling */
226 struct bnx2fc_rport *tgt_ofld_list[BNX2FC_NUM_MAX_SESS]; 208 wait_queue_head_t shutdown_wait;
209 int wait_for_link_down;
227 int num_ofld_sess; 210 int num_ofld_sess;
211 struct list_head vports;
212};
228 213
229 /* statistics */ 214struct bnx2fc_interface {
230 struct completion stat_req_done; 215 struct list_head list;
216 unsigned long if_flags;
217 #define BNX2FC_CTLR_INIT_DONE 0
218 struct bnx2fc_hba *hba;
219 struct net_device *netdev;
220 struct packet_type fcoe_packet_type;
221 struct packet_type fip_packet_type;
222 struct workqueue_struct *timer_work_queue;
223 struct kref kref;
224 struct fcoe_ctlr ctlr;
225 u8 vlan_enabled;
226 int vlan_id;
231}; 227};
232 228
233#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr) 229#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr)
234 230
235struct bnx2fc_lport { 231struct bnx2fc_lport {
236 struct list_head list; 232 struct list_head list;
@@ -252,9 +248,11 @@ struct bnx2fc_rport {
252 struct fc_rport_priv *rdata; 248 struct fc_rport_priv *rdata;
253 void __iomem *ctx_base; 249 void __iomem *ctx_base;
254#define DPM_TRIGER_TYPE 0x40 250#define DPM_TRIGER_TYPE 0x40
251 u32 io_timeout;
255 u32 fcoe_conn_id; 252 u32 fcoe_conn_id;
256 u32 context_id; 253 u32 context_id;
257 u32 sid; 254 u32 sid;
255 int dev_type;
258 256
259 unsigned long flags; 257 unsigned long flags;
260#define BNX2FC_FLAG_SESSION_READY 0x1 258#define BNX2FC_FLAG_SESSION_READY 0x1
@@ -262,10 +260,9 @@ struct bnx2fc_rport {
262#define BNX2FC_FLAG_DISABLED 0x3 260#define BNX2FC_FLAG_DISABLED 0x3
263#define BNX2FC_FLAG_DESTROYED 0x4 261#define BNX2FC_FLAG_DESTROYED 0x4
264#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5 262#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5
265#define BNX2FC_FLAG_DESTROY_CMPL 0x6 263#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x6
266#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x7 264#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x7
267#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8 265#define BNX2FC_FLAG_EXPL_LOGO 0x8
268#define BNX2FC_FLAG_EXPL_LOGO 0x9
269 266
270 u8 src_addr[ETH_ALEN]; 267 u8 src_addr[ETH_ALEN];
271 u32 max_sqes; 268 u32 max_sqes;
@@ -327,12 +324,9 @@ struct bnx2fc_rport {
327 spinlock_t cq_lock; 324 spinlock_t cq_lock;
328 atomic_t num_active_ios; 325 atomic_t num_active_ios;
329 u32 flush_in_prog; 326 u32 flush_in_prog;
330 unsigned long work_time_slice;
331 unsigned long timestamp; 327 unsigned long timestamp;
332 struct list_head free_task_list; 328 struct list_head free_task_list;
333 struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1]; 329 struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
334 atomic_t pi;
335 atomic_t ci;
336 struct list_head active_cmd_queue; 330 struct list_head active_cmd_queue;
337 struct list_head els_queue; 331 struct list_head els_queue;
338 struct list_head io_retire_queue; 332 struct list_head io_retire_queue;
@@ -367,6 +361,8 @@ struct bnx2fc_els_cb_arg {
367 struct bnx2fc_cmd *aborted_io_req; 361 struct bnx2fc_cmd *aborted_io_req;
368 struct bnx2fc_cmd *io_req; 362 struct bnx2fc_cmd *io_req;
369 u16 l2_oxid; 363 u16 l2_oxid;
364 u32 offset;
365 enum fc_rctl r_ctl;
370}; 366};
371 367
372/* bnx2fc command structure */ 368/* bnx2fc command structure */
@@ -380,6 +376,7 @@ struct bnx2fc_cmd {
380#define BNX2FC_ABTS 3 376#define BNX2FC_ABTS 3
381#define BNX2FC_ELS 4 377#define BNX2FC_ELS 4
382#define BNX2FC_CLEANUP 5 378#define BNX2FC_CLEANUP 5
379#define BNX2FC_SEQ_CLEANUP 6
383 u8 io_req_flags; 380 u8 io_req_flags;
384 struct kref refcount; 381 struct kref refcount;
385 struct fcoe_port *port; 382 struct fcoe_port *port;
@@ -393,6 +390,7 @@ struct bnx2fc_cmd {
393 struct completion tm_done; 390 struct completion tm_done;
394 int wait_for_comp; 391 int wait_for_comp;
395 u16 xid; 392 u16 xid;
393 struct fcoe_err_report_entry err_entry;
396 struct fcoe_task_ctx_entry *task; 394 struct fcoe_task_ctx_entry *task;
397 struct io_bdt *bd_tbl; 395 struct io_bdt *bd_tbl;
398 struct fcp_rsp *rsp; 396 struct fcp_rsp *rsp;
@@ -409,6 +407,12 @@ struct bnx2fc_cmd {
409#define BNX2FC_FLAG_IO_COMPL 0x9 407#define BNX2FC_FLAG_IO_COMPL 0x9
410#define BNX2FC_FLAG_ELS_DONE 0xa 408#define BNX2FC_FLAG_ELS_DONE 0xa
411#define BNX2FC_FLAG_ELS_TIMEOUT 0xb 409#define BNX2FC_FLAG_ELS_TIMEOUT 0xb
410#define BNX2FC_FLAG_CMD_LOST 0xc
411#define BNX2FC_FLAG_SRR_SENT 0xd
412 u8 rec_retry;
413 u8 srr_retry;
414 u32 srr_offset;
415 u8 srr_rctl;
412 u32 fcp_resid; 416 u32 fcp_resid;
413 u32 fcp_rsp_len; 417 u32 fcp_rsp_len;
414 u32 fcp_sns_len; 418 u32 fcp_sns_len;
@@ -439,6 +443,7 @@ struct bnx2fc_unsol_els {
439 443
440 444
441 445
446struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt);
442struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type); 447struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
443void bnx2fc_cmd_release(struct kref *ref); 448void bnx2fc_cmd_release(struct kref *ref);
444int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd); 449int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd);
@@ -476,6 +481,10 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
476void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, 481void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
477 struct fcoe_task_ctx_entry *task, 482 struct fcoe_task_ctx_entry *task,
478 u16 orig_xid); 483 u16 orig_xid);
484void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnup_req,
485 struct fcoe_task_ctx_entry *task,
486 struct bnx2fc_cmd *orig_io_req,
487 u32 offset);
479void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, 488void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
480 struct fcoe_task_ctx_entry *task); 489 struct fcoe_task_ctx_entry *task);
481void bnx2fc_init_task(struct bnx2fc_cmd *io_req, 490void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
@@ -525,5 +534,13 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
525 unsigned char *buf, 534 unsigned char *buf,
526 u32 frame_len, u16 l2_oxid); 535 u32 frame_len, u16 l2_oxid);
527int bnx2fc_send_stat_req(struct bnx2fc_hba *hba); 536int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
537int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, struct bnx2fc_cmd *io_req);
538int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req);
539int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl);
540void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnup_req,
541 struct fcoe_task_ctx_entry *task,
542 u8 rx_state);
543int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
544 enum fc_rctl r_ctl);
528 545
529#endif 546#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h
index 7f6aff68cc53..3416d9a746c7 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_debug.h
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h
@@ -21,21 +21,21 @@ extern unsigned int bnx2fc_debug_level;
21 21
22#define BNX2FC_ELS_DBG(fmt, arg...) \ 22#define BNX2FC_ELS_DBG(fmt, arg...) \
23 BNX2FC_CHK_LOGGING(LOG_ELS, \ 23 BNX2FC_CHK_LOGGING(LOG_ELS, \
24 printk(KERN_ALERT PFX fmt, ##arg)) 24 printk(KERN_INFO PFX fmt, ##arg))
25 25
26#define BNX2FC_MISC_DBG(fmt, arg...) \ 26#define BNX2FC_MISC_DBG(fmt, arg...) \
27 BNX2FC_CHK_LOGGING(LOG_MISC, \ 27 BNX2FC_CHK_LOGGING(LOG_MISC, \
28 printk(KERN_ALERT PFX fmt, ##arg)) 28 printk(KERN_INFO PFX fmt, ##arg))
29 29
30#define BNX2FC_IO_DBG(io_req, fmt, arg...) \ 30#define BNX2FC_IO_DBG(io_req, fmt, arg...) \
31 do { \ 31 do { \
32 if (!io_req || !io_req->port || !io_req->port->lport || \ 32 if (!io_req || !io_req->port || !io_req->port->lport || \
33 !io_req->port->lport->host) \ 33 !io_req->port->lport->host) \
34 BNX2FC_CHK_LOGGING(LOG_IO, \ 34 BNX2FC_CHK_LOGGING(LOG_IO, \
35 printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ 35 printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
36 else \ 36 else \
37 BNX2FC_CHK_LOGGING(LOG_IO, \ 37 BNX2FC_CHK_LOGGING(LOG_IO, \
38 shost_printk(KERN_ALERT, \ 38 shost_printk(KERN_INFO, \
39 (io_req)->port->lport->host, \ 39 (io_req)->port->lport->host, \
40 PFX "xid:0x%x " fmt, \ 40 PFX "xid:0x%x " fmt, \
41 (io_req)->xid, ##arg)); \ 41 (io_req)->xid, ##arg)); \
@@ -46,10 +46,10 @@ extern unsigned int bnx2fc_debug_level;
46 if (!tgt || !tgt->port || !tgt->port->lport || \ 46 if (!tgt || !tgt->port || !tgt->port->lport || \
47 !tgt->port->lport->host || !tgt->rport) \ 47 !tgt->port->lport->host || !tgt->rport) \
48 BNX2FC_CHK_LOGGING(LOG_TGT, \ 48 BNX2FC_CHK_LOGGING(LOG_TGT, \
49 printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ 49 printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
50 else \ 50 else \
51 BNX2FC_CHK_LOGGING(LOG_TGT, \ 51 BNX2FC_CHK_LOGGING(LOG_TGT, \
52 shost_printk(KERN_ALERT, \ 52 shost_printk(KERN_INFO, \
53 (tgt)->port->lport->host, \ 53 (tgt)->port->lport->host, \
54 PFX "port:%x " fmt, \ 54 PFX "port:%x " fmt, \
55 (tgt)->rport->port_id, ##arg)); \ 55 (tgt)->rport->port_id, ##arg)); \
@@ -60,10 +60,10 @@ extern unsigned int bnx2fc_debug_level;
60 do { \ 60 do { \
61 if (!lport || !lport->host) \ 61 if (!lport || !lport->host) \
62 BNX2FC_CHK_LOGGING(LOG_HBA, \ 62 BNX2FC_CHK_LOGGING(LOG_HBA, \
63 printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ 63 printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
64 else \ 64 else \
65 BNX2FC_CHK_LOGGING(LOG_HBA, \ 65 BNX2FC_CHK_LOGGING(LOG_HBA, \
66 shost_printk(KERN_ALERT, lport->host, \ 66 shost_printk(KERN_INFO, lport->host, \
67 PFX fmt, ##arg)); \ 67 PFX fmt, ##arg)); \
68 } while (0) 68 } while (0)
69 69
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index 7e89143f15cf..d66dcbd0df10 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -3,7 +3,7 @@
3 * This file contains helper routines that handle ELS requests 3 * This file contains helper routines that handle ELS requests
4 * and responses. 4 * and responses.
5 * 5 *
6 * Copyright (c) 2008 - 2010 Broadcom Corporation 6 * Copyright (c) 2008 - 2011 Broadcom Corporation
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -253,13 +253,417 @@ int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
253 return rc; 253 return rc;
254} 254}
255 255
256void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
257{
258 struct bnx2fc_mp_req *mp_req;
259 struct fc_frame_header *fc_hdr, *fh;
260 struct bnx2fc_cmd *srr_req;
261 struct bnx2fc_cmd *orig_io_req;
262 struct fc_frame *fp;
263 unsigned char *buf;
264 void *resp_buf;
265 u32 resp_len, hdr_len;
266 u8 opcode;
267 int rc = 0;
268
269 orig_io_req = cb_arg->aborted_io_req;
270 srr_req = cb_arg->io_req;
271 if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
272 BNX2FC_IO_DBG(srr_req, "srr_compl: xid - 0x%x completed",
273 orig_io_req->xid);
274 goto srr_compl_done;
275 }
276 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
277 BNX2FC_IO_DBG(srr_req, "rec abts in prog "
278 "orig_io - 0x%x\n",
279 orig_io_req->xid);
280 goto srr_compl_done;
281 }
282 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
283 /* SRR timedout */
284 BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
285 "orig_io - 0x%x\n",
286 orig_io_req->xid);
287 rc = bnx2fc_initiate_abts(srr_req);
288 if (rc != SUCCESS) {
289 BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
290 "failed. issue cleanup\n");
291 bnx2fc_initiate_cleanup(srr_req);
292 }
293 orig_io_req->srr_retry++;
294 if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
295 struct bnx2fc_rport *tgt = orig_io_req->tgt;
296 spin_unlock_bh(&tgt->tgt_lock);
297 rc = bnx2fc_send_srr(orig_io_req,
298 orig_io_req->srr_offset,
299 orig_io_req->srr_rctl);
300 spin_lock_bh(&tgt->tgt_lock);
301 if (!rc)
302 goto srr_compl_done;
303 }
304
305 rc = bnx2fc_initiate_abts(orig_io_req);
306 if (rc != SUCCESS) {
307 BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
308 "failed xid = 0x%x. issue cleanup\n",
309 orig_io_req->xid);
310 bnx2fc_initiate_cleanup(orig_io_req);
311 }
312 goto srr_compl_done;
313 }
314 mp_req = &(srr_req->mp_req);
315 fc_hdr = &(mp_req->resp_fc_hdr);
316 resp_len = mp_req->resp_len;
317 resp_buf = mp_req->resp_buf;
318
319 hdr_len = sizeof(*fc_hdr);
320 buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
321 if (!buf) {
322 printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
323 goto srr_compl_done;
324 }
325 memcpy(buf, fc_hdr, hdr_len);
326 memcpy(buf + hdr_len, resp_buf, resp_len);
327
328 fp = fc_frame_alloc(NULL, resp_len);
329 if (!fp) {
330 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
331 goto free_buf;
332 }
333
334 fh = (struct fc_frame_header *) fc_frame_header_get(fp);
335 /* Copy FC Frame header and payload into the frame */
336 memcpy(fh, buf, hdr_len + resp_len);
337
338 opcode = fc_frame_payload_op(fp);
339 switch (opcode) {
340 case ELS_LS_ACC:
341 BNX2FC_IO_DBG(srr_req, "SRR success\n");
342 break;
343 case ELS_LS_RJT:
344 BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
345 rc = bnx2fc_initiate_abts(orig_io_req);
346 if (rc != SUCCESS) {
347 BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
348 "failed xid = 0x%x. issue cleanup\n",
349 orig_io_req->xid);
350 bnx2fc_initiate_cleanup(orig_io_req);
351 }
352 break;
353 default:
354 BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
355 opcode);
356 break;
357 }
358 fc_frame_free(fp);
359free_buf:
360 kfree(buf);
361srr_compl_done:
362 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
363}
364
365void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
366{
367 struct bnx2fc_cmd *orig_io_req, *new_io_req;
368 struct bnx2fc_cmd *rec_req;
369 struct bnx2fc_mp_req *mp_req;
370 struct fc_frame_header *fc_hdr, *fh;
371 struct fc_els_ls_rjt *rjt;
372 struct fc_els_rec_acc *acc;
373 struct bnx2fc_rport *tgt;
374 struct fcoe_err_report_entry *err_entry;
375 struct scsi_cmnd *sc_cmd;
376 enum fc_rctl r_ctl;
377 unsigned char *buf;
378 void *resp_buf;
379 struct fc_frame *fp;
380 u8 opcode;
381 u32 offset;
382 u32 e_stat;
383 u32 resp_len, hdr_len;
384 int rc = 0;
385 bool send_seq_clnp = false;
386 bool abort_io = false;
387
388 BNX2FC_MISC_DBG("Entered rec_compl callback\n");
389 rec_req = cb_arg->io_req;
390 orig_io_req = cb_arg->aborted_io_req;
391 BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
392 tgt = orig_io_req->tgt;
393
394 if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
395 BNX2FC_IO_DBG(rec_req, "completed"
396 "orig_io - 0x%x\n",
397 orig_io_req->xid);
398 goto rec_compl_done;
399 }
400 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
401 BNX2FC_IO_DBG(rec_req, "abts in prog "
402 "orig_io - 0x%x\n",
403 orig_io_req->xid);
404 goto rec_compl_done;
405 }
406 /* Handle REC timeout case */
407 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
408 BNX2FC_IO_DBG(rec_req, "timed out, abort "
409 "orig_io - 0x%x\n",
410 orig_io_req->xid);
411 /* els req is timed out. send abts for els */
412 rc = bnx2fc_initiate_abts(rec_req);
413 if (rc != SUCCESS) {
414 BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
415 "failed. issue cleanup\n");
416 bnx2fc_initiate_cleanup(rec_req);
417 }
418 orig_io_req->rec_retry++;
419 /* REC timedout. send ABTS to the orig IO req */
420 if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
421 spin_unlock_bh(&tgt->tgt_lock);
422 rc = bnx2fc_send_rec(orig_io_req);
423 spin_lock_bh(&tgt->tgt_lock);
424 if (!rc)
425 goto rec_compl_done;
426 }
427 rc = bnx2fc_initiate_abts(orig_io_req);
428 if (rc != SUCCESS) {
429 BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
430 "failed xid = 0x%x. issue cleanup\n",
431 orig_io_req->xid);
432 bnx2fc_initiate_cleanup(orig_io_req);
433 }
434 goto rec_compl_done;
435 }
436 mp_req = &(rec_req->mp_req);
437 fc_hdr = &(mp_req->resp_fc_hdr);
438 resp_len = mp_req->resp_len;
439 acc = resp_buf = mp_req->resp_buf;
440
441 hdr_len = sizeof(*fc_hdr);
442
443 buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
444 if (!buf) {
445 printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
446 goto rec_compl_done;
447 }
448 memcpy(buf, fc_hdr, hdr_len);
449 memcpy(buf + hdr_len, resp_buf, resp_len);
450
451 fp = fc_frame_alloc(NULL, resp_len);
452 if (!fp) {
453 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
454 goto free_buf;
455 }
456
457 fh = (struct fc_frame_header *) fc_frame_header_get(fp);
458 /* Copy FC Frame header and payload into the frame */
459 memcpy(fh, buf, hdr_len + resp_len);
460
461 opcode = fc_frame_payload_op(fp);
462 if (opcode == ELS_LS_RJT) {
463 BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
464 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
465 if ((rjt->er_reason == ELS_RJT_LOGIC ||
466 rjt->er_reason == ELS_RJT_UNAB) &&
467 rjt->er_explan == ELS_EXPL_OXID_RXID) {
468 BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
469 new_io_req = bnx2fc_cmd_alloc(tgt);
470 if (!new_io_req)
471 goto abort_io;
472 new_io_req->sc_cmd = orig_io_req->sc_cmd;
473 /* cleanup orig_io_req that is with the FW */
474 set_bit(BNX2FC_FLAG_CMD_LOST,
475 &orig_io_req->req_flags);
476 bnx2fc_initiate_cleanup(orig_io_req);
477 /* Post a new IO req with the same sc_cmd */
478 BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
479 spin_unlock_bh(&tgt->tgt_lock);
480 rc = bnx2fc_post_io_req(tgt, new_io_req);
481 spin_lock_bh(&tgt->tgt_lock);
482 if (!rc)
483 goto free_frame;
484 BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
485 }
486abort_io:
487 rc = bnx2fc_initiate_abts(orig_io_req);
488 if (rc != SUCCESS) {
489 BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
490 "failed. issue cleanup\n");
491 bnx2fc_initiate_cleanup(orig_io_req);
492 }
493 } else if (opcode == ELS_LS_ACC) {
494 /* REVISIT: Check if the exchange is already aborted */
495 offset = ntohl(acc->reca_fc4value);
496 e_stat = ntohl(acc->reca_e_stat);
497 if (e_stat & ESB_ST_SEQ_INIT) {
498 BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
499 goto free_frame;
500 }
501 BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
502 e_stat, offset);
503 /* Seq initiative is with us */
504 err_entry = (struct fcoe_err_report_entry *)
505 &orig_io_req->err_entry;
506 sc_cmd = orig_io_req->sc_cmd;
507 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
508 /* SCSI WRITE command */
509 if (offset == orig_io_req->data_xfer_len) {
510 BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
511 /* FCP_RSP lost */
512 r_ctl = FC_RCTL_DD_CMD_STATUS;
513 offset = 0;
514 } else {
515 /* start transmitting from offset */
516 BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
517 send_seq_clnp = true;
518 r_ctl = FC_RCTL_DD_DATA_DESC;
519 if (bnx2fc_initiate_seq_cleanup(orig_io_req,
520 offset, r_ctl))
521 abort_io = true;
522 /* XFER_RDY */
523 }
524 } else {
525 /* SCSI READ command */
526 if (err_entry->data.rx_buf_off ==
527 orig_io_req->data_xfer_len) {
528 /* FCP_RSP lost */
529 BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
530 r_ctl = FC_RCTL_DD_CMD_STATUS;
531 offset = 0;
532 } else {
533 /* request retransmission from this offset */
534 send_seq_clnp = true;
535 offset = err_entry->data.rx_buf_off;
536 BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
537 /* FCP_DATA lost */
538 r_ctl = FC_RCTL_DD_SOL_DATA;
539 if (bnx2fc_initiate_seq_cleanup(orig_io_req,
540 offset, r_ctl))
541 abort_io = true;
542 }
543 }
544 if (abort_io) {
545 rc = bnx2fc_initiate_abts(orig_io_req);
546 if (rc != SUCCESS) {
547 BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
548 " failed. issue cleanup\n");
549 bnx2fc_initiate_cleanup(orig_io_req);
550 }
551 } else if (!send_seq_clnp) {
552 BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
553 spin_unlock_bh(&tgt->tgt_lock);
554 rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
555 spin_lock_bh(&tgt->tgt_lock);
556
557 if (rc) {
558 BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
559 " IO will abort\n");
560 }
561 }
562 }
563free_frame:
564 fc_frame_free(fp);
565free_buf:
566 kfree(buf);
567rec_compl_done:
568 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
569 kfree(cb_arg);
570}
571
572int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
573{
574 struct fc_els_rec rec;
575 struct bnx2fc_rport *tgt = orig_io_req->tgt;
576 struct fc_lport *lport = tgt->rdata->local_port;
577 struct bnx2fc_els_cb_arg *cb_arg = NULL;
578 u32 sid = tgt->sid;
579 u32 r_a_tov = lport->r_a_tov;
580 int rc;
581
582 BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
583 memset(&rec, 0, sizeof(rec));
584
585 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
586 if (!cb_arg) {
587 printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
588 rc = -ENOMEM;
589 goto rec_err;
590 }
591 kref_get(&orig_io_req->refcount);
592
593 cb_arg->aborted_io_req = orig_io_req;
594
595 rec.rec_cmd = ELS_REC;
596 hton24(rec.rec_s_id, sid);
597 rec.rec_ox_id = htons(orig_io_req->xid);
598 rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
599
600 rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
601 bnx2fc_rec_compl, cb_arg,
602 r_a_tov);
603rec_err:
604 if (rc) {
605 BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
606 spin_lock_bh(&tgt->tgt_lock);
607 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
608 spin_unlock_bh(&tgt->tgt_lock);
609 kfree(cb_arg);
610 }
611 return rc;
612}
613
614int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
615{
616 struct fcp_srr srr;
617 struct bnx2fc_rport *tgt = orig_io_req->tgt;
618 struct fc_lport *lport = tgt->rdata->local_port;
619 struct bnx2fc_els_cb_arg *cb_arg = NULL;
620 u32 r_a_tov = lport->r_a_tov;
621 int rc;
622
623 BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
624 memset(&srr, 0, sizeof(srr));
625
626 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
627 if (!cb_arg) {
628 printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
629 rc = -ENOMEM;
630 goto srr_err;
631 }
632 kref_get(&orig_io_req->refcount);
633
634 cb_arg->aborted_io_req = orig_io_req;
635
636 srr.srr_op = ELS_SRR;
637 srr.srr_ox_id = htons(orig_io_req->xid);
638 srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
639 srr.srr_rel_off = htonl(offset);
640 srr.srr_r_ctl = r_ctl;
641 orig_io_req->srr_offset = offset;
642 orig_io_req->srr_rctl = r_ctl;
643
644 rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
645 bnx2fc_srr_compl, cb_arg,
646 r_a_tov);
647srr_err:
648 if (rc) {
649 BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
650 spin_lock_bh(&tgt->tgt_lock);
651 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
652 spin_unlock_bh(&tgt->tgt_lock);
653 kfree(cb_arg);
654 } else
655 set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);
656
657 return rc;
658}
659
256static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, 660static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
257 void *data, u32 data_len, 661 void *data, u32 data_len,
258 void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg), 662 void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
259 struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec) 663 struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
260{ 664{
261 struct fcoe_port *port = tgt->port; 665 struct fcoe_port *port = tgt->port;
262 struct bnx2fc_hba *hba = port->priv; 666 struct bnx2fc_interface *interface = port->priv;
263 struct fc_rport *rport = tgt->rport; 667 struct fc_rport *rport = tgt->rport;
264 struct fc_lport *lport = port->lport; 668 struct fc_lport *lport = port->lport;
265 struct bnx2fc_cmd *els_req; 669 struct bnx2fc_cmd *els_req;
@@ -274,12 +678,12 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
274 678
275 rc = fc_remote_port_chkready(rport); 679 rc = fc_remote_port_chkready(rport);
276 if (rc) { 680 if (rc) {
277 printk(KERN_ALERT PFX "els 0x%x: rport not ready\n", op); 681 printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
278 rc = -EINVAL; 682 rc = -EINVAL;
279 goto els_err; 683 goto els_err;
280 } 684 }
281 if (lport->state != LPORT_ST_READY || !(lport->link_up)) { 685 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
282 printk(KERN_ALERT PFX "els 0x%x: link is not ready\n", op); 686 printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
283 rc = -EINVAL; 687 rc = -EINVAL;
284 goto els_err; 688 goto els_err;
285 } 689 }
@@ -305,7 +709,7 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
305 mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req); 709 mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
306 rc = bnx2fc_init_mp_req(els_req); 710 rc = bnx2fc_init_mp_req(els_req);
307 if (rc == FAILED) { 711 if (rc == FAILED) {
308 printk(KERN_ALERT PFX "ELS MP request init failed\n"); 712 printk(KERN_ERR PFX "ELS MP request init failed\n");
309 spin_lock_bh(&tgt->tgt_lock); 713 spin_lock_bh(&tgt->tgt_lock);
310 kref_put(&els_req->refcount, bnx2fc_cmd_release); 714 kref_put(&els_req->refcount, bnx2fc_cmd_release);
311 spin_unlock_bh(&tgt->tgt_lock); 715 spin_unlock_bh(&tgt->tgt_lock);
@@ -324,7 +728,7 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
324 if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) { 728 if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
325 memcpy(mp_req->req_buf, data, data_len); 729 memcpy(mp_req->req_buf, data, data_len);
326 } else { 730 } else {
327 printk(KERN_ALERT PFX "Invalid ELS op 0x%x\n", op); 731 printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
328 els_req->cb_func = NULL; 732 els_req->cb_func = NULL;
329 els_req->cb_arg = NULL; 733 els_req->cb_arg = NULL;
330 spin_lock_bh(&tgt->tgt_lock); 734 spin_lock_bh(&tgt->tgt_lock);
@@ -342,9 +746,14 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
342 did = tgt->rport->port_id; 746 did = tgt->rport->port_id;
343 sid = tgt->sid; 747 sid = tgt->sid;
344 748
345 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid, 749 if (op == ELS_SRR)
346 FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | 750 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
347 FC_FC_SEQ_INIT, 0); 751 FC_TYPE_FCP, FC_FC_FIRST_SEQ |
752 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
753 else
754 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
755 FC_TYPE_ELS, FC_FC_FIRST_SEQ |
756 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
348 757
349 /* Obtain exchange id */ 758 /* Obtain exchange id */
350 xid = els_req->xid; 759 xid = els_req->xid;
@@ -352,7 +761,8 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
352 index = xid % BNX2FC_TASKS_PER_PAGE; 761 index = xid % BNX2FC_TASKS_PER_PAGE;
353 762
354 /* Initialize task context for this IO request */ 763 /* Initialize task context for this IO request */
355 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; 764 task_page = (struct fcoe_task_ctx_entry *)
765 interface->hba->task_ctx[task_idx];
356 task = &(task_page[index]); 766 task = &(task_page[index]);
357 bnx2fc_init_mp_task(els_req, task); 767 bnx2fc_init_mp_task(els_req, task);
358 768
@@ -496,8 +906,8 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
496 void *arg, u32 timeout) 906 void *arg, u32 timeout)
497{ 907{
498 struct fcoe_port *port = lport_priv(lport); 908 struct fcoe_port *port = lport_priv(lport);
499 struct bnx2fc_hba *hba = port->priv; 909 struct bnx2fc_interface *interface = port->priv;
500 struct fcoe_ctlr *fip = &hba->ctlr; 910 struct fcoe_ctlr *fip = &interface->ctlr;
501 struct fc_frame_header *fh = fc_frame_header_get(fp); 911 struct fc_frame_header *fh = fc_frame_header_get(fp);
502 912
503 switch (op) { 913 switch (op) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index a97aff3a0662..7cb2cd48b17b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -3,7 +3,7 @@
3 * cnic modules to create FCoE instances, send/receive non-offloaded 3 * cnic modules to create FCoE instances, send/receive non-offloaded
4 * FIP/FCoE packets, listen to link events etc. 4 * FIP/FCoE packets, listen to link events etc.
5 * 5 *
6 * Copyright (c) 2008 - 2010 Broadcom Corporation 6 * Copyright (c) 2008 - 2011 Broadcom Corporation
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -15,13 +15,14 @@
15#include "bnx2fc.h" 15#include "bnx2fc.h"
16 16
17static struct list_head adapter_list; 17static struct list_head adapter_list;
18static struct list_head if_list;
18static u32 adapter_count; 19static u32 adapter_count;
19static DEFINE_MUTEX(bnx2fc_dev_lock); 20static DEFINE_MUTEX(bnx2fc_dev_lock);
20DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); 21DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
21 22
22#define DRV_MODULE_NAME "bnx2fc" 23#define DRV_MODULE_NAME "bnx2fc"
23#define DRV_MODULE_VERSION BNX2FC_VERSION 24#define DRV_MODULE_VERSION BNX2FC_VERSION
24#define DRV_MODULE_RELDATE "Jun 10, 2011" 25#define DRV_MODULE_RELDATE "Jun 23, 2011"
25 26
26 27
27static char version[] __devinitdata = 28static char version[] __devinitdata =
@@ -61,7 +62,7 @@ static int bnx2fc_disable(struct net_device *netdev);
61 62
62static void bnx2fc_recv_frame(struct sk_buff *skb); 63static void bnx2fc_recv_frame(struct sk_buff *skb);
63 64
64static void bnx2fc_start_disc(struct bnx2fc_hba *hba); 65static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
65static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev); 66static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
66static int bnx2fc_net_config(struct fc_lport *lp); 67static int bnx2fc_net_config(struct fc_lport *lp);
67static int bnx2fc_lport_config(struct fc_lport *lport); 68static int bnx2fc_lport_config(struct fc_lport *lport);
@@ -70,18 +71,20 @@ static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
70static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba); 71static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
71static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba); 72static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
72static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba); 73static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
73static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, 74static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
74 struct device *parent, int npiv); 75 struct device *parent, int npiv);
75static void bnx2fc_destroy_work(struct work_struct *work); 76static void bnx2fc_destroy_work(struct work_struct *work);
76 77
77static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev); 78static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
79static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
80 *phys_dev);
78static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic); 81static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
79 82
80static int bnx2fc_fw_init(struct bnx2fc_hba *hba); 83static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
81static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba); 84static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba);
82 85
83static void bnx2fc_port_shutdown(struct fc_lport *lport); 86static void bnx2fc_port_shutdown(struct fc_lport *lport);
84static void bnx2fc_stop(struct bnx2fc_hba *hba); 87static void bnx2fc_stop(struct bnx2fc_interface *interface);
85static int __init bnx2fc_mod_init(void); 88static int __init bnx2fc_mod_init(void);
86static void __exit bnx2fc_mod_exit(void); 89static void __exit bnx2fc_mod_exit(void);
87 90
@@ -142,7 +145,8 @@ static void bnx2fc_abort_io(struct fc_lport *lport)
142static void bnx2fc_cleanup(struct fc_lport *lport) 145static void bnx2fc_cleanup(struct fc_lport *lport)
143{ 146{
144 struct fcoe_port *port = lport_priv(lport); 147 struct fcoe_port *port = lport_priv(lport);
145 struct bnx2fc_hba *hba = port->priv; 148 struct bnx2fc_interface *interface = port->priv;
149 struct bnx2fc_hba *hba = interface->hba;
146 struct bnx2fc_rport *tgt; 150 struct bnx2fc_rport *tgt;
147 int i; 151 int i;
148 152
@@ -219,7 +223,8 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
219 struct fcoe_crc_eof *cp; 223 struct fcoe_crc_eof *cp;
220 struct sk_buff *skb; 224 struct sk_buff *skb;
221 struct fc_frame_header *fh; 225 struct fc_frame_header *fh;
222 struct bnx2fc_hba *hba; 226 struct bnx2fc_interface *interface;
227 struct bnx2fc_hba *hba;
223 struct fcoe_port *port; 228 struct fcoe_port *port;
224 struct fcoe_hdr *hp; 229 struct fcoe_hdr *hp;
225 struct bnx2fc_rport *tgt; 230 struct bnx2fc_rport *tgt;
@@ -230,7 +235,8 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
230 int wlen, rc = 0; 235 int wlen, rc = 0;
231 236
232 port = (struct fcoe_port *)lport_priv(lport); 237 port = (struct fcoe_port *)lport_priv(lport);
233 hba = port->priv; 238 interface = port->priv;
239 hba = interface->hba;
234 240
235 fh = fc_frame_header_get(fp); 241 fh = fc_frame_header_get(fp);
236 242
@@ -242,12 +248,12 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
242 } 248 }
243 249
244 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { 250 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
245 if (!hba->ctlr.sel_fcf) { 251 if (!interface->ctlr.sel_fcf) {
246 BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n"); 252 BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
247 kfree_skb(skb); 253 kfree_skb(skb);
248 return -EINVAL; 254 return -EINVAL;
249 } 255 }
250 if (fcoe_ctlr_els_send(&hba->ctlr, lport, skb)) 256 if (fcoe_ctlr_els_send(&interface->ctlr, lport, skb))
251 return 0; 257 return 0;
252 } 258 }
253 259
@@ -316,19 +322,19 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
316 skb_reset_network_header(skb); 322 skb_reset_network_header(skb);
317 skb->mac_len = elen; 323 skb->mac_len = elen;
318 skb->protocol = htons(ETH_P_FCOE); 324 skb->protocol = htons(ETH_P_FCOE);
319 skb->dev = hba->netdev; 325 skb->dev = interface->netdev;
320 326
321 /* fill up mac and fcoe headers */ 327 /* fill up mac and fcoe headers */
322 eh = eth_hdr(skb); 328 eh = eth_hdr(skb);
323 eh->h_proto = htons(ETH_P_FCOE); 329 eh->h_proto = htons(ETH_P_FCOE);
324 if (hba->ctlr.map_dest) 330 if (interface->ctlr.map_dest)
325 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); 331 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
326 else 332 else
327 /* insert GW address */ 333 /* insert GW address */
328 memcpy(eh->h_dest, hba->ctlr.dest_addr, ETH_ALEN); 334 memcpy(eh->h_dest, interface->ctlr.dest_addr, ETH_ALEN);
329 335
330 if (unlikely(hba->ctlr.flogi_oxid != FC_XID_UNKNOWN)) 336 if (unlikely(interface->ctlr.flogi_oxid != FC_XID_UNKNOWN))
331 memcpy(eh->h_source, hba->ctlr.ctl_src_addr, ETH_ALEN); 337 memcpy(eh->h_source, interface->ctlr.ctl_src_addr, ETH_ALEN);
332 else 338 else
333 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); 339 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
334 340
@@ -377,22 +383,23 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
377 struct packet_type *ptype, struct net_device *olddev) 383 struct packet_type *ptype, struct net_device *olddev)
378{ 384{
379 struct fc_lport *lport; 385 struct fc_lport *lport;
380 struct bnx2fc_hba *hba; 386 struct bnx2fc_interface *interface;
381 struct fc_frame_header *fh; 387 struct fc_frame_header *fh;
382 struct fcoe_rcv_info *fr; 388 struct fcoe_rcv_info *fr;
383 struct fcoe_percpu_s *bg; 389 struct fcoe_percpu_s *bg;
384 unsigned short oxid; 390 unsigned short oxid;
385 391
386 hba = container_of(ptype, struct bnx2fc_hba, fcoe_packet_type); 392 interface = container_of(ptype, struct bnx2fc_interface,
387 lport = hba->ctlr.lp; 393 fcoe_packet_type);
394 lport = interface->ctlr.lp;
388 395
389 if (unlikely(lport == NULL)) { 396 if (unlikely(lport == NULL)) {
390 printk(KERN_ALERT PFX "bnx2fc_rcv: lport is NULL\n"); 397 printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
391 goto err; 398 goto err;
392 } 399 }
393 400
394 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { 401 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
395 printk(KERN_ALERT PFX "bnx2fc_rcv: Wrong FC type frame\n"); 402 printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
396 goto err; 403 goto err;
397 } 404 }
398 405
@@ -411,7 +418,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
411 418
412 fr = fcoe_dev_from_skb(skb); 419 fr = fcoe_dev_from_skb(skb);
413 fr->fr_dev = lport; 420 fr->fr_dev = lport;
414 fr->ptype = ptype;
415 421
416 bg = &bnx2fc_global; 422 bg = &bnx2fc_global;
417 spin_lock_bh(&bg->fcoe_rx_list.lock); 423 spin_lock_bh(&bg->fcoe_rx_list.lock);
@@ -469,7 +475,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
469 fr = fcoe_dev_from_skb(skb); 475 fr = fcoe_dev_from_skb(skb);
470 lport = fr->fr_dev; 476 lport = fr->fr_dev;
471 if (unlikely(lport == NULL)) { 477 if (unlikely(lport == NULL)) {
472 printk(KERN_ALERT PFX "Invalid lport struct\n"); 478 printk(KERN_ERR PFX "Invalid lport struct\n");
473 kfree_skb(skb); 479 kfree_skb(skb);
474 return; 480 return;
475 } 481 }
@@ -594,7 +600,8 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
594 struct fc_host_statistics *bnx2fc_stats; 600 struct fc_host_statistics *bnx2fc_stats;
595 struct fc_lport *lport = shost_priv(shost); 601 struct fc_lport *lport = shost_priv(shost);
596 struct fcoe_port *port = lport_priv(lport); 602 struct fcoe_port *port = lport_priv(lport);
597 struct bnx2fc_hba *hba = port->priv; 603 struct bnx2fc_interface *interface = port->priv;
604 struct bnx2fc_hba *hba = interface->hba;
598 struct fcoe_statistics_params *fw_stats; 605 struct fcoe_statistics_params *fw_stats;
599 int rc = 0; 606 int rc = 0;
600 607
@@ -631,7 +638,7 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
631static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) 638static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
632{ 639{
633 struct fcoe_port *port = lport_priv(lport); 640 struct fcoe_port *port = lport_priv(lport);
634 struct bnx2fc_hba *hba = port->priv; 641 struct bnx2fc_interface *interface = port->priv;
635 struct Scsi_Host *shost = lport->host; 642 struct Scsi_Host *shost = lport->host;
636 int rc = 0; 643 int rc = 0;
637 644
@@ -654,7 +661,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
654 fc_host_max_npiv_vports(lport->host) = USHRT_MAX; 661 fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
655 sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s", 662 sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
656 BNX2FC_NAME, BNX2FC_VERSION, 663 BNX2FC_NAME, BNX2FC_VERSION,
657 hba->netdev->name); 664 interface->netdev->name);
658 665
659 return 0; 666 return 0;
660} 667}
@@ -662,8 +669,8 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
662static void bnx2fc_link_speed_update(struct fc_lport *lport) 669static void bnx2fc_link_speed_update(struct fc_lport *lport)
663{ 670{
664 struct fcoe_port *port = lport_priv(lport); 671 struct fcoe_port *port = lport_priv(lport);
665 struct bnx2fc_hba *hba = port->priv; 672 struct bnx2fc_interface *interface = port->priv;
666 struct net_device *netdev = hba->netdev; 673 struct net_device *netdev = interface->netdev;
667 struct ethtool_cmd ecmd; 674 struct ethtool_cmd ecmd;
668 675
669 if (!dev_ethtool_get_settings(netdev, &ecmd)) { 676 if (!dev_ethtool_get_settings(netdev, &ecmd)) {
@@ -691,7 +698,8 @@ static void bnx2fc_link_speed_update(struct fc_lport *lport)
691static int bnx2fc_link_ok(struct fc_lport *lport) 698static int bnx2fc_link_ok(struct fc_lport *lport)
692{ 699{
693 struct fcoe_port *port = lport_priv(lport); 700 struct fcoe_port *port = lport_priv(lport);
694 struct bnx2fc_hba *hba = port->priv; 701 struct bnx2fc_interface *interface = port->priv;
702 struct bnx2fc_hba *hba = interface->hba;
695 struct net_device *dev = hba->phys_dev; 703 struct net_device *dev = hba->phys_dev;
696 int rc = 0; 704 int rc = 0;
697 705
@@ -713,7 +721,7 @@ static int bnx2fc_link_ok(struct fc_lport *lport)
713 */ 721 */
714void bnx2fc_get_link_state(struct bnx2fc_hba *hba) 722void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
715{ 723{
716 if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state)) 724 if (test_bit(__LINK_STATE_NOCARRIER, &hba->phys_dev->state))
717 set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); 725 set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
718 else 726 else
719 clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); 727 clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
@@ -722,11 +730,13 @@ void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
722static int bnx2fc_net_config(struct fc_lport *lport) 730static int bnx2fc_net_config(struct fc_lport *lport)
723{ 731{
724 struct bnx2fc_hba *hba; 732 struct bnx2fc_hba *hba;
733 struct bnx2fc_interface *interface;
725 struct fcoe_port *port; 734 struct fcoe_port *port;
726 u64 wwnn, wwpn; 735 u64 wwnn, wwpn;
727 736
728 port = lport_priv(lport); 737 port = lport_priv(lport);
729 hba = port->priv; 738 interface = port->priv;
739 hba = interface->hba;
730 740
731 /* require support for get_pauseparam ethtool op. */ 741 /* require support for get_pauseparam ethtool op. */
732 if (!hba->phys_dev->ethtool_ops || 742 if (!hba->phys_dev->ethtool_ops ||
@@ -743,11 +753,11 @@ static int bnx2fc_net_config(struct fc_lport *lport)
743 bnx2fc_link_speed_update(lport); 753 bnx2fc_link_speed_update(lport);
744 754
745 if (!lport->vport) { 755 if (!lport->vport) {
746 wwnn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 1, 0); 756 wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 1, 0);
747 BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn); 757 BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
748 fc_set_wwnn(lport, wwnn); 758 fc_set_wwnn(lport, wwnn);
749 759
750 wwpn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 2, 0); 760 wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 2, 0);
751 BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn); 761 BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
752 fc_set_wwpn(lport, wwpn); 762 fc_set_wwpn(lport, wwpn);
753 } 763 }
@@ -759,9 +769,9 @@ static void bnx2fc_destroy_timer(unsigned long data)
759{ 769{
760 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data; 770 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
761 771
762 BNX2FC_HBA_DBG(hba->ctlr.lp, "ERROR:bnx2fc_destroy_timer - " 772 BNX2FC_MISC_DBG("ERROR:bnx2fc_destroy_timer - "
763 "Destroy compl not received!!\n"); 773 "Destroy compl not received!!\n");
764 hba->flags |= BNX2FC_FLAG_DESTROY_CMPL; 774 set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
765 wake_up_interruptible(&hba->destroy_wait); 775 wake_up_interruptible(&hba->destroy_wait);
766} 776}
767 777
@@ -779,54 +789,35 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
779 u16 vlan_id) 789 u16 vlan_id)
780{ 790{
781 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; 791 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
782 struct fc_lport *lport = hba->ctlr.lp; 792 struct fc_lport *lport;
783 struct fc_lport *vport; 793 struct fc_lport *vport;
794 struct bnx2fc_interface *interface;
795 int wait_for_upload = 0;
784 u32 link_possible = 1; 796 u32 link_possible = 1;
785 797
786 /* Ignore vlans for now */ 798 /* Ignore vlans for now */
787 if (vlan_id != 0) 799 if (vlan_id != 0)
788 return; 800 return;
789 801
790 if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
791 BNX2FC_MISC_DBG("driver not ready. event=%s %ld\n",
792 hba->netdev->name, event);
793 return;
794 }
795
796 /*
797 * ASSUMPTION:
798 * indicate_netevent cannot be called from cnic unless bnx2fc
799 * does register_device
800 */
801 BUG_ON(!lport);
802
803 BNX2FC_HBA_DBG(lport, "enter netevent handler - event=%s %ld\n",
804 hba->netdev->name, event);
805
806 switch (event) { 802 switch (event) {
807 case NETDEV_UP: 803 case NETDEV_UP:
808 BNX2FC_HBA_DBG(lport, "Port up, adapter_state = %ld\n",
809 hba->adapter_state);
810 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) 804 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
811 printk(KERN_ERR "indicate_netevent: "\ 805 printk(KERN_ERR "indicate_netevent: "\
812 "adapter is not UP!!\n"); 806 "hba is not UP!!\n");
813 break; 807 break;
814 808
815 case NETDEV_DOWN: 809 case NETDEV_DOWN:
816 BNX2FC_HBA_DBG(lport, "Port down\n");
817 clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); 810 clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
818 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); 811 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
819 link_possible = 0; 812 link_possible = 0;
820 break; 813 break;
821 814
822 case NETDEV_GOING_DOWN: 815 case NETDEV_GOING_DOWN:
823 BNX2FC_HBA_DBG(lport, "Port going down\n");
824 set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); 816 set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
825 link_possible = 0; 817 link_possible = 0;
826 break; 818 break;
827 819
828 case NETDEV_CHANGE: 820 case NETDEV_CHANGE:
829 BNX2FC_HBA_DBG(lport, "NETDEV_CHANGE\n");
830 break; 821 break;
831 822
832 default: 823 default:
@@ -834,15 +825,22 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
834 return; 825 return;
835 } 826 }
836 827
837 bnx2fc_link_speed_update(lport); 828 mutex_lock(&bnx2fc_dev_lock);
829 list_for_each_entry(interface, &if_list, list) {
838 830
839 if (link_possible && !bnx2fc_link_ok(lport)) { 831 if (interface->hba != hba)
840 printk(KERN_ERR "indicate_netevent: call ctlr_link_up\n"); 832 continue;
841 fcoe_ctlr_link_up(&hba->ctlr); 833
842 } else { 834 lport = interface->ctlr.lp;
843 printk(KERN_ERR "indicate_netevent: call ctlr_link_down\n"); 835 BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
844 if (fcoe_ctlr_link_down(&hba->ctlr)) { 836 interface->netdev->name, event);
845 clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); 837
838 bnx2fc_link_speed_update(lport);
839
840 if (link_possible && !bnx2fc_link_ok(lport)) {
841 printk(KERN_ERR "indicate_netevent: ctlr_link_up\n");
842 fcoe_ctlr_link_up(&interface->ctlr);
843 } else if (fcoe_ctlr_link_down(&interface->ctlr)) {
846 mutex_lock(&lport->lp_mutex); 844 mutex_lock(&lport->lp_mutex);
847 list_for_each_entry(vport, &lport->vports, list) 845 list_for_each_entry(vport, &lport->vports, list)
848 fc_host_port_type(vport->host) = 846 fc_host_port_type(vport->host) =
@@ -853,24 +851,26 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
853 get_cpu())->LinkFailureCount++; 851 get_cpu())->LinkFailureCount++;
854 put_cpu(); 852 put_cpu();
855 fcoe_clean_pending_queue(lport); 853 fcoe_clean_pending_queue(lport);
854 wait_for_upload = 1;
855 }
856 }
857 mutex_unlock(&bnx2fc_dev_lock);
856 858
857 init_waitqueue_head(&hba->shutdown_wait); 859 if (wait_for_upload) {
858 BNX2FC_HBA_DBG(lport, "indicate_netevent " 860 clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
859 "num_ofld_sess = %d\n", 861 init_waitqueue_head(&hba->shutdown_wait);
860 hba->num_ofld_sess); 862 BNX2FC_MISC_DBG("indicate_netevent "
861 hba->wait_for_link_down = 1; 863 "num_ofld_sess = %d\n",
862 BNX2FC_HBA_DBG(lport, "waiting for uploads to " 864 hba->num_ofld_sess);
863 "compl proc = %s\n", 865 hba->wait_for_link_down = 1;
864 current->comm); 866 wait_event_interruptible(hba->shutdown_wait,
865 wait_event_interruptible(hba->shutdown_wait, 867 (hba->num_ofld_sess == 0));
866 (hba->num_ofld_sess == 0)); 868 BNX2FC_MISC_DBG("wakeup - num_ofld_sess = %d\n",
867 BNX2FC_HBA_DBG(lport, "wakeup - num_ofld_sess = %d\n",
868 hba->num_ofld_sess); 869 hba->num_ofld_sess);
869 hba->wait_for_link_down = 0; 870 hba->wait_for_link_down = 0;
870 871
871 if (signal_pending(current)) 872 if (signal_pending(current))
872 flush_signals(current); 873 flush_signals(current);
873 }
874 } 874 }
875} 875}
876 876
@@ -889,23 +889,12 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
889 889
890static int bnx2fc_em_config(struct fc_lport *lport) 890static int bnx2fc_em_config(struct fc_lport *lport)
891{ 891{
892 struct fcoe_port *port = lport_priv(lport);
893 struct bnx2fc_hba *hba = port->priv;
894
895 if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID, 892 if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
896 FCOE_MAX_XID, NULL)) { 893 FCOE_MAX_XID, NULL)) {
897 printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n"); 894 printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
898 return -ENOMEM; 895 return -ENOMEM;
899 } 896 }
900 897
901 hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
902 BNX2FC_MAX_XID);
903
904 if (!hba->cmd_mgr) {
905 printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
906 fc_exch_mgr_free(lport);
907 return -ENOMEM;
908 }
909 return 0; 898 return 0;
910} 899}
911 900
@@ -918,11 +907,8 @@ static int bnx2fc_lport_config(struct fc_lport *lport)
918 lport->e_d_tov = 2 * 1000; 907 lport->e_d_tov = 2 * 1000;
919 lport->r_a_tov = 10 * 1000; 908 lport->r_a_tov = 10 * 1000;
920 909
921 /* REVISIT: enable when supporting tape devices
922 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | 910 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
923 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); 911 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
924 */
925 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS);
926 lport->does_npiv = 1; 912 lport->does_npiv = 1;
927 913
928 memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen)); 914 memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen));
@@ -952,9 +938,10 @@ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
952 struct packet_type *ptype, 938 struct packet_type *ptype,
953 struct net_device *orig_dev) 939 struct net_device *orig_dev)
954{ 940{
955 struct bnx2fc_hba *hba; 941 struct bnx2fc_interface *interface;
956 hba = container_of(ptype, struct bnx2fc_hba, fip_packet_type); 942 interface = container_of(ptype, struct bnx2fc_interface,
957 fcoe_ctlr_recv(&hba->ctlr, skb); 943 fip_packet_type);
944 fcoe_ctlr_recv(&interface->ctlr, skb);
958 return 0; 945 return 0;
959} 946}
960 947
@@ -1005,17 +992,17 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
1005 struct Scsi_Host *shost = vport_to_shost(vport); 992 struct Scsi_Host *shost = vport_to_shost(vport);
1006 struct fc_lport *n_port = shost_priv(shost); 993 struct fc_lport *n_port = shost_priv(shost);
1007 struct fcoe_port *port = lport_priv(n_port); 994 struct fcoe_port *port = lport_priv(n_port);
1008 struct bnx2fc_hba *hba = port->priv; 995 struct bnx2fc_interface *interface = port->priv;
1009 struct net_device *netdev = hba->netdev; 996 struct net_device *netdev = interface->netdev;
1010 struct fc_lport *vn_port; 997 struct fc_lport *vn_port;
1011 998
1012 if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { 999 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
1013 printk(KERN_ERR PFX "vn ports cannot be created on" 1000 printk(KERN_ERR PFX "vn ports cannot be created on"
1014 "this hba\n"); 1001 "this interface\n");
1015 return -EIO; 1002 return -EIO;
1016 } 1003 }
1017 mutex_lock(&bnx2fc_dev_lock); 1004 mutex_lock(&bnx2fc_dev_lock);
1018 vn_port = bnx2fc_if_create(hba, &vport->dev, 1); 1005 vn_port = bnx2fc_if_create(interface, &vport->dev, 1);
1019 mutex_unlock(&bnx2fc_dev_lock); 1006 mutex_unlock(&bnx2fc_dev_lock);
1020 1007
1021 if (IS_ERR(vn_port)) { 1008 if (IS_ERR(vn_port)) {
@@ -1065,10 +1052,10 @@ static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable)
1065} 1052}
1066 1053
1067 1054
1068static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba) 1055static int bnx2fc_netdev_setup(struct bnx2fc_interface *interface)
1069{ 1056{
1070 struct net_device *netdev = hba->netdev; 1057 struct net_device *netdev = interface->netdev;
1071 struct net_device *physdev = hba->phys_dev; 1058 struct net_device *physdev = interface->hba->phys_dev;
1072 struct netdev_hw_addr *ha; 1059 struct netdev_hw_addr *ha;
1073 int sel_san_mac = 0; 1060 int sel_san_mac = 0;
1074 1061
@@ -1083,7 +1070,8 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
1083 1070
1084 if ((ha->type == NETDEV_HW_ADDR_T_SAN) && 1071 if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
1085 (is_valid_ether_addr(ha->addr))) { 1072 (is_valid_ether_addr(ha->addr))) {
1086 memcpy(hba->ctlr.ctl_src_addr, ha->addr, ETH_ALEN); 1073 memcpy(interface->ctlr.ctl_src_addr, ha->addr,
1074 ETH_ALEN);
1087 sel_san_mac = 1; 1075 sel_san_mac = 1;
1088 BNX2FC_MISC_DBG("Found SAN MAC\n"); 1076 BNX2FC_MISC_DBG("Found SAN MAC\n");
1089 } 1077 }
@@ -1093,15 +1081,15 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
1093 if (!sel_san_mac) 1081 if (!sel_san_mac)
1094 return -ENODEV; 1082 return -ENODEV;
1095 1083
1096 hba->fip_packet_type.func = bnx2fc_fip_recv; 1084 interface->fip_packet_type.func = bnx2fc_fip_recv;
1097 hba->fip_packet_type.type = htons(ETH_P_FIP); 1085 interface->fip_packet_type.type = htons(ETH_P_FIP);
1098 hba->fip_packet_type.dev = netdev; 1086 interface->fip_packet_type.dev = netdev;
1099 dev_add_pack(&hba->fip_packet_type); 1087 dev_add_pack(&interface->fip_packet_type);
1100 1088
1101 hba->fcoe_packet_type.func = bnx2fc_rcv; 1089 interface->fcoe_packet_type.func = bnx2fc_rcv;
1102 hba->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); 1090 interface->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
1103 hba->fcoe_packet_type.dev = netdev; 1091 interface->fcoe_packet_type.dev = netdev;
1104 dev_add_pack(&hba->fcoe_packet_type); 1092 dev_add_pack(&interface->fcoe_packet_type);
1105 1093
1106 return 0; 1094 return 0;
1107} 1095}
@@ -1137,53 +1125,54 @@ static void bnx2fc_release_transport(void)
1137 1125
1138static void bnx2fc_interface_release(struct kref *kref) 1126static void bnx2fc_interface_release(struct kref *kref)
1139{ 1127{
1140 struct bnx2fc_hba *hba; 1128 struct bnx2fc_interface *interface;
1141 struct net_device *netdev; 1129 struct net_device *netdev;
1142 struct net_device *phys_dev;
1143 1130
1144 hba = container_of(kref, struct bnx2fc_hba, kref); 1131 interface = container_of(kref, struct bnx2fc_interface, kref);
1145 BNX2FC_MISC_DBG("Interface is being released\n"); 1132 BNX2FC_MISC_DBG("Interface is being released\n");
1146 1133
1147 netdev = hba->netdev; 1134 netdev = interface->netdev;
1148 phys_dev = hba->phys_dev;
1149 1135
1150 /* tear-down FIP controller */ 1136 /* tear-down FIP controller */
1151 if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done)) 1137 if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
1152 fcoe_ctlr_destroy(&hba->ctlr); 1138 fcoe_ctlr_destroy(&interface->ctlr);
1139
1140 kfree(interface);
1153 1141
1154 /* Free the command manager */
1155 if (hba->cmd_mgr) {
1156 bnx2fc_cmd_mgr_free(hba->cmd_mgr);
1157 hba->cmd_mgr = NULL;
1158 }
1159 dev_put(netdev); 1142 dev_put(netdev);
1160 module_put(THIS_MODULE); 1143 module_put(THIS_MODULE);
1161} 1144}
1162 1145
1163static inline void bnx2fc_interface_get(struct bnx2fc_hba *hba) 1146static inline void bnx2fc_interface_get(struct bnx2fc_interface *interface)
1164{ 1147{
1165 kref_get(&hba->kref); 1148 kref_get(&interface->kref);
1166} 1149}
1167 1150
1168static inline void bnx2fc_interface_put(struct bnx2fc_hba *hba) 1151static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface)
1169{ 1152{
1170 kref_put(&hba->kref, bnx2fc_interface_release); 1153 kref_put(&interface->kref, bnx2fc_interface_release);
1171} 1154}
1172static void bnx2fc_interface_destroy(struct bnx2fc_hba *hba) 1155static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba)
1173{ 1156{
1157 /* Free the command manager */
1158 if (hba->cmd_mgr) {
1159 bnx2fc_cmd_mgr_free(hba->cmd_mgr);
1160 hba->cmd_mgr = NULL;
1161 }
1162 kfree(hba->tgt_ofld_list);
1174 bnx2fc_unbind_pcidev(hba); 1163 bnx2fc_unbind_pcidev(hba);
1175 kfree(hba); 1164 kfree(hba);
1176} 1165}
1177 1166
1178/** 1167/**
1179 * bnx2fc_interface_create - create a new fcoe instance 1168 * bnx2fc_hba_create - create a new bnx2fc hba
1180 * 1169 *
1181 * @cnic: pointer to cnic device 1170 * @cnic: pointer to cnic device
1182 * 1171 *
1183 * Creates a new FCoE instance on the given device which include allocating 1172 * Creates a new FCoE hba on the given device.
1184 * hba structure, scsi_host and lport structures. 1173 *
1185 */ 1174 */
1186static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic) 1175static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1187{ 1176{
1188 struct bnx2fc_hba *hba; 1177 struct bnx2fc_hba *hba;
1189 int rc; 1178 int rc;
@@ -1198,65 +1187,83 @@ static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic)
1198 1187
1199 hba->cnic = cnic; 1188 hba->cnic = cnic;
1200 rc = bnx2fc_bind_pcidev(hba); 1189 rc = bnx2fc_bind_pcidev(hba);
1201 if (rc) 1190 if (rc) {
1191 printk(KERN_ERR PFX "create_adapter: bind error\n");
1202 goto bind_err; 1192 goto bind_err;
1193 }
1203 hba->phys_dev = cnic->netdev; 1194 hba->phys_dev = cnic->netdev;
1204 /* will get overwritten after we do vlan discovery */ 1195 hba->next_conn_id = 0;
1205 hba->netdev = hba->phys_dev; 1196
1197 hba->tgt_ofld_list =
1198 kzalloc(sizeof(struct bnx2fc_rport *) * BNX2FC_NUM_MAX_SESS,
1199 GFP_KERNEL);
1200 if (!hba->tgt_ofld_list) {
1201 printk(KERN_ERR PFX "Unable to allocate tgt offload list\n");
1202 goto tgtofld_err;
1203 }
1204
1205 hba->num_ofld_sess = 0;
1206
1207 hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
1208 BNX2FC_MAX_XID);
1209 if (!hba->cmd_mgr) {
1210 printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
1211 goto cmgr_err;
1212 }
1206 1213
1207 init_waitqueue_head(&hba->shutdown_wait); 1214 init_waitqueue_head(&hba->shutdown_wait);
1208 init_waitqueue_head(&hba->destroy_wait); 1215 init_waitqueue_head(&hba->destroy_wait);
1216 INIT_LIST_HEAD(&hba->vports);
1209 1217
1210 return hba; 1218 return hba;
1219
1220cmgr_err:
1221 kfree(hba->tgt_ofld_list);
1222tgtofld_err:
1223 bnx2fc_unbind_pcidev(hba);
1211bind_err: 1224bind_err:
1212 printk(KERN_ERR PFX "create_interface: bind error\n");
1213 kfree(hba); 1225 kfree(hba);
1214 return NULL; 1226 return NULL;
1215} 1227}
1216 1228
1217static int bnx2fc_interface_setup(struct bnx2fc_hba *hba, 1229struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
1218 enum fip_state fip_mode) 1230 struct net_device *netdev,
1231 enum fip_state fip_mode)
1219{ 1232{
1233 struct bnx2fc_interface *interface;
1220 int rc = 0; 1234 int rc = 0;
1221 struct net_device *netdev = hba->netdev;
1222 struct fcoe_ctlr *fip = &hba->ctlr;
1223 1235
1236 interface = kzalloc(sizeof(*interface), GFP_KERNEL);
1237 if (!interface) {
1238 printk(KERN_ERR PFX "Unable to allocate interface structure\n");
1239 return NULL;
1240 }
1224 dev_hold(netdev); 1241 dev_hold(netdev);
1225 kref_init(&hba->kref); 1242 kref_init(&interface->kref);
1226 1243 interface->hba = hba;
1227 hba->flags = 0; 1244 interface->netdev = netdev;
1228 1245
1229 /* Initialize FIP */ 1246 /* Initialize FIP */
1230 memset(fip, 0, sizeof(*fip)); 1247 fcoe_ctlr_init(&interface->ctlr, fip_mode);
1231 fcoe_ctlr_init(fip, fip_mode); 1248 interface->ctlr.send = bnx2fc_fip_send;
1232 hba->ctlr.send = bnx2fc_fip_send; 1249 interface->ctlr.update_mac = bnx2fc_update_src_mac;
1233 hba->ctlr.update_mac = bnx2fc_update_src_mac; 1250 interface->ctlr.get_src_addr = bnx2fc_get_src_mac;
1234 hba->ctlr.get_src_addr = bnx2fc_get_src_mac; 1251 set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
1235 set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done);
1236
1237 INIT_LIST_HEAD(&hba->vports);
1238 rc = bnx2fc_netdev_setup(hba);
1239 if (rc)
1240 goto setup_err;
1241 1252
1242 hba->next_conn_id = 0; 1253 rc = bnx2fc_netdev_setup(interface);
1254 if (!rc)
1255 return interface;
1243 1256
1244 memset(hba->tgt_ofld_list, 0, sizeof(hba->tgt_ofld_list)); 1257 fcoe_ctlr_destroy(&interface->ctlr);
1245 hba->num_ofld_sess = 0;
1246
1247 return 0;
1248
1249setup_err:
1250 fcoe_ctlr_destroy(&hba->ctlr);
1251 dev_put(netdev); 1258 dev_put(netdev);
1252 bnx2fc_interface_put(hba); 1259 kfree(interface);
1253 return rc; 1260 return NULL;
1254} 1261}
1255 1262
1256/** 1263/**
1257 * bnx2fc_if_create - Create FCoE instance on a given interface 1264 * bnx2fc_if_create - Create FCoE instance on a given interface
1258 * 1265 *
1259 * @hba: FCoE interface to create a local port on 1266 * @interface: FCoE interface to create a local port on
1260 * @parent: Device pointer to be the parent in sysfs for the SCSI host 1267 * @parent: Device pointer to be the parent in sysfs for the SCSI host
1261 * @npiv: Indicates if the port is vport or not 1268 * @npiv: Indicates if the port is vport or not
1262 * 1269 *
@@ -1264,7 +1271,7 @@ setup_err:
1264 * 1271 *
1265 * Returns: Allocated fc_lport or an error pointer 1272 * Returns: Allocated fc_lport or an error pointer
1266 */ 1273 */
1267static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, 1274static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1268 struct device *parent, int npiv) 1275 struct device *parent, int npiv)
1269{ 1276{
1270 struct fc_lport *lport, *n_port; 1277 struct fc_lport *lport, *n_port;
@@ -1272,11 +1279,12 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1272 struct Scsi_Host *shost; 1279 struct Scsi_Host *shost;
1273 struct fc_vport *vport = dev_to_vport(parent); 1280 struct fc_vport *vport = dev_to_vport(parent);
1274 struct bnx2fc_lport *blport; 1281 struct bnx2fc_lport *blport;
1282 struct bnx2fc_hba *hba;
1275 int rc = 0; 1283 int rc = 0;
1276 1284
1277 blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL); 1285 blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
1278 if (!blport) { 1286 if (!blport) {
1279 BNX2FC_HBA_DBG(hba->ctlr.lp, "Unable to alloc bnx2fc_lport\n"); 1287 BNX2FC_HBA_DBG(interface->ctlr.lp, "Unable to alloc blport\n");
1280 return NULL; 1288 return NULL;
1281 } 1289 }
1282 1290
@@ -1293,7 +1301,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1293 shost = lport->host; 1301 shost = lport->host;
1294 port = lport_priv(lport); 1302 port = lport_priv(lport);
1295 port->lport = lport; 1303 port->lport = lport;
1296 port->priv = hba; 1304 port->priv = interface;
1297 INIT_WORK(&port->destroy_work, bnx2fc_destroy_work); 1305 INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
1298 1306
1299 /* Configure fcoe_port */ 1307 /* Configure fcoe_port */
@@ -1317,7 +1325,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1317 rc = bnx2fc_shost_config(lport, parent); 1325 rc = bnx2fc_shost_config(lport, parent);
1318 if (rc) { 1326 if (rc) {
1319 printk(KERN_ERR PFX "Couldnt configure shost for %s\n", 1327 printk(KERN_ERR PFX "Couldnt configure shost for %s\n",
1320 hba->netdev->name); 1328 interface->netdev->name);
1321 goto lp_config_err; 1329 goto lp_config_err;
1322 } 1330 }
1323 1331
@@ -1343,8 +1351,9 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
1343 goto shost_err; 1351 goto shost_err;
1344 } 1352 }
1345 1353
1346 bnx2fc_interface_get(hba); 1354 bnx2fc_interface_get(interface);
1347 1355
1356 hba = interface->hba;
1348 spin_lock_bh(&hba->hba_lock); 1357 spin_lock_bh(&hba->hba_lock);
1349 blport->lport = lport; 1358 blport->lport = lport;
1350 list_add_tail(&blport->list, &hba->vports); 1359 list_add_tail(&blport->list, &hba->vports);
@@ -1361,21 +1370,19 @@ free_blport:
1361 return NULL; 1370 return NULL;
1362} 1371}
1363 1372
1364static void bnx2fc_netdev_cleanup(struct bnx2fc_hba *hba) 1373static void bnx2fc_netdev_cleanup(struct bnx2fc_interface *interface)
1365{ 1374{
1366 /* Dont listen for Ethernet packets anymore */ 1375 /* Dont listen for Ethernet packets anymore */
1367 __dev_remove_pack(&hba->fcoe_packet_type); 1376 __dev_remove_pack(&interface->fcoe_packet_type);
1368 __dev_remove_pack(&hba->fip_packet_type); 1377 __dev_remove_pack(&interface->fip_packet_type);
1369 synchronize_net(); 1378 synchronize_net();
1370} 1379}
1371 1380
1372static void bnx2fc_if_destroy(struct fc_lport *lport) 1381static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba)
1373{ 1382{
1374 struct fcoe_port *port = lport_priv(lport); 1383 struct fcoe_port *port = lport_priv(lport);
1375 struct bnx2fc_hba *hba = port->priv;
1376 struct bnx2fc_lport *blport, *tmp; 1384 struct bnx2fc_lport *blport, *tmp;
1377 1385
1378 BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n");
1379 /* Stop the transmit retry timer */ 1386 /* Stop the transmit retry timer */
1380 del_timer_sync(&port->timer); 1387 del_timer_sync(&port->timer);
1381 1388
@@ -1409,8 +1416,6 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
1409 1416
1410 /* Release Scsi_Host */ 1417 /* Release Scsi_Host */
1411 scsi_host_put(lport->host); 1418 scsi_host_put(lport->host);
1412
1413 bnx2fc_interface_put(hba);
1414} 1419}
1415 1420
1416/** 1421/**
@@ -1425,46 +1430,31 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
1425 */ 1430 */
1426static int bnx2fc_destroy(struct net_device *netdev) 1431static int bnx2fc_destroy(struct net_device *netdev)
1427{ 1432{
1428 struct bnx2fc_hba *hba = NULL; 1433 struct bnx2fc_interface *interface = NULL;
1429 struct net_device *phys_dev; 1434 struct bnx2fc_hba *hba;
1435 struct fc_lport *lport;
1430 int rc = 0; 1436 int rc = 0;
1431 1437
1432 rtnl_lock(); 1438 rtnl_lock();
1433
1434 mutex_lock(&bnx2fc_dev_lock); 1439 mutex_lock(&bnx2fc_dev_lock);
1435 /* obtain physical netdev */
1436 if (netdev->priv_flags & IFF_802_1Q_VLAN)
1437 phys_dev = vlan_dev_real_dev(netdev);
1438 else {
1439 printk(KERN_ERR PFX "Not a vlan device\n");
1440 rc = -ENODEV;
1441 goto netdev_err;
1442 }
1443 1440
1444 hba = bnx2fc_hba_lookup(phys_dev); 1441 interface = bnx2fc_interface_lookup(netdev);
1445 if (!hba || !hba->ctlr.lp) { 1442 if (!interface || !interface->ctlr.lp) {
1446 rc = -ENODEV; 1443 rc = -ENODEV;
1447 printk(KERN_ERR PFX "bnx2fc_destroy: hba or lport not found\n"); 1444 printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
1448 goto netdev_err;
1449 }
1450
1451 if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
1452 printk(KERN_ERR PFX "bnx2fc_destroy: Create not called\n");
1453 goto netdev_err; 1445 goto netdev_err;
1454 } 1446 }
1455 1447
1456 bnx2fc_netdev_cleanup(hba); 1448 hba = interface->hba;
1457
1458 bnx2fc_stop(hba);
1459
1460 bnx2fc_if_destroy(hba->ctlr.lp);
1461 1449
1462 destroy_workqueue(hba->timer_work_queue); 1450 bnx2fc_netdev_cleanup(interface);
1451 lport = interface->ctlr.lp;
1452 bnx2fc_stop(interface);
1453 list_del(&interface->list);
1454 destroy_workqueue(interface->timer_work_queue);
1455 bnx2fc_interface_put(interface);
1456 bnx2fc_if_destroy(lport, hba);
1463 1457
1464 if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
1465 bnx2fc_fw_destroy(hba);
1466
1467 clear_bit(BNX2FC_CREATE_DONE, &hba->init_done);
1468netdev_err: 1458netdev_err:
1469 mutex_unlock(&bnx2fc_dev_lock); 1459 mutex_unlock(&bnx2fc_dev_lock);
1470 rtnl_unlock(); 1460 rtnl_unlock();
@@ -1475,16 +1465,20 @@ static void bnx2fc_destroy_work(struct work_struct *work)
1475{ 1465{
1476 struct fcoe_port *port; 1466 struct fcoe_port *port;
1477 struct fc_lport *lport; 1467 struct fc_lport *lport;
1468 struct bnx2fc_interface *interface;
1469 struct bnx2fc_hba *hba;
1478 1470
1479 port = container_of(work, struct fcoe_port, destroy_work); 1471 port = container_of(work, struct fcoe_port, destroy_work);
1480 lport = port->lport; 1472 lport = port->lport;
1473 interface = port->priv;
1474 hba = interface->hba;
1481 1475
1482 BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n"); 1476 BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
1483 1477
1484 bnx2fc_port_shutdown(lport); 1478 bnx2fc_port_shutdown(lport);
1485 rtnl_lock(); 1479 rtnl_lock();
1486 mutex_lock(&bnx2fc_dev_lock); 1480 mutex_lock(&bnx2fc_dev_lock);
1487 bnx2fc_if_destroy(lport); 1481 bnx2fc_if_destroy(lport, hba);
1488 mutex_unlock(&bnx2fc_dev_lock); 1482 mutex_unlock(&bnx2fc_dev_lock);
1489 rtnl_unlock(); 1483 rtnl_unlock();
1490} 1484}
@@ -1556,28 +1550,27 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
1556static void bnx2fc_ulp_start(void *handle) 1550static void bnx2fc_ulp_start(void *handle)
1557{ 1551{
1558 struct bnx2fc_hba *hba = handle; 1552 struct bnx2fc_hba *hba = handle;
1559 struct fc_lport *lport = hba->ctlr.lp; 1553 struct bnx2fc_interface *interface;
1554 struct fc_lport *lport;
1560 1555
1561 BNX2FC_MISC_DBG("Entered %s\n", __func__);
1562 mutex_lock(&bnx2fc_dev_lock); 1556 mutex_lock(&bnx2fc_dev_lock);
1563 1557
1564 if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) 1558 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags))
1565 goto start_disc;
1566
1567 if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done))
1568 bnx2fc_fw_init(hba); 1559 bnx2fc_fw_init(hba);
1569 1560
1570start_disc:
1571 mutex_unlock(&bnx2fc_dev_lock);
1572
1573 BNX2FC_MISC_DBG("bnx2fc started.\n"); 1561 BNX2FC_MISC_DBG("bnx2fc started.\n");
1574 1562
1575 /* Kick off Fabric discovery*/ 1563 list_for_each_entry(interface, &if_list, list) {
1576 if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { 1564 if (interface->hba == hba) {
1577 printk(KERN_ERR PFX "ulp_init: start discovery\n"); 1565 lport = interface->ctlr.lp;
1578 lport->tt.frame_send = bnx2fc_xmit; 1566 /* Kick off Fabric discovery*/
1579 bnx2fc_start_disc(hba); 1567 printk(KERN_ERR PFX "ulp_init: start discovery\n");
1568 lport->tt.frame_send = bnx2fc_xmit;
1569 bnx2fc_start_disc(interface);
1570 }
1580 } 1571 }
1572
1573 mutex_unlock(&bnx2fc_dev_lock);
1581} 1574}
1582 1575
1583static void bnx2fc_port_shutdown(struct fc_lport *lport) 1576static void bnx2fc_port_shutdown(struct fc_lport *lport)
@@ -1587,37 +1580,25 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport)
1587 fc_lport_destroy(lport); 1580 fc_lport_destroy(lport);
1588} 1581}
1589 1582
1590static void bnx2fc_stop(struct bnx2fc_hba *hba) 1583static void bnx2fc_stop(struct bnx2fc_interface *interface)
1591{ 1584{
1592 struct fc_lport *lport; 1585 struct fc_lport *lport;
1593 struct fc_lport *vport; 1586 struct fc_lport *vport;
1594 1587
1595 BNX2FC_MISC_DBG("ENTERED %s - init_done = %ld\n", __func__, 1588 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
1596 hba->init_done); 1589 return;
1597 if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done) &&
1598 test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
1599 lport = hba->ctlr.lp;
1600 bnx2fc_port_shutdown(lport);
1601 BNX2FC_HBA_DBG(lport, "bnx2fc_stop: waiting for %d "
1602 "offloaded sessions\n",
1603 hba->num_ofld_sess);
1604 wait_event_interruptible(hba->shutdown_wait,
1605 (hba->num_ofld_sess == 0));
1606 mutex_lock(&lport->lp_mutex);
1607 list_for_each_entry(vport, &lport->vports, list)
1608 fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN;
1609 mutex_unlock(&lport->lp_mutex);
1610 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
1611 fcoe_ctlr_link_down(&hba->ctlr);
1612 fcoe_clean_pending_queue(lport);
1613
1614 mutex_lock(&hba->hba_mutex);
1615 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1616 clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
1617 1590
1618 clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); 1591 lport = interface->ctlr.lp;
1619 mutex_unlock(&hba->hba_mutex); 1592 bnx2fc_port_shutdown(lport);
1620 } 1593
1594 mutex_lock(&lport->lp_mutex);
1595 list_for_each_entry(vport, &lport->vports, list)
1596 fc_host_port_type(vport->host) =
1597 FC_PORTTYPE_UNKNOWN;
1598 mutex_unlock(&lport->lp_mutex);
1599 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
1600 fcoe_ctlr_link_down(&interface->ctlr);
1601 fcoe_clean_pending_queue(lport);
1621} 1602}
1622 1603
1623static int bnx2fc_fw_init(struct bnx2fc_hba *hba) 1604static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
@@ -1656,8 +1637,7 @@ static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
1656 } 1637 }
1657 1638
1658 1639
1659 /* Mark HBA to indicate that the FW INIT is done */ 1640 set_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags);
1660 set_bit(BNX2FC_FW_INIT_DONE, &hba->init_done);
1661 return 0; 1641 return 0;
1662 1642
1663err_unbind: 1643err_unbind:
@@ -1668,7 +1648,7 @@ err_out:
1668 1648
1669static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba) 1649static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
1670{ 1650{
1671 if (test_and_clear_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { 1651 if (test_and_clear_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) {
1672 if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) { 1652 if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) {
1673 init_timer(&hba->destroy_timer); 1653 init_timer(&hba->destroy_timer);
1674 hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT + 1654 hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT +
@@ -1677,8 +1657,8 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
1677 hba->destroy_timer.data = (unsigned long)hba; 1657 hba->destroy_timer.data = (unsigned long)hba;
1678 add_timer(&hba->destroy_timer); 1658 add_timer(&hba->destroy_timer);
1679 wait_event_interruptible(hba->destroy_wait, 1659 wait_event_interruptible(hba->destroy_wait,
1680 (hba->flags & 1660 test_bit(BNX2FC_FLAG_DESTROY_CMPL,
1681 BNX2FC_FLAG_DESTROY_CMPL)); 1661 &hba->flags));
1682 /* This should never happen */ 1662 /* This should never happen */
1683 if (signal_pending(current)) 1663 if (signal_pending(current))
1684 flush_signals(current); 1664 flush_signals(current);
@@ -1699,40 +1679,57 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
1699 */ 1679 */
1700static void bnx2fc_ulp_stop(void *handle) 1680static void bnx2fc_ulp_stop(void *handle)
1701{ 1681{
1702 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)handle; 1682 struct bnx2fc_hba *hba = handle;
1683 struct bnx2fc_interface *interface;
1703 1684
1704 printk(KERN_ERR "ULP_STOP\n"); 1685 printk(KERN_ERR "ULP_STOP\n");
1705 1686
1706 mutex_lock(&bnx2fc_dev_lock); 1687 mutex_lock(&bnx2fc_dev_lock);
1707 bnx2fc_stop(hba); 1688 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags))
1689 goto exit;
1690 list_for_each_entry(interface, &if_list, list) {
1691 if (interface->hba == hba)
1692 bnx2fc_stop(interface);
1693 }
1694 BUG_ON(hba->num_ofld_sess != 0);
1695
1696 mutex_lock(&hba->hba_mutex);
1697 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1698 clear_bit(ADAPTER_STATE_GOING_DOWN,
1699 &hba->adapter_state);
1700
1701 clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
1702 mutex_unlock(&hba->hba_mutex);
1703
1708 bnx2fc_fw_destroy(hba); 1704 bnx2fc_fw_destroy(hba);
1705exit:
1709 mutex_unlock(&bnx2fc_dev_lock); 1706 mutex_unlock(&bnx2fc_dev_lock);
1710} 1707}
1711 1708
1712static void bnx2fc_start_disc(struct bnx2fc_hba *hba) 1709static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
1713{ 1710{
1714 struct fc_lport *lport; 1711 struct fc_lport *lport;
1715 int wait_cnt = 0; 1712 int wait_cnt = 0;
1716 1713
1717 BNX2FC_MISC_DBG("Entered %s\n", __func__); 1714 BNX2FC_MISC_DBG("Entered %s\n", __func__);
1718 /* Kick off FIP/FLOGI */ 1715 /* Kick off FIP/FLOGI */
1719 if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { 1716 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
1720 printk(KERN_ERR PFX "Init not done yet\n"); 1717 printk(KERN_ERR PFX "Init not done yet\n");
1721 return; 1718 return;
1722 } 1719 }
1723 1720
1724 lport = hba->ctlr.lp; 1721 lport = interface->ctlr.lp;
1725 BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n"); 1722 BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
1726 1723
1727 if (!bnx2fc_link_ok(lport)) { 1724 if (!bnx2fc_link_ok(lport)) {
1728 BNX2FC_HBA_DBG(lport, "ctlr_link_up\n"); 1725 BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
1729 fcoe_ctlr_link_up(&hba->ctlr); 1726 fcoe_ctlr_link_up(&interface->ctlr);
1730 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; 1727 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1731 set_bit(ADAPTER_STATE_READY, &hba->adapter_state); 1728 set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
1732 } 1729 }
1733 1730
1734 /* wait for the FCF to be selected before issuing FLOGI */ 1731 /* wait for the FCF to be selected before issuing FLOGI */
1735 while (!hba->ctlr.sel_fcf) { 1732 while (!interface->ctlr.sel_fcf) {
1736 msleep(250); 1733 msleep(250);
1737 /* give up after 3 secs */ 1734 /* give up after 3 secs */
1738 if (++wait_cnt > 12) 1735 if (++wait_cnt > 12)
@@ -1758,15 +1755,15 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1758 1755
1759 BNX2FC_MISC_DBG("Entered %s\n", __func__); 1756 BNX2FC_MISC_DBG("Entered %s\n", __func__);
1760 /* bnx2fc works only when bnx2x is loaded */ 1757 /* bnx2fc works only when bnx2x is loaded */
1761 if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 1758 if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) ||
1759 (dev->max_fcoe_conn == 0)) {
1762 printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s," 1760 printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s,"
1763 " flags: %lx\n", 1761 " flags: %lx fcoe_conn: %d\n",
1764 dev->netdev->name, dev->flags); 1762 dev->netdev->name, dev->flags, dev->max_fcoe_conn);
1765 return; 1763 return;
1766 } 1764 }
1767 1765
1768 /* Configure FCoE interface */ 1766 hba = bnx2fc_hba_create(dev);
1769 hba = bnx2fc_interface_create(dev);
1770 if (!hba) { 1767 if (!hba) {
1771 printk(KERN_ERR PFX "hba initialization failed\n"); 1768 printk(KERN_ERR PFX "hba initialization failed\n");
1772 return; 1769 return;
@@ -1774,7 +1771,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1774 1771
1775 /* Add HBA to the adapter list */ 1772 /* Add HBA to the adapter list */
1776 mutex_lock(&bnx2fc_dev_lock); 1773 mutex_lock(&bnx2fc_dev_lock);
1777 list_add_tail(&hba->link, &adapter_list); 1774 list_add_tail(&hba->list, &adapter_list);
1778 adapter_count++; 1775 adapter_count++;
1779 mutex_unlock(&bnx2fc_dev_lock); 1776 mutex_unlock(&bnx2fc_dev_lock);
1780 1777
@@ -1782,7 +1779,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1782 rc = dev->register_device(dev, CNIC_ULP_FCOE, 1779 rc = dev->register_device(dev, CNIC_ULP_FCOE,
1783 (void *) hba); 1780 (void *) hba);
1784 if (rc) 1781 if (rc)
1785 printk(KERN_ALERT PFX "register_device failed, rc = %d\n", rc); 1782 printk(KERN_ERR PFX "register_device failed, rc = %d\n", rc);
1786 else 1783 else
1787 set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); 1784 set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
1788} 1785}
@@ -1790,52 +1787,21 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1790 1787
1791static int bnx2fc_disable(struct net_device *netdev) 1788static int bnx2fc_disable(struct net_device *netdev)
1792{ 1789{
1793 struct bnx2fc_hba *hba; 1790 struct bnx2fc_interface *interface;
1794 struct net_device *phys_dev;
1795 struct ethtool_drvinfo drvinfo;
1796 int rc = 0; 1791 int rc = 0;
1797 1792
1798 rtnl_lock(); 1793 rtnl_lock();
1799
1800 mutex_lock(&bnx2fc_dev_lock); 1794 mutex_lock(&bnx2fc_dev_lock);
1801 1795
1802 /* obtain physical netdev */ 1796 interface = bnx2fc_interface_lookup(netdev);
1803 if (netdev->priv_flags & IFF_802_1Q_VLAN) 1797 if (!interface || !interface->ctlr.lp) {
1804 phys_dev = vlan_dev_real_dev(netdev);
1805 else {
1806 printk(KERN_ERR PFX "Not a vlan device\n");
1807 rc = -ENODEV;
1808 goto nodev;
1809 }
1810
1811 /* verify if the physical device is a netxtreme2 device */
1812 if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
1813 memset(&drvinfo, 0, sizeof(drvinfo));
1814 phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
1815 if (strcmp(drvinfo.driver, "bnx2x")) {
1816 printk(KERN_ERR PFX "Not a netxtreme2 device\n");
1817 rc = -ENODEV;
1818 goto nodev;
1819 }
1820 } else {
1821 printk(KERN_ERR PFX "unable to obtain drv_info\n");
1822 rc = -ENODEV;
1823 goto nodev;
1824 }
1825
1826 printk(KERN_ERR PFX "phys_dev is netxtreme2 device\n");
1827
1828 /* obtain hba and initialize rest of the structure */
1829 hba = bnx2fc_hba_lookup(phys_dev);
1830 if (!hba || !hba->ctlr.lp) {
1831 rc = -ENODEV; 1798 rc = -ENODEV;
1832 printk(KERN_ERR PFX "bnx2fc_disable: hba or lport not found\n"); 1799 printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
1833 } else { 1800 } else {
1834 fcoe_ctlr_link_down(&hba->ctlr); 1801 fcoe_ctlr_link_down(&interface->ctlr);
1835 fcoe_clean_pending_queue(hba->ctlr.lp); 1802 fcoe_clean_pending_queue(interface->ctlr.lp);
1836 } 1803 }
1837 1804
1838nodev:
1839 mutex_unlock(&bnx2fc_dev_lock); 1805 mutex_unlock(&bnx2fc_dev_lock);
1840 rtnl_unlock(); 1806 rtnl_unlock();
1841 return rc; 1807 return rc;
@@ -1844,48 +1810,19 @@ nodev:
1844 1810
1845static int bnx2fc_enable(struct net_device *netdev) 1811static int bnx2fc_enable(struct net_device *netdev)
1846{ 1812{
1847 struct bnx2fc_hba *hba; 1813 struct bnx2fc_interface *interface;
1848 struct net_device *phys_dev;
1849 struct ethtool_drvinfo drvinfo;
1850 int rc = 0; 1814 int rc = 0;
1851 1815
1852 rtnl_lock(); 1816 rtnl_lock();
1853
1854 BNX2FC_MISC_DBG("Entered %s\n", __func__);
1855 mutex_lock(&bnx2fc_dev_lock); 1817 mutex_lock(&bnx2fc_dev_lock);
1856 1818
1857 /* obtain physical netdev */ 1819 interface = bnx2fc_interface_lookup(netdev);
1858 if (netdev->priv_flags & IFF_802_1Q_VLAN) 1820 if (!interface || !interface->ctlr.lp) {
1859 phys_dev = vlan_dev_real_dev(netdev);
1860 else {
1861 printk(KERN_ERR PFX "Not a vlan device\n");
1862 rc = -ENODEV;
1863 goto nodev;
1864 }
1865 /* verify if the physical device is a netxtreme2 device */
1866 if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
1867 memset(&drvinfo, 0, sizeof(drvinfo));
1868 phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
1869 if (strcmp(drvinfo.driver, "bnx2x")) {
1870 printk(KERN_ERR PFX "Not a netxtreme2 device\n");
1871 rc = -ENODEV;
1872 goto nodev;
1873 }
1874 } else {
1875 printk(KERN_ERR PFX "unable to obtain drv_info\n");
1876 rc = -ENODEV; 1821 rc = -ENODEV;
1877 goto nodev; 1822 printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
1878 } 1823 } else if (!bnx2fc_link_ok(interface->ctlr.lp))
1879 1824 fcoe_ctlr_link_up(&interface->ctlr);
1880 /* obtain hba and initialize rest of the structure */
1881 hba = bnx2fc_hba_lookup(phys_dev);
1882 if (!hba || !hba->ctlr.lp) {
1883 rc = -ENODEV;
1884 printk(KERN_ERR PFX "bnx2fc_enable: hba or lport not found\n");
1885 } else if (!bnx2fc_link_ok(hba->ctlr.lp))
1886 fcoe_ctlr_link_up(&hba->ctlr);
1887 1825
1888nodev:
1889 mutex_unlock(&bnx2fc_dev_lock); 1826 mutex_unlock(&bnx2fc_dev_lock);
1890 rtnl_unlock(); 1827 rtnl_unlock();
1891 return rc; 1828 return rc;
@@ -1903,6 +1840,7 @@ nodev:
1903 */ 1840 */
1904static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) 1841static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1905{ 1842{
1843 struct bnx2fc_interface *interface;
1906 struct bnx2fc_hba *hba; 1844 struct bnx2fc_hba *hba;
1907 struct net_device *phys_dev; 1845 struct net_device *phys_dev;
1908 struct fc_lport *lport; 1846 struct fc_lport *lport;
@@ -1938,7 +1876,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1938 if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) { 1876 if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
1939 memset(&drvinfo, 0, sizeof(drvinfo)); 1877 memset(&drvinfo, 0, sizeof(drvinfo));
1940 phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo); 1878 phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
1941 if (strcmp(drvinfo.driver, "bnx2x")) { 1879 if (strncmp(drvinfo.driver, "bnx2x", strlen("bnx2x"))) {
1942 printk(KERN_ERR PFX "Not a netxtreme2 device\n"); 1880 printk(KERN_ERR PFX "Not a netxtreme2 device\n");
1943 rc = -EINVAL; 1881 rc = -EINVAL;
1944 goto netdev_err; 1882 goto netdev_err;
@@ -1949,7 +1887,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1949 goto netdev_err; 1887 goto netdev_err;
1950 } 1888 }
1951 1889
1952 /* obtain hba and initialize rest of the structure */ 1890 /* obtain interface and initialize rest of the structure */
1953 hba = bnx2fc_hba_lookup(phys_dev); 1891 hba = bnx2fc_hba_lookup(phys_dev);
1954 if (!hba) { 1892 if (!hba) {
1955 rc = -ENODEV; 1893 rc = -ENODEV;
@@ -1957,67 +1895,61 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1957 goto netdev_err; 1895 goto netdev_err;
1958 } 1896 }
1959 1897
1960 if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { 1898 if (bnx2fc_interface_lookup(netdev)) {
1961 rc = bnx2fc_fw_init(hba);
1962 if (rc)
1963 goto netdev_err;
1964 }
1965
1966 if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
1967 rc = -EEXIST; 1899 rc = -EEXIST;
1968 goto netdev_err; 1900 goto netdev_err;
1969 } 1901 }
1970 1902
1971 /* update netdev with vlan netdev */ 1903 interface = bnx2fc_interface_create(hba, netdev, fip_mode);
1972 hba->netdev = netdev; 1904 if (!interface) {
1973 hba->vlan_id = vlan_id; 1905 printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
1974 hba->vlan_enabled = 1;
1975
1976 rc = bnx2fc_interface_setup(hba, fip_mode);
1977 if (rc) {
1978 printk(KERN_ERR PFX "bnx2fc_interface_setup failed\n");
1979 goto ifput_err; 1906 goto ifput_err;
1980 } 1907 }
1981 1908
1982 hba->timer_work_queue = 1909 interface->vlan_id = vlan_id;
1910 interface->vlan_enabled = 1;
1911
1912 interface->timer_work_queue =
1983 create_singlethread_workqueue("bnx2fc_timer_wq"); 1913 create_singlethread_workqueue("bnx2fc_timer_wq");
1984 if (!hba->timer_work_queue) { 1914 if (!interface->timer_work_queue) {
1985 printk(KERN_ERR PFX "ulp_init could not create timer_wq\n"); 1915 printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
1986 rc = -EINVAL; 1916 rc = -EINVAL;
1987 goto ifput_err; 1917 goto ifput_err;
1988 } 1918 }
1989 1919
1990 lport = bnx2fc_if_create(hba, &hba->pcidev->dev, 0); 1920 lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0);
1991 if (!lport) { 1921 if (!lport) {
1992 printk(KERN_ERR PFX "Failed to create interface (%s)\n", 1922 printk(KERN_ERR PFX "Failed to create interface (%s)\n",
1993 netdev->name); 1923 netdev->name);
1994 bnx2fc_netdev_cleanup(hba); 1924 bnx2fc_netdev_cleanup(interface);
1995 rc = -EINVAL; 1925 rc = -EINVAL;
1996 goto if_create_err; 1926 goto if_create_err;
1997 } 1927 }
1998 1928
1929 /* Add interface to if_list */
1930 list_add_tail(&interface->list, &if_list);
1931
1999 lport->boot_time = jiffies; 1932 lport->boot_time = jiffies;
2000 1933
2001 /* Make this master N_port */ 1934 /* Make this master N_port */
2002 hba->ctlr.lp = lport; 1935 interface->ctlr.lp = lport;
2003 1936
2004 set_bit(BNX2FC_CREATE_DONE, &hba->init_done); 1937 BNX2FC_HBA_DBG(lport, "create: START DISC\n");
2005 printk(KERN_ERR PFX "create: START DISC\n"); 1938 bnx2fc_start_disc(interface);
2006 bnx2fc_start_disc(hba);
2007 /* 1939 /*
2008 * Release from kref_init in bnx2fc_interface_setup, on success 1940 * Release from kref_init in bnx2fc_interface_setup, on success
2009 * lport should be holding a reference taken in bnx2fc_if_create 1941 * lport should be holding a reference taken in bnx2fc_if_create
2010 */ 1942 */
2011 bnx2fc_interface_put(hba); 1943 bnx2fc_interface_put(interface);
2012 /* put netdev that was held while calling dev_get_by_name */ 1944 /* put netdev that was held while calling dev_get_by_name */
2013 mutex_unlock(&bnx2fc_dev_lock); 1945 mutex_unlock(&bnx2fc_dev_lock);
2014 rtnl_unlock(); 1946 rtnl_unlock();
2015 return 0; 1947 return 0;
2016 1948
2017if_create_err: 1949if_create_err:
2018 destroy_workqueue(hba->timer_work_queue); 1950 destroy_workqueue(interface->timer_work_queue);
2019ifput_err: 1951ifput_err:
2020 bnx2fc_interface_put(hba); 1952 bnx2fc_interface_put(interface);
2021netdev_err: 1953netdev_err:
2022 module_put(THIS_MODULE); 1954 module_put(THIS_MODULE);
2023mod_err: 1955mod_err:
@@ -2027,7 +1959,7 @@ mod_err:
2027} 1959}
2028 1960
2029/** 1961/**
2030 * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc adapter instance 1962 * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance
2031 * 1963 *
2032 * @cnic: Pointer to cnic device instance 1964 * @cnic: Pointer to cnic device instance
2033 * 1965 *
@@ -2047,19 +1979,30 @@ static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
2047 return NULL; 1979 return NULL;
2048} 1980}
2049 1981
2050static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev) 1982static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
1983 *netdev)
1984{
1985 struct bnx2fc_interface *interface;
1986
1987 /* Called with bnx2fc_dev_lock held */
1988 list_for_each_entry(interface, &if_list, list) {
1989 if (interface->netdev == netdev)
1990 return interface;
1991 }
1992 return NULL;
1993}
1994
1995static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device
1996 *phys_dev)
2051{ 1997{
2052 struct list_head *list;
2053 struct list_head *temp;
2054 struct bnx2fc_hba *hba; 1998 struct bnx2fc_hba *hba;
2055 1999
2056 /* Called with bnx2fc_dev_lock held */ 2000 /* Called with bnx2fc_dev_lock held */
2057 list_for_each_safe(list, temp, &adapter_list) { 2001 list_for_each_entry(hba, &adapter_list, list) {
2058 hba = (struct bnx2fc_hba *)list;
2059 if (hba->phys_dev == phys_dev) 2002 if (hba->phys_dev == phys_dev)
2060 return hba; 2003 return hba;
2061 } 2004 }
2062 printk(KERN_ERR PFX "hba_lookup: hba NULL\n"); 2005 printk(KERN_ERR PFX "adapter_lookup: hba NULL\n");
2063 return NULL; 2006 return NULL;
2064} 2007}
2065 2008
@@ -2071,6 +2014,8 @@ static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev)
2071static void bnx2fc_ulp_exit(struct cnic_dev *dev) 2014static void bnx2fc_ulp_exit(struct cnic_dev *dev)
2072{ 2015{
2073 struct bnx2fc_hba *hba; 2016 struct bnx2fc_hba *hba;
2017 struct bnx2fc_interface *interface, *tmp;
2018 struct fc_lport *lport;
2074 2019
2075 BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n"); 2020 BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");
2076 2021
@@ -2089,13 +2034,20 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
2089 return; 2034 return;
2090 } 2035 }
2091 2036
2092 list_del_init(&hba->link); 2037 list_del_init(&hba->list);
2093 adapter_count--; 2038 adapter_count--;
2094 2039
2095 if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { 2040 list_for_each_entry_safe(interface, tmp, &if_list, list) {
2096 /* destroy not called yet, move to quiesced list */ 2041 /* destroy not called yet, move to quiesced list */
2097 bnx2fc_netdev_cleanup(hba); 2042 if (interface->hba == hba) {
2098 bnx2fc_if_destroy(hba->ctlr.lp); 2043 bnx2fc_netdev_cleanup(interface);
2044 bnx2fc_stop(interface);
2045
2046 list_del(&interface->list);
2047 lport = interface->ctlr.lp;
2048 bnx2fc_interface_put(interface);
2049 bnx2fc_if_destroy(lport, hba);
2050 }
2099 } 2051 }
2100 mutex_unlock(&bnx2fc_dev_lock); 2052 mutex_unlock(&bnx2fc_dev_lock);
2101 2053
@@ -2103,7 +2055,7 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
2103 /* unregister cnic device */ 2055 /* unregister cnic device */
2104 if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) 2056 if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
2105 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); 2057 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
2106 bnx2fc_interface_destroy(hba); 2058 bnx2fc_hba_destroy(hba);
2107} 2059}
2108 2060
2109/** 2061/**
@@ -2259,6 +2211,7 @@ static int __init bnx2fc_mod_init(void)
2259 } 2211 }
2260 2212
2261 INIT_LIST_HEAD(&adapter_list); 2213 INIT_LIST_HEAD(&adapter_list);
2214 INIT_LIST_HEAD(&if_list);
2262 mutex_init(&bnx2fc_dev_lock); 2215 mutex_init(&bnx2fc_dev_lock);
2263 adapter_count = 0; 2216 adapter_count = 0;
2264 2217
@@ -2336,16 +2289,17 @@ static void __exit bnx2fc_mod_exit(void)
2336 mutex_unlock(&bnx2fc_dev_lock); 2289 mutex_unlock(&bnx2fc_dev_lock);
2337 2290
2338 /* Unregister with cnic */ 2291 /* Unregister with cnic */
2339 list_for_each_entry_safe(hba, next, &to_be_deleted, link) { 2292 list_for_each_entry_safe(hba, next, &to_be_deleted, list) {
2340 list_del_init(&hba->link); 2293 list_del_init(&hba->list);
2341 printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p, kref = %d\n", 2294 printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p\n",
2342 hba, atomic_read(&hba->kref.refcount)); 2295 hba);
2343 bnx2fc_ulp_stop(hba); 2296 bnx2fc_ulp_stop(hba);
2344 /* unregister cnic device */ 2297 /* unregister cnic device */
2345 if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, 2298 if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED,
2346 &hba->reg_with_cnic)) 2299 &hba->reg_with_cnic))
2347 hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); 2300 hba->cnic->unregister_device(hba->cnic,
2348 bnx2fc_interface_destroy(hba); 2301 CNIC_ULP_FCOE);
2302 bnx2fc_hba_destroy(hba);
2349 } 2303 }
2350 cnic_unregister_driver(CNIC_ULP_FCOE); 2304 cnic_unregister_driver(CNIC_ULP_FCOE);
2351 2305
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 09bdd9b88d1a..72cfb14acd3a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -2,7 +2,7 @@
2 * This file contains the code that low level functions that interact 2 * This file contains the code that low level functions that interact
3 * with 57712 FCoE firmware. 3 * with 57712 FCoE firmware.
4 * 4 *
5 * Copyright (c) 2008 - 2010 Broadcom Corporation 5 * Copyright (c) 2008 - 2011 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -23,7 +23,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
23 struct fcoe_kcqe *ofld_kcqe); 23 struct fcoe_kcqe *ofld_kcqe);
24static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code); 24static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
25static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, 25static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
26 struct fcoe_kcqe *conn_destroy); 26 struct fcoe_kcqe *destroy_kcqe);
27 27
28int bnx2fc_send_stat_req(struct bnx2fc_hba *hba) 28int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
29{ 29{
@@ -67,7 +67,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
67 int rc = 0; 67 int rc = 0;
68 68
69 if (!hba->cnic) { 69 if (!hba->cnic) {
70 printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n"); 70 printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
71 return -ENODEV; 71 return -ENODEV;
72 } 72 }
73 73
@@ -103,6 +103,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
103 fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION; 103 fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
104 fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION; 104 fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
105 105
106
106 fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma; 107 fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
107 fcoe_init2.hash_tbl_pbl_addr_hi = (u32) 108 fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
108 ((u64) hba->hash_tbl_pbl_dma >> 32); 109 ((u64) hba->hash_tbl_pbl_dma >> 32);
@@ -165,7 +166,8 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
165 struct bnx2fc_rport *tgt) 166 struct bnx2fc_rport *tgt)
166{ 167{
167 struct fc_lport *lport = port->lport; 168 struct fc_lport *lport = port->lport;
168 struct bnx2fc_hba *hba = port->priv; 169 struct bnx2fc_interface *interface = port->priv;
170 struct bnx2fc_hba *hba = interface->hba;
169 struct kwqe *kwqe_arr[4]; 171 struct kwqe *kwqe_arr[4];
170 struct fcoe_kwqe_conn_offload1 ofld_req1; 172 struct fcoe_kwqe_conn_offload1 ofld_req1;
171 struct fcoe_kwqe_conn_offload2 ofld_req2; 173 struct fcoe_kwqe_conn_offload2 ofld_req2;
@@ -227,7 +229,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
227 ofld_req3.hdr.flags = 229 ofld_req3.hdr.flags =
228 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); 230 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
229 231
230 ofld_req3.vlan_tag = hba->vlan_id << 232 ofld_req3.vlan_tag = interface->vlan_id <<
231 FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT; 233 FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
232 ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT; 234 ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
233 235
@@ -277,8 +279,20 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
277 ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) << 279 ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
278 FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT); 280 FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
279 281
282 /*
283 * Info from PRLI response, this info is used for sequence level error
284 * recovery support
285 */
286 if (tgt->dev_type == TYPE_TAPE) {
287 ofld_req3.flags |= 1 <<
288 FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
289 ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
290 ? 1 : 0) <<
291 FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
292 }
293
280 /* vlan flag */ 294 /* vlan flag */
281 ofld_req3.flags |= (hba->vlan_enabled << 295 ofld_req3.flags |= (interface->vlan_enabled <<
282 FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT); 296 FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
283 297
284 /* C2_VALID and ACK flags are not set as they are not suppported */ 298 /* C2_VALID and ACK flags are not set as they are not suppported */
@@ -300,12 +314,13 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
300 ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2]; 314 ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
301 ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1]; 315 ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
302 ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0]; 316 ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
303 ofld_req4.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */ 317 ofld_req4.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
304 ofld_req4.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4]; 318 /* fcf mac */
305 ofld_req4.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3]; 319 ofld_req4.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
306 ofld_req4.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2]; 320 ofld_req4.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
307 ofld_req4.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1]; 321 ofld_req4.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
308 ofld_req4.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0]; 322 ofld_req4.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
323 ofld_req4.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
309 324
310 ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; 325 ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
311 ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); 326 ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@@ -335,7 +350,8 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
335 struct bnx2fc_rport *tgt) 350 struct bnx2fc_rport *tgt)
336{ 351{
337 struct kwqe *kwqe_arr[2]; 352 struct kwqe *kwqe_arr[2];
338 struct bnx2fc_hba *hba = port->priv; 353 struct bnx2fc_interface *interface = port->priv;
354 struct bnx2fc_hba *hba = interface->hba;
339 struct fcoe_kwqe_conn_enable_disable enbl_req; 355 struct fcoe_kwqe_conn_enable_disable enbl_req;
340 struct fc_lport *lport = port->lport; 356 struct fc_lport *lport = port->lport;
341 struct fc_rport *rport = tgt->rport; 357 struct fc_rport *rport = tgt->rport;
@@ -358,12 +374,12 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
358 enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0]; 374 enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
359 memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN); 375 memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
360 376
361 enbl_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */ 377 enbl_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
362 enbl_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4]; 378 enbl_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
363 enbl_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3]; 379 enbl_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
364 enbl_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2]; 380 enbl_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
365 enbl_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1]; 381 enbl_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
366 enbl_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0]; 382 enbl_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
367 383
368 port_id = fc_host_port_id(lport->host); 384 port_id = fc_host_port_id(lport->host);
369 if (port_id != tgt->sid) { 385 if (port_id != tgt->sid) {
@@ -379,10 +395,10 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
379 enbl_req.d_id[0] = (port_id & 0x000000FF); 395 enbl_req.d_id[0] = (port_id & 0x000000FF);
380 enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8; 396 enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
381 enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16; 397 enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
382 enbl_req.vlan_tag = hba->vlan_id << 398 enbl_req.vlan_tag = interface->vlan_id <<
383 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; 399 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
384 enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; 400 enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
385 enbl_req.vlan_flag = hba->vlan_enabled; 401 enbl_req.vlan_flag = interface->vlan_enabled;
386 enbl_req.context_id = tgt->context_id; 402 enbl_req.context_id = tgt->context_id;
387 enbl_req.conn_id = tgt->fcoe_conn_id; 403 enbl_req.conn_id = tgt->fcoe_conn_id;
388 404
@@ -402,7 +418,8 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
402int bnx2fc_send_session_disable_req(struct fcoe_port *port, 418int bnx2fc_send_session_disable_req(struct fcoe_port *port,
403 struct bnx2fc_rport *tgt) 419 struct bnx2fc_rport *tgt)
404{ 420{
405 struct bnx2fc_hba *hba = port->priv; 421 struct bnx2fc_interface *interface = port->priv;
422 struct bnx2fc_hba *hba = interface->hba;
406 struct fcoe_kwqe_conn_enable_disable disable_req; 423 struct fcoe_kwqe_conn_enable_disable disable_req;
407 struct kwqe *kwqe_arr[2]; 424 struct kwqe *kwqe_arr[2];
408 struct fc_rport *rport = tgt->rport; 425 struct fc_rport *rport = tgt->rport;
@@ -423,12 +440,12 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
423 disable_req.src_mac_addr_hi[0] = tgt->src_addr[1]; 440 disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
424 disable_req.src_mac_addr_hi[1] = tgt->src_addr[0]; 441 disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];
425 442
426 disable_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */ 443 disable_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
427 disable_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4]; 444 disable_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
428 disable_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3]; 445 disable_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
429 disable_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2]; 446 disable_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
430 disable_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1]; 447 disable_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
431 disable_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0]; 448 disable_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
432 449
433 port_id = tgt->sid; 450 port_id = tgt->sid;
434 disable_req.s_id[0] = (port_id & 0x000000FF); 451 disable_req.s_id[0] = (port_id & 0x000000FF);
@@ -442,11 +459,11 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
442 disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16; 459 disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
443 disable_req.context_id = tgt->context_id; 460 disable_req.context_id = tgt->context_id;
444 disable_req.conn_id = tgt->fcoe_conn_id; 461 disable_req.conn_id = tgt->fcoe_conn_id;
445 disable_req.vlan_tag = hba->vlan_id << 462 disable_req.vlan_tag = interface->vlan_id <<
446 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; 463 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
447 disable_req.vlan_tag |= 464 disable_req.vlan_tag |=
448 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; 465 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
449 disable_req.vlan_flag = hba->vlan_enabled; 466 disable_req.vlan_flag = interface->vlan_enabled;
450 467
451 kwqe_arr[0] = (struct kwqe *) &disable_req; 468 kwqe_arr[0] = (struct kwqe *) &disable_req;
452 469
@@ -525,7 +542,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
525{ 542{
526 struct fcoe_port *port = tgt->port; 543 struct fcoe_port *port = tgt->port;
527 struct fc_lport *lport = port->lport; 544 struct fc_lport *lport = port->lport;
528 struct bnx2fc_hba *hba = port->priv; 545 struct bnx2fc_interface *interface = port->priv;
529 struct bnx2fc_unsol_els *unsol_els; 546 struct bnx2fc_unsol_els *unsol_els;
530 struct fc_frame_header *fh; 547 struct fc_frame_header *fh;
531 struct fc_frame *fp; 548 struct fc_frame *fp;
@@ -586,7 +603,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
586 fr_eof(fp) = FC_EOF_T; 603 fr_eof(fp) = FC_EOF_T;
587 fr_crc(fp) = cpu_to_le32(~crc); 604 fr_crc(fp) = cpu_to_le32(~crc);
588 unsol_els->lport = lport; 605 unsol_els->lport = lport;
589 unsol_els->hba = hba; 606 unsol_els->hba = interface->hba;
590 unsol_els->fp = fp; 607 unsol_els->fp = fp;
591 INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work); 608 INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
592 queue_work(bnx2fc_wq, &unsol_els->unsol_els_work); 609 queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
@@ -608,9 +625,12 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
608 u32 frame_len, len; 625 u32 frame_len, len;
609 struct bnx2fc_cmd *io_req = NULL; 626 struct bnx2fc_cmd *io_req = NULL;
610 struct fcoe_task_ctx_entry *task, *task_page; 627 struct fcoe_task_ctx_entry *task, *task_page;
611 struct bnx2fc_hba *hba = tgt->port->priv; 628 struct bnx2fc_interface *interface = tgt->port->priv;
629 struct bnx2fc_hba *hba = interface->hba;
612 int task_idx, index; 630 int task_idx, index;
613 int rc = 0; 631 int rc = 0;
632 u64 err_warn_bit_map;
633 u8 err_warn = 0xff;
614 634
615 635
616 BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe); 636 BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
@@ -673,39 +693,43 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
673 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n", 693 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
674 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); 694 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
675 695
676 bnx2fc_return_rqe(tgt, 1);
677 696
678 if (xid > BNX2FC_MAX_XID) { 697 if (xid > BNX2FC_MAX_XID) {
679 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", 698 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
680 xid); 699 xid);
681 spin_unlock_bh(&tgt->tgt_lock); 700 goto ret_err_rqe;
682 break;
683 } 701 }
684 702
685 task_idx = xid / BNX2FC_TASKS_PER_PAGE; 703 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
686 index = xid % BNX2FC_TASKS_PER_PAGE; 704 index = xid % BNX2FC_TASKS_PER_PAGE;
687 task_page = (struct fcoe_task_ctx_entry *) 705 task_page = (struct fcoe_task_ctx_entry *)
688 hba->task_ctx[task_idx]; 706 hba->task_ctx[task_idx];
689 task = &(task_page[index]); 707 task = &(task_page[index]);
690 708
691 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; 709 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
692 if (!io_req) { 710 if (!io_req)
693 spin_unlock_bh(&tgt->tgt_lock); 711 goto ret_err_rqe;
694 break;
695 }
696 712
697 if (io_req->cmd_type != BNX2FC_SCSI_CMD) { 713 if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
698 printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); 714 printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
699 spin_unlock_bh(&tgt->tgt_lock); 715 goto ret_err_rqe;
700 break;
701 } 716 }
702 717
703 if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP, 718 if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
704 &io_req->req_flags)) { 719 &io_req->req_flags)) {
705 BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in " 720 BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
706 "progress.. ignore unsol err\n"); 721 "progress.. ignore unsol err\n");
707 spin_unlock_bh(&tgt->tgt_lock); 722 goto ret_err_rqe;
708 break; 723 }
724
725 err_warn_bit_map = (u64)
726 ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
727 (u64)err_entry->data.err_warn_bitmap_lo;
728 for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
729 if (err_warn_bit_map & (u64)((u64)1 << i)) {
730 err_warn = i;
731 break;
732 }
709 } 733 }
710 734
711 /* 735 /*
@@ -715,26 +739,61 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
715 * logging out the target, when the ABTS eventually 739 * logging out the target, when the ABTS eventually
716 * times out. 740 * times out.
717 */ 741 */
718 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, 742 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
719 &io_req->req_flags)) {
720 /*
721 * Cancel the timeout_work, as we received IO
722 * completion with FW error.
723 */
724 if (cancel_delayed_work(&io_req->timeout_work))
725 kref_put(&io_req->refcount,
726 bnx2fc_cmd_release); /* timer hold */
727
728 rc = bnx2fc_initiate_abts(io_req);
729 if (rc != SUCCESS) {
730 BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
731 "failed. issue cleanup\n");
732 rc = bnx2fc_initiate_cleanup(io_req);
733 BUG_ON(rc);
734 }
735 } else
736 printk(KERN_ERR PFX "err_warn: io_req (0x%x) already " 743 printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
737 "in ABTS processing\n", xid); 744 "in ABTS processing\n", xid);
745 goto ret_err_rqe;
746 }
747 BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
748 if (tgt->dev_type != TYPE_TAPE)
749 goto skip_rec;
750 switch (err_warn) {
751 case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
752 case FCOE_ERROR_CODE_DATA_OOO_RO:
753 case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
754 case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
755 case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
756 case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
757 BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
758 xid);
759 memset(&io_req->err_entry, 0,
760 sizeof(struct fcoe_err_report_entry));
761 memcpy(&io_req->err_entry, err_entry,
762 sizeof(struct fcoe_err_report_entry));
763 if (!test_bit(BNX2FC_FLAG_SRR_SENT,
764 &io_req->req_flags)) {
765 spin_unlock_bh(&tgt->tgt_lock);
766 rc = bnx2fc_send_rec(io_req);
767 spin_lock_bh(&tgt->tgt_lock);
768
769 if (rc)
770 goto skip_rec;
771 } else
772 printk(KERN_ERR PFX "SRR in progress\n");
773 goto ret_err_rqe;
774 break;
775 default:
776 break;
777 }
778
779skip_rec:
780 set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
781 /*
782 * Cancel the timeout_work, as we received IO
783 * completion with FW error.
784 */
785 if (cancel_delayed_work(&io_req->timeout_work))
786 kref_put(&io_req->refcount, bnx2fc_cmd_release);
787
788 rc = bnx2fc_initiate_abts(io_req);
789 if (rc != SUCCESS) {
790 printk(KERN_ERR PFX "err_warn: initiate_abts "
791 "failed xid = 0x%x. issue cleanup\n",
792 io_req->xid);
793 bnx2fc_initiate_cleanup(io_req);
794 }
795ret_err_rqe:
796 bnx2fc_return_rqe(tgt, 1);
738 spin_unlock_bh(&tgt->tgt_lock); 797 spin_unlock_bh(&tgt->tgt_lock);
739 break; 798 break;
740 799
@@ -755,6 +814,47 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
755 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x", 814 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
756 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); 815 err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
757 816
817 if (xid > BNX2FC_MAX_XID) {
818 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
819 goto ret_warn_rqe;
820 }
821
822 err_warn_bit_map = (u64)
823 ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
824 (u64)err_entry->data.err_warn_bitmap_lo;
825 for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
826 if (err_warn_bit_map & (u64) (1 << i)) {
827 err_warn = i;
828 break;
829 }
830 }
831 BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);
832
833 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
834 index = xid % BNX2FC_TASKS_PER_PAGE;
835 task_page = (struct fcoe_task_ctx_entry *)
836 interface->hba->task_ctx[task_idx];
837 task = &(task_page[index]);
838 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
839 if (!io_req)
840 goto ret_warn_rqe;
841
842 if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
843 printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
844 goto ret_warn_rqe;
845 }
846
847 memset(&io_req->err_entry, 0,
848 sizeof(struct fcoe_err_report_entry));
849 memcpy(&io_req->err_entry, err_entry,
850 sizeof(struct fcoe_err_report_entry));
851
852 if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
853 /* REC_TOV is not a warning code */
854 BUG_ON(1);
855 else
856 BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
857ret_warn_rqe:
758 bnx2fc_return_rqe(tgt, 1); 858 bnx2fc_return_rqe(tgt, 1);
759 spin_unlock_bh(&tgt->tgt_lock); 859 spin_unlock_bh(&tgt->tgt_lock);
760 break; 860 break;
@@ -770,7 +870,8 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
770 struct fcoe_task_ctx_entry *task; 870 struct fcoe_task_ctx_entry *task;
771 struct fcoe_task_ctx_entry *task_page; 871 struct fcoe_task_ctx_entry *task_page;
772 struct fcoe_port *port = tgt->port; 872 struct fcoe_port *port = tgt->port;
773 struct bnx2fc_hba *hba = port->priv; 873 struct bnx2fc_interface *interface = port->priv;
874 struct bnx2fc_hba *hba = interface->hba;
774 struct bnx2fc_cmd *io_req; 875 struct bnx2fc_cmd *io_req;
775 int task_idx, index; 876 int task_idx, index;
776 u16 xid; 877 u16 xid;
@@ -781,7 +882,7 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
781 spin_lock_bh(&tgt->tgt_lock); 882 spin_lock_bh(&tgt->tgt_lock);
782 xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; 883 xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
783 if (xid >= BNX2FC_MAX_TASKS) { 884 if (xid >= BNX2FC_MAX_TASKS) {
784 printk(KERN_ALERT PFX "ERROR:xid out of range\n"); 885 printk(KERN_ERR PFX "ERROR:xid out of range\n");
785 spin_unlock_bh(&tgt->tgt_lock); 886 spin_unlock_bh(&tgt->tgt_lock);
786 return; 887 return;
787 } 888 }
@@ -861,6 +962,13 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
861 kref_put(&io_req->refcount, bnx2fc_cmd_release); 962 kref_put(&io_req->refcount, bnx2fc_cmd_release);
862 break; 963 break;
863 964
965 case BNX2FC_SEQ_CLEANUP:
966 BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
967 io_req->xid);
968 bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
969 kref_put(&io_req->refcount, bnx2fc_cmd_release);
970 break;
971
864 default: 972 default:
865 printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type); 973 printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
866 break; 974 break;
@@ -962,8 +1070,10 @@ unlock:
962 1 - tgt->cq_curr_toggle_bit; 1070 1 - tgt->cq_curr_toggle_bit;
963 } 1071 }
964 } 1072 }
965 bnx2fc_arm_cq(tgt); 1073 if (num_free_sqes) {
966 atomic_add(num_free_sqes, &tgt->free_sqes); 1074 bnx2fc_arm_cq(tgt);
1075 atomic_add(num_free_sqes, &tgt->free_sqes);
1076 }
967 spin_unlock_bh(&tgt->cq_lock); 1077 spin_unlock_bh(&tgt->cq_lock);
968 return 0; 1078 return 0;
969} 1079}
@@ -983,7 +1093,7 @@ static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
983 struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id]; 1093 struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
984 1094
985 if (!tgt) { 1095 if (!tgt) {
986 printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id); 1096 printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
987 return; 1097 return;
988 } 1098 }
989 1099
@@ -1004,6 +1114,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1004{ 1114{
1005 struct bnx2fc_rport *tgt; 1115 struct bnx2fc_rport *tgt;
1006 struct fcoe_port *port; 1116 struct fcoe_port *port;
1117 struct bnx2fc_interface *interface;
1007 u32 conn_id; 1118 u32 conn_id;
1008 u32 context_id; 1119 u32 context_id;
1009 int rc; 1120 int rc;
@@ -1018,8 +1129,9 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1018 BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n", 1129 BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
1019 ofld_kcqe->fcoe_conn_context_id); 1130 ofld_kcqe->fcoe_conn_context_id);
1020 port = tgt->port; 1131 port = tgt->port;
1021 if (hba != tgt->port->priv) { 1132 interface = tgt->port->priv;
1022 printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n"); 1133 if (hba != interface->hba) {
1134 printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
1023 goto ofld_cmpl_err; 1135 goto ofld_cmpl_err;
1024 } 1136 }
1025 /* 1137 /*
@@ -1040,7 +1152,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1040 /* now enable the session */ 1152 /* now enable the session */
1041 rc = bnx2fc_send_session_enable_req(port, tgt); 1153 rc = bnx2fc_send_session_enable_req(port, tgt);
1042 if (rc) { 1154 if (rc) {
1043 printk(KERN_ALERT PFX "enable session failed\n"); 1155 printk(KERN_ERR PFX "enable session failed\n");
1044 goto ofld_cmpl_err; 1156 goto ofld_cmpl_err;
1045 } 1157 }
1046 } 1158 }
@@ -1063,6 +1175,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1063 struct fcoe_kcqe *ofld_kcqe) 1175 struct fcoe_kcqe *ofld_kcqe)
1064{ 1176{
1065 struct bnx2fc_rport *tgt; 1177 struct bnx2fc_rport *tgt;
1178 struct bnx2fc_interface *interface;
1066 u32 conn_id; 1179 u32 conn_id;
1067 u32 context_id; 1180 u32 context_id;
1068 1181
@@ -1070,7 +1183,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1070 conn_id = ofld_kcqe->fcoe_conn_id; 1183 conn_id = ofld_kcqe->fcoe_conn_id;
1071 tgt = hba->tgt_ofld_list[conn_id]; 1184 tgt = hba->tgt_ofld_list[conn_id];
1072 if (!tgt) { 1185 if (!tgt) {
1073 printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n"); 1186 printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
1074 return; 1187 return;
1075 } 1188 }
1076 1189
@@ -1082,16 +1195,17 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1082 * and enable 1195 * and enable
1083 */ 1196 */
1084 if (tgt->context_id != context_id) { 1197 if (tgt->context_id != context_id) {
1085 printk(KERN_ALERT PFX "context id mis-match\n"); 1198 printk(KERN_ERR PFX "context id mis-match\n");
1086 return; 1199 return;
1087 } 1200 }
1088 if (hba != tgt->port->priv) { 1201 interface = tgt->port->priv;
1089 printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n"); 1202 if (hba != interface->hba) {
1203 printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
1090 goto enbl_cmpl_err; 1204 goto enbl_cmpl_err;
1091 } 1205 }
1092 if (ofld_kcqe->completion_status) { 1206 if (ofld_kcqe->completion_status)
1093 goto enbl_cmpl_err; 1207 goto enbl_cmpl_err;
1094 } else { 1208 else {
1095 /* enable successful - rport ready for issuing IOs */ 1209 /* enable successful - rport ready for issuing IOs */
1096 set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); 1210 set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1097 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); 1211 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
@@ -1114,14 +1228,14 @@ static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
1114 conn_id = disable_kcqe->fcoe_conn_id; 1228 conn_id = disable_kcqe->fcoe_conn_id;
1115 tgt = hba->tgt_ofld_list[conn_id]; 1229 tgt = hba->tgt_ofld_list[conn_id];
1116 if (!tgt) { 1230 if (!tgt) {
1117 printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n"); 1231 printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
1118 return; 1232 return;
1119 } 1233 }
1120 1234
1121 BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id); 1235 BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);
1122 1236
1123 if (disable_kcqe->completion_status) { 1237 if (disable_kcqe->completion_status) {
1124 printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n", 1238 printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
1125 disable_kcqe->completion_status); 1239 disable_kcqe->completion_status);
1126 return; 1240 return;
1127 } else { 1241 } else {
@@ -1143,14 +1257,14 @@ static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
1143 conn_id = destroy_kcqe->fcoe_conn_id; 1257 conn_id = destroy_kcqe->fcoe_conn_id;
1144 tgt = hba->tgt_ofld_list[conn_id]; 1258 tgt = hba->tgt_ofld_list[conn_id];
1145 if (!tgt) { 1259 if (!tgt) {
1146 printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n"); 1260 printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
1147 return; 1261 return;
1148 } 1262 }
1149 1263
1150 BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id); 1264 BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
1151 1265
1152 if (destroy_kcqe->completion_status) { 1266 if (destroy_kcqe->completion_status) {
1153 printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n", 1267 printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
1154 destroy_kcqe->completion_status); 1268 destroy_kcqe->completion_status);
1155 return; 1269 return;
1156 } else { 1270 } else {
@@ -1182,6 +1296,7 @@ static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
1182 break; 1296 break;
1183 case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION: 1297 case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
1184 printk(KERN_ERR PFX "init failure due to HSI mismatch\n"); 1298 printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
1299 break;
1185 default: 1300 default:
1186 printk(KERN_ERR PFX "Unknown Error code %d\n", err_code); 1301 printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
1187 } 1302 }
@@ -1240,7 +1355,7 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
1240 } else { 1355 } else {
1241 printk(KERN_ERR PFX "DESTROY success\n"); 1356 printk(KERN_ERR PFX "DESTROY success\n");
1242 } 1357 }
1243 hba->flags |= BNX2FC_FLAG_DESTROY_CMPL; 1358 set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
1244 wake_up_interruptible(&hba->destroy_wait); 1359 wake_up_interruptible(&hba->destroy_wait);
1245 break; 1360 break;
1246 1361
@@ -1262,7 +1377,7 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
1262 case FCOE_KCQE_OPCODE_FCOE_ERROR: 1377 case FCOE_KCQE_OPCODE_FCOE_ERROR:
1263 /* fall thru */ 1378 /* fall thru */
1264 default: 1379 default:
1265 printk(KERN_ALERT PFX "unknown opcode 0x%x\n", 1380 printk(KERN_ERR PFX "unknown opcode 0x%x\n",
1266 kcqe->op_code); 1381 kcqe->op_code);
1267 } 1382 }
1268 } 1383 }
@@ -1305,7 +1420,8 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
1305 struct fcoe_port *port = tgt->port; 1420 struct fcoe_port *port = tgt->port;
1306 u32 reg_off; 1421 u32 reg_off;
1307 resource_size_t reg_base; 1422 resource_size_t reg_base;
1308 struct bnx2fc_hba *hba = port->priv; 1423 struct bnx2fc_interface *interface = port->priv;
1424 struct bnx2fc_hba *hba = interface->hba;
1309 1425
1310 reg_base = pci_resource_start(hba->pcidev, 1426 reg_base = pci_resource_start(hba->pcidev,
1311 BNX2X_DOORBELL_PCI_BAR); 1427 BNX2X_DOORBELL_PCI_BAR);
@@ -1344,6 +1460,96 @@ void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1344 tgt->conn_db->rq_prod = tgt->rq_prod_idx; 1460 tgt->conn_db->rq_prod = tgt->rq_prod_idx;
1345} 1461}
1346 1462
1463void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
1464 struct fcoe_task_ctx_entry *task,
1465 struct bnx2fc_cmd *orig_io_req,
1466 u32 offset)
1467{
1468 struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
1469 struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
1470 struct bnx2fc_interface *interface = tgt->port->priv;
1471 struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
1472 struct fcoe_task_ctx_entry *orig_task;
1473 struct fcoe_task_ctx_entry *task_page;
1474 struct fcoe_ext_mul_sges_ctx *sgl;
1475 u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
1476 u8 orig_task_type;
1477 u16 orig_xid = orig_io_req->xid;
1478 u32 context_id = tgt->context_id;
1479 u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
1480 u32 orig_offset = offset;
1481 int bd_count;
1482 int orig_task_idx, index;
1483 int i;
1484
1485 memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1486
1487 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1488 orig_task_type = FCOE_TASK_TYPE_WRITE;
1489 else
1490 orig_task_type = FCOE_TASK_TYPE_READ;
1491
1492 /* Tx flags */
1493 task->txwr_rxrd.const_ctx.tx_flags =
1494 FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
1495 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1496 /* init flags */
1497 task->txwr_rxrd.const_ctx.init_flags = task_type <<
1498 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1499 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1500 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1501 task->rxwr_txrd.const_ctx.init_flags = context_id <<
1502 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1503 task->rxwr_txrd.const_ctx.init_flags = context_id <<
1504 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1505
1506 task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1507
1508 task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
1509 task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
1510
1511 bd_count = orig_io_req->bd_tbl->bd_valid;
1512
1513 /* obtain the appropriate bd entry from relative offset */
1514 for (i = 0; i < bd_count; i++) {
1515 if (offset < bd[i].buf_len)
1516 break;
1517 offset -= bd[i].buf_len;
1518 }
1519 phys_addr += (i * sizeof(struct fcoe_bd_ctx));
1520
1521 if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
1522 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1523 (u32)phys_addr;
1524 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1525 (u32)((u64)phys_addr >> 32);
1526 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1527 bd_count;
1528 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
1529 offset; /* adjusted offset */
1530 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
1531 } else {
1532 orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
1533 index = orig_xid % BNX2FC_TASKS_PER_PAGE;
1534
1535 task_page = (struct fcoe_task_ctx_entry *)
1536 interface->hba->task_ctx[orig_task_idx];
1537 orig_task = &(task_page[index]);
1538
1539 /* Multiple SGEs were used for this IO */
1540 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1541 sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
1542 sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
1543 sgl->mul_sgl.sgl_size = bd_count;
1544 sgl->mul_sgl.cur_sge_off = offset; /*adjusted offset */
1545 sgl->mul_sgl.cur_sge_idx = i;
1546
1547 memset(&task->rxwr_only.rx_seq_ctx, 0,
1548 sizeof(struct fcoe_rx_seq_ctx));
1549 task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
1550 task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
1551 }
1552}
1347void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, 1553void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1348 struct fcoe_task_ctx_entry *task, 1554 struct fcoe_task_ctx_entry *task,
1349 u16 orig_xid) 1555 u16 orig_xid)
@@ -1360,7 +1566,12 @@ void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1360 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; 1566 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1361 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << 1567 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1362 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; 1568 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1363 task->txwr_rxrd.const_ctx.init_flags |= 1569 if (tgt->dev_type == TYPE_TAPE)
1570 task->txwr_rxrd.const_ctx.init_flags |=
1571 FCOE_TASK_DEV_TYPE_TAPE <<
1572 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1573 else
1574 task->txwr_rxrd.const_ctx.init_flags |=
1364 FCOE_TASK_DEV_TYPE_DISK << 1575 FCOE_TASK_DEV_TYPE_DISK <<
1365 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; 1576 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1366 task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; 1577 task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
@@ -1420,7 +1631,12 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1420 /* init flags */ 1631 /* init flags */
1421 task->txwr_rxrd.const_ctx.init_flags = task_type << 1632 task->txwr_rxrd.const_ctx.init_flags = task_type <<
1422 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; 1633 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1423 task->txwr_rxrd.const_ctx.init_flags |= 1634 if (tgt->dev_type == TYPE_TAPE)
1635 task->txwr_rxrd.const_ctx.init_flags |=
1636 FCOE_TASK_DEV_TYPE_TAPE <<
1637 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1638 else
1639 task->txwr_rxrd.const_ctx.init_flags |=
1424 FCOE_TASK_DEV_TYPE_DISK << 1640 FCOE_TASK_DEV_TYPE_DISK <<
1425 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; 1641 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1426 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << 1642 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
@@ -1477,6 +1693,7 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1477 struct bnx2fc_rport *tgt = io_req->tgt; 1693 struct bnx2fc_rport *tgt = io_req->tgt;
1478 struct fcoe_cached_sge_ctx *cached_sge; 1694 struct fcoe_cached_sge_ctx *cached_sge;
1479 struct fcoe_ext_mul_sges_ctx *sgl; 1695 struct fcoe_ext_mul_sges_ctx *sgl;
1696 int dev_type = tgt->dev_type;
1480 u64 *fcp_cmnd; 1697 u64 *fcp_cmnd;
1481 u64 tmp_fcp_cmnd[4]; 1698 u64 tmp_fcp_cmnd[4];
1482 u32 context_id; 1699 u32 context_id;
@@ -1494,20 +1711,40 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1494 task_type = FCOE_TASK_TYPE_READ; 1711 task_type = FCOE_TASK_TYPE_READ;
1495 1712
1496 /* Tx only */ 1713 /* Tx only */
1714 bd_count = bd_tbl->bd_valid;
1497 if (task_type == FCOE_TASK_TYPE_WRITE) { 1715 if (task_type == FCOE_TASK_TYPE_WRITE) {
1498 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = 1716 if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
1499 (u32)bd_tbl->bd_tbl_dma; 1717 struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1500 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = 1718
1501 (u32)((u64)bd_tbl->bd_tbl_dma >> 32); 1719 task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
1502 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1720 fcoe_bd_tbl->buf_addr_lo;
1503 bd_tbl->bd_valid; 1721 task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
1722 fcoe_bd_tbl->buf_addr_hi;
1723 task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
1724 fcoe_bd_tbl->buf_len;
1725
1726 task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1727 FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1728 } else {
1729 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1730 (u32)bd_tbl->bd_tbl_dma;
1731 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1732 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1733 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1734 bd_tbl->bd_valid;
1735 }
1504 } 1736 }
1505 1737
1506 /*Tx Write Rx Read */ 1738 /*Tx Write Rx Read */
1507 /* Init state to NORMAL */ 1739 /* Init state to NORMAL */
1508 task->txwr_rxrd.const_ctx.init_flags = task_type << 1740 task->txwr_rxrd.const_ctx.init_flags |= task_type <<
1509 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; 1741 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1510 task->txwr_rxrd.const_ctx.init_flags |= 1742 if (dev_type == TYPE_TAPE)
1743 task->txwr_rxrd.const_ctx.init_flags |=
1744 FCOE_TASK_DEV_TYPE_TAPE <<
1745 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1746 else
1747 task->txwr_rxrd.const_ctx.init_flags |=
1511 FCOE_TASK_DEV_TYPE_DISK << 1748 FCOE_TASK_DEV_TYPE_DISK <<
1512 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; 1749 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1513 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << 1750 task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
@@ -1550,7 +1787,8 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1550 cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge; 1787 cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
1551 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; 1788 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1552 bd_count = bd_tbl->bd_valid; 1789 bd_count = bd_tbl->bd_valid;
1553 if (task_type == FCOE_TASK_TYPE_READ) { 1790 if (task_type == FCOE_TASK_TYPE_READ &&
1791 dev_type == TYPE_DISK) {
1554 if (bd_count == 1) { 1792 if (bd_count == 1) {
1555 1793
1556 struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; 1794 struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
@@ -1582,6 +1820,11 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1582 (u32)((u64)bd_tbl->bd_tbl_dma >> 32); 1820 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1583 sgl->mul_sgl.sgl_size = bd_count; 1821 sgl->mul_sgl.sgl_size = bd_count;
1584 } 1822 }
1823 } else {
1824 sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1825 sgl->mul_sgl.cur_sge_addr.hi =
1826 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1827 sgl->mul_sgl.sgl_size = bd_count;
1585 } 1828 }
1586} 1829}
1587 1830
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 45eba6d609c9..6cc3789075bc 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1,7 +1,7 @@
1/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver. 1/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
2 * IO manager and SCSI IO processing. 2 * IO manager and SCSI IO processing.
3 * 3 *
4 * Copyright (c) 2008 - 2010 Broadcom Corporation 4 * Copyright (c) 2008 - 2011 Broadcom Corporation
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -18,8 +18,6 @@ static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
18 int bd_index); 18 int bd_index);
19static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); 19static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
20static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req); 20static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
21static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
22 struct bnx2fc_cmd *io_req);
23static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req); 21static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
24static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req); 22static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
25static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, 23static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
@@ -29,10 +27,11 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
29void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req, 27void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
30 unsigned int timer_msec) 28 unsigned int timer_msec)
31{ 29{
32 struct bnx2fc_hba *hba = io_req->port->priv; 30 struct bnx2fc_interface *interface = io_req->port->priv;
33 31
34 if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work, 32 if (queue_delayed_work(interface->timer_work_queue,
35 msecs_to_jiffies(timer_msec))) 33 &io_req->timeout_work,
34 msecs_to_jiffies(timer_msec)))
36 kref_get(&io_req->refcount); 35 kref_get(&io_req->refcount);
37} 36}
38 37
@@ -217,6 +216,11 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
217 return; 216 return;
218 217
219 BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code); 218 BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
219 if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
220 /* Do not call scsi done for this IO */
221 return;
222 }
223
220 bnx2fc_unmap_sg_list(io_req); 224 bnx2fc_unmap_sg_list(io_req);
221 io_req->sc_cmd = NULL; 225 io_req->sc_cmd = NULL;
222 if (!sc_cmd) { 226 if (!sc_cmd) {
@@ -419,8 +423,8 @@ free_cmgr:
419struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) 423struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
420{ 424{
421 struct fcoe_port *port = tgt->port; 425 struct fcoe_port *port = tgt->port;
422 struct bnx2fc_hba *hba = port->priv; 426 struct bnx2fc_interface *interface = port->priv;
423 struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr; 427 struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
424 struct bnx2fc_cmd *io_req; 428 struct bnx2fc_cmd *io_req;
425 struct list_head *listp; 429 struct list_head *listp;
426 struct io_bdt *bd_tbl; 430 struct io_bdt *bd_tbl;
@@ -485,11 +489,12 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
485 kref_init(&io_req->refcount); 489 kref_init(&io_req->refcount);
486 return io_req; 490 return io_req;
487} 491}
488static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) 492
493struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
489{ 494{
490 struct fcoe_port *port = tgt->port; 495 struct fcoe_port *port = tgt->port;
491 struct bnx2fc_hba *hba = port->priv; 496 struct bnx2fc_interface *interface = port->priv;
492 struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr; 497 struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
493 struct bnx2fc_cmd *io_req; 498 struct bnx2fc_cmd *io_req;
494 struct list_head *listp; 499 struct list_head *listp;
495 struct io_bdt *bd_tbl; 500 struct io_bdt *bd_tbl;
@@ -570,7 +575,8 @@ void bnx2fc_cmd_release(struct kref *ref)
570static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) 575static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
571{ 576{
572 struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); 577 struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
573 struct bnx2fc_hba *hba = io_req->port->priv; 578 struct bnx2fc_interface *interface = io_req->port->priv;
579 struct bnx2fc_hba *hba = interface->hba;
574 size_t sz = sizeof(struct fcoe_bd_ctx); 580 size_t sz = sizeof(struct fcoe_bd_ctx);
575 581
576 /* clear tm flags */ 582 /* clear tm flags */
@@ -606,7 +612,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
606 struct bnx2fc_mp_req *mp_req; 612 struct bnx2fc_mp_req *mp_req;
607 struct fcoe_bd_ctx *mp_req_bd; 613 struct fcoe_bd_ctx *mp_req_bd;
608 struct fcoe_bd_ctx *mp_resp_bd; 614 struct fcoe_bd_ctx *mp_resp_bd;
609 struct bnx2fc_hba *hba = io_req->port->priv; 615 struct bnx2fc_interface *interface = io_req->port->priv;
616 struct bnx2fc_hba *hba = interface->hba;
610 dma_addr_t addr; 617 dma_addr_t addr;
611 size_t sz; 618 size_t sz;
612 619
@@ -682,7 +689,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
682 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 689 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
683 struct fc_rport_libfc_priv *rp = rport->dd_data; 690 struct fc_rport_libfc_priv *rp = rport->dd_data;
684 struct fcoe_port *port; 691 struct fcoe_port *port;
685 struct bnx2fc_hba *hba; 692 struct bnx2fc_interface *interface;
686 struct bnx2fc_rport *tgt; 693 struct bnx2fc_rport *tgt;
687 struct bnx2fc_cmd *io_req; 694 struct bnx2fc_cmd *io_req;
688 struct bnx2fc_mp_req *tm_req; 695 struct bnx2fc_mp_req *tm_req;
@@ -699,10 +706,10 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
699 706
700 lport = shost_priv(host); 707 lport = shost_priv(host);
701 port = lport_priv(lport); 708 port = lport_priv(lport);
702 hba = port->priv; 709 interface = port->priv;
703 710
704 if (rport == NULL) { 711 if (rport == NULL) {
705 printk(KERN_ALERT PFX "device_reset: rport is NULL\n"); 712 printk(KERN_ERR PFX "device_reset: rport is NULL\n");
706 rc = FAILED; 713 rc = FAILED;
707 goto tmf_err; 714 goto tmf_err;
708 } 715 }
@@ -745,7 +752,9 @@ retry_tmf:
745 rc = bnx2fc_init_mp_req(io_req); 752 rc = bnx2fc_init_mp_req(io_req);
746 if (rc == FAILED) { 753 if (rc == FAILED) {
747 printk(KERN_ERR PFX "Task mgmt MP request init failed\n"); 754 printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
755 spin_lock_bh(&tgt->tgt_lock);
748 kref_put(&io_req->refcount, bnx2fc_cmd_release); 756 kref_put(&io_req->refcount, bnx2fc_cmd_release);
757 spin_unlock_bh(&tgt->tgt_lock);
749 goto tmf_err; 758 goto tmf_err;
750 } 759 }
751 760
@@ -774,7 +783,8 @@ retry_tmf:
774 index = xid % BNX2FC_TASKS_PER_PAGE; 783 index = xid % BNX2FC_TASKS_PER_PAGE;
775 784
776 /* Initialize task context for this IO request */ 785 /* Initialize task context for this IO request */
777 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; 786 task_page = (struct fcoe_task_ctx_entry *)
787 interface->hba->task_ctx[task_idx];
778 task = &(task_page[index]); 788 task = &(task_page[index]);
779 bnx2fc_init_mp_task(io_req, task); 789 bnx2fc_init_mp_task(io_req, task);
780 790
@@ -806,10 +816,10 @@ retry_tmf:
806 spin_unlock_bh(&tgt->tgt_lock); 816 spin_unlock_bh(&tgt->tgt_lock);
807 817
808 if (!rc) { 818 if (!rc) {
809 printk(KERN_ERR PFX "task mgmt command failed...\n"); 819 BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
810 rc = FAILED; 820 rc = FAILED;
811 } else { 821 } else {
812 printk(KERN_ERR PFX "task mgmt command success...\n"); 822 BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
813 rc = SUCCESS; 823 rc = SUCCESS;
814 } 824 }
815tmf_err: 825tmf_err:
@@ -822,7 +832,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
822 struct bnx2fc_rport *tgt = io_req->tgt; 832 struct bnx2fc_rport *tgt = io_req->tgt;
823 struct fc_rport *rport = tgt->rport; 833 struct fc_rport *rport = tgt->rport;
824 struct fc_rport_priv *rdata = tgt->rdata; 834 struct fc_rport_priv *rdata = tgt->rdata;
825 struct bnx2fc_hba *hba; 835 struct bnx2fc_interface *interface;
826 struct fcoe_port *port; 836 struct fcoe_port *port;
827 struct bnx2fc_cmd *abts_io_req; 837 struct bnx2fc_cmd *abts_io_req;
828 struct fcoe_task_ctx_entry *task; 838 struct fcoe_task_ctx_entry *task;
@@ -839,7 +849,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
839 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n"); 849 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
840 850
841 port = io_req->port; 851 port = io_req->port;
842 hba = port->priv; 852 interface = port->priv;
843 lport = port->lport; 853 lport = port->lport;
844 854
845 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { 855 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
@@ -849,7 +859,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
849 } 859 }
850 860
851 if (rport == NULL) { 861 if (rport == NULL) {
852 printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n"); 862 printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
853 rc = FAILED; 863 rc = FAILED;
854 goto abts_err; 864 goto abts_err;
855 } 865 }
@@ -896,7 +906,8 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
896 index = xid % BNX2FC_TASKS_PER_PAGE; 906 index = xid % BNX2FC_TASKS_PER_PAGE;
897 907
898 /* Initialize task context for this IO request */ 908 /* Initialize task context for this IO request */
899 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; 909 task_page = (struct fcoe_task_ctx_entry *)
910 interface->hba->task_ctx[task_idx];
900 task = &(task_page[index]); 911 task = &(task_page[index]);
901 bnx2fc_init_mp_task(abts_io_req, task); 912 bnx2fc_init_mp_task(abts_io_req, task);
902 913
@@ -924,11 +935,81 @@ abts_err:
924 return rc; 935 return rc;
925} 936}
926 937
938int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
939 enum fc_rctl r_ctl)
940{
941 struct fc_lport *lport;
942 struct bnx2fc_rport *tgt = orig_io_req->tgt;
943 struct bnx2fc_interface *interface;
944 struct fcoe_port *port;
945 struct bnx2fc_cmd *seq_clnp_req;
946 struct fcoe_task_ctx_entry *task;
947 struct fcoe_task_ctx_entry *task_page;
948 struct bnx2fc_els_cb_arg *cb_arg = NULL;
949 int task_idx, index;
950 u16 xid;
951 int rc = 0;
952
953 BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
954 orig_io_req->xid);
955 kref_get(&orig_io_req->refcount);
956
957 port = orig_io_req->port;
958 interface = port->priv;
959 lport = port->lport;
960
961 cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
962 if (!cb_arg) {
963 printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
964 rc = -ENOMEM;
965 goto cleanup_err;
966 }
967
968 seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
969 if (!seq_clnp_req) {
970 printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
971 rc = -ENOMEM;
972 kfree(cb_arg);
973 goto cleanup_err;
974 }
975 /* Initialize rest of io_req fields */
976 seq_clnp_req->sc_cmd = NULL;
977 seq_clnp_req->port = port;
978 seq_clnp_req->tgt = tgt;
979 seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */
980
981 xid = seq_clnp_req->xid;
982
983 task_idx = xid/BNX2FC_TASKS_PER_PAGE;
984 index = xid % BNX2FC_TASKS_PER_PAGE;
985
986 /* Initialize task context for this IO request */
987 task_page = (struct fcoe_task_ctx_entry *)
988 interface->hba->task_ctx[task_idx];
989 task = &(task_page[index]);
990 cb_arg->aborted_io_req = orig_io_req;
991 cb_arg->io_req = seq_clnp_req;
992 cb_arg->r_ctl = r_ctl;
993 cb_arg->offset = offset;
994 seq_clnp_req->cb_arg = cb_arg;
995
996 printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
997 bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);
998
999 /* Obtain free SQ entry */
1000 bnx2fc_add_2_sq(tgt, xid);
1001
1002 /* Ring doorbell */
1003 bnx2fc_ring_doorbell(tgt);
1004cleanup_err:
1005 return rc;
1006}
1007
927int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req) 1008int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
928{ 1009{
929 struct fc_lport *lport; 1010 struct fc_lport *lport;
930 struct bnx2fc_rport *tgt = io_req->tgt; 1011 struct bnx2fc_rport *tgt = io_req->tgt;
931 struct bnx2fc_hba *hba; 1012 struct bnx2fc_interface *interface;
932 struct fcoe_port *port; 1013 struct fcoe_port *port;
933 struct bnx2fc_cmd *cleanup_io_req; 1014 struct bnx2fc_cmd *cleanup_io_req;
934 struct fcoe_task_ctx_entry *task; 1015 struct fcoe_task_ctx_entry *task;
@@ -941,7 +1022,7 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
941 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n"); 1022 BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
942 1023
943 port = io_req->port; 1024 port = io_req->port;
944 hba = port->priv; 1025 interface = port->priv;
945 lport = port->lport; 1026 lport = port->lport;
946 1027
947 cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP); 1028 cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
@@ -963,7 +1044,8 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
963 index = xid % BNX2FC_TASKS_PER_PAGE; 1044 index = xid % BNX2FC_TASKS_PER_PAGE;
964 1045
965 /* Initialize task context for this IO request */ 1046 /* Initialize task context for this IO request */
966 task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; 1047 task_page = (struct fcoe_task_ctx_entry *)
1048 interface->hba->task_ctx[task_idx];
967 task = &(task_page[index]); 1049 task = &(task_page[index]);
968 orig_xid = io_req->xid; 1050 orig_xid = io_req->xid;
969 1051
@@ -1031,7 +1113,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1031 1113
1032 lport = shost_priv(sc_cmd->device->host); 1114 lport = shost_priv(sc_cmd->device->host);
1033 if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { 1115 if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
1034 printk(KERN_ALERT PFX "eh_abort: link not ready\n"); 1116 printk(KERN_ERR PFX "eh_abort: link not ready\n");
1035 return rc; 1117 return rc;
1036 } 1118 }
1037 1119
@@ -1062,7 +1144,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1062 * io_req is no longer in the active_q. 1144 * io_req is no longer in the active_q.
1063 */ 1145 */
1064 if (tgt->flush_in_prog) { 1146 if (tgt->flush_in_prog) {
1065 printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " 1147 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1066 "flush in progress\n", io_req->xid); 1148 "flush in progress\n", io_req->xid);
1067 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1149 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1068 spin_unlock_bh(&tgt->tgt_lock); 1150 spin_unlock_bh(&tgt->tgt_lock);
@@ -1070,7 +1152,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1070 } 1152 }
1071 1153
1072 if (io_req->on_active_queue == 0) { 1154 if (io_req->on_active_queue == 0) {
1073 printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " 1155 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1074 "not on active_q\n", io_req->xid); 1156 "not on active_q\n", io_req->xid);
1075 /* 1157 /*
1076 * This condition can happen only due to the FW bug, 1158 * This condition can happen only due to the FW bug,
@@ -1108,7 +1190,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1108 set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags); 1190 set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
1109 rc = bnx2fc_initiate_abts(io_req); 1191 rc = bnx2fc_initiate_abts(io_req);
1110 } else { 1192 } else {
1111 printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " 1193 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1112 "already in abts processing\n", io_req->xid); 1194 "already in abts processing\n", io_req->xid);
1113 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1195 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1114 spin_unlock_bh(&tgt->tgt_lock); 1196 spin_unlock_bh(&tgt->tgt_lock);
@@ -1149,6 +1231,42 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1149 return rc; 1231 return rc;
1150} 1232}
1151 1233
1234void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
1235 struct fcoe_task_ctx_entry *task,
1236 u8 rx_state)
1237{
1238 struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
1239 struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
1240 u32 offset = cb_arg->offset;
1241 enum fc_rctl r_ctl = cb_arg->r_ctl;
1242 int rc = 0;
1243 struct bnx2fc_rport *tgt = orig_io_req->tgt;
1244
1245 BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x"
1246 "cmd_type = %d\n",
1247 seq_clnp_req->xid, seq_clnp_req->cmd_type);
1248
1249 if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
1250 printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
1251 seq_clnp_req->xid);
1252 goto free_cb_arg;
1253 }
1254 kref_get(&orig_io_req->refcount);
1255
1256 spin_unlock_bh(&tgt->tgt_lock);
1257 rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
1258 spin_lock_bh(&tgt->tgt_lock);
1259
1260 if (rc)
1261 printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
1262 " IO will abort\n");
1263 seq_clnp_req->cb_arg = NULL;
1264 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
1265free_cb_arg:
1266 kfree(cb_arg);
1267 return;
1268}
1269
1152void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req, 1270void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
1153 struct fcoe_task_ctx_entry *task, 1271 struct fcoe_task_ctx_entry *task,
1154 u8 num_rq) 1272 u8 num_rq)
@@ -1378,7 +1496,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
1378 fc_hdr->fh_r_ctl); 1496 fc_hdr->fh_r_ctl);
1379 } 1497 }
1380 if (!sc_cmd->SCp.ptr) { 1498 if (!sc_cmd->SCp.ptr) {
1381 printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n"); 1499 printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
1382 return; 1500 return;
1383 } 1501 }
1384 switch (io_req->fcp_status) { 1502 switch (io_req->fcp_status) {
@@ -1410,7 +1528,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
1410 io_req->on_tmf_queue = 0; 1528 io_req->on_tmf_queue = 0;
1411 } else { 1529 } else {
1412 1530
1413 printk(KERN_ALERT PFX "Command not on active_cmd_queue!\n"); 1531 printk(KERN_ERR PFX "Command not on active_cmd_queue!\n");
1414 return; 1532 return;
1415 } 1533 }
1416 1534
@@ -1597,7 +1715,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
1597 1715
1598 if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) { 1716 if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
1599 /* Invalid sense sense length. */ 1717 /* Invalid sense sense length. */
1600 printk(KERN_ALERT PFX "invalid sns length %d\n", 1718 printk(KERN_ERR PFX "invalid sns length %d\n",
1601 rq_buff_len); 1719 rq_buff_len);
1602 /* reset rq_buff_len */ 1720 /* reset rq_buff_len */
1603 rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ; 1721 rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
@@ -1780,7 +1898,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
1780 scsi_set_resid(sc_cmd, io_req->fcp_resid); 1898 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1781 break; 1899 break;
1782 default: 1900 default:
1783 printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n", 1901 printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
1784 io_req->fcp_status); 1902 io_req->fcp_status);
1785 break; 1903 break;
1786 } 1904 }
@@ -1789,14 +1907,15 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
1789 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1907 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1790} 1908}
1791 1909
1792static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, 1910int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
1793 struct bnx2fc_cmd *io_req) 1911 struct bnx2fc_cmd *io_req)
1794{ 1912{
1795 struct fcoe_task_ctx_entry *task; 1913 struct fcoe_task_ctx_entry *task;
1796 struct fcoe_task_ctx_entry *task_page; 1914 struct fcoe_task_ctx_entry *task_page;
1797 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 1915 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1798 struct fcoe_port *port = tgt->port; 1916 struct fcoe_port *port = tgt->port;
1799 struct bnx2fc_hba *hba = port->priv; 1917 struct bnx2fc_interface *interface = port->priv;
1918 struct bnx2fc_hba *hba = interface->hba;
1800 struct fc_lport *lport = port->lport; 1919 struct fc_lport *lport = port->lport;
1801 struct fcoe_dev_stats *stats; 1920 struct fcoe_dev_stats *stats;
1802 int task_idx, index; 1921 int task_idx, index;
@@ -1854,7 +1973,8 @@ static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
1854 } 1973 }
1855 1974
1856 /* Time IO req */ 1975 /* Time IO req */
1857 bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT); 1976 if (tgt->io_timeout)
1977 bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
1858 /* Obtain free SQ entry */ 1978 /* Obtain free SQ entry */
1859 bnx2fc_add_2_sq(tgt, xid); 1979 bnx2fc_add_2_sq(tgt, xid);
1860 1980
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 3e892bd66fbe..d5311b577cca 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -2,7 +2,7 @@
2 * Handles operations such as session offload/upload etc, and manages 2 * Handles operations such as session offload/upload etc, and manages
3 * session resources such as connection id and qp resources. 3 * session resources such as connection id and qp resources.
4 * 4 *
5 * Copyright (c) 2008 - 2010 Broadcom Corporation 5 * Copyright (c) 2008 - 2011 Broadcom Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -65,7 +65,8 @@ static void bnx2fc_offload_session(struct fcoe_port *port,
65{ 65{
66 struct fc_lport *lport = rdata->local_port; 66 struct fc_lport *lport = rdata->local_port;
67 struct fc_rport *rport = rdata->rport; 67 struct fc_rport *rport = rdata->rport;
68 struct bnx2fc_hba *hba = port->priv; 68 struct bnx2fc_interface *interface = port->priv;
69 struct bnx2fc_hba *hba = interface->hba;
69 int rval; 70 int rval;
70 int i = 0; 71 int i = 0;
71 72
@@ -237,7 +238,8 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
237static void bnx2fc_upload_session(struct fcoe_port *port, 238static void bnx2fc_upload_session(struct fcoe_port *port,
238 struct bnx2fc_rport *tgt) 239 struct bnx2fc_rport *tgt)
239{ 240{
240 struct bnx2fc_hba *hba = port->priv; 241 struct bnx2fc_interface *interface = port->priv;
242 struct bnx2fc_hba *hba = interface->hba;
241 243
242 BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n", 244 BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
243 tgt->num_active_ios.counter); 245 tgt->num_active_ios.counter);
@@ -316,7 +318,8 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
316{ 318{
317 319
318 struct fc_rport *rport = rdata->rport; 320 struct fc_rport *rport = rdata->rport;
319 struct bnx2fc_hba *hba = port->priv; 321 struct bnx2fc_interface *interface = port->priv;
322 struct bnx2fc_hba *hba = interface->hba;
320 struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db; 323 struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
321 struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db; 324 struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
322 325
@@ -350,6 +353,14 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
350 tgt->rq_cons_idx = 0; 353 tgt->rq_cons_idx = 0;
351 atomic_set(&tgt->num_active_ios, 0); 354 atomic_set(&tgt->num_active_ios, 0);
352 355
356 if (rdata->flags & FC_RP_FLAGS_RETRY) {
357 tgt->dev_type = TYPE_TAPE;
358 tgt->io_timeout = 0; /* use default ULP timeout */
359 } else {
360 tgt->dev_type = TYPE_DISK;
361 tgt->io_timeout = BNX2FC_IO_TIMEOUT;
362 }
363
353 /* initialize sq doorbell */ 364 /* initialize sq doorbell */
354 sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE; 365 sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE;
355 sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE << 366 sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE <<
@@ -392,7 +403,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
392 enum fc_rport_event event) 403 enum fc_rport_event event)
393{ 404{
394 struct fcoe_port *port = lport_priv(lport); 405 struct fcoe_port *port = lport_priv(lport);
395 struct bnx2fc_hba *hba = port->priv; 406 struct bnx2fc_interface *interface = port->priv;
407 struct bnx2fc_hba *hba = interface->hba;
396 struct fc_rport *rport = rdata->rport; 408 struct fc_rport *rport = rdata->rport;
397 struct fc_rport_libfc_priv *rp; 409 struct fc_rport_libfc_priv *rp;
398 struct bnx2fc_rport *tgt; 410 struct bnx2fc_rport *tgt;
@@ -403,7 +415,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
403 switch (event) { 415 switch (event) {
404 case RPORT_EV_READY: 416 case RPORT_EV_READY:
405 if (!rport) { 417 if (!rport) {
406 printk(KERN_ALERT PFX "rport is NULL: ERROR!\n"); 418 printk(KERN_ERR PFX "rport is NULL: ERROR!\n");
407 break; 419 break;
408 } 420 }
409 421
@@ -415,7 +427,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
415 * We should not come here, as lport will 427 * We should not come here, as lport will
416 * take care of fabric login 428 * take care of fabric login
417 */ 429 */
418 printk(KERN_ALERT PFX "%x - rport_event_handler ERROR\n", 430 printk(KERN_ERR PFX "%x - rport_event_handler ERROR\n",
419 rdata->ids.port_id); 431 rdata->ids.port_id);
420 break; 432 break;
421 } 433 }
@@ -483,7 +495,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
483 break; 495 break;
484 496
485 if (!rport) { 497 if (!rport) {
486 printk(KERN_ALERT PFX "%x - rport not created Yet!!\n", 498 printk(KERN_INFO PFX "%x - rport not created Yet!!\n",
487 port_id); 499 port_id);
488 break; 500 break;
489 } 501 }
@@ -537,7 +549,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
537struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, 549struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
538 u32 port_id) 550 u32 port_id)
539{ 551{
540 struct bnx2fc_hba *hba = port->priv; 552 struct bnx2fc_interface *interface = port->priv;
553 struct bnx2fc_hba *hba = interface->hba;
541 struct bnx2fc_rport *tgt; 554 struct bnx2fc_rport *tgt;
542 struct fc_rport_priv *rdata; 555 struct fc_rport_priv *rdata;
543 int i; 556 int i;
@@ -552,7 +565,7 @@ struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
552 "obtained\n"); 565 "obtained\n");
553 return tgt; 566 return tgt;
554 } else { 567 } else {
555 printk(KERN_ERR PFX "rport 0x%x " 568 BNX2FC_TGT_DBG(tgt, "rport 0x%x "
556 "is in DELETED state\n", 569 "is in DELETED state\n",
557 rdata->ids.port_id); 570 rdata->ids.port_id);
558 return NULL; 571 return NULL;
@@ -633,7 +646,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
633 tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, 646 tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
634 &tgt->sq_dma, GFP_KERNEL); 647 &tgt->sq_dma, GFP_KERNEL);
635 if (!tgt->sq) { 648 if (!tgt->sq) {
636 printk(KERN_ALERT PFX "unable to allocate SQ memory %d\n", 649 printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
637 tgt->sq_mem_size); 650 tgt->sq_mem_size);
638 goto mem_alloc_failure; 651 goto mem_alloc_failure;
639 } 652 }
@@ -646,7 +659,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
646 tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, 659 tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
647 &tgt->cq_dma, GFP_KERNEL); 660 &tgt->cq_dma, GFP_KERNEL);
648 if (!tgt->cq) { 661 if (!tgt->cq) {
649 printk(KERN_ALERT PFX "unable to allocate CQ memory %d\n", 662 printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
650 tgt->cq_mem_size); 663 tgt->cq_mem_size);
651 goto mem_alloc_failure; 664 goto mem_alloc_failure;
652 } 665 }
@@ -659,7 +672,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
659 tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, 672 tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
660 &tgt->rq_dma, GFP_KERNEL); 673 &tgt->rq_dma, GFP_KERNEL);
661 if (!tgt->rq) { 674 if (!tgt->rq) {
662 printk(KERN_ALERT PFX "unable to allocate RQ memory %d\n", 675 printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
663 tgt->rq_mem_size); 676 tgt->rq_mem_size);
664 goto mem_alloc_failure; 677 goto mem_alloc_failure;
665 } 678 }
@@ -671,7 +684,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
671 tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, 684 tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
672 &tgt->rq_pbl_dma, GFP_KERNEL); 685 &tgt->rq_pbl_dma, GFP_KERNEL);
673 if (!tgt->rq_pbl) { 686 if (!tgt->rq_pbl) {
674 printk(KERN_ALERT PFX "unable to allocate RQ PBL %d\n", 687 printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
675 tgt->rq_pbl_size); 688 tgt->rq_pbl_size);
676 goto mem_alloc_failure; 689 goto mem_alloc_failure;
677 } 690 }
@@ -697,7 +710,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
697 tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, 710 tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
698 &tgt->xferq_dma, GFP_KERNEL); 711 &tgt->xferq_dma, GFP_KERNEL);
699 if (!tgt->xferq) { 712 if (!tgt->xferq) {
700 printk(KERN_ALERT PFX "unable to allocate XFERQ %d\n", 713 printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
701 tgt->xferq_mem_size); 714 tgt->xferq_mem_size);
702 goto mem_alloc_failure; 715 goto mem_alloc_failure;
703 } 716 }
@@ -711,7 +724,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
711 tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size, 724 tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
712 &tgt->confq_dma, GFP_KERNEL); 725 &tgt->confq_dma, GFP_KERNEL);
713 if (!tgt->confq) { 726 if (!tgt->confq) {
714 printk(KERN_ALERT PFX "unable to allocate CONFQ %d\n", 727 printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
715 tgt->confq_mem_size); 728 tgt->confq_mem_size);
716 goto mem_alloc_failure; 729 goto mem_alloc_failure;
717 } 730 }
@@ -726,7 +739,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
726 tgt->confq_pbl_size, 739 tgt->confq_pbl_size,
727 &tgt->confq_pbl_dma, GFP_KERNEL); 740 &tgt->confq_pbl_dma, GFP_KERNEL);
728 if (!tgt->confq_pbl) { 741 if (!tgt->confq_pbl) {
729 printk(KERN_ALERT PFX "unable to allocate CONFQ PBL %d\n", 742 printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
730 tgt->confq_pbl_size); 743 tgt->confq_pbl_size);
731 goto mem_alloc_failure; 744 goto mem_alloc_failure;
732 } 745 }
@@ -751,7 +764,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
751 tgt->conn_db_mem_size, 764 tgt->conn_db_mem_size,
752 &tgt->conn_db_dma, GFP_KERNEL); 765 &tgt->conn_db_dma, GFP_KERNEL);
753 if (!tgt->conn_db) { 766 if (!tgt->conn_db) {
754 printk(KERN_ALERT PFX "unable to allocate conn_db %d\n", 767 printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
755 tgt->conn_db_mem_size); 768 tgt->conn_db_mem_size);
756 goto mem_alloc_failure; 769 goto mem_alloc_failure;
757 } 770 }
@@ -767,7 +780,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
767 &tgt->lcq_dma, GFP_KERNEL); 780 &tgt->lcq_dma, GFP_KERNEL);
768 781
769 if (!tgt->lcq) { 782 if (!tgt->lcq) {
770 printk(KERN_ALERT PFX "unable to allocate lcq %d\n", 783 printk(KERN_ERR PFX "unable to allocate lcq %d\n",
771 tgt->lcq_mem_size); 784 tgt->lcq_mem_size);
772 goto mem_alloc_failure; 785 goto mem_alloc_failure;
773 } 786 }
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 2e7c136bb805..27c9d65d54a9 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -128,25 +128,7 @@ struct c4_inquiry {
128 u8 reserved[2]; 128 u8 reserved[2];
129}; 129};
130 130
131struct rdac_controller { 131#define UNIQUE_ID_LEN 16
132 u8 subsys_id[SUBSYS_ID_LEN];
133 u8 slot_id[SLOT_ID_LEN];
134 int use_ms10;
135 struct kref kref;
136 struct list_head node; /* list of all controllers */
137 union {
138 struct rdac_pg_legacy legacy;
139 struct rdac_pg_expanded expanded;
140 } mode_select;
141 u8 index;
142 u8 array_name[ARRAY_LABEL_LEN];
143 spinlock_t ms_lock;
144 int ms_queued;
145 struct work_struct ms_work;
146 struct scsi_device *ms_sdev;
147 struct list_head ms_head;
148};
149
150struct c8_inquiry { 132struct c8_inquiry {
151 u8 peripheral_info; 133 u8 peripheral_info;
152 u8 page_code; /* 0xC8 */ 134 u8 page_code; /* 0xC8 */
@@ -159,12 +141,31 @@ struct c8_inquiry {
159 u8 vol_user_label_len; 141 u8 vol_user_label_len;
160 u8 vol_user_label[60]; 142 u8 vol_user_label[60];
161 u8 array_uniq_id_len; 143 u8 array_uniq_id_len;
162 u8 array_unique_id[16]; 144 u8 array_unique_id[UNIQUE_ID_LEN];
163 u8 array_user_label_len; 145 u8 array_user_label_len;
164 u8 array_user_label[60]; 146 u8 array_user_label[60];
165 u8 lun[8]; 147 u8 lun[8];
166}; 148};
167 149
150struct rdac_controller {
151 u8 array_id[UNIQUE_ID_LEN];
152 int use_ms10;
153 struct kref kref;
154 struct list_head node; /* list of all controllers */
155 union {
156 struct rdac_pg_legacy legacy;
157 struct rdac_pg_expanded expanded;
158 } mode_select;
159 u8 index;
160 u8 array_name[ARRAY_LABEL_LEN];
161 struct Scsi_Host *host;
162 spinlock_t ms_lock;
163 int ms_queued;
164 struct work_struct ms_work;
165 struct scsi_device *ms_sdev;
166 struct list_head ms_head;
167};
168
168struct c2_inquiry { 169struct c2_inquiry {
169 u8 peripheral_info; 170 u8 peripheral_info;
170 u8 page_code; /* 0xC2 */ 171 u8 page_code; /* 0xC2 */
@@ -369,16 +370,17 @@ static void release_controller(struct kref *kref)
369 kfree(ctlr); 370 kfree(ctlr);
370} 371}
371 372
372static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id, 373static struct rdac_controller *get_controller(int index, char *array_name,
373 char *array_name) 374 u8 *array_id, struct scsi_device *sdev)
374{ 375{
375 struct rdac_controller *ctlr, *tmp; 376 struct rdac_controller *ctlr, *tmp;
376 377
377 spin_lock(&list_lock); 378 spin_lock(&list_lock);
378 379
379 list_for_each_entry(tmp, &ctlr_list, node) { 380 list_for_each_entry(tmp, &ctlr_list, node) {
380 if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) && 381 if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) &&
381 (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) { 382 (tmp->index == index) &&
383 (tmp->host == sdev->host)) {
382 kref_get(&tmp->kref); 384 kref_get(&tmp->kref);
383 spin_unlock(&list_lock); 385 spin_unlock(&list_lock);
384 return tmp; 386 return tmp;
@@ -389,16 +391,11 @@ static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
389 goto done; 391 goto done;
390 392
391 /* initialize fields of controller */ 393 /* initialize fields of controller */
392 memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN); 394 memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN);
393 memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN); 395 ctlr->index = index;
396 ctlr->host = sdev->host;
394 memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN); 397 memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);
395 398
396 /* update the controller index */
397 if (slot_id[1] == 0x31)
398 ctlr->index = 0;
399 else
400 ctlr->index = 1;
401
402 kref_init(&ctlr->kref); 399 kref_init(&ctlr->kref);
403 ctlr->use_ms10 = -1; 400 ctlr->use_ms10 = -1;
404 ctlr->ms_queued = 0; 401 ctlr->ms_queued = 0;
@@ -444,7 +441,7 @@ done:
444} 441}
445 442
446static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h, 443static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
447 char *array_name) 444 char *array_name, u8 *array_id)
448{ 445{
449 int err, i; 446 int err, i;
450 struct c8_inquiry *inqp; 447 struct c8_inquiry *inqp;
@@ -463,6 +460,8 @@ static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
463 *(array_name+i) = inqp->array_user_label[(2*i)+1]; 460 *(array_name+i) = inqp->array_user_label[(2*i)+1];
464 461
465 *(array_name+ARRAY_LABEL_LEN-1) = '\0'; 462 *(array_name+ARRAY_LABEL_LEN-1) = '\0';
463 memset(array_id, 0, UNIQUE_ID_LEN);
464 memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);
466 } 465 }
467 return err; 466 return err;
468} 467}
@@ -504,16 +503,20 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
504} 503}
505 504
506static int initialize_controller(struct scsi_device *sdev, 505static int initialize_controller(struct scsi_device *sdev,
507 struct rdac_dh_data *h, char *array_name) 506 struct rdac_dh_data *h, char *array_name, u8 *array_id)
508{ 507{
509 int err; 508 int err, index;
510 struct c4_inquiry *inqp; 509 struct c4_inquiry *inqp;
511 510
512 err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h); 511 err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
513 if (err == SCSI_DH_OK) { 512 if (err == SCSI_DH_OK) {
514 inqp = &h->inq.c4; 513 inqp = &h->inq.c4;
515 h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id, 514 /* get the controller index */
516 array_name); 515 if (inqp->slot_id[1] == 0x31)
516 index = 0;
517 else
518 index = 1;
519 h->ctlr = get_controller(index, array_name, array_id, sdev);
517 if (!h->ctlr) 520 if (!h->ctlr)
518 err = SCSI_DH_RES_TEMP_UNAVAIL; 521 err = SCSI_DH_RES_TEMP_UNAVAIL;
519 } 522 }
@@ -835,6 +838,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
835 unsigned long flags; 838 unsigned long flags;
836 int err; 839 int err;
837 char array_name[ARRAY_LABEL_LEN]; 840 char array_name[ARRAY_LABEL_LEN];
841 char array_id[UNIQUE_ID_LEN];
838 842
839 scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) 843 scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
840 + sizeof(*h) , GFP_KERNEL); 844 + sizeof(*h) , GFP_KERNEL);
@@ -849,11 +853,11 @@ static int rdac_bus_attach(struct scsi_device *sdev)
849 h->lun = UNINITIALIZED_LUN; 853 h->lun = UNINITIALIZED_LUN;
850 h->state = RDAC_STATE_ACTIVE; 854 h->state = RDAC_STATE_ACTIVE;
851 855
852 err = get_lun_info(sdev, h, array_name); 856 err = get_lun_info(sdev, h, array_name, array_id);
853 if (err != SCSI_DH_OK) 857 if (err != SCSI_DH_OK)
854 goto failed; 858 goto failed;
855 859
856 err = initialize_controller(sdev, h, array_name); 860 err = initialize_controller(sdev, h, array_name, array_id);
857 if (err != SCSI_DH_OK) 861 if (err != SCSI_DH_OK)
858 goto failed; 862 goto failed;
859 863
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 204fa8d4b4ab..ba710e350ac5 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -487,6 +487,19 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
487} 487}
488 488
489/** 489/**
490 * fcoe_port_send() - Send an Ethernet-encapsulated FIP/FCoE frame
491 * @port: The FCoE port
492 * @skb: The FIP/FCoE packet to be sent
493 */
494static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb)
495{
496 if (port->fcoe_pending_queue.qlen)
497 fcoe_check_wait_queue(port->lport, skb);
498 else if (fcoe_start_io(skb))
499 fcoe_check_wait_queue(port->lport, skb);
500}
501
502/**
490 * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame 503 * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame
491 * @fip: The FCoE controller 504 * @fip: The FCoE controller
492 * @skb: The FIP packet to be sent 505 * @skb: The FIP packet to be sent
@@ -494,7 +507,7 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
494static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) 507static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
495{ 508{
496 skb->dev = fcoe_from_ctlr(fip)->netdev; 509 skb->dev = fcoe_from_ctlr(fip)->netdev;
497 dev_queue_xmit(skb); 510 fcoe_port_send(lport_priv(fip->lp), skb);
498} 511}
499 512
500/** 513/**
@@ -1257,30 +1270,20 @@ static int fcoe_cpu_callback(struct notifier_block *nfb,
1257/** 1270/**
1258 * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming 1271 * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming
1259 * command. 1272 * command.
1260 * @curr_cpu: CPU which received request
1261 * 1273 *
1262 * This routine selects next CPU based on cpumask. 1274 * This routine selects next CPU based on cpumask to distribute
1275 * incoming requests in round robin.
1263 * 1276 *
1264 * Returns: int (CPU number). Caller to verify if returned CPU is online or not. 1277 * Returns: int CPU number
1265 */ 1278 */
1266static unsigned int fcoe_select_cpu(unsigned int curr_cpu) 1279static inline unsigned int fcoe_select_cpu(void)
1267{ 1280{
1268 static unsigned int selected_cpu; 1281 static unsigned int selected_cpu;
1269 1282
1270 if (num_online_cpus() == 1) 1283 selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
1271 return curr_cpu; 1284 if (selected_cpu >= nr_cpu_ids)
1272 /* 1285 selected_cpu = cpumask_first(cpu_online_mask);
1273 * Doing following check, to skip "curr_cpu (smp_processor_id)" 1286
1274 * from selection of CPU is intentional. This is to avoid same CPU
1275 * doing post-processing of command. "curr_cpu" to just receive
1276 * incoming request in case where rx_id is UNKNOWN and all other
1277 * CPU to actually process the command(s)
1278 */
1279 do {
1280 selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
1281 if (selected_cpu >= nr_cpu_ids)
1282 selected_cpu = cpumask_first(cpu_online_mask);
1283 } while (selected_cpu == curr_cpu);
1284 return selected_cpu; 1287 return selected_cpu;
1285} 1288}
1286 1289
@@ -1350,30 +1353,26 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1350 1353
1351 fr = fcoe_dev_from_skb(skb); 1354 fr = fcoe_dev_from_skb(skb);
1352 fr->fr_dev = lport; 1355 fr->fr_dev = lport;
1353 fr->ptype = ptype;
1354 1356
1355 /* 1357 /*
1356 * In case the incoming frame's exchange is originated from 1358 * In case the incoming frame's exchange is originated from
1357 * the initiator, then received frame's exchange id is ANDed 1359 * the initiator, then received frame's exchange id is ANDed
1358 * with fc_cpu_mask bits to get the same cpu on which exchange 1360 * with fc_cpu_mask bits to get the same cpu on which exchange
1359 * was originated, otherwise just use the current cpu. 1361 * was originated, otherwise select cpu using rx exchange id
1362 * or fcoe_select_cpu().
1360 */ 1363 */
1361 if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX) 1364 if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
1362 cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask; 1365 cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
1363 else { 1366 else {
1364 cpu = smp_processor_id(); 1367 if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)
1365 1368 cpu = fcoe_select_cpu();
1366 if ((fh->fh_type == FC_TYPE_FCP) && 1369 else
1367 (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
1368 do {
1369 cpu = fcoe_select_cpu(cpu);
1370 } while (!cpu_online(cpu));
1371 } else if ((fh->fh_type == FC_TYPE_FCP) &&
1372 (ntohs(fh->fh_rx_id) != FC_XID_UNKNOWN)) {
1373 cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask; 1370 cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask;
1374 } else
1375 cpu = smp_processor_id();
1376 } 1371 }
1372
1373 if (cpu >= nr_cpu_ids)
1374 goto err;
1375
1377 fps = &per_cpu(fcoe_percpu, cpu); 1376 fps = &per_cpu(fcoe_percpu, cpu);
1378 spin_lock_bh(&fps->fcoe_rx_list.lock); 1377 spin_lock_bh(&fps->fcoe_rx_list.lock);
1379 if (unlikely(!fps->thread)) { 1378 if (unlikely(!fps->thread)) {
@@ -1572,11 +1571,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1572 1571
1573 /* send down to lld */ 1572 /* send down to lld */
1574 fr_dev(fp) = lport; 1573 fr_dev(fp) = lport;
1575 if (port->fcoe_pending_queue.qlen) 1574 fcoe_port_send(port, skb);
1576 fcoe_check_wait_queue(lport, skb);
1577 else if (fcoe_start_io(skb))
1578 fcoe_check_wait_queue(lport, skb);
1579
1580 return 0; 1575 return 0;
1581} 1576}
1582 1577
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c6f99b1d2383..ec61bdb833ac 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1219,8 +1219,8 @@ static void complete_scsi_command(struct CommandList *cp)
1219 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); 1219 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1220 break; 1220 break;
1221 case CMD_UNSOLICITED_ABORT: 1221 case CMD_UNSOLICITED_ABORT:
1222 cmd->result = DID_RESET << 16; 1222 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
1223 dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited " 1223 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
1224 "abort\n", cp); 1224 "abort\n", cp);
1225 break; 1225 break;
1226 case CMD_TIMEOUT: 1226 case CMD_TIMEOUT:
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 6d8dcd4dd06b..7f53ceaa7239 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -214,7 +214,7 @@ static void SA5_submit_command(struct ctlr_info *h,
214 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr, 214 dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
215 c->Header.Tag.lower); 215 c->Header.Tag.lower);
216 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); 216 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
217 (void) readl(h->vaddr + SA5_REQUEST_PORT_OFFSET); 217 (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
218 h->commands_outstanding++; 218 h->commands_outstanding++;
219 if (h->commands_outstanding > h->max_outstanding) 219 if (h->commands_outstanding > h->max_outstanding)
220 h->max_outstanding = h->commands_outstanding; 220 h->max_outstanding = h->commands_outstanding;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 888086c4e709..8d636301e32c 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -8778,14 +8778,14 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8778 if (rc != PCIBIOS_SUCCESSFUL) { 8778 if (rc != PCIBIOS_SUCCESSFUL) {
8779 dev_err(&pdev->dev, "Failed to save PCI config space\n"); 8779 dev_err(&pdev->dev, "Failed to save PCI config space\n");
8780 rc = -EIO; 8780 rc = -EIO;
8781 goto cleanup_nomem; 8781 goto out_msi_disable;
8782 } 8782 }
8783 8783
8784 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg))) 8784 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8785 goto cleanup_nomem; 8785 goto out_msi_disable;
8786 8786
8787 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) 8787 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8788 goto cleanup_nomem; 8788 goto out_msi_disable;
8789 8789
8790 if (ioa_cfg->sis64) 8790 if (ioa_cfg->sis64)
8791 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) 8791 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
@@ -8800,7 +8800,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8800 if (rc < 0) { 8800 if (rc < 0) {
8801 dev_err(&pdev->dev, 8801 dev_err(&pdev->dev,
8802 "Couldn't allocate enough memory for device driver!\n"); 8802 "Couldn't allocate enough memory for device driver!\n");
8803 goto cleanup_nomem; 8803 goto out_msi_disable;
8804 } 8804 }
8805 8805
8806 /* 8806 /*
@@ -8845,10 +8845,10 @@ out:
8845 8845
8846cleanup_nolog: 8846cleanup_nolog:
8847 ipr_free_mem(ioa_cfg); 8847 ipr_free_mem(ioa_cfg);
8848cleanup_nomem:
8849 iounmap(ipr_regs);
8850out_msi_disable: 8848out_msi_disable:
8851 pci_disable_msi(pdev); 8849 pci_disable_msi(pdev);
8850cleanup_nomem:
8851 iounmap(ipr_regs);
8852out_release_regions: 8852out_release_regions:
8853 pci_release_regions(pdev); 8853 pci_release_regions(pdev);
8854out_scsi_host_put: 8854out_scsi_host_put:
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index f5a0665b6773..01ff082dc34c 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -802,10 +802,8 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
802 pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask); 802 pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
803 spin_lock_bh(&pool->lock); 803 spin_lock_bh(&pool->lock);
804 ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order); 804 ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
805 if (ep) { 805 if (ep && ep->xid == xid)
806 fc_exch_hold(ep); 806 fc_exch_hold(ep);
807 WARN_ON(ep->xid != xid);
808 }
809 spin_unlock_bh(&pool->lock); 807 spin_unlock_bh(&pool->lock);
810 } 808 }
811 return ep; 809 return ep;
@@ -2465,8 +2463,11 @@ int fc_setup_exch_mgr(void)
2465 2463
2466 fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue"); 2464 fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
2467 if (!fc_exch_workqueue) 2465 if (!fc_exch_workqueue)
2468 return -ENOMEM; 2466 goto err;
2469 return 0; 2467 return 0;
2468err:
2469 kmem_cache_destroy(fc_em_cachep);
2470 return -ENOMEM;
2470} 2471}
2471 2472
2472/** 2473/**
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 9cd2149519ac..afb63c843144 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -498,7 +498,7 @@ crc_err:
498 stats = per_cpu_ptr(lport->dev_stats, get_cpu()); 498 stats = per_cpu_ptr(lport->dev_stats, get_cpu());
499 stats->ErrorFrames++; 499 stats->ErrorFrames++;
500 /* per cpu count, not total count, but OK for limit */ 500 /* per cpu count, not total count, but OK for limit */
501 if (stats->InvalidCRCCount++ < 5) 501 if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT)
502 printk(KERN_WARNING "libfc: CRC error on data " 502 printk(KERN_WARNING "libfc: CRC error on data "
503 "frame for port (%6.6x)\n", 503 "frame for port (%6.6x)\n",
504 lport->port_id); 504 lport->port_id);
@@ -690,7 +690,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
690} 690}
691 691
692/** 692/**
693 * fc_fcp_abts_resp() - Send an ABTS response 693 * fc_fcp_abts_resp() - Receive an ABTS response
694 * @fsp: The FCP packet that is being aborted 694 * @fsp: The FCP packet that is being aborted
695 * @fp: The response frame 695 * @fp: The response frame
696 */ 696 */
@@ -730,7 +730,7 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
730} 730}
731 731
732/** 732/**
733 * fc_fcp_recv() - Reveive an FCP frame 733 * fc_fcp_recv() - Receive an FCP frame
734 * @seq: The sequence the frame is on 734 * @seq: The sequence the frame is on
735 * @fp: The received frame 735 * @fp: The received frame
736 * @arg: The related FCP packet 736 * @arg: The related FCP packet
@@ -1084,6 +1084,7 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
1084 rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv); 1084 rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
1085 if (unlikely(rc)) { 1085 if (unlikely(rc)) {
1086 spin_lock_irqsave(&si->scsi_queue_lock, flags); 1086 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1087 fsp->cmd->SCp.ptr = NULL;
1087 list_del(&fsp->list); 1088 list_del(&fsp->list);
1088 spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 1089 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1089 } 1090 }
@@ -1645,12 +1646,10 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1645 struct fc_seq *seq; 1646 struct fc_seq *seq;
1646 struct fcp_srr *srr; 1647 struct fcp_srr *srr;
1647 struct fc_frame *fp; 1648 struct fc_frame *fp;
1648 u8 cdb_op;
1649 unsigned int rec_tov; 1649 unsigned int rec_tov;
1650 1650
1651 rport = fsp->rport; 1651 rport = fsp->rport;
1652 rpriv = rport->dd_data; 1652 rpriv = rport->dd_data;
1653 cdb_op = fsp->cdb_cmd.fc_cdb[0];
1654 1653
1655 if (!(rpriv->flags & FC_RP_FLAGS_RETRY) || 1654 if (!(rpriv->flags & FC_RP_FLAGS_RETRY) ||
1656 rpriv->rp_state != RPORT_ST_READY) 1655 rpriv->rp_state != RPORT_ST_READY)
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e008b1673507..e55ed9cf23fb 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1352,7 +1352,6 @@ static void fc_lport_timeout(struct work_struct *work)
1352 WARN_ON(1); 1352 WARN_ON(1);
1353 break; 1353 break;
1354 case LPORT_ST_READY: 1354 case LPORT_ST_READY:
1355 WARN_ON(1);
1356 break; 1355 break;
1357 case LPORT_ST_RESET: 1356 case LPORT_ST_RESET:
1358 break; 1357 break;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 874e29d9533f..f84084bba2f0 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -849,6 +849,9 @@ static struct domain_device *sas_ex_discover_expander(
849 849
850 res = sas_discover_expander(child); 850 res = sas_discover_expander(child);
851 if (res) { 851 if (res) {
852 spin_lock_irq(&parent->port->dev_list_lock);
853 list_del(&child->dev_list_node);
854 spin_unlock_irq(&parent->port->dev_list_lock);
852 kfree(child); 855 kfree(child);
853 return NULL; 856 return NULL;
854 } 857 }
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 8ec2c86a49d4..c088a36d1f33 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -20,6 +20,11 @@
20 *******************************************************************/ 20 *******************************************************************/
21 21
22#include <scsi/scsi_host.h> 22#include <scsi/scsi_host.h>
23
24#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
25#define CONFIG_SCSI_LPFC_DEBUG_FS
26#endif
27
23struct lpfc_sli2_slim; 28struct lpfc_sli2_slim;
24 29
25#define LPFC_PCI_DEV_LP 0x1 30#define LPFC_PCI_DEV_LP 0x1
@@ -465,9 +470,10 @@ enum intr_type_t {
465struct unsol_rcv_ct_ctx { 470struct unsol_rcv_ct_ctx {
466 uint32_t ctxt_id; 471 uint32_t ctxt_id;
467 uint32_t SID; 472 uint32_t SID;
468 uint32_t oxid;
469 uint32_t flags; 473 uint32_t flags;
470#define UNSOL_VALID 0x00000001 474#define UNSOL_VALID 0x00000001
475 uint16_t oxid;
476 uint16_t rxid;
471}; 477};
472 478
473#define LPFC_USER_LINK_SPEED_AUTO 0 /* auto select (default)*/ 479#define LPFC_USER_LINK_SPEED_AUTO 0 /* auto select (default)*/
@@ -674,6 +680,9 @@ struct lpfc_hba {
674 uint32_t cfg_enable_rrq; 680 uint32_t cfg_enable_rrq;
675 uint32_t cfg_topology; 681 uint32_t cfg_topology;
676 uint32_t cfg_link_speed; 682 uint32_t cfg_link_speed;
683#define LPFC_FCF_FOV 1 /* Fast fcf failover */
684#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */
685 uint32_t cfg_fcf_failover_policy;
677 uint32_t cfg_cr_delay; 686 uint32_t cfg_cr_delay;
678 uint32_t cfg_cr_count; 687 uint32_t cfg_cr_count;
679 uint32_t cfg_multi_ring_support; 688 uint32_t cfg_multi_ring_support;
@@ -845,9 +854,13 @@ struct lpfc_hba {
845 /* iDiag debugfs sub-directory */ 854 /* iDiag debugfs sub-directory */
846 struct dentry *idiag_root; 855 struct dentry *idiag_root;
847 struct dentry *idiag_pci_cfg; 856 struct dentry *idiag_pci_cfg;
857 struct dentry *idiag_bar_acc;
848 struct dentry *idiag_que_info; 858 struct dentry *idiag_que_info;
849 struct dentry *idiag_que_acc; 859 struct dentry *idiag_que_acc;
850 struct dentry *idiag_drb_acc; 860 struct dentry *idiag_drb_acc;
861 struct dentry *idiag_ctl_acc;
862 struct dentry *idiag_mbx_acc;
863 struct dentry *idiag_ext_acc;
851#endif 864#endif
852 865
853 /* Used for deferred freeing of ELS data buffers */ 866 /* Used for deferred freeing of ELS data buffers */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 135a53baa735..2542f1f8bf86 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -755,6 +755,47 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
755} 755}
756 756
757/** 757/**
758 * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register for readyness
759 * @phba: lpfc_hba pointer.
760 *
761 * Description:
762 * SLI4 interface type-2 device to wait on the sliport status register for
763 * the readyness after performing a firmware reset.
764 *
765 * Returns:
766 * zero for success
767 **/
768static int
769lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
770{
771 struct lpfc_register portstat_reg;
772 int i;
773
774
775 lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
776 &portstat_reg.word0);
777
778 /* wait for the SLI port firmware ready after firmware reset */
779 for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
780 msleep(10);
781 lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
782 &portstat_reg.word0);
783 if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
784 continue;
785 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
786 continue;
787 if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
788 continue;
789 break;
790 }
791
792 if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
793 return 0;
794 else
795 return -EIO;
796}
797
798/**
758 * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc 799 * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
759 * @phba: lpfc_hba pointer. 800 * @phba: lpfc_hba pointer.
760 * 801 *
@@ -769,6 +810,7 @@ static ssize_t
769lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) 810lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
770{ 811{
771 struct completion online_compl; 812 struct completion online_compl;
813 struct pci_dev *pdev = phba->pcidev;
772 uint32_t reg_val; 814 uint32_t reg_val;
773 int status = 0; 815 int status = 0;
774 int rc; 816 int rc;
@@ -781,6 +823,14 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
781 LPFC_SLI_INTF_IF_TYPE_2)) 823 LPFC_SLI_INTF_IF_TYPE_2))
782 return -EPERM; 824 return -EPERM;
783 825
826 if (!pdev->is_physfn)
827 return -EPERM;
828
829 /* Disable SR-IOV virtual functions if enabled */
830 if (phba->cfg_sriov_nr_virtfn) {
831 pci_disable_sriov(pdev);
832 phba->cfg_sriov_nr_virtfn = 0;
833 }
784 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 834 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
785 835
786 if (status != 0) 836 if (status != 0)
@@ -805,7 +855,10 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
805 readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); 855 readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
806 856
807 /* delay driver action following IF_TYPE_2 reset */ 857 /* delay driver action following IF_TYPE_2 reset */
808 msleep(100); 858 rc = lpfc_sli4_pdev_status_reg_wait(phba);
859
860 if (rc)
861 return -EIO;
809 862
810 init_completion(&online_compl); 863 init_completion(&online_compl);
811 rc = lpfc_workq_post_event(phba, &status, &online_compl, 864 rc = lpfc_workq_post_event(phba, &status, &online_compl,
@@ -895,6 +948,10 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
895 948
896 if (!phba->cfg_enable_hba_reset) 949 if (!phba->cfg_enable_hba_reset)
897 return -EACCES; 950 return -EACCES;
951
952 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
953 "3050 lpfc_board_mode set to %s\n", buf);
954
898 init_completion(&online_compl); 955 init_completion(&online_compl);
899 956
900 if(strncmp(buf, "online", sizeof("online") - 1) == 0) { 957 if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
@@ -1290,6 +1347,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
1290 if (phba->sli_rev == LPFC_SLI_REV4) 1347 if (phba->sli_rev == LPFC_SLI_REV4)
1291 val = 0; 1348 val = 0;
1292 1349
1350 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1351 "3051 lpfc_poll changed from %d to %d\n",
1352 phba->cfg_poll, val);
1353
1293 spin_lock_irq(&phba->hbalock); 1354 spin_lock_irq(&phba->hbalock);
1294 1355
1295 old_val = phba->cfg_poll; 1356 old_val = phba->cfg_poll;
@@ -1414,80 +1475,10 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
1414 struct Scsi_Host *shost = class_to_shost(dev); 1475 struct Scsi_Host *shost = class_to_shost(dev);
1415 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1476 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1416 struct lpfc_hba *phba = vport->phba; 1477 struct lpfc_hba *phba = vport->phba;
1417 struct pci_dev *pdev = phba->pcidev; 1478 uint16_t max_nr_virtfn;
1418 union lpfc_sli4_cfg_shdr *shdr;
1419 uint32_t shdr_status, shdr_add_status;
1420 LPFC_MBOXQ_t *mboxq;
1421 struct lpfc_mbx_get_prof_cfg *get_prof_cfg;
1422 struct lpfc_rsrc_desc_pcie *desc;
1423 uint32_t max_nr_virtfn;
1424 uint32_t desc_count;
1425 int length, rc, i;
1426
1427 if ((phba->sli_rev < LPFC_SLI_REV4) ||
1428 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
1429 LPFC_SLI_INTF_IF_TYPE_2))
1430 return -EPERM;
1431
1432 if (!pdev->is_physfn)
1433 return snprintf(buf, PAGE_SIZE, "%d\n", 0);
1434
1435 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1436 if (!mboxq)
1437 return -ENOMEM;
1438
1439 /* get the maximum number of virtfn support by physfn */
1440 length = (sizeof(struct lpfc_mbx_get_prof_cfg) -
1441 sizeof(struct lpfc_sli4_cfg_mhdr));
1442 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
1443 LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG,
1444 length, LPFC_SLI4_MBX_EMBED);
1445 shdr = (union lpfc_sli4_cfg_shdr *)
1446 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
1447 bf_set(lpfc_mbox_hdr_pf_num, &shdr->request,
1448 phba->sli4_hba.iov.pf_number + 1);
1449
1450 get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg;
1451 bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request,
1452 LPFC_CFG_TYPE_CURRENT_ACTIVE);
1453
1454 rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
1455 lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
1456
1457 if (rc != MBX_TIMEOUT) {
1458 /* check return status */
1459 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1460 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
1461 &shdr->response);
1462 if (shdr_status || shdr_add_status || rc)
1463 goto error_out;
1464
1465 } else
1466 goto error_out;
1467
1468 desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count;
1469
1470 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
1471 desc = (struct lpfc_rsrc_desc_pcie *)
1472 &get_prof_cfg->u.response.prof_cfg.desc[i];
1473 if (LPFC_RSRC_DESC_TYPE_PCIE ==
1474 bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
1475 max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn,
1476 desc);
1477 break;
1478 }
1479 }
1480
1481 if (i < LPFC_RSRC_DESC_MAX_NUM) {
1482 if (rc != MBX_TIMEOUT)
1483 mempool_free(mboxq, phba->mbox_mem_pool);
1484 return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
1485 }
1486 1479
1487error_out: 1480 max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
1488 if (rc != MBX_TIMEOUT) 1481 return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
1489 mempool_free(mboxq, phba->mbox_mem_pool);
1490 return -EIO;
1491} 1482}
1492 1483
1493/** 1484/**
@@ -1605,6 +1596,9 @@ static int \
1605lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \ 1596lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
1606{ \ 1597{ \
1607 if (val >= minval && val <= maxval) {\ 1598 if (val >= minval && val <= maxval) {\
1599 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
1600 "3052 lpfc_" #attr " changed from %d to %d\n", \
1601 phba->cfg_##attr, val); \
1608 phba->cfg_##attr = val;\ 1602 phba->cfg_##attr = val;\
1609 return 0;\ 1603 return 0;\
1610 }\ 1604 }\
@@ -1762,6 +1756,9 @@ static int \
1762lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ 1756lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
1763{ \ 1757{ \
1764 if (val >= minval && val <= maxval) {\ 1758 if (val >= minval && val <= maxval) {\
1759 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
1760 "3053 lpfc_" #attr " changed from %d to %d\n", \
1761 vport->cfg_##attr, val); \
1765 vport->cfg_##attr = val;\ 1762 vport->cfg_##attr = val;\
1766 return 0;\ 1763 return 0;\
1767 }\ 1764 }\
@@ -2196,6 +2193,9 @@ lpfc_param_show(enable_npiv);
2196lpfc_param_init(enable_npiv, 1, 0, 1); 2193lpfc_param_init(enable_npiv, 1, 0, 1);
2197static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); 2194static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
2198 2195
2196LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
2197 "FCF Fast failover=1 Priority failover=2");
2198
2199int lpfc_enable_rrq; 2199int lpfc_enable_rrq;
2200module_param(lpfc_enable_rrq, int, S_IRUGO); 2200module_param(lpfc_enable_rrq, int, S_IRUGO);
2201MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality"); 2201MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
@@ -2678,6 +2678,9 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
2678 if (nolip) 2678 if (nolip)
2679 return strlen(buf); 2679 return strlen(buf);
2680 2680
2681 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2682 "3054 lpfc_topology changed from %d to %d\n",
2683 prev_val, val);
2681 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); 2684 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
2682 if (err) { 2685 if (err) {
2683 phba->cfg_topology = prev_val; 2686 phba->cfg_topology = prev_val;
@@ -3101,6 +3104,10 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
3101 if (sscanf(val_buf, "%i", &val) != 1) 3104 if (sscanf(val_buf, "%i", &val) != 1)
3102 return -EINVAL; 3105 return -EINVAL;
3103 3106
3107 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3108 "3055 lpfc_link_speed changed from %d to %d %s\n",
3109 phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
3110
3104 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || 3111 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
3105 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || 3112 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
3106 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || 3113 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
@@ -3678,7 +3685,9 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
3678# - Default will result in registering capabilities for all profiles. 3685# - Default will result in registering capabilities for all profiles.
3679# 3686#
3680*/ 3687*/
3681unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION; 3688unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
3689 SHOST_DIX_TYPE0_PROTECTION |
3690 SHOST_DIX_TYPE1_PROTECTION;
3682 3691
3683module_param(lpfc_prot_mask, uint, S_IRUGO); 3692module_param(lpfc_prot_mask, uint, S_IRUGO);
3684MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); 3693MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
@@ -3769,6 +3778,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
3769 &dev_attr_lpfc_fdmi_on, 3778 &dev_attr_lpfc_fdmi_on,
3770 &dev_attr_lpfc_max_luns, 3779 &dev_attr_lpfc_max_luns,
3771 &dev_attr_lpfc_enable_npiv, 3780 &dev_attr_lpfc_enable_npiv,
3781 &dev_attr_lpfc_fcf_failover_policy,
3772 &dev_attr_lpfc_enable_rrq, 3782 &dev_attr_lpfc_enable_rrq,
3773 &dev_attr_nport_evt_cnt, 3783 &dev_attr_nport_evt_cnt,
3774 &dev_attr_board_mode, 3784 &dev_attr_board_mode,
@@ -4989,6 +4999,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4989 lpfc_link_speed_init(phba, lpfc_link_speed); 4999 lpfc_link_speed_init(phba, lpfc_link_speed);
4990 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 5000 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
4991 lpfc_enable_npiv_init(phba, lpfc_enable_npiv); 5001 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
5002 lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
4992 lpfc_enable_rrq_init(phba, lpfc_enable_rrq); 5003 lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
4993 lpfc_use_msi_init(phba, lpfc_use_msi); 5004 lpfc_use_msi_init(phba, lpfc_use_msi);
4994 lpfc_fcp_imax_init(phba, lpfc_fcp_imax); 5005 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 7fb0ba4cbfa7..6760c69f5253 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -42,6 +42,7 @@
42#include "lpfc.h" 42#include "lpfc.h"
43#include "lpfc_logmsg.h" 43#include "lpfc_logmsg.h"
44#include "lpfc_crtn.h" 44#include "lpfc_crtn.h"
45#include "lpfc_debugfs.h"
45#include "lpfc_vport.h" 46#include "lpfc_vport.h"
46#include "lpfc_version.h" 47#include "lpfc_version.h"
47 48
@@ -960,8 +961,10 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
960 evt_dat->immed_dat].oxid, 961 evt_dat->immed_dat].oxid,
961 phba->ct_ctx[ 962 phba->ct_ctx[
962 evt_dat->immed_dat].SID); 963 evt_dat->immed_dat].SID);
964 phba->ct_ctx[evt_dat->immed_dat].rxid =
965 piocbq->iocb.ulpContext;
963 phba->ct_ctx[evt_dat->immed_dat].oxid = 966 phba->ct_ctx[evt_dat->immed_dat].oxid =
964 piocbq->iocb.ulpContext; 967 piocbq->iocb.unsli3.rcvsli3.ox_id;
965 phba->ct_ctx[evt_dat->immed_dat].SID = 968 phba->ct_ctx[evt_dat->immed_dat].SID =
966 piocbq->iocb.un.rcvels.remoteID; 969 piocbq->iocb.un.rcvels.remoteID;
967 phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID; 970 phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
@@ -1312,7 +1315,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1312 rc = IOCB_ERROR; 1315 rc = IOCB_ERROR;
1313 goto issue_ct_rsp_exit; 1316 goto issue_ct_rsp_exit;
1314 } 1317 }
1315 icmd->ulpContext = phba->ct_ctx[tag].oxid; 1318 icmd->ulpContext = phba->ct_ctx[tag].rxid;
1319 icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
1316 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID); 1320 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1317 if (!ndlp) { 1321 if (!ndlp) {
1318 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 1322 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
@@ -1337,9 +1341,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1337 goto issue_ct_rsp_exit; 1341 goto issue_ct_rsp_exit;
1338 } 1342 }
1339 1343
1340 icmd->un.ulpWord[3] = ndlp->nlp_rpi; 1344 icmd->un.ulpWord[3] =
1341 if (phba->sli_rev == LPFC_SLI_REV4)
1342 icmd->ulpContext =
1343 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 1345 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
1344 1346
1345 /* The exchange is done, mark the entry as invalid */ 1347 /* The exchange is done, mark the entry as invalid */
@@ -1351,8 +1353,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1351 1353
1352 /* Xmit CT response on exchange <xid> */ 1354 /* Xmit CT response on exchange <xid> */
1353 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1355 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1354 "2722 Xmit CT response on exchange x%x Data: x%x x%x\n", 1356 "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
1355 icmd->ulpContext, icmd->ulpIoTag, phba->link_state); 1357 icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
1356 1358
1357 ctiocb->iocb_cmpl = NULL; 1359 ctiocb->iocb_cmpl = NULL;
1358 ctiocb->iocb_flag |= LPFC_IO_LIBDFC; 1360 ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
@@ -1471,13 +1473,12 @@ send_mgmt_rsp_exit:
1471/** 1473/**
1472 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode 1474 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
1473 * @phba: Pointer to HBA context object. 1475 * @phba: Pointer to HBA context object.
1474 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1475 * 1476 *
1476 * This function is responsible for preparing driver for diag loopback 1477 * This function is responsible for preparing driver for diag loopback
1477 * on device. 1478 * on device.
1478 */ 1479 */
1479static int 1480static int
1480lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job) 1481lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
1481{ 1482{
1482 struct lpfc_vport **vports; 1483 struct lpfc_vport **vports;
1483 struct Scsi_Host *shost; 1484 struct Scsi_Host *shost;
@@ -1521,7 +1522,6 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
1521/** 1522/**
1522 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode 1523 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1523 * @phba: Pointer to HBA context object. 1524 * @phba: Pointer to HBA context object.
1524 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1525 * 1525 *
1526 * This function is responsible for driver exit processing of setting up 1526 * This function is responsible for driver exit processing of setting up
1527 * diag loopback mode on device. 1527 * diag loopback mode on device.
@@ -1567,7 +1567,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1567 uint32_t link_flags; 1567 uint32_t link_flags;
1568 uint32_t timeout; 1568 uint32_t timeout;
1569 LPFC_MBOXQ_t *pmboxq; 1569 LPFC_MBOXQ_t *pmboxq;
1570 int mbxstatus; 1570 int mbxstatus = MBX_SUCCESS;
1571 int i = 0; 1571 int i = 0;
1572 int rc = 0; 1572 int rc = 0;
1573 1573
@@ -1586,7 +1586,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1586 goto job_error; 1586 goto job_error;
1587 } 1587 }
1588 1588
1589 rc = lpfc_bsg_diag_mode_enter(phba, job); 1589 rc = lpfc_bsg_diag_mode_enter(phba);
1590 if (rc) 1590 if (rc)
1591 goto job_error; 1591 goto job_error;
1592 1592
@@ -1741,7 +1741,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1741 uint32_t link_flags, timeout, req_len, alloc_len; 1741 uint32_t link_flags, timeout, req_len, alloc_len;
1742 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback; 1742 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1743 LPFC_MBOXQ_t *pmboxq = NULL; 1743 LPFC_MBOXQ_t *pmboxq = NULL;
1744 int mbxstatus, i, rc = 0; 1744 int mbxstatus = MBX_SUCCESS, i, rc = 0;
1745 1745
1746 /* no data to return just the return code */ 1746 /* no data to return just the return code */
1747 job->reply->reply_payload_rcv_len = 0; 1747 job->reply->reply_payload_rcv_len = 0;
@@ -1758,7 +1758,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1758 goto job_error; 1758 goto job_error;
1759 } 1759 }
1760 1760
1761 rc = lpfc_bsg_diag_mode_enter(phba, job); 1761 rc = lpfc_bsg_diag_mode_enter(phba);
1762 if (rc) 1762 if (rc)
1763 goto job_error; 1763 goto job_error;
1764 1764
@@ -1982,7 +1982,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
1982 goto job_error; 1982 goto job_error;
1983 } 1983 }
1984 1984
1985 rc = lpfc_bsg_diag_mode_enter(phba, job); 1985 rc = lpfc_bsg_diag_mode_enter(phba);
1986 if (rc) 1986 if (rc)
1987 goto job_error; 1987 goto job_error;
1988 1988
@@ -3178,6 +3178,11 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3178 "(x%x/x%x) complete bsg job done, bsize:%d\n", 3178 "(x%x/x%x) complete bsg job done, bsize:%d\n",
3179 phba->mbox_ext_buf_ctx.nembType, 3179 phba->mbox_ext_buf_ctx.nembType,
3180 phba->mbox_ext_buf_ctx.mboxType, size); 3180 phba->mbox_ext_buf_ctx.mboxType, size);
3181 lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
3182 phba->mbox_ext_buf_ctx.nembType,
3183 phba->mbox_ext_buf_ctx.mboxType,
3184 dma_ebuf, sta_pos_addr,
3185 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3181 } else 3186 } else
3182 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3187 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3183 3188
@@ -3430,6 +3435,10 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3430 "ext_buf_cnt:%d\n", ext_buf_cnt); 3435 "ext_buf_cnt:%d\n", ext_buf_cnt);
3431 } 3436 }
3432 3437
3438 /* before dma descriptor setup */
3439 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3440 sta_pre_addr, dmabuf, ext_buf_cnt);
3441
3433 /* reject non-embedded mailbox command with none external buffer */ 3442 /* reject non-embedded mailbox command with none external buffer */
3434 if (ext_buf_cnt == 0) { 3443 if (ext_buf_cnt == 0) {
3435 rc = -EPERM; 3444 rc = -EPERM;
@@ -3477,6 +3486,10 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3477 } 3486 }
3478 } 3487 }
3479 3488
3489 /* after dma descriptor setup */
3490 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3491 sta_pos_addr, dmabuf, ext_buf_cnt);
3492
3480 /* construct base driver mbox command */ 3493 /* construct base driver mbox command */
3481 pmb = &pmboxq->u.mb; 3494 pmb = &pmboxq->u.mb;
3482 pmbx = (uint8_t *)dmabuf->virt; 3495 pmbx = (uint8_t *)dmabuf->virt;
@@ -3511,7 +3524,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3511 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3524 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3512 "2947 Issued SLI_CONFIG ext-buffer " 3525 "2947 Issued SLI_CONFIG ext-buffer "
3513 "maibox command, rc:x%x\n", rc); 3526 "maibox command, rc:x%x\n", rc);
3514 return 1; 3527 return SLI_CONFIG_HANDLED;
3515 } 3528 }
3516 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3529 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3517 "2948 Failed to issue SLI_CONFIG ext-buffer " 3530 "2948 Failed to issue SLI_CONFIG ext-buffer "
@@ -3549,7 +3562,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3549 LPFC_MBOXQ_t *pmboxq = NULL; 3562 LPFC_MBOXQ_t *pmboxq = NULL;
3550 MAILBOX_t *pmb; 3563 MAILBOX_t *pmb;
3551 uint8_t *mbx; 3564 uint8_t *mbx;
3552 int rc = 0, i; 3565 int rc = SLI_CONFIG_NOT_HANDLED, i;
3553 3566
3554 mbox_req = 3567 mbox_req =
3555 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; 3568 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
@@ -3591,12 +3604,20 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3591 "ext_buf_cnt:%d\n", ext_buf_cnt); 3604 "ext_buf_cnt:%d\n", ext_buf_cnt);
3592 } 3605 }
3593 3606
3607 /* before dma buffer descriptor setup */
3608 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
3609 sta_pre_addr, dmabuf, ext_buf_cnt);
3610
3594 if (ext_buf_cnt == 0) 3611 if (ext_buf_cnt == 0)
3595 return -EPERM; 3612 return -EPERM;
3596 3613
3597 /* for the first external buffer */ 3614 /* for the first external buffer */
3598 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf); 3615 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3599 3616
3617 /* after dma descriptor setup */
3618 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
3619 sta_pos_addr, dmabuf, ext_buf_cnt);
3620
3600 /* log for looking forward */ 3621 /* log for looking forward */
3601 for (i = 1; i < ext_buf_cnt; i++) { 3622 for (i = 1; i < ext_buf_cnt; i++) {
3602 if (nemb_tp == nemb_mse) 3623 if (nemb_tp == nemb_mse)
@@ -3660,7 +3681,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3660 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3681 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3661 "2955 Issued SLI_CONFIG ext-buffer " 3682 "2955 Issued SLI_CONFIG ext-buffer "
3662 "maibox command, rc:x%x\n", rc); 3683 "maibox command, rc:x%x\n", rc);
3663 return 1; 3684 return SLI_CONFIG_HANDLED;
3664 } 3685 }
3665 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3686 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3666 "2956 Failed to issue SLI_CONFIG ext-buffer " 3687 "2956 Failed to issue SLI_CONFIG ext-buffer "
@@ -3668,6 +3689,11 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3668 rc = -EPIPE; 3689 rc = -EPIPE;
3669 } 3690 }
3670 3691
3692 /* wait for additoinal external buffers */
3693 job->reply->result = 0;
3694 job->job_done(job);
3695 return SLI_CONFIG_HANDLED;
3696
3671job_error: 3697job_error:
3672 if (pmboxq) 3698 if (pmboxq)
3673 mempool_free(pmboxq, phba->mbox_mem_pool); 3699 mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -3840,6 +3866,12 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
3840 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list, 3866 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
3841 struct lpfc_dmabuf, list); 3867 struct lpfc_dmabuf, list);
3842 list_del_init(&dmabuf->list); 3868 list_del_init(&dmabuf->list);
3869
3870 /* after dma buffer descriptor setup */
3871 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
3872 mbox_rd, dma_ebuf, sta_pos_addr,
3873 dmabuf, index);
3874
3843 pbuf = (uint8_t *)dmabuf->virt; 3875 pbuf = (uint8_t *)dmabuf->virt;
3844 job->reply->reply_payload_rcv_len = 3876 job->reply->reply_payload_rcv_len =
3845 sg_copy_from_buffer(job->reply_payload.sg_list, 3877 sg_copy_from_buffer(job->reply_payload.sg_list,
@@ -3922,6 +3954,11 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
3922 dmabuf); 3954 dmabuf);
3923 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list); 3955 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3924 3956
3957 /* after write dma buffer */
3958 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
3959 mbox_wr, dma_ebuf, sta_pos_addr,
3960 dmabuf, index);
3961
3925 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) { 3962 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
3926 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3963 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3927 "2968 SLI_CONFIG ext-buffer wr all %d " 3964 "2968 SLI_CONFIG ext-buffer wr all %d "
@@ -3959,7 +3996,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
3959 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3996 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3960 "2969 Issued SLI_CONFIG ext-buffer " 3997 "2969 Issued SLI_CONFIG ext-buffer "
3961 "maibox command, rc:x%x\n", rc); 3998 "maibox command, rc:x%x\n", rc);
3962 return 1; 3999 return SLI_CONFIG_HANDLED;
3963 } 4000 }
3964 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 4001 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3965 "2970 Failed to issue SLI_CONFIG ext-buffer " 4002 "2970 Failed to issue SLI_CONFIG ext-buffer "
@@ -4039,14 +4076,14 @@ lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
4039 struct lpfc_dmabuf *dmabuf) 4076 struct lpfc_dmabuf *dmabuf)
4040{ 4077{
4041 struct dfc_mbox_req *mbox_req; 4078 struct dfc_mbox_req *mbox_req;
4042 int rc; 4079 int rc = SLI_CONFIG_NOT_HANDLED;
4043 4080
4044 mbox_req = 4081 mbox_req =
4045 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; 4082 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4046 4083
4047 /* mbox command with/without single external buffer */ 4084 /* mbox command with/without single external buffer */
4048 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0) 4085 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4049 return SLI_CONFIG_NOT_HANDLED; 4086 return rc;
4050 4087
4051 /* mbox command and first external buffer */ 4088 /* mbox command and first external buffer */
4052 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) { 4089 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
@@ -4249,7 +4286,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4249 * mailbox extension size 4286 * mailbox extension size
4250 */ 4287 */
4251 if ((transmit_length > receive_length) || 4288 if ((transmit_length > receive_length) ||
4252 (transmit_length > MAILBOX_EXT_SIZE)) { 4289 (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4253 rc = -ERANGE; 4290 rc = -ERANGE;
4254 goto job_done; 4291 goto job_done;
4255 } 4292 }
@@ -4272,7 +4309,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4272 /* receive length cannot be greater than mailbox 4309 /* receive length cannot be greater than mailbox
4273 * extension size 4310 * extension size
4274 */ 4311 */
4275 if (receive_length > MAILBOX_EXT_SIZE) { 4312 if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4276 rc = -ERANGE; 4313 rc = -ERANGE;
4277 goto job_done; 4314 goto job_done;
4278 } 4315 }
@@ -4306,7 +4343,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4306 bde = (struct ulp_bde64 *)&pmb->un.varWords[4]; 4343 bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4307 4344
4308 /* bde size cannot be greater than mailbox ext size */ 4345 /* bde size cannot be greater than mailbox ext size */
4309 if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) { 4346 if (bde->tus.f.bdeSize >
4347 BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4310 rc = -ERANGE; 4348 rc = -ERANGE;
4311 goto job_done; 4349 goto job_done;
4312 } 4350 }
@@ -4332,7 +4370,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4332 * mailbox extension size 4370 * mailbox extension size
4333 */ 4371 */
4334 if ((receive_length == 0) || 4372 if ((receive_length == 0) ||
4335 (receive_length > MAILBOX_EXT_SIZE)) { 4373 (receive_length >
4374 BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4336 rc = -ERANGE; 4375 rc = -ERANGE;
4337 goto job_done; 4376 goto job_done;
4338 } 4377 }
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index fc20c247f36b..a6db6aef1331 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -235,9 +235,11 @@ int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
235void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *); 235void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
236void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *); 236void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
237uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *); 237uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
238void lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *, uint16_t);
238int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t); 239int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
239void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t); 240void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
240int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t); 241int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
242void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
241 243
242int lpfc_mem_alloc(struct lpfc_hba *, int align); 244int lpfc_mem_alloc(struct lpfc_hba *, int align);
243void lpfc_mem_free(struct lpfc_hba *); 245void lpfc_mem_free(struct lpfc_hba *);
@@ -371,6 +373,10 @@ extern struct lpfc_hbq_init *lpfc_hbq_defs[];
371/* SLI4 if_type 2 externs. */ 373/* SLI4 if_type 2 externs. */
372int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *); 374int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
373int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *); 375int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
376int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t,
377 uint16_t *, uint16_t *);
378int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t,
379 uint16_t *, uint16_t *);
374 380
375/* externs BlockGuard */ 381/* externs BlockGuard */
376extern char *_dump_buf_data; 382extern char *_dump_buf_data;
@@ -432,10 +438,16 @@ void lpfc_handle_rrq_active(struct lpfc_hba *);
432int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *); 438int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
433int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, 439int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
434 uint16_t, uint16_t, uint16_t); 440 uint16_t, uint16_t, uint16_t);
441uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *, uint16_t);
435void lpfc_cleanup_wt_rrqs(struct lpfc_hba *); 442void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
436void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *); 443void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
437struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t, 444struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
438 uint32_t); 445 uint32_t);
446void lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *, enum nemb_type,
447 enum mbox_type, enum dma_type, enum sta_type,
448 struct lpfc_dmabuf *, uint32_t);
449void lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *, MAILBOX_t *);
439int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *); 450int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
440/* functions to support SR-IOV */ 451/* functions to support SR-IOV */
441int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int); 452int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
453uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 30b25c5fdd7e..a0424dd90e40 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -48,6 +48,7 @@
48#include "lpfc_version.h" 48#include "lpfc_version.h"
49#include "lpfc_compat.h" 49#include "lpfc_compat.h"
50#include "lpfc_debugfs.h" 50#include "lpfc_debugfs.h"
51#include "lpfc_bsg.h"
51 52
52#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 53#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
53/* 54/*
@@ -135,7 +136,11 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
135 int i, index, len, enable; 136 int i, index, len, enable;
136 uint32_t ms; 137 uint32_t ms;
137 struct lpfc_debugfs_trc *dtp; 138 struct lpfc_debugfs_trc *dtp;
138 char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE]; 139 char *buffer;
140
141 buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
142 if (!buffer)
143 return 0;
139 144
140 enable = lpfc_debugfs_enable; 145 enable = lpfc_debugfs_enable;
141 lpfc_debugfs_enable = 0; 146 lpfc_debugfs_enable = 0;
@@ -167,6 +172,8 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
167 } 172 }
168 173
169 lpfc_debugfs_enable = enable; 174 lpfc_debugfs_enable = enable;
175 kfree(buffer);
176
170 return len; 177 return len;
171} 178}
172 179
@@ -195,8 +202,11 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
195 int i, index, len, enable; 202 int i, index, len, enable;
196 uint32_t ms; 203 uint32_t ms;
197 struct lpfc_debugfs_trc *dtp; 204 struct lpfc_debugfs_trc *dtp;
198 char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE]; 205 char *buffer;
199 206
207 buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
208 if (!buffer)
209 return 0;
200 210
201 enable = lpfc_debugfs_enable; 211 enable = lpfc_debugfs_enable;
202 lpfc_debugfs_enable = 0; 212 lpfc_debugfs_enable = 0;
@@ -228,6 +238,8 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
228 } 238 }
229 239
230 lpfc_debugfs_enable = enable; 240 lpfc_debugfs_enable = enable;
241 kfree(buffer);
242
231 return len; 243 return len;
232} 244}
233 245
@@ -378,7 +390,11 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
378 int len = 0; 390 int len = 0;
379 int i, off; 391 int i, off;
380 uint32_t *ptr; 392 uint32_t *ptr;
381 char buffer[1024]; 393 char *buffer;
394
395 buffer = kmalloc(1024, GFP_KERNEL);
396 if (!buffer)
397 return 0;
382 398
383 off = 0; 399 off = 0;
384 spin_lock_irq(&phba->hbalock); 400 spin_lock_irq(&phba->hbalock);
@@ -407,6 +423,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
407 } 423 }
408 424
409 spin_unlock_irq(&phba->hbalock); 425 spin_unlock_irq(&phba->hbalock);
426 kfree(buffer);
427
410 return len; 428 return len;
411} 429}
412 430
@@ -1327,8 +1345,8 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
1327 return 0; 1345 return 0;
1328 1346
1329 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { 1347 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
1330 where = idiag.cmd.data[0]; 1348 where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
1331 count = idiag.cmd.data[1]; 1349 count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
1332 } else 1350 } else
1333 return 0; 1351 return 0;
1334 1352
@@ -1373,6 +1391,11 @@ pcicfg_browse:
1373 len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, 1391 len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
1374 "%08x ", u32val); 1392 "%08x ", u32val);
1375 offset += sizeof(uint32_t); 1393 offset += sizeof(uint32_t);
1394 if (offset >= LPFC_PCI_CFG_SIZE) {
1395 len += snprintf(pbuffer+len,
1396 LPFC_PCI_CFG_SIZE-len, "\n");
1397 break;
1398 }
1376 index -= sizeof(uint32_t); 1399 index -= sizeof(uint32_t);
1377 if (!index) 1400 if (!index)
1378 len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, 1401 len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
@@ -1385,8 +1408,11 @@ pcicfg_browse:
1385 } 1408 }
1386 1409
1387 /* Set up the offset for next portion of pci cfg read */ 1410 /* Set up the offset for next portion of pci cfg read */
1388 idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE; 1411 if (index == 0) {
1389 if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE) 1412 idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE;
1413 if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE)
1414 idiag.offset.last_rd = 0;
1415 } else
1390 idiag.offset.last_rd = 0; 1416 idiag.offset.last_rd = 0;
1391 1417
1392 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); 1418 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
@@ -1439,8 +1465,8 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
1439 if (rc != LPFC_PCI_CFG_RD_CMD_ARG) 1465 if (rc != LPFC_PCI_CFG_RD_CMD_ARG)
1440 goto error_out; 1466 goto error_out;
1441 /* Read command from PCI config space, set up command fields */ 1467 /* Read command from PCI config space, set up command fields */
1442 where = idiag.cmd.data[0]; 1468 where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
1443 count = idiag.cmd.data[1]; 1469 count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
1444 if (count == LPFC_PCI_CFG_BROWSE) { 1470 if (count == LPFC_PCI_CFG_BROWSE) {
1445 if (where % sizeof(uint32_t)) 1471 if (where % sizeof(uint32_t))
1446 goto error_out; 1472 goto error_out;
@@ -1475,9 +1501,9 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
1475 if (rc != LPFC_PCI_CFG_WR_CMD_ARG) 1501 if (rc != LPFC_PCI_CFG_WR_CMD_ARG)
1476 goto error_out; 1502 goto error_out;
1477 /* Write command to PCI config space, read-modify-write */ 1503 /* Write command to PCI config space, read-modify-write */
1478 where = idiag.cmd.data[0]; 1504 where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
1479 count = idiag.cmd.data[1]; 1505 count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
1480 value = idiag.cmd.data[2]; 1506 value = idiag.cmd.data[IDIAG_PCICFG_VALUE_INDX];
1481 /* Sanity checks */ 1507 /* Sanity checks */
1482 if ((count != sizeof(uint8_t)) && 1508 if ((count != sizeof(uint8_t)) &&
1483 (count != sizeof(uint16_t)) && 1509 (count != sizeof(uint16_t)) &&
@@ -1570,6 +1596,292 @@ error_out:
1570} 1596}
1571 1597
1572/** 1598/**
1599 * lpfc_idiag_baracc_read - idiag debugfs pci bar access read
1600 * @file: The file pointer to read from.
1601 * @buf: The buffer to copy the data to.
1602 * @nbytes: The number of bytes to read.
1603 * @ppos: The position in the file to start reading from.
1604 *
1605 * Description:
1606 * This routine reads data from the @phba pci bar memory mapped space
1607 * according to the idiag command, and copies to user @buf.
1608 *
1609 * Returns:
1610 * This function returns the amount of data that was read (this could be less
1611 * than @nbytes if the end of the file was reached) or a negative error value.
1612 **/
1613static ssize_t
1614lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes,
1615 loff_t *ppos)
1616{
1617 struct lpfc_debug *debug = file->private_data;
1618 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
1619 int offset_label, offset, offset_run, len = 0, index;
1620 int bar_num, acc_range, bar_size;
1621 char *pbuffer;
1622 void __iomem *mem_mapped_bar;
1623 uint32_t if_type;
1624 struct pci_dev *pdev;
1625 uint32_t u32val;
1626
1627 pdev = phba->pcidev;
1628 if (!pdev)
1629 return 0;
1630
1631 /* This is a user read operation */
1632 debug->op = LPFC_IDIAG_OP_RD;
1633
1634 if (!debug->buffer)
1635 debug->buffer = kmalloc(LPFC_PCI_BAR_RD_BUF_SIZE, GFP_KERNEL);
1636 if (!debug->buffer)
1637 return 0;
1638 pbuffer = debug->buffer;
1639
1640 if (*ppos)
1641 return 0;
1642
1643 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) {
1644 bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX];
1645 offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX];
1646 acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX];
1647 bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX];
1648 } else
1649 return 0;
1650
1651 if (acc_range == 0)
1652 return 0;
1653
1654 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1655 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
1656 if (bar_num == IDIAG_BARACC_BAR_0)
1657 mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
1658 else if (bar_num == IDIAG_BARACC_BAR_1)
1659 mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p;
1660 else if (bar_num == IDIAG_BARACC_BAR_2)
1661 mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p;
1662 else
1663 return 0;
1664 } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
1665 if (bar_num == IDIAG_BARACC_BAR_0)
1666 mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
1667 else
1668 return 0;
1669 } else
1670 return 0;
1671
1672 /* Read single PCI bar space register */
1673 if (acc_range == SINGLE_WORD) {
1674 offset_run = offset;
1675 u32val = readl(mem_mapped_bar + offset_run);
1676 len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
1677 "%05x: %08x\n", offset_run, u32val);
1678 } else
1679 goto baracc_browse;
1680
1681 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
1682
1683baracc_browse:
1684
1685 /* Browse all PCI bar space registers */
1686 offset_label = idiag.offset.last_rd;
1687 offset_run = offset_label;
1688
1689 /* Read PCI bar memory mapped space */
1690 len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
1691 "%05x: ", offset_label);
1692 index = LPFC_PCI_BAR_RD_SIZE;
1693 while (index > 0) {
1694 u32val = readl(mem_mapped_bar + offset_run);
1695 len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
1696 "%08x ", u32val);
1697 offset_run += sizeof(uint32_t);
1698 if (acc_range == LPFC_PCI_BAR_BROWSE) {
1699 if (offset_run >= bar_size) {
1700 len += snprintf(pbuffer+len,
1701 LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
1702 break;
1703 }
1704 } else {
1705 if (offset_run >= offset +
1706 (acc_range * sizeof(uint32_t))) {
1707 len += snprintf(pbuffer+len,
1708 LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
1709 break;
1710 }
1711 }
1712 index -= sizeof(uint32_t);
1713 if (!index)
1714 len += snprintf(pbuffer+len,
1715 LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
1716 else if (!(index % (8 * sizeof(uint32_t)))) {
1717 offset_label += (8 * sizeof(uint32_t));
1718 len += snprintf(pbuffer+len,
1719 LPFC_PCI_BAR_RD_BUF_SIZE-len,
1720 "\n%05x: ", offset_label);
1721 }
1722 }
1723
1724 /* Set up the offset for next portion of pci bar read */
1725 if (index == 0) {
1726 idiag.offset.last_rd += LPFC_PCI_BAR_RD_SIZE;
1727 if (acc_range == LPFC_PCI_BAR_BROWSE) {
1728 if (idiag.offset.last_rd >= bar_size)
1729 idiag.offset.last_rd = 0;
1730 } else {
1731 if (offset_run >= offset +
1732 (acc_range * sizeof(uint32_t)))
1733 idiag.offset.last_rd = offset;
1734 }
1735 } else {
1736 if (acc_range == LPFC_PCI_BAR_BROWSE)
1737 idiag.offset.last_rd = 0;
1738 else
1739 idiag.offset.last_rd = offset;
1740 }
1741
1742 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
1743}
1744
1745/**
1746 * lpfc_idiag_baracc_write - Syntax check and set up idiag bar access commands
1747 * @file: The file pointer to read from.
1748 * @buf: The buffer to copy the user data from.
1749 * @nbytes: The number of bytes to get.
1750 * @ppos: The position in the file to start reading from.
1751 *
1752 * This routine get the debugfs idiag command struct from user space and
1753 * then perform the syntax check for PCI bar memory mapped space read or
1754 * write command accordingly. In the case of PCI bar memory mapped space
1755 * read command, it sets up the command in the idiag command struct for
1756 * the debugfs read operation. In the case of PCI bar memorpy mapped space
1757 * write operation, it executes the write operation into the PCI bar memory
1758 * mapped space accordingly.
1759 *
1760 * It returns the @nbytges passing in from debugfs user space when successful.
1761 * In case of error conditions, it returns proper error code back to the user
1762 * space.
1763 */
1764static ssize_t
1765lpfc_idiag_baracc_write(struct file *file, const char __user *buf,
1766 size_t nbytes, loff_t *ppos)
1767{
1768 struct lpfc_debug *debug = file->private_data;
1769 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
1770 uint32_t bar_num, bar_size, offset, value, acc_range;
1771 struct pci_dev *pdev;
1772 void __iomem *mem_mapped_bar;
1773 uint32_t if_type;
1774 uint32_t u32val;
1775 int rc;
1776
1777 pdev = phba->pcidev;
1778 if (!pdev)
1779 return -EFAULT;
1780
1781 /* This is a user write operation */
1782 debug->op = LPFC_IDIAG_OP_WR;
1783
1784 rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
1785 if (rc < 0)
1786 return rc;
1787
1788 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1789 bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX];
1790
1791 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
1792 if ((bar_num != IDIAG_BARACC_BAR_0) &&
1793 (bar_num != IDIAG_BARACC_BAR_1) &&
1794 (bar_num != IDIAG_BARACC_BAR_2))
1795 goto error_out;
1796 } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
1797 if (bar_num != IDIAG_BARACC_BAR_0)
1798 goto error_out;
1799 } else
1800 goto error_out;
1801
1802 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
1803 if (bar_num == IDIAG_BARACC_BAR_0) {
1804 idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
1805 LPFC_PCI_IF0_BAR0_SIZE;
1806 mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
1807 } else if (bar_num == IDIAG_BARACC_BAR_1) {
1808 idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
1809 LPFC_PCI_IF0_BAR1_SIZE;
1810 mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p;
1811 } else if (bar_num == IDIAG_BARACC_BAR_2) {
1812 idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
1813 LPFC_PCI_IF0_BAR2_SIZE;
1814 mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p;
1815 } else
1816 goto error_out;
1817 } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
1818 if (bar_num == IDIAG_BARACC_BAR_0) {
1819 idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
1820 LPFC_PCI_IF2_BAR0_SIZE;
1821 mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
1822 } else
1823 goto error_out;
1824 } else
1825 goto error_out;
1826
1827 offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX];
1828 if (offset % sizeof(uint32_t))
1829 goto error_out;
1830
1831 bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX];
1832 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) {
1833 /* Sanity check on PCI config read command line arguments */
1834 if (rc != LPFC_PCI_BAR_RD_CMD_ARG)
1835 goto error_out;
1836 acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX];
1837 if (acc_range == LPFC_PCI_BAR_BROWSE) {
1838 if (offset > bar_size - sizeof(uint32_t))
1839 goto error_out;
1840 /* Starting offset to browse */
1841 idiag.offset.last_rd = offset;
1842 } else if (acc_range > SINGLE_WORD) {
1843 if (offset + acc_range * sizeof(uint32_t) > bar_size)
1844 goto error_out;
1845 /* Starting offset to browse */
1846 idiag.offset.last_rd = offset;
1847 } else if (acc_range != SINGLE_WORD)
1848 goto error_out;
1849 } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR ||
1850 idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST ||
1851 idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) {
1852 /* Sanity check on PCI bar write command line arguments */
1853 if (rc != LPFC_PCI_BAR_WR_CMD_ARG)
1854 goto error_out;
1855 /* Write command to PCI bar space, read-modify-write */
1856 acc_range = SINGLE_WORD;
1857 value = idiag.cmd.data[IDIAG_BARACC_REG_VAL_INDX];
1858 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR) {
1859 writel(value, mem_mapped_bar + offset);
1860 readl(mem_mapped_bar + offset);
1861 }
1862 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST) {
1863 u32val = readl(mem_mapped_bar + offset);
1864 u32val |= value;
1865 writel(u32val, mem_mapped_bar + offset);
1866 readl(mem_mapped_bar + offset);
1867 }
1868 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) {
1869 u32val = readl(mem_mapped_bar + offset);
1870 u32val &= ~value;
1871 writel(u32val, mem_mapped_bar + offset);
1872 readl(mem_mapped_bar + offset);
1873 }
1874 } else
1875 /* All other opecodes are illegal for now */
1876 goto error_out;
1877
1878 return nbytes;
1879error_out:
1880 memset(&idiag, 0, sizeof(idiag));
1881 return -EINVAL;
1882}
1883
1884/**
1573 * lpfc_idiag_queinfo_read - idiag debugfs read queue information 1885 * lpfc_idiag_queinfo_read - idiag debugfs read queue information
1574 * @file: The file pointer to read from. 1886 * @file: The file pointer to read from.
1575 * @buf: The buffer to copy the data to. 1887 * @buf: The buffer to copy the data to.
@@ -1871,8 +2183,8 @@ lpfc_idiag_queacc_read(struct file *file, char __user *buf, size_t nbytes,
1871 return 0; 2183 return 0;
1872 2184
1873 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) { 2185 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
1874 index = idiag.cmd.data[2]; 2186 index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX];
1875 count = idiag.cmd.data[3]; 2187 count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX];
1876 pque = (struct lpfc_queue *)idiag.ptr_private; 2188 pque = (struct lpfc_queue *)idiag.ptr_private;
1877 } else 2189 } else
1878 return 0; 2190 return 0;
@@ -1944,12 +2256,12 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
1944 return rc; 2256 return rc;
1945 2257
1946 /* Get and sanity check on command feilds */ 2258 /* Get and sanity check on command feilds */
1947 quetp = idiag.cmd.data[0]; 2259 quetp = idiag.cmd.data[IDIAG_QUEACC_QUETP_INDX];
1948 queid = idiag.cmd.data[1]; 2260 queid = idiag.cmd.data[IDIAG_QUEACC_QUEID_INDX];
1949 index = idiag.cmd.data[2]; 2261 index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX];
1950 count = idiag.cmd.data[3]; 2262 count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX];
1951 offset = idiag.cmd.data[4]; 2263 offset = idiag.cmd.data[IDIAG_QUEACC_OFFST_INDX];
1952 value = idiag.cmd.data[5]; 2264 value = idiag.cmd.data[IDIAG_QUEACC_VALUE_INDX];
1953 2265
1954 /* Sanity check on command line arguments */ 2266 /* Sanity check on command line arguments */
1955 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR || 2267 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR ||
@@ -2218,7 +2530,7 @@ lpfc_idiag_drbacc_read(struct file *file, char __user *buf, size_t nbytes,
2218 return 0; 2530 return 0;
2219 2531
2220 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD) 2532 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD)
2221 drb_reg_id = idiag.cmd.data[0]; 2533 drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX];
2222 else 2534 else
2223 return 0; 2535 return 0;
2224 2536
@@ -2257,7 +2569,7 @@ lpfc_idiag_drbacc_write(struct file *file, const char __user *buf,
2257{ 2569{
2258 struct lpfc_debug *debug = file->private_data; 2570 struct lpfc_debug *debug = file->private_data;
2259 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; 2571 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
2260 uint32_t drb_reg_id, value, reg_val; 2572 uint32_t drb_reg_id, value, reg_val = 0;
2261 void __iomem *drb_reg; 2573 void __iomem *drb_reg;
2262 int rc; 2574 int rc;
2263 2575
@@ -2269,8 +2581,8 @@ lpfc_idiag_drbacc_write(struct file *file, const char __user *buf,
2269 return rc; 2581 return rc;
2270 2582
2271 /* Sanity check on command line arguments */ 2583 /* Sanity check on command line arguments */
2272 drb_reg_id = idiag.cmd.data[0]; 2584 drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX];
2273 value = idiag.cmd.data[1]; 2585 value = idiag.cmd.data[IDIAG_DRBACC_VALUE_INDX];
2274 2586
2275 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR || 2587 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR ||
2276 idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST || 2588 idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST ||
@@ -2330,6 +2642,679 @@ error_out:
2330 return -EINVAL; 2642 return -EINVAL;
2331} 2643}
2332 2644
2645/**
2646 * lpfc_idiag_ctlacc_read_reg - idiag debugfs read a control registers
2647 * @phba: The pointer to hba structure.
2648 * @pbuffer: The pointer to the buffer to copy the data to.
2649 * @len: The lenght of bytes to copied.
2650 * @drbregid: The id to doorbell registers.
2651 *
2652 * Description:
2653 * This routine reads a control register and copies its content to the
2654 * user buffer pointed to by @pbuffer.
2655 *
2656 * Returns:
2657 * This function returns the amount of data that was copied into @pbuffer.
2658 **/
2659static int
2660lpfc_idiag_ctlacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
2661 int len, uint32_t ctlregid)
2662{
2663
2664 if (!pbuffer)
2665 return 0;
2666
2667 switch (ctlregid) {
2668 case LPFC_CTL_PORT_SEM:
2669 len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
2670 "Port SemReg: 0x%08x\n",
2671 readl(phba->sli4_hba.conf_regs_memmap_p +
2672 LPFC_CTL_PORT_SEM_OFFSET));
2673 break;
2674 case LPFC_CTL_PORT_STA:
2675 len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
2676 "Port StaReg: 0x%08x\n",
2677 readl(phba->sli4_hba.conf_regs_memmap_p +
2678 LPFC_CTL_PORT_STA_OFFSET));
2679 break;
2680 case LPFC_CTL_PORT_CTL:
2681 len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
2682 "Port CtlReg: 0x%08x\n",
2683 readl(phba->sli4_hba.conf_regs_memmap_p +
2684 LPFC_CTL_PORT_CTL_OFFSET));
2685 break;
2686 case LPFC_CTL_PORT_ER1:
2687 len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
2688 "Port Er1Reg: 0x%08x\n",
2689 readl(phba->sli4_hba.conf_regs_memmap_p +
2690 LPFC_CTL_PORT_ER1_OFFSET));
2691 break;
2692 case LPFC_CTL_PORT_ER2:
2693 len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
2694 "Port Er2Reg: 0x%08x\n",
2695 readl(phba->sli4_hba.conf_regs_memmap_p +
2696 LPFC_CTL_PORT_ER2_OFFSET));
2697 break;
2698 case LPFC_CTL_PDEV_CTL:
2699 len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
2700 "PDev CtlReg: 0x%08x\n",
2701 readl(phba->sli4_hba.conf_regs_memmap_p +
2702 LPFC_CTL_PDEV_CTL_OFFSET));
2703 break;
2704 default:
2705 break;
2706 }
2707 return len;
2708}
2709
2710/**
2711 * lpfc_idiag_ctlacc_read - idiag debugfs read port and device control register
2712 * @file: The file pointer to read from.
2713 * @buf: The buffer to copy the data to.
2714 * @nbytes: The number of bytes to read.
2715 * @ppos: The position in the file to start reading from.
2716 *
2717 * Description:
2718 * This routine reads data from the @phba port and device registers according
2719 * to the idiag command, and copies to user @buf.
2720 *
2721 * Returns:
2722 * This function returns the amount of data that was read (this could be less
2723 * than @nbytes if the end of the file was reached) or a negative error value.
2724 **/
2725static ssize_t
2726lpfc_idiag_ctlacc_read(struct file *file, char __user *buf, size_t nbytes,
2727 loff_t *ppos)
2728{
2729 struct lpfc_debug *debug = file->private_data;
2730 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
2731 uint32_t ctl_reg_id, i;
2732 char *pbuffer;
2733 int len = 0;
2734
2735 /* This is a user read operation */
2736 debug->op = LPFC_IDIAG_OP_RD;
2737
2738 if (!debug->buffer)
2739 debug->buffer = kmalloc(LPFC_CTL_ACC_BUF_SIZE, GFP_KERNEL);
2740 if (!debug->buffer)
2741 return 0;
2742 pbuffer = debug->buffer;
2743
2744 if (*ppos)
2745 return 0;
2746
2747 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD)
2748 ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX];
2749 else
2750 return 0;
2751
2752 if (ctl_reg_id == LPFC_CTL_ACC_ALL)
2753 for (i = 1; i <= LPFC_CTL_MAX; i++)
2754 len = lpfc_idiag_ctlacc_read_reg(phba,
2755 pbuffer, len, i);
2756 else
2757 len = lpfc_idiag_ctlacc_read_reg(phba,
2758 pbuffer, len, ctl_reg_id);
2759
2760 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
2761}
2762
2763/**
2764 * lpfc_idiag_ctlacc_write - Syntax check and set up idiag ctlacc commands
2765 * @file: The file pointer to read from.
2766 * @buf: The buffer to copy the user data from.
2767 * @nbytes: The number of bytes to get.
2768 * @ppos: The position in the file to start reading from.
2769 *
2770 * This routine get the debugfs idiag command struct from user space and then
2771 * perform the syntax check for port and device control register read (dump)
2772 * or write (set) command accordingly.
2773 *
2774 * It returns the @nbytges passing in from debugfs user space when successful.
2775 * In case of error conditions, it returns proper error code back to the user
2776 * space.
2777 **/
2778static ssize_t
2779lpfc_idiag_ctlacc_write(struct file *file, const char __user *buf,
2780 size_t nbytes, loff_t *ppos)
2781{
2782 struct lpfc_debug *debug = file->private_data;
2783 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
2784 uint32_t ctl_reg_id, value, reg_val = 0;
2785 void __iomem *ctl_reg;
2786 int rc;
2787
2788 /* This is a user write operation */
2789 debug->op = LPFC_IDIAG_OP_WR;
2790
2791 rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
2792 if (rc < 0)
2793 return rc;
2794
2795 /* Sanity check on command line arguments */
2796 ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX];
2797 value = idiag.cmd.data[IDIAG_CTLACC_VALUE_INDX];
2798
2799 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR ||
2800 idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST ||
2801 idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
2802 if (rc != LPFC_CTL_ACC_WR_CMD_ARG)
2803 goto error_out;
2804 if (ctl_reg_id > LPFC_CTL_MAX)
2805 goto error_out;
2806 } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD) {
2807 if (rc != LPFC_CTL_ACC_RD_CMD_ARG)
2808 goto error_out;
2809 if ((ctl_reg_id > LPFC_CTL_MAX) &&
2810 (ctl_reg_id != LPFC_CTL_ACC_ALL))
2811 goto error_out;
2812 } else
2813 goto error_out;
2814
2815 /* Perform the write access operation */
2816 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR ||
2817 idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST ||
2818 idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
2819 switch (ctl_reg_id) {
2820 case LPFC_CTL_PORT_SEM:
2821 ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
2822 LPFC_CTL_PORT_SEM_OFFSET;
2823 break;
2824 case LPFC_CTL_PORT_STA:
2825 ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
2826 LPFC_CTL_PORT_STA_OFFSET;
2827 break;
2828 case LPFC_CTL_PORT_CTL:
2829 ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
2830 LPFC_CTL_PORT_CTL_OFFSET;
2831 break;
2832 case LPFC_CTL_PORT_ER1:
2833 ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
2834 LPFC_CTL_PORT_ER1_OFFSET;
2835 break;
2836 case LPFC_CTL_PORT_ER2:
2837 ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
2838 LPFC_CTL_PORT_ER2_OFFSET;
2839 break;
2840 case LPFC_CTL_PDEV_CTL:
2841 ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
2842 LPFC_CTL_PDEV_CTL_OFFSET;
2843 break;
2844 default:
2845 goto error_out;
2846 }
2847
2848 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR)
2849 reg_val = value;
2850 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST) {
2851 reg_val = readl(ctl_reg);
2852 reg_val |= value;
2853 }
2854 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
2855 reg_val = readl(ctl_reg);
2856 reg_val &= ~value;
2857 }
2858 writel(reg_val, ctl_reg);
2859 readl(ctl_reg); /* flush */
2860 }
2861 return nbytes;
2862
2863error_out:
2864 /* Clean out command structure on command error out */
2865 memset(&idiag, 0, sizeof(idiag));
2866 return -EINVAL;
2867}
2868
2869/**
2870 * lpfc_idiag_mbxacc_get_setup - idiag debugfs get mailbox access setup
2871 * @phba: Pointer to HBA context object.
2872 * @pbuffer: Pointer to data buffer.
2873 *
2874 * Description:
2875 * This routine gets the driver mailbox access debugfs setup information.
2876 *
2877 * Returns:
2878 * This function returns the amount of data that was read (this could be less
2879 * than @nbytes if the end of the file was reached) or a negative error value.
2880 **/
2881static int
2882lpfc_idiag_mbxacc_get_setup(struct lpfc_hba *phba, char *pbuffer)
2883{
2884 uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd;
2885 int len = 0;
2886
2887 mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
2888 mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
2889 mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
2890 mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
2891
2892 len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
2893 "mbx_dump_map: 0x%08x\n", mbx_dump_map);
2894 len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
2895 "mbx_dump_cnt: %04d\n", mbx_dump_cnt);
2896 len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
2897 "mbx_word_cnt: %04d\n", mbx_word_cnt);
2898 len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
2899 "mbx_mbox_cmd: 0x%02x\n", mbx_mbox_cmd);
2900
2901 return len;
2902}
2903
2904/**
2905 * lpfc_idiag_mbxacc_read - idiag debugfs read on mailbox access
2906 * @file: The file pointer to read from.
2907 * @buf: The buffer to copy the data to.
2908 * @nbytes: The number of bytes to read.
2909 * @ppos: The position in the file to start reading from.
2910 *
2911 * Description:
2912 * This routine reads data from the @phba driver mailbox access debugfs setup
2913 * information.
2914 *
2915 * Returns:
2916 * This function returns the amount of data that was read (this could be less
2917 * than @nbytes if the end of the file was reached) or a negative error value.
2918 **/
2919static ssize_t
2920lpfc_idiag_mbxacc_read(struct file *file, char __user *buf, size_t nbytes,
2921 loff_t *ppos)
2922{
2923 struct lpfc_debug *debug = file->private_data;
2924 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
2925 char *pbuffer;
2926 int len = 0;
2927
2928 /* This is a user read operation */
2929 debug->op = LPFC_IDIAG_OP_RD;
2930
2931 if (!debug->buffer)
2932 debug->buffer = kmalloc(LPFC_MBX_ACC_BUF_SIZE, GFP_KERNEL);
2933 if (!debug->buffer)
2934 return 0;
2935 pbuffer = debug->buffer;
2936
2937 if (*ppos)
2938 return 0;
2939
2940 if ((idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP) &&
2941 (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP))
2942 return 0;
2943
2944 len = lpfc_idiag_mbxacc_get_setup(phba, pbuffer);
2945
2946 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
2947}
2948
2949/**
2950 * lpfc_idiag_mbxacc_write - Syntax check and set up idiag mbxacc commands
2951 * @file: The file pointer to read from.
2952 * @buf: The buffer to copy the user data from.
2953 * @nbytes: The number of bytes to get.
2954 * @ppos: The position in the file to start reading from.
2955 *
2956 * This routine get the debugfs idiag command struct from user space and then
2957 * perform the syntax check for driver mailbox command (dump) and sets up the
2958 * necessary states in the idiag command struct accordingly.
2959 *
2960 * It returns the @nbytges passing in from debugfs user space when successful.
2961 * In case of error conditions, it returns proper error code back to the user
2962 * space.
2963 **/
2964static ssize_t
2965lpfc_idiag_mbxacc_write(struct file *file, const char __user *buf,
2966 size_t nbytes, loff_t *ppos)
2967{
2968 struct lpfc_debug *debug = file->private_data;
2969 uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd;
2970 int rc;
2971
2972 /* This is a user write operation */
2973 debug->op = LPFC_IDIAG_OP_WR;
2974
2975 rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
2976 if (rc < 0)
2977 return rc;
2978
2979 /* Sanity check on command line arguments */
2980 mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
2981 mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
2982 mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
2983 mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
2984
2985 if (idiag.cmd.opcode == LPFC_IDIAG_CMD_MBXACC_DP) {
2986 if (!(mbx_dump_map & LPFC_MBX_DMP_MBX_ALL))
2987 goto error_out;
2988 if ((mbx_dump_map & ~LPFC_MBX_DMP_MBX_ALL) &&
2989 (mbx_dump_map != LPFC_MBX_DMP_ALL))
2990 goto error_out;
2991 if (mbx_word_cnt > sizeof(MAILBOX_t))
2992 goto error_out;
2993 } else if (idiag.cmd.opcode == LPFC_IDIAG_BSG_MBXACC_DP) {
2994 if (!(mbx_dump_map & LPFC_BSG_DMP_MBX_ALL))
2995 goto error_out;
2996 if ((mbx_dump_map & ~LPFC_BSG_DMP_MBX_ALL) &&
2997 (mbx_dump_map != LPFC_MBX_DMP_ALL))
2998 goto error_out;
2999 if (mbx_word_cnt > (BSG_MBOX_SIZE)/4)
3000 goto error_out;
3001 if (mbx_mbox_cmd != 0x9b)
3002 goto error_out;
3003 } else
3004 goto error_out;
3005
3006 if (mbx_word_cnt == 0)
3007 goto error_out;
3008 if (rc != LPFC_MBX_DMP_ARG)
3009 goto error_out;
3010 if (mbx_mbox_cmd & ~0xff)
3011 goto error_out;
3012
3013 /* condition for stop mailbox dump */
3014 if (mbx_dump_cnt == 0)
3015 goto reset_out;
3016
3017 return nbytes;
3018
3019reset_out:
3020 /* Clean out command structure on command error out */
3021 memset(&idiag, 0, sizeof(idiag));
3022 return nbytes;
3023
3024error_out:
3025 /* Clean out command structure on command error out */
3026 memset(&idiag, 0, sizeof(idiag));
3027 return -EINVAL;
3028}
3029
3030/**
3031 * lpfc_idiag_extacc_avail_get - get the available extents information
3032 * @phba: pointer to lpfc hba data structure.
3033 * @pbuffer: pointer to internal buffer.
3034 * @len: length into the internal buffer data has been copied.
3035 *
3036 * Description:
3037 * This routine is to get the available extent information.
3038 *
3039 * Returns:
3040 * overall lenth of the data read into the internal buffer.
3041 **/
3042static int
3043lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
3044{
3045 uint16_t ext_cnt, ext_size;
3046
3047 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3048 "\nAvailable Extents Information:\n");
3049
3050 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3051 "\tPort Available VPI extents: ");
3052 lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI,
3053 &ext_cnt, &ext_size);
3054 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3055 "Count %3d, Size %3d\n", ext_cnt, ext_size);
3056
3057 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3058 "\tPort Available VFI extents: ");
3059 lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI,
3060 &ext_cnt, &ext_size);
3061 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3062 "Count %3d, Size %3d\n", ext_cnt, ext_size);
3063
3064 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3065 "\tPort Available RPI extents: ");
3066 lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI,
3067 &ext_cnt, &ext_size);
3068 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3069 "Count %3d, Size %3d\n", ext_cnt, ext_size);
3070
3071 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3072 "\tPort Available XRI extents: ");
3073 lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
3074 &ext_cnt, &ext_size);
3075 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3076 "Count %3d, Size %3d\n", ext_cnt, ext_size);
3077
3078 return len;
3079}
3080
3081/**
3082 * lpfc_idiag_extacc_alloc_get - get the allocated extents information
3083 * @phba: pointer to lpfc hba data structure.
3084 * @pbuffer: pointer to internal buffer.
3085 * @len: length into the internal buffer data has been copied.
3086 *
3087 * Description:
3088 * This routine is to get the allocated extent information.
3089 *
3090 * Returns:
3091 * overall lenth of the data read into the internal buffer.
3092 **/
3093static int
3094lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len)
3095{
3096 uint16_t ext_cnt, ext_size;
3097 int rc;
3098
3099 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3100 "\nAllocated Extents Information:\n");
3101
3102 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3103 "\tHost Allocated VPI extents: ");
3104 rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VPI,
3105 &ext_cnt, &ext_size);
3106 if (!rc)
3107 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3108 "Port %d Extent %3d, Size %3d\n",
3109 phba->brd_no, ext_cnt, ext_size);
3110 else
3111 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3112 "N/A\n");
3113
3114 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3115 "\tHost Allocated VFI extents: ");
3116 rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VFI,
3117 &ext_cnt, &ext_size);
3118 if (!rc)
3119 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3120 "Port %d Extent %3d, Size %3d\n",
3121 phba->brd_no, ext_cnt, ext_size);
3122 else
3123 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3124 "N/A\n");
3125
3126 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3127 "\tHost Allocated RPI extents: ");
3128 rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_RPI,
3129 &ext_cnt, &ext_size);
3130 if (!rc)
3131 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3132 "Port %d Extent %3d, Size %3d\n",
3133 phba->brd_no, ext_cnt, ext_size);
3134 else
3135 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3136 "N/A\n");
3137
3138 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3139 "\tHost Allocated XRI extents: ");
3140 rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
3141 &ext_cnt, &ext_size);
3142 if (!rc)
3143 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3144 "Port %d Extent %3d, Size %3d\n",
3145 phba->brd_no, ext_cnt, ext_size);
3146 else
3147 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3148 "N/A\n");
3149
3150 return len;
3151}
3152
3153/**
3154 * lpfc_idiag_extacc_drivr_get - get driver extent information
3155 * @phba: pointer to lpfc hba data structure.
3156 * @pbuffer: pointer to internal buffer.
3157 * @len: length into the internal buffer data has been copied.
3158 *
3159 * Description:
3160 * This routine is to get the driver extent information.
3161 *
3162 * Returns:
3163 * overall lenth of the data read into the internal buffer.
3164 **/
3165static int
3166lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len)
3167{
3168 struct lpfc_rsrc_blks *rsrc_blks;
3169 int index;
3170
3171 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3172 "\nDriver Extents Information:\n");
3173
3174 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3175 "\tVPI extents:\n");
3176 index = 0;
3177 list_for_each_entry(rsrc_blks, &phba->lpfc_vpi_blk_list, list) {
3178 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3179 "\t\tBlock %3d: Start %4d, Count %4d\n",
3180 index, rsrc_blks->rsrc_start,
3181 rsrc_blks->rsrc_size);
3182 index++;
3183 }
3184 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3185 "\tVFI extents:\n");
3186 index = 0;
3187 list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_vfi_blk_list,
3188 list) {
3189 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3190 "\t\tBlock %3d: Start %4d, Count %4d\n",
3191 index, rsrc_blks->rsrc_start,
3192 rsrc_blks->rsrc_size);
3193 index++;
3194 }
3195
3196 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3197 "\tRPI extents:\n");
3198 index = 0;
3199 list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_rpi_blk_list,
3200 list) {
3201 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3202 "\t\tBlock %3d: Start %4d, Count %4d\n",
3203 index, rsrc_blks->rsrc_start,
3204 rsrc_blks->rsrc_size);
3205 index++;
3206 }
3207
3208 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3209 "\tXRI extents:\n");
3210 index = 0;
3211 list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_xri_blk_list,
3212 list) {
3213 len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
3214 "\t\tBlock %3d: Start %4d, Count %4d\n",
3215 index, rsrc_blks->rsrc_start,
3216 rsrc_blks->rsrc_size);
3217 index++;
3218 }
3219
3220 return len;
3221}
3222
3223/**
3224 * lpfc_idiag_extacc_write - Syntax check and set up idiag extacc commands
3225 * @file: The file pointer to read from.
3226 * @buf: The buffer to copy the user data from.
3227 * @nbytes: The number of bytes to get.
3228 * @ppos: The position in the file to start reading from.
3229 *
3230 * This routine get the debugfs idiag command struct from user space and then
3231 * perform the syntax check for extent information access commands and sets
3232 * up the necessary states in the idiag command struct accordingly.
3233 *
3234 * It returns the @nbytges passing in from debugfs user space when successful.
3235 * In case of error conditions, it returns proper error code back to the user
3236 * space.
3237 **/
3238static ssize_t
3239lpfc_idiag_extacc_write(struct file *file, const char __user *buf,
3240 size_t nbytes, loff_t *ppos)
3241{
3242 struct lpfc_debug *debug = file->private_data;
3243 uint32_t ext_map;
3244 int rc;
3245
3246 /* This is a user write operation */
3247 debug->op = LPFC_IDIAG_OP_WR;
3248
3249 rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
3250 if (rc < 0)
3251 return rc;
3252
3253 ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX];
3254
3255 if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD)
3256 goto error_out;
3257 if (rc != LPFC_EXT_ACC_CMD_ARG)
3258 goto error_out;
3259 if (!(ext_map & LPFC_EXT_ACC_ALL))
3260 goto error_out;
3261
3262 return nbytes;
3263error_out:
3264 /* Clean out command structure on command error out */
3265 memset(&idiag, 0, sizeof(idiag));
3266 return -EINVAL;
3267}
3268
3269/**
3270 * lpfc_idiag_extacc_read - idiag debugfs read access to extent information
3271 * @file: The file pointer to read from.
3272 * @buf: The buffer to copy the data to.
3273 * @nbytes: The number of bytes to read.
3274 * @ppos: The position in the file to start reading from.
3275 *
3276 * Description:
3277 * This routine reads data from the proper extent information according to
3278 * the idiag command, and copies to user @buf.
3279 *
3280 * Returns:
3281 * This function returns the amount of data that was read (this could be less
3282 * than @nbytes if the end of the file was reached) or a negative error value.
3283 **/
3284static ssize_t
3285lpfc_idiag_extacc_read(struct file *file, char __user *buf, size_t nbytes,
3286 loff_t *ppos)
3287{
3288 struct lpfc_debug *debug = file->private_data;
3289 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
3290 char *pbuffer;
3291 uint32_t ext_map;
3292 int len = 0;
3293
3294 /* This is a user read operation */
3295 debug->op = LPFC_IDIAG_OP_RD;
3296
3297 if (!debug->buffer)
3298 debug->buffer = kmalloc(LPFC_EXT_ACC_BUF_SIZE, GFP_KERNEL);
3299 if (!debug->buffer)
3300 return 0;
3301 pbuffer = debug->buffer;
3302 if (*ppos)
3303 return 0;
3304 if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD)
3305 return 0;
3306
3307 ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX];
3308 if (ext_map & LPFC_EXT_ACC_AVAIL)
3309 len = lpfc_idiag_extacc_avail_get(phba, pbuffer, len);
3310 if (ext_map & LPFC_EXT_ACC_ALLOC)
3311 len = lpfc_idiag_extacc_alloc_get(phba, pbuffer, len);
3312 if (ext_map & LPFC_EXT_ACC_DRIVR)
3313 len = lpfc_idiag_extacc_drivr_get(phba, pbuffer, len);
3314
3315 return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
3316}
3317
2333#undef lpfc_debugfs_op_disc_trc 3318#undef lpfc_debugfs_op_disc_trc
2334static const struct file_operations lpfc_debugfs_op_disc_trc = { 3319static const struct file_operations lpfc_debugfs_op_disc_trc = {
2335 .owner = THIS_MODULE, 3320 .owner = THIS_MODULE,
@@ -2420,6 +3405,16 @@ static const struct file_operations lpfc_idiag_op_pciCfg = {
2420 .release = lpfc_idiag_cmd_release, 3405 .release = lpfc_idiag_cmd_release,
2421}; 3406};
2422 3407
3408#undef lpfc_idiag_op_barAcc
3409static const struct file_operations lpfc_idiag_op_barAcc = {
3410 .owner = THIS_MODULE,
3411 .open = lpfc_idiag_open,
3412 .llseek = lpfc_debugfs_lseek,
3413 .read = lpfc_idiag_baracc_read,
3414 .write = lpfc_idiag_baracc_write,
3415 .release = lpfc_idiag_cmd_release,
3416};
3417
2423#undef lpfc_idiag_op_queInfo 3418#undef lpfc_idiag_op_queInfo
2424static const struct file_operations lpfc_idiag_op_queInfo = { 3419static const struct file_operations lpfc_idiag_op_queInfo = {
2425 .owner = THIS_MODULE, 3420 .owner = THIS_MODULE,
@@ -2428,7 +3423,7 @@ static const struct file_operations lpfc_idiag_op_queInfo = {
2428 .release = lpfc_idiag_release, 3423 .release = lpfc_idiag_release,
2429}; 3424};
2430 3425
2431#undef lpfc_idiag_op_queacc 3426#undef lpfc_idiag_op_queAcc
2432static const struct file_operations lpfc_idiag_op_queAcc = { 3427static const struct file_operations lpfc_idiag_op_queAcc = {
2433 .owner = THIS_MODULE, 3428 .owner = THIS_MODULE,
2434 .open = lpfc_idiag_open, 3429 .open = lpfc_idiag_open,
@@ -2438,7 +3433,7 @@ static const struct file_operations lpfc_idiag_op_queAcc = {
2438 .release = lpfc_idiag_cmd_release, 3433 .release = lpfc_idiag_cmd_release,
2439}; 3434};
2440 3435
2441#undef lpfc_idiag_op_drbacc 3436#undef lpfc_idiag_op_drbAcc
2442static const struct file_operations lpfc_idiag_op_drbAcc = { 3437static const struct file_operations lpfc_idiag_op_drbAcc = {
2443 .owner = THIS_MODULE, 3438 .owner = THIS_MODULE,
2444 .open = lpfc_idiag_open, 3439 .open = lpfc_idiag_open,
@@ -2448,8 +3443,234 @@ static const struct file_operations lpfc_idiag_op_drbAcc = {
2448 .release = lpfc_idiag_cmd_release, 3443 .release = lpfc_idiag_cmd_release,
2449}; 3444};
2450 3445
3446#undef lpfc_idiag_op_ctlAcc
3447static const struct file_operations lpfc_idiag_op_ctlAcc = {
3448 .owner = THIS_MODULE,
3449 .open = lpfc_idiag_open,
3450 .llseek = lpfc_debugfs_lseek,
3451 .read = lpfc_idiag_ctlacc_read,
3452 .write = lpfc_idiag_ctlacc_write,
3453 .release = lpfc_idiag_cmd_release,
3454};
3455
3456#undef lpfc_idiag_op_mbxAcc
3457static const struct file_operations lpfc_idiag_op_mbxAcc = {
3458 .owner = THIS_MODULE,
3459 .open = lpfc_idiag_open,
3460 .llseek = lpfc_debugfs_lseek,
3461 .read = lpfc_idiag_mbxacc_read,
3462 .write = lpfc_idiag_mbxacc_write,
3463 .release = lpfc_idiag_cmd_release,
3464};
3465
3466#undef lpfc_idiag_op_extAcc
3467static const struct file_operations lpfc_idiag_op_extAcc = {
3468 .owner = THIS_MODULE,
3469 .open = lpfc_idiag_open,
3470 .llseek = lpfc_debugfs_lseek,
3471 .read = lpfc_idiag_extacc_read,
3472 .write = lpfc_idiag_extacc_write,
3473 .release = lpfc_idiag_cmd_release,
3474};
3475
2451#endif 3476#endif
2452 3477
3478/* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
3479 * @phba: Pointer to HBA context object.
3480 * @dmabuf: Pointer to a DMA buffer descriptor.
3481 *
3482 * Description:
3483 * This routine dump a bsg pass-through non-embedded mailbox command with
3484 * external buffer.
3485 **/
3486void
3487lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3488 enum mbox_type mbox_tp, enum dma_type dma_tp,
3489 enum sta_type sta_tp,
3490 struct lpfc_dmabuf *dmabuf, uint32_t ext_buf)
3491{
3492#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3493 uint32_t *mbx_mbox_cmd, *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt;
3494 char line_buf[LPFC_MBX_ACC_LBUF_SZ];
3495 int len = 0;
3496 uint32_t do_dump = 0;
3497 uint32_t *pword;
3498 uint32_t i;
3499
3500 if (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP)
3501 return;
3502
3503 mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
3504 mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
3505 mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
3506 mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
3507
3508 if (!(*mbx_dump_map & LPFC_MBX_DMP_ALL) ||
3509 (*mbx_dump_cnt == 0) ||
3510 (*mbx_word_cnt == 0))
3511 return;
3512
3513 if (*mbx_mbox_cmd != 0x9B)
3514 return;
3515
3516 if ((mbox_tp == mbox_rd) && (dma_tp == dma_mbox)) {
3517 if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_MBX) {
3518 do_dump |= LPFC_BSG_DMP_MBX_RD_MBX;
3519 printk(KERN_ERR "\nRead mbox command (x%x), "
3520 "nemb:0x%x, extbuf_cnt:%d:\n",
3521 sta_tp, nemb_tp, ext_buf);
3522 }
3523 }
3524 if ((mbox_tp == mbox_rd) && (dma_tp == dma_ebuf)) {
3525 if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_BUF) {
3526 do_dump |= LPFC_BSG_DMP_MBX_RD_BUF;
3527 printk(KERN_ERR "\nRead mbox buffer (x%x), "
3528 "nemb:0x%x, extbuf_seq:%d:\n",
3529 sta_tp, nemb_tp, ext_buf);
3530 }
3531 }
3532 if ((mbox_tp == mbox_wr) && (dma_tp == dma_mbox)) {
3533 if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_MBX) {
3534 do_dump |= LPFC_BSG_DMP_MBX_WR_MBX;
3535 printk(KERN_ERR "\nWrite mbox command (x%x), "
3536 "nemb:0x%x, extbuf_cnt:%d:\n",
3537 sta_tp, nemb_tp, ext_buf);
3538 }
3539 }
3540 if ((mbox_tp == mbox_wr) && (dma_tp == dma_ebuf)) {
3541 if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_BUF) {
3542 do_dump |= LPFC_BSG_DMP_MBX_WR_BUF;
3543 printk(KERN_ERR "\nWrite mbox buffer (x%x), "
3544 "nemb:0x%x, extbuf_seq:%d:\n",
3545 sta_tp, nemb_tp, ext_buf);
3546 }
3547 }
3548
3549 /* dump buffer content */
3550 if (do_dump) {
3551 pword = (uint32_t *)dmabuf->virt;
3552 for (i = 0; i < *mbx_word_cnt; i++) {
3553 if (!(i % 8)) {
3554 if (i != 0)
3555 printk(KERN_ERR "%s\n", line_buf);
3556 len = 0;
3557 len += snprintf(line_buf+len,
3558 LPFC_MBX_ACC_LBUF_SZ-len,
3559 "%03d: ", i);
3560 }
3561 len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
3562 "%08x ", (uint32_t)*pword);
3563 pword++;
3564 }
3565 if ((i - 1) % 8)
3566 printk(KERN_ERR "%s\n", line_buf);
3567 (*mbx_dump_cnt)--;
3568 }
3569
3570 /* Clean out command structure on reaching dump count */
3571 if (*mbx_dump_cnt == 0)
3572 memset(&idiag, 0, sizeof(idiag));
3573 return;
3574#endif
3575}
3576
3577/* lpfc_idiag_mbxacc_dump_issue_mbox - idiag debugfs dump issue mailbox command
3578 * @phba: Pointer to HBA context object.
3579 * @dmabuf: Pointer to a DMA buffer descriptor.
3580 *
3581 * Description:
3582 * This routine dump a pass-through non-embedded mailbox command from issue
3583 * mailbox command.
3584 **/
3585void
3586lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox)
3587{
3588#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3589 uint32_t *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt, *mbx_mbox_cmd;
3590 char line_buf[LPFC_MBX_ACC_LBUF_SZ];
3591 int len = 0;
3592 uint32_t *pword;
3593 uint8_t *pbyte;
3594 uint32_t i, j;
3595
3596 if (idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP)
3597 return;
3598
3599 mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
3600 mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
3601 mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
3602 mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
3603
3604 if (!(*mbx_dump_map & LPFC_MBX_DMP_MBX_ALL) ||
3605 (*mbx_dump_cnt == 0) ||
3606 (*mbx_word_cnt == 0))
3607 return;
3608
3609 if ((*mbx_mbox_cmd != LPFC_MBX_ALL_CMD) &&
3610 (*mbx_mbox_cmd != pmbox->mbxCommand))
3611 return;
3612
3613 /* dump buffer content */
3614 if (*mbx_dump_map & LPFC_MBX_DMP_MBX_WORD) {
3615 printk(KERN_ERR "Mailbox command:0x%x dump by word:\n",
3616 pmbox->mbxCommand);
3617 pword = (uint32_t *)pmbox;
3618 for (i = 0; i < *mbx_word_cnt; i++) {
3619 if (!(i % 8)) {
3620 if (i != 0)
3621 printk(KERN_ERR "%s\n", line_buf);
3622 len = 0;
3623 memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
3624 len += snprintf(line_buf+len,
3625 LPFC_MBX_ACC_LBUF_SZ-len,
3626 "%03d: ", i);
3627 }
3628 len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
3629 "%08x ",
3630 ((uint32_t)*pword) & 0xffffffff);
3631 pword++;
3632 }
3633 if ((i - 1) % 8)
3634 printk(KERN_ERR "%s\n", line_buf);
3635 printk(KERN_ERR "\n");
3636 }
3637 if (*mbx_dump_map & LPFC_MBX_DMP_MBX_BYTE) {
3638 printk(KERN_ERR "Mailbox command:0x%x dump by byte:\n",
3639 pmbox->mbxCommand);
3640 pbyte = (uint8_t *)pmbox;
3641 for (i = 0; i < *mbx_word_cnt; i++) {
3642 if (!(i % 8)) {
3643 if (i != 0)
3644 printk(KERN_ERR "%s\n", line_buf);
3645 len = 0;
3646 memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
3647 len += snprintf(line_buf+len,
3648 LPFC_MBX_ACC_LBUF_SZ-len,
3649 "%03d: ", i);
3650 }
3651 for (j = 0; j < 4; j++) {
3652 len += snprintf(line_buf+len,
3653 LPFC_MBX_ACC_LBUF_SZ-len,
3654 "%02x",
3655 ((uint8_t)*pbyte) & 0xff);
3656 pbyte++;
3657 }
3658 len += snprintf(line_buf+len,
3659 LPFC_MBX_ACC_LBUF_SZ-len, " ");
3660 }
3661 if ((i - 1) % 8)
3662 printk(KERN_ERR "%s\n", line_buf);
3663 printk(KERN_ERR "\n");
3664 }
3665 (*mbx_dump_cnt)--;
3666
3667 /* Clean out command structure on reaching dump count */
3668 if (*mbx_dump_cnt == 0)
3669 memset(&idiag, 0, sizeof(idiag));
3670 return;
3671#endif
3672}
3673
2453/** 3674/**
2454 * lpfc_debugfs_initialize - Initialize debugfs for a vport 3675 * lpfc_debugfs_initialize - Initialize debugfs for a vport
2455 * @vport: The vport pointer to initialize. 3676 * @vport: The vport pointer to initialize.
@@ -2673,7 +3894,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
2673 vport, &lpfc_debugfs_op_nodelist); 3894 vport, &lpfc_debugfs_op_nodelist);
2674 if (!vport->debug_nodelist) { 3895 if (!vport->debug_nodelist) {
2675 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 3896 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2676 "0409 Can't create debugfs nodelist\n"); 3897 "2985 Can't create debugfs nodelist\n");
2677 goto debug_failed; 3898 goto debug_failed;
2678 } 3899 }
2679 3900
@@ -2710,6 +3931,20 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
2710 idiag.offset.last_rd = 0; 3931 idiag.offset.last_rd = 0;
2711 } 3932 }
2712 3933
3934 /* iDiag PCI BAR access */
3935 snprintf(name, sizeof(name), "barAcc");
3936 if (!phba->idiag_bar_acc) {
3937 phba->idiag_bar_acc =
3938 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
3939 phba->idiag_root, phba, &lpfc_idiag_op_barAcc);
3940 if (!phba->idiag_bar_acc) {
3941 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3942 "3056 Can't create idiag debugfs\n");
3943 goto debug_failed;
3944 }
3945 idiag.offset.last_rd = 0;
3946 }
3947
2713 /* iDiag get PCI function queue information */ 3948 /* iDiag get PCI function queue information */
2714 snprintf(name, sizeof(name), "queInfo"); 3949 snprintf(name, sizeof(name), "queInfo");
2715 if (!phba->idiag_que_info) { 3950 if (!phba->idiag_que_info) {
@@ -2749,6 +3984,50 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
2749 } 3984 }
2750 } 3985 }
2751 3986
3987 /* iDiag access PCI function control registers */
3988 snprintf(name, sizeof(name), "ctlAcc");
3989 if (!phba->idiag_ctl_acc) {
3990 phba->idiag_ctl_acc =
3991 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
3992 phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc);
3993 if (!phba->idiag_ctl_acc) {
3994 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3995 "2981 Can't create idiag debugfs\n");
3996 goto debug_failed;
3997 }
3998 }
3999
4000 /* iDiag access mbox commands */
4001 snprintf(name, sizeof(name), "mbxAcc");
4002 if (!phba->idiag_mbx_acc) {
4003 phba->idiag_mbx_acc =
4004 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
4005 phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc);
4006 if (!phba->idiag_mbx_acc) {
4007 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4008 "2980 Can't create idiag debugfs\n");
4009 goto debug_failed;
4010 }
4011 }
4012
4013 /* iDiag extents access commands */
4014 if (phba->sli4_hba.extents_in_use) {
4015 snprintf(name, sizeof(name), "extAcc");
4016 if (!phba->idiag_ext_acc) {
4017 phba->idiag_ext_acc =
4018 debugfs_create_file(name,
4019 S_IFREG|S_IRUGO|S_IWUSR,
4020 phba->idiag_root, phba,
4021 &lpfc_idiag_op_extAcc);
4022 if (!phba->idiag_ext_acc) {
4023 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4024 "2986 Cant create "
4025 "idiag debugfs\n");
4026 goto debug_failed;
4027 }
4028 }
4029 }
4030
2752debug_failed: 4031debug_failed:
2753 return; 4032 return;
2754#endif 4033#endif
@@ -2783,7 +4062,6 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
2783 debugfs_remove(vport->debug_nodelist); /* nodelist */ 4062 debugfs_remove(vport->debug_nodelist); /* nodelist */
2784 vport->debug_nodelist = NULL; 4063 vport->debug_nodelist = NULL;
2785 } 4064 }
2786
2787 if (vport->vport_debugfs_root) { 4065 if (vport->vport_debugfs_root) {
2788 debugfs_remove(vport->vport_debugfs_root); /* vportX */ 4066 debugfs_remove(vport->vport_debugfs_root); /* vportX */
2789 vport->vport_debugfs_root = NULL; 4067 vport->vport_debugfs_root = NULL;
@@ -2827,6 +4105,21 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
2827 * iDiag release 4105 * iDiag release
2828 */ 4106 */
2829 if (phba->sli_rev == LPFC_SLI_REV4) { 4107 if (phba->sli_rev == LPFC_SLI_REV4) {
4108 if (phba->idiag_ext_acc) {
4109 /* iDiag extAcc */
4110 debugfs_remove(phba->idiag_ext_acc);
4111 phba->idiag_ext_acc = NULL;
4112 }
4113 if (phba->idiag_mbx_acc) {
4114 /* iDiag mbxAcc */
4115 debugfs_remove(phba->idiag_mbx_acc);
4116 phba->idiag_mbx_acc = NULL;
4117 }
4118 if (phba->idiag_ctl_acc) {
4119 /* iDiag ctlAcc */
4120 debugfs_remove(phba->idiag_ctl_acc);
4121 phba->idiag_ctl_acc = NULL;
4122 }
2830 if (phba->idiag_drb_acc) { 4123 if (phba->idiag_drb_acc) {
2831 /* iDiag drbAcc */ 4124 /* iDiag drbAcc */
2832 debugfs_remove(phba->idiag_drb_acc); 4125 debugfs_remove(phba->idiag_drb_acc);
@@ -2842,6 +4135,11 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
2842 debugfs_remove(phba->idiag_que_info); 4135 debugfs_remove(phba->idiag_que_info);
2843 phba->idiag_que_info = NULL; 4136 phba->idiag_que_info = NULL;
2844 } 4137 }
4138 if (phba->idiag_bar_acc) {
4139 /* iDiag barAcc */
4140 debugfs_remove(phba->idiag_bar_acc);
4141 phba->idiag_bar_acc = NULL;
4142 }
2845 if (phba->idiag_pci_cfg) { 4143 if (phba->idiag_pci_cfg) {
2846 /* iDiag pciCfg */ 4144 /* iDiag pciCfg */
2847 debugfs_remove(phba->idiag_pci_cfg); 4145 debugfs_remove(phba->idiag_pci_cfg);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 6525a5e62d27..f83bd944edd8 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -39,14 +39,51 @@
39/* hbqinfo output buffer size */ 39/* hbqinfo output buffer size */
40#define LPFC_HBQINFO_SIZE 8192 40#define LPFC_HBQINFO_SIZE 8192
41 41
42/*
43 * For SLI4 iDiag debugfs diagnostics tool
44 */
45
42/* pciConf */ 46/* pciConf */
43#define LPFC_PCI_CFG_BROWSE 0xffff 47#define LPFC_PCI_CFG_BROWSE 0xffff
44#define LPFC_PCI_CFG_RD_CMD_ARG 2 48#define LPFC_PCI_CFG_RD_CMD_ARG 2
45#define LPFC_PCI_CFG_WR_CMD_ARG 3 49#define LPFC_PCI_CFG_WR_CMD_ARG 3
46#define LPFC_PCI_CFG_SIZE 4096 50#define LPFC_PCI_CFG_SIZE 4096
47#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2)
48#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4) 51#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4)
49 52
53#define IDIAG_PCICFG_WHERE_INDX 0
54#define IDIAG_PCICFG_COUNT_INDX 1
55#define IDIAG_PCICFG_VALUE_INDX 2
56
57/* barAcc */
58#define LPFC_PCI_BAR_BROWSE 0xffff
59#define LPFC_PCI_BAR_RD_CMD_ARG 3
60#define LPFC_PCI_BAR_WR_CMD_ARG 3
61
62#define LPFC_PCI_IF0_BAR0_SIZE (1024 * 16)
63#define LPFC_PCI_IF0_BAR1_SIZE (1024 * 128)
64#define LPFC_PCI_IF0_BAR2_SIZE (1024 * 128)
65#define LPFC_PCI_IF2_BAR0_SIZE (1024 * 32)
66
67#define LPFC_PCI_BAR_RD_BUF_SIZE 4096
68#define LPFC_PCI_BAR_RD_SIZE (LPFC_PCI_BAR_RD_BUF_SIZE/4)
69
70#define LPFC_PCI_IF0_BAR0_RD_SIZE (LPFC_PCI_IF0_BAR0_SIZE/4)
71#define LPFC_PCI_IF0_BAR1_RD_SIZE (LPFC_PCI_IF0_BAR1_SIZE/4)
72#define LPFC_PCI_IF0_BAR2_RD_SIZE (LPFC_PCI_IF0_BAR2_SIZE/4)
73#define LPFC_PCI_IF2_BAR0_RD_SIZE (LPFC_PCI_IF2_BAR0_SIZE/4)
74
75#define IDIAG_BARACC_BAR_NUM_INDX 0
76#define IDIAG_BARACC_OFF_SET_INDX 1
77#define IDIAG_BARACC_ACC_MOD_INDX 2
78#define IDIAG_BARACC_REG_VAL_INDX 2
79#define IDIAG_BARACC_BAR_SZE_INDX 3
80
81#define IDIAG_BARACC_BAR_0 0
82#define IDIAG_BARACC_BAR_1 1
83#define IDIAG_BARACC_BAR_2 2
84
85#define SINGLE_WORD 1
86
50/* queue info */ 87/* queue info */
51#define LPFC_QUE_INFO_GET_BUF_SIZE 4096 88#define LPFC_QUE_INFO_GET_BUF_SIZE 4096
52 89
@@ -63,7 +100,14 @@
63#define LPFC_IDIAG_WQ 4 100#define LPFC_IDIAG_WQ 4
64#define LPFC_IDIAG_RQ 5 101#define LPFC_IDIAG_RQ 5
65 102
66/* doorbell acc */ 103#define IDIAG_QUEACC_QUETP_INDX 0
104#define IDIAG_QUEACC_QUEID_INDX 1
105#define IDIAG_QUEACC_INDEX_INDX 2
106#define IDIAG_QUEACC_COUNT_INDX 3
107#define IDIAG_QUEACC_OFFST_INDX 4
108#define IDIAG_QUEACC_VALUE_INDX 5
109
110/* doorbell register acc */
67#define LPFC_DRB_ACC_ALL 0xffff 111#define LPFC_DRB_ACC_ALL 0xffff
68#define LPFC_DRB_ACC_RD_CMD_ARG 1 112#define LPFC_DRB_ACC_RD_CMD_ARG 1
69#define LPFC_DRB_ACC_WR_CMD_ARG 2 113#define LPFC_DRB_ACC_WR_CMD_ARG 2
@@ -76,6 +120,67 @@
76 120
77#define LPFC_DRB_MAX 4 121#define LPFC_DRB_MAX 4
78 122
123#define IDIAG_DRBACC_REGID_INDX 0
124#define IDIAG_DRBACC_VALUE_INDX 1
125
126/* control register acc */
127#define LPFC_CTL_ACC_ALL 0xffff
128#define LPFC_CTL_ACC_RD_CMD_ARG 1
129#define LPFC_CTL_ACC_WR_CMD_ARG 2
130#define LPFC_CTL_ACC_BUF_SIZE 256
131
132#define LPFC_CTL_PORT_SEM 1
133#define LPFC_CTL_PORT_STA 2
134#define LPFC_CTL_PORT_CTL 3
135#define LPFC_CTL_PORT_ER1 4
136#define LPFC_CTL_PORT_ER2 5
137#define LPFC_CTL_PDEV_CTL 6
138
139#define LPFC_CTL_MAX 6
140
141#define IDIAG_CTLACC_REGID_INDX 0
142#define IDIAG_CTLACC_VALUE_INDX 1
143
144/* mailbox access */
145#define LPFC_MBX_DMP_ARG 4
146
147#define LPFC_MBX_ACC_BUF_SIZE 512
148#define LPFC_MBX_ACC_LBUF_SZ 128
149
150#define LPFC_MBX_DMP_MBX_WORD 0x00000001
151#define LPFC_MBX_DMP_MBX_BYTE 0x00000002
152#define LPFC_MBX_DMP_MBX_ALL (LPFC_MBX_DMP_MBX_WORD | LPFC_MBX_DMP_MBX_BYTE)
153
154#define LPFC_BSG_DMP_MBX_RD_MBX 0x00000001
155#define LPFC_BSG_DMP_MBX_RD_BUF 0x00000002
156#define LPFC_BSG_DMP_MBX_WR_MBX 0x00000004
157#define LPFC_BSG_DMP_MBX_WR_BUF 0x00000008
158#define LPFC_BSG_DMP_MBX_ALL (LPFC_BSG_DMP_MBX_RD_MBX | \
159 LPFC_BSG_DMP_MBX_RD_BUF | \
160 LPFC_BSG_DMP_MBX_WR_MBX | \
161 LPFC_BSG_DMP_MBX_WR_BUF)
162
163#define LPFC_MBX_DMP_ALL 0xffff
164#define LPFC_MBX_ALL_CMD 0xff
165
166#define IDIAG_MBXACC_MBCMD_INDX 0
167#define IDIAG_MBXACC_DPMAP_INDX 1
168#define IDIAG_MBXACC_DPCNT_INDX 2
169#define IDIAG_MBXACC_WDCNT_INDX 3
170
171/* extents access */
172#define LPFC_EXT_ACC_CMD_ARG 1
173#define LPFC_EXT_ACC_BUF_SIZE 4096
174
175#define LPFC_EXT_ACC_AVAIL 0x1
176#define LPFC_EXT_ACC_ALLOC 0x2
177#define LPFC_EXT_ACC_DRIVR 0x4
178#define LPFC_EXT_ACC_ALL (LPFC_EXT_ACC_DRIVR | \
179 LPFC_EXT_ACC_AVAIL | \
180 LPFC_EXT_ACC_ALLOC)
181
182#define IDIAG_EXTACC_EXMAP_INDX 0
183
79#define SIZE_U8 sizeof(uint8_t) 184#define SIZE_U8 sizeof(uint8_t)
80#define SIZE_U16 sizeof(uint16_t) 185#define SIZE_U16 sizeof(uint16_t)
81#define SIZE_U32 sizeof(uint32_t) 186#define SIZE_U32 sizeof(uint32_t)
@@ -110,6 +215,11 @@ struct lpfc_idiag_cmd {
110#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003 215#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003
111#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004 216#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004
112 217
218#define LPFC_IDIAG_CMD_BARACC_RD 0x00000008
219#define LPFC_IDIAG_CMD_BARACC_WR 0x00000009
220#define LPFC_IDIAG_CMD_BARACC_ST 0x0000000a
221#define LPFC_IDIAG_CMD_BARACC_CL 0x0000000b
222
113#define LPFC_IDIAG_CMD_QUEACC_RD 0x00000011 223#define LPFC_IDIAG_CMD_QUEACC_RD 0x00000011
114#define LPFC_IDIAG_CMD_QUEACC_WR 0x00000012 224#define LPFC_IDIAG_CMD_QUEACC_WR 0x00000012
115#define LPFC_IDIAG_CMD_QUEACC_ST 0x00000013 225#define LPFC_IDIAG_CMD_QUEACC_ST 0x00000013
@@ -119,6 +229,17 @@ struct lpfc_idiag_cmd {
119#define LPFC_IDIAG_CMD_DRBACC_WR 0x00000022 229#define LPFC_IDIAG_CMD_DRBACC_WR 0x00000022
120#define LPFC_IDIAG_CMD_DRBACC_ST 0x00000023 230#define LPFC_IDIAG_CMD_DRBACC_ST 0x00000023
121#define LPFC_IDIAG_CMD_DRBACC_CL 0x00000024 231#define LPFC_IDIAG_CMD_DRBACC_CL 0x00000024
232
233#define LPFC_IDIAG_CMD_CTLACC_RD 0x00000031
234#define LPFC_IDIAG_CMD_CTLACC_WR 0x00000032
235#define LPFC_IDIAG_CMD_CTLACC_ST 0x00000033
236#define LPFC_IDIAG_CMD_CTLACC_CL 0x00000034
237
238#define LPFC_IDIAG_CMD_MBXACC_DP 0x00000041
239#define LPFC_IDIAG_BSG_MBXACC_DP 0x00000042
240
241#define LPFC_IDIAG_CMD_EXTACC_RD 0x00000051
242
122 uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE]; 243 uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE];
123}; 244};
124 245
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 32a084534f3e..023da0e00d38 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -647,21 +647,15 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
647 } 647 }
648 lpfc_cleanup_pending_mbox(vport); 648 lpfc_cleanup_pending_mbox(vport);
649 649
650 if (phba->sli_rev == LPFC_SLI_REV4) 650 if (phba->sli_rev == LPFC_SLI_REV4) {
651 lpfc_sli4_unreg_all_rpis(vport); 651 lpfc_sli4_unreg_all_rpis(vport);
652
653 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
654 lpfc_mbx_unreg_vpi(vport); 652 lpfc_mbx_unreg_vpi(vport);
655 spin_lock_irq(shost->host_lock); 653 spin_lock_irq(shost->host_lock);
656 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 654 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
657 spin_unlock_irq(shost->host_lock); 655 /*
658 } 656 * If VPI is unreged, driver need to do INIT_VPI
659 /* 657 * before re-registering
660 * If VPI is unreged, driver need to do INIT_VPI 658 */
661 * before re-registering
662 */
663 if (phba->sli_rev == LPFC_SLI_REV4) {
664 spin_lock_irq(shost->host_lock);
665 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 659 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
666 spin_unlock_irq(shost->host_lock); 660 spin_unlock_irq(shost->host_lock);
667 } 661 }
@@ -880,6 +874,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
880 phba->fcf.current_rec.fcf_indx, 874 phba->fcf.current_rec.fcf_indx,
881 irsp->ulpStatus, irsp->un.ulpWord[4], 875 irsp->ulpStatus, irsp->un.ulpWord[4],
882 irsp->ulpTimeout); 876 irsp->ulpTimeout);
877 lpfc_sli4_set_fcf_flogi_fail(phba,
878 phba->fcf.current_rec.fcf_indx);
883 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 879 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
884 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); 880 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
885 if (rc) 881 if (rc)
@@ -1096,11 +1092,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1096 /* Set the fcfi to the fcfi we registered with */ 1092 /* Set the fcfi to the fcfi we registered with */
1097 elsiocb->iocb.ulpContext = phba->fcf.fcfi; 1093 elsiocb->iocb.ulpContext = phba->fcf.fcfi;
1098 } 1094 }
1099 } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 1095 } else {
1100 sp->cmn.request_multiple_Nport = 1; 1096 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1101 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 1097 sp->cmn.request_multiple_Nport = 1;
1102 icmd->ulpCt_h = 1; 1098 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1103 icmd->ulpCt_l = 0; 1099 icmd->ulpCt_h = 1;
1100 icmd->ulpCt_l = 0;
1101 } else
1102 sp->cmn.request_multiple_Nport = 0;
1104 } 1103 }
1105 1104
1106 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 1105 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
@@ -3656,7 +3655,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3656 } 3655 }
3657 3656
3658 icmd = &elsiocb->iocb; 3657 icmd = &elsiocb->iocb;
3659 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3658 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3659 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3660 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3660 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3661 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3661 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3662 pcmd += sizeof(uint32_t); 3662 pcmd += sizeof(uint32_t);
@@ -3673,7 +3673,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3673 return 1; 3673 return 1;
3674 3674
3675 icmd = &elsiocb->iocb; 3675 icmd = &elsiocb->iocb;
3676 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3676 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3677 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3677 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3678 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3678 3679
3679 if (mbox) 3680 if (mbox)
@@ -3695,7 +3696,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3695 return 1; 3696 return 1;
3696 3697
3697 icmd = &elsiocb->iocb; 3698 icmd = &elsiocb->iocb;
3698 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3699 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3700 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3699 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3701 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3700 3702
3701 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 3703 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
@@ -3781,7 +3783,8 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3781 3783
3782 icmd = &elsiocb->iocb; 3784 icmd = &elsiocb->iocb;
3783 oldcmd = &oldiocb->iocb; 3785 oldcmd = &oldiocb->iocb;
3784 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3786 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3787 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3785 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3788 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3786 3789
3787 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 3790 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
@@ -3853,7 +3856,8 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3853 3856
3854 icmd = &elsiocb->iocb; 3857 icmd = &elsiocb->iocb;
3855 oldcmd = &oldiocb->iocb; 3858 oldcmd = &oldiocb->iocb;
3856 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3859 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3860 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3857 3861
3858 /* Xmit ADISC ACC response tag <ulpIoTag> */ 3862 /* Xmit ADISC ACC response tag <ulpIoTag> */
3859 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3863 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -3931,7 +3935,9 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3931 3935
3932 icmd = &elsiocb->iocb; 3936 icmd = &elsiocb->iocb;
3933 oldcmd = &oldiocb->iocb; 3937 oldcmd = &oldiocb->iocb;
3934 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 3938 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
3939 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
3940
3935 /* Xmit PRLI ACC response tag <ulpIoTag> */ 3941 /* Xmit PRLI ACC response tag <ulpIoTag> */
3936 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3942 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3937 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 3943 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
@@ -4035,7 +4041,9 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
4035 4041
4036 icmd = &elsiocb->iocb; 4042 icmd = &elsiocb->iocb;
4037 oldcmd = &oldiocb->iocb; 4043 oldcmd = &oldiocb->iocb;
4038 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 4044 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4045 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4046
4039 /* Xmit RNID ACC response tag <ulpIoTag> */ 4047 /* Xmit RNID ACC response tag <ulpIoTag> */
4040 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4048 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4041 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 4049 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
@@ -4163,7 +4171,9 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
4163 if (!elsiocb) 4171 if (!elsiocb)
4164 return 1; 4172 return 1;
4165 4173
4166 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri */ 4174 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
4175 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
4176
4167 /* Xmit ECHO ACC response tag <ulpIoTag> */ 4177 /* Xmit ECHO ACC response tag <ulpIoTag> */
4168 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4178 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4169 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 4179 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
@@ -5054,13 +5064,15 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5054 uint8_t *pcmd; 5064 uint8_t *pcmd;
5055 struct lpfc_iocbq *elsiocb; 5065 struct lpfc_iocbq *elsiocb;
5056 struct lpfc_nodelist *ndlp; 5066 struct lpfc_nodelist *ndlp;
5057 uint16_t xri; 5067 uint16_t oxid;
5068 uint16_t rxid;
5058 uint32_t cmdsize; 5069 uint32_t cmdsize;
5059 5070
5060 mb = &pmb->u.mb; 5071 mb = &pmb->u.mb;
5061 5072
5062 ndlp = (struct lpfc_nodelist *) pmb->context2; 5073 ndlp = (struct lpfc_nodelist *) pmb->context2;
5063 xri = (uint16_t) ((unsigned long)(pmb->context1)); 5074 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
5075 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
5064 pmb->context1 = NULL; 5076 pmb->context1 = NULL;
5065 pmb->context2 = NULL; 5077 pmb->context2 = NULL;
5066 5078
@@ -5082,7 +5094,8 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5082 return; 5094 return;
5083 5095
5084 icmd = &elsiocb->iocb; 5096 icmd = &elsiocb->iocb;
5085 icmd->ulpContext = xri; 5097 icmd->ulpContext = rxid;
5098 icmd->unsli3.rcvsli3.ox_id = oxid;
5086 5099
5087 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5100 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5088 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5101 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -5137,13 +5150,16 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5137 uint8_t *pcmd; 5150 uint8_t *pcmd;
5138 struct lpfc_iocbq *elsiocb; 5151 struct lpfc_iocbq *elsiocb;
5139 struct lpfc_nodelist *ndlp; 5152 struct lpfc_nodelist *ndlp;
5140 uint16_t xri, status; 5153 uint16_t status;
5154 uint16_t oxid;
5155 uint16_t rxid;
5141 uint32_t cmdsize; 5156 uint32_t cmdsize;
5142 5157
5143 mb = &pmb->u.mb; 5158 mb = &pmb->u.mb;
5144 5159
5145 ndlp = (struct lpfc_nodelist *) pmb->context2; 5160 ndlp = (struct lpfc_nodelist *) pmb->context2;
5146 xri = (uint16_t) ((unsigned long)(pmb->context1)); 5161 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
5162 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
5147 pmb->context1 = NULL; 5163 pmb->context1 = NULL;
5148 pmb->context2 = NULL; 5164 pmb->context2 = NULL;
5149 5165
@@ -5165,7 +5181,8 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5165 return; 5181 return;
5166 5182
5167 icmd = &elsiocb->iocb; 5183 icmd = &elsiocb->iocb;
5168 icmd->ulpContext = xri; 5184 icmd->ulpContext = rxid;
5185 icmd->unsli3.rcvsli3.ox_id = oxid;
5169 5186
5170 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5187 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5171 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5188 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -5238,8 +5255,9 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5238 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 5255 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5239 if (mbox) { 5256 if (mbox) {
5240 lpfc_read_lnk_stat(phba, mbox); 5257 lpfc_read_lnk_stat(phba, mbox);
5241 mbox->context1 = 5258 mbox->context1 = (void *)((unsigned long)
5242 (void *)((unsigned long) cmdiocb->iocb.ulpContext); 5259 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5260 cmdiocb->iocb.ulpContext)); /* rx_id */
5243 mbox->context2 = lpfc_nlp_get(ndlp); 5261 mbox->context2 = lpfc_nlp_get(ndlp);
5244 mbox->vport = vport; 5262 mbox->vport = vport;
5245 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 5263 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
@@ -5314,7 +5332,8 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5314 pcmd += sizeof(uint32_t); /* Skip past command */ 5332 pcmd += sizeof(uint32_t); /* Skip past command */
5315 5333
5316 /* use the command's xri in the response */ 5334 /* use the command's xri in the response */
5317 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; 5335 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
5336 elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
5318 5337
5319 rtv_rsp = (struct RTV_RSP *)pcmd; 5338 rtv_rsp = (struct RTV_RSP *)pcmd;
5320 5339
@@ -5399,8 +5418,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5399 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 5418 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
5400 if (mbox) { 5419 if (mbox) {
5401 lpfc_read_lnk_stat(phba, mbox); 5420 lpfc_read_lnk_stat(phba, mbox);
5402 mbox->context1 = 5421 mbox->context1 = (void *)((unsigned long)
5403 (void *)((unsigned long) cmdiocb->iocb.ulpContext); 5422 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
5423 cmdiocb->iocb.ulpContext)); /* rx_id */
5404 mbox->context2 = lpfc_nlp_get(ndlp); 5424 mbox->context2 = lpfc_nlp_get(ndlp);
5405 mbox->vport = vport; 5425 mbox->vport = vport;
5406 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc; 5426 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
@@ -5554,7 +5574,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
5554 5574
5555 icmd = &elsiocb->iocb; 5575 icmd = &elsiocb->iocb;
5556 oldcmd = &oldiocb->iocb; 5576 oldcmd = &oldiocb->iocb;
5557 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 5577 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5578 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5558 5579
5559 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5580 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5560 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5581 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -6586,7 +6607,7 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6586{ 6607{
6587 struct lpfc_vport *vport; 6608 struct lpfc_vport *vport;
6588 unsigned long flags; 6609 unsigned long flags;
6589 int i; 6610 int i = 0;
6590 6611
6591 /* The physical ports are always vpi 0 - translate is unnecessary. */ 6612 /* The physical ports are always vpi 0 - translate is unnecessary. */
6592 if (vpi > 0) { 6613 if (vpi > 0) {
@@ -6609,7 +6630,7 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6609 6630
6610 spin_lock_irqsave(&phba->hbalock, flags); 6631 spin_lock_irqsave(&phba->hbalock, flags);
6611 list_for_each_entry(vport, &phba->port_list, listentry) { 6632 list_for_each_entry(vport, &phba->port_list, listentry) {
6612 if (vport->vpi == vpi) { 6633 if (vport->vpi == i) {
6613 spin_unlock_irqrestore(&phba->hbalock, flags); 6634 spin_unlock_irqrestore(&phba->hbalock, flags);
6614 return vport; 6635 return vport;
6615 } 6636 }
@@ -7787,6 +7808,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
7787{ 7808{
7788 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 7809 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
7789 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 7810 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
7811 uint16_t lxri = 0;
7790 7812
7791 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 7813 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7792 unsigned long iflag = 0; 7814 unsigned long iflag = 0;
@@ -7815,7 +7837,12 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
7815 } 7837 }
7816 } 7838 }
7817 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 7839 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
7818 sglq_entry = __lpfc_get_active_sglq(phba, xri); 7840 lxri = lpfc_sli4_xri_inrange(phba, xri);
7841 if (lxri == NO_XRI) {
7842 spin_unlock_irqrestore(&phba->hbalock, iflag);
7843 return;
7844 }
7845 sglq_entry = __lpfc_get_active_sglq(phba, lxri);
7819 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 7846 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
7820 spin_unlock_irqrestore(&phba->hbalock, iflag); 7847 spin_unlock_irqrestore(&phba->hbalock, iflag);
7821 return; 7848 return;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 18d0dbfda2bc..0b47adf9fee8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1109,6 +1109,28 @@ out:
1109 return; 1109 return;
1110} 1110}
1111 1111
1112/**
1113 * lpfc_sli4_clear_fcf_rr_bmask
1114 * @phba pointer to the struct lpfc_hba for this port.
1115 * This fucnction resets the round robin bit mask and clears the
1116 * fcf priority list. The list deletions are done while holding the
1117 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
1118 * from the lpfc_fcf_pri record.
1119 **/
1120void
1121lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
1122{
1123 struct lpfc_fcf_pri *fcf_pri;
1124 struct lpfc_fcf_pri *next_fcf_pri;
1125 memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
1126 spin_lock_irq(&phba->hbalock);
1127 list_for_each_entry_safe(fcf_pri, next_fcf_pri,
1128 &phba->fcf.fcf_pri_list, list) {
1129 list_del_init(&fcf_pri->list);
1130 fcf_pri->fcf_rec.flag = 0;
1131 }
1132 spin_unlock_irq(&phba->hbalock);
1133}
1112static void 1134static void
1113lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 1135lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1114{ 1136{
@@ -1130,7 +1152,8 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1130 spin_unlock_irq(&phba->hbalock); 1152 spin_unlock_irq(&phba->hbalock);
1131 1153
1132 /* If there is a pending FCoE event, restart FCF table scan. */ 1154 /* If there is a pending FCoE event, restart FCF table scan. */
1133 if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) 1155 if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
1156 lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
1134 goto fail_out; 1157 goto fail_out;
1135 1158
1136 /* Mark successful completion of FCF table scan */ 1159 /* Mark successful completion of FCF table scan */
@@ -1250,6 +1273,30 @@ lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
1250} 1273}
1251 1274
1252/** 1275/**
1276 * lpfc_update_fcf_record - Update driver fcf record
1277 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
1278 * @phba: pointer to lpfc hba data structure.
1279 * @fcf_index: Index for the lpfc_fcf_record.
1280 * @new_fcf_record: pointer to hba fcf record.
1281 *
1282 * This routine updates the driver FCF priority record from the new HBA FCF
1283 * record. This routine is called with the host lock held.
1284 **/
1285static void
1286__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
1287 struct fcf_record *new_fcf_record
1288 )
1289{
1290 struct lpfc_fcf_pri *fcf_pri;
1291
1292 fcf_pri = &phba->fcf.fcf_pri[fcf_index];
1293 fcf_pri->fcf_rec.fcf_index = fcf_index;
1294 /* FCF record priority */
1295 fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
1296
1297}
1298
1299/**
1253 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. 1300 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
1254 * @fcf: pointer to driver fcf record. 1301 * @fcf: pointer to driver fcf record.
1255 * @new_fcf_record: pointer to fcf record. 1302 * @new_fcf_record: pointer to fcf record.
@@ -1332,6 +1379,9 @@ __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
1332 fcf_rec->addr_mode = addr_mode; 1379 fcf_rec->addr_mode = addr_mode;
1333 fcf_rec->vlan_id = vlan_id; 1380 fcf_rec->vlan_id = vlan_id;
1334 fcf_rec->flag |= (flag | RECORD_VALID); 1381 fcf_rec->flag |= (flag | RECORD_VALID);
1382 __lpfc_update_fcf_record_pri(phba,
1383 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
1384 new_fcf_record);
1335} 1385}
1336 1386
1337/** 1387/**
@@ -1834,6 +1884,8 @@ lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
1834 return false; 1884 return false;
1835 if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record)) 1885 if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
1836 return false; 1886 return false;
1887 if (fcf_rec->priority != new_fcf_record->fip_priority)
1888 return false;
1837 return true; 1889 return true;
1838} 1890}
1839 1891
@@ -1897,6 +1949,152 @@ stop_flogi_current_fcf:
1897} 1949}
1898 1950
1899/** 1951/**
1952 * lpfc_sli4_fcf_pri_list_del
1953 * @phba: pointer to lpfc hba data structure.
1954 * @fcf_index the index of the fcf record to delete
1955 * This routine checks the on list flag of the fcf_index to be deleted.
1956 * If it is one the list then it is removed from the list, and the flag
1957 * is cleared. This routine grab the hbalock before removing the fcf
1958 * record from the list.
1959 **/
1960static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
1961 uint16_t fcf_index)
1962{
1963 struct lpfc_fcf_pri *new_fcf_pri;
1964
1965 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
1966 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1967 "3058 deleting idx x%x pri x%x flg x%x\n",
1968 fcf_index, new_fcf_pri->fcf_rec.priority,
1969 new_fcf_pri->fcf_rec.flag);
1970 spin_lock_irq(&phba->hbalock);
1971 if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
1972 if (phba->fcf.current_rec.priority ==
1973 new_fcf_pri->fcf_rec.priority)
1974 phba->fcf.eligible_fcf_cnt--;
1975 list_del_init(&new_fcf_pri->list);
1976 new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
1977 }
1978 spin_unlock_irq(&phba->hbalock);
1979}
1980
1981/**
1982 * lpfc_sli4_set_fcf_flogi_fail
1983 * @phba: pointer to lpfc hba data structure.
1984 * @fcf_index the index of the fcf record to update
1985 * This routine acquires the hbalock and then set the LPFC_FCF_FLOGI_FAILED
1986 * flag so the the round robin slection for the particular priority level
1987 * will try a different fcf record that does not have this bit set.
1988 * If the fcf record is re-read for any reason this flag is cleared brfore
1989 * adding it to the priority list.
1990 **/
1991void
1992lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
1993{
1994 struct lpfc_fcf_pri *new_fcf_pri;
1995 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
1996 spin_lock_irq(&phba->hbalock);
1997 new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
1998 spin_unlock_irq(&phba->hbalock);
1999}
2000
2001/**
2002 * lpfc_sli4_fcf_pri_list_add
2003 * @phba: pointer to lpfc hba data structure.
2004 * @fcf_index the index of the fcf record to add
2005 * This routine checks the priority of the fcf_index to be added.
2006 * If it is a lower priority than the current head of the fcf_pri list
2007 * then it is added to the list in the right order.
2008 * If it is the same priority as the current head of the list then it
2009 * is added to the head of the list and its bit in the rr_bmask is set.
2010 * If the fcf_index to be added is of a higher priority than the current
2011 * head of the list then the rr_bmask is cleared, its bit is set in the
2012 * rr_bmask and it is added to the head of the list.
2013 * returns:
2014 * 0=success 1=failure
2015 **/
2016int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
2017 struct fcf_record *new_fcf_record)
2018{
2019 uint16_t current_fcf_pri;
2020 uint16_t last_index;
2021 struct lpfc_fcf_pri *fcf_pri;
2022 struct lpfc_fcf_pri *next_fcf_pri;
2023 struct lpfc_fcf_pri *new_fcf_pri;
2024 int ret;
2025
2026 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2027 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2028 "3059 adding idx x%x pri x%x flg x%x\n",
2029 fcf_index, new_fcf_record->fip_priority,
2030 new_fcf_pri->fcf_rec.flag);
2031 spin_lock_irq(&phba->hbalock);
2032 if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
2033 list_del_init(&new_fcf_pri->list);
2034 new_fcf_pri->fcf_rec.fcf_index = fcf_index;
2035 new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
2036 if (list_empty(&phba->fcf.fcf_pri_list)) {
2037 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2038 ret = lpfc_sli4_fcf_rr_index_set(phba,
2039 new_fcf_pri->fcf_rec.fcf_index);
2040 goto out;
2041 }
2042
2043 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
2044 LPFC_SLI4_FCF_TBL_INDX_MAX);
2045 if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
2046 ret = 0; /* Empty rr list */
2047 goto out;
2048 }
2049 current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
2050 if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
2051 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2052 if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
2053 memset(phba->fcf.fcf_rr_bmask, 0,
2054 sizeof(*phba->fcf.fcf_rr_bmask));
2055 /* fcfs_at_this_priority_level = 1; */
2056 phba->fcf.eligible_fcf_cnt = 1;
2057 } else
2058 /* fcfs_at_this_priority_level++; */
2059 phba->fcf.eligible_fcf_cnt++;
2060 ret = lpfc_sli4_fcf_rr_index_set(phba,
2061 new_fcf_pri->fcf_rec.fcf_index);
2062 goto out;
2063 }
2064
2065 list_for_each_entry_safe(fcf_pri, next_fcf_pri,
2066 &phba->fcf.fcf_pri_list, list) {
2067 if (new_fcf_pri->fcf_rec.priority <=
2068 fcf_pri->fcf_rec.priority) {
2069 if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
2070 list_add(&new_fcf_pri->list,
2071 &phba->fcf.fcf_pri_list);
2072 else
2073 list_add(&new_fcf_pri->list,
2074 &((struct lpfc_fcf_pri *)
2075 fcf_pri->list.prev)->list);
2076 ret = 0;
2077 goto out;
2078 } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
2079 || new_fcf_pri->fcf_rec.priority <
2080 next_fcf_pri->fcf_rec.priority) {
2081 list_add(&new_fcf_pri->list, &fcf_pri->list);
2082 ret = 0;
2083 goto out;
2084 }
2085 if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
2086 continue;
2087
2088 }
2089 ret = 1;
2090out:
2091 /* we use = instead of |= to clear the FLOGI_FAILED flag. */
2092 new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
2093 spin_unlock_irq(&phba->hbalock);
2094 return ret;
2095}
2096
2097/**
1900 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. 2098 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
1901 * @phba: pointer to lpfc hba data structure. 2099 * @phba: pointer to lpfc hba data structure.
1902 * @mboxq: pointer to mailbox object. 2100 * @mboxq: pointer to mailbox object.
@@ -1958,6 +2156,9 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1958 * record for roundrobin FCF failover. 2156 * record for roundrobin FCF failover.
1959 */ 2157 */
1960 if (!rc) { 2158 if (!rc) {
2159 lpfc_sli4_fcf_pri_list_del(phba,
2160 bf_get(lpfc_fcf_record_fcf_index,
2161 new_fcf_record));
1961 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2162 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1962 "2781 FCF (x%x) failed connection " 2163 "2781 FCF (x%x) failed connection "
1963 "list check: (x%x/x%x)\n", 2164 "list check: (x%x/x%x)\n",
@@ -2005,7 +2206,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2005 goto read_next_fcf; 2206 goto read_next_fcf;
2006 } else { 2207 } else {
2007 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2208 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2008 rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); 2209 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
2210 new_fcf_record);
2009 if (rc) 2211 if (rc)
2010 goto read_next_fcf; 2212 goto read_next_fcf;
2011 } 2213 }
@@ -2018,7 +2220,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2018 */ 2220 */
2019 spin_lock_irq(&phba->hbalock); 2221 spin_lock_irq(&phba->hbalock);
2020 if (phba->fcf.fcf_flag & FCF_IN_USE) { 2222 if (phba->fcf.fcf_flag & FCF_IN_USE) {
2021 if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 2223 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2224 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2022 new_fcf_record, vlan_id)) { 2225 new_fcf_record, vlan_id)) {
2023 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) == 2226 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
2024 phba->fcf.current_rec.fcf_indx) { 2227 phba->fcf.current_rec.fcf_indx) {
@@ -2232,7 +2435,8 @@ read_next_fcf:
2232 (phba->fcf.fcf_flag & FCF_REDISC_PEND)) 2435 (phba->fcf.fcf_flag & FCF_REDISC_PEND))
2233 return; 2436 return;
2234 2437
2235 if (phba->fcf.fcf_flag & FCF_IN_USE) { 2438 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2439 phba->fcf.fcf_flag & FCF_IN_USE) {
2236 /* 2440 /*
2237 * In case the current in-use FCF record no 2441 * In case the current in-use FCF record no
2238 * longer existed during FCF discovery that 2442 * longer existed during FCF discovery that
@@ -2247,7 +2451,6 @@ read_next_fcf:
2247 spin_lock_irq(&phba->hbalock); 2451 spin_lock_irq(&phba->hbalock);
2248 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 2452 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2249 spin_unlock_irq(&phba->hbalock); 2453 spin_unlock_irq(&phba->hbalock);
2250 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2251 lpfc_sli4_fcf_scan_read_fcf_rec(phba, 2454 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2252 LPFC_FCOE_FCF_GET_FIRST); 2455 LPFC_FCOE_FCF_GET_FIRST);
2253 return; 2456 return;
@@ -2424,7 +2627,8 @@ lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2424 2627
2425 /* Update the eligible FCF record index bmask */ 2628 /* Update the eligible FCF record index bmask */
2426 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2629 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2427 rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); 2630
2631 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
2428 2632
2429out: 2633out:
2430 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2634 lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -2645,6 +2849,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2645 vport->vpi_state |= LPFC_VPI_REGISTERED; 2849 vport->vpi_state |= LPFC_VPI_REGISTERED;
2646 vport->fc_flag |= FC_VFI_REGISTERED; 2850 vport->fc_flag |= FC_VFI_REGISTERED;
2647 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 2851 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2852 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2648 spin_unlock_irq(shost->host_lock); 2853 spin_unlock_irq(shost->host_lock);
2649 2854
2650 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2855 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
@@ -2893,8 +3098,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
2893 goto out; 3098 goto out;
2894 } 3099 }
2895 /* Reset FCF roundrobin bmask for new discovery */ 3100 /* Reset FCF roundrobin bmask for new discovery */
2896 memset(phba->fcf.fcf_rr_bmask, 0, 3101 lpfc_sli4_clear_fcf_rr_bmask(phba);
2897 sizeof(*phba->fcf.fcf_rr_bmask));
2898 } 3102 }
2899 3103
2900 return; 3104 return;
@@ -5592,7 +5796,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
5592 spin_unlock_irq(&phba->hbalock); 5796 spin_unlock_irq(&phba->hbalock);
5593 5797
5594 /* Reset FCF roundrobin bmask for new discovery */ 5798 /* Reset FCF roundrobin bmask for new discovery */
5595 memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask)); 5799 lpfc_sli4_clear_fcf_rr_bmask(phba);
5596 5800
5597 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 5801 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
5598 5802
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index ab4c4d651d0c..046edc4ab35f 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -3470,11 +3470,16 @@ typedef struct {
3470 or CMD_IOCB_RCV_SEQ64_CX (0xB5) */ 3470 or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
3471 3471
3472struct rcv_sli3 { 3472struct rcv_sli3 {
3473 uint32_t word8Rsvd;
3474#ifdef __BIG_ENDIAN_BITFIELD 3473#ifdef __BIG_ENDIAN_BITFIELD
3474 uint16_t ox_id;
3475 uint16_t seq_cnt;
3476
3475 uint16_t vpi; 3477 uint16_t vpi;
3476 uint16_t word9Rsvd; 3478 uint16_t word9Rsvd;
3477#else /* __LITTLE_ENDIAN */ 3479#else /* __LITTLE_ENDIAN */
3480 uint16_t seq_cnt;
3481 uint16_t ox_id;
3482
3478 uint16_t word9Rsvd; 3483 uint16_t word9Rsvd;
3479 uint16_t vpi; 3484 uint16_t vpi;
3480#endif 3485#endif
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 11e26a26b5d1..7f8003b5181e 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -170,15 +170,8 @@ struct lpfc_sli_intf {
170#define LPFC_PCI_FUNC3 3 170#define LPFC_PCI_FUNC3 3
171#define LPFC_PCI_FUNC4 4 171#define LPFC_PCI_FUNC4 4
172 172
173/* SLI4 interface type-2 control register offsets */ 173/* SLI4 interface type-2 PDEV_CTL register */
174#define LPFC_CTL_PORT_SEM_OFFSET 0x400
175#define LPFC_CTL_PORT_STA_OFFSET 0x404
176#define LPFC_CTL_PORT_CTL_OFFSET 0x408
177#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
178#define LPFC_CTL_PORT_ER2_OFFSET 0x410
179#define LPFC_CTL_PDEV_CTL_OFFSET 0x414 174#define LPFC_CTL_PDEV_CTL_OFFSET 0x414
180
181/* Some SLI4 interface type-2 PDEV_CTL register bits */
182#define LPFC_CTL_PDEV_CTL_DRST 0x00000001 175#define LPFC_CTL_PDEV_CTL_DRST 0x00000001
183#define LPFC_CTL_PDEV_CTL_FRST 0x00000002 176#define LPFC_CTL_PDEV_CTL_FRST 0x00000002
184#define LPFC_CTL_PDEV_CTL_DD 0x00000004 177#define LPFC_CTL_PDEV_CTL_DD 0x00000004
@@ -337,6 +330,7 @@ struct lpfc_cqe {
337#define CQE_CODE_RELEASE_WQE 0x2 330#define CQE_CODE_RELEASE_WQE 0x2
338#define CQE_CODE_RECEIVE 0x4 331#define CQE_CODE_RECEIVE 0x4
339#define CQE_CODE_XRI_ABORTED 0x5 332#define CQE_CODE_XRI_ABORTED 0x5
333#define CQE_CODE_RECEIVE_V1 0x9
340 334
341/* completion queue entry for wqe completions */ 335/* completion queue entry for wqe completions */
342struct lpfc_wcqe_complete { 336struct lpfc_wcqe_complete {
@@ -440,7 +434,10 @@ struct lpfc_rcqe {
440#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */ 434#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
441#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */ 435#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
442#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */ 436#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
443 uint32_t reserved1; 437 uint32_t word1;
438#define lpfc_rcqe_fcf_id_v1_SHIFT 0
439#define lpfc_rcqe_fcf_id_v1_MASK 0x0000003F
440#define lpfc_rcqe_fcf_id_v1_WORD word1
444 uint32_t word2; 441 uint32_t word2;
445#define lpfc_rcqe_length_SHIFT 16 442#define lpfc_rcqe_length_SHIFT 16
446#define lpfc_rcqe_length_MASK 0x0000FFFF 443#define lpfc_rcqe_length_MASK 0x0000FFFF
@@ -451,6 +448,9 @@ struct lpfc_rcqe {
451#define lpfc_rcqe_fcf_id_SHIFT 0 448#define lpfc_rcqe_fcf_id_SHIFT 0
452#define lpfc_rcqe_fcf_id_MASK 0x0000003F 449#define lpfc_rcqe_fcf_id_MASK 0x0000003F
453#define lpfc_rcqe_fcf_id_WORD word2 450#define lpfc_rcqe_fcf_id_WORD word2
451#define lpfc_rcqe_rq_id_v1_SHIFT 0
452#define lpfc_rcqe_rq_id_v1_MASK 0x0000FFFF
453#define lpfc_rcqe_rq_id_v1_WORD word2
454 uint32_t word3; 454 uint32_t word3;
455#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT 455#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT
456#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK 456#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK
@@ -515,7 +515,7 @@ struct lpfc_register {
515/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */ 515/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
516#define LPFC_SLI_INTF 0x0058 516#define LPFC_SLI_INTF 0x0058
517 517
518#define LPFC_SLIPORT_IF2_SMPHR 0x0400 518#define LPFC_CTL_PORT_SEM_OFFSET 0x400
519#define lpfc_port_smphr_perr_SHIFT 31 519#define lpfc_port_smphr_perr_SHIFT 31
520#define lpfc_port_smphr_perr_MASK 0x1 520#define lpfc_port_smphr_perr_MASK 0x1
521#define lpfc_port_smphr_perr_WORD word0 521#define lpfc_port_smphr_perr_WORD word0
@@ -575,7 +575,7 @@ struct lpfc_register {
575#define LPFC_POST_STAGE_PORT_READY 0xC000 575#define LPFC_POST_STAGE_PORT_READY 0xC000
576#define LPFC_POST_STAGE_PORT_UE 0xF000 576#define LPFC_POST_STAGE_PORT_UE 0xF000
577 577
578#define LPFC_SLIPORT_STATUS 0x0404 578#define LPFC_CTL_PORT_STA_OFFSET 0x404
579#define lpfc_sliport_status_err_SHIFT 31 579#define lpfc_sliport_status_err_SHIFT 31
580#define lpfc_sliport_status_err_MASK 0x1 580#define lpfc_sliport_status_err_MASK 0x1
581#define lpfc_sliport_status_err_WORD word0 581#define lpfc_sliport_status_err_WORD word0
@@ -593,7 +593,7 @@ struct lpfc_register {
593#define lpfc_sliport_status_rdy_WORD word0 593#define lpfc_sliport_status_rdy_WORD word0
594#define MAX_IF_TYPE_2_RESETS 1000 594#define MAX_IF_TYPE_2_RESETS 1000
595 595
596#define LPFC_SLIPORT_CNTRL 0x0408 596#define LPFC_CTL_PORT_CTL_OFFSET 0x408
597#define lpfc_sliport_ctrl_end_SHIFT 30 597#define lpfc_sliport_ctrl_end_SHIFT 30
598#define lpfc_sliport_ctrl_end_MASK 0x1 598#define lpfc_sliport_ctrl_end_MASK 0x1
599#define lpfc_sliport_ctrl_end_WORD word0 599#define lpfc_sliport_ctrl_end_WORD word0
@@ -604,8 +604,8 @@ struct lpfc_register {
604#define lpfc_sliport_ctrl_ip_WORD word0 604#define lpfc_sliport_ctrl_ip_WORD word0
605#define LPFC_SLIPORT_INIT_PORT 1 605#define LPFC_SLIPORT_INIT_PORT 1
606 606
607#define LPFC_SLIPORT_ERR_1 0x040C 607#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
608#define LPFC_SLIPORT_ERR_2 0x0410 608#define LPFC_CTL_PORT_ER2_OFFSET 0x410
609 609
610/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically 610/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
611 * reside in BAR 2. 611 * reside in BAR 2.
@@ -3198,6 +3198,8 @@ struct lpfc_grp_hdr {
3198#define lpfc_grp_hdr_id_MASK 0x000000FF 3198#define lpfc_grp_hdr_id_MASK 0x000000FF
3199#define lpfc_grp_hdr_id_WORD word2 3199#define lpfc_grp_hdr_id_WORD word2
3200 uint8_t rev_name[128]; 3200 uint8_t rev_name[128];
3201 uint8_t date[12];
3202 uint8_t revision[32];
3201}; 3203};
3202 3204
3203#define FCP_COMMAND 0x0 3205#define FCP_COMMAND 0x0
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 148b98ddbb1d..a3c820083c36 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2927,6 +2927,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
2927 sizeof fc_host_symbolic_name(shost)); 2927 sizeof fc_host_symbolic_name(shost));
2928 2928
2929 fc_host_supported_speeds(shost) = 0; 2929 fc_host_supported_speeds(shost) = 0;
2930 if (phba->lmt & LMT_16Gb)
2931 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
2930 if (phba->lmt & LMT_10Gb) 2932 if (phba->lmt & LMT_10Gb)
2931 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 2933 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2932 if (phba->lmt & LMT_8Gb) 2934 if (phba->lmt & LMT_8Gb)
@@ -3632,8 +3634,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3632 lpfc_sli4_fcf_dead_failthrough(phba); 3634 lpfc_sli4_fcf_dead_failthrough(phba);
3633 } else { 3635 } else {
3634 /* Reset FCF roundrobin bmask for new discovery */ 3636 /* Reset FCF roundrobin bmask for new discovery */
3635 memset(phba->fcf.fcf_rr_bmask, 0, 3637 lpfc_sli4_clear_fcf_rr_bmask(phba);
3636 sizeof(*phba->fcf.fcf_rr_bmask));
3637 /* 3638 /*
3638 * Handling fast FCF failover to a DEAD FCF event is 3639 * Handling fast FCF failover to a DEAD FCF event is
3639 * considered equalivant to receiving CVL to all vports. 3640 * considered equalivant to receiving CVL to all vports.
@@ -3647,7 +3648,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3647 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 3648 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
3648 3649
3649 vport = lpfc_find_vport_by_vpid(phba, 3650 vport = lpfc_find_vport_by_vpid(phba,
3650 acqe_fip->index - phba->vpi_base); 3651 acqe_fip->index);
3651 ndlp = lpfc_sli4_perform_vport_cvl(vport); 3652 ndlp = lpfc_sli4_perform_vport_cvl(vport);
3652 if (!ndlp) 3653 if (!ndlp)
3653 break; 3654 break;
@@ -3719,8 +3720,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3719 * Reset FCF roundrobin bmask for new 3720 * Reset FCF roundrobin bmask for new
3720 * discovery. 3721 * discovery.
3721 */ 3722 */
3722 memset(phba->fcf.fcf_rr_bmask, 0, 3723 lpfc_sli4_clear_fcf_rr_bmask(phba);
3723 sizeof(*phba->fcf.fcf_rr_bmask));
3724 } 3724 }
3725 break; 3725 break;
3726 default: 3726 default:
@@ -4035,6 +4035,34 @@ lpfc_reset_hba(struct lpfc_hba *phba)
4035} 4035}
4036 4036
4037/** 4037/**
4038 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
4039 * @phba: pointer to lpfc hba data structure.
4040 *
4041 * This function enables the PCI SR-IOV virtual functions to a physical
4042 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
4043 * enable the number of virtual functions to the physical function. As
4044 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
4045 * API call does not considered as an error condition for most of the device.
4046 **/
4047uint16_t
4048lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
4049{
4050 struct pci_dev *pdev = phba->pcidev;
4051 uint16_t nr_virtfn;
4052 int pos;
4053
4054 if (!pdev->is_physfn)
4055 return 0;
4056
4057 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4058 if (pos == 0)
4059 return 0;
4060
4061 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
4062 return nr_virtfn;
4063}
4064
4065/**
4038 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 4066 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4039 * @phba: pointer to lpfc hba data structure. 4067 * @phba: pointer to lpfc hba data structure.
4040 * @nr_vfn: number of virtual functions to be enabled. 4068 * @nr_vfn: number of virtual functions to be enabled.
@@ -4049,8 +4077,17 @@ int
4049lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 4077lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4050{ 4078{
4051 struct pci_dev *pdev = phba->pcidev; 4079 struct pci_dev *pdev = phba->pcidev;
4080 uint16_t max_nr_vfn;
4052 int rc; 4081 int rc;
4053 4082
4083 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
4084 if (nr_vfn > max_nr_vfn) {
4085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4086 "3057 Requested vfs (%d) greater than "
4087 "supported vfs (%d)", nr_vfn, max_nr_vfn);
4088 return -EINVAL;
4089 }
4090
4054 rc = pci_enable_sriov(pdev, nr_vfn); 4091 rc = pci_enable_sriov(pdev, nr_vfn);
4055 if (rc) { 4092 if (rc) {
4056 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4093 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -4516,7 +4553,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4516 } 4553 }
4517 } 4554 }
4518 4555
4519 return rc; 4556 return 0;
4520 4557
4521out_free_fcp_eq_hdl: 4558out_free_fcp_eq_hdl:
4522 kfree(phba->sli4_hba.fcp_eq_hdl); 4559 kfree(phba->sli4_hba.fcp_eq_hdl);
@@ -4966,17 +5003,14 @@ out_free_mem:
4966 * @phba: pointer to lpfc hba data structure. 5003 * @phba: pointer to lpfc hba data structure.
4967 * 5004 *
4968 * This routine is invoked to post rpi header templates to the 5005 * This routine is invoked to post rpi header templates to the
4969 * HBA consistent with the SLI-4 interface spec. This routine 5006 * port for those SLI4 ports that do not support extents. This routine
4970 * posts a PAGE_SIZE memory region to the port to hold up to 5007 * posts a PAGE_SIZE memory region to the port to hold up to
4971 * PAGE_SIZE modulo 64 rpi context headers. 5008 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine
4972 * No locks are held here because this is an initialization routine 5009 * and should be called only when interrupts are disabled.
4973 * called only from probe or lpfc_online when interrupts are not
4974 * enabled and the driver is reinitializing the device.
4975 * 5010 *
4976 * Return codes 5011 * Return codes
4977 * 0 - successful 5012 * 0 - successful
4978 * -ENOMEM - No available memory 5013 * -ERROR - otherwise.
4979 * -EIO - The mailbox failed to complete successfully.
4980 **/ 5014 **/
4981int 5015int
4982lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 5016lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
@@ -5687,17 +5721,22 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
5687 break; 5721 break;
5688 case LPFC_SLI_INTF_IF_TYPE_2: 5722 case LPFC_SLI_INTF_IF_TYPE_2:
5689 phba->sli4_hba.u.if_type2.ERR1regaddr = 5723 phba->sli4_hba.u.if_type2.ERR1regaddr =
5690 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1; 5724 phba->sli4_hba.conf_regs_memmap_p +
5725 LPFC_CTL_PORT_ER1_OFFSET;
5691 phba->sli4_hba.u.if_type2.ERR2regaddr = 5726 phba->sli4_hba.u.if_type2.ERR2regaddr =
5692 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2; 5727 phba->sli4_hba.conf_regs_memmap_p +
5728 LPFC_CTL_PORT_ER2_OFFSET;
5693 phba->sli4_hba.u.if_type2.CTRLregaddr = 5729 phba->sli4_hba.u.if_type2.CTRLregaddr =
5694 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL; 5730 phba->sli4_hba.conf_regs_memmap_p +
5731 LPFC_CTL_PORT_CTL_OFFSET;
5695 phba->sli4_hba.u.if_type2.STATUSregaddr = 5732 phba->sli4_hba.u.if_type2.STATUSregaddr =
5696 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS; 5733 phba->sli4_hba.conf_regs_memmap_p +
5734 LPFC_CTL_PORT_STA_OFFSET;
5697 phba->sli4_hba.SLIINTFregaddr = 5735 phba->sli4_hba.SLIINTFregaddr =
5698 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 5736 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5699 phba->sli4_hba.PSMPHRregaddr = 5737 phba->sli4_hba.PSMPHRregaddr =
5700 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR; 5738 phba->sli4_hba.conf_regs_memmap_p +
5739 LPFC_CTL_PORT_SEM_OFFSET;
5701 phba->sli4_hba.RQDBregaddr = 5740 phba->sli4_hba.RQDBregaddr =
5702 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL; 5741 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
5703 phba->sli4_hba.WQDBregaddr = 5742 phba->sli4_hba.WQDBregaddr =
@@ -8859,11 +8898,11 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
8859 return -EINVAL; 8898 return -EINVAL;
8860 } 8899 }
8861 lpfc_decode_firmware_rev(phba, fwrev, 1); 8900 lpfc_decode_firmware_rev(phba, fwrev, 1);
8862 if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) { 8901 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
8863 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8864 "3023 Updating Firmware. Current Version:%s " 8903 "3023 Updating Firmware. Current Version:%s "
8865 "New Version:%s\n", 8904 "New Version:%s\n",
8866 fwrev, image->rev_name); 8905 fwrev, image->revision);
8867 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 8906 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
8868 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 8907 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
8869 GFP_KERNEL); 8908 GFP_KERNEL);
@@ -8892,9 +8931,9 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
8892 fw->size - offset); 8931 fw->size - offset);
8893 break; 8932 break;
8894 } 8933 }
8895 temp_offset += SLI4_PAGE_SIZE;
8896 memcpy(dmabuf->virt, fw->data + temp_offset, 8934 memcpy(dmabuf->virt, fw->data + temp_offset,
8897 SLI4_PAGE_SIZE); 8935 SLI4_PAGE_SIZE);
8936 temp_offset += SLI4_PAGE_SIZE;
8898 } 8937 }
8899 rc = lpfc_wr_object(phba, &dma_buffer_list, 8938 rc = lpfc_wr_object(phba, &dma_buffer_list,
8900 (fw->size - offset), &offset); 8939 (fw->size - offset), &offset);
@@ -9005,6 +9044,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9005 } 9044 }
9006 9045
9007 INIT_LIST_HEAD(&phba->active_rrq_list); 9046 INIT_LIST_HEAD(&phba->active_rrq_list);
9047 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
9008 9048
9009 /* Set up common device driver resources */ 9049 /* Set up common device driver resources */
9010 error = lpfc_setup_driver_resource_phase2(phba); 9050 error = lpfc_setup_driver_resource_phase2(phba);
@@ -9112,7 +9152,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9112 9152
9113 /* Check if there are static vports to be created. */ 9153 /* Check if there are static vports to be created. */
9114 lpfc_create_static_vport(phba); 9154 lpfc_create_static_vport(phba);
9115
9116 return 0; 9155 return 0;
9117 9156
9118out_disable_intr: 9157out_disable_intr:
@@ -9483,6 +9522,13 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
9483 } 9522 }
9484 9523
9485 pci_restore_state(pdev); 9524 pci_restore_state(pdev);
9525
9526 /*
9527 * As the new kernel behavior of pci_restore_state() API call clears
9528 * device saved_state flag, need to save the restored state again.
9529 */
9530 pci_save_state(pdev);
9531
9486 if (pdev->is_busmaster) 9532 if (pdev->is_busmaster)
9487 pci_set_master(pdev); 9533 pci_set_master(pdev);
9488 9534
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 556767028353..83450cc5c4d3 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2031,7 +2031,7 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2031 bf_set(lpfc_init_vfi_vp, init_vfi, 1); 2031 bf_set(lpfc_init_vfi_vp, init_vfi, 1);
2032 bf_set(lpfc_init_vfi_vfi, init_vfi, 2032 bf_set(lpfc_init_vfi_vfi, init_vfi,
2033 vport->phba->sli4_hba.vfi_ids[vport->vfi]); 2033 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2034 bf_set(lpfc_init_vpi_vpi, init_vfi, 2034 bf_set(lpfc_init_vfi_vpi, init_vfi,
2035 vport->phba->vpi_ids[vport->vpi]); 2035 vport->phba->vpi_ids[vport->vpi]);
2036 bf_set(lpfc_init_vfi_fcfi, init_vfi, 2036 bf_set(lpfc_init_vfi_fcfi, init_vfi,
2037 vport->phba->fcf.fcfi); 2037 vport->phba->fcf.fcfi);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3ccc97496ebf..eadd241eeff1 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1302,13 +1302,13 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1302 case SCSI_PROT_NORMAL: 1302 case SCSI_PROT_NORMAL:
1303 default: 1303 default:
1304 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1304 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1305 "9063 BLKGRD: Bad op/guard:%d/%d combination\n", 1305 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1306 scsi_get_prot_op(sc), guard_type); 1306 scsi_get_prot_op(sc));
1307 ret = 1; 1307 ret = 1;
1308 break; 1308 break;
1309 1309
1310 } 1310 }
1311 } else if (guard_type == SHOST_DIX_GUARD_CRC) { 1311 } else {
1312 switch (scsi_get_prot_op(sc)) { 1312 switch (scsi_get_prot_op(sc)) {
1313 case SCSI_PROT_READ_STRIP: 1313 case SCSI_PROT_READ_STRIP:
1314 case SCSI_PROT_WRITE_INSERT: 1314 case SCSI_PROT_WRITE_INSERT:
@@ -1324,17 +1324,18 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1324 1324
1325 case SCSI_PROT_READ_INSERT: 1325 case SCSI_PROT_READ_INSERT:
1326 case SCSI_PROT_WRITE_STRIP: 1326 case SCSI_PROT_WRITE_STRIP:
1327 *txop = BG_OP_IN_CRC_OUT_NODIF;
1328 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1329 break;
1330
1327 case SCSI_PROT_NORMAL: 1331 case SCSI_PROT_NORMAL:
1328 default: 1332 default:
1329 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1333 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1330 "9075 BLKGRD: Bad op/guard:%d/%d combination\n", 1334 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1331 scsi_get_prot_op(sc), guard_type); 1335 scsi_get_prot_op(sc));
1332 ret = 1; 1336 ret = 1;
1333 break; 1337 break;
1334 } 1338 }
1335 } else {
1336 /* unsupported format */
1337 BUG();
1338 } 1339 }
1339 1340
1340 return ret; 1341 return ret;
@@ -1352,45 +1353,6 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
1352 return sc->device->sector_size; 1353 return sc->device->sector_size;
1353} 1354}
1354 1355
1355/**
1356 * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
1357 * @sc: in: SCSI command
1358 * @apptagmask: out: app tag mask
1359 * @apptagval: out: app tag value
1360 * @reftag: out: ref tag (reference tag)
1361 *
1362 * Description:
1363 * Extract DIF parameters from the command if possible. Otherwise,
1364 * use default parameters.
1365 *
1366 **/
1367static inline void
1368lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
1369 uint16_t *apptagval, uint32_t *reftag)
1370{
1371 struct scsi_dif_tuple *spt;
1372 unsigned char op = scsi_get_prot_op(sc);
1373 unsigned int protcnt = scsi_prot_sg_count(sc);
1374 static int cnt;
1375
1376 if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
1377 op == SCSI_PROT_WRITE_PASS)) {
1378
1379 cnt++;
1380 spt = page_address(sg_page(scsi_prot_sglist(sc))) +
1381 scsi_prot_sglist(sc)[0].offset;
1382 *apptagmask = 0;
1383 *apptagval = 0;
1384 *reftag = cpu_to_be32(spt->ref_tag);
1385
1386 } else {
1387 /* SBC defines ref tag to be lower 32bits of LBA */
1388 *reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
1389 *apptagmask = 0;
1390 *apptagval = 0;
1391 }
1392}
1393
1394/* 1356/*
1395 * This function sets up buffer list for protection groups of 1357 * This function sets up buffer list for protection groups of
1396 * type LPFC_PG_TYPE_NO_DIF 1358 * type LPFC_PG_TYPE_NO_DIF
@@ -1427,9 +1389,8 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1427 dma_addr_t physaddr; 1389 dma_addr_t physaddr;
1428 int i = 0, num_bde = 0, status; 1390 int i = 0, num_bde = 0, status;
1429 int datadir = sc->sc_data_direction; 1391 int datadir = sc->sc_data_direction;
1430 unsigned blksize;
1431 uint32_t reftag; 1392 uint32_t reftag;
1432 uint16_t apptagmask, apptagval; 1393 unsigned blksize;
1433 uint8_t txop, rxop; 1394 uint8_t txop, rxop;
1434 1395
1435 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 1396 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
@@ -1438,17 +1399,16 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1438 1399
1439 /* extract some info from the scsi command for pde*/ 1400 /* extract some info from the scsi command for pde*/
1440 blksize = lpfc_cmd_blksize(sc); 1401 blksize = lpfc_cmd_blksize(sc);
1441 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); 1402 reftag = scsi_get_lba(sc) & 0xffffffff;
1442 1403
1443 /* setup PDE5 with what we have */ 1404 /* setup PDE5 with what we have */
1444 pde5 = (struct lpfc_pde5 *) bpl; 1405 pde5 = (struct lpfc_pde5 *) bpl;
1445 memset(pde5, 0, sizeof(struct lpfc_pde5)); 1406 memset(pde5, 0, sizeof(struct lpfc_pde5));
1446 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1407 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1447 pde5->reftag = reftag;
1448 1408
1449 /* Endianness conversion if necessary for PDE5 */ 1409 /* Endianness conversion if necessary for PDE5 */
1450 pde5->word0 = cpu_to_le32(pde5->word0); 1410 pde5->word0 = cpu_to_le32(pde5->word0);
1451 pde5->reftag = cpu_to_le32(pde5->reftag); 1411 pde5->reftag = cpu_to_le32(reftag);
1452 1412
1453 /* advance bpl and increment bde count */ 1413 /* advance bpl and increment bde count */
1454 num_bde++; 1414 num_bde++;
@@ -1463,10 +1423,10 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1463 if (datadir == DMA_FROM_DEVICE) { 1423 if (datadir == DMA_FROM_DEVICE) {
1464 bf_set(pde6_ce, pde6, 1); 1424 bf_set(pde6_ce, pde6, 1);
1465 bf_set(pde6_re, pde6, 1); 1425 bf_set(pde6_re, pde6, 1);
1466 bf_set(pde6_ae, pde6, 1);
1467 } 1426 }
1468 bf_set(pde6_ai, pde6, 1); 1427 bf_set(pde6_ai, pde6, 1);
1469 bf_set(pde6_apptagval, pde6, apptagval); 1428 bf_set(pde6_ae, pde6, 0);
1429 bf_set(pde6_apptagval, pde6, 0);
1470 1430
1471 /* Endianness conversion if necessary for PDE6 */ 1431 /* Endianness conversion if necessary for PDE6 */
1472 pde6->word0 = cpu_to_le32(pde6->word0); 1432 pde6->word0 = cpu_to_le32(pde6->word0);
@@ -1551,7 +1511,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1551 unsigned char pgdone = 0, alldone = 0; 1511 unsigned char pgdone = 0, alldone = 0;
1552 unsigned blksize; 1512 unsigned blksize;
1553 uint32_t reftag; 1513 uint32_t reftag;
1554 uint16_t apptagmask, apptagval;
1555 uint8_t txop, rxop; 1514 uint8_t txop, rxop;
1556 int num_bde = 0; 1515 int num_bde = 0;
1557 1516
@@ -1571,7 +1530,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1571 1530
1572 /* extract some info from the scsi command */ 1531 /* extract some info from the scsi command */
1573 blksize = lpfc_cmd_blksize(sc); 1532 blksize = lpfc_cmd_blksize(sc);
1574 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); 1533 reftag = scsi_get_lba(sc) & 0xffffffff;
1575 1534
1576 split_offset = 0; 1535 split_offset = 0;
1577 do { 1536 do {
@@ -1579,11 +1538,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1579 pde5 = (struct lpfc_pde5 *) bpl; 1538 pde5 = (struct lpfc_pde5 *) bpl;
1580 memset(pde5, 0, sizeof(struct lpfc_pde5)); 1539 memset(pde5, 0, sizeof(struct lpfc_pde5));
1581 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1540 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1582 pde5->reftag = reftag;
1583 1541
1584 /* Endianness conversion if necessary for PDE5 */ 1542 /* Endianness conversion if necessary for PDE5 */
1585 pde5->word0 = cpu_to_le32(pde5->word0); 1543 pde5->word0 = cpu_to_le32(pde5->word0);
1586 pde5->reftag = cpu_to_le32(pde5->reftag); 1544 pde5->reftag = cpu_to_le32(reftag);
1587 1545
1588 /* advance bpl and increment bde count */ 1546 /* advance bpl and increment bde count */
1589 num_bde++; 1547 num_bde++;
@@ -1597,9 +1555,9 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1597 bf_set(pde6_oprx, pde6, rxop); 1555 bf_set(pde6_oprx, pde6, rxop);
1598 bf_set(pde6_ce, pde6, 1); 1556 bf_set(pde6_ce, pde6, 1);
1599 bf_set(pde6_re, pde6, 1); 1557 bf_set(pde6_re, pde6, 1);
1600 bf_set(pde6_ae, pde6, 1);
1601 bf_set(pde6_ai, pde6, 1); 1558 bf_set(pde6_ai, pde6, 1);
1602 bf_set(pde6_apptagval, pde6, apptagval); 1559 bf_set(pde6_ae, pde6, 0);
1560 bf_set(pde6_apptagval, pde6, 0);
1603 1561
1604 /* Endianness conversion if necessary for PDE6 */ 1562 /* Endianness conversion if necessary for PDE6 */
1605 pde6->word0 = cpu_to_le32(pde6->word0); 1563 pde6->word0 = cpu_to_le32(pde6->word0);
@@ -1621,8 +1579,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1621 memset(pde7, 0, sizeof(struct lpfc_pde7)); 1579 memset(pde7, 0, sizeof(struct lpfc_pde7));
1622 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); 1580 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1623 1581
1624 pde7->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr)); 1582 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1625 pde7->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); 1583 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1626 1584
1627 protgrp_blks = protgroup_len / 8; 1585 protgrp_blks = protgroup_len / 8;
1628 protgrp_bytes = protgrp_blks * blksize; 1586 protgrp_bytes = protgrp_blks * blksize;
@@ -1632,7 +1590,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1632 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); 1590 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1633 protgroup_offset += protgroup_remainder; 1591 protgroup_offset += protgroup_remainder;
1634 protgrp_blks = protgroup_remainder / 8; 1592 protgrp_blks = protgroup_remainder / 8;
1635 protgrp_bytes = protgroup_remainder * blksize; 1593 protgrp_bytes = protgrp_blks * blksize;
1636 } else { 1594 } else {
1637 protgroup_offset = 0; 1595 protgroup_offset = 0;
1638 curr_prot++; 1596 curr_prot++;
@@ -2006,16 +1964,21 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2006 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 1964 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
2007 /* 1965 /*
2008 * setup sense data descriptor 0 per SPC-4 as an information 1966 * setup sense data descriptor 0 per SPC-4 as an information
2009 * field, and put the failing LBA in it 1967 * field, and put the failing LBA in it.
1968 * This code assumes there was also a guard/app/ref tag error
1969 * indication.
2010 */ 1970 */
2011 cmd->sense_buffer[8] = 0; /* Information */ 1971 cmd->sense_buffer[7] = 0xc; /* Additional sense length */
2012 cmd->sense_buffer[9] = 0xa; /* Add. length */ 1972 cmd->sense_buffer[8] = 0; /* Information descriptor type */
1973 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
1974 cmd->sense_buffer[10] = 0x80; /* Validity bit */
2013 bghm /= cmd->device->sector_size; 1975 bghm /= cmd->device->sector_size;
2014 1976
2015 failing_sector = scsi_get_lba(cmd); 1977 failing_sector = scsi_get_lba(cmd);
2016 failing_sector += bghm; 1978 failing_sector += bghm;
2017 1979
2018 put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]); 1980 /* Descriptor Information */
1981 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
2019 } 1982 }
2020 1983
2021 if (!ret) { 1984 if (!ret) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 98999bbd8cbf..8b799f047a99 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -560,7 +560,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
560 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); 560 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
561 if (rrq) { 561 if (rrq) {
562 rrq->send_rrq = send_rrq; 562 rrq->send_rrq = send_rrq;
563 rrq->xritag = phba->sli4_hba.xri_ids[xritag]; 563 rrq->xritag = xritag;
564 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); 564 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
565 rrq->ndlp = ndlp; 565 rrq->ndlp = ndlp;
566 rrq->nlp_DID = ndlp->nlp_DID; 566 rrq->nlp_DID = ndlp->nlp_DID;
@@ -2452,7 +2452,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2452 2452
2453 /* search continue save q for same XRI */ 2453 /* search continue save q for same XRI */
2454 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2454 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2455 if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) { 2455 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2456 saveq->iocb.unsli3.rcvsli3.ox_id) {
2456 list_add_tail(&saveq->list, &iocbq->list); 2457 list_add_tail(&saveq->list, &iocbq->list);
2457 found = 1; 2458 found = 1;
2458 break; 2459 break;
@@ -3355,6 +3356,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3355 irspiocbq); 3356 irspiocbq);
3356 break; 3357 break;
3357 case CQE_CODE_RECEIVE: 3358 case CQE_CODE_RECEIVE:
3359 case CQE_CODE_RECEIVE_V1:
3358 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3360 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3359 cq_event); 3361 cq_event);
3360 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3362 lpfc_sli4_handle_received_buffer(phba, dmabuf);
@@ -4712,10 +4714,15 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4712 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 4714 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
4713 * @phba: Pointer to HBA context object. 4715 * @phba: Pointer to HBA context object.
4714 * @type: The resource extent type. 4716 * @type: The resource extent type.
4717 * @extnt_count: buffer to hold port available extent count.
4718 * @extnt_size: buffer to hold element count per extent.
4715 * 4719 *
4716 * This function allocates all SLI4 resource identifiers. 4720 * This function calls the port and retrievs the number of available
4721 * extents and their size for a particular extent type.
4722 *
4723 * Returns: 0 if successful. Nonzero otherwise.
4717 **/ 4724 **/
4718static int 4725int
4719lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 4726lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4720 uint16_t *extnt_count, uint16_t *extnt_size) 4727 uint16_t *extnt_count, uint16_t *extnt_size)
4721{ 4728{
@@ -4892,7 +4899,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
4892 req_len, *emb); 4899 req_len, *emb);
4893 if (alloc_len < req_len) { 4900 if (alloc_len < req_len) {
4894 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4901 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4895 "9000 Allocated DMA memory size (x%x) is " 4902 "2982 Allocated DMA memory size (x%x) is "
4896 "less than the requested DMA memory " 4903 "less than the requested DMA memory "
4897 "size (x%x)\n", alloc_len, req_len); 4904 "size (x%x)\n", alloc_len, req_len);
4898 return -ENOMEM; 4905 return -ENOMEM;
@@ -5506,6 +5513,154 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5506} 5513}
5507 5514
5508/** 5515/**
5516 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
5517 * @phba: Pointer to HBA context object.
5518 * @type: The resource extent type.
5519 * @extnt_count: buffer to hold port extent count response
5520 * @extnt_size: buffer to hold port extent size response.
5521 *
5522 * This function calls the port to read the host allocated extents
5523 * for a particular type.
5524 **/
5525int
5526lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5527 uint16_t *extnt_cnt, uint16_t *extnt_size)
5528{
5529 bool emb;
5530 int rc = 0;
5531 uint16_t curr_blks = 0;
5532 uint32_t req_len, emb_len;
5533 uint32_t alloc_len, mbox_tmo;
5534 struct list_head *blk_list_head;
5535 struct lpfc_rsrc_blks *rsrc_blk;
5536 LPFC_MBOXQ_t *mbox;
5537 void *virtaddr = NULL;
5538 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5539 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5540 union lpfc_sli4_cfg_shdr *shdr;
5541
5542 switch (type) {
5543 case LPFC_RSC_TYPE_FCOE_VPI:
5544 blk_list_head = &phba->lpfc_vpi_blk_list;
5545 break;
5546 case LPFC_RSC_TYPE_FCOE_XRI:
5547 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
5548 break;
5549 case LPFC_RSC_TYPE_FCOE_VFI:
5550 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
5551 break;
5552 case LPFC_RSC_TYPE_FCOE_RPI:
5553 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
5554 break;
5555 default:
5556 return -EIO;
5557 }
5558
5559 /* Count the number of extents currently allocatd for this type. */
5560 list_for_each_entry(rsrc_blk, blk_list_head, list) {
5561 if (curr_blks == 0) {
5562 /*
5563 * The GET_ALLOCATED mailbox does not return the size,
5564 * just the count. The size should be just the size
5565 * stored in the current allocated block and all sizes
5566 * for an extent type are the same so set the return
5567 * value now.
5568 */
5569 *extnt_size = rsrc_blk->rsrc_size;
5570 }
5571 curr_blks++;
5572 }
5573
5574 /* Calculate the total requested length of the dma memory. */
5575 req_len = curr_blks * sizeof(uint16_t);
5576
5577 /*
5578 * Calculate the size of an embedded mailbox. The uint32_t
5579 * accounts for extents-specific word.
5580 */
5581 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5582 sizeof(uint32_t);
5583
5584 /*
5585 * Presume the allocation and response will fit into an embedded
5586 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5587 */
5588 emb = LPFC_SLI4_MBX_EMBED;
5589 req_len = emb_len;
5590 if (req_len > emb_len) {
5591 req_len = curr_blks * sizeof(uint16_t) +
5592 sizeof(union lpfc_sli4_cfg_shdr) +
5593 sizeof(uint32_t);
5594 emb = LPFC_SLI4_MBX_NEMBED;
5595 }
5596
5597 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5598 if (!mbox)
5599 return -ENOMEM;
5600 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
5601
5602 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5603 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
5604 req_len, emb);
5605 if (alloc_len < req_len) {
5606 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5607 "2983 Allocated DMA memory size (x%x) is "
5608 "less than the requested DMA memory "
5609 "size (x%x)\n", alloc_len, req_len);
5610 rc = -ENOMEM;
5611 goto err_exit;
5612 }
5613 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
5614 if (unlikely(rc)) {
5615 rc = -EIO;
5616 goto err_exit;
5617 }
5618
5619 if (!phba->sli4_hba.intr_enable)
5620 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5621 else {
5622 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
5623 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5624 }
5625
5626 if (unlikely(rc)) {
5627 rc = -EIO;
5628 goto err_exit;
5629 }
5630
5631 /*
5632 * Figure out where the response is located. Then get local pointers
5633 * to the response data. The port does not guarantee to respond to
5634 * all extents counts request so update the local variable with the
5635 * allocated count from the port.
5636 */
5637 if (emb == LPFC_SLI4_MBX_EMBED) {
5638 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5639 shdr = &rsrc_ext->header.cfg_shdr;
5640 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5641 } else {
5642 virtaddr = mbox->sge_array->addr[0];
5643 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5644 shdr = &n_rsrc->cfg_shdr;
5645 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5646 }
5647
5648 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
5649 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5650 "2984 Failed to read allocated resources "
5651 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
5652 type,
5653 bf_get(lpfc_mbox_hdr_status, &shdr->response),
5654 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
5655 rc = -EIO;
5656 goto err_exit;
5657 }
5658 err_exit:
5659 lpfc_sli4_mbox_cmd_free(phba, mbox);
5660 return rc;
5661}
5662
5663/**
5509 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function 5664 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function
5510 * @phba: Pointer to HBA context object. 5665 * @phba: Pointer to HBA context object.
5511 * 5666 *
@@ -5837,6 +5992,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
5837 "Advanced Error Reporting (AER)\n"); 5992 "Advanced Error Reporting (AER)\n");
5838 phba->cfg_aer_support = 0; 5993 phba->cfg_aer_support = 0;
5839 } 5994 }
5995 rc = 0;
5840 } 5996 }
5841 5997
5842 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 5998 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
@@ -6634,6 +6790,9 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
6634 unsigned long iflags; 6790 unsigned long iflags;
6635 int rc; 6791 int rc;
6636 6792
6793 /* dump from issue mailbox command if setup */
6794 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
6795
6637 rc = lpfc_mbox_dev_check(phba); 6796 rc = lpfc_mbox_dev_check(phba);
6638 if (unlikely(rc)) { 6797 if (unlikely(rc)) {
6639 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -7318,12 +7477,12 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7318 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 7477 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
7319 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 7478 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
7320 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 7479 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
7321 break; 7480 break;
7322 case CMD_XMIT_SEQUENCE64_CX: 7481 case CMD_XMIT_SEQUENCE64_CX:
7323 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 7482 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
7324 iocbq->iocb.un.ulpWord[3]); 7483 iocbq->iocb.un.ulpWord[3]);
7325 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 7484 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
7326 iocbq->iocb.ulpContext); 7485 iocbq->iocb.unsli3.rcvsli3.ox_id);
7327 /* The entire sequence is transmitted for this IOCB */ 7486 /* The entire sequence is transmitted for this IOCB */
7328 xmit_len = total_len; 7487 xmit_len = total_len;
7329 cmnd = CMD_XMIT_SEQUENCE64_CR; 7488 cmnd = CMD_XMIT_SEQUENCE64_CR;
@@ -7341,7 +7500,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7341 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 7500 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
7342 wqe->xmit_sequence.xmit_len = xmit_len; 7501 wqe->xmit_sequence.xmit_len = xmit_len;
7343 command_type = OTHER_COMMAND; 7502 command_type = OTHER_COMMAND;
7344 break; 7503 break;
7345 case CMD_XMIT_BCAST64_CN: 7504 case CMD_XMIT_BCAST64_CN:
7346 /* word3 iocb=iotag32 wqe=seq_payload_len */ 7505 /* word3 iocb=iotag32 wqe=seq_payload_len */
7347 wqe->xmit_bcast64.seq_payload_len = xmit_len; 7506 wqe->xmit_bcast64.seq_payload_len = xmit_len;
@@ -7355,7 +7514,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7355 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 7514 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
7356 LPFC_WQE_LENLOC_WORD3); 7515 LPFC_WQE_LENLOC_WORD3);
7357 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 7516 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
7358 break; 7517 break;
7359 case CMD_FCP_IWRITE64_CR: 7518 case CMD_FCP_IWRITE64_CR:
7360 command_type = FCP_COMMAND_DATA_OUT; 7519 command_type = FCP_COMMAND_DATA_OUT;
7361 /* word3 iocb=iotag wqe=payload_offset_len */ 7520 /* word3 iocb=iotag wqe=payload_offset_len */
@@ -7375,7 +7534,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7375 LPFC_WQE_LENLOC_WORD4); 7534 LPFC_WQE_LENLOC_WORD4);
7376 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); 7535 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
7377 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 7536 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
7378 break; 7537 break;
7379 case CMD_FCP_IREAD64_CR: 7538 case CMD_FCP_IREAD64_CR:
7380 /* word3 iocb=iotag wqe=payload_offset_len */ 7539 /* word3 iocb=iotag wqe=payload_offset_len */
7381 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 7540 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
@@ -7394,7 +7553,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7394 LPFC_WQE_LENLOC_WORD4); 7553 LPFC_WQE_LENLOC_WORD4);
7395 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); 7554 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
7396 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 7555 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
7397 break; 7556 break;
7398 case CMD_FCP_ICMND64_CR: 7557 case CMD_FCP_ICMND64_CR:
7399 /* word3 iocb=IO_TAG wqe=reserved */ 7558 /* word3 iocb=IO_TAG wqe=reserved */
7400 wqe->fcp_icmd.rsrvd3 = 0; 7559 wqe->fcp_icmd.rsrvd3 = 0;
@@ -7407,7 +7566,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7407 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 7566 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
7408 LPFC_WQE_LENLOC_NONE); 7567 LPFC_WQE_LENLOC_NONE);
7409 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 7568 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
7410 break; 7569 break;
7411 case CMD_GEN_REQUEST64_CR: 7570 case CMD_GEN_REQUEST64_CR:
7412 /* For this command calculate the xmit length of the 7571 /* For this command calculate the xmit length of the
7413 * request bde. 7572 * request bde.
@@ -7442,7 +7601,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7442 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 7601 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
7443 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 7602 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
7444 command_type = OTHER_COMMAND; 7603 command_type = OTHER_COMMAND;
7445 break; 7604 break;
7446 case CMD_XMIT_ELS_RSP64_CX: 7605 case CMD_XMIT_ELS_RSP64_CX:
7447 ndlp = (struct lpfc_nodelist *)iocbq->context1; 7606 ndlp = (struct lpfc_nodelist *)iocbq->context1;
7448 /* words0-2 BDE memcpy */ 7607 /* words0-2 BDE memcpy */
@@ -7457,7 +7616,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7457 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 7616 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
7458 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 7617 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
7459 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 7618 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
7460 iocbq->iocb.ulpContext); 7619 iocbq->iocb.unsli3.rcvsli3.ox_id);
7461 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 7620 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
7462 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 7621 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
7463 phba->vpi_ids[iocbq->vport->vpi]); 7622 phba->vpi_ids[iocbq->vport->vpi]);
@@ -7470,7 +7629,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7470 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 7629 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
7471 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 7630 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
7472 command_type = OTHER_COMMAND; 7631 command_type = OTHER_COMMAND;
7473 break; 7632 break;
7474 case CMD_CLOSE_XRI_CN: 7633 case CMD_CLOSE_XRI_CN:
7475 case CMD_ABORT_XRI_CN: 7634 case CMD_ABORT_XRI_CN:
7476 case CMD_ABORT_XRI_CX: 7635 case CMD_ABORT_XRI_CX:
@@ -7509,7 +7668,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7509 cmnd = CMD_ABORT_XRI_CX; 7668 cmnd = CMD_ABORT_XRI_CX;
7510 command_type = OTHER_COMMAND; 7669 command_type = OTHER_COMMAND;
7511 xritag = 0; 7670 xritag = 0;
7512 break; 7671 break;
7513 case CMD_XMIT_BLS_RSP64_CX: 7672 case CMD_XMIT_BLS_RSP64_CX:
7514 /* As BLS ABTS RSP WQE is very different from other WQEs, 7673 /* As BLS ABTS RSP WQE is very different from other WQEs,
7515 * we re-construct this WQE here based on information in 7674 * we re-construct this WQE here based on information in
@@ -7553,7 +7712,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7553 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 7712 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
7554 } 7713 }
7555 7714
7556 break; 7715 break;
7557 case CMD_XRI_ABORTED_CX: 7716 case CMD_XRI_ABORTED_CX:
7558 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 7717 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
7559 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 7718 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
@@ -7565,7 +7724,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7565 "2014 Invalid command 0x%x\n", 7724 "2014 Invalid command 0x%x\n",
7566 iocbq->iocb.ulpCommand); 7725 iocbq->iocb.ulpCommand);
7567 return IOCB_ERROR; 7726 return IOCB_ERROR;
7568 break; 7727 break;
7569 } 7728 }
7570 7729
7571 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 7730 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
@@ -10481,10 +10640,14 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
10481 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 10640 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
10482 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 10641 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
10483 struct hbq_dmabuf *dma_buf; 10642 struct hbq_dmabuf *dma_buf;
10484 uint32_t status; 10643 uint32_t status, rq_id;
10485 unsigned long iflags; 10644 unsigned long iflags;
10486 10645
10487 if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id) 10646 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
10647 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
10648 else
10649 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
10650 if (rq_id != hrq->queue_id)
10488 goto out; 10651 goto out;
10489 10652
10490 status = bf_get(lpfc_rcqe_status, rcqe); 10653 status = bf_get(lpfc_rcqe_status, rcqe);
@@ -10563,6 +10726,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
10563 (struct sli4_wcqe_xri_aborted *)&cqevt); 10726 (struct sli4_wcqe_xri_aborted *)&cqevt);
10564 break; 10727 break;
10565 case CQE_CODE_RECEIVE: 10728 case CQE_CODE_RECEIVE:
10729 case CQE_CODE_RECEIVE_V1:
10566 /* Process the RQ event */ 10730 /* Process the RQ event */
10567 phba->last_completion_time = jiffies; 10731 phba->last_completion_time = jiffies;
10568 workposted = lpfc_sli4_sp_handle_rcqe(phba, 10732 workposted = lpfc_sli4_sp_handle_rcqe(phba,
@@ -12345,19 +12509,18 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
12345} 12509}
12346 12510
12347/** 12511/**
12348 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 12512 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range
12349 * @phba: pointer to lpfc hba data structure. 12513 * @phba: pointer to lpfc hba data structure.
12350 * 12514 *
12351 * This routine is invoked to post rpi header templates to the 12515 * This routine is invoked to post rpi header templates to the
12352 * port for those SLI4 ports that do not support extents. This routine 12516 * HBA consistent with the SLI-4 interface spec. This routine
12353 * posts a PAGE_SIZE memory region to the port to hold up to 12517 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
12354 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 12518 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
12355 * and should be called only when interrupts are disabled.
12356 * 12519 *
12357 * Return codes 12520 * Returns
12358 * 0 - successful 12521 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
12359 * -ERROR - otherwise. 12522 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
12360 */ 12523 **/
12361uint16_t 12524uint16_t
12362lpfc_sli4_alloc_xri(struct lpfc_hba *phba) 12525lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
12363{ 12526{
@@ -13406,7 +13569,7 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
13406 * This function validates the xri maps to the known range of XRIs allocated an 13569 * This function validates the xri maps to the known range of XRIs allocated an
13407 * used by the driver. 13570 * used by the driver.
13408 **/ 13571 **/
13409static uint16_t 13572uint16_t
13410lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 13573lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
13411 uint16_t xri) 13574 uint16_t xri)
13412{ 13575{
@@ -13643,10 +13806,12 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
13643static struct lpfc_iocbq * 13806static struct lpfc_iocbq *
13644lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 13807lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13645{ 13808{
13809 struct hbq_dmabuf *hbq_buf;
13646 struct lpfc_dmabuf *d_buf, *n_buf; 13810 struct lpfc_dmabuf *d_buf, *n_buf;
13647 struct lpfc_iocbq *first_iocbq, *iocbq; 13811 struct lpfc_iocbq *first_iocbq, *iocbq;
13648 struct fc_frame_header *fc_hdr; 13812 struct fc_frame_header *fc_hdr;
13649 uint32_t sid; 13813 uint32_t sid;
13814 uint32_t len, tot_len;
13650 struct ulp_bde64 *pbde; 13815 struct ulp_bde64 *pbde;
13651 13816
13652 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 13817 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
@@ -13655,6 +13820,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13655 lpfc_update_rcv_time_stamp(vport); 13820 lpfc_update_rcv_time_stamp(vport);
13656 /* get the Remote Port's SID */ 13821 /* get the Remote Port's SID */
13657 sid = sli4_sid_from_fc_hdr(fc_hdr); 13822 sid = sli4_sid_from_fc_hdr(fc_hdr);
13823 tot_len = 0;
13658 /* Get an iocbq struct to fill in. */ 13824 /* Get an iocbq struct to fill in. */
13659 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 13825 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
13660 if (first_iocbq) { 13826 if (first_iocbq) {
@@ -13662,9 +13828,12 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13662 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 13828 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
13663 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 13829 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
13664 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 13830 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
13665 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id); 13831 first_iocbq->iocb.ulpContext = NO_XRI;
13666 /* iocbq is prepped for internal consumption. Logical vpi. */ 13832 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
13667 first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi; 13833 be16_to_cpu(fc_hdr->fh_ox_id);
13834 /* iocbq is prepped for internal consumption. Physical vpi. */
13835 first_iocbq->iocb.unsli3.rcvsli3.vpi =
13836 vport->phba->vpi_ids[vport->vpi];
13668 /* put the first buffer into the first IOCBq */ 13837 /* put the first buffer into the first IOCBq */
13669 first_iocbq->context2 = &seq_dmabuf->dbuf; 13838 first_iocbq->context2 = &seq_dmabuf->dbuf;
13670 first_iocbq->context3 = NULL; 13839 first_iocbq->context3 = NULL;
@@ -13672,9 +13841,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13672 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 13841 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
13673 LPFC_DATA_BUF_SIZE; 13842 LPFC_DATA_BUF_SIZE;
13674 first_iocbq->iocb.un.rcvels.remoteID = sid; 13843 first_iocbq->iocb.un.rcvels.remoteID = sid;
13675 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 13844 tot_len = bf_get(lpfc_rcqe_length,
13676 bf_get(lpfc_rcqe_length,
13677 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 13845 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
13846 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
13678 } 13847 }
13679 iocbq = first_iocbq; 13848 iocbq = first_iocbq;
13680 /* 13849 /*
@@ -13692,9 +13861,13 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13692 pbde = (struct ulp_bde64 *) 13861 pbde = (struct ulp_bde64 *)
13693 &iocbq->iocb.unsli3.sli3Words[4]; 13862 &iocbq->iocb.unsli3.sli3Words[4];
13694 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 13863 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
13695 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 13864
13696 bf_get(lpfc_rcqe_length, 13865 /* We need to get the size out of the right CQE */
13697 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 13866 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
13867 len = bf_get(lpfc_rcqe_length,
13868 &hbq_buf->cq_event.cqe.rcqe_cmpl);
13869 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
13870 tot_len += len;
13698 } else { 13871 } else {
13699 iocbq = lpfc_sli_get_iocbq(vport->phba); 13872 iocbq = lpfc_sli_get_iocbq(vport->phba);
13700 if (!iocbq) { 13873 if (!iocbq) {
@@ -13712,9 +13885,14 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
13712 iocbq->iocb.ulpBdeCount = 1; 13885 iocbq->iocb.ulpBdeCount = 1;
13713 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 13886 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
13714 LPFC_DATA_BUF_SIZE; 13887 LPFC_DATA_BUF_SIZE;
13715 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 13888
13716 bf_get(lpfc_rcqe_length, 13889 /* We need to get the size out of the right CQE */
13717 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 13890 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
13891 len = bf_get(lpfc_rcqe_length,
13892 &hbq_buf->cq_event.cqe.rcqe_cmpl);
13893 tot_len += len;
13894 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
13895
13718 iocbq->iocb.un.rcvels.remoteID = sid; 13896 iocbq->iocb.un.rcvels.remoteID = sid;
13719 list_add_tail(&iocbq->list, &first_iocbq->list); 13897 list_add_tail(&iocbq->list, &first_iocbq->list);
13720 } 13898 }
@@ -13787,7 +13965,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
13787 lpfc_in_buf_free(phba, &dmabuf->dbuf); 13965 lpfc_in_buf_free(phba, &dmabuf->dbuf);
13788 return; 13966 return;
13789 } 13967 }
13790 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl); 13968 if ((bf_get(lpfc_cqe_code,
13969 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
13970 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
13971 &dmabuf->cq_event.cqe.rcqe_cmpl);
13972 else
13973 fcfi = bf_get(lpfc_rcqe_fcf_id,
13974 &dmabuf->cq_event.cqe.rcqe_cmpl);
13791 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 13975 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
13792 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) { 13976 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
13793 /* throw out the frame */ 13977 /* throw out the frame */
@@ -14451,6 +14635,92 @@ fail_fcf_read:
14451} 14635}
14452 14636
14453/** 14637/**
14638 * lpfc_check_next_fcf_pri
14639 * phba pointer to the lpfc_hba struct for this port.
14640 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
14641 * routine when the rr_bmask is empty. The FCF indecies are put into the
14642 * rr_bmask based on their priority level. Starting from the highest priority
14643 * to the lowest. The most likely FCF candidate will be in the highest
14644 * priority group. When this routine is called it searches the fcf_pri list for
14645 * next lowest priority group and repopulates the rr_bmask with only those
14646 * fcf_indexes.
14647 * returns:
14648 * 1=success 0=failure
14649 **/
14650int
14651lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
14652{
14653 uint16_t next_fcf_pri;
14654 uint16_t last_index;
14655 struct lpfc_fcf_pri *fcf_pri;
14656 int rc;
14657 int ret = 0;
14658
14659 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
14660 LPFC_SLI4_FCF_TBL_INDX_MAX);
14661 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
14662 "3060 Last IDX %d\n", last_index);
14663 if (list_empty(&phba->fcf.fcf_pri_list)) {
14664 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
14665 "3061 Last IDX %d\n", last_index);
14666 return 0; /* Empty rr list */
14667 }
14668 next_fcf_pri = 0;
14669 /*
14670 * Clear the rr_bmask and set all of the bits that are at this
14671 * priority.
14672 */
14673 memset(phba->fcf.fcf_rr_bmask, 0,
14674 sizeof(*phba->fcf.fcf_rr_bmask));
14675 spin_lock_irq(&phba->hbalock);
14676 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
14677 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
14678 continue;
14679 /*
14680 * the 1st priority that has not FLOGI failed
14681 * will be the highest.
14682 */
14683 if (!next_fcf_pri)
14684 next_fcf_pri = fcf_pri->fcf_rec.priority;
14685 spin_unlock_irq(&phba->hbalock);
14686 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
14687 rc = lpfc_sli4_fcf_rr_index_set(phba,
14688 fcf_pri->fcf_rec.fcf_index);
14689 if (rc)
14690 return 0;
14691 }
14692 spin_lock_irq(&phba->hbalock);
14693 }
14694 /*
14695 * if next_fcf_pri was not set above and the list is not empty then
14696 * we have failed flogis on all of them. So reset flogi failed
14697 * and start at the begining.
14698 */
14699 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
14700 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
14701 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
14702 /*
14703 * the 1st priority that has not FLOGI failed
14704 * will be the highest.
14705 */
14706 if (!next_fcf_pri)
14707 next_fcf_pri = fcf_pri->fcf_rec.priority;
14708 spin_unlock_irq(&phba->hbalock);
14709 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
14710 rc = lpfc_sli4_fcf_rr_index_set(phba,
14711 fcf_pri->fcf_rec.fcf_index);
14712 if (rc)
14713 return 0;
14714 }
14715 spin_lock_irq(&phba->hbalock);
14716 }
14717 } else
14718 ret = 1;
14719 spin_unlock_irq(&phba->hbalock);
14720
14721 return ret;
14722}
14723/**
14454 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 14724 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
14455 * @phba: pointer to lpfc hba data structure. 14725 * @phba: pointer to lpfc hba data structure.
14456 * 14726 *
@@ -14466,6 +14736,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
14466 uint16_t next_fcf_index; 14736 uint16_t next_fcf_index;
14467 14737
14468 /* Search start from next bit of currently registered FCF index */ 14738 /* Search start from next bit of currently registered FCF index */
14739next_priority:
14469 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) % 14740 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
14470 LPFC_SLI4_FCF_TBL_INDX_MAX; 14741 LPFC_SLI4_FCF_TBL_INDX_MAX;
14471 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 14742 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
@@ -14473,17 +14744,46 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
14473 next_fcf_index); 14744 next_fcf_index);
14474 14745
14475 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 14746 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
14476 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 14747 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
14748 /*
14749 * If we have wrapped then we need to clear the bits that
14750 * have been tested so that we can detect when we should
14751 * change the priority level.
14752 */
14477 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 14753 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
14478 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 14754 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
14755 }
14756
14479 14757
14480 /* Check roundrobin failover list empty condition */ 14758 /* Check roundrobin failover list empty condition */
14481 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 14759 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
14760 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
14761 /*
14762 * If next fcf index is not found check if there are lower
14763 * Priority level fcf's in the fcf_priority list.
14764 * Set up the rr_bmask with all of the avaiable fcf bits
14765 * at that level and continue the selection process.
14766 */
14767 if (lpfc_check_next_fcf_pri_level(phba))
14768 goto next_priority;
14482 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 14769 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
14483 "2844 No roundrobin failover FCF available\n"); 14770 "2844 No roundrobin failover FCF available\n");
14484 return LPFC_FCOE_FCF_NEXT_NONE; 14771 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
14772 return LPFC_FCOE_FCF_NEXT_NONE;
14773 else {
14774 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
14775 "3063 Only FCF available idx %d, flag %x\n",
14776 next_fcf_index,
14777 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
14778 return next_fcf_index;
14779 }
14485 } 14780 }
14486 14781
14782 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
14783 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
14784 LPFC_FCF_FLOGI_FAILED)
14785 goto next_priority;
14786
14487 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 14787 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
14488 "2845 Get next roundrobin failover FCF (x%x)\n", 14788 "2845 Get next roundrobin failover FCF (x%x)\n",
14489 next_fcf_index); 14789 next_fcf_index);
@@ -14535,6 +14835,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
14535void 14835void
14536lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 14836lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
14537{ 14837{
14838 struct lpfc_fcf_pri *fcf_pri;
14538 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 14839 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
14539 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 14840 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
14540 "2762 FCF (x%x) reached driver's book " 14841 "2762 FCF (x%x) reached driver's book "
@@ -14543,6 +14844,14 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
14543 return; 14844 return;
14544 } 14845 }
14545 /* Clear the eligible FCF record index bmask */ 14846 /* Clear the eligible FCF record index bmask */
14847 spin_lock_irq(&phba->hbalock);
14848 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
14849 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
14850 list_del_init(&fcf_pri->list);
14851 break;
14852 }
14853 }
14854 spin_unlock_irq(&phba->hbalock);
14546 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 14855 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
14547 14856
14548 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 14857 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 4b1703554a26..19bb87ae8597 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -81,6 +81,8 @@
81 (fc_hdr)->fh_f_ctl[1] << 8 | \ 81 (fc_hdr)->fh_f_ctl[1] << 8 | \
82 (fc_hdr)->fh_f_ctl[2]) 82 (fc_hdr)->fh_f_ctl[2])
83 83
84#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
85
84enum lpfc_sli4_queue_type { 86enum lpfc_sli4_queue_type {
85 LPFC_EQ, 87 LPFC_EQ,
86 LPFC_GCQ, 88 LPFC_GCQ,
@@ -157,6 +159,25 @@ struct lpfc_fcf_rec {
157#define RECORD_VALID 0x02 159#define RECORD_VALID 0x02
158}; 160};
159 161
162struct lpfc_fcf_pri_rec {
163 uint16_t fcf_index;
164#define LPFC_FCF_ON_PRI_LIST 0x0001
165#define LPFC_FCF_FLOGI_FAILED 0x0002
166 uint16_t flag;
167 uint32_t priority;
168};
169
170struct lpfc_fcf_pri {
171 struct list_head list;
172 struct lpfc_fcf_pri_rec fcf_rec;
173};
174
175/*
176 * Maximum FCF table index, it is for driver internal book keeping, it
177 * just needs to be no less than the supported HBA's FCF table size.
178 */
179#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
180
160struct lpfc_fcf { 181struct lpfc_fcf {
161 uint16_t fcfi; 182 uint16_t fcfi;
162 uint32_t fcf_flag; 183 uint32_t fcf_flag;
@@ -176,15 +197,13 @@ struct lpfc_fcf {
176 uint32_t eligible_fcf_cnt; 197 uint32_t eligible_fcf_cnt;
177 struct lpfc_fcf_rec current_rec; 198 struct lpfc_fcf_rec current_rec;
178 struct lpfc_fcf_rec failover_rec; 199 struct lpfc_fcf_rec failover_rec;
200 struct list_head fcf_pri_list;
201 struct lpfc_fcf_pri fcf_pri[LPFC_SLI4_FCF_TBL_INDX_MAX];
202 uint32_t current_fcf_scan_pri;
179 struct timer_list redisc_wait; 203 struct timer_list redisc_wait;
180 unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */ 204 unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
181}; 205};
182 206
183/*
184 * Maximum FCF table index, it is for driver internal book keeping, it
185 * just needs to be no less than the supported HBA's FCF table size.
186 */
187#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
188 207
189#define LPFC_REGION23_SIGNATURE "RG23" 208#define LPFC_REGION23_SIGNATURE "RG23"
190#define LPFC_REGION23_VERSION 1 209#define LPFC_REGION23_VERSION 1
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c03921b1232c..c1e0ae94d9f4 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.23" 21#define LPFC_DRIVER_VERSION "8.3.25"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 7370c084b178..3948a00d81f4 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
33/* 33/*
34 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
35 */ 35 */
36#define MEGASAS_VERSION "00.00.05.38-rc1" 36#define MEGASAS_VERSION "00.00.05.40-rc1"
37#define MEGASAS_RELDATE "May. 11, 2011" 37#define MEGASAS_RELDATE "Jul. 26, 2011"
38#define MEGASAS_EXT_VERSION "Wed. May. 11 17:00:00 PDT 2011" 38#define MEGASAS_EXT_VERSION "Tue. Jul. 26 17:00:00 PDT 2011"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 2d8cdce7b2f5..776d01988660 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * FILE: megaraid_sas_base.c
21 * Version : v00.00.05.38-rc1 21 * Version : v00.00.05.40-rc1
22 * 22 *
23 * Authors: LSI Corporation 23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 24 * Sreenivas Bagalkote
@@ -54,6 +54,7 @@
54#include <scsi/scsi_cmnd.h> 54#include <scsi/scsi_cmnd.h>
55#include <scsi/scsi_device.h> 55#include <scsi/scsi_device.h>
56#include <scsi/scsi_host.h> 56#include <scsi/scsi_host.h>
57#include <scsi/scsi_tcq.h>
57#include "megaraid_sas_fusion.h" 58#include "megaraid_sas_fusion.h"
58#include "megaraid_sas.h" 59#include "megaraid_sas.h"
59 60
@@ -2057,6 +2058,20 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2057 } 2058 }
2058} 2059}
2059 2060
2061static int megasas_change_queue_depth(struct scsi_device *sdev,
2062 int queue_depth, int reason)
2063{
2064 if (reason != SCSI_QDEPTH_DEFAULT)
2065 return -EOPNOTSUPP;
2066
2067 if (queue_depth > sdev->host->can_queue)
2068 queue_depth = sdev->host->can_queue;
2069 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
2070 queue_depth);
2071
2072 return queue_depth;
2073}
2074
2060/* 2075/*
2061 * Scsi host template for megaraid_sas driver 2076 * Scsi host template for megaraid_sas driver
2062 */ 2077 */
@@ -2074,6 +2089,7 @@ static struct scsi_host_template megasas_template = {
2074 .eh_timed_out = megasas_reset_timer, 2089 .eh_timed_out = megasas_reset_timer,
2075 .bios_param = megasas_bios_param, 2090 .bios_param = megasas_bios_param,
2076 .use_clustering = ENABLE_CLUSTERING, 2091 .use_clustering = ENABLE_CLUSTERING,
2092 .change_queue_depth = megasas_change_queue_depth,
2077}; 2093};
2078 2094
2079/** 2095/**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 8fe3a45794fc..5a5af1fe7581 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -288,7 +288,6 @@ u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock,
288 /* Get dev handle from Pd */ 288 /* Get dev handle from Pd */
289 *pDevHandle = MR_PdDevHandleGet(pd, map); 289 *pDevHandle = MR_PdDevHandleGet(pd, map);
290 } 290 }
291 retval = FALSE;
292 } 291 }
293 292
294 *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; 293 *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 939f283d0c28..6abd2fcc43e2 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -4258,6 +4258,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4258 u32 log_info; 4258 u32 log_info;
4259 struct MPT2SAS_DEVICE *sas_device_priv_data; 4259 struct MPT2SAS_DEVICE *sas_device_priv_data;
4260 u32 response_code = 0; 4260 u32 response_code = 0;
4261 unsigned long flags;
4261 4262
4262 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 4263 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
4263 scmd = _scsih_scsi_lookup_get_clear(ioc, smid); 4264 scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
@@ -4282,6 +4283,9 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4282 * the failed direct I/O should be redirected to volume 4283 * the failed direct I/O should be redirected to volume
4283 */ 4284 */
4284 if (_scsih_scsi_direct_io_get(ioc, smid)) { 4285 if (_scsih_scsi_direct_io_get(ioc, smid)) {
4286 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4287 ioc->scsi_lookup[smid - 1].scmd = scmd;
4288 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4285 _scsih_scsi_direct_io_set(ioc, smid, 0); 4289 _scsih_scsi_direct_io_set(ioc, smid, 0);
4286 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); 4290 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
4287 mpi_request->DevHandle = 4291 mpi_request->DevHandle =
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
index c82b012aba37..78f7e20a0c1c 100644
--- a/drivers/scsi/mvsas/Kconfig
+++ b/drivers/scsi/mvsas/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4# Copyright 2007 Red Hat, Inc. 4# Copyright 2007 Red Hat, Inc.
5# Copyright 2008 Marvell. <kewei@marvell.com> 5# Copyright 2008 Marvell. <kewei@marvell.com>
6# Copyright 2009-20011 Marvell. <yuxiangl@marvell.com> 6# Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
7# 7#
8# This file is licensed under GPLv2. 8# This file is licensed under GPLv2.
9# 9#
@@ -41,3 +41,10 @@ config SCSI_MVSAS_DEBUG
41 help 41 help
42 Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode, 42 Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode,
43 the driver prints some messages to the console. 43 the driver prints some messages to the console.
44config SCSI_MVSAS_TASKLET
45 bool "Support for interrupt tasklet"
46 default n
47 depends on SCSI_MVSAS
48 help
49 Compiles the 88SE64xx/88SE94xx driver in interrupt tasklet mode.In this mode,
50 the interrupt will schedule a tasklet.
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index 13c960481391..8ba47229049f 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -33,7 +33,6 @@ static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
33 u32 reg; 33 u32 reg;
34 struct mvs_phy *phy = &mvi->phy[i]; 34 struct mvs_phy *phy = &mvi->phy[i];
35 35
36 /* TODO check & save device type */
37 reg = mr32(MVS_GBL_PORT_TYPE); 36 reg = mr32(MVS_GBL_PORT_TYPE);
38 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); 37 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
39 if (reg & MODE_SAS_SATA & (1 << i)) 38 if (reg & MODE_SAS_SATA & (1 << i))
@@ -48,7 +47,7 @@ static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
48 u32 tmp; 47 u32 tmp;
49 48
50 tmp = mr32(MVS_PCS); 49 tmp = mr32(MVS_PCS);
51 if (mvi->chip->n_phy <= 4) 50 if (mvi->chip->n_phy <= MVS_SOC_PORTS)
52 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT); 51 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
53 else 52 else
54 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2); 53 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
@@ -58,24 +57,16 @@ static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
58static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi) 57static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
59{ 58{
60 void __iomem *regs = mvi->regs; 59 void __iomem *regs = mvi->regs;
60 int i;
61 61
62 mvs_phy_hacks(mvi); 62 mvs_phy_hacks(mvi);
63 63
64 if (!(mvi->flags & MVF_FLAG_SOC)) { 64 if (!(mvi->flags & MVF_FLAG_SOC)) {
65 /* TEST - for phy decoding error, adjust voltage levels */ 65 for (i = 0; i < MVS_SOC_PORTS; i++) {
66 mw32(MVS_P0_VSR_ADDR + 0, 0x8); 66 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE8);
67 mw32(MVS_P0_VSR_DATA + 0, 0x2F0); 67 mvs_write_port_vsr_data(mvi, i, 0x2F0);
68 68 }
69 mw32(MVS_P0_VSR_ADDR + 8, 0x8);
70 mw32(MVS_P0_VSR_DATA + 8, 0x2F0);
71
72 mw32(MVS_P0_VSR_ADDR + 16, 0x8);
73 mw32(MVS_P0_VSR_DATA + 16, 0x2F0);
74
75 mw32(MVS_P0_VSR_ADDR + 24, 0x8);
76 mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
77 } else { 69 } else {
78 int i;
79 /* disable auto port detection */ 70 /* disable auto port detection */
80 mw32(MVS_GBL_PORT_TYPE, 0); 71 mw32(MVS_GBL_PORT_TYPE, 0);
81 for (i = 0; i < mvi->chip->n_phy; i++) { 72 for (i = 0; i < mvi->chip->n_phy; i++) {
@@ -95,7 +86,7 @@ static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
95 u32 reg, tmp; 86 u32 reg, tmp;
96 87
97 if (!(mvi->flags & MVF_FLAG_SOC)) { 88 if (!(mvi->flags & MVF_FLAG_SOC)) {
98 if (phy_id < 4) 89 if (phy_id < MVS_SOC_PORTS)
99 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg); 90 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
100 else 91 else
101 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg); 92 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
@@ -104,13 +95,13 @@ static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
104 reg = mr32(MVS_PHY_CTL); 95 reg = mr32(MVS_PHY_CTL);
105 96
106 tmp = reg; 97 tmp = reg;
107 if (phy_id < 4) 98 if (phy_id < MVS_SOC_PORTS)
108 tmp |= (1U << phy_id) << PCTL_LINK_OFFS; 99 tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
109 else 100 else
110 tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS; 101 tmp |= (1U << (phy_id - MVS_SOC_PORTS)) << PCTL_LINK_OFFS;
111 102
112 if (!(mvi->flags & MVF_FLAG_SOC)) { 103 if (!(mvi->flags & MVF_FLAG_SOC)) {
113 if (phy_id < 4) { 104 if (phy_id < MVS_SOC_PORTS) {
114 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); 105 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
115 mdelay(10); 106 mdelay(10);
116 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg); 107 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
@@ -133,9 +124,9 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
133 tmp &= ~PHYEV_RDY_CH; 124 tmp &= ~PHYEV_RDY_CH;
134 mvs_write_port_irq_stat(mvi, phy_id, tmp); 125 mvs_write_port_irq_stat(mvi, phy_id, tmp);
135 tmp = mvs_read_phy_ctl(mvi, phy_id); 126 tmp = mvs_read_phy_ctl(mvi, phy_id);
136 if (hard == 1) 127 if (hard == MVS_HARD_RESET)
137 tmp |= PHY_RST_HARD; 128 tmp |= PHY_RST_HARD;
138 else if (hard == 0) 129 else if (hard == MVS_SOFT_RESET)
139 tmp |= PHY_RST; 130 tmp |= PHY_RST;
140 mvs_write_phy_ctl(mvi, phy_id, tmp); 131 mvs_write_phy_ctl(mvi, phy_id, tmp);
141 if (hard) { 132 if (hard) {
@@ -321,6 +312,11 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
321 /* init phys */ 312 /* init phys */
322 mvs_64xx_phy_hacks(mvi); 313 mvs_64xx_phy_hacks(mvi);
323 314
315 tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
316 tmp &= 0x0000ffff;
317 tmp |= 0x00fa0000;
318 mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
319
324 /* enable auto port detection */ 320 /* enable auto port detection */
325 mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN); 321 mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
326 322
@@ -346,7 +342,7 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
346 342
347 mvs_64xx_enable_xmt(mvi, i); 343 mvs_64xx_enable_xmt(mvi, i);
348 344
349 mvs_64xx_phy_reset(mvi, i, 1); 345 mvs_64xx_phy_reset(mvi, i, MVS_HARD_RESET);
350 msleep(500); 346 msleep(500);
351 mvs_64xx_detect_porttype(mvi, i); 347 mvs_64xx_detect_porttype(mvi, i);
352 } 348 }
@@ -377,13 +373,7 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
377 mvs_update_phyinfo(mvi, i, 1); 373 mvs_update_phyinfo(mvi, i, 1);
378 } 374 }
379 375
380 /* FIXME: update wide port bitmaps */
381
382 /* little endian for open address and command table, etc. */ 376 /* little endian for open address and command table, etc. */
383 /*
384 * it seems that ( from the spec ) turning on big-endian won't
385 * do us any good on big-endian machines, need further confirmation
386 */
387 cctl = mr32(MVS_CTL); 377 cctl = mr32(MVS_CTL);
388 cctl |= CCTL_ENDIAN_CMD; 378 cctl |= CCTL_ENDIAN_CMD;
389 cctl |= CCTL_ENDIAN_DATA; 379 cctl |= CCTL_ENDIAN_DATA;
@@ -394,15 +384,19 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
394 /* reset CMD queue */ 384 /* reset CMD queue */
395 tmp = mr32(MVS_PCS); 385 tmp = mr32(MVS_PCS);
396 tmp |= PCS_CMD_RST; 386 tmp |= PCS_CMD_RST;
387 tmp &= ~PCS_SELF_CLEAR;
397 mw32(MVS_PCS, tmp); 388 mw32(MVS_PCS, tmp);
398 /* interrupt coalescing may cause missing HW interrput in some case, 389 /*
399 * and the max count is 0x1ff, while our max slot is 0x200, 390 * the max count is 0x1ff, while our max slot is 0x200,
400 * it will make count 0. 391 * it will make count 0.
401 */ 392 */
402 tmp = 0; 393 tmp = 0;
403 mw32(MVS_INT_COAL, tmp); 394 if (MVS_CHIP_SLOT_SZ > 0x1ff)
395 mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
396 else
397 mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);
404 398
405 tmp = 0x100; 399 tmp = 0x10000 | interrupt_coalescing;
406 mw32(MVS_INT_COAL_TMOUT, tmp); 400 mw32(MVS_INT_COAL_TMOUT, tmp);
407 401
408 /* ladies and gentlemen, start your engines */ 402 /* ladies and gentlemen, start your engines */
@@ -477,13 +471,11 @@ static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
477 471
478 /* clear CMD_CMPLT ASAP */ 472 /* clear CMD_CMPLT ASAP */
479 mw32_f(MVS_INT_STAT, CINT_DONE); 473 mw32_f(MVS_INT_STAT, CINT_DONE);
480#ifndef MVS_USE_TASKLET 474
481 spin_lock(&mvi->lock); 475 spin_lock(&mvi->lock);
482#endif
483 mvs_int_full(mvi); 476 mvs_int_full(mvi);
484#ifndef MVS_USE_TASKLET
485 spin_unlock(&mvi->lock); 477 spin_unlock(&mvi->lock);
486#endif 478
487 return IRQ_HANDLED; 479 return IRQ_HANDLED;
488} 480}
489 481
@@ -630,7 +622,6 @@ static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
630{ 622{
631 u32 tmp; 623 u32 tmp;
632 struct mvs_phy *phy = &mvi->phy[i]; 624 struct mvs_phy *phy = &mvi->phy[i];
633 /* workaround for HW phy decoding error on 1.5g disk drive */
634 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); 625 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
635 tmp = mvs_read_port_vsr_data(mvi, i); 626 tmp = mvs_read_port_vsr_data(mvi, i);
636 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> 627 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
@@ -661,7 +652,7 @@ void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
661 tmp |= lrmax; 652 tmp |= lrmax;
662 } 653 }
663 mvs_write_phy_ctl(mvi, phy_id, tmp); 654 mvs_write_phy_ctl(mvi, phy_id, tmp);
664 mvs_64xx_phy_reset(mvi, phy_id, 1); 655 mvs_64xx_phy_reset(mvi, phy_id, MVS_HARD_RESET);
665} 656}
666 657
667static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi) 658static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
@@ -744,11 +735,13 @@ int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
744 return -1; 735 return -1;
745} 736}
746 737
747#ifndef DISABLE_HOTPLUG_DMA_FIX 738void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
748void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd) 739 int buf_len, int from, void *prd)
749{ 740{
750 int i; 741 int i;
751 struct mvs_prd *buf_prd = prd; 742 struct mvs_prd *buf_prd = prd;
743 dma_addr_t buf_dma = mvi->bulk_buffer_dma;
744
752 buf_prd += from; 745 buf_prd += from;
753 for (i = 0; i < MAX_SG_ENTRY - from; i++) { 746 for (i = 0; i < MAX_SG_ENTRY - from; i++) {
754 buf_prd->addr = cpu_to_le64(buf_dma); 747 buf_prd->addr = cpu_to_le64(buf_dma);
@@ -756,7 +749,28 @@ void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
756 ++buf_prd; 749 ++buf_prd;
757 } 750 }
758} 751}
759#endif 752
753static void mvs_64xx_tune_interrupt(struct mvs_info *mvi, u32 time)
754{
755 void __iomem *regs = mvi->regs;
756 u32 tmp = 0;
757 /*
758 * the max count is 0x1ff, while our max slot is 0x200,
759 * it will make count 0.
760 */
761 if (time == 0) {
762 mw32(MVS_INT_COAL, 0);
763 mw32(MVS_INT_COAL_TMOUT, 0x10000);
764 } else {
765 if (MVS_CHIP_SLOT_SZ > 0x1ff)
766 mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
767 else
768 mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);
769
770 tmp = 0x10000 | time;
771 mw32(MVS_INT_COAL_TMOUT, tmp);
772 }
773}
760 774
761const struct mvs_dispatch mvs_64xx_dispatch = { 775const struct mvs_dispatch mvs_64xx_dispatch = {
762 "mv64xx", 776 "mv64xx",
@@ -780,7 +794,6 @@ const struct mvs_dispatch mvs_64xx_dispatch = {
780 mvs_write_port_irq_stat, 794 mvs_write_port_irq_stat,
781 mvs_read_port_irq_mask, 795 mvs_read_port_irq_mask,
782 mvs_write_port_irq_mask, 796 mvs_write_port_irq_mask,
783 mvs_get_sas_addr,
784 mvs_64xx_command_active, 797 mvs_64xx_command_active,
785 mvs_64xx_clear_srs_irq, 798 mvs_64xx_clear_srs_irq,
786 mvs_64xx_issue_stop, 799 mvs_64xx_issue_stop,
@@ -808,8 +821,8 @@ const struct mvs_dispatch mvs_64xx_dispatch = {
808 mvs_64xx_spi_buildcmd, 821 mvs_64xx_spi_buildcmd,
809 mvs_64xx_spi_issuecmd, 822 mvs_64xx_spi_issuecmd,
810 mvs_64xx_spi_waitdataready, 823 mvs_64xx_spi_waitdataready,
811#ifndef DISABLE_HOTPLUG_DMA_FIX
812 mvs_64xx_fix_dma, 824 mvs_64xx_fix_dma,
813#endif 825 mvs_64xx_tune_interrupt,
826 NULL,
814}; 827};
815 828
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 78162c3c36e6..3501291618fd 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -48,6 +48,216 @@ static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
48 } 48 }
49} 49}
50 50
51void set_phy_tuning(struct mvs_info *mvi, int phy_id,
52 struct phy_tuning phy_tuning)
53{
54 u32 tmp, setting_0 = 0, setting_1 = 0;
55 u8 i;
56
57 /* Remap information for B0 chip:
58 *
59 * R0Ch -> R118h[15:0] (Adapted DFE F3 - F5 coefficient)
60 * R0Dh -> R118h[31:16] (Generation 1 Setting 0)
61 * R0Eh -> R11Ch[15:0] (Generation 1 Setting 1)
62 * R0Fh -> R11Ch[31:16] (Generation 2 Setting 0)
63 * R10h -> R120h[15:0] (Generation 2 Setting 1)
64 * R11h -> R120h[31:16] (Generation 3 Setting 0)
65 * R12h -> R124h[15:0] (Generation 3 Setting 1)
66 * R13h -> R124h[31:16] (Generation 4 Setting 0 (Reserved))
67 */
68
69 /* A0 has a different set of registers */
70 if (mvi->pdev->revision == VANIR_A0_REV)
71 return;
72
73 for (i = 0; i < 3; i++) {
74 /* loop 3 times, set Gen 1, Gen 2, Gen 3 */
75 switch (i) {
76 case 0:
77 setting_0 = GENERATION_1_SETTING;
78 setting_1 = GENERATION_1_2_SETTING;
79 break;
80 case 1:
81 setting_0 = GENERATION_1_2_SETTING;
82 setting_1 = GENERATION_2_3_SETTING;
83 break;
84 case 2:
85 setting_0 = GENERATION_2_3_SETTING;
86 setting_1 = GENERATION_3_4_SETTING;
87 break;
88 }
89
90 /* Set:
91 *
92 * Transmitter Emphasis Enable
93 * Transmitter Emphasis Amplitude
94 * Transmitter Amplitude
95 */
96 mvs_write_port_vsr_addr(mvi, phy_id, setting_0);
97 tmp = mvs_read_port_vsr_data(mvi, phy_id);
98 tmp &= ~(0xFBE << 16);
99 tmp |= (((phy_tuning.trans_emp_en << 11) |
100 (phy_tuning.trans_emp_amp << 7) |
101 (phy_tuning.trans_amp << 1)) << 16);
102 mvs_write_port_vsr_data(mvi, phy_id, tmp);
103
104 /* Set Transmitter Amplitude Adjust */
105 mvs_write_port_vsr_addr(mvi, phy_id, setting_1);
106 tmp = mvs_read_port_vsr_data(mvi, phy_id);
107 tmp &= ~(0xC000);
108 tmp |= (phy_tuning.trans_amp_adj << 14);
109 mvs_write_port_vsr_data(mvi, phy_id, tmp);
110 }
111}
112
113void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
114 struct ffe_control ffe)
115{
116 u32 tmp;
117
118 /* Don't run this if A0/B0 */
119 if ((mvi->pdev->revision == VANIR_A0_REV)
120 || (mvi->pdev->revision == VANIR_B0_REV))
121 return;
122
123 /* FFE Resistor and Capacitor */
124 /* R10Ch DFE Resolution Control/Squelch and FFE Setting
125 *
126 * FFE_FORCE [7]
127 * FFE_RES_SEL [6:4]
128 * FFE_CAP_SEL [3:0]
129 */
130 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_FFE_CONTROL);
131 tmp = mvs_read_port_vsr_data(mvi, phy_id);
132 tmp &= ~0xFF;
133
134 /* Read from HBA_Info_Page */
135 tmp |= ((0x1 << 7) |
136 (ffe.ffe_rss_sel << 4) |
137 (ffe.ffe_cap_sel << 0));
138
139 mvs_write_port_vsr_data(mvi, phy_id, tmp);
140
141 /* R064h PHY Mode Register 1
142 *
143 * DFE_DIS 18
144 */
145 mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
146 tmp = mvs_read_port_vsr_data(mvi, phy_id);
147 tmp &= ~0x40001;
148 /* Hard coding */
149 /* No defines in HBA_Info_Page */
150 tmp |= (0 << 18);
151 mvs_write_port_vsr_data(mvi, phy_id, tmp);
152
153 /* R110h DFE F0-F1 Coefficient Control/DFE Update Control
154 *
155 * DFE_UPDATE_EN [11:6]
156 * DFE_FX_FORCE [5:0]
157 */
158 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_DFE_UPDATE_CRTL);
159 tmp = mvs_read_port_vsr_data(mvi, phy_id);
160 tmp &= ~0xFFF;
161 /* Hard coding */
162 /* No defines in HBA_Info_Page */
163 tmp |= ((0x3F << 6) | (0x0 << 0));
164 mvs_write_port_vsr_data(mvi, phy_id, tmp);
165
166 /* R1A0h Interface and Digital Reference Clock Control/Reserved_50h
167 *
168 * FFE_TRAIN_EN 3
169 */
170 mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
171 tmp = mvs_read_port_vsr_data(mvi, phy_id);
172 tmp &= ~0x8;
173 /* Hard coding */
174 /* No defines in HBA_Info_Page */
175 tmp |= (0 << 3);
176 mvs_write_port_vsr_data(mvi, phy_id, tmp);
177}
178
179/*Notice: this function must be called when phy is disabled*/
180void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
181{
182 union reg_phy_cfg phy_cfg, phy_cfg_tmp;
183 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
184 phy_cfg_tmp.v = mvs_read_port_vsr_data(mvi, phy_id);
185 phy_cfg.v = 0;
186 phy_cfg.u.disable_phy = phy_cfg_tmp.u.disable_phy;
187 phy_cfg.u.sas_support = 1;
188 phy_cfg.u.sata_support = 1;
189 phy_cfg.u.sata_host_mode = 1;
190
191 switch (rate) {
192 case 0x0:
193 /* support 1.5 Gbps */
194 phy_cfg.u.speed_support = 1;
195 phy_cfg.u.snw_3_support = 0;
196 phy_cfg.u.tx_lnk_parity = 1;
197 phy_cfg.u.tx_spt_phs_lnk_rate = 0x30;
198 break;
199 case 0x1:
200
201 /* support 1.5, 3.0 Gbps */
202 phy_cfg.u.speed_support = 3;
203 phy_cfg.u.tx_spt_phs_lnk_rate = 0x3c;
204 phy_cfg.u.tx_lgcl_lnk_rate = 0x08;
205 break;
206 case 0x2:
207 default:
208 /* support 1.5, 3.0, 6.0 Gbps */
209 phy_cfg.u.speed_support = 7;
210 phy_cfg.u.snw_3_support = 1;
211 phy_cfg.u.tx_lnk_parity = 1;
212 phy_cfg.u.tx_spt_phs_lnk_rate = 0x3f;
213 phy_cfg.u.tx_lgcl_lnk_rate = 0x09;
214 break;
215 }
216 mvs_write_port_vsr_data(mvi, phy_id, phy_cfg.v);
217}
218
219static void __devinit
220mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
221{
222 u32 temp;
223 temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]);
224 if (temp == 0xFFFFFFFFL) {
225 mvi->hba_info_param.phy_tuning[phy_id].trans_emp_amp = 0x6;
226 mvi->hba_info_param.phy_tuning[phy_id].trans_amp = 0x1A;
227 mvi->hba_info_param.phy_tuning[phy_id].trans_amp_adj = 0x3;
228 }
229
230 temp = (u8)(*(u8 *)&mvi->hba_info_param.ffe_ctl[phy_id]);
231 if (temp == 0xFFL) {
232 switch (mvi->pdev->revision) {
233 case VANIR_A0_REV:
234 case VANIR_B0_REV:
235 mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
236 mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0x7;
237 break;
238 case VANIR_C0_REV:
239 case VANIR_C1_REV:
240 case VANIR_C2_REV:
241 default:
242 mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
243 mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0xC;
244 break;
245 }
246 }
247
248 temp = (u8)(*(u8 *)&mvi->hba_info_param.phy_rate[phy_id]);
249 if (temp == 0xFFL)
250 /*set default phy_rate = 6Gbps*/
251 mvi->hba_info_param.phy_rate[phy_id] = 0x2;
252
253 set_phy_tuning(mvi, phy_id,
254 mvi->hba_info_param.phy_tuning[phy_id]);
255 set_phy_ffe_tuning(mvi, phy_id,
256 mvi->hba_info_param.ffe_ctl[phy_id]);
257 set_phy_rate(mvi, phy_id,
258 mvi->hba_info_param.phy_rate[phy_id]);
259}
260
51static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id) 261static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
52{ 262{
53 void __iomem *regs = mvi->regs; 263 void __iomem *regs = mvi->regs;
@@ -61,7 +271,14 @@ static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
61static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard) 271static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
62{ 272{
63 u32 tmp; 273 u32 tmp;
64 274 u32 delay = 5000;
275 if (hard == MVS_PHY_TUNE) {
276 mvs_write_port_cfg_addr(mvi, phy_id, PHYR_SATA_CTL);
277 tmp = mvs_read_port_cfg_data(mvi, phy_id);
278 mvs_write_port_cfg_data(mvi, phy_id, tmp|0x20000000);
279 mvs_write_port_cfg_data(mvi, phy_id, tmp|0x100000);
280 return;
281 }
65 tmp = mvs_read_port_irq_stat(mvi, phy_id); 282 tmp = mvs_read_port_irq_stat(mvi, phy_id);
66 tmp &= ~PHYEV_RDY_CH; 283 tmp &= ~PHYEV_RDY_CH;
67 mvs_write_port_irq_stat(mvi, phy_id, tmp); 284 mvs_write_port_irq_stat(mvi, phy_id, tmp);
@@ -71,12 +288,15 @@ static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
71 mvs_write_phy_ctl(mvi, phy_id, tmp); 288 mvs_write_phy_ctl(mvi, phy_id, tmp);
72 do { 289 do {
73 tmp = mvs_read_phy_ctl(mvi, phy_id); 290 tmp = mvs_read_phy_ctl(mvi, phy_id);
74 } while (tmp & PHY_RST_HARD); 291 udelay(10);
292 delay--;
293 } while ((tmp & PHY_RST_HARD) && delay);
294 if (!delay)
295 mv_dprintk("phy hard reset failed.\n");
75 } else { 296 } else {
76 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT); 297 tmp = mvs_read_phy_ctl(mvi, phy_id);
77 tmp = mvs_read_port_vsr_data(mvi, phy_id);
78 tmp |= PHY_RST; 298 tmp |= PHY_RST;
79 mvs_write_port_vsr_data(mvi, phy_id, tmp); 299 mvs_write_phy_ctl(mvi, phy_id, tmp);
80 } 300 }
81} 301}
82 302
@@ -90,12 +310,25 @@ static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
90 310
91static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id) 311static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
92{ 312{
93 mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4); 313 u32 tmp;
94 mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1); 314 u8 revision = 0;
95 mvs_write_port_vsr_addr(mvi, phy_id, 0x104); 315
96 mvs_write_port_vsr_data(mvi, phy_id, 0x00018080); 316 revision = mvi->pdev->revision;
317 if (revision == VANIR_A0_REV) {
318 mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
319 mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
320 }
321 if (revision == VANIR_B0_REV) {
322 mvs_write_port_vsr_addr(mvi, phy_id, CMD_APP_MEM_CTL);
323 mvs_write_port_vsr_data(mvi, phy_id, 0x08001006);
324 mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
325 mvs_write_port_vsr_data(mvi, phy_id, 0x0000705f);
326 }
327
97 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); 328 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
98 mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff); 329 tmp = mvs_read_port_vsr_data(mvi, phy_id);
330 tmp |= bit(0);
331 mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff);
99} 332}
100 333
101static int __devinit mvs_94xx_init(struct mvs_info *mvi) 334static int __devinit mvs_94xx_init(struct mvs_info *mvi)
@@ -103,7 +336,9 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
103 void __iomem *regs = mvi->regs; 336 void __iomem *regs = mvi->regs;
104 int i; 337 int i;
105 u32 tmp, cctl; 338 u32 tmp, cctl;
339 u8 revision;
106 340
341 revision = mvi->pdev->revision;
107 mvs_show_pcie_usage(mvi); 342 mvs_show_pcie_usage(mvi);
108 if (mvi->flags & MVF_FLAG_SOC) { 343 if (mvi->flags & MVF_FLAG_SOC) {
109 tmp = mr32(MVS_PHY_CTL); 344 tmp = mr32(MVS_PHY_CTL);
@@ -133,6 +368,28 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
133 msleep(100); 368 msleep(100);
134 } 369 }
135 370
371 /* disable Multiplexing, enable phy implemented */
372 mw32(MVS_PORTS_IMP, 0xFF);
373
374 if (revision == VANIR_A0_REV) {
375 mw32(MVS_PA_VSR_ADDR, CMD_CMWK_OOB_DET);
376 mw32(MVS_PA_VSR_PORT, 0x00018080);
377 }
378 mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE2);
379 if (revision == VANIR_A0_REV || revision == VANIR_B0_REV)
380 /* set 6G/3G/1.5G, multiplexing, without SSC */
381 mw32(MVS_PA_VSR_PORT, 0x0084d4fe);
382 else
383 /* set 6G/3G/1.5G, multiplexing, with and without SSC */
384 mw32(MVS_PA_VSR_PORT, 0x0084fffe);
385
386 if (revision == VANIR_B0_REV) {
387 mw32(MVS_PA_VSR_ADDR, CMD_APP_MEM_CTL);
388 mw32(MVS_PA_VSR_PORT, 0x08001006);
389 mw32(MVS_PA_VSR_ADDR, CMD_HOST_RD_DATA);
390 mw32(MVS_PA_VSR_PORT, 0x0000705f);
391 }
392
136 /* reset control */ 393 /* reset control */
137 mw32(MVS_PCS, 0); /* MVS_PCS */ 394 mw32(MVS_PCS, 0); /* MVS_PCS */
138 mw32(MVS_STP_REG_SET_0, 0); 395 mw32(MVS_STP_REG_SET_0, 0);
@@ -141,17 +398,8 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
141 /* init phys */ 398 /* init phys */
142 mvs_phy_hacks(mvi); 399 mvs_phy_hacks(mvi);
143 400
144 /* disable Multiplexing, enable phy implemented */
145 mw32(MVS_PORTS_IMP, 0xFF);
146
147
148 mw32(MVS_PA_VSR_ADDR, 0x00000104);
149 mw32(MVS_PA_VSR_PORT, 0x00018080);
150 mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
151 mw32(MVS_PA_VSR_PORT, 0x0084ffff);
152
153 /* set LED blink when IO*/ 401 /* set LED blink when IO*/
154 mw32(MVS_PA_VSR_ADDR, 0x00000030); 402 mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED);
155 tmp = mr32(MVS_PA_VSR_PORT); 403 tmp = mr32(MVS_PA_VSR_PORT);
156 tmp &= 0xFFFF00FF; 404 tmp &= 0xFFFF00FF;
157 tmp |= 0x00003300; 405 tmp |= 0x00003300;
@@ -175,12 +423,13 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
175 mvs_94xx_phy_disable(mvi, i); 423 mvs_94xx_phy_disable(mvi, i);
176 /* set phy local SAS address */ 424 /* set phy local SAS address */
177 mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4, 425 mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
178 (mvi->phy[i].dev_sas_addr)); 426 cpu_to_le64(mvi->phy[i].dev_sas_addr));
179 427
180 mvs_94xx_enable_xmt(mvi, i); 428 mvs_94xx_enable_xmt(mvi, i);
429 mvs_94xx_config_reg_from_hba(mvi, i);
181 mvs_94xx_phy_enable(mvi, i); 430 mvs_94xx_phy_enable(mvi, i);
182 431
183 mvs_94xx_phy_reset(mvi, i, 1); 432 mvs_94xx_phy_reset(mvi, i, PHY_RST_HARD);
184 msleep(500); 433 msleep(500);
185 mvs_94xx_detect_porttype(mvi, i); 434 mvs_94xx_detect_porttype(mvi, i);
186 } 435 }
@@ -211,16 +460,9 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
211 mvs_update_phyinfo(mvi, i, 1); 460 mvs_update_phyinfo(mvi, i, 1);
212 } 461 }
213 462
214 /* FIXME: update wide port bitmaps */
215
216 /* little endian for open address and command table, etc. */ 463 /* little endian for open address and command table, etc. */
217 /*
218 * it seems that ( from the spec ) turning on big-endian won't
219 * do us any good on big-endian machines, need further confirmation
220 */
221 cctl = mr32(MVS_CTL); 464 cctl = mr32(MVS_CTL);
222 cctl |= CCTL_ENDIAN_CMD; 465 cctl |= CCTL_ENDIAN_CMD;
223 cctl |= CCTL_ENDIAN_DATA;
224 cctl &= ~CCTL_ENDIAN_OPEN; 466 cctl &= ~CCTL_ENDIAN_OPEN;
225 cctl |= CCTL_ENDIAN_RSP; 467 cctl |= CCTL_ENDIAN_RSP;
226 mw32_f(MVS_CTL, cctl); 468 mw32_f(MVS_CTL, cctl);
@@ -228,15 +470,20 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
228 /* reset CMD queue */ 470 /* reset CMD queue */
229 tmp = mr32(MVS_PCS); 471 tmp = mr32(MVS_PCS);
230 tmp |= PCS_CMD_RST; 472 tmp |= PCS_CMD_RST;
473 tmp &= ~PCS_SELF_CLEAR;
231 mw32(MVS_PCS, tmp); 474 mw32(MVS_PCS, tmp);
232 /* interrupt coalescing may cause missing HW interrput in some case, 475 /*
233 * and the max count is 0x1ff, while our max slot is 0x200, 476 * the max count is 0x1ff, while our max slot is 0x200,
234 * it will make count 0. 477 * it will make count 0.
235 */ 478 */
236 tmp = 0; 479 tmp = 0;
237 mw32(MVS_INT_COAL, tmp); 480 if (MVS_CHIP_SLOT_SZ > 0x1ff)
481 mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
482 else
483 mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);
238 484
239 tmp = 0x100; 485 /* default interrupt coalescing time is 128us */
486 tmp = 0x10000 | interrupt_coalescing;
240 mw32(MVS_INT_COAL_TMOUT, tmp); 487 mw32(MVS_INT_COAL_TMOUT, tmp);
241 488
242 /* ladies and gentlemen, start your engines */ 489 /* ladies and gentlemen, start your engines */
@@ -249,7 +496,7 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
249 496
250 /* enable completion queue interrupt */ 497 /* enable completion queue interrupt */
251 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP | 498 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
252 CINT_DMA_PCIE); 499 CINT_DMA_PCIE | CINT_NON_SPEC_NCQ_ERROR);
253 tmp |= CINT_PHY_MASK; 500 tmp |= CINT_PHY_MASK;
254 mw32(MVS_INT_MASK, tmp); 501 mw32(MVS_INT_MASK, tmp);
255 502
@@ -332,13 +579,10 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
332 if (((stat & IRQ_SAS_A) && mvi->id == 0) || 579 if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
333 ((stat & IRQ_SAS_B) && mvi->id == 1)) { 580 ((stat & IRQ_SAS_B) && mvi->id == 1)) {
334 mw32_f(MVS_INT_STAT, CINT_DONE); 581 mw32_f(MVS_INT_STAT, CINT_DONE);
335 #ifndef MVS_USE_TASKLET 582
336 spin_lock(&mvi->lock); 583 spin_lock(&mvi->lock);
337 #endif
338 mvs_int_full(mvi); 584 mvs_int_full(mvi);
339 #ifndef MVS_USE_TASKLET
340 spin_unlock(&mvi->lock); 585 spin_unlock(&mvi->lock);
341 #endif
342 } 586 }
343 return IRQ_HANDLED; 587 return IRQ_HANDLED;
344} 588}
@@ -346,10 +590,48 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
346static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx) 590static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
347{ 591{
348 u32 tmp; 592 u32 tmp;
349 mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32)); 593 tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
350 do { 594 if (tmp && 1 << (slot_idx % 32)) {
351 tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3)); 595 mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
352 } while (tmp & 1 << (slot_idx % 32)); 596 mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
597 1 << (slot_idx % 32));
598 do {
599 tmp = mvs_cr32(mvi,
600 MVS_COMMAND_ACTIVE + (slot_idx >> 3));
601 } while (tmp & 1 << (slot_idx % 32));
602 }
603}
604
605void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
606{
607 void __iomem *regs = mvi->regs;
608 u32 tmp;
609
610 if (clear_all) {
611 tmp = mr32(MVS_INT_STAT_SRS_0);
612 if (tmp) {
613 mv_dprintk("check SRS 0 %08X.\n", tmp);
614 mw32(MVS_INT_STAT_SRS_0, tmp);
615 }
616 tmp = mr32(MVS_INT_STAT_SRS_1);
617 if (tmp) {
618 mv_dprintk("check SRS 1 %08X.\n", tmp);
619 mw32(MVS_INT_STAT_SRS_1, tmp);
620 }
621 } else {
622 if (reg_set > 31)
623 tmp = mr32(MVS_INT_STAT_SRS_1);
624 else
625 tmp = mr32(MVS_INT_STAT_SRS_0);
626
627 if (tmp & (1 << (reg_set % 32))) {
628 mv_dprintk("register set 0x%x was stopped.\n", reg_set);
629 if (reg_set > 31)
630 mw32(MVS_INT_STAT_SRS_1, 1 << (reg_set % 32));
631 else
632 mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
633 }
634 }
353} 635}
354 636
355static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type, 637static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
@@ -357,37 +639,56 @@ static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
357{ 639{
358 void __iomem *regs = mvi->regs; 640 void __iomem *regs = mvi->regs;
359 u32 tmp; 641 u32 tmp;
642 mvs_94xx_clear_srs_irq(mvi, 0, 1);
360 643
361 if (type == PORT_TYPE_SATA) { 644 tmp = mr32(MVS_INT_STAT);
362 tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs); 645 mw32(MVS_INT_STAT, tmp | CINT_CI_STOP);
363 mw32(MVS_INT_STAT_SRS_0, tmp);
364 }
365 mw32(MVS_INT_STAT, CINT_CI_STOP);
366 tmp = mr32(MVS_PCS) | 0xFF00; 646 tmp = mr32(MVS_PCS) | 0xFF00;
367 mw32(MVS_PCS, tmp); 647 mw32(MVS_PCS, tmp);
368} 648}
369 649
650static void mvs_94xx_non_spec_ncq_error(struct mvs_info *mvi)
651{
652 void __iomem *regs = mvi->regs;
653 u32 err_0, err_1;
654 u8 i;
655 struct mvs_device *device;
656
657 err_0 = mr32(MVS_NON_NCQ_ERR_0);
658 err_1 = mr32(MVS_NON_NCQ_ERR_1);
659
660 mv_dprintk("non specific ncq error err_0:%x,err_1:%x.\n",
661 err_0, err_1);
662 for (i = 0; i < 32; i++) {
663 if (err_0 & bit(i)) {
664 device = mvs_find_dev_by_reg_set(mvi, i);
665 if (device)
666 mvs_release_task(mvi, device->sas_device);
667 }
668 if (err_1 & bit(i)) {
669 device = mvs_find_dev_by_reg_set(mvi, i+32);
670 if (device)
671 mvs_release_task(mvi, device->sas_device);
672 }
673 }
674
675 mw32(MVS_NON_NCQ_ERR_0, err_0);
676 mw32(MVS_NON_NCQ_ERR_1, err_1);
677}
678
370static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs) 679static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
371{ 680{
372 void __iomem *regs = mvi->regs; 681 void __iomem *regs = mvi->regs;
373 u32 tmp;
374 u8 reg_set = *tfs; 682 u8 reg_set = *tfs;
375 683
376 if (*tfs == MVS_ID_NOT_MAPPED) 684 if (*tfs == MVS_ID_NOT_MAPPED)
377 return; 685 return;
378 686
379 mvi->sata_reg_set &= ~bit(reg_set); 687 mvi->sata_reg_set &= ~bit(reg_set);
380 if (reg_set < 32) { 688 if (reg_set < 32)
381 w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set); 689 w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
382 tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set; 690 else
383 if (tmp) 691 w_reg_set_enable(reg_set, (u32)(mvi->sata_reg_set >> 32));
384 mw32(MVS_INT_STAT_SRS_0, tmp);
385 } else {
386 w_reg_set_enable(reg_set, mvi->sata_reg_set);
387 tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
388 if (tmp)
389 mw32(MVS_INT_STAT_SRS_1, tmp);
390 }
391 692
392 *tfs = MVS_ID_NOT_MAPPED; 693 *tfs = MVS_ID_NOT_MAPPED;
393 694
@@ -403,7 +704,7 @@ static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
403 return 0; 704 return 0;
404 705
405 i = mv_ffc64(mvi->sata_reg_set); 706 i = mv_ffc64(mvi->sata_reg_set);
406 if (i > 32) { 707 if (i >= 32) {
407 mvi->sata_reg_set |= bit(i); 708 mvi->sata_reg_set |= bit(i);
408 w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32)); 709 w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
409 *tfs = i; 710 *tfs = i;
@@ -422,9 +723,12 @@ static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
422 int i; 723 int i;
423 struct scatterlist *sg; 724 struct scatterlist *sg;
424 struct mvs_prd *buf_prd = prd; 725 struct mvs_prd *buf_prd = prd;
726 struct mvs_prd_imt im_len;
727 *(u32 *)&im_len = 0;
425 for_each_sg(scatter, sg, nr, i) { 728 for_each_sg(scatter, sg, nr, i) {
426 buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); 729 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
427 buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg)); 730 im_len.len = sg_dma_len(sg);
731 buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
428 buf_prd++; 732 buf_prd++;
429 } 733 }
430} 734}
@@ -433,7 +737,7 @@ static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
433{ 737{
434 u32 phy_st; 738 u32 phy_st;
435 phy_st = mvs_read_phy_ctl(mvi, i); 739 phy_st = mvs_read_phy_ctl(mvi, i);
436 if (phy_st & PHY_READY_MASK) /* phy ready */ 740 if (phy_st & PHY_READY_MASK)
437 return 1; 741 return 1;
438 return 0; 742 return 0;
439} 743}
@@ -447,7 +751,7 @@ static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
447 for (i = 0; i < 7; i++) { 751 for (i = 0; i < 7; i++) {
448 mvs_write_port_cfg_addr(mvi, port_id, 752 mvs_write_port_cfg_addr(mvi, port_id,
449 CONFIG_ID_FRAME0 + i * 4); 753 CONFIG_ID_FRAME0 + i * 4);
450 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id); 754 id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
451 } 755 }
452 memcpy(id, id_frame, 28); 756 memcpy(id, id_frame, 28);
453} 757}
@@ -458,15 +762,13 @@ static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
458 int i; 762 int i;
459 u32 id_frame[7]; 763 u32 id_frame[7];
460 764
461 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
462 for (i = 0; i < 7; i++) { 765 for (i = 0; i < 7; i++) {
463 mvs_write_port_cfg_addr(mvi, port_id, 766 mvs_write_port_cfg_addr(mvi, port_id,
464 CONFIG_ATT_ID_FRAME0 + i * 4); 767 CONFIG_ATT_ID_FRAME0 + i * 4);
465 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id); 768 id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
466 mv_dprintk("94xx phy %d atta frame %d %x.\n", 769 mv_dprintk("94xx phy %d atta frame %d %x.\n",
467 port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]); 770 port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
468 } 771 }
469 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
470 memcpy(id, id_frame, 28); 772 memcpy(id, id_frame, 28);
471} 773}
472 774
@@ -526,7 +828,18 @@ static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
526void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, 828void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
527 struct sas_phy_linkrates *rates) 829 struct sas_phy_linkrates *rates)
528{ 830{
529 /* TODO */ 831 u32 lrmax = 0;
832 u32 tmp;
833
834 tmp = mvs_read_phy_ctl(mvi, phy_id);
835 lrmax = (rates->maximum_linkrate - SAS_LINK_RATE_1_5_GBPS) << 12;
836
837 if (lrmax) {
838 tmp &= ~(0x3 << 12);
839 tmp |= lrmax;
840 }
841 mvs_write_phy_ctl(mvi, phy_id, tmp);
842 mvs_94xx_phy_reset(mvi, phy_id, PHY_RST_HARD);
530} 843}
531 844
532static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi) 845static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
@@ -603,27 +916,59 @@ int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
603 return -1; 916 return -1;
604} 917}
605 918
606#ifndef DISABLE_HOTPLUG_DMA_FIX 919void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
607void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd) 920 int buf_len, int from, void *prd)
608{ 921{
609 int i; 922 int i;
610 struct mvs_prd *buf_prd = prd; 923 struct mvs_prd *buf_prd = prd;
924 dma_addr_t buf_dma;
925 struct mvs_prd_imt im_len;
926
927 *(u32 *)&im_len = 0;
611 buf_prd += from; 928 buf_prd += from;
612 for (i = 0; i < MAX_SG_ENTRY - from; i++) { 929
613 buf_prd->addr = cpu_to_le64(buf_dma); 930#define PRD_CHAINED_ENTRY 0x01
614 buf_prd->im_len.len = cpu_to_le32(buf_len); 931 if ((mvi->pdev->revision == VANIR_A0_REV) ||
615 ++buf_prd; 932 (mvi->pdev->revision == VANIR_B0_REV))
933 buf_dma = (phy_mask <= 0x08) ?
934 mvi->bulk_buffer_dma : mvi->bulk_buffer_dma1;
935 else
936 return;
937
938 for (i = from; i < MAX_SG_ENTRY; i++, ++buf_prd) {
939 if (i == MAX_SG_ENTRY - 1) {
940 buf_prd->addr = cpu_to_le64(virt_to_phys(buf_prd - 1));
941 im_len.len = 2;
942 im_len.misc_ctl = PRD_CHAINED_ENTRY;
943 } else {
944 buf_prd->addr = cpu_to_le64(buf_dma);
945 im_len.len = buf_len;
946 }
947 buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
616 } 948 }
617} 949}
618#endif
619 950
620/* 951static void mvs_94xx_tune_interrupt(struct mvs_info *mvi, u32 time)
621 * FIXME JEJB: temporary nop clear_srs_irq to make 94xx still work
622 * with 64xx fixes
623 */
624static void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set,
625 u8 clear_all)
626{ 952{
953 void __iomem *regs = mvi->regs;
954 u32 tmp = 0;
955 /*
956 * the max count is 0x1ff, while our max slot is 0x200,
957 * it will make count 0.
958 */
959 if (time == 0) {
960 mw32(MVS_INT_COAL, 0);
961 mw32(MVS_INT_COAL_TMOUT, 0x10000);
962 } else {
963 if (MVS_CHIP_SLOT_SZ > 0x1ff)
964 mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
965 else
966 mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);
967
968 tmp = 0x10000 | time;
969 mw32(MVS_INT_COAL_TMOUT, tmp);
970 }
971
627} 972}
628 973
629const struct mvs_dispatch mvs_94xx_dispatch = { 974const struct mvs_dispatch mvs_94xx_dispatch = {
@@ -648,7 +993,6 @@ const struct mvs_dispatch mvs_94xx_dispatch = {
648 mvs_write_port_irq_stat, 993 mvs_write_port_irq_stat,
649 mvs_read_port_irq_mask, 994 mvs_read_port_irq_mask,
650 mvs_write_port_irq_mask, 995 mvs_write_port_irq_mask,
651 mvs_get_sas_addr,
652 mvs_94xx_command_active, 996 mvs_94xx_command_active,
653 mvs_94xx_clear_srs_irq, 997 mvs_94xx_clear_srs_irq,
654 mvs_94xx_issue_stop, 998 mvs_94xx_issue_stop,
@@ -676,8 +1020,8 @@ const struct mvs_dispatch mvs_94xx_dispatch = {
676 mvs_94xx_spi_buildcmd, 1020 mvs_94xx_spi_buildcmd,
677 mvs_94xx_spi_issuecmd, 1021 mvs_94xx_spi_issuecmd,
678 mvs_94xx_spi_waitdataready, 1022 mvs_94xx_spi_waitdataready,
679#ifndef DISABLE_HOTPLUG_DMA_FIX
680 mvs_94xx_fix_dma, 1023 mvs_94xx_fix_dma,
681#endif 1024 mvs_94xx_tune_interrupt,
1025 mvs_94xx_non_spec_ncq_error,
682}; 1026};
683 1027
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
index 8835befe2c0e..8f7eb4f21140 100644
--- a/drivers/scsi/mvsas/mv_94xx.h
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -30,6 +30,14 @@
30 30
31#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS 31#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS
32 32
33enum VANIR_REVISION_ID {
34 VANIR_A0_REV = 0xA0,
35 VANIR_B0_REV = 0x01,
36 VANIR_C0_REV = 0x02,
37 VANIR_C1_REV = 0x03,
38 VANIR_C2_REV = 0xC2,
39};
40
33enum hw_registers { 41enum hw_registers {
34 MVS_GBL_CTL = 0x04, /* global control */ 42 MVS_GBL_CTL = 0x04, /* global control */
35 MVS_GBL_INT_STAT = 0x00, /* global irq status */ 43 MVS_GBL_INT_STAT = 0x00, /* global irq status */
@@ -101,6 +109,7 @@ enum hw_registers {
101 MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */ 109 MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */
102 MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */ 110 MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */
103 MVS_PA_VSR_PORT = 0x294, /* All port VSR data */ 111 MVS_PA_VSR_PORT = 0x294, /* All port VSR data */
112 MVS_COMMAND_ACTIVE = 0x300,
104}; 113};
105 114
106enum pci_cfg_registers { 115enum pci_cfg_registers {
@@ -112,26 +121,29 @@ enum pci_cfg_registers {
112 121
113/* SAS/SATA Vendor Specific Port Registers */ 122/* SAS/SATA Vendor Specific Port Registers */
114enum sas_sata_vsp_regs { 123enum sas_sata_vsp_regs {
115 VSR_PHY_STAT = 0x00 * 4, /* Phy Status */ 124 VSR_PHY_STAT = 0x00 * 4, /* Phy Interrupt Status */
116 VSR_PHY_MODE1 = 0x01 * 4, /* phy tx */ 125 VSR_PHY_MODE1 = 0x01 * 4, /* phy Interrupt Enable */
117 VSR_PHY_MODE2 = 0x02 * 4, /* tx scc */ 126 VSR_PHY_MODE2 = 0x02 * 4, /* Phy Configuration */
118 VSR_PHY_MODE3 = 0x03 * 4, /* pll */ 127 VSR_PHY_MODE3 = 0x03 * 4, /* Phy Status */
119 VSR_PHY_MODE4 = 0x04 * 4, /* VCO */ 128 VSR_PHY_MODE4 = 0x04 * 4, /* Phy Counter 0 */
120 VSR_PHY_MODE5 = 0x05 * 4, /* Rx */ 129 VSR_PHY_MODE5 = 0x05 * 4, /* Phy Counter 1 */
121 VSR_PHY_MODE6 = 0x06 * 4, /* CDR */ 130 VSR_PHY_MODE6 = 0x06 * 4, /* Event Counter Control */
122 VSR_PHY_MODE7 = 0x07 * 4, /* Impedance */ 131 VSR_PHY_MODE7 = 0x07 * 4, /* Event Counter Select */
123 VSR_PHY_MODE8 = 0x08 * 4, /* Voltage */ 132 VSR_PHY_MODE8 = 0x08 * 4, /* Event Counter 0 */
124 VSR_PHY_MODE9 = 0x09 * 4, /* Test */ 133 VSR_PHY_MODE9 = 0x09 * 4, /* Event Counter 1 */
125 VSR_PHY_MODE10 = 0x0A * 4, /* Power */ 134 VSR_PHY_MODE10 = 0x0A * 4, /* Event Counter 2 */
126 VSR_PHY_MODE11 = 0x0B * 4, /* Phy Mode */ 135 VSR_PHY_MODE11 = 0x0B * 4, /* Event Counter 3 */
127 VSR_PHY_VS0 = 0x0C * 4, /* Vednor Specific 0 */ 136 VSR_PHY_ACT_LED = 0x0C * 4, /* Activity LED control */
128 VSR_PHY_VS1 = 0x0D * 4, /* Vednor Specific 1 */ 137
138 VSR_PHY_FFE_CONTROL = 0x10C,
139 VSR_PHY_DFE_UPDATE_CRTL = 0x110,
140 VSR_REF_CLOCK_CRTL = 0x1A0,
129}; 141};
130 142
131enum chip_register_bits { 143enum chip_register_bits {
132 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8), 144 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
133 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8), 145 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 12),
134 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12), 146 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
135 PHY_NEG_SPP_PHYS_LINK_RATE_MASK = 147 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
136 (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), 148 (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
137}; 149};
@@ -169,22 +181,75 @@ enum pci_interrupt_cause {
169 IRQ_PCIE_ERR = (1 << 31), 181 IRQ_PCIE_ERR = (1 << 31),
170}; 182};
171 183
184union reg_phy_cfg {
185 u32 v;
186 struct {
187 u32 phy_reset:1;
188 u32 sas_support:1;
189 u32 sata_support:1;
190 u32 sata_host_mode:1;
191 /*
192 * bit 2: 6Gbps support
193 * bit 1: 3Gbps support
194 * bit 0: 1.5Gbps support
195 */
196 u32 speed_support:3;
197 u32 snw_3_support:1;
198 u32 tx_lnk_parity:1;
199 /*
200 * bit 5: G1 (1.5Gbps) Without SSC
201 * bit 4: G1 (1.5Gbps) with SSC
202 * bit 3: G2 (3.0Gbps) Without SSC
203 * bit 2: G2 (3.0Gbps) with SSC
204 * bit 1: G3 (6.0Gbps) without SSC
205 * bit 0: G3 (6.0Gbps) with SSC
206 */
207 u32 tx_spt_phs_lnk_rate:6;
208 /* 8h: 1.5Gbps 9h: 3Gbps Ah: 6Gbps */
209 u32 tx_lgcl_lnk_rate:4;
210 u32 tx_ssc_type:1;
211 u32 sata_spin_up_spt:1;
212 u32 sata_spin_up_en:1;
213 u32 bypass_oob:1;
214 u32 disable_phy:1;
215 u32 rsvd:8;
216 } u;
217};
218
172#define MAX_SG_ENTRY 255 219#define MAX_SG_ENTRY 255
173 220
174struct mvs_prd_imt { 221struct mvs_prd_imt {
222#ifndef __BIG_ENDIAN
175 __le32 len:22; 223 __le32 len:22;
176 u8 _r_a:2; 224 u8 _r_a:2;
177 u8 misc_ctl:4; 225 u8 misc_ctl:4;
178 u8 inter_sel:4; 226 u8 inter_sel:4;
227#else
228 u32 inter_sel:4;
229 u32 misc_ctl:4;
230 u32 _r_a:2;
231 u32 len:22;
232#endif
179}; 233};
180 234
181struct mvs_prd { 235struct mvs_prd {
182 /* 64-bit buffer address */ 236 /* 64-bit buffer address */
183 __le64 addr; 237 __le64 addr;
184 /* 22-bit length */ 238 /* 22-bit length */
185 struct mvs_prd_imt im_len; 239 __le32 im_len;
186} __attribute__ ((packed)); 240} __attribute__ ((packed));
187 241
242/*
243 * these registers are accessed through port vendor
244 * specific address/data registers
245 */
246enum sas_sata_phy_regs {
247 GENERATION_1_SETTING = 0x118,
248 GENERATION_1_2_SETTING = 0x11C,
249 GENERATION_2_3_SETTING = 0x120,
250 GENERATION_3_4_SETTING = 0x124,
251};
252
188#define SPI_CTRL_REG_94XX 0xc800 253#define SPI_CTRL_REG_94XX 0xc800
189#define SPI_ADDR_REG_94XX 0xc804 254#define SPI_ADDR_REG_94XX 0xc804
190#define SPI_WR_DATA_REG_94XX 0xc808 255#define SPI_WR_DATA_REG_94XX 0xc808
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
index 1753a6fc42d0..bcc408042cee 100644
--- a/drivers/scsi/mvsas/mv_chips.h
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -164,7 +164,6 @@ static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
164{ 164{
165 u32 tmp; 165 u32 tmp;
166 166
167 /* workaround for SATA R-ERR, to ignore phy glitch */
168 tmp = mvs_cr32(mvi, CMD_PHY_TIMER); 167 tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
169 tmp &= ~(1 << 9); 168 tmp &= ~(1 << 9);
170 tmp |= (1 << 10); 169 tmp |= (1 << 10);
@@ -179,23 +178,10 @@ static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
179 tmp |= 0x3fff; 178 tmp |= 0x3fff;
180 mvs_cw32(mvi, CMD_SAS_CTL0, tmp); 179 mvs_cw32(mvi, CMD_SAS_CTL0, tmp);
181 180
182 /* workaround for WDTIMEOUT , set to 550 ms */
183 mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000); 181 mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);
184 182
185 /* not to halt for different port op during wideport link change */ 183 /* not to halt for different port op during wideport link change */
186 mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d); 184 mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);
187
188 /* workaround for Seagate disk not-found OOB sequence, recv
189 * COMINIT before sending out COMWAKE */
190 tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
191 tmp &= 0x0000ffff;
192 tmp |= 0x00fa0000;
193 mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
194
195 tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
196 tmp &= 0x1fffffff;
197 tmp |= (2U << 29); /* 8 ms retry */
198 mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
199} 185}
200 186
201static inline void mvs_int_sata(struct mvs_info *mvi) 187static inline void mvs_int_sata(struct mvs_info *mvi)
@@ -223,6 +209,9 @@ static inline void mvs_int_full(struct mvs_info *mvi)
223 mvs_int_port(mvi, i, tmp); 209 mvs_int_port(mvi, i, tmp);
224 } 210 }
225 211
212 if (stat & CINT_NON_SPEC_NCQ_ERROR)
213 MVS_CHIP_DISP->non_spec_ncq_error(mvi);
214
226 if (stat & CINT_SRS) 215 if (stat & CINT_SRS)
227 mvs_int_sata(mvi); 216 mvs_int_sata(mvi);
228 217
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
index bc00c940743c..dec7cadb7485 100644
--- a/drivers/scsi/mvsas/mv_defs.h
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -43,7 +43,6 @@ enum chip_flavors {
43 43
44/* driver compile-time configuration */ 44/* driver compile-time configuration */
45enum driver_configuration { 45enum driver_configuration {
46 MVS_SLOTS = 512, /* command slots */
47 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ 46 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
48 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ 47 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
49 /* software requires power-of-2 48 /* software requires power-of-2
@@ -56,8 +55,7 @@ enum driver_configuration {
56 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ 55 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
57 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ 56 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
58 MVS_OAF_SZ = 64, /* Open address frame buffer size */ 57 MVS_OAF_SZ = 64, /* Open address frame buffer size */
59 MVS_QUEUE_SIZE = 32, /* Support Queue depth */ 58 MVS_QUEUE_SIZE = 64, /* Support Queue depth */
60 MVS_CAN_QUEUE = MVS_SLOTS - 2, /* SCSI Queue depth */
61 MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2, 59 MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2,
62}; 60};
63 61
@@ -144,6 +142,7 @@ enum hw_register_bits {
144 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */ 142 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
145 CINT_MEM = (1U << 26), /* int mem parity err */ 143 CINT_MEM = (1U << 26), /* int mem parity err */
146 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */ 144 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
145 CINT_NON_SPEC_NCQ_ERROR = (1U << 25), /* Non specific NCQ error */
147 CINT_SRS = (1U << 3), /* SRS event */ 146 CINT_SRS = (1U << 3), /* SRS event */
148 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */ 147 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
149 CINT_DONE = (1U << 0), /* cmd completion */ 148 CINT_DONE = (1U << 0), /* cmd completion */
@@ -161,7 +160,7 @@ enum hw_register_bits {
161 TXQ_CMD_SSP = 1, /* SSP protocol */ 160 TXQ_CMD_SSP = 1, /* SSP protocol */
162 TXQ_CMD_SMP = 2, /* SMP protocol */ 161 TXQ_CMD_SMP = 2, /* SMP protocol */
163 TXQ_CMD_STP = 3, /* STP/SATA protocol */ 162 TXQ_CMD_STP = 3, /* STP/SATA protocol */
164 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */ 163 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP target free list */
165 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ 164 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
166 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ 165 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
167 TXQ_MODE_TARGET = 0, 166 TXQ_MODE_TARGET = 0,
@@ -391,15 +390,15 @@ enum sas_cmd_port_registers {
391}; 390};
392 391
393enum mvs_info_flags { 392enum mvs_info_flags {
394 MVF_MSI = (1U << 0), /* MSI is enabled */
395 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ 393 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
396 MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */ 394 MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */
397}; 395};
398 396
399enum mvs_event_flags { 397enum mvs_event_flags {
400 PHY_PLUG_EVENT = (3U), 398 PHY_PLUG_EVENT = (3U),
401 PHY_PLUG_IN = (1U << 0), /* phy plug in */ 399 PHY_PLUG_IN = (1U << 0), /* phy plug in */
402 PHY_PLUG_OUT = (1U << 1), /* phy plug out */ 400 PHY_PLUG_OUT = (1U << 1), /* phy plug out */
401 EXP_BRCT_CHG = (1U << 2), /* broadcast change */
403}; 402};
404 403
405enum mvs_port_type { 404enum mvs_port_type {
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 90b636611cde..4e9af66fd1d3 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -34,22 +34,25 @@ MODULE_PARM_DESC(collector, "\n"
34 "\tThe mvsas SAS LLDD supports both modes.\n" 34 "\tThe mvsas SAS LLDD supports both modes.\n"
35 "\tDefault: 1 (Direct Mode).\n"); 35 "\tDefault: 1 (Direct Mode).\n");
36 36
37int interrupt_coalescing = 0x80;
38
37static struct scsi_transport_template *mvs_stt; 39static struct scsi_transport_template *mvs_stt;
38struct kmem_cache *mvs_task_list_cache; 40struct kmem_cache *mvs_task_list_cache;
39static const struct mvs_chip_info mvs_chips[] = { 41static const struct mvs_chip_info mvs_chips[] = {
40 [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, 42 [chip_6320] = { 1, 2, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
41 [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, 43 [chip_6440] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
42 [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, }, 44 [chip_6485] = { 1, 8, 0x800, 33, 32, 6, 10, &mvs_64xx_dispatch, },
43 [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 45 [chip_9180] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
44 [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 46 [chip_9480] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
45 [chip_9445] = { 1, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, }, 47 [chip_9445] = { 1, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
46 [chip_9485] = { 2, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, }, 48 [chip_9485] = { 2, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
47 [chip_1300] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, 49 [chip_1300] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
48 [chip_1320] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, 50 [chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
49}; 51};
50 52
53struct device_attribute *mvst_host_attrs[];
54
51#define SOC_SAS_NUM 2 55#define SOC_SAS_NUM 2
52#define SG_MX 64
53 56
54static struct scsi_host_template mvs_sht = { 57static struct scsi_host_template mvs_sht = {
55 .module = THIS_MODULE, 58 .module = THIS_MODULE,
@@ -66,7 +69,7 @@ static struct scsi_host_template mvs_sht = {
66 .can_queue = 1, 69 .can_queue = 1,
67 .cmd_per_lun = 1, 70 .cmd_per_lun = 1,
68 .this_id = -1, 71 .this_id = -1,
69 .sg_tablesize = SG_MX, 72 .sg_tablesize = SG_ALL,
70 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 73 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
71 .use_clustering = ENABLE_CLUSTERING, 74 .use_clustering = ENABLE_CLUSTERING,
72 .eh_device_reset_handler = sas_eh_device_reset_handler, 75 .eh_device_reset_handler = sas_eh_device_reset_handler,
@@ -74,6 +77,7 @@ static struct scsi_host_template mvs_sht = {
74 .slave_alloc = mvs_slave_alloc, 77 .slave_alloc = mvs_slave_alloc,
75 .target_destroy = sas_target_destroy, 78 .target_destroy = sas_target_destroy,
76 .ioctl = sas_ioctl, 79 .ioctl = sas_ioctl,
80 .shost_attrs = mvst_host_attrs,
77}; 81};
78 82
79static struct sas_domain_function_template mvs_transport_ops = { 83static struct sas_domain_function_template mvs_transport_ops = {
@@ -100,6 +104,7 @@ static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
100 struct asd_sas_phy *sas_phy = &phy->sas_phy; 104 struct asd_sas_phy *sas_phy = &phy->sas_phy;
101 105
102 phy->mvi = mvi; 106 phy->mvi = mvi;
107 phy->port = NULL;
103 init_timer(&phy->timer); 108 init_timer(&phy->timer);
104 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0; 109 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
105 sas_phy->class = SAS; 110 sas_phy->class = SAS;
@@ -128,7 +133,7 @@ static void mvs_free(struct mvs_info *mvi)
128 if (mvi->flags & MVF_FLAG_SOC) 133 if (mvi->flags & MVF_FLAG_SOC)
129 slot_nr = MVS_SOC_SLOTS; 134 slot_nr = MVS_SOC_SLOTS;
130 else 135 else
131 slot_nr = MVS_SLOTS; 136 slot_nr = MVS_CHIP_SLOT_SZ;
132 137
133 if (mvi->dma_pool) 138 if (mvi->dma_pool)
134 pci_pool_destroy(mvi->dma_pool); 139 pci_pool_destroy(mvi->dma_pool);
@@ -148,25 +153,26 @@ static void mvs_free(struct mvs_info *mvi)
148 dma_free_coherent(mvi->dev, 153 dma_free_coherent(mvi->dev,
149 sizeof(*mvi->slot) * slot_nr, 154 sizeof(*mvi->slot) * slot_nr,
150 mvi->slot, mvi->slot_dma); 155 mvi->slot, mvi->slot_dma);
151#ifndef DISABLE_HOTPLUG_DMA_FIX 156
152 if (mvi->bulk_buffer) 157 if (mvi->bulk_buffer)
153 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE, 158 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
154 mvi->bulk_buffer, mvi->bulk_buffer_dma); 159 mvi->bulk_buffer, mvi->bulk_buffer_dma);
155#endif 160 if (mvi->bulk_buffer1)
161 dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
162 mvi->bulk_buffer1, mvi->bulk_buffer_dma1);
156 163
157 MVS_CHIP_DISP->chip_iounmap(mvi); 164 MVS_CHIP_DISP->chip_iounmap(mvi);
158 if (mvi->shost) 165 if (mvi->shost)
159 scsi_host_put(mvi->shost); 166 scsi_host_put(mvi->shost);
160 list_for_each_entry(mwq, &mvi->wq_list, entry) 167 list_for_each_entry(mwq, &mvi->wq_list, entry)
161 cancel_delayed_work(&mwq->work_q); 168 cancel_delayed_work(&mwq->work_q);
169 kfree(mvi->tags);
162 kfree(mvi); 170 kfree(mvi);
163} 171}
164 172
165#ifdef MVS_USE_TASKLET 173#ifdef CONFIG_SCSI_MVSAS_TASKLET
166struct tasklet_struct mv_tasklet;
167static void mvs_tasklet(unsigned long opaque) 174static void mvs_tasklet(unsigned long opaque)
168{ 175{
169 unsigned long flags;
170 u32 stat; 176 u32 stat;
171 u16 core_nr, i = 0; 177 u16 core_nr, i = 0;
172 178
@@ -179,35 +185,49 @@ static void mvs_tasklet(unsigned long opaque)
179 if (unlikely(!mvi)) 185 if (unlikely(!mvi))
180 BUG_ON(1); 186 BUG_ON(1);
181 187
188 stat = MVS_CHIP_DISP->isr_status(mvi, mvi->pdev->irq);
189 if (!stat)
190 goto out;
191
182 for (i = 0; i < core_nr; i++) { 192 for (i = 0; i < core_nr; i++) {
183 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; 193 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
184 stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq); 194 MVS_CHIP_DISP->isr(mvi, mvi->pdev->irq, stat);
185 if (stat)
186 MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
187 } 195 }
196out:
197 MVS_CHIP_DISP->interrupt_enable(mvi);
188 198
189} 199}
190#endif 200#endif
191 201
192static irqreturn_t mvs_interrupt(int irq, void *opaque) 202static irqreturn_t mvs_interrupt(int irq, void *opaque)
193{ 203{
194 u32 core_nr, i = 0; 204 u32 core_nr;
195 u32 stat; 205 u32 stat;
196 struct mvs_info *mvi; 206 struct mvs_info *mvi;
197 struct sas_ha_struct *sha = opaque; 207 struct sas_ha_struct *sha = opaque;
208#ifndef CONFIG_SCSI_MVSAS_TASKLET
209 u32 i;
210#endif
198 211
199 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; 212 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
200 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; 213 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
201 214
202 if (unlikely(!mvi)) 215 if (unlikely(!mvi))
203 return IRQ_NONE; 216 return IRQ_NONE;
217#ifdef CONFIG_SCSI_MVSAS_TASKLET
218 MVS_CHIP_DISP->interrupt_disable(mvi);
219#endif
204 220
205 stat = MVS_CHIP_DISP->isr_status(mvi, irq); 221 stat = MVS_CHIP_DISP->isr_status(mvi, irq);
206 if (!stat) 222 if (!stat) {
223 #ifdef CONFIG_SCSI_MVSAS_TASKLET
224 MVS_CHIP_DISP->interrupt_enable(mvi);
225 #endif
207 return IRQ_NONE; 226 return IRQ_NONE;
227 }
208 228
209#ifdef MVS_USE_TASKLET 229#ifdef CONFIG_SCSI_MVSAS_TASKLET
210 tasklet_schedule(&mv_tasklet); 230 tasklet_schedule(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
211#else 231#else
212 for (i = 0; i < core_nr; i++) { 232 for (i = 0; i < core_nr; i++) {
213 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; 233 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
@@ -225,7 +245,7 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
225 if (mvi->flags & MVF_FLAG_SOC) 245 if (mvi->flags & MVF_FLAG_SOC)
226 slot_nr = MVS_SOC_SLOTS; 246 slot_nr = MVS_SOC_SLOTS;
227 else 247 else
228 slot_nr = MVS_SLOTS; 248 slot_nr = MVS_CHIP_SLOT_SZ;
229 249
230 spin_lock_init(&mvi->lock); 250 spin_lock_init(&mvi->lock);
231 for (i = 0; i < mvi->chip->n_phy; i++) { 251 for (i = 0; i < mvi->chip->n_phy; i++) {
@@ -273,13 +293,18 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
273 goto err_out; 293 goto err_out;
274 memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr); 294 memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
275 295
276#ifndef DISABLE_HOTPLUG_DMA_FIX
277 mvi->bulk_buffer = dma_alloc_coherent(mvi->dev, 296 mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
278 TRASH_BUCKET_SIZE, 297 TRASH_BUCKET_SIZE,
279 &mvi->bulk_buffer_dma, GFP_KERNEL); 298 &mvi->bulk_buffer_dma, GFP_KERNEL);
280 if (!mvi->bulk_buffer) 299 if (!mvi->bulk_buffer)
281 goto err_out; 300 goto err_out;
282#endif 301
302 mvi->bulk_buffer1 = dma_alloc_coherent(mvi->dev,
303 TRASH_BUCKET_SIZE,
304 &mvi->bulk_buffer_dma1, GFP_KERNEL);
305 if (!mvi->bulk_buffer1)
306 goto err_out;
307
283 sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id); 308 sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
284 mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0); 309 mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0);
285 if (!mvi->dma_pool) { 310 if (!mvi->dma_pool) {
@@ -354,11 +379,12 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
354 const struct pci_device_id *ent, 379 const struct pci_device_id *ent,
355 struct Scsi_Host *shost, unsigned int id) 380 struct Scsi_Host *shost, unsigned int id)
356{ 381{
357 struct mvs_info *mvi; 382 struct mvs_info *mvi = NULL;
358 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 383 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
359 384
360 mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info), 385 mvi = kzalloc(sizeof(*mvi) +
361 GFP_KERNEL); 386 (1L << mvs_chips[ent->driver_data].slot_width) *
387 sizeof(struct mvs_slot_info), GFP_KERNEL);
362 if (!mvi) 388 if (!mvi)
363 return NULL; 389 return NULL;
364 390
@@ -367,7 +393,6 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
367 mvi->chip_id = ent->driver_data; 393 mvi->chip_id = ent->driver_data;
368 mvi->chip = &mvs_chips[mvi->chip_id]; 394 mvi->chip = &mvs_chips[mvi->chip_id];
369 INIT_LIST_HEAD(&mvi->wq_list); 395 INIT_LIST_HEAD(&mvi->wq_list);
370 mvi->irq = pdev->irq;
371 396
372 ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi; 397 ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
373 ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy; 398 ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
@@ -375,9 +400,10 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
375 mvi->id = id; 400 mvi->id = id;
376 mvi->sas = sha; 401 mvi->sas = sha;
377 mvi->shost = shost; 402 mvi->shost = shost;
378#ifdef MVS_USE_TASKLET 403
379 tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha); 404 mvi->tags = kzalloc(MVS_CHIP_SLOT_SZ>>3, GFP_KERNEL);
380#endif 405 if (!mvi->tags)
406 goto err_out;
381 407
382 if (MVS_CHIP_DISP->chip_ioremap(mvi)) 408 if (MVS_CHIP_DISP->chip_ioremap(mvi))
383 goto err_out; 409 goto err_out;
@@ -388,7 +414,6 @@ err_out:
388 return NULL; 414 return NULL;
389} 415}
390 416
391/* move to PCI layer or libata core? */
392static int pci_go_64(struct pci_dev *pdev) 417static int pci_go_64(struct pci_dev *pdev)
393{ 418{
394 int rc; 419 int rc;
@@ -450,7 +475,7 @@ static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
450 ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr; 475 ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
451 476
452 shost->transportt = mvs_stt; 477 shost->transportt = mvs_stt;
453 shost->max_id = 128; 478 shost->max_id = MVS_MAX_DEVICES;
454 shost->max_lun = ~0; 479 shost->max_lun = ~0;
455 shost->max_channel = 1; 480 shost->max_channel = 1;
456 shost->max_cmd_len = 16; 481 shost->max_cmd_len = 16;
@@ -493,11 +518,12 @@ static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
493 if (mvi->flags & MVF_FLAG_SOC) 518 if (mvi->flags & MVF_FLAG_SOC)
494 can_queue = MVS_SOC_CAN_QUEUE; 519 can_queue = MVS_SOC_CAN_QUEUE;
495 else 520 else
496 can_queue = MVS_CAN_QUEUE; 521 can_queue = MVS_CHIP_SLOT_SZ;
497 522
498 sha->lldd_queue_size = can_queue; 523 sha->lldd_queue_size = can_queue;
524 shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG);
499 shost->can_queue = can_queue; 525 shost->can_queue = can_queue;
500 mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys; 526 mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
501 sha->core.shost = mvi->shost; 527 sha->core.shost = mvi->shost;
502} 528}
503 529
@@ -518,6 +544,7 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
518{ 544{
519 unsigned int rc, nhost = 0; 545 unsigned int rc, nhost = 0;
520 struct mvs_info *mvi; 546 struct mvs_info *mvi;
547 struct mvs_prv_info *mpi;
521 irq_handler_t irq_handler = mvs_interrupt; 548 irq_handler_t irq_handler = mvs_interrupt;
522 struct Scsi_Host *shost = NULL; 549 struct Scsi_Host *shost = NULL;
523 const struct mvs_chip_info *chip; 550 const struct mvs_chip_info *chip;
@@ -569,6 +596,9 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
569 goto err_out_regions; 596 goto err_out_regions;
570 } 597 }
571 598
599 memset(&mvi->hba_info_param, 0xFF,
600 sizeof(struct hba_info_page));
601
572 mvs_init_sas_add(mvi); 602 mvs_init_sas_add(mvi);
573 603
574 mvi->instance = nhost; 604 mvi->instance = nhost;
@@ -579,8 +609,9 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
579 } 609 }
580 nhost++; 610 nhost++;
581 } while (nhost < chip->n_host); 611 } while (nhost < chip->n_host);
582#ifdef MVS_USE_TASKLET 612 mpi = (struct mvs_prv_info *)(SHOST_TO_SAS_HA(shost)->lldd_ha);
583 tasklet_init(&mv_tasklet, mvs_tasklet, 613#ifdef CONFIG_SCSI_MVSAS_TASKLET
614 tasklet_init(&(mpi->mv_tasklet), mvs_tasklet,
584 (unsigned long)SHOST_TO_SAS_HA(shost)); 615 (unsigned long)SHOST_TO_SAS_HA(shost));
585#endif 616#endif
586 617
@@ -625,8 +656,8 @@ static void __devexit mvs_pci_remove(struct pci_dev *pdev)
625 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; 656 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
626 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; 657 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
627 658
628#ifdef MVS_USE_TASKLET 659#ifdef CONFIG_SCSI_MVSAS_TASKLET
629 tasklet_kill(&mv_tasklet); 660 tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
630#endif 661#endif
631 662
632 pci_set_drvdata(pdev, NULL); 663 pci_set_drvdata(pdev, NULL);
@@ -635,7 +666,7 @@ static void __devexit mvs_pci_remove(struct pci_dev *pdev)
635 scsi_remove_host(mvi->shost); 666 scsi_remove_host(mvi->shost);
636 667
637 MVS_CHIP_DISP->interrupt_disable(mvi); 668 MVS_CHIP_DISP->interrupt_disable(mvi);
638 free_irq(mvi->irq, sha); 669 free_irq(mvi->pdev->irq, sha);
639 for (i = 0; i < core_nr; i++) { 670 for (i = 0; i < core_nr; i++) {
640 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; 671 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
641 mvs_free(mvi); 672 mvs_free(mvi);
@@ -703,6 +734,70 @@ static struct pci_driver mvs_pci_driver = {
703 .remove = __devexit_p(mvs_pci_remove), 734 .remove = __devexit_p(mvs_pci_remove),
704}; 735};
705 736
737static ssize_t
738mvs_show_driver_version(struct device *cdev,
739 struct device_attribute *attr, char *buffer)
740{
741 return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION);
742}
743
744static DEVICE_ATTR(driver_version,
745 S_IRUGO,
746 mvs_show_driver_version,
747 NULL);
748
749static ssize_t
750mvs_store_interrupt_coalescing(struct device *cdev,
751 struct device_attribute *attr,
752 const char *buffer, size_t size)
753{
754 int val = 0;
755 struct mvs_info *mvi = NULL;
756 struct Scsi_Host *shost = class_to_shost(cdev);
757 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
758 u8 i, core_nr;
759 if (buffer == NULL)
760 return size;
761
762 if (sscanf(buffer, "%d", &val) != 1)
763 return -EINVAL;
764
765 if (val >= 0x10000) {
766 mv_dprintk("interrupt coalescing timer %d us is"
767 "too long\n", val);
768 return strlen(buffer);
769 }
770
771 interrupt_coalescing = val;
772
773 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
774 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
775
776 if (unlikely(!mvi))
777 return -EINVAL;
778
779 for (i = 0; i < core_nr; i++) {
780 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
781 if (MVS_CHIP_DISP->tune_interrupt)
782 MVS_CHIP_DISP->tune_interrupt(mvi,
783 interrupt_coalescing);
784 }
785 mv_dprintk("set interrupt coalescing time to %d us\n",
786 interrupt_coalescing);
787 return strlen(buffer);
788}
789
790static ssize_t mvs_show_interrupt_coalescing(struct device *cdev,
791 struct device_attribute *attr, char *buffer)
792{
793 return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing);
794}
795
796static DEVICE_ATTR(interrupt_coalescing,
797 S_IRUGO|S_IWUSR,
798 mvs_show_interrupt_coalescing,
799 mvs_store_interrupt_coalescing);
800
706/* task handler */ 801/* task handler */
707struct task_struct *mvs_th; 802struct task_struct *mvs_th;
708static int __init mvs_init(void) 803static int __init mvs_init(void)
@@ -739,6 +834,12 @@ static void __exit mvs_exit(void)
739 kmem_cache_destroy(mvs_task_list_cache); 834 kmem_cache_destroy(mvs_task_list_cache);
740} 835}
741 836
837struct device_attribute *mvst_host_attrs[] = {
838 &dev_attr_driver_version,
839 &dev_attr_interrupt_coalescing,
840 NULL,
841};
842
742module_init(mvs_init); 843module_init(mvs_init);
743module_exit(mvs_exit); 844module_exit(mvs_exit);
744 845
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 0ef27425c447..4958fefff365 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -38,7 +38,7 @@ static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
38 38
39void mvs_tag_clear(struct mvs_info *mvi, u32 tag) 39void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
40{ 40{
41 void *bitmap = &mvi->tags; 41 void *bitmap = mvi->tags;
42 clear_bit(tag, bitmap); 42 clear_bit(tag, bitmap);
43} 43}
44 44
@@ -49,14 +49,14 @@ void mvs_tag_free(struct mvs_info *mvi, u32 tag)
49 49
50void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) 50void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
51{ 51{
52 void *bitmap = &mvi->tags; 52 void *bitmap = mvi->tags;
53 set_bit(tag, bitmap); 53 set_bit(tag, bitmap);
54} 54}
55 55
56inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) 56inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
57{ 57{
58 unsigned int index, tag; 58 unsigned int index, tag;
59 void *bitmap = &mvi->tags; 59 void *bitmap = mvi->tags;
60 60
61 index = find_first_zero_bit(bitmap, mvi->tags_num); 61 index = find_first_zero_bit(bitmap, mvi->tags_num);
62 tag = index; 62 tag = index;
@@ -74,126 +74,6 @@ void mvs_tag_init(struct mvs_info *mvi)
74 mvs_tag_clear(mvi, i); 74 mvs_tag_clear(mvi, i);
75} 75}
76 76
77void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
78{
79 u32 i;
80 u32 run;
81 u32 offset;
82
83 offset = 0;
84 while (size) {
85 printk(KERN_DEBUG"%08X : ", baseaddr + offset);
86 if (size >= 16)
87 run = 16;
88 else
89 run = size;
90 size -= run;
91 for (i = 0; i < 16; i++) {
92 if (i < run)
93 printk(KERN_DEBUG"%02X ", (u32)data[i]);
94 else
95 printk(KERN_DEBUG" ");
96 }
97 printk(KERN_DEBUG": ");
98 for (i = 0; i < run; i++)
99 printk(KERN_DEBUG"%c",
100 isalnum(data[i]) ? data[i] : '.');
101 printk(KERN_DEBUG"\n");
102 data = &data[16];
103 offset += run;
104 }
105 printk(KERN_DEBUG"\n");
106}
107
108#if (_MV_DUMP > 1)
109static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
110 enum sas_protocol proto)
111{
112 u32 offset;
113 struct mvs_slot_info *slot = &mvi->slot_info[tag];
114
115 offset = slot->cmd_size + MVS_OAF_SZ +
116 MVS_CHIP_DISP->prd_size() * slot->n_elem;
117 dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
118 tag);
119 mvs_hexdump(32, (u8 *) slot->response,
120 (u32) slot->buf_dma + offset);
121}
122#endif
123
124static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
125 enum sas_protocol proto)
126{
127#if (_MV_DUMP > 1)
128 u32 sz, w_ptr;
129 u64 addr;
130 struct mvs_slot_info *slot = &mvi->slot_info[tag];
131
132 /*Delivery Queue */
133 sz = MVS_CHIP_SLOT_SZ;
134 w_ptr = slot->tx;
135 addr = mvi->tx_dma;
136 dev_printk(KERN_DEBUG, mvi->dev,
137 "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
138 dev_printk(KERN_DEBUG, mvi->dev,
139 "Delivery Queue Base Address=0x%llX (PA)"
140 "(tx_dma=0x%llX), Entry=%04d\n",
141 addr, (unsigned long long)mvi->tx_dma, w_ptr);
142 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
143 (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
144 /*Command List */
145 addr = mvi->slot_dma;
146 dev_printk(KERN_DEBUG, mvi->dev,
147 "Command List Base Address=0x%llX (PA)"
148 "(slot_dma=0x%llX), Header=%03d\n",
149 addr, (unsigned long long)slot->buf_dma, tag);
150 dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
151 /*mvs_cmd_hdr */
152 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
153 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
154 /*1.command table area */
155 dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
156 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
157 /*2.open address frame area */
158 dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
159 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
160 (u32) slot->buf_dma + slot->cmd_size);
161 /*3.status buffer */
162 mvs_hba_sb_dump(mvi, tag, proto);
163 /*4.PRD table */
164 dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
165 mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
166 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
167 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
168#endif
169}
170
171static void mvs_hba_cq_dump(struct mvs_info *mvi)
172{
173#if (_MV_DUMP > 2)
174 u64 addr;
175 void __iomem *regs = mvi->regs;
176 u32 entry = mvi->rx_cons + 1;
177 u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
178
179 /*Completion Queue */
180 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
181 dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
182 mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
183 dev_printk(KERN_DEBUG, mvi->dev,
184 "Completion List Base Address=0x%llX (PA), "
185 "CQ_Entry=%04d, CQ_WP=0x%08X\n",
186 addr, entry - 1, mvi->rx[0]);
187 mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
188 mvi->rx_dma + sizeof(u32) * entry);
189#endif
190}
191
192void mvs_get_sas_addr(void *buf, u32 buflen)
193{
194 /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
195}
196
197struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) 77struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
198{ 78{
199 unsigned long i = 0, j = 0, hi = 0; 79 unsigned long i = 0, j = 0, hi = 0;
@@ -222,7 +102,6 @@ struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
222 102
223} 103}
224 104
225/* FIXME */
226int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) 105int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
227{ 106{
228 unsigned long i = 0, j = 0, n = 0, num = 0; 107 unsigned long i = 0, j = 0, n = 0, num = 0;
@@ -253,6 +132,20 @@ int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
253 return num; 132 return num;
254} 133}
255 134
135struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
136 u8 reg_set)
137{
138 u32 dev_no;
139 for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
140 if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
141 continue;
142
143 if (mvi->devices[dev_no].taskfileset == reg_set)
144 return &mvi->devices[dev_no];
145 }
146 return NULL;
147}
148
256static inline void mvs_free_reg_set(struct mvs_info *mvi, 149static inline void mvs_free_reg_set(struct mvs_info *mvi,
257 struct mvs_device *dev) 150 struct mvs_device *dev)
258{ 151{
@@ -283,7 +176,6 @@ void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
283 } 176 }
284} 177}
285 178
286/* FIXME: locking? */
287int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, 179int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
288 void *funcdata) 180 void *funcdata)
289{ 181{
@@ -309,12 +201,12 @@ int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
309 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id); 201 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
310 if (tmp & PHY_RST_HARD) 202 if (tmp & PHY_RST_HARD)
311 break; 203 break;
312 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1); 204 MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET);
313 break; 205 break;
314 206
315 case PHY_FUNC_LINK_RESET: 207 case PHY_FUNC_LINK_RESET:
316 MVS_CHIP_DISP->phy_enable(mvi, phy_id); 208 MVS_CHIP_DISP->phy_enable(mvi, phy_id);
317 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0); 209 MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET);
318 break; 210 break;
319 211
320 case PHY_FUNC_DISABLE: 212 case PHY_FUNC_DISABLE:
@@ -406,14 +298,10 @@ int mvs_slave_configure(struct scsi_device *sdev)
406 298
407 if (ret) 299 if (ret)
408 return ret; 300 return ret;
409 if (dev_is_sata(dev)) { 301 if (!dev_is_sata(dev)) {
410 /* may set PIO mode */ 302 sas_change_queue_depth(sdev,
411 #if MV_DISABLE_NCQ 303 MVS_QUEUE_SIZE,
412 struct ata_port *ap = dev->sata_dev.ap; 304 SCSI_QDEPTH_DEFAULT);
413 struct ata_device *adev = ap->link.device;
414 adev->flags |= ATA_DFLAG_NCQ_OFF;
415 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
416 #endif
417 } 305 }
418 return 0; 306 return 0;
419} 307}
@@ -424,6 +312,7 @@ void mvs_scan_start(struct Scsi_Host *shost)
424 unsigned short core_nr; 312 unsigned short core_nr;
425 struct mvs_info *mvi; 313 struct mvs_info *mvi;
426 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 314 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
315 struct mvs_prv_info *mvs_prv = sha->lldd_ha;
427 316
428 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; 317 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
429 318
@@ -432,15 +321,17 @@ void mvs_scan_start(struct Scsi_Host *shost)
432 for (i = 0; i < mvi->chip->n_phy; ++i) 321 for (i = 0; i < mvi->chip->n_phy; ++i)
433 mvs_bytes_dmaed(mvi, i); 322 mvs_bytes_dmaed(mvi, i);
434 } 323 }
324 mvs_prv->scan_finished = 1;
435} 325}
436 326
437int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) 327int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
438{ 328{
439 /* give the phy enabling interrupt event time to come in (1s 329 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
440 * is empirically about all it takes) */ 330 struct mvs_prv_info *mvs_prv = sha->lldd_ha;
441 if (time < HZ) 331
332 if (mvs_prv->scan_finished == 0)
442 return 0; 333 return 0;
443 /* Wait for discovery to finish */ 334
444 scsi_flush_work(shost); 335 scsi_flush_work(shost);
445 return 1; 336 return 1;
446} 337}
@@ -461,10 +352,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
461 void *buf_prd; 352 void *buf_prd;
462 struct mvs_slot_info *slot = &mvi->slot_info[tag]; 353 struct mvs_slot_info *slot = &mvi->slot_info[tag];
463 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); 354 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
464#if _MV_DUMP 355
465 u8 *buf_cmd;
466 void *from;
467#endif
468 /* 356 /*
469 * DMA-map SMP request, response buffers 357 * DMA-map SMP request, response buffers
470 */ 358 */
@@ -496,15 +384,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
496 buf_tmp = slot->buf; 384 buf_tmp = slot->buf;
497 buf_tmp_dma = slot->buf_dma; 385 buf_tmp_dma = slot->buf_dma;
498 386
499#if _MV_DUMP
500 buf_cmd = buf_tmp;
501 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
502 buf_tmp += req_len;
503 buf_tmp_dma += req_len;
504 slot->cmd_size = req_len;
505#else
506 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); 387 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
507#endif
508 388
509 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ 389 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
510 buf_oaf = buf_tmp; 390 buf_oaf = buf_tmp;
@@ -553,12 +433,6 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
553 /* fill in PRD (scatter/gather) table, if any */ 433 /* fill in PRD (scatter/gather) table, if any */
554 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); 434 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
555 435
556#if _MV_DUMP
557 /* copy cmd table */
558 from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
559 memcpy(buf_cmd, from + sg_req->offset, req_len);
560 kunmap_atomic(from, KM_IRQ0);
561#endif
562 return 0; 436 return 0;
563 437
564err_out_2: 438err_out_2:
@@ -616,14 +490,11 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
616 (mvi_dev->taskfileset << TXQ_SRS_SHIFT); 490 (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
617 mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); 491 mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
618 492
619#ifndef DISABLE_HOTPLUG_DMA_FIX
620 if (task->data_dir == DMA_FROM_DEVICE) 493 if (task->data_dir == DMA_FROM_DEVICE)
621 flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT); 494 flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
622 else 495 else
623 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); 496 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
624#else 497
625 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
626#endif
627 if (task->ata_task.use_ncq) 498 if (task->ata_task.use_ncq)
628 flags |= MCH_FPDMA; 499 flags |= MCH_FPDMA;
629 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { 500 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
@@ -631,11 +502,8 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
631 flags |= MCH_ATAPI; 502 flags |= MCH_ATAPI;
632 } 503 }
633 504
634 /* FIXME: fill in port multiplier number */
635
636 hdr->flags = cpu_to_le32(flags); 505 hdr->flags = cpu_to_le32(flags);
637 506
638 /* FIXME: the low order order 5 bits for the TAG if enable NCQ */
639 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag)) 507 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
640 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); 508 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
641 else 509 else
@@ -657,9 +525,6 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
657 525
658 buf_tmp += MVS_ATA_CMD_SZ; 526 buf_tmp += MVS_ATA_CMD_SZ;
659 buf_tmp_dma += MVS_ATA_CMD_SZ; 527 buf_tmp_dma += MVS_ATA_CMD_SZ;
660#if _MV_DUMP
661 slot->cmd_size = MVS_ATA_CMD_SZ;
662#endif
663 528
664 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ 529 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
665 /* used for STP. unused for SATA? */ 530 /* used for STP. unused for SATA? */
@@ -682,9 +547,6 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
682 buf_tmp_dma += i; 547 buf_tmp_dma += i;
683 548
684 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ 549 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
685 /* FIXME: probably unused, for SATA. kept here just in case
686 * we get a STP/SATA error information record
687 */
688 slot->response = buf_tmp; 550 slot->response = buf_tmp;
689 hdr->status_buf = cpu_to_le64(buf_tmp_dma); 551 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
690 if (mvi->flags & MVF_FLAG_SOC) 552 if (mvi->flags & MVF_FLAG_SOC)
@@ -715,11 +577,11 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
715 577
716 /* fill in PRD (scatter/gather) table, if any */ 578 /* fill in PRD (scatter/gather) table, if any */
717 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); 579 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
718#ifndef DISABLE_HOTPLUG_DMA_FIX 580
719 if (task->data_dir == DMA_FROM_DEVICE) 581 if (task->data_dir == DMA_FROM_DEVICE)
720 MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma, 582 MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
721 TRASH_BUCKET_SIZE, tei->n_elem, buf_prd); 583 TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
722#endif 584
723 return 0; 585 return 0;
724} 586}
725 587
@@ -761,6 +623,9 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
761 } 623 }
762 if (is_tmf) 624 if (is_tmf)
763 flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT); 625 flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
626 else
627 flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
628
764 hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT)); 629 hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
765 hdr->tags = cpu_to_le32(tag); 630 hdr->tags = cpu_to_le32(tag);
766 hdr->data_len = cpu_to_le32(task->total_xfer_len); 631 hdr->data_len = cpu_to_le32(task->total_xfer_len);
@@ -777,9 +642,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
777 642
778 buf_tmp += MVS_SSP_CMD_SZ; 643 buf_tmp += MVS_SSP_CMD_SZ;
779 buf_tmp_dma += MVS_SSP_CMD_SZ; 644 buf_tmp_dma += MVS_SSP_CMD_SZ;
780#if _MV_DUMP
781 slot->cmd_size = MVS_SSP_CMD_SZ;
782#endif
783 645
784 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ 646 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
785 buf_oaf = buf_tmp; 647 buf_oaf = buf_tmp;
@@ -986,7 +848,6 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
986 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 848 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
987 spin_unlock(&task->task_state_lock); 849 spin_unlock(&task->task_state_lock);
988 850
989 mvs_hba_memory_dump(mvi, tag, task->task_proto);
990 mvi_dev->running_req++; 851 mvi_dev->running_req++;
991 ++(*pass); 852 ++(*pass);
992 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); 853 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
@@ -1189,9 +1050,9 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
1189 mvs_slot_free(mvi, slot_idx); 1050 mvs_slot_free(mvi, slot_idx);
1190} 1051}
1191 1052
1192static void mvs_update_wideport(struct mvs_info *mvi, int i) 1053static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
1193{ 1054{
1194 struct mvs_phy *phy = &mvi->phy[i]; 1055 struct mvs_phy *phy = &mvi->phy[phy_no];
1195 struct mvs_port *port = phy->port; 1056 struct mvs_port *port = phy->port;
1196 int j, no; 1057 int j, no;
1197 1058
@@ -1246,18 +1107,17 @@ static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
1246 return NULL; 1107 return NULL;
1247 1108
1248 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); 1109 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
1249 s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1110 s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1250 1111
1251 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); 1112 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
1252 s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1113 s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1253 1114
1254 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); 1115 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
1255 s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1116 s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1256 1117
1257 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); 1118 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
1258 s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1119 s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1259 1120
1260 /* Workaround: take some ATAPI devices for ATA */
1261 if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01)) 1121 if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
1262 s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10); 1122 s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
1263 1123
@@ -1269,6 +1129,13 @@ static u32 mvs_is_sig_fis_received(u32 irq_status)
1269 return irq_status & PHYEV_SIG_FIS; 1129 return irq_status & PHYEV_SIG_FIS;
1270} 1130}
1271 1131
1132static void mvs_sig_remove_timer(struct mvs_phy *phy)
1133{
1134 if (phy->timer.function)
1135 del_timer(&phy->timer);
1136 phy->timer.function = NULL;
1137}
1138
1272void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) 1139void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1273{ 1140{
1274 struct mvs_phy *phy = &mvi->phy[i]; 1141 struct mvs_phy *phy = &mvi->phy[i];
@@ -1291,6 +1158,7 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1291 if (phy->phy_type & PORT_TYPE_SATA) { 1158 if (phy->phy_type & PORT_TYPE_SATA) {
1292 phy->identify.target_port_protocols = SAS_PROTOCOL_STP; 1159 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
1293 if (mvs_is_sig_fis_received(phy->irq_status)) { 1160 if (mvs_is_sig_fis_received(phy->irq_status)) {
1161 mvs_sig_remove_timer(phy);
1294 phy->phy_attached = 1; 1162 phy->phy_attached = 1;
1295 phy->att_dev_sas_addr = 1163 phy->att_dev_sas_addr =
1296 i + mvi->id * mvi->chip->n_phy; 1164 i + mvi->id * mvi->chip->n_phy;
@@ -1308,7 +1176,6 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1308 tmp | PHYEV_SIG_FIS); 1176 tmp | PHYEV_SIG_FIS);
1309 phy->phy_attached = 0; 1177 phy->phy_attached = 0;
1310 phy->phy_type &= ~PORT_TYPE_SATA; 1178 phy->phy_type &= ~PORT_TYPE_SATA;
1311 MVS_CHIP_DISP->phy_reset(mvi, i, 0);
1312 goto out_done; 1179 goto out_done;
1313 } 1180 }
1314 } else if (phy->phy_type & PORT_TYPE_SAS 1181 } else if (phy->phy_type & PORT_TYPE_SAS
@@ -1334,9 +1201,9 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1334 if (MVS_CHIP_DISP->phy_work_around) 1201 if (MVS_CHIP_DISP->phy_work_around)
1335 MVS_CHIP_DISP->phy_work_around(mvi, i); 1202 MVS_CHIP_DISP->phy_work_around(mvi, i);
1336 } 1203 }
1337 mv_dprintk("port %d attach dev info is %x\n", 1204 mv_dprintk("phy %d attach dev info is %x\n",
1338 i + mvi->id * mvi->chip->n_phy, phy->att_dev_info); 1205 i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
1339 mv_dprintk("port %d attach sas addr is %llx\n", 1206 mv_dprintk("phy %d attach sas addr is %llx\n",
1340 i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr); 1207 i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
1341out_done: 1208out_done:
1342 if (get_st) 1209 if (get_st)
@@ -1361,10 +1228,10 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1361 } 1228 }
1362 hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy; 1229 hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
1363 mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi]; 1230 mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
1364 if (sas_port->id >= mvi->chip->n_phy) 1231 if (i >= mvi->chip->n_phy)
1365 port = &mvi->port[sas_port->id - mvi->chip->n_phy]; 1232 port = &mvi->port[i - mvi->chip->n_phy];
1366 else 1233 else
1367 port = &mvi->port[sas_port->id]; 1234 port = &mvi->port[i];
1368 if (lock) 1235 if (lock)
1369 spin_lock_irqsave(&mvi->lock, flags); 1236 spin_lock_irqsave(&mvi->lock, flags);
1370 port->port_attached = 1; 1237 port->port_attached = 1;
@@ -1393,7 +1260,7 @@ static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
1393 return; 1260 return;
1394 } 1261 }
1395 list_for_each_entry(dev, &port->dev_list, dev_list_node) 1262 list_for_each_entry(dev, &port->dev_list, dev_list_node)
1396 mvs_do_release_task(phy->mvi, phy_no, NULL); 1263 mvs_do_release_task(phy->mvi, phy_no, dev);
1397 1264
1398} 1265}
1399 1266
@@ -1457,6 +1324,7 @@ int mvs_dev_found_notify(struct domain_device *dev, int lock)
1457 mvi_device->dev_status = MVS_DEV_NORMAL; 1324 mvi_device->dev_status = MVS_DEV_NORMAL;
1458 mvi_device->dev_type = dev->dev_type; 1325 mvi_device->dev_type = dev->dev_type;
1459 mvi_device->mvi_info = mvi; 1326 mvi_device->mvi_info = mvi;
1327 mvi_device->sas_device = dev;
1460 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { 1328 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
1461 int phy_id; 1329 int phy_id;
1462 u8 phy_num = parent_dev->ex_dev.num_phys; 1330 u8 phy_num = parent_dev->ex_dev.num_phys;
@@ -1508,6 +1376,7 @@ void mvs_dev_gone_notify(struct domain_device *dev)
1508 mv_dprintk("found dev has gone.\n"); 1376 mv_dprintk("found dev has gone.\n");
1509 } 1377 }
1510 dev->lldd_dev = NULL; 1378 dev->lldd_dev = NULL;
1379 mvi_dev->sas_device = NULL;
1511 1380
1512 spin_unlock_irqrestore(&mvi->lock, flags); 1381 spin_unlock_irqrestore(&mvi->lock, flags);
1513} 1382}
@@ -1555,7 +1424,6 @@ static void mvs_tmf_timedout(unsigned long data)
1555 complete(&task->completion); 1424 complete(&task->completion);
1556} 1425}
1557 1426
1558/* XXX */
1559#define MVS_TASK_TIMEOUT 20 1427#define MVS_TASK_TIMEOUT 20
1560static int mvs_exec_internal_tmf_task(struct domain_device *dev, 1428static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1561 void *parameter, u32 para_len, struct mvs_tmf_task *tmf) 1429 void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
@@ -1588,7 +1456,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1588 } 1456 }
1589 1457
1590 wait_for_completion(&task->completion); 1458 wait_for_completion(&task->completion);
1591 res = -TMF_RESP_FUNC_FAILED; 1459 res = TMF_RESP_FUNC_FAILED;
1592 /* Even TMF timed out, return direct. */ 1460 /* Even TMF timed out, return direct. */
1593 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 1461 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1594 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 1462 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
@@ -1638,11 +1506,10 @@ static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
1638 u8 *lun, struct mvs_tmf_task *tmf) 1506 u8 *lun, struct mvs_tmf_task *tmf)
1639{ 1507{
1640 struct sas_ssp_task ssp_task; 1508 struct sas_ssp_task ssp_task;
1641 DECLARE_COMPLETION_ONSTACK(completion);
1642 if (!(dev->tproto & SAS_PROTOCOL_SSP)) 1509 if (!(dev->tproto & SAS_PROTOCOL_SSP))
1643 return TMF_RESP_FUNC_ESUPP; 1510 return TMF_RESP_FUNC_ESUPP;
1644 1511
1645 strncpy((u8 *)&ssp_task.LUN, lun, 8); 1512 memcpy(ssp_task.LUN, lun, 8);
1646 1513
1647 return mvs_exec_internal_tmf_task(dev, &ssp_task, 1514 return mvs_exec_internal_tmf_task(dev, &ssp_task,
1648 sizeof(ssp_task), tmf); 1515 sizeof(ssp_task), tmf);
@@ -1666,7 +1533,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
1666int mvs_lu_reset(struct domain_device *dev, u8 *lun) 1533int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1667{ 1534{
1668 unsigned long flags; 1535 unsigned long flags;
1669 int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED; 1536 int rc = TMF_RESP_FUNC_FAILED;
1670 struct mvs_tmf_task tmf_task; 1537 struct mvs_tmf_task tmf_task;
1671 struct mvs_device * mvi_dev = dev->lldd_dev; 1538 struct mvs_device * mvi_dev = dev->lldd_dev;
1672 struct mvs_info *mvi = mvi_dev->mvi_info; 1539 struct mvs_info *mvi = mvi_dev->mvi_info;
@@ -1675,10 +1542,8 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1675 mvi_dev->dev_status = MVS_DEV_EH; 1542 mvi_dev->dev_status = MVS_DEV_EH;
1676 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); 1543 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1677 if (rc == TMF_RESP_FUNC_COMPLETE) { 1544 if (rc == TMF_RESP_FUNC_COMPLETE) {
1678 num = mvs_find_dev_phyno(dev, phyno);
1679 spin_lock_irqsave(&mvi->lock, flags); 1545 spin_lock_irqsave(&mvi->lock, flags);
1680 for (i = 0; i < num; i++) 1546 mvs_release_task(mvi, dev);
1681 mvs_release_task(mvi, dev);
1682 spin_unlock_irqrestore(&mvi->lock, flags); 1547 spin_unlock_irqrestore(&mvi->lock, flags);
1683 } 1548 }
1684 /* If failed, fall-through I_T_Nexus reset */ 1549 /* If failed, fall-through I_T_Nexus reset */
@@ -1696,11 +1561,12 @@ int mvs_I_T_nexus_reset(struct domain_device *dev)
1696 1561
1697 if (mvi_dev->dev_status != MVS_DEV_EH) 1562 if (mvi_dev->dev_status != MVS_DEV_EH)
1698 return TMF_RESP_FUNC_COMPLETE; 1563 return TMF_RESP_FUNC_COMPLETE;
1564 else
1565 mvi_dev->dev_status = MVS_DEV_NORMAL;
1699 rc = mvs_debug_I_T_nexus_reset(dev); 1566 rc = mvs_debug_I_T_nexus_reset(dev);
1700 mv_printk("%s for device[%x]:rc= %d\n", 1567 mv_printk("%s for device[%x]:rc= %d\n",
1701 __func__, mvi_dev->device_id, rc); 1568 __func__, mvi_dev->device_id, rc);
1702 1569
1703 /* housekeeper */
1704 spin_lock_irqsave(&mvi->lock, flags); 1570 spin_lock_irqsave(&mvi->lock, flags);
1705 mvs_release_task(mvi, dev); 1571 mvs_release_task(mvi, dev);
1706 spin_unlock_irqrestore(&mvi->lock, flags); 1572 spin_unlock_irqrestore(&mvi->lock, flags);
@@ -1739,9 +1605,6 @@ int mvs_query_task(struct sas_task *task)
1739 case TMF_RESP_FUNC_FAILED: 1605 case TMF_RESP_FUNC_FAILED:
1740 case TMF_RESP_FUNC_COMPLETE: 1606 case TMF_RESP_FUNC_COMPLETE:
1741 break; 1607 break;
1742 default:
1743 rc = TMF_RESP_FUNC_COMPLETE;
1744 break;
1745 } 1608 }
1746 } 1609 }
1747 mv_printk("%s:rc= %d\n", __func__, rc); 1610 mv_printk("%s:rc= %d\n", __func__, rc);
@@ -1761,8 +1624,8 @@ int mvs_abort_task(struct sas_task *task)
1761 u32 tag; 1624 u32 tag;
1762 1625
1763 if (!mvi_dev) { 1626 if (!mvi_dev) {
1764 mv_printk("%s:%d TMF_RESP_FUNC_FAILED\n", __func__, __LINE__); 1627 mv_printk("Device has removed\n");
1765 rc = TMF_RESP_FUNC_FAILED; 1628 return TMF_RESP_FUNC_FAILED;
1766 } 1629 }
1767 1630
1768 mvi = mvi_dev->mvi_info; 1631 mvi = mvi_dev->mvi_info;
@@ -1807,25 +1670,17 @@ int mvs_abort_task(struct sas_task *task)
1807 1670
1808 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1671 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1809 task->task_proto & SAS_PROTOCOL_STP) { 1672 task->task_proto & SAS_PROTOCOL_STP) {
1810 /* to do free register_set */
1811 if (SATA_DEV == dev->dev_type) { 1673 if (SATA_DEV == dev->dev_type) {
1812 struct mvs_slot_info *slot = task->lldd_task; 1674 struct mvs_slot_info *slot = task->lldd_task;
1813 struct task_status_struct *tstat;
1814 u32 slot_idx = (u32)(slot - mvi->slot_info); 1675 u32 slot_idx = (u32)(slot - mvi->slot_info);
1815 tstat = &task->task_status; 1676 mv_dprintk("mvs_abort_task() mvi=%p task=%p "
1816 mv_dprintk(KERN_DEBUG "mv_abort_task() mvi=%p task=%p "
1817 "slot=%p slot_idx=x%x\n", 1677 "slot=%p slot_idx=x%x\n",
1818 mvi, task, slot, slot_idx); 1678 mvi, task, slot, slot_idx);
1819 tstat->stat = SAS_ABORTED_TASK; 1679 mvs_tmf_timedout((unsigned long)task);
1820 if (mvi_dev && mvi_dev->running_req)
1821 mvi_dev->running_req--;
1822 if (sas_protocol_ata(task->task_proto))
1823 mvs_free_reg_set(mvi, mvi_dev);
1824 mvs_slot_task_free(mvi, task, slot, slot_idx); 1680 mvs_slot_task_free(mvi, task, slot, slot_idx);
1825 return -1; 1681 rc = TMF_RESP_FUNC_COMPLETE;
1682 goto out;
1826 } 1683 }
1827 } else {
1828 /* SMP */
1829 1684
1830 } 1685 }
1831out: 1686out:
@@ -1891,12 +1746,63 @@ static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1891 return stat; 1746 return stat;
1892} 1747}
1893 1748
1749void mvs_set_sense(u8 *buffer, int len, int d_sense,
1750 int key, int asc, int ascq)
1751{
1752 memset(buffer, 0, len);
1753
1754 if (d_sense) {
1755 /* Descriptor format */
1756 if (len < 4) {
1757 mv_printk("Length %d of sense buffer too small to "
1758 "fit sense %x:%x:%x", len, key, asc, ascq);
1759 }
1760
1761 buffer[0] = 0x72; /* Response Code */
1762 if (len > 1)
1763 buffer[1] = key; /* Sense Key */
1764 if (len > 2)
1765 buffer[2] = asc; /* ASC */
1766 if (len > 3)
1767 buffer[3] = ascq; /* ASCQ */
1768 } else {
1769 if (len < 14) {
1770 mv_printk("Length %d of sense buffer too small to "
1771 "fit sense %x:%x:%x", len, key, asc, ascq);
1772 }
1773
1774 buffer[0] = 0x70; /* Response Code */
1775 if (len > 2)
1776 buffer[2] = key; /* Sense Key */
1777 if (len > 7)
1778 buffer[7] = 0x0a; /* Additional Sense Length */
1779 if (len > 12)
1780 buffer[12] = asc; /* ASC */
1781 if (len > 13)
1782 buffer[13] = ascq; /* ASCQ */
1783 }
1784
1785 return;
1786}
1787
1788void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
1789 u8 key, u8 asc, u8 asc_q)
1790{
1791 iu->datapres = 2;
1792 iu->response_data_len = 0;
1793 iu->sense_data_len = 17;
1794 iu->status = 02;
1795 mvs_set_sense(iu->sense_data, 17, 0,
1796 key, asc, asc_q);
1797}
1798
1894static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, 1799static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1895 u32 slot_idx) 1800 u32 slot_idx)
1896{ 1801{
1897 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; 1802 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1898 int stat; 1803 int stat;
1899 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); 1804 u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response);
1805 u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1));
1900 u32 tfs = 0; 1806 u32 tfs = 0;
1901 enum mvs_port_type type = PORT_TYPE_SAS; 1807 enum mvs_port_type type = PORT_TYPE_SAS;
1902 1808
@@ -1908,8 +1814,19 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1908 stat = SAM_STAT_CHECK_CONDITION; 1814 stat = SAM_STAT_CHECK_CONDITION;
1909 switch (task->task_proto) { 1815 switch (task->task_proto) {
1910 case SAS_PROTOCOL_SSP: 1816 case SAS_PROTOCOL_SSP:
1817 {
1911 stat = SAS_ABORTED_TASK; 1818 stat = SAS_ABORTED_TASK;
1819 if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) {
1820 struct ssp_response_iu *iu = slot->response +
1821 sizeof(struct mvs_err_info);
1822 mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01);
1823 sas_ssp_task_response(mvi->dev, task, iu);
1824 stat = SAM_STAT_CHECK_CONDITION;
1825 }
1826 if (err_dw1 & bit(31))
1827 mv_printk("reuse same slot, retry command.\n");
1912 break; 1828 break;
1829 }
1913 case SAS_PROTOCOL_SMP: 1830 case SAS_PROTOCOL_SMP:
1914 stat = SAM_STAT_CHECK_CONDITION; 1831 stat = SAM_STAT_CHECK_CONDITION;
1915 break; 1832 break;
@@ -1918,10 +1835,8 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1918 case SAS_PROTOCOL_STP: 1835 case SAS_PROTOCOL_STP:
1919 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 1836 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1920 { 1837 {
1921 if (err_dw0 == 0x80400002)
1922 mv_printk("find reserved error, why?\n");
1923
1924 task->ata_task.use_ncq = 0; 1838 task->ata_task.use_ncq = 0;
1839 stat = SAS_PROTO_RESPONSE;
1925 mvs_sata_done(mvi, task, slot_idx, err_dw0); 1840 mvs_sata_done(mvi, task, slot_idx, err_dw0);
1926 } 1841 }
1927 break; 1842 break;
@@ -1945,8 +1860,6 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1945 void *to; 1860 void *to;
1946 enum exec_status sts; 1861 enum exec_status sts;
1947 1862
1948 if (mvi->exp_req)
1949 mvi->exp_req--;
1950 if (unlikely(!task || !task->lldd_task || !task->dev)) 1863 if (unlikely(!task || !task->lldd_task || !task->dev))
1951 return -1; 1864 return -1;
1952 1865
@@ -1954,8 +1867,6 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1954 dev = task->dev; 1867 dev = task->dev;
1955 mvi_dev = dev->lldd_dev; 1868 mvi_dev = dev->lldd_dev;
1956 1869
1957 mvs_hba_cq_dump(mvi);
1958
1959 spin_lock(&task->task_state_lock); 1870 spin_lock(&task->task_state_lock);
1960 task->task_state_flags &= 1871 task->task_state_flags &=
1961 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 1872 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
@@ -1978,6 +1889,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1978 return -1; 1889 return -1;
1979 } 1890 }
1980 1891
1892 /* when no device attaching, go ahead and complete by error handling*/
1981 if (unlikely(!mvi_dev || flags)) { 1893 if (unlikely(!mvi_dev || flags)) {
1982 if (!mvi_dev) 1894 if (!mvi_dev)
1983 mv_dprintk("port has not device.\n"); 1895 mv_dprintk("port has not device.\n");
@@ -1987,6 +1899,9 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1987 1899
1988 /* error info record present */ 1900 /* error info record present */
1989 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { 1901 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
1902 mv_dprintk("port %d slot %d rx_desc %X has error info"
1903 "%016llX.\n", slot->port->sas_port.id, slot_idx,
1904 rx_desc, (u64)(*(u64 *)slot->response));
1990 tstat->stat = mvs_slot_err(mvi, task, slot_idx); 1905 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1991 tstat->resp = SAS_TASK_COMPLETE; 1906 tstat->resp = SAS_TASK_COMPLETE;
1992 goto out; 1907 goto out;
@@ -2048,8 +1963,7 @@ out:
2048 spin_unlock(&mvi->lock); 1963 spin_unlock(&mvi->lock);
2049 if (task->task_done) 1964 if (task->task_done)
2050 task->task_done(task); 1965 task->task_done(task);
2051 else 1966
2052 mv_dprintk("why has not task_done.\n");
2053 spin_lock(&mvi->lock); 1967 spin_lock(&mvi->lock);
2054 1968
2055 return sts; 1969 return sts;
@@ -2092,7 +2006,6 @@ void mvs_release_task(struct mvs_info *mvi,
2092 struct domain_device *dev) 2006 struct domain_device *dev)
2093{ 2007{
2094 int i, phyno[WIDE_PORT_MAX_PHY], num; 2008 int i, phyno[WIDE_PORT_MAX_PHY], num;
2095 /* housekeeper */
2096 num = mvs_find_dev_phyno(dev, phyno); 2009 num = mvs_find_dev_phyno(dev, phyno);
2097 for (i = 0; i < num; i++) 2010 for (i = 0; i < num; i++)
2098 mvs_do_release_task(mvi, phyno[i], dev); 2011 mvs_do_release_task(mvi, phyno[i], dev);
@@ -2111,13 +2024,13 @@ static void mvs_work_queue(struct work_struct *work)
2111 struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q); 2024 struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
2112 struct mvs_info *mvi = mwq->mvi; 2025 struct mvs_info *mvi = mwq->mvi;
2113 unsigned long flags; 2026 unsigned long flags;
2027 u32 phy_no = (unsigned long) mwq->data;
2028 struct sas_ha_struct *sas_ha = mvi->sas;
2029 struct mvs_phy *phy = &mvi->phy[phy_no];
2030 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2114 2031
2115 spin_lock_irqsave(&mvi->lock, flags); 2032 spin_lock_irqsave(&mvi->lock, flags);
2116 if (mwq->handler & PHY_PLUG_EVENT) { 2033 if (mwq->handler & PHY_PLUG_EVENT) {
2117 u32 phy_no = (unsigned long) mwq->data;
2118 struct sas_ha_struct *sas_ha = mvi->sas;
2119 struct mvs_phy *phy = &mvi->phy[phy_no];
2120 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2121 2034
2122 if (phy->phy_event & PHY_PLUG_OUT) { 2035 if (phy->phy_event & PHY_PLUG_OUT) {
2123 u32 tmp; 2036 u32 tmp;
@@ -2139,6 +2052,11 @@ static void mvs_work_queue(struct work_struct *work)
2139 mv_dprintk("phy%d Attached Device\n", phy_no); 2052 mv_dprintk("phy%d Attached Device\n", phy_no);
2140 } 2053 }
2141 } 2054 }
2055 } else if (mwq->handler & EXP_BRCT_CHG) {
2056 phy->phy_event &= ~EXP_BRCT_CHG;
2057 sas_ha->notify_port_event(sas_phy,
2058 PORTE_BROADCAST_RCVD);
2059 mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
2142 } 2060 }
2143 list_del(&mwq->entry); 2061 list_del(&mwq->entry);
2144 spin_unlock_irqrestore(&mvi->lock, flags); 2062 spin_unlock_irqrestore(&mvi->lock, flags);
@@ -2174,29 +2092,21 @@ static void mvs_sig_time_out(unsigned long tphy)
2174 if (&mvi->phy[phy_no] == phy) { 2092 if (&mvi->phy[phy_no] == phy) {
2175 mv_dprintk("Get signature time out, reset phy %d\n", 2093 mv_dprintk("Get signature time out, reset phy %d\n",
2176 phy_no+mvi->id*mvi->chip->n_phy); 2094 phy_no+mvi->id*mvi->chip->n_phy);
2177 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1); 2095 MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET);
2178 } 2096 }
2179 } 2097 }
2180} 2098}
2181 2099
2182static void mvs_sig_remove_timer(struct mvs_phy *phy)
2183{
2184 if (phy->timer.function)
2185 del_timer(&phy->timer);
2186 phy->timer.function = NULL;
2187}
2188
2189void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) 2100void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2190{ 2101{
2191 u32 tmp; 2102 u32 tmp;
2192 struct sas_ha_struct *sas_ha = mvi->sas;
2193 struct mvs_phy *phy = &mvi->phy[phy_no]; 2103 struct mvs_phy *phy = &mvi->phy[phy_no];
2194 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2195 2104
2196 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no); 2105 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
2197 mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy, 2106 MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
2107 mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy,
2198 MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no)); 2108 MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
2199 mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy, 2109 mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy,
2200 phy->irq_status); 2110 phy->irq_status);
2201 2111
2202 /* 2112 /*
@@ -2205,11 +2115,12 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2205 */ 2115 */
2206 2116
2207 if (phy->irq_status & PHYEV_DCDR_ERR) { 2117 if (phy->irq_status & PHYEV_DCDR_ERR) {
2208 mv_dprintk("port %d STP decoding error.\n", 2118 mv_dprintk("phy %d STP decoding error.\n",
2209 phy_no + mvi->id*mvi->chip->n_phy); 2119 phy_no + mvi->id*mvi->chip->n_phy);
2210 } 2120 }
2211 2121
2212 if (phy->irq_status & PHYEV_POOF) { 2122 if (phy->irq_status & PHYEV_POOF) {
2123 mdelay(500);
2213 if (!(phy->phy_event & PHY_PLUG_OUT)) { 2124 if (!(phy->phy_event & PHY_PLUG_OUT)) {
2214 int dev_sata = phy->phy_type & PORT_TYPE_SATA; 2125 int dev_sata = phy->phy_type & PORT_TYPE_SATA;
2215 int ready; 2126 int ready;
@@ -2220,17 +2131,13 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2220 (void *)(unsigned long)phy_no, 2131 (void *)(unsigned long)phy_no,
2221 PHY_PLUG_EVENT); 2132 PHY_PLUG_EVENT);
2222 ready = mvs_is_phy_ready(mvi, phy_no); 2133 ready = mvs_is_phy_ready(mvi, phy_no);
2223 if (!ready)
2224 mv_dprintk("phy%d Unplug Notice\n",
2225 phy_no +
2226 mvi->id * mvi->chip->n_phy);
2227 if (ready || dev_sata) { 2134 if (ready || dev_sata) {
2228 if (MVS_CHIP_DISP->stp_reset) 2135 if (MVS_CHIP_DISP->stp_reset)
2229 MVS_CHIP_DISP->stp_reset(mvi, 2136 MVS_CHIP_DISP->stp_reset(mvi,
2230 phy_no); 2137 phy_no);
2231 else 2138 else
2232 MVS_CHIP_DISP->phy_reset(mvi, 2139 MVS_CHIP_DISP->phy_reset(mvi,
2233 phy_no, 0); 2140 phy_no, MVS_SOFT_RESET);
2234 return; 2141 return;
2235 } 2142 }
2236 } 2143 }
@@ -2243,13 +2150,12 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2243 if (phy->timer.function == NULL) { 2150 if (phy->timer.function == NULL) {
2244 phy->timer.data = (unsigned long)phy; 2151 phy->timer.data = (unsigned long)phy;
2245 phy->timer.function = mvs_sig_time_out; 2152 phy->timer.function = mvs_sig_time_out;
2246 phy->timer.expires = jiffies + 10*HZ; 2153 phy->timer.expires = jiffies + 5*HZ;
2247 add_timer(&phy->timer); 2154 add_timer(&phy->timer);
2248 } 2155 }
2249 } 2156 }
2250 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { 2157 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
2251 phy->phy_status = mvs_is_phy_ready(mvi, phy_no); 2158 phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
2252 mvs_sig_remove_timer(phy);
2253 mv_dprintk("notify plug in on phy[%d]\n", phy_no); 2159 mv_dprintk("notify plug in on phy[%d]\n", phy_no);
2254 if (phy->phy_status) { 2160 if (phy->phy_status) {
2255 mdelay(10); 2161 mdelay(10);
@@ -2263,14 +2169,14 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2263 } 2169 }
2264 mvs_update_phyinfo(mvi, phy_no, 0); 2170 mvs_update_phyinfo(mvi, phy_no, 0);
2265 if (phy->phy_type & PORT_TYPE_SAS) { 2171 if (phy->phy_type & PORT_TYPE_SAS) {
2266 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 2); 2172 MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE);
2267 mdelay(10); 2173 mdelay(10);
2268 } 2174 }
2269 2175
2270 mvs_bytes_dmaed(mvi, phy_no); 2176 mvs_bytes_dmaed(mvi, phy_no);
2271 /* whether driver is going to handle hot plug */ 2177 /* whether driver is going to handle hot plug */
2272 if (phy->phy_event & PHY_PLUG_OUT) { 2178 if (phy->phy_event & PHY_PLUG_OUT) {
2273 mvs_port_notify_formed(sas_phy, 0); 2179 mvs_port_notify_formed(&phy->sas_phy, 0);
2274 phy->phy_event &= ~PHY_PLUG_OUT; 2180 phy->phy_event &= ~PHY_PLUG_OUT;
2275 } 2181 }
2276 } else { 2182 } else {
@@ -2278,13 +2184,11 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2278 phy_no + mvi->id*mvi->chip->n_phy); 2184 phy_no + mvi->id*mvi->chip->n_phy);
2279 } 2185 }
2280 } else if (phy->irq_status & PHYEV_BROAD_CH) { 2186 } else if (phy->irq_status & PHYEV_BROAD_CH) {
2281 mv_dprintk("port %d broadcast change.\n", 2187 mv_dprintk("phy %d broadcast change.\n",
2282 phy_no + mvi->id*mvi->chip->n_phy); 2188 phy_no + mvi->id*mvi->chip->n_phy);
2283 /* exception for Samsung disk drive*/ 2189 mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
2284 mdelay(1000); 2190 EXP_BRCT_CHG);
2285 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
2286 } 2191 }
2287 MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
2288} 2192}
2289 2193
2290int mvs_int_rx(struct mvs_info *mvi, bool self_clear) 2194int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 1367d8b9350d..44d7885a4a1d 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -48,12 +48,8 @@
48 48
49#define DRV_NAME "mvsas" 49#define DRV_NAME "mvsas"
50#define DRV_VERSION "0.8.2" 50#define DRV_VERSION "0.8.2"
51#define _MV_DUMP 0
52#define MVS_ID_NOT_MAPPED 0x7f 51#define MVS_ID_NOT_MAPPED 0x7f
53/* #define DISABLE_HOTPLUG_DMA_FIX */
54// #define MAX_EXP_RUNNING_REQ 2
55#define WIDE_PORT_MAX_PHY 4 52#define WIDE_PORT_MAX_PHY 4
56#define MV_DISABLE_NCQ 0
57#define mv_printk(fmt, arg ...) \ 53#define mv_printk(fmt, arg ...) \
58 printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg) 54 printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg)
59#ifdef MV_DEBUG 55#ifdef MV_DEBUG
@@ -64,6 +60,7 @@
64#endif 60#endif
65#define MV_MAX_U32 0xffffffff 61#define MV_MAX_U32 0xffffffff
66 62
63extern int interrupt_coalescing;
67extern struct mvs_tgt_initiator mvs_tgt; 64extern struct mvs_tgt_initiator mvs_tgt;
68extern struct mvs_info *tgt_mvi; 65extern struct mvs_info *tgt_mvi;
69extern const struct mvs_dispatch mvs_64xx_dispatch; 66extern const struct mvs_dispatch mvs_64xx_dispatch;
@@ -99,6 +96,11 @@ enum dev_status {
99 MVS_DEV_EH = 0x1, 96 MVS_DEV_EH = 0x1,
100}; 97};
101 98
99enum dev_reset {
100 MVS_SOFT_RESET = 0,
101 MVS_HARD_RESET = 1,
102 MVS_PHY_TUNE = 2,
103};
102 104
103struct mvs_info; 105struct mvs_info;
104 106
@@ -130,7 +132,6 @@ struct mvs_dispatch {
130 u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port); 132 u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
131 void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val); 133 void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
132 134
133 void (*get_sas_addr)(void *buf, u32 buflen);
134 void (*command_active)(struct mvs_info *mvi, u32 slot_idx); 135 void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
135 void (*clear_srs_irq)(struct mvs_info *mvi, u8 reg_set, u8 clear_all); 136 void (*clear_srs_irq)(struct mvs_info *mvi, u8 reg_set, u8 clear_all);
136 void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type, 137 void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
@@ -167,9 +168,10 @@ struct mvs_dispatch {
167 ); 168 );
168 int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd); 169 int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
169 int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout); 170 int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
170#ifndef DISABLE_HOTPLUG_DMA_FIX 171 void (*dma_fix)(struct mvs_info *mvi, u32 phy_mask,
171 void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd); 172 int buf_len, int from, void *prd);
172#endif 173 void (*tune_interrupt)(struct mvs_info *mvi, u32 time);
174 void (*non_spec_ncq_error)(struct mvs_info *mvi);
173 175
174}; 176};
175 177
@@ -179,9 +181,11 @@ struct mvs_chip_info {
179 u32 fis_offs; 181 u32 fis_offs;
180 u32 fis_count; 182 u32 fis_count;
181 u32 srs_sz; 183 u32 srs_sz;
184 u32 sg_width;
182 u32 slot_width; 185 u32 slot_width;
183 const struct mvs_dispatch *dispatch; 186 const struct mvs_dispatch *dispatch;
184}; 187};
188#define MVS_MAX_SG (1U << mvi->chip->sg_width)
185#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) 189#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
186#define MVS_RX_FISL_SZ \ 190#define MVS_RX_FISL_SZ \
187 (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100)) 191 (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
@@ -249,6 +253,73 @@ struct mvs_device {
249 u16 reserved; 253 u16 reserved;
250}; 254};
251 255
256/* Generate PHY tunning parameters */
257struct phy_tuning {
258 /* 1 bit, transmitter emphasis enable */
259 u8 trans_emp_en:1;
260 /* 4 bits, transmitter emphasis amplitude */
261 u8 trans_emp_amp:4;
262 /* 3 bits, reserved space */
263 u8 Reserved_2bit_1:3;
264 /* 5 bits, transmitter amplitude */
265 u8 trans_amp:5;
266 /* 2 bits, transmitter amplitude adjust */
267 u8 trans_amp_adj:2;
268 /* 1 bit, reserved space */
269 u8 resv_2bit_2:1;
270 /* 2 bytes, reserved space */
271 u8 reserved[2];
272};
273
274struct ffe_control {
275 /* 4 bits, FFE Capacitor Select (value range 0~F) */
276 u8 ffe_cap_sel:4;
277 /* 3 bits, FFE Resistor Select (value range 0~7) */
278 u8 ffe_rss_sel:3;
279 /* 1 bit reserve*/
280 u8 reserved:1;
281};
282
283/*
284 * HBA_Info_Page is saved in Flash/NVRAM, total 256 bytes.
285 * The data area is valid only Signature="MRVL".
286 * If any member fills with 0xFF, the member is invalid.
287 */
288struct hba_info_page {
289 /* Dword 0 */
290 /* 4 bytes, structure signature,should be "MRVL" at first initial */
291 u8 signature[4];
292
293 /* Dword 1-13 */
294 u32 reserved1[13];
295
296 /* Dword 14-29 */
297 /* 64 bytes, SAS address for each port */
298 u64 sas_addr[8];
299
300 /* Dword 30-31 */
301 /* 8 bytes for vanir 8 port PHY FFE seeting
302 * BIT 0~3 : FFE Capacitor select(value range 0~F)
303 * BIT 4~6 : FFE Resistor select(value range 0~7)
304 * BIT 7: reserve.
305 */
306
307 struct ffe_control ffe_ctl[8];
308 /* Dword 32 -43 */
309 u32 reserved2[12];
310
311 /* Dword 44-45 */
312 /* 8 bytes, 0: 1.5G, 1: 3.0G, should be 0x01 at first initial */
313 u8 phy_rate[8];
314
315 /* Dword 46-53 */
316 /* 32 bytes, PHY tuning parameters for each PHY*/
317 struct phy_tuning phy_tuning[8];
318
319 /* Dword 54-63 */
320 u32 reserved3[10];
321}; /* total 256 bytes */
322
252struct mvs_slot_info { 323struct mvs_slot_info {
253 struct list_head entry; 324 struct list_head entry;
254 union { 325 union {
@@ -264,9 +335,6 @@ struct mvs_slot_info {
264 */ 335 */
265 void *buf; 336 void *buf;
266 dma_addr_t buf_dma; 337 dma_addr_t buf_dma;
267#if _MV_DUMP
268 u32 cmd_size;
269#endif
270 void *response; 338 void *response;
271 struct mvs_port *port; 339 struct mvs_port *port;
272 struct mvs_device *device; 340 struct mvs_device *device;
@@ -320,12 +388,10 @@ struct mvs_info {
320 const struct mvs_chip_info *chip; 388 const struct mvs_chip_info *chip;
321 389
322 int tags_num; 390 int tags_num;
323 DECLARE_BITMAP(tags, MVS_SLOTS); 391 unsigned long *tags;
324 /* further per-slot information */ 392 /* further per-slot information */
325 struct mvs_phy phy[MVS_MAX_PHYS]; 393 struct mvs_phy phy[MVS_MAX_PHYS];
326 struct mvs_port port[MVS_MAX_PHYS]; 394 struct mvs_port port[MVS_MAX_PHYS];
327 u32 irq;
328 u32 exp_req;
329 u32 id; 395 u32 id;
330 u64 sata_reg_set; 396 u64 sata_reg_set;
331 struct list_head *hba_list; 397 struct list_head *hba_list;
@@ -337,12 +403,13 @@ struct mvs_info {
337 u32 flashsectSize; 403 u32 flashsectSize;
338 404
339 void *addon; 405 void *addon;
406 struct hba_info_page hba_info_param;
340 struct mvs_device devices[MVS_MAX_DEVICES]; 407 struct mvs_device devices[MVS_MAX_DEVICES];
341#ifndef DISABLE_HOTPLUG_DMA_FIX
342 void *bulk_buffer; 408 void *bulk_buffer;
343 dma_addr_t bulk_buffer_dma; 409 dma_addr_t bulk_buffer_dma;
410 void *bulk_buffer1;
411 dma_addr_t bulk_buffer_dma1;
344#define TRASH_BUCKET_SIZE 0x20000 412#define TRASH_BUCKET_SIZE 0x20000
345#endif
346 void *dma_pool; 413 void *dma_pool;
347 struct mvs_slot_info slot_info[0]; 414 struct mvs_slot_info slot_info[0];
348}; 415};
@@ -350,8 +417,10 @@ struct mvs_info {
350struct mvs_prv_info{ 417struct mvs_prv_info{
351 u8 n_host; 418 u8 n_host;
352 u8 n_phy; 419 u8 n_phy;
353 u16 reserve; 420 u8 scan_finished;
421 u8 reserve;
354 struct mvs_info *mvi[2]; 422 struct mvs_info *mvi[2];
423 struct tasklet_struct mv_tasklet;
355}; 424};
356 425
357struct mvs_wq { 426struct mvs_wq {
@@ -415,6 +484,6 @@ void mvs_do_release_task(struct mvs_info *mvi, int phy_no,
415void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events); 484void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
416void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); 485void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
417int mvs_int_rx(struct mvs_info *mvi, bool self_clear); 486int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
418void mvs_hexdump(u32 size, u8 *data, u32 baseaddr); 487struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi, u8 reg_set);
419#endif 488#endif
420 489
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index fca6a8953070..d079f9a3c6b3 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3871,6 +3871,9 @@ static long pmcraid_ioctl_passthrough(
3871 pmcraid_err("couldn't build passthrough ioadls\n"); 3871 pmcraid_err("couldn't build passthrough ioadls\n");
3872 goto out_free_buffer; 3872 goto out_free_buffer;
3873 } 3873 }
3874 } else if (request_size < 0) {
3875 rc = -EINVAL;
3876 goto out_free_buffer;
3874 } 3877 }
3875 3878
3876 /* If data is being written into the device, copy the data from user 3879 /* If data is being written into the device, copy the data from user
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 532313e0725e..7836eb01c7fc 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -42,8 +42,8 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
42 int reading; 42 int reading;
43 43
44 if (IS_QLA82XX(ha)) { 44 if (IS_QLA82XX(ha)) {
45 DEBUG2(qla_printk(KERN_INFO, ha, 45 ql_dbg(ql_dbg_user, vha, 0x705b,
46 "Firmware dump not supported for ISP82xx\n")); 46 "Firmware dump not supported for ISP82xx\n");
47 return count; 47 return count;
48 } 48 }
49 49
@@ -56,7 +56,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
56 if (!ha->fw_dump_reading) 56 if (!ha->fw_dump_reading)
57 break; 57 break;
58 58
59 qla_printk(KERN_INFO, ha, 59 ql_log(ql_log_info, vha, 0x705d,
60 "Firmware dump cleared on (%ld).\n", vha->host_no); 60 "Firmware dump cleared on (%ld).\n", vha->host_no);
61 61
62 ha->fw_dump_reading = 0; 62 ha->fw_dump_reading = 0;
@@ -66,7 +66,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
66 if (ha->fw_dumped && !ha->fw_dump_reading) { 66 if (ha->fw_dumped && !ha->fw_dump_reading) {
67 ha->fw_dump_reading = 1; 67 ha->fw_dump_reading = 1;
68 68
69 qla_printk(KERN_INFO, ha, 69 ql_log(ql_log_info, vha, 0x705e,
70 "Raw firmware dump ready for read on (%ld).\n", 70 "Raw firmware dump ready for read on (%ld).\n",
71 vha->host_no); 71 vha->host_no);
72 } 72 }
@@ -148,7 +148,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
148 } 148 }
149 149
150 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 150 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
151 qla_printk(KERN_WARNING, ha, 151 ql_log(ql_log_warn, vha, 0x705f,
152 "HBA not online, failing NVRAM update.\n"); 152 "HBA not online, failing NVRAM update.\n");
153 return -EAGAIN; 153 return -EAGAIN;
154 } 154 }
@@ -158,6 +158,8 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
158 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base, 158 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
159 count); 159 count);
160 160
161 ql_dbg(ql_dbg_user, vha, 0x7060,
162 "Setting ISP_ABORT_NEEDED\n");
161 /* NVRAM settings take effect immediately. */ 163 /* NVRAM settings take effect immediately. */
162 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 164 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
163 qla2xxx_wake_dpc(vha); 165 qla2xxx_wake_dpc(vha);
@@ -255,9 +257,9 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
255 257
256 ha->optrom_state = QLA_SWAITING; 258 ha->optrom_state = QLA_SWAITING;
257 259
258 DEBUG2(qla_printk(KERN_INFO, ha, 260 ql_dbg(ql_dbg_user, vha, 0x7061,
259 "Freeing flash region allocation -- 0x%x bytes.\n", 261 "Freeing flash region allocation -- 0x%x bytes.\n",
260 ha->optrom_region_size)); 262 ha->optrom_region_size);
261 263
262 vfree(ha->optrom_buffer); 264 vfree(ha->optrom_buffer);
263 ha->optrom_buffer = NULL; 265 ha->optrom_buffer = NULL;
@@ -273,7 +275,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
273 ha->optrom_state = QLA_SREADING; 275 ha->optrom_state = QLA_SREADING;
274 ha->optrom_buffer = vmalloc(ha->optrom_region_size); 276 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
275 if (ha->optrom_buffer == NULL) { 277 if (ha->optrom_buffer == NULL) {
276 qla_printk(KERN_WARNING, ha, 278 ql_log(ql_log_warn, vha, 0x7062,
277 "Unable to allocate memory for optrom retrieval " 279 "Unable to allocate memory for optrom retrieval "
278 "(%x).\n", ha->optrom_region_size); 280 "(%x).\n", ha->optrom_region_size);
279 281
@@ -282,14 +284,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
282 } 284 }
283 285
284 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 286 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
285 qla_printk(KERN_WARNING, ha, 287 ql_log(ql_log_warn, vha, 0x7063,
286 "HBA not online, failing NVRAM update.\n"); 288 "HBA not online, failing NVRAM update.\n");
287 return -EAGAIN; 289 return -EAGAIN;
288 } 290 }
289 291
290 DEBUG2(qla_printk(KERN_INFO, ha, 292 ql_dbg(ql_dbg_user, vha, 0x7064,
291 "Reading flash region -- 0x%x/0x%x.\n", 293 "Reading flash region -- 0x%x/0x%x.\n",
292 ha->optrom_region_start, ha->optrom_region_size)); 294 ha->optrom_region_start, ha->optrom_region_size);
293 295
294 memset(ha->optrom_buffer, 0, ha->optrom_region_size); 296 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
295 ha->isp_ops->read_optrom(vha, ha->optrom_buffer, 297 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
@@ -328,7 +330,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
328 else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha)) 330 else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
329 valid = 1; 331 valid = 1;
330 if (!valid) { 332 if (!valid) {
331 qla_printk(KERN_WARNING, ha, 333 ql_log(ql_log_warn, vha, 0x7065,
332 "Invalid start region 0x%x/0x%x.\n", start, size); 334 "Invalid start region 0x%x/0x%x.\n", start, size);
333 return -EINVAL; 335 return -EINVAL;
334 } 336 }
@@ -340,17 +342,17 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
340 ha->optrom_state = QLA_SWRITING; 342 ha->optrom_state = QLA_SWRITING;
341 ha->optrom_buffer = vmalloc(ha->optrom_region_size); 343 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
342 if (ha->optrom_buffer == NULL) { 344 if (ha->optrom_buffer == NULL) {
343 qla_printk(KERN_WARNING, ha, 345 ql_log(ql_log_warn, vha, 0x7066,
344 "Unable to allocate memory for optrom update " 346 "Unable to allocate memory for optrom update "
345 "(%x).\n", ha->optrom_region_size); 347 "(%x)\n", ha->optrom_region_size);
346 348
347 ha->optrom_state = QLA_SWAITING; 349 ha->optrom_state = QLA_SWAITING;
348 return count; 350 return count;
349 } 351 }
350 352
351 DEBUG2(qla_printk(KERN_INFO, ha, 353 ql_dbg(ql_dbg_user, vha, 0x7067,
352 "Staging flash region write -- 0x%x/0x%x.\n", 354 "Staging flash region write -- 0x%x/0x%x.\n",
353 ha->optrom_region_start, ha->optrom_region_size)); 355 ha->optrom_region_start, ha->optrom_region_size);
354 356
355 memset(ha->optrom_buffer, 0, ha->optrom_region_size); 357 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
356 break; 358 break;
@@ -359,14 +361,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
359 break; 361 break;
360 362
361 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 363 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
362 qla_printk(KERN_WARNING, ha, 364 ql_log(ql_log_warn, vha, 0x7068,
363 "HBA not online, failing flash update.\n"); 365 "HBA not online, failing flash update.\n");
364 return -EAGAIN; 366 return -EAGAIN;
365 } 367 }
366 368
367 DEBUG2(qla_printk(KERN_INFO, ha, 369 ql_dbg(ql_dbg_user, vha, 0x7069,
368 "Writing flash region -- 0x%x/0x%x.\n", 370 "Writing flash region -- 0x%x/0x%x.\n",
369 ha->optrom_region_start, ha->optrom_region_size)); 371 ha->optrom_region_start, ha->optrom_region_size);
370 372
371 ha->isp_ops->write_optrom(vha, ha->optrom_buffer, 373 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
372 ha->optrom_region_start, ha->optrom_region_size); 374 ha->optrom_region_start, ha->optrom_region_size);
@@ -425,7 +427,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
425 return 0; 427 return 0;
426 428
427 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 429 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
428 qla_printk(KERN_WARNING, ha, 430 ql_log(ql_log_warn, vha, 0x706a,
429 "HBA not online, failing VPD update.\n"); 431 "HBA not online, failing VPD update.\n");
430 return -EAGAIN; 432 return -EAGAIN;
431 } 433 }
@@ -440,7 +442,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
440 442
441 tmp_data = vmalloc(256); 443 tmp_data = vmalloc(256);
442 if (!tmp_data) { 444 if (!tmp_data) {
443 qla_printk(KERN_WARNING, ha, 445 ql_log(ql_log_warn, vha, 0x706b,
444 "Unable to allocate memory for VPD information update.\n"); 446 "Unable to allocate memory for VPD information update.\n");
445 goto done; 447 goto done;
446 } 448 }
@@ -480,7 +482,7 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
480 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 482 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
481 &ha->sfp_data_dma); 483 &ha->sfp_data_dma);
482 if (!ha->sfp_data) { 484 if (!ha->sfp_data) {
483 qla_printk(KERN_WARNING, ha, 485 ql_log(ql_log_warn, vha, 0x706c,
484 "Unable to allocate memory for SFP read-data.\n"); 486 "Unable to allocate memory for SFP read-data.\n");
485 return 0; 487 return 0;
486 } 488 }
@@ -499,9 +501,10 @@ do_read:
499 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data, 501 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
500 addr, offset, SFP_BLOCK_SIZE, 0); 502 addr, offset, SFP_BLOCK_SIZE, 0);
501 if (rval != QLA_SUCCESS) { 503 if (rval != QLA_SUCCESS) {
502 qla_printk(KERN_WARNING, ha, 504 ql_log(ql_log_warn, vha, 0x706d,
503 "Unable to read SFP data (%x/%x/%x).\n", rval, 505 "Unable to read SFP data (%x/%x/%x).\n", rval,
504 addr, offset); 506 addr, offset);
507
505 count = 0; 508 count = 0;
506 break; 509 break;
507 } 510 }
@@ -538,8 +541,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
538 type = simple_strtol(buf, NULL, 10); 541 type = simple_strtol(buf, NULL, 10);
539 switch (type) { 542 switch (type) {
540 case 0x2025c: 543 case 0x2025c:
541 qla_printk(KERN_INFO, ha, 544 ql_log(ql_log_info, vha, 0x706e,
542 "Issuing ISP reset on (%ld).\n", vha->host_no); 545 "Issuing ISP reset.\n");
543 546
544 scsi_block_requests(vha->host); 547 scsi_block_requests(vha->host);
545 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 548 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -551,8 +554,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
551 if (!IS_QLA81XX(ha)) 554 if (!IS_QLA81XX(ha))
552 break; 555 break;
553 556
554 qla_printk(KERN_INFO, ha, 557 ql_log(ql_log_info, vha, 0x706f,
555 "Issuing MPI reset on (%ld).\n", vha->host_no); 558 "Issuing MPI reset.\n");
556 559
557 /* Make sure FC side is not in reset */ 560 /* Make sure FC side is not in reset */
558 qla2x00_wait_for_hba_online(vha); 561 qla2x00_wait_for_hba_online(vha);
@@ -560,20 +563,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
560 /* Issue MPI reset */ 563 /* Issue MPI reset */
561 scsi_block_requests(vha->host); 564 scsi_block_requests(vha->host);
562 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS) 565 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
563 qla_printk(KERN_WARNING, ha, 566 ql_log(ql_log_warn, vha, 0x7070,
564 "MPI reset failed on (%ld).\n", vha->host_no); 567 "MPI reset failed.\n");
565 scsi_unblock_requests(vha->host); 568 scsi_unblock_requests(vha->host);
566 break; 569 break;
567 case 0x2025e: 570 case 0x2025e:
568 if (!IS_QLA82XX(ha) || vha != base_vha) { 571 if (!IS_QLA82XX(ha) || vha != base_vha) {
569 qla_printk(KERN_INFO, ha, 572 ql_log(ql_log_info, vha, 0x7071,
570 "FCoE ctx reset not supported for host%ld.\n", 573 "FCoE ctx reset no supported.\n");
571 vha->host_no);
572 return count; 574 return count;
573 } 575 }
574 576
575 qla_printk(KERN_INFO, ha, 577 ql_log(ql_log_info, vha, 0x7072,
576 "Issuing FCoE CTX reset on host%ld.\n", vha->host_no); 578 "Issuing FCoE ctx reset.\n");
577 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 579 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
578 qla2xxx_wake_dpc(vha); 580 qla2xxx_wake_dpc(vha);
579 qla2x00_wait_for_fcoe_ctx_reset(vha); 581 qla2x00_wait_for_fcoe_ctx_reset(vha);
@@ -611,8 +613,8 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
611 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 613 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
612 &ha->edc_data_dma); 614 &ha->edc_data_dma);
613 if (!ha->edc_data) { 615 if (!ha->edc_data) {
614 DEBUG2(qla_printk(KERN_INFO, ha, 616 ql_log(ql_log_warn, vha, 0x7073,
615 "Unable to allocate memory for EDC write.\n")); 617 "Unable to allocate memory for EDC write.\n");
616 return 0; 618 return 0;
617 } 619 }
618 } 620 }
@@ -631,9 +633,9 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
631 rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data, 633 rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
632 dev, adr, len, opt); 634 dev, adr, len, opt);
633 if (rval != QLA_SUCCESS) { 635 if (rval != QLA_SUCCESS) {
634 DEBUG2(qla_printk(KERN_INFO, ha, 636 ql_log(ql_log_warn, vha, 0x7074,
635 "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n", 637 "Unable to write EDC (%x) %02x:%04x:%02x:%02x\n",
636 rval, dev, adr, opt, len, buf[8])); 638 rval, dev, adr, opt, len, buf[8]);
637 return 0; 639 return 0;
638 } 640 }
639 641
@@ -669,8 +671,8 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
669 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 671 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
670 &ha->edc_data_dma); 672 &ha->edc_data_dma);
671 if (!ha->edc_data) { 673 if (!ha->edc_data) {
672 DEBUG2(qla_printk(KERN_INFO, ha, 674 ql_log(ql_log_warn, vha, 0x708c,
673 "Unable to allocate memory for EDC status.\n")); 675 "Unable to allocate memory for EDC status.\n");
674 return 0; 676 return 0;
675 } 677 }
676 } 678 }
@@ -688,9 +690,9 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
688 rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data, 690 rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
689 dev, adr, len, opt); 691 dev, adr, len, opt);
690 if (rval != QLA_SUCCESS) { 692 if (rval != QLA_SUCCESS) {
691 DEBUG2(qla_printk(KERN_INFO, ha, 693 ql_log(ql_log_info, vha, 0x7075,
692 "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n", 694 "Unable to write EDC status (%x) %02x:%04x:%02x.\n",
693 rval, dev, adr, opt, len)); 695 rval, dev, adr, opt, len);
694 return 0; 696 return 0;
695 } 697 }
696 698
@@ -749,7 +751,7 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
749 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, 751 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
750 &ha->xgmac_data_dma, GFP_KERNEL); 752 &ha->xgmac_data_dma, GFP_KERNEL);
751 if (!ha->xgmac_data) { 753 if (!ha->xgmac_data) {
752 qla_printk(KERN_WARNING, ha, 754 ql_log(ql_log_warn, vha, 0x7076,
753 "Unable to allocate memory for XGMAC read-data.\n"); 755 "Unable to allocate memory for XGMAC read-data.\n");
754 return 0; 756 return 0;
755 } 757 }
@@ -761,7 +763,7 @@ do_read:
761 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma, 763 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
762 XGMAC_DATA_SIZE, &actual_size); 764 XGMAC_DATA_SIZE, &actual_size);
763 if (rval != QLA_SUCCESS) { 765 if (rval != QLA_SUCCESS) {
764 qla_printk(KERN_WARNING, ha, 766 ql_log(ql_log_warn, vha, 0x7077,
765 "Unable to read XGMAC data (%x).\n", rval); 767 "Unable to read XGMAC data (%x).\n", rval);
766 count = 0; 768 count = 0;
767 } 769 }
@@ -801,7 +803,7 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
801 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, 803 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
802 &ha->dcbx_tlv_dma, GFP_KERNEL); 804 &ha->dcbx_tlv_dma, GFP_KERNEL);
803 if (!ha->dcbx_tlv) { 805 if (!ha->dcbx_tlv) {
804 qla_printk(KERN_WARNING, ha, 806 ql_log(ql_log_warn, vha, 0x7078,
805 "Unable to allocate memory for DCBX TLV read-data.\n"); 807 "Unable to allocate memory for DCBX TLV read-data.\n");
806 return 0; 808 return 0;
807 } 809 }
@@ -813,8 +815,8 @@ do_read:
813 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma, 815 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
814 DCBX_TLV_DATA_SIZE); 816 DCBX_TLV_DATA_SIZE);
815 if (rval != QLA_SUCCESS) { 817 if (rval != QLA_SUCCESS) {
816 qla_printk(KERN_WARNING, ha, 818 ql_log(ql_log_warn, vha, 0x7079,
817 "Unable to read DCBX TLV data (%x).\n", rval); 819 "Unable to read DCBX TLV (%x).\n", rval);
818 count = 0; 820 count = 0;
819 } 821 }
820 822
@@ -869,9 +871,13 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
869 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 871 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
870 iter->attr); 872 iter->attr);
871 if (ret) 873 if (ret)
872 qla_printk(KERN_INFO, vha->hw, 874 ql_log(ql_log_warn, vha, 0x00f3,
873 "Unable to create sysfs %s binary attribute " 875 "Unable to create sysfs %s binary attribute (%d).\n",
874 "(%d).\n", iter->name, ret); 876 iter->name, ret);
877 else
878 ql_dbg(ql_dbg_init, vha, 0x00f4,
879 "Successfully created sysfs %s binary attribure.\n",
880 iter->name);
875 } 881 }
876} 882}
877 883
@@ -1126,7 +1132,7 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1126 return -EPERM; 1132 return -EPERM;
1127 1133
1128 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 1134 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
1129 qla_printk(KERN_WARNING, ha, 1135 ql_log(ql_log_warn, vha, 0x707a,
1130 "Abort ISP active -- ignoring beacon request.\n"); 1136 "Abort ISP active -- ignoring beacon request.\n");
1131 return -EBUSY; 1137 return -EBUSY;
1132 } 1138 }
@@ -1322,9 +1328,8 @@ qla2x00_thermal_temp_show(struct device *dev,
1322 temp = frac = 0; 1328 temp = frac = 0;
1323 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1329 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1324 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 1330 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1325 DEBUG2_3_11(printk(KERN_WARNING 1331 ql_log(ql_log_warn, vha, 0x707b,
1326 "%s(%ld): isp reset in progress.\n", 1332 "ISP reset active.\n");
1327 __func__, vha->host_no));
1328 else if (!vha->hw->flags.eeh_busy) 1333 else if (!vha->hw->flags.eeh_busy)
1329 rval = qla2x00_get_thermal_temp(vha, &temp, &frac); 1334 rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
1330 if (rval != QLA_SUCCESS) 1335 if (rval != QLA_SUCCESS)
@@ -1343,8 +1348,8 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1343 1348
1344 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1349 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1345 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 1350 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1346 DEBUG2_3_11(printk("%s(%ld): isp reset in progress.\n", 1351 ql_log(ql_log_warn, vha, 0x707c,
1347 __func__, vha->host_no)); 1352 "ISP reset active.\n");
1348 else if (!vha->hw->flags.eeh_busy) 1353 else if (!vha->hw->flags.eeh_busy)
1349 rval = qla2x00_get_firmware_state(vha, state); 1354 rval = qla2x00_get_firmware_state(vha, state);
1350 if (rval != QLA_SUCCESS) 1355 if (rval != QLA_SUCCESS)
@@ -1645,8 +1650,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1645 1650
1646 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma); 1651 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1647 if (stats == NULL) { 1652 if (stats == NULL) {
1648 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", 1653 ql_log(ql_log_warn, vha, 0x707d,
1649 __func__, base_vha->host_no)); 1654 "Failed to allocate memory for stats.\n");
1650 goto done; 1655 goto done;
1651 } 1656 }
1652 memset(stats, 0, DMA_POOL_SIZE); 1657 memset(stats, 0, DMA_POOL_SIZE);
@@ -1746,15 +1751,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1746 1751
1747 ret = qla24xx_vport_create_req_sanity_check(fc_vport); 1752 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1748 if (ret) { 1753 if (ret) {
1749 DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, " 1754 ql_log(ql_log_warn, vha, 0x707e,
1750 "status %x\n", ret)); 1755 "Vport sanity check failed, status %x\n", ret);
1751 return (ret); 1756 return (ret);
1752 } 1757 }
1753 1758
1754 vha = qla24xx_create_vhost(fc_vport); 1759 vha = qla24xx_create_vhost(fc_vport);
1755 if (vha == NULL) { 1760 if (vha == NULL) {
1756 DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n", 1761 ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
1757 vha));
1758 return FC_VPORT_FAILED; 1762 return FC_VPORT_FAILED;
1759 } 1763 }
1760 if (disable) { 1764 if (disable) {
@@ -1764,8 +1768,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1764 atomic_set(&vha->vp_state, VP_FAILED); 1768 atomic_set(&vha->vp_state, VP_FAILED);
1765 1769
1766 /* ready to create vport */ 1770 /* ready to create vport */
1767 qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n", 1771 ql_log(ql_log_info, vha, 0x7080,
1768 vha->vp_idx); 1772 "VP entry id %d assigned.\n", vha->vp_idx);
1769 1773
1770 /* initialized vport states */ 1774 /* initialized vport states */
1771 atomic_set(&vha->loop_state, LOOP_DOWN); 1775 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -1775,8 +1779,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1775 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN || 1779 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1776 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 1780 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1777 /* Don't retry or attempt login of this virtual port */ 1781 /* Don't retry or attempt login of this virtual port */
1778 DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n", 1782 ql_dbg(ql_dbg_user, vha, 0x7081,
1779 base_vha->host_no)); 1783 "Vport loop state is not UP.\n");
1780 atomic_set(&vha->loop_state, LOOP_DEAD); 1784 atomic_set(&vha->loop_state, LOOP_DEAD);
1781 if (!disable) 1785 if (!disable)
1782 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); 1786 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
@@ -1785,9 +1789,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1785 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { 1789 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
1786 if (ha->fw_attributes & BIT_4) { 1790 if (ha->fw_attributes & BIT_4) {
1787 vha->flags.difdix_supported = 1; 1791 vha->flags.difdix_supported = 1;
1788 DEBUG18(qla_printk(KERN_INFO, ha, 1792 ql_dbg(ql_dbg_user, vha, 0x7082,
1789 "Registering for DIF/DIX type 1 and 3" 1793 "Registered for DIF/DIX type 1 and 3 protection.\n");
1790 " protection.\n"));
1791 scsi_host_set_prot(vha->host, 1794 scsi_host_set_prot(vha->host,
1792 SHOST_DIF_TYPE1_PROTECTION 1795 SHOST_DIF_TYPE1_PROTECTION
1793 | SHOST_DIF_TYPE2_PROTECTION 1796 | SHOST_DIF_TYPE2_PROTECTION
@@ -1802,8 +1805,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1802 1805
1803 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev, 1806 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
1804 &ha->pdev->dev)) { 1807 &ha->pdev->dev)) {
1805 DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n", 1808 ql_dbg(ql_dbg_user, vha, 0x7083,
1806 vha->host_no, vha->vp_idx)); 1809 "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
1807 goto vport_create_failed_2; 1810 goto vport_create_failed_2;
1808 } 1811 }
1809 1812
@@ -1820,6 +1823,10 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1820 1823
1821 if (ha->flags.cpu_affinity_enabled) { 1824 if (ha->flags.cpu_affinity_enabled) {
1822 req = ha->req_q_map[1]; 1825 req = ha->req_q_map[1];
1826 ql_dbg(ql_dbg_multiq, vha, 0xc000,
1827 "Request queue %p attached with "
1828 "VP[%d], cpu affinity =%d\n",
1829 req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
1823 goto vport_queue; 1830 goto vport_queue;
1824 } else if (ql2xmaxqueues == 1 || !ha->npiv_info) 1831 } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
1825 goto vport_queue; 1832 goto vport_queue;
@@ -1836,13 +1843,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1836 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0, 1843 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1837 qos); 1844 qos);
1838 if (!ret) 1845 if (!ret)
1839 qla_printk(KERN_WARNING, ha, 1846 ql_log(ql_log_warn, vha, 0x7084,
1840 "Can't create request queue for vp_idx:%d\n", 1847 "Can't create request queue for VP[%d]\n",
1841 vha->vp_idx); 1848 vha->vp_idx);
1842 else { 1849 else {
1843 DEBUG2(qla_printk(KERN_INFO, ha, 1850 ql_dbg(ql_dbg_multiq, vha, 0xc001,
1844 "Request Que:%d (QoS: %d) created for vp_idx:%d\n", 1851 "Request Que:%d Q0s: %d) created for VP[%d]\n",
1845 ret, qos, vha->vp_idx)); 1852 ret, qos, vha->vp_idx);
1853 ql_dbg(ql_dbg_user, vha, 0x7085,
1854 "Request Que:%d Q0s: %d) created for VP[%d]\n",
1855 ret, qos, vha->vp_idx);
1846 req = ha->req_q_map[ret]; 1856 req = ha->req_q_map[ret];
1847 } 1857 }
1848 } 1858 }
@@ -1882,12 +1892,13 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1882 1892
1883 if (vha->timer_active) { 1893 if (vha->timer_active) {
1884 qla2x00_vp_stop_timer(vha); 1894 qla2x00_vp_stop_timer(vha);
1885 DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]" 1895 ql_dbg(ql_dbg_user, vha, 0x7086,
1886 " = %p has stopped\n", vha->host_no, vha->vp_idx, vha)); 1896 "Timer for the VP[%d] has stopped\n", vha->vp_idx);
1887 } 1897 }
1888 1898
1889 /* No pending activities shall be there on the vha now */ 1899 /* No pending activities shall be there on the vha now */
1890 DEBUG(msleep(random32()%10)); /* Just to see if something falls on 1900 if (ql2xextended_error_logging & ql_dbg_user)
1901 msleep(random32()%10); /* Just to see if something falls on
1891 * the net we have placed below */ 1902 * the net we have placed below */
1892 1903
1893 BUG_ON(atomic_read(&vha->vref_count)); 1904 BUG_ON(atomic_read(&vha->vref_count));
@@ -1901,12 +1912,12 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1901 1912
1902 if (vha->req->id && !ha->flags.cpu_affinity_enabled) { 1913 if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
1903 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) 1914 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1904 qla_printk(KERN_WARNING, ha, 1915 ql_log(ql_log_warn, vha, 0x7087,
1905 "Queue delete failed.\n"); 1916 "Queue delete failed.\n");
1906 } 1917 }
1907 1918
1908 scsi_host_put(vha->host); 1919 scsi_host_put(vha->host);
1909 qla_printk(KERN_INFO, ha, "vport %d deleted\n", id); 1920 ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
1910 return 0; 1921 return 0;
1911} 1922}
1912 1923
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 8c10e2c4928e..07d1767cd26b 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -36,7 +36,8 @@ done:
36} 36}
37 37
38int 38int
39qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag) 39qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
40 struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
40{ 41{
41 int i, ret, num_valid; 42 int i, ret, num_valid;
42 uint8_t *bcode; 43 uint8_t *bcode;
@@ -51,18 +52,17 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
51 52
52 if (bcode_val == 0xFFFFFFFF) { 53 if (bcode_val == 0xFFFFFFFF) {
53 /* No FCP Priority config data in flash */ 54 /* No FCP Priority config data in flash */
54 DEBUG2(printk(KERN_INFO 55 ql_dbg(ql_dbg_user, vha, 0x7051,
55 "%s: No FCP priority config data.\n", 56 "No FCP Priority config data.\n");
56 __func__));
57 return 0; 57 return 0;
58 } 58 }
59 59
60 if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' || 60 if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
61 bcode[3] != 'S') { 61 bcode[3] != 'S') {
62 /* Invalid FCP priority data header*/ 62 /* Invalid FCP priority data header*/
63 DEBUG2(printk(KERN_ERR 63 ql_dbg(ql_dbg_user, vha, 0x7052,
64 "%s: Invalid FCP Priority data header. bcode=0x%x\n", 64 "Invalid FCP Priority data header. bcode=0x%x.\n",
65 __func__, bcode_val)); 65 bcode_val);
66 return 0; 66 return 0;
67 } 67 }
68 if (flag != 1) 68 if (flag != 1)
@@ -77,15 +77,14 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
77 77
78 if (num_valid == 0) { 78 if (num_valid == 0) {
79 /* No valid FCP priority data entries */ 79 /* No valid FCP priority data entries */
80 DEBUG2(printk(KERN_ERR 80 ql_dbg(ql_dbg_user, vha, 0x7053,
81 "%s: No valid FCP Priority data entries.\n", 81 "No valid FCP Priority data entries.\n");
82 __func__));
83 ret = 0; 82 ret = 0;
84 } else { 83 } else {
85 /* FCP priority data is valid */ 84 /* FCP priority data is valid */
86 DEBUG2(printk(KERN_INFO 85 ql_dbg(ql_dbg_user, vha, 0x7054,
87 "%s: Valid FCP priority data. num entries = %d\n", 86 "Valid FCP priority data. num entries = %d.\n",
88 __func__, num_valid)); 87 num_valid);
89 } 88 }
90 89
91 return ret; 90 return ret;
@@ -182,10 +181,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
182 if (!ha->fcp_prio_cfg) { 181 if (!ha->fcp_prio_cfg) {
183 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE); 182 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
184 if (!ha->fcp_prio_cfg) { 183 if (!ha->fcp_prio_cfg) {
185 qla_printk(KERN_WARNING, ha, 184 ql_log(ql_log_warn, vha, 0x7050,
186 "Unable to allocate memory " 185 "Unable to allocate memory for fcp prio "
187 "for fcp prio config data (%x).\n", 186 "config data (%x).\n", FCP_PRIO_CFG_SIZE);
188 FCP_PRIO_CFG_SIZE);
189 bsg_job->reply->result = (DID_ERROR << 16); 187 bsg_job->reply->result = (DID_ERROR << 16);
190 ret = -ENOMEM; 188 ret = -ENOMEM;
191 goto exit_fcp_prio_cfg; 189 goto exit_fcp_prio_cfg;
@@ -198,9 +196,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
198 FCP_PRIO_CFG_SIZE); 196 FCP_PRIO_CFG_SIZE);
199 197
200 /* validate fcp priority data */ 198 /* validate fcp priority data */
201 if (!qla24xx_fcp_prio_cfg_valid( 199
202 (struct qla_fcp_prio_cfg *) 200 if (!qla24xx_fcp_prio_cfg_valid(vha,
203 ha->fcp_prio_cfg, 1)) { 201 (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
204 bsg_job->reply->result = (DID_ERROR << 16); 202 bsg_job->reply->result = (DID_ERROR << 16);
205 ret = -EINVAL; 203 ret = -EINVAL;
206 /* If buffer was invalidatic int 204 /* If buffer was invalidatic int
@@ -256,9 +254,8 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
256 254
257 /* pass through is supported only for ISP 4Gb or higher */ 255 /* pass through is supported only for ISP 4Gb or higher */
258 if (!IS_FWI2_CAPABLE(ha)) { 256 if (!IS_FWI2_CAPABLE(ha)) {
259 DEBUG2(qla_printk(KERN_INFO, ha, 257 ql_dbg(ql_dbg_user, vha, 0x7001,
260 "scsi(%ld):ELS passthru not supported for ISP23xx based " 258 "ELS passthru not supported for ISP23xx based adapters.\n");
261 "adapters\n", vha->host_no));
262 rval = -EPERM; 259 rval = -EPERM;
263 goto done; 260 goto done;
264 } 261 }
@@ -266,11 +263,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
266 /* Multiple SG's are not supported for ELS requests */ 263 /* Multiple SG's are not supported for ELS requests */
267 if (bsg_job->request_payload.sg_cnt > 1 || 264 if (bsg_job->request_payload.sg_cnt > 1 ||
268 bsg_job->reply_payload.sg_cnt > 1) { 265 bsg_job->reply_payload.sg_cnt > 1) {
269 DEBUG2(printk(KERN_INFO 266 ql_dbg(ql_dbg_user, vha, 0x7002,
270 "multiple SG's are not supported for ELS requests" 267 "Multiple SG's are not suppored for ELS requests, "
271 " [request_sg_cnt: %x reply_sg_cnt: %x]\n", 268 "request_sg_cnt=%x reply_sg_cnt=%x.\n",
272 bsg_job->request_payload.sg_cnt, 269 bsg_job->request_payload.sg_cnt,
273 bsg_job->reply_payload.sg_cnt)); 270 bsg_job->reply_payload.sg_cnt);
274 rval = -EPERM; 271 rval = -EPERM;
275 goto done; 272 goto done;
276 } 273 }
@@ -281,9 +278,9 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
281 * if not perform fabric login 278 * if not perform fabric login
282 */ 279 */
283 if (qla2x00_fabric_login(vha, fcport, &nextlid)) { 280 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
284 DEBUG2(qla_printk(KERN_WARNING, ha, 281 ql_dbg(ql_dbg_user, vha, 0x7003,
285 "failed to login port %06X for ELS passthru\n", 282 "Failed to login port %06X for ELS passthru.\n",
286 fcport->d_id.b24)); 283 fcport->d_id.b24);
287 rval = -EIO; 284 rval = -EIO;
288 goto done; 285 goto done;
289 } 286 }
@@ -314,8 +311,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
314 } 311 }
315 312
316 if (!vha->flags.online) { 313 if (!vha->flags.online) {
317 DEBUG2(qla_printk(KERN_WARNING, ha, 314 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
318 "host not online\n"));
319 rval = -EIO; 315 rval = -EIO;
320 goto done; 316 goto done;
321 } 317 }
@@ -337,12 +333,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
337 333
338 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || 334 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
339 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 335 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
340 DEBUG2(printk(KERN_INFO 336 ql_log(ql_log_warn, vha, 0x7008,
341 "dma mapping resulted in different sg counts \ 337 "dma mapping resulted in different sg counts, "
342 [request_sg_cnt: %x dma_request_sg_cnt: %x\ 338 "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
343 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", 339 "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
344 bsg_job->request_payload.sg_cnt, req_sg_cnt, 340 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
345 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
346 rval = -EAGAIN; 341 rval = -EAGAIN;
347 goto done_unmap_sg; 342 goto done_unmap_sg;
348 } 343 }
@@ -363,15 +358,16 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
363 "bsg_els_rpt" : "bsg_els_hst"); 358 "bsg_els_rpt" : "bsg_els_hst");
364 els->u.bsg_job = bsg_job; 359 els->u.bsg_job = bsg_job;
365 360
366 DEBUG2(qla_printk(KERN_INFO, ha, 361 ql_dbg(ql_dbg_user, vha, 0x700a,
367 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x " 362 "bsg rqst type: %s els type: %x - loop-id=%x "
368 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type, 363 "portid=%-2x%02x%02x.\n", type,
369 bsg_job->request->rqst_data.h_els.command_code, 364 bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
370 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 365 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
371 fcport->d_id.b.al_pa));
372 366
373 rval = qla2x00_start_sp(sp); 367 rval = qla2x00_start_sp(sp);
374 if (rval != QLA_SUCCESS) { 368 if (rval != QLA_SUCCESS) {
369 ql_log(ql_log_warn, vha, 0x700e,
370 "qla2x00_start_sp failed = %d\n", rval);
375 kfree(sp->ctx); 371 kfree(sp->ctx);
376 mempool_free(sp, ha->srb_mempool); 372 mempool_free(sp, ha->srb_mempool);
377 rval = -EIO; 373 rval = -EIO;
@@ -411,6 +407,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
411 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 407 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
412 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 408 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
413 if (!req_sg_cnt) { 409 if (!req_sg_cnt) {
410 ql_log(ql_log_warn, vha, 0x700f,
411 "dma_map_sg return %d for request\n", req_sg_cnt);
414 rval = -ENOMEM; 412 rval = -ENOMEM;
415 goto done; 413 goto done;
416 } 414 }
@@ -418,24 +416,25 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
418 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 416 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
419 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 417 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
420 if (!rsp_sg_cnt) { 418 if (!rsp_sg_cnt) {
419 ql_log(ql_log_warn, vha, 0x7010,
420 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
421 rval = -ENOMEM; 421 rval = -ENOMEM;
422 goto done; 422 goto done;
423 } 423 }
424 424
425 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || 425 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
426 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 426 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
427 DEBUG2(qla_printk(KERN_WARNING, ha, 427 ql_log(ql_log_warn, vha, 0x7011,
428 "[request_sg_cnt: %x dma_request_sg_cnt: %x\ 428 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
429 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", 429 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
430 bsg_job->request_payload.sg_cnt, req_sg_cnt, 430 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
431 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
432 rval = -EAGAIN; 431 rval = -EAGAIN;
433 goto done_unmap_sg; 432 goto done_unmap_sg;
434 } 433 }
435 434
436 if (!vha->flags.online) { 435 if (!vha->flags.online) {
437 DEBUG2(qla_printk(KERN_WARNING, ha, 436 ql_log(ql_log_warn, vha, 0x7012,
438 "host not online\n")); 437 "Host is not online.\n");
439 rval = -EIO; 438 rval = -EIO;
440 goto done_unmap_sg; 439 goto done_unmap_sg;
441 } 440 }
@@ -451,8 +450,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
451 loop_id = vha->mgmt_svr_loop_id; 450 loop_id = vha->mgmt_svr_loop_id;
452 break; 451 break;
453 default: 452 default:
454 DEBUG2(qla_printk(KERN_INFO, ha, 453 ql_dbg(ql_dbg_user, vha, 0x7013,
455 "Unknown loop id: %x\n", loop_id)); 454 "Unknown loop id: %x.\n", loop_id);
456 rval = -EINVAL; 455 rval = -EINVAL;
457 goto done_unmap_sg; 456 goto done_unmap_sg;
458 } 457 }
@@ -464,6 +463,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
464 */ 463 */
465 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 464 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
466 if (!fcport) { 465 if (!fcport) {
466 ql_log(ql_log_warn, vha, 0x7014,
467 "Failed to allocate fcport.\n");
467 rval = -ENOMEM; 468 rval = -ENOMEM;
468 goto done_unmap_sg; 469 goto done_unmap_sg;
469 } 470 }
@@ -479,6 +480,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
479 /* Alloc SRB structure */ 480 /* Alloc SRB structure */
480 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx)); 481 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
481 if (!sp) { 482 if (!sp) {
483 ql_log(ql_log_warn, vha, 0x7015,
484 "qla2x00_get_ctx_bsg_sp failed.\n");
482 rval = -ENOMEM; 485 rval = -ENOMEM;
483 goto done_free_fcport; 486 goto done_free_fcport;
484 } 487 }
@@ -488,15 +491,17 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
488 ct->name = "bsg_ct"; 491 ct->name = "bsg_ct";
489 ct->u.bsg_job = bsg_job; 492 ct->u.bsg_job = bsg_job;
490 493
491 DEBUG2(qla_printk(KERN_INFO, ha, 494 ql_dbg(ql_dbg_user, vha, 0x7016,
492 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x " 495 "bsg rqst type: %s else type: %x - "
493 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type, 496 "loop-id=%x portid=%02x%02x%02x.\n", type,
494 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16), 497 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
495 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 498 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
496 fcport->d_id.b.al_pa)); 499 fcport->d_id.b.al_pa);
497 500
498 rval = qla2x00_start_sp(sp); 501 rval = qla2x00_start_sp(sp);
499 if (rval != QLA_SUCCESS) { 502 if (rval != QLA_SUCCESS) {
503 ql_log(ql_log_warn, vha, 0x7017,
504 "qla2x00_start_sp failed=%d.\n", rval);
500 kfree(sp->ctx); 505 kfree(sp->ctx);
501 mempool_free(sp, ha->srb_mempool); 506 mempool_free(sp, ha->srb_mempool);
502 rval = -EIO; 507 rval = -EIO;
@@ -535,9 +540,8 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
535 ha->notify_dcbx_comp = 1; 540 ha->notify_dcbx_comp = 1;
536 ret = qla81xx_set_port_config(vha, new_config); 541 ret = qla81xx_set_port_config(vha, new_config);
537 if (ret != QLA_SUCCESS) { 542 if (ret != QLA_SUCCESS) {
538 DEBUG2(printk(KERN_ERR 543 ql_log(ql_log_warn, vha, 0x7021,
539 "%s(%lu): Set port config failed\n", 544 "set port config failed.\n");
540 __func__, vha->host_no));
541 ha->notify_dcbx_comp = 0; 545 ha->notify_dcbx_comp = 0;
542 rval = -EINVAL; 546 rval = -EINVAL;
543 goto done_set_internal; 547 goto done_set_internal;
@@ -545,11 +549,11 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
545 549
546 /* Wait for DCBX complete event */ 550 /* Wait for DCBX complete event */
547 if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) { 551 if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
548 DEBUG2(qla_printk(KERN_WARNING, ha, 552 ql_dbg(ql_dbg_user, vha, 0x7022,
549 "State change notificaition not received.\n")); 553 "State change notification not received.\n");
550 } else 554 } else
551 DEBUG2(qla_printk(KERN_INFO, ha, 555 ql_dbg(ql_dbg_user, vha, 0x7023,
552 "State change RECEIVED\n")); 556 "State change received.\n");
553 557
554 ha->notify_dcbx_comp = 0; 558 ha->notify_dcbx_comp = 0;
555 559
@@ -581,9 +585,8 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
581 ha->notify_dcbx_comp = wait; 585 ha->notify_dcbx_comp = wait;
582 ret = qla81xx_set_port_config(vha, new_config); 586 ret = qla81xx_set_port_config(vha, new_config);
583 if (ret != QLA_SUCCESS) { 587 if (ret != QLA_SUCCESS) {
584 DEBUG2(printk(KERN_ERR 588 ql_log(ql_log_warn, vha, 0x7025,
585 "%s(%lu): Set port config failed\n", 589 "Set port config failed.\n");
586 __func__, vha->host_no));
587 ha->notify_dcbx_comp = 0; 590 ha->notify_dcbx_comp = 0;
588 rval = -EINVAL; 591 rval = -EINVAL;
589 goto done_reset_internal; 592 goto done_reset_internal;
@@ -592,14 +595,14 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
592 /* Wait for DCBX complete event */ 595 /* Wait for DCBX complete event */
593 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp, 596 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
594 (20 * HZ))) { 597 (20 * HZ))) {
595 DEBUG2(qla_printk(KERN_WARNING, ha, 598 ql_dbg(ql_dbg_user, vha, 0x7026,
596 "State change notificaition not received.\n")); 599 "State change notification not received.\n");
597 ha->notify_dcbx_comp = 0; 600 ha->notify_dcbx_comp = 0;
598 rval = -EINVAL; 601 rval = -EINVAL;
599 goto done_reset_internal; 602 goto done_reset_internal;
600 } else 603 } else
601 DEBUG2(qla_printk(KERN_INFO, ha, 604 ql_dbg(ql_dbg_user, vha, 0x7027,
602 "State change RECEIVED\n")); 605 "State change received.\n");
603 606
604 ha->notify_dcbx_comp = 0; 607 ha->notify_dcbx_comp = 0;
605 } 608 }
@@ -629,11 +632,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
629 632
630 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 633 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
631 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 634 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
632 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) 635 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
636 ql_log(ql_log_warn, vha, 0x7018, "Abort active or needed.\n");
633 return -EBUSY; 637 return -EBUSY;
638 }
634 639
635 if (!vha->flags.online) { 640 if (!vha->flags.online) {
636 DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n")); 641 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
637 return -EIO; 642 return -EIO;
638 } 643 }
639 644
@@ -641,26 +646,31 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
641 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 646 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
642 DMA_TO_DEVICE); 647 DMA_TO_DEVICE);
643 648
644 if (!elreq.req_sg_cnt) 649 if (!elreq.req_sg_cnt) {
650 ql_log(ql_log_warn, vha, 0x701a,
651 "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
645 return -ENOMEM; 652 return -ENOMEM;
653 }
646 654
647 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, 655 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
648 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 656 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
649 DMA_FROM_DEVICE); 657 DMA_FROM_DEVICE);
650 658
651 if (!elreq.rsp_sg_cnt) { 659 if (!elreq.rsp_sg_cnt) {
660 ql_log(ql_log_warn, vha, 0x701b,
661 "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
652 rval = -ENOMEM; 662 rval = -ENOMEM;
653 goto done_unmap_req_sg; 663 goto done_unmap_req_sg;
654 } 664 }
655 665
656 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) || 666 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
657 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 667 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
658 DEBUG2(printk(KERN_INFO 668 ql_log(ql_log_warn, vha, 0x701c,
659 "dma mapping resulted in different sg counts " 669 "dma mapping resulted in different sg counts, "
660 "[request_sg_cnt: %x dma_request_sg_cnt: %x " 670 "request_sg_cnt: %x dma_request_sg_cnt: %x "
661 "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n", 671 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
662 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt, 672 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
663 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt)); 673 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
664 rval = -EAGAIN; 674 rval = -EAGAIN;
665 goto done_unmap_sg; 675 goto done_unmap_sg;
666 } 676 }
@@ -668,8 +678,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
668 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len, 678 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
669 &req_data_dma, GFP_KERNEL); 679 &req_data_dma, GFP_KERNEL);
670 if (!req_data) { 680 if (!req_data) {
671 DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data " 681 ql_log(ql_log_warn, vha, 0x701d,
672 "failed for host=%lu\n", __func__, vha->host_no)); 682 "dma alloc failed for req_data.\n");
673 rval = -ENOMEM; 683 rval = -ENOMEM;
674 goto done_unmap_sg; 684 goto done_unmap_sg;
675 } 685 }
@@ -677,8 +687,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
677 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len, 687 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
678 &rsp_data_dma, GFP_KERNEL); 688 &rsp_data_dma, GFP_KERNEL);
679 if (!rsp_data) { 689 if (!rsp_data) {
680 DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data " 690 ql_log(ql_log_warn, vha, 0x7004,
681 "failed for host=%lu\n", __func__, vha->host_no)); 691 "dma alloc failed for rsp_data.\n");
682 rval = -ENOMEM; 692 rval = -ENOMEM;
683 goto done_free_dma_req; 693 goto done_free_dma_req;
684 } 694 }
@@ -699,8 +709,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
699 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 709 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
700 elreq.options == EXTERNAL_LOOPBACK) { 710 elreq.options == EXTERNAL_LOOPBACK) {
701 type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; 711 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
702 DEBUG2(qla_printk(KERN_INFO, ha, 712 ql_dbg(ql_dbg_user, vha, 0x701e,
703 "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type)); 713 "BSG request type: %s.\n", type);
704 command_sent = INT_DEF_LB_ECHO_CMD; 714 command_sent = INT_DEF_LB_ECHO_CMD;
705 rval = qla2x00_echo_test(vha, &elreq, response); 715 rval = qla2x00_echo_test(vha, &elreq, response);
706 } else { 716 } else {
@@ -708,9 +718,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
708 memset(config, 0, sizeof(config)); 718 memset(config, 0, sizeof(config));
709 memset(new_config, 0, sizeof(new_config)); 719 memset(new_config, 0, sizeof(new_config));
710 if (qla81xx_get_port_config(vha, config)) { 720 if (qla81xx_get_port_config(vha, config)) {
711 DEBUG2(printk(KERN_ERR 721 ql_log(ql_log_warn, vha, 0x701f,
712 "%s(%lu): Get port config failed\n", 722 "Get port config failed.\n");
713 __func__, vha->host_no));
714 bsg_job->reply->reply_payload_rcv_len = 0; 723 bsg_job->reply->reply_payload_rcv_len = 0;
715 bsg_job->reply->result = (DID_ERROR << 16); 724 bsg_job->reply->result = (DID_ERROR << 16);
716 rval = -EPERM; 725 rval = -EPERM;
@@ -718,11 +727,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
718 } 727 }
719 728
720 if (elreq.options != EXTERNAL_LOOPBACK) { 729 if (elreq.options != EXTERNAL_LOOPBACK) {
721 DEBUG2(qla_printk(KERN_INFO, ha, 730 ql_dbg(ql_dbg_user, vha, 0x7020,
722 "Internal: current port config = %x\n", 731 "Internal: curent port config = %x\n",
723 config[0])); 732 config[0]);
724 if (qla81xx_set_internal_loopback(vha, config, 733 if (qla81xx_set_internal_loopback(vha, config,
725 new_config)) { 734 new_config)) {
735 ql_log(ql_log_warn, vha, 0x7024,
736 "Internal loopback failed.\n");
726 bsg_job->reply->reply_payload_rcv_len = 737 bsg_job->reply->reply_payload_rcv_len =
727 0; 738 0;
728 bsg_job->reply->result = 739 bsg_job->reply->result =
@@ -746,9 +757,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
746 } 757 }
747 758
748 type = "FC_BSG_HST_VENDOR_LOOPBACK"; 759 type = "FC_BSG_HST_VENDOR_LOOPBACK";
749 DEBUG2(qla_printk(KERN_INFO, ha, 760 ql_dbg(ql_dbg_user, vha, 0x7028,
750 "scsi(%ld) bsg rqst type: %s\n", 761 "BSG request type: %s.\n", type);
751 vha->host_no, type));
752 762
753 command_sent = INT_DEF_LB_LOOPBACK_CMD; 763 command_sent = INT_DEF_LB_LOOPBACK_CMD;
754 rval = qla2x00_loopback_test(vha, &elreq, response); 764 rval = qla2x00_loopback_test(vha, &elreq, response);
@@ -763,17 +773,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
763 773
764 if (response[0] == MBS_COMMAND_ERROR && 774 if (response[0] == MBS_COMMAND_ERROR &&
765 response[1] == MBS_LB_RESET) { 775 response[1] == MBS_LB_RESET) {
766 DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing " 776 ql_log(ql_log_warn, vha, 0x7029,
767 "ISP\n", __func__, vha->host_no)); 777 "MBX command error, Aborting ISP.\n");
768 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 778 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
769 qla2xxx_wake_dpc(vha); 779 qla2xxx_wake_dpc(vha);
770 qla2x00_wait_for_chip_reset(vha); 780 qla2x00_wait_for_chip_reset(vha);
771 /* Also reset the MPI */ 781 /* Also reset the MPI */
772 if (qla81xx_restart_mpi_firmware(vha) != 782 if (qla81xx_restart_mpi_firmware(vha) !=
773 QLA_SUCCESS) { 783 QLA_SUCCESS) {
774 qla_printk(KERN_INFO, ha, 784 ql_log(ql_log_warn, vha, 0x702a,
775 "MPI reset failed for host%ld.\n", 785 "MPI reset failed.\n");
776 vha->host_no);
777 } 786 }
778 787
779 bsg_job->reply->reply_payload_rcv_len = 0; 788 bsg_job->reply->reply_payload_rcv_len = 0;
@@ -783,17 +792,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
783 } 792 }
784 } else { 793 } else {
785 type = "FC_BSG_HST_VENDOR_LOOPBACK"; 794 type = "FC_BSG_HST_VENDOR_LOOPBACK";
786 DEBUG2(qla_printk(KERN_INFO, ha, 795 ql_dbg(ql_dbg_user, vha, 0x702b,
787 "scsi(%ld) bsg rqst type: %s\n", 796 "BSG request type: %s.\n", type);
788 vha->host_no, type));
789 command_sent = INT_DEF_LB_LOOPBACK_CMD; 797 command_sent = INT_DEF_LB_LOOPBACK_CMD;
790 rval = qla2x00_loopback_test(vha, &elreq, response); 798 rval = qla2x00_loopback_test(vha, &elreq, response);
791 } 799 }
792 } 800 }
793 801
794 if (rval) { 802 if (rval) {
795 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 803 ql_log(ql_log_warn, vha, 0x702c,
796 "request %s failed\n", vha->host_no, type)); 804 "Vendor request %s failed.\n", type);
797 805
798 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + 806 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
799 sizeof(struct fc_bsg_reply); 807 sizeof(struct fc_bsg_reply);
@@ -805,8 +813,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
805 bsg_job->reply->reply_payload_rcv_len = 0; 813 bsg_job->reply->reply_payload_rcv_len = 0;
806 bsg_job->reply->result = (DID_ERROR << 16); 814 bsg_job->reply->result = (DID_ERROR << 16);
807 } else { 815 } else {
808 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 816 ql_dbg(ql_dbg_user, vha, 0x702d,
809 "request %s completed\n", vha->host_no, type)); 817 "Vendor request %s completed.\n", type);
810 818
811 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + 819 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
812 sizeof(response) + sizeof(uint8_t); 820 sizeof(response) + sizeof(uint8_t);
@@ -851,12 +859,13 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
851 859
852 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 860 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
853 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 861 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
854 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) 862 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
863 ql_log(ql_log_warn, vha, 0x702e, "Abort active or needed.\n");
855 return -EBUSY; 864 return -EBUSY;
865 }
856 866
857 if (!IS_QLA84XX(ha)) { 867 if (!IS_QLA84XX(ha)) {
858 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, " 868 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
859 "exiting.\n", vha->host_no));
860 return -EINVAL; 869 return -EINVAL;
861 } 870 }
862 871
@@ -865,14 +874,14 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
865 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW); 874 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
866 875
867 if (rval) { 876 if (rval) {
868 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 877 ql_log(ql_log_warn, vha, 0x7030,
869 "request 84xx reset failed\n", vha->host_no)); 878 "Vendor request 84xx reset failed.\n");
870 rval = bsg_job->reply->reply_payload_rcv_len = 0; 879 rval = bsg_job->reply->reply_payload_rcv_len = 0;
871 bsg_job->reply->result = (DID_ERROR << 16); 880 bsg_job->reply->result = (DID_ERROR << 16);
872 881
873 } else { 882 } else {
874 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 883 ql_dbg(ql_dbg_user, vha, 0x7031,
875 "request 84xx reset completed\n", vha->host_no)); 884 "Vendor request 84xx reset completed.\n");
876 bsg_job->reply->result = DID_OK; 885 bsg_job->reply->result = DID_OK;
877 } 886 }
878 887
@@ -902,21 +911,24 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
902 return -EBUSY; 911 return -EBUSY;
903 912
904 if (!IS_QLA84XX(ha)) { 913 if (!IS_QLA84XX(ha)) {
905 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, " 914 ql_dbg(ql_dbg_user, vha, 0x7032,
906 "exiting.\n", vha->host_no)); 915 "Not 84xx, exiting.\n");
907 return -EINVAL; 916 return -EINVAL;
908 } 917 }
909 918
910 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 919 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
911 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 920 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
912 if (!sg_cnt) 921 if (!sg_cnt) {
922 ql_log(ql_log_warn, vha, 0x7033,
923 "dma_map_sg returned %d for request.\n", sg_cnt);
913 return -ENOMEM; 924 return -ENOMEM;
925 }
914 926
915 if (sg_cnt != bsg_job->request_payload.sg_cnt) { 927 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
916 DEBUG2(printk(KERN_INFO 928 ql_log(ql_log_warn, vha, 0x7034,
917 "dma mapping resulted in different sg counts " 929 "DMA mapping resulted in different sg counts, "
918 "request_sg_cnt: %x dma_request_sg_cnt: %x ", 930 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
919 bsg_job->request_payload.sg_cnt, sg_cnt)); 931 bsg_job->request_payload.sg_cnt, sg_cnt);
920 rval = -EAGAIN; 932 rval = -EAGAIN;
921 goto done_unmap_sg; 933 goto done_unmap_sg;
922 } 934 }
@@ -925,8 +937,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
925 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len, 937 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
926 &fw_dma, GFP_KERNEL); 938 &fw_dma, GFP_KERNEL);
927 if (!fw_buf) { 939 if (!fw_buf) {
928 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf " 940 ql_log(ql_log_warn, vha, 0x7035,
929 "failed for host=%lu\n", __func__, vha->host_no)); 941 "DMA alloc failed for fw_buf.\n");
930 rval = -ENOMEM; 942 rval = -ENOMEM;
931 goto done_unmap_sg; 943 goto done_unmap_sg;
932 } 944 }
@@ -936,8 +948,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
936 948
937 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 949 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
938 if (!mn) { 950 if (!mn) {
939 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer " 951 ql_log(ql_log_warn, vha, 0x7036,
940 "failed for host=%lu\n", __func__, vha->host_no)); 952 "DMA alloc failed for fw buffer.\n");
941 rval = -ENOMEM; 953 rval = -ENOMEM;
942 goto done_free_fw_buf; 954 goto done_free_fw_buf;
943 } 955 }
@@ -965,15 +977,15 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
965 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 977 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
966 978
967 if (rval) { 979 if (rval) {
968 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 980 ql_log(ql_log_warn, vha, 0x7037,
969 "request 84xx updatefw failed\n", vha->host_no)); 981 "Vendor request 84xx updatefw failed.\n");
970 982
971 rval = bsg_job->reply->reply_payload_rcv_len = 0; 983 rval = bsg_job->reply->reply_payload_rcv_len = 0;
972 bsg_job->reply->result = (DID_ERROR << 16); 984 bsg_job->reply->result = (DID_ERROR << 16);
973 985
974 } else { 986 } else {
975 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 987 ql_dbg(ql_dbg_user, vha, 0x7038,
976 "request 84xx updatefw completed\n", vha->host_no)); 988 "Vendor request 84xx updatefw completed.\n");
977 989
978 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 990 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
979 bsg_job->reply->result = DID_OK; 991 bsg_job->reply->result = DID_OK;
@@ -1009,27 +1021,30 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1009 1021
1010 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 1022 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1011 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1023 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1012 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) 1024 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1025 ql_log(ql_log_warn, vha, 0x7039,
1026 "Abort active or needed.\n");
1013 return -EBUSY; 1027 return -EBUSY;
1028 }
1014 1029
1015 if (!IS_QLA84XX(ha)) { 1030 if (!IS_QLA84XX(ha)) {
1016 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, " 1031 ql_log(ql_log_warn, vha, 0x703a,
1017 "exiting.\n", vha->host_no)); 1032 "Not 84xx, exiting.\n");
1018 return -EINVAL; 1033 return -EINVAL;
1019 } 1034 }
1020 1035
1021 ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request + 1036 ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
1022 sizeof(struct fc_bsg_request)); 1037 sizeof(struct fc_bsg_request));
1023 if (!ql84_mgmt) { 1038 if (!ql84_mgmt) {
1024 DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n", 1039 ql_log(ql_log_warn, vha, 0x703b,
1025 __func__, vha->host_no)); 1040 "MGMT header not provided, exiting.\n");
1026 return -EINVAL; 1041 return -EINVAL;
1027 } 1042 }
1028 1043
1029 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 1044 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1030 if (!mn) { 1045 if (!mn) {
1031 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer " 1046 ql_log(ql_log_warn, vha, 0x703c,
1032 "failed for host=%lu\n", __func__, vha->host_no)); 1047 "DMA alloc failed for fw buffer.\n");
1033 return -ENOMEM; 1048 return -ENOMEM;
1034 } 1049 }
1035 1050
@@ -1044,6 +1059,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1044 bsg_job->reply_payload.sg_list, 1059 bsg_job->reply_payload.sg_list,
1045 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1060 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1046 if (!sg_cnt) { 1061 if (!sg_cnt) {
1062 ql_log(ql_log_warn, vha, 0x703d,
1063 "dma_map_sg returned %d for reply.\n", sg_cnt);
1047 rval = -ENOMEM; 1064 rval = -ENOMEM;
1048 goto exit_mgmt; 1065 goto exit_mgmt;
1049 } 1066 }
@@ -1051,10 +1068,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1051 dma_direction = DMA_FROM_DEVICE; 1068 dma_direction = DMA_FROM_DEVICE;
1052 1069
1053 if (sg_cnt != bsg_job->reply_payload.sg_cnt) { 1070 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1054 DEBUG2(printk(KERN_INFO 1071 ql_log(ql_log_warn, vha, 0x703e,
1055 "dma mapping resulted in different sg counts " 1072 "DMA mapping resulted in different sg counts, "
1056 "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n", 1073 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1057 bsg_job->reply_payload.sg_cnt, sg_cnt)); 1074 bsg_job->reply_payload.sg_cnt, sg_cnt);
1058 rval = -EAGAIN; 1075 rval = -EAGAIN;
1059 goto done_unmap_sg; 1076 goto done_unmap_sg;
1060 } 1077 }
@@ -1064,9 +1081,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1064 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, 1081 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1065 &mgmt_dma, GFP_KERNEL); 1082 &mgmt_dma, GFP_KERNEL);
1066 if (!mgmt_b) { 1083 if (!mgmt_b) {
1067 DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b " 1084 ql_log(ql_log_warn, vha, 0x703f,
1068 "failed for host=%lu\n", 1085 "DMA alloc failed for mgmt_b.\n");
1069 __func__, vha->host_no));
1070 rval = -ENOMEM; 1086 rval = -ENOMEM;
1071 goto done_unmap_sg; 1087 goto done_unmap_sg;
1072 } 1088 }
@@ -1094,6 +1110,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1094 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1110 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1095 1111
1096 if (!sg_cnt) { 1112 if (!sg_cnt) {
1113 ql_log(ql_log_warn, vha, 0x7040,
1114 "dma_map_sg returned %d.\n", sg_cnt);
1097 rval = -ENOMEM; 1115 rval = -ENOMEM;
1098 goto exit_mgmt; 1116 goto exit_mgmt;
1099 } 1117 }
@@ -1101,10 +1119,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1101 dma_direction = DMA_TO_DEVICE; 1119 dma_direction = DMA_TO_DEVICE;
1102 1120
1103 if (sg_cnt != bsg_job->request_payload.sg_cnt) { 1121 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1104 DEBUG2(printk(KERN_INFO 1122 ql_log(ql_log_warn, vha, 0x7041,
1105 "dma mapping resulted in different sg counts " 1123 "DMA mapping resulted in different sg counts, "
1106 "request_sg_cnt: %x dma_request_sg_cnt: %x ", 1124 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1107 bsg_job->request_payload.sg_cnt, sg_cnt)); 1125 bsg_job->request_payload.sg_cnt, sg_cnt);
1108 rval = -EAGAIN; 1126 rval = -EAGAIN;
1109 goto done_unmap_sg; 1127 goto done_unmap_sg;
1110 } 1128 }
@@ -1113,9 +1131,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1113 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, 1131 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1114 &mgmt_dma, GFP_KERNEL); 1132 &mgmt_dma, GFP_KERNEL);
1115 if (!mgmt_b) { 1133 if (!mgmt_b) {
1116 DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b " 1134 ql_log(ql_log_warn, vha, 0x7042,
1117 "failed for host=%lu\n", 1135 "DMA alloc failed for mgmt_b.\n");
1118 __func__, vha->host_no));
1119 rval = -ENOMEM; 1136 rval = -ENOMEM;
1120 goto done_unmap_sg; 1137 goto done_unmap_sg;
1121 } 1138 }
@@ -1156,15 +1173,15 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1156 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0); 1173 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1157 1174
1158 if (rval) { 1175 if (rval) {
1159 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 1176 ql_log(ql_log_warn, vha, 0x7043,
1160 "request 84xx mgmt failed\n", vha->host_no)); 1177 "Vendor request 84xx mgmt failed.\n");
1161 1178
1162 rval = bsg_job->reply->reply_payload_rcv_len = 0; 1179 rval = bsg_job->reply->reply_payload_rcv_len = 0;
1163 bsg_job->reply->result = (DID_ERROR << 16); 1180 bsg_job->reply->result = (DID_ERROR << 16);
1164 1181
1165 } else { 1182 } else {
1166 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor " 1183 ql_dbg(ql_dbg_user, vha, 0x7044,
1167 "request 84xx mgmt completed\n", vha->host_no)); 1184 "Vendor request 84xx mgmt completed.\n");
1168 1185
1169 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1186 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1170 bsg_job->reply->result = DID_OK; 1187 bsg_job->reply->result = DID_OK;
@@ -1204,7 +1221,6 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1204{ 1221{
1205 struct Scsi_Host *host = bsg_job->shost; 1222 struct Scsi_Host *host = bsg_job->shost;
1206 scsi_qla_host_t *vha = shost_priv(host); 1223 scsi_qla_host_t *vha = shost_priv(host);
1207 struct qla_hw_data *ha = vha->hw;
1208 int rval = 0; 1224 int rval = 0;
1209 struct qla_port_param *port_param = NULL; 1225 struct qla_port_param *port_param = NULL;
1210 fc_port_t *fcport = NULL; 1226 fc_port_t *fcport = NULL;
@@ -1215,26 +1231,27 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1215 1231
1216 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 1232 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1217 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1233 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1218 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) 1234 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1235 ql_log(ql_log_warn, vha, 0x7045, "abort active or needed.\n");
1219 return -EBUSY; 1236 return -EBUSY;
1237 }
1220 1238
1221 if (!IS_IIDMA_CAPABLE(vha->hw)) { 1239 if (!IS_IIDMA_CAPABLE(vha->hw)) {
1222 DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not " 1240 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1223 "supported\n", __func__, vha->host_no));
1224 return -EINVAL; 1241 return -EINVAL;
1225 } 1242 }
1226 1243
1227 port_param = (struct qla_port_param *)((char *)bsg_job->request + 1244 port_param = (struct qla_port_param *)((char *)bsg_job->request +
1228 sizeof(struct fc_bsg_request)); 1245 sizeof(struct fc_bsg_request));
1229 if (!port_param) { 1246 if (!port_param) {
1230 DEBUG2(printk("%s(%ld): port_param header not provided, " 1247 ql_log(ql_log_warn, vha, 0x7047,
1231 "exiting.\n", __func__, vha->host_no)); 1248 "port_param header not provided.\n");
1232 return -EINVAL; 1249 return -EINVAL;
1233 } 1250 }
1234 1251
1235 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) { 1252 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1236 DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n", 1253 ql_log(ql_log_warn, vha, 0x7048,
1237 __func__, vha->host_no)); 1254 "Invalid destination type.\n");
1238 return -EINVAL; 1255 return -EINVAL;
1239 } 1256 }
1240 1257
@@ -1249,21 +1266,20 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1249 } 1266 }
1250 1267
1251 if (!fcport) { 1268 if (!fcport) {
1252 DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n", 1269 ql_log(ql_log_warn, vha, 0x7049,
1253 __func__, vha->host_no)); 1270 "Failed to find port.\n");
1254 return -EINVAL; 1271 return -EINVAL;
1255 } 1272 }
1256 1273
1257 if (atomic_read(&fcport->state) != FCS_ONLINE) { 1274 if (atomic_read(&fcport->state) != FCS_ONLINE) {
1258 DEBUG2(printk(KERN_ERR "%s(%ld): Port not online\n", 1275 ql_log(ql_log_warn, vha, 0x704a,
1259 __func__, vha->host_no)); 1276 "Port is not online.\n");
1260 return -EINVAL; 1277 return -EINVAL;
1261 } 1278 }
1262 1279
1263 if (fcport->flags & FCF_LOGIN_NEEDED) { 1280 if (fcport->flags & FCF_LOGIN_NEEDED) {
1264 DEBUG2(printk(KERN_ERR "%s(%ld): Remote port not logged in, " 1281 ql_log(ql_log_warn, vha, 0x704b,
1265 "flags = 0x%x\n", 1282 "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1266 __func__, vha->host_no, fcport->flags));
1267 return -EINVAL; 1283 return -EINVAL;
1268 } 1284 }
1269 1285
@@ -1275,15 +1291,13 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1275 &port_param->speed, mb); 1291 &port_param->speed, mb);
1276 1292
1277 if (rval) { 1293 if (rval) {
1278 DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for " 1294 ql_log(ql_log_warn, vha, 0x704c,
1279 "%02x%02x%02x%02x%02x%02x%02x%02x -- " 1295 "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
1280 "%04x %x %04x %04x.\n", 1296 "%04x %x %04x %04x.\n", fcport->port_name[0],
1281 vha->host_no, fcport->port_name[0], 1297 fcport->port_name[1], fcport->port_name[2],
1282 fcport->port_name[1], 1298 fcport->port_name[3], fcport->port_name[4],
1283 fcport->port_name[2], fcport->port_name[3], 1299 fcport->port_name[5], fcport->port_name[6],
1284 fcport->port_name[4], fcport->port_name[5], 1300 fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
1285 fcport->port_name[6], fcport->port_name[7], rval,
1286 fcport->fp_speed, mb[0], mb[1]));
1287 rval = 0; 1301 rval = 0;
1288 bsg_job->reply->result = (DID_ERROR << 16); 1302 bsg_job->reply->result = (DID_ERROR << 16);
1289 1303
@@ -1307,11 +1321,12 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1307} 1321}
1308 1322
1309static int 1323static int
1310qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha, 1324qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1311 uint8_t is_update) 1325 uint8_t is_update)
1312{ 1326{
1313 uint32_t start = 0; 1327 uint32_t start = 0;
1314 int valid = 0; 1328 int valid = 0;
1329 struct qla_hw_data *ha = vha->hw;
1315 1330
1316 bsg_job->reply->reply_payload_rcv_len = 0; 1331 bsg_job->reply->reply_payload_rcv_len = 0;
1317 1332
@@ -1319,14 +1334,20 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
1319 return -EINVAL; 1334 return -EINVAL;
1320 1335
1321 start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 1336 start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1322 if (start > ha->optrom_size) 1337 if (start > ha->optrom_size) {
1338 ql_log(ql_log_warn, vha, 0x7055,
1339 "start %d > optrom_size %d.\n", start, ha->optrom_size);
1323 return -EINVAL; 1340 return -EINVAL;
1341 }
1324 1342
1325 if (ha->optrom_state != QLA_SWAITING) 1343 if (ha->optrom_state != QLA_SWAITING) {
1344 ql_log(ql_log_info, vha, 0x7056,
1345 "optrom_state %d.\n", ha->optrom_state);
1326 return -EBUSY; 1346 return -EBUSY;
1347 }
1327 1348
1328 ha->optrom_region_start = start; 1349 ha->optrom_region_start = start;
1329 1350 ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1330 if (is_update) { 1351 if (is_update) {
1331 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) 1352 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1332 valid = 1; 1353 valid = 1;
@@ -1337,9 +1358,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
1337 IS_QLA8XXX_TYPE(ha)) 1358 IS_QLA8XXX_TYPE(ha))
1338 valid = 1; 1359 valid = 1;
1339 if (!valid) { 1360 if (!valid) {
1340 qla_printk(KERN_WARNING, ha, 1361 ql_log(ql_log_warn, vha, 0x7058,
1341 "Invalid start region 0x%x/0x%x.\n", 1362 "Invalid start region 0x%x/0x%x.\n", start,
1342 start, bsg_job->request_payload.payload_len); 1363 bsg_job->request_payload.payload_len);
1343 return -EINVAL; 1364 return -EINVAL;
1344 } 1365 }
1345 1366
@@ -1358,9 +1379,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
1358 1379
1359 ha->optrom_buffer = vmalloc(ha->optrom_region_size); 1380 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1360 if (!ha->optrom_buffer) { 1381 if (!ha->optrom_buffer) {
1361 qla_printk(KERN_WARNING, ha, 1382 ql_log(ql_log_warn, vha, 0x7059,
1362 "Read: Unable to allocate memory for optrom retrieval " 1383 "Read: Unable to allocate memory for optrom retrieval "
1363 "(%x).\n", ha->optrom_region_size); 1384 "(%x)\n", ha->optrom_region_size);
1364 1385
1365 ha->optrom_state = QLA_SWAITING; 1386 ha->optrom_state = QLA_SWAITING;
1366 return -ENOMEM; 1387 return -ENOMEM;
@@ -1378,7 +1399,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1378 struct qla_hw_data *ha = vha->hw; 1399 struct qla_hw_data *ha = vha->hw;
1379 int rval = 0; 1400 int rval = 0;
1380 1401
1381 rval = qla2x00_optrom_setup(bsg_job, ha, 0); 1402 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1382 if (rval) 1403 if (rval)
1383 return rval; 1404 return rval;
1384 1405
@@ -1406,7 +1427,7 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1406 struct qla_hw_data *ha = vha->hw; 1427 struct qla_hw_data *ha = vha->hw;
1407 int rval = 0; 1428 int rval = 0;
1408 1429
1409 rval = qla2x00_optrom_setup(bsg_job, ha, 1); 1430 rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1410 if (rval) 1431 if (rval)
1411 return rval; 1432 return rval;
1412 1433
@@ -1464,6 +1485,23 @@ int
1464qla24xx_bsg_request(struct fc_bsg_job *bsg_job) 1485qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1465{ 1486{
1466 int ret = -EINVAL; 1487 int ret = -EINVAL;
1488 struct fc_rport *rport;
1489 fc_port_t *fcport = NULL;
1490 struct Scsi_Host *host;
1491 scsi_qla_host_t *vha;
1492
1493 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1494 rport = bsg_job->rport;
1495 fcport = *(fc_port_t **) rport->dd_data;
1496 host = rport_to_shost(rport);
1497 vha = shost_priv(host);
1498 } else {
1499 host = bsg_job->shost;
1500 vha = shost_priv(host);
1501 }
1502
1503 ql_dbg(ql_dbg_user, vha, 0x7000,
1504 "Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode);
1467 1505
1468 switch (bsg_job->request->msgcode) { 1506 switch (bsg_job->request->msgcode) {
1469 case FC_BSG_RPT_ELS: 1507 case FC_BSG_RPT_ELS:
@@ -1480,7 +1518,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1480 case FC_BSG_HST_DEL_RPORT: 1518 case FC_BSG_HST_DEL_RPORT:
1481 case FC_BSG_RPT_CT: 1519 case FC_BSG_RPT_CT:
1482 default: 1520 default:
1483 DEBUG2(printk("qla2xxx: unsupported BSG request\n")); 1521 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
1484 break; 1522 break;
1485 } 1523 }
1486 return ret; 1524 return ret;
@@ -1514,17 +1552,15 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1514 && (sp_bsg->u.bsg_job == bsg_job)) { 1552 && (sp_bsg->u.bsg_job == bsg_job)) {
1515 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1553 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1516 if (ha->isp_ops->abort_command(sp)) { 1554 if (ha->isp_ops->abort_command(sp)) {
1517 DEBUG2(qla_printk(KERN_INFO, ha, 1555 ql_log(ql_log_warn, vha, 0x7089,
1518 "scsi(%ld): mbx " 1556 "mbx abort_command "
1519 "abort_command failed\n", 1557 "failed.\n");
1520 vha->host_no));
1521 bsg_job->req->errors = 1558 bsg_job->req->errors =
1522 bsg_job->reply->result = -EIO; 1559 bsg_job->reply->result = -EIO;
1523 } else { 1560 } else {
1524 DEBUG2(qla_printk(KERN_INFO, ha, 1561 ql_dbg(ql_dbg_user, vha, 0x708a,
1525 "scsi(%ld): mbx " 1562 "mbx abort_command "
1526 "abort_command success\n", 1563 "success.\n");
1527 vha->host_no));
1528 bsg_job->req->errors = 1564 bsg_job->req->errors =
1529 bsg_job->reply->result = 0; 1565 bsg_job->reply->result = 0;
1530 } 1566 }
@@ -1535,8 +1571,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1535 } 1571 }
1536 } 1572 }
1537 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1573 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1538 DEBUG2(qla_printk(KERN_INFO, ha, 1574 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
1539 "scsi(%ld) SRB not found to abort\n", vha->host_no));
1540 bsg_job->req->errors = bsg_job->reply->result = -ENXIO; 1575 bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
1541 return 0; 1576 return 0;
1542 1577
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index c53719a9a747..2155071f3100 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -4,10 +4,36 @@
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7
8/*
9 * Table for showing the current message id in use for particular level
10 * Change this table for addition of log/debug messages.
11 * -----------------------------------------------------
12 * | Level | Last Value Used |
13 * -----------------------------------------------------
14 * | Module Init and Probe | 0x0116 |
15 * | Mailbox commands | 0x111e |
16 * | Device Discovery | 0x2083 |
17 * | Queue Command and IO tracing | 0x302e |
18 * | DPC Thread | 0x401c |
19 * | Async Events | 0x5059 |
20 * | Timer Routines | 0x600d |
21 * | User Space Interactions | 0x709c |
22 * | Task Management | 0x8043 |
23 * | AER/EEH | 0x900f |
24 * | Virtual Port | 0xa007 |
25 * | ISP82XX Specific | 0xb027 |
26 * | MultiQ | 0xc00b |
27 * | Misc | 0xd00b |
28 * -----------------------------------------------------
29 */
30
7#include "qla_def.h" 31#include "qla_def.h"
8 32
9#include <linux/delay.h> 33#include <linux/delay.h>
10 34
35static uint32_t ql_dbg_offset = 0x800;
36
11static inline void 37static inline void
12qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump) 38qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
13{ 39{
@@ -383,11 +409,11 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
383 struct qla_hw_data *ha = vha->hw; 409 struct qla_hw_data *ha = vha->hw;
384 410
385 if (rval != QLA_SUCCESS) { 411 if (rval != QLA_SUCCESS) {
386 qla_printk(KERN_WARNING, ha, 412 ql_log(ql_log_warn, vha, 0xd000,
387 "Failed to dump firmware (%x)!!!\n", rval); 413 "Failed to dump firmware (%x).\n", rval);
388 ha->fw_dumped = 0; 414 ha->fw_dumped = 0;
389 } else { 415 } else {
390 qla_printk(KERN_INFO, ha, 416 ql_log(ql_log_info, vha, 0xd001,
391 "Firmware dump saved to temp buffer (%ld/%p).\n", 417 "Firmware dump saved to temp buffer (%ld/%p).\n",
392 vha->host_no, ha->fw_dump); 418 vha->host_no, ha->fw_dump);
393 ha->fw_dumped = 1; 419 ha->fw_dumped = 1;
@@ -419,15 +445,16 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
419 spin_lock_irqsave(&ha->hardware_lock, flags); 445 spin_lock_irqsave(&ha->hardware_lock, flags);
420 446
421 if (!ha->fw_dump) { 447 if (!ha->fw_dump) {
422 qla_printk(KERN_WARNING, ha, 448 ql_log(ql_log_warn, vha, 0xd002,
423 "No buffer available for dump!!!\n"); 449 "No buffer available for dump.\n");
424 goto qla2300_fw_dump_failed; 450 goto qla2300_fw_dump_failed;
425 } 451 }
426 452
427 if (ha->fw_dumped) { 453 if (ha->fw_dumped) {
428 qla_printk(KERN_WARNING, ha, 454 ql_log(ql_log_warn, vha, 0xd003,
429 "Firmware has been previously dumped (%p) -- ignoring " 455 "Firmware has been previously dumped (%p) "
430 "request...\n", ha->fw_dump); 456 "-- ignoring request.\n",
457 ha->fw_dump);
431 goto qla2300_fw_dump_failed; 458 goto qla2300_fw_dump_failed;
432 } 459 }
433 fw = &ha->fw_dump->isp.isp23; 460 fw = &ha->fw_dump->isp.isp23;
@@ -582,15 +609,16 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
582 spin_lock_irqsave(&ha->hardware_lock, flags); 609 spin_lock_irqsave(&ha->hardware_lock, flags);
583 610
584 if (!ha->fw_dump) { 611 if (!ha->fw_dump) {
585 qla_printk(KERN_WARNING, ha, 612 ql_log(ql_log_warn, vha, 0xd004,
586 "No buffer available for dump!!!\n"); 613 "No buffer available for dump.\n");
587 goto qla2100_fw_dump_failed; 614 goto qla2100_fw_dump_failed;
588 } 615 }
589 616
590 if (ha->fw_dumped) { 617 if (ha->fw_dumped) {
591 qla_printk(KERN_WARNING, ha, 618 ql_log(ql_log_warn, vha, 0xd005,
592 "Firmware has been previously dumped (%p) -- ignoring " 619 "Firmware has been previously dumped (%p) "
593 "request...\n", ha->fw_dump); 620 "-- ignoring request.\n",
621 ha->fw_dump);
594 goto qla2100_fw_dump_failed; 622 goto qla2100_fw_dump_failed;
595 } 623 }
596 fw = &ha->fw_dump->isp.isp21; 624 fw = &ha->fw_dump->isp.isp21;
@@ -779,15 +807,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
779 spin_lock_irqsave(&ha->hardware_lock, flags); 807 spin_lock_irqsave(&ha->hardware_lock, flags);
780 808
781 if (!ha->fw_dump) { 809 if (!ha->fw_dump) {
782 qla_printk(KERN_WARNING, ha, 810 ql_log(ql_log_warn, vha, 0xd006,
783 "No buffer available for dump!!!\n"); 811 "No buffer available for dump.\n");
784 goto qla24xx_fw_dump_failed; 812 goto qla24xx_fw_dump_failed;
785 } 813 }
786 814
787 if (ha->fw_dumped) { 815 if (ha->fw_dumped) {
788 qla_printk(KERN_WARNING, ha, 816 ql_log(ql_log_warn, vha, 0xd007,
789 "Firmware has been previously dumped (%p) -- ignoring " 817 "Firmware has been previously dumped (%p) "
790 "request...\n", ha->fw_dump); 818 "-- ignoring request.\n",
819 ha->fw_dump);
791 goto qla24xx_fw_dump_failed; 820 goto qla24xx_fw_dump_failed;
792 } 821 }
793 fw = &ha->fw_dump->isp.isp24; 822 fw = &ha->fw_dump->isp.isp24;
@@ -1017,15 +1046,16 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1017 spin_lock_irqsave(&ha->hardware_lock, flags); 1046 spin_lock_irqsave(&ha->hardware_lock, flags);
1018 1047
1019 if (!ha->fw_dump) { 1048 if (!ha->fw_dump) {
1020 qla_printk(KERN_WARNING, ha, 1049 ql_log(ql_log_warn, vha, 0xd008,
1021 "No buffer available for dump!!!\n"); 1050 "No buffer available for dump.\n");
1022 goto qla25xx_fw_dump_failed; 1051 goto qla25xx_fw_dump_failed;
1023 } 1052 }
1024 1053
1025 if (ha->fw_dumped) { 1054 if (ha->fw_dumped) {
1026 qla_printk(KERN_WARNING, ha, 1055 ql_log(ql_log_warn, vha, 0xd009,
1027 "Firmware has been previously dumped (%p) -- ignoring " 1056 "Firmware has been previously dumped (%p) "
1028 "request...\n", ha->fw_dump); 1057 "-- ignoring request.\n",
1058 ha->fw_dump);
1029 goto qla25xx_fw_dump_failed; 1059 goto qla25xx_fw_dump_failed;
1030 } 1060 }
1031 fw = &ha->fw_dump->isp.isp25; 1061 fw = &ha->fw_dump->isp.isp25;
@@ -1328,15 +1358,16 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1328 spin_lock_irqsave(&ha->hardware_lock, flags); 1358 spin_lock_irqsave(&ha->hardware_lock, flags);
1329 1359
1330 if (!ha->fw_dump) { 1360 if (!ha->fw_dump) {
1331 qla_printk(KERN_WARNING, ha, 1361 ql_log(ql_log_warn, vha, 0xd00a,
1332 "No buffer available for dump!!!\n"); 1362 "No buffer available for dump.\n");
1333 goto qla81xx_fw_dump_failed; 1363 goto qla81xx_fw_dump_failed;
1334 } 1364 }
1335 1365
1336 if (ha->fw_dumped) { 1366 if (ha->fw_dumped) {
1337 qla_printk(KERN_WARNING, ha, 1367 ql_log(ql_log_warn, vha, 0xd00b,
1338 "Firmware has been previously dumped (%p) -- ignoring " 1368 "Firmware has been previously dumped (%p) "
1339 "request...\n", ha->fw_dump); 1369 "-- ignoring request.\n",
1370 ha->fw_dump);
1340 goto qla81xx_fw_dump_failed; 1371 goto qla81xx_fw_dump_failed;
1341 } 1372 }
1342 fw = &ha->fw_dump->isp.isp81; 1373 fw = &ha->fw_dump->isp.isp81;
@@ -1619,106 +1650,255 @@ qla81xx_fw_dump_failed:
1619/****************************************************************************/ 1650/****************************************************************************/
1620/* Driver Debug Functions. */ 1651/* Driver Debug Functions. */
1621/****************************************************************************/ 1652/****************************************************************************/
1622 1653/*
1654 * This function is for formatting and logging debug information.
1655 * It is to be used when vha is available. It formats the message
1656 * and logs it to the messages file.
1657 * parameters:
1658 * level: The level of the debug messages to be printed.
1659 * If ql2xextended_error_logging value is correctly set,
1660 * this message will appear in the messages file.
1661 * vha: Pointer to the scsi_qla_host_t.
1662 * id: This is a unique identifier for the level. It identifies the
1663 * part of the code from where the message originated.
1664 * msg: The message to be displayed.
1665 */
1623void 1666void
1624qla2x00_dump_regs(scsi_qla_host_t *vha) 1667ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
1625{ 1668
1626 int i; 1669 char pbuf[QL_DBG_BUF_LEN];
1627 struct qla_hw_data *ha = vha->hw; 1670 va_list ap;
1628 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1671 uint32_t len;
1629 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 1672 struct pci_dev *pdev = NULL;
1630 uint16_t __iomem *mbx_reg; 1673
1674 memset(pbuf, 0, QL_DBG_BUF_LEN);
1675
1676 va_start(ap, msg);
1677
1678 if ((level & ql2xextended_error_logging) == level) {
1679 if (vha != NULL) {
1680 pdev = vha->hw->pdev;
1681 /* <module-name> <pci-name> <msg-id>:<host> Message */
1682 sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
1683 dev_name(&(pdev->dev)), id + ql_dbg_offset,
1684 vha->host_no);
1685 } else
1686 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
1687 "0000:00:00.0", id + ql_dbg_offset);
1688
1689 len = strlen(pbuf);
1690 vsprintf(pbuf+len, msg, ap);
1691 pr_warning("%s", pbuf);
1692 }
1631 1693
1632 mbx_reg = IS_FWI2_CAPABLE(ha) ? &reg24->mailbox0: 1694 va_end(ap);
1633 MAILBOX_REG(ha, reg, 0);
1634 1695
1635 printk("Mailbox registers:\n");
1636 for (i = 0; i < 6; i++)
1637 printk("scsi(%ld): mbox %d 0x%04x \n", vha->host_no, i,
1638 RD_REG_WORD(mbx_reg++));
1639} 1696}
1640 1697
1641 1698/*
1699 * This function is for formatting and logging debug information.
1700 * It is to be used when vha is not available and pci is availble,
1701 * i.e., before host allocation. It formats the message and logs it
1702 * to the messages file.
1703 * parameters:
1704 * level: The level of the debug messages to be printed.
1705 * If ql2xextended_error_logging value is correctly set,
1706 * this message will appear in the messages file.
1707 * pdev: Pointer to the struct pci_dev.
1708 * id: This is a unique id for the level. It identifies the part
1709 * of the code from where the message originated.
1710 * msg: The message to be displayed.
1711 */
1642void 1712void
1643qla2x00_dump_buffer(uint8_t * b, uint32_t size) 1713ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
1644{
1645 uint32_t cnt;
1646 uint8_t c;
1647 1714
1648 printk(" 0 1 2 3 4 5 6 7 8 9 " 1715 char pbuf[QL_DBG_BUF_LEN];
1649 "Ah Bh Ch Dh Eh Fh\n"); 1716 va_list ap;
1650 printk("----------------------------------------" 1717 uint32_t len;
1651 "----------------------\n"); 1718
1652 1719 if (pdev == NULL)
1653 for (cnt = 0; cnt < size;) { 1720 return;
1654 c = *b++; 1721
1655 printk("%02x",(uint32_t) c); 1722 memset(pbuf, 0, QL_DBG_BUF_LEN);
1656 cnt++; 1723
1657 if (!(cnt % 16)) 1724 va_start(ap, msg);
1658 printk("\n"); 1725
1659 else 1726 if ((level & ql2xextended_error_logging) == level) {
1660 printk(" "); 1727 /* <module-name> <dev-name>:<msg-id> Message */
1728 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
1729 dev_name(&(pdev->dev)), id + ql_dbg_offset);
1730
1731 len = strlen(pbuf);
1732 vsprintf(pbuf+len, msg, ap);
1733 pr_warning("%s", pbuf);
1661 } 1734 }
1662 if (cnt % 16) 1735
1663 printk("\n"); 1736 va_end(ap);
1737
1664} 1738}
1665 1739
1740/*
1741 * This function is for formatting and logging log messages.
1742 * It is to be used when vha is available. It formats the message
1743 * and logs it to the messages file. All the messages will be logged
1744 * irrespective of value of ql2xextended_error_logging.
1745 * parameters:
1746 * level: The level of the log messages to be printed in the
1747 * messages file.
1748 * vha: Pointer to the scsi_qla_host_t
1749 * id: This is a unique id for the level. It identifies the
1750 * part of the code from where the message originated.
1751 * msg: The message to be displayed.
1752 */
1666void 1753void
1667qla2x00_dump_buffer_zipped(uint8_t *b, uint32_t size) 1754ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
1668{
1669 uint32_t cnt;
1670 uint8_t c;
1671 uint8_t last16[16], cur16[16];
1672 uint32_t lc = 0, num_same16 = 0, j;
1673 1755
1674 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 " 1756 char pbuf[QL_DBG_BUF_LEN];
1675 "Ah Bh Ch Dh Eh Fh\n"); 1757 va_list ap;
1676 printk(KERN_DEBUG "----------------------------------------" 1758 uint32_t len;
1677 "----------------------\n"); 1759 struct pci_dev *pdev = NULL;
1678 1760
1679 for (cnt = 0; cnt < size;) { 1761 memset(pbuf, 0, QL_DBG_BUF_LEN);
1680 c = *b++;
1681 1762
1682 cur16[lc++] = c; 1763 va_start(ap, msg);
1683 1764
1684 cnt++; 1765 if (level <= ql_errlev) {
1685 if (cnt % 16) 1766 if (vha != NULL) {
1686 continue; 1767 pdev = vha->hw->pdev;
1687 1768 /* <module-name> <msg-id>:<host> Message */
1688 /* We have 16 now */ 1769 sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
1689 lc = 0; 1770 dev_name(&(pdev->dev)), id, vha->host_no);
1690 if (num_same16 == 0) { 1771 } else
1691 memcpy(last16, cur16, 16); 1772 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
1692 num_same16++; 1773 "0000:00:00.0", id);
1693 continue; 1774
1775 len = strlen(pbuf);
1776 vsprintf(pbuf+len, msg, ap);
1777
1778 switch (level) {
1779 case 0: /* FATAL LOG */
1780 pr_crit("%s", pbuf);
1781 break;
1782 case 1:
1783 pr_err("%s", pbuf);
1784 break;
1785 case 2:
1786 pr_warn("%s", pbuf);
1787 break;
1788 default:
1789 pr_info("%s", pbuf);
1790 break;
1694 } 1791 }
1695 if (memcmp(cur16, last16, 16) == 0) { 1792 }
1696 num_same16++; 1793
1697 continue; 1794 va_end(ap);
1795}
1796
1797/*
1798 * This function is for formatting and logging log messages.
1799 * It is to be used when vha is not available and pci is availble,
1800 * i.e., before host allocation. It formats the message and logs
1801 * it to the messages file. All the messages are logged irrespective
1802 * of the value of ql2xextended_error_logging.
1803 * parameters:
1804 * level: The level of the log messages to be printed in the
1805 * messages file.
1806 * pdev: Pointer to the struct pci_dev.
1807 * id: This is a unique id for the level. It identifies the
1808 * part of the code from where the message originated.
1809 * msg: The message to be displayed.
1810 */
1811void
1812ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
1813
1814 char pbuf[QL_DBG_BUF_LEN];
1815 va_list ap;
1816 uint32_t len;
1817
1818 if (pdev == NULL)
1819 return;
1820
1821 memset(pbuf, 0, QL_DBG_BUF_LEN);
1822
1823 va_start(ap, msg);
1824
1825 if (level <= ql_errlev) {
1826 /* <module-name> <dev-name>:<msg-id> Message */
1827 sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
1828 dev_name(&(pdev->dev)), id);
1829
1830 len = strlen(pbuf);
1831 vsprintf(pbuf+len, msg, ap);
1832 switch (level) {
1833 case 0: /* FATAL LOG */
1834 pr_crit("%s", pbuf);
1835 break;
1836 case 1:
1837 pr_err("%s", pbuf);
1838 break;
1839 case 2:
1840 pr_warn("%s", pbuf);
1841 break;
1842 default:
1843 pr_info("%s", pbuf);
1844 break;
1698 } 1845 }
1699 for (j = 0; j < 16; j++)
1700 printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
1701 printk(KERN_DEBUG "\n");
1702
1703 if (num_same16 > 1)
1704 printk(KERN_DEBUG "> prev pattern repeats (%u)"
1705 "more times\n", num_same16-1);
1706 memcpy(last16, cur16, 16);
1707 num_same16 = 1;
1708 } 1846 }
1709 1847
1710 if (num_same16) { 1848 va_end(ap);
1711 for (j = 0; j < 16; j++) 1849}
1712 printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
1713 printk(KERN_DEBUG "\n");
1714 1850
1715 if (num_same16 > 1) 1851void
1716 printk(KERN_DEBUG "> prev pattern repeats (%u)" 1852ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
1717 "more times\n", num_same16-1); 1853{
1854 int i;
1855 struct qla_hw_data *ha = vha->hw;
1856 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1857 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
1858 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1859 uint16_t __iomem *mbx_reg;
1860
1861 if ((level & ql2xextended_error_logging) == level) {
1862
1863 if (IS_QLA82XX(ha))
1864 mbx_reg = &reg82->mailbox_in[0];
1865 else if (IS_FWI2_CAPABLE(ha))
1866 mbx_reg = &reg24->mailbox0;
1867 else
1868 mbx_reg = MAILBOX_REG(ha, reg, 0);
1869
1870 ql_dbg(level, vha, id, "Mailbox registers:\n");
1871 for (i = 0; i < 6; i++)
1872 ql_dbg(level, vha, id,
1873 "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
1718 } 1874 }
1719 if (lc) { 1875}
1720 for (j = 0; j < lc; j++) 1876
1721 printk(KERN_DEBUG "%02x ", (uint32_t)cur16[j]); 1877
1722 printk(KERN_DEBUG "\n"); 1878void
1879ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
1880 uint8_t *b, uint32_t size)
1881{
1882 uint32_t cnt;
1883 uint8_t c;
1884 if ((level & ql2xextended_error_logging) == level) {
1885
1886 ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
1887 "9 Ah Bh Ch Dh Eh Fh\n");
1888 ql_dbg(level, vha, id, "----------------------------------"
1889 "----------------------------\n");
1890
1891 ql_dbg(level, vha, id, "");
1892 for (cnt = 0; cnt < size;) {
1893 c = *b++;
1894 printk("%02x", (uint32_t) c);
1895 cnt++;
1896 if (!(cnt % 16))
1897 printk("\n");
1898 else
1899 printk(" ");
1900 }
1901 if (cnt % 16)
1902 ql_dbg(level, vha, id, "\n");
1723 } 1903 }
1724} 1904}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 930414541ec6..98a377b99017 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -8,146 +8,6 @@
8#include "qla_def.h" 8#include "qla_def.h"
9 9
10/* 10/*
11 * Driver debug definitions.
12 */
13/* #define QL_DEBUG_LEVEL_1 */ /* Output register accesses to COM1 */
14/* #define QL_DEBUG_LEVEL_2 */ /* Output error msgs to COM1 */
15/* #define QL_DEBUG_LEVEL_3 */ /* Output function trace msgs to COM1 */
16/* #define QL_DEBUG_LEVEL_4 */ /* Output NVRAM trace msgs to COM1 */
17/* #define QL_DEBUG_LEVEL_5 */ /* Output ring trace msgs to COM1 */
18/* #define QL_DEBUG_LEVEL_6 */ /* Output WATCHDOG timer trace to COM1 */
19/* #define QL_DEBUG_LEVEL_7 */ /* Output RISC load trace msgs to COM1 */
20/* #define QL_DEBUG_LEVEL_8 */ /* Output ring saturation msgs to COM1 */
21/* #define QL_DEBUG_LEVEL_9 */ /* Output IOCTL trace msgs */
22/* #define QL_DEBUG_LEVEL_10 */ /* Output IOCTL error msgs */
23/* #define QL_DEBUG_LEVEL_11 */ /* Output Mbx Cmd trace msgs */
24/* #define QL_DEBUG_LEVEL_12 */ /* Output IP trace msgs */
25/* #define QL_DEBUG_LEVEL_13 */ /* Output fdmi function trace msgs */
26/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
27/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
28/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
29/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
30/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
31
32/*
33* Macros use for debugging the driver.
34*/
35
36#define DEBUG(x) do { if (ql2xextended_error_logging) { x; } } while (0)
37
38#if defined(QL_DEBUG_LEVEL_1)
39#define DEBUG1(x) do {x;} while (0)
40#else
41#define DEBUG1(x) do {} while (0)
42#endif
43
44#define DEBUG2(x) do { if (ql2xextended_error_logging) { x; } } while (0)
45#define DEBUG2_3(x) do { if (ql2xextended_error_logging) { x; } } while (0)
46#define DEBUG2_3_11(x) do { if (ql2xextended_error_logging) { x; } } while (0)
47#define DEBUG2_9_10(x) do { if (ql2xextended_error_logging) { x; } } while (0)
48#define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0)
49#define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0)
50#define DEBUG2_16(x) do { if (ql2xextended_error_logging) { x; } } while (0)
51#define DEBUG2_17(x) do { if (ql2xextended_error_logging) { x; } } while (0)
52
53#if defined(QL_DEBUG_LEVEL_3)
54#define DEBUG3(x) do {x;} while (0)
55#define DEBUG3_11(x) do {x;} while (0)
56#else
57#define DEBUG3(x) do {} while (0)
58#endif
59
60#if defined(QL_DEBUG_LEVEL_4)
61#define DEBUG4(x) do {x;} while (0)
62#else
63#define DEBUG4(x) do {} while (0)
64#endif
65
66#if defined(QL_DEBUG_LEVEL_5)
67#define DEBUG5(x) do {x;} while (0)
68#else
69#define DEBUG5(x) do {} while (0)
70#endif
71
72#if defined(QL_DEBUG_LEVEL_7)
73#define DEBUG7(x) do {x;} while (0)
74#else
75#define DEBUG7(x) do {} while (0)
76#endif
77
78#if defined(QL_DEBUG_LEVEL_9)
79#define DEBUG9(x) do {x;} while (0)
80#define DEBUG9_10(x) do {x;} while (0)
81#else
82#define DEBUG9(x) do {} while (0)
83#endif
84
85#if defined(QL_DEBUG_LEVEL_10)
86#define DEBUG10(x) do {x;} while (0)
87#define DEBUG9_10(x) do {x;} while (0)
88#else
89#define DEBUG10(x) do {} while (0)
90 #if !defined(DEBUG9_10)
91 #define DEBUG9_10(x) do {} while (0)
92 #endif
93#endif
94
95#if defined(QL_DEBUG_LEVEL_11)
96#define DEBUG11(x) do{x;} while(0)
97#if !defined(DEBUG3_11)
98#define DEBUG3_11(x) do{x;} while(0)
99#endif
100#else
101#define DEBUG11(x) do{} while(0)
102 #if !defined(QL_DEBUG_LEVEL_3)
103 #define DEBUG3_11(x) do{} while(0)
104 #endif
105#endif
106
107#if defined(QL_DEBUG_LEVEL_12)
108#define DEBUG12(x) do {x;} while (0)
109#else
110#define DEBUG12(x) do {} while (0)
111#endif
112
113#if defined(QL_DEBUG_LEVEL_13)
114#define DEBUG13(x) do {x;} while (0)
115#else
116#define DEBUG13(x) do {} while (0)
117#endif
118
119#if defined(QL_DEBUG_LEVEL_14)
120#define DEBUG14(x) do {x;} while (0)
121#else
122#define DEBUG14(x) do {} while (0)
123#endif
124
125#if defined(QL_DEBUG_LEVEL_15)
126#define DEBUG15(x) do {x;} while (0)
127#else
128#define DEBUG15(x) do {} while (0)
129#endif
130
131#if defined(QL_DEBUG_LEVEL_16)
132#define DEBUG16(x) do {x;} while (0)
133#else
134#define DEBUG16(x) do {} while (0)
135#endif
136
137#if defined(QL_DEBUG_LEVEL_17)
138#define DEBUG17(x) do {x;} while (0)
139#else
140#define DEBUG17(x) do {} while (0)
141#endif
142
143#if defined(QL_DEBUG_LEVEL_18)
144#define DEBUG18(x) do {if (ql2xextended_error_logging) x; } while (0)
145#else
146#define DEBUG18(x) do {} while (0)
147#endif
148
149
150/*
151 * Firmware Dump structure definition 11 * Firmware Dump structure definition
152 */ 12 */
153 13
@@ -370,3 +230,50 @@ struct qla2xxx_fw_dump {
370 struct qla81xx_fw_dump isp81; 230 struct qla81xx_fw_dump isp81;
371 } isp; 231 } isp;
372}; 232};
233
234#define QL_MSGHDR "qla2xxx"
235
236#define ql_log_fatal 0 /* display fatal errors */
237#define ql_log_warn 1 /* display critical errors */
238#define ql_log_info 2 /* display all recovered errors */
239#define ql_log_all 3 /* This value is only used by ql_errlev.
240 * No messages will use this value.
241 * This should be always highest value
242 * as compared to other log levels.
243 */
244
245extern int ql_errlev;
246
247void
248ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
249void
250ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
251
252void
253ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
254void
255ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
256
257/* Debug Levels */
258/* The 0x40000000 is the max value any debug level can have
259 * as ql2xextended_error_logging is of type signed int
260 */
261#define ql_dbg_init 0x40000000 /* Init Debug */
262#define ql_dbg_mbx 0x20000000 /* MBX Debug */
263#define ql_dbg_disc 0x10000000 /* Device Discovery Debug */
264#define ql_dbg_io 0x08000000 /* IO Tracing Debug */
265#define ql_dbg_dpc 0x04000000 /* DPC Thead Debug */
266#define ql_dbg_async 0x02000000 /* Async events Debug */
267#define ql_dbg_timer 0x01000000 /* Timer Debug */
268#define ql_dbg_user 0x00800000 /* User Space Interations Debug */
269#define ql_dbg_taskm 0x00400000 /* Task Management Debug */
270#define ql_dbg_aer 0x00200000 /* AER/EEH Debug */
271#define ql_dbg_multiq 0x00100000 /* MultiQ Debug */
272#define ql_dbg_p3p 0x00080000 /* P3P specific Debug */
273#define ql_dbg_vport 0x00040000 /* Virtual Port Debug */
274#define ql_dbg_buffer 0x00020000 /* For dumping the buffer/regs */
275#define ql_dbg_misc 0x00010000 /* For dumping everything that is not
276 * not covered by upper categories
277 */
278
279#define QL_DBG_BUF_LEN 512
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index a5a4e1275bf2..0b4c2b794c6f 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -64,7 +64,7 @@ qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
64 /* Pause tracing to flush FCE buffers. */ 64 /* Pause tracing to flush FCE buffers. */
65 rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd); 65 rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
66 if (rval) 66 if (rval)
67 qla_printk(KERN_WARNING, ha, 67 ql_dbg(ql_dbg_user, vha, 0x705c,
68 "DebugFS: Unable to disable FCE (%d).\n", rval); 68 "DebugFS: Unable to disable FCE (%d).\n", rval);
69 69
70 ha->flags.fce_enabled = 0; 70 ha->flags.fce_enabled = 0;
@@ -92,7 +92,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
92 rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs, 92 rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
93 ha->fce_mb, &ha->fce_bufs); 93 ha->fce_mb, &ha->fce_bufs);
94 if (rval) { 94 if (rval) {
95 qla_printk(KERN_WARNING, ha, 95 ql_dbg(ql_dbg_user, vha, 0x700d,
96 "DebugFS: Unable to reinitialize FCE (%d).\n", rval); 96 "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
97 ha->flags.fce_enabled = 0; 97 ha->flags.fce_enabled = 0;
98 } 98 }
@@ -125,8 +125,8 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
125 atomic_set(&qla2x00_dfs_root_count, 0); 125 atomic_set(&qla2x00_dfs_root_count, 0);
126 qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL); 126 qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
127 if (!qla2x00_dfs_root) { 127 if (!qla2x00_dfs_root) {
128 qla_printk(KERN_NOTICE, ha, 128 ql_log(ql_log_warn, vha, 0x00f7,
129 "DebugFS: Unable to create root directory.\n"); 129 "Unable to create debugfs root directory.\n");
130 goto out; 130 goto out;
131 } 131 }
132 132
@@ -137,8 +137,8 @@ create_dir:
137 mutex_init(&ha->fce_mutex); 137 mutex_init(&ha->fce_mutex);
138 ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root); 138 ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
139 if (!ha->dfs_dir) { 139 if (!ha->dfs_dir) {
140 qla_printk(KERN_NOTICE, ha, 140 ql_log(ql_log_warn, vha, 0x00f8,
141 "DebugFS: Unable to create ha directory.\n"); 141 "Unable to create debugfs ha directory.\n");
142 goto out; 142 goto out;
143 } 143 }
144 144
@@ -148,8 +148,8 @@ create_nodes:
148 ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, 148 ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
149 &dfs_fce_ops); 149 &dfs_fce_ops);
150 if (!ha->dfs_fce) { 150 if (!ha->dfs_fce) {
151 qla_printk(KERN_NOTICE, ha, 151 ql_log(ql_log_warn, vha, 0x00f9,
152 "DebugFS: Unable to fce node.\n"); 152 "Unable to create debugfs fce node.\n");
153 goto out; 153 goto out;
154 } 154 }
155out: 155out:
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0b381224ae4b..29b1a3e28231 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -185,7 +185,7 @@ extern int qla24xx_start_scsi(srb_t *sp);
185int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, 185int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
186 uint16_t, uint16_t, uint8_t); 186 uint16_t, uint16_t, uint8_t);
187extern int qla2x00_start_sp(srb_t *); 187extern int qla2x00_start_sp(srb_t *);
188extern uint16_t qla24xx_calc_iocbs(uint16_t); 188extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
189extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t); 189extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
190extern int qla24xx_dif_start_scsi(srb_t *); 190extern int qla24xx_dif_start_scsi(srb_t *);
191 191
@@ -439,6 +439,9 @@ extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
439extern void qla2x00_dump_regs(scsi_qla_host_t *); 439extern void qla2x00_dump_regs(scsi_qla_host_t *);
440extern void qla2x00_dump_buffer(uint8_t *, uint32_t); 440extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
441extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t); 441extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
442extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
443extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
444 uint8_t *, uint32_t);
442 445
443/* 446/*
444 * Global Function Prototypes in qla_gs.c source file. 447 * Global Function Prototypes in qla_gs.c source file.
@@ -478,7 +481,8 @@ extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16
478extern int qla2x00_echo_test(scsi_qla_host_t *, 481extern int qla2x00_echo_test(scsi_qla_host_t *,
479 struct msg_echo_lb *, uint16_t *); 482 struct msg_echo_lb *, uint16_t *);
480extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *); 483extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
481extern int qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *, uint8_t); 484extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
485 struct qla_fcp_prio_cfg *, uint8_t);
482 486
483/* 487/*
484 * Global Function Prototypes in qla_dfs.c source file. 488 * Global Function Prototypes in qla_dfs.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 8cd9066ad906..37937aa3c3b8 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -121,11 +121,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
121 121
122 rval = QLA_FUNCTION_FAILED; 122 rval = QLA_FUNCTION_FAILED;
123 if (ms_pkt->entry_status != 0) { 123 if (ms_pkt->entry_status != 0) {
124 DEBUG2_3(printk(KERN_WARNING "scsi(%ld): %s failed, error status " 124 ql_dbg(ql_dbg_disc, vha, 0x2031,
125 "(%x) on port_id: %02x%02x%02x.\n", 125 "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
126 vha->host_no, routine, ms_pkt->entry_status, 126 routine, ms_pkt->entry_status, vha->d_id.b.domain,
127 vha->d_id.b.domain, vha->d_id.b.area, 127 vha->d_id.b.area, vha->d_id.b.al_pa);
128 vha->d_id.b.al_pa));
129 } else { 128 } else {
130 if (IS_FWI2_CAPABLE(ha)) 129 if (IS_FWI2_CAPABLE(ha))
131 comp_status = le16_to_cpu( 130 comp_status = le16_to_cpu(
@@ -138,24 +137,24 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
138 case CS_DATA_OVERRUN: /* Overrun? */ 137 case CS_DATA_OVERRUN: /* Overrun? */
139 if (ct_rsp->header.response != 138 if (ct_rsp->header.response !=
140 __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { 139 __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
141 DEBUG2_3(printk("scsi(%ld): %s failed, " 140 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
142 "rejected request on port_id: %02x%02x%02x\n", 141 "%s failed rejected request on port_id: "
143 vha->host_no, routine, 142 "%02x%02x%02x.\n", routine,
144 vha->d_id.b.domain, vha->d_id.b.area, 143 vha->d_id.b.domain, vha->d_id.b.area,
145 vha->d_id.b.al_pa)); 144 vha->d_id.b.al_pa);
146 DEBUG2_3(qla2x00_dump_buffer( 145 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
147 (uint8_t *)&ct_rsp->header, 146 0x2078, (uint8_t *)&ct_rsp->header,
148 sizeof(struct ct_rsp_hdr))); 147 sizeof(struct ct_rsp_hdr));
149 rval = QLA_INVALID_COMMAND; 148 rval = QLA_INVALID_COMMAND;
150 } else 149 } else
151 rval = QLA_SUCCESS; 150 rval = QLA_SUCCESS;
152 break; 151 break;
153 default: 152 default:
154 DEBUG2_3(printk("scsi(%ld): %s failed, completion " 153 ql_dbg(ql_dbg_disc, vha, 0x2033,
155 "status (%x) on port_id: %02x%02x%02x.\n", 154 "%s failed, completion status (%x) on port_id: "
156 vha->host_no, routine, comp_status, 155 "%02x%02x%02x.\n", routine, comp_status,
157 vha->d_id.b.domain, vha->d_id.b.area, 156 vha->d_id.b.domain, vha->d_id.b.area,
158 vha->d_id.b.al_pa)); 157 vha->d_id.b.al_pa);
159 break; 158 break;
160 } 159 }
161 } 160 }
@@ -202,8 +201,8 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
202 sizeof(ms_iocb_entry_t)); 201 sizeof(ms_iocb_entry_t));
203 if (rval != QLA_SUCCESS) { 202 if (rval != QLA_SUCCESS) {
204 /*EMPTY*/ 203 /*EMPTY*/
205 DEBUG2_3(printk("scsi(%ld): GA_NXT issue IOCB failed (%d).\n", 204 ql_dbg(ql_dbg_disc, vha, 0x2062,
206 vha->host_no, rval)); 205 "GA_NXT issue IOCB failed (%d).\n", rval);
207 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") != 206 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
208 QLA_SUCCESS) { 207 QLA_SUCCESS) {
209 rval = QLA_FUNCTION_FAILED; 208 rval = QLA_FUNCTION_FAILED;
@@ -222,11 +221,10 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
222 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE) 221 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
223 fcport->d_id.b.domain = 0xf0; 222 fcport->d_id.b.domain = 0xf0;
224 223
225 DEBUG2_3(printk("scsi(%ld): GA_NXT entry - " 224 ql_dbg(ql_dbg_disc, vha, 0x2063,
226 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 225 "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
227 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 226 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
228 "portid=%02x%02x%02x.\n", 227 "port_id=%02x%02x%02x.\n",
229 vha->host_no,
230 fcport->node_name[0], fcport->node_name[1], 228 fcport->node_name[0], fcport->node_name[1],
231 fcport->node_name[2], fcport->node_name[3], 229 fcport->node_name[2], fcport->node_name[3],
232 fcport->node_name[4], fcport->node_name[5], 230 fcport->node_name[4], fcport->node_name[5],
@@ -236,7 +234,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
236 fcport->port_name[4], fcport->port_name[5], 234 fcport->port_name[4], fcport->port_name[5],
237 fcport->port_name[6], fcport->port_name[7], 235 fcport->port_name[6], fcport->port_name[7],
238 fcport->d_id.b.domain, fcport->d_id.b.area, 236 fcport->d_id.b.domain, fcport->d_id.b.area,
239 fcport->d_id.b.al_pa)); 237 fcport->d_id.b.al_pa);
240 } 238 }
241 239
242 return (rval); 240 return (rval);
@@ -287,8 +285,8 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
287 sizeof(ms_iocb_entry_t)); 285 sizeof(ms_iocb_entry_t));
288 if (rval != QLA_SUCCESS) { 286 if (rval != QLA_SUCCESS) {
289 /*EMPTY*/ 287 /*EMPTY*/
290 DEBUG2_3(printk("scsi(%ld): GID_PT issue IOCB failed (%d).\n", 288 ql_dbg(ql_dbg_disc, vha, 0x2055,
291 vha->host_no, rval)); 289 "GID_PT issue IOCB failed (%d).\n", rval);
292 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") != 290 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
293 QLA_SUCCESS) { 291 QLA_SUCCESS) {
294 rval = QLA_FUNCTION_FAILED; 292 rval = QLA_FUNCTION_FAILED;
@@ -364,8 +362,8 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
364 sizeof(ms_iocb_entry_t)); 362 sizeof(ms_iocb_entry_t));
365 if (rval != QLA_SUCCESS) { 363 if (rval != QLA_SUCCESS) {
366 /*EMPTY*/ 364 /*EMPTY*/
367 DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed " 365 ql_dbg(ql_dbg_disc, vha, 0x2056,
368 "(%d).\n", vha->host_no, rval)); 366 "GPN_ID issue IOCB failed (%d).\n", rval);
369 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 367 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
370 "GPN_ID") != QLA_SUCCESS) { 368 "GPN_ID") != QLA_SUCCESS) {
371 rval = QLA_FUNCTION_FAILED; 369 rval = QLA_FUNCTION_FAILED;
@@ -424,8 +422,8 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
424 sizeof(ms_iocb_entry_t)); 422 sizeof(ms_iocb_entry_t));
425 if (rval != QLA_SUCCESS) { 423 if (rval != QLA_SUCCESS) {
426 /*EMPTY*/ 424 /*EMPTY*/
427 DEBUG2_3(printk("scsi(%ld): GNN_ID issue IOCB failed " 425 ql_dbg(ql_dbg_disc, vha, 0x2057,
428 "(%d).\n", vha->host_no, rval)); 426 "GNN_ID issue IOCB failed (%d).\n", rval);
429 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 427 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
430 "GNN_ID") != QLA_SUCCESS) { 428 "GNN_ID") != QLA_SUCCESS) {
431 rval = QLA_FUNCTION_FAILED; 429 rval = QLA_FUNCTION_FAILED;
@@ -434,11 +432,10 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
434 memcpy(list[i].node_name, 432 memcpy(list[i].node_name,
435 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE); 433 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
436 434
437 DEBUG2_3(printk("scsi(%ld): GID_PT entry - " 435 ql_dbg(ql_dbg_disc, vha, 0x2058,
438 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 436 "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02X%02x "
439 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 437 "pn %02x%02x%02x%02x%02x%02x%02X%02x "
440 "portid=%02x%02x%02x.\n", 438 "portid=%02x%02x%02x.\n",
441 vha->host_no,
442 list[i].node_name[0], list[i].node_name[1], 439 list[i].node_name[0], list[i].node_name[1],
443 list[i].node_name[2], list[i].node_name[3], 440 list[i].node_name[2], list[i].node_name[3],
444 list[i].node_name[4], list[i].node_name[5], 441 list[i].node_name[4], list[i].node_name[5],
@@ -448,7 +445,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
448 list[i].port_name[4], list[i].port_name[5], 445 list[i].port_name[4], list[i].port_name[5],
449 list[i].port_name[6], list[i].port_name[7], 446 list[i].port_name[6], list[i].port_name[7],
450 list[i].d_id.b.domain, list[i].d_id.b.area, 447 list[i].d_id.b.domain, list[i].d_id.b.area,
451 list[i].d_id.b.al_pa)); 448 list[i].d_id.b.al_pa);
452 } 449 }
453 450
454 /* Last device exit. */ 451 /* Last device exit. */
@@ -499,14 +496,14 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
499 sizeof(ms_iocb_entry_t)); 496 sizeof(ms_iocb_entry_t));
500 if (rval != QLA_SUCCESS) { 497 if (rval != QLA_SUCCESS) {
501 /*EMPTY*/ 498 /*EMPTY*/
502 DEBUG2_3(printk("scsi(%ld): RFT_ID issue IOCB failed (%d).\n", 499 ql_dbg(ql_dbg_disc, vha, 0x2043,
503 vha->host_no, rval)); 500 "RFT_ID issue IOCB failed (%d).\n", rval);
504 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") != 501 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
505 QLA_SUCCESS) { 502 QLA_SUCCESS) {
506 rval = QLA_FUNCTION_FAILED; 503 rval = QLA_FUNCTION_FAILED;
507 } else { 504 } else {
508 DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n", 505 ql_dbg(ql_dbg_disc, vha, 0x2044,
509 vha->host_no)); 506 "RFT_ID exiting normally.\n");
510 } 507 }
511 508
512 return (rval); 509 return (rval);
@@ -528,8 +525,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
528 struct ct_sns_rsp *ct_rsp; 525 struct ct_sns_rsp *ct_rsp;
529 526
530 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 527 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
531 DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on " 528 ql_dbg(ql_dbg_disc, vha, 0x2046,
532 "ISP2100/ISP2200.\n", vha->host_no)); 529 "RFF_ID call not supported on ISP2100/ISP2200.\n");
533 return (QLA_SUCCESS); 530 return (QLA_SUCCESS);
534 } 531 }
535 532
@@ -556,14 +553,14 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
556 sizeof(ms_iocb_entry_t)); 553 sizeof(ms_iocb_entry_t));
557 if (rval != QLA_SUCCESS) { 554 if (rval != QLA_SUCCESS) {
558 /*EMPTY*/ 555 /*EMPTY*/
559 DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n", 556 ql_dbg(ql_dbg_disc, vha, 0x2047,
560 vha->host_no, rval)); 557 "RFF_ID issue IOCB failed (%d).\n", rval);
561 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") != 558 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
562 QLA_SUCCESS) { 559 QLA_SUCCESS) {
563 rval = QLA_FUNCTION_FAILED; 560 rval = QLA_FUNCTION_FAILED;
564 } else { 561 } else {
565 DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n", 562 ql_dbg(ql_dbg_disc, vha, 0x2048,
566 vha->host_no)); 563 "RFF_ID exiting normally.\n");
567 } 564 }
568 565
569 return (rval); 566 return (rval);
@@ -609,14 +606,14 @@ qla2x00_rnn_id(scsi_qla_host_t *vha)
609 sizeof(ms_iocb_entry_t)); 606 sizeof(ms_iocb_entry_t));
610 if (rval != QLA_SUCCESS) { 607 if (rval != QLA_SUCCESS) {
611 /*EMPTY*/ 608 /*EMPTY*/
612 DEBUG2_3(printk("scsi(%ld): RNN_ID issue IOCB failed (%d).\n", 609 ql_dbg(ql_dbg_disc, vha, 0x204d,
613 vha->host_no, rval)); 610 "RNN_ID issue IOCB failed (%d).\n", rval);
614 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") != 611 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
615 QLA_SUCCESS) { 612 QLA_SUCCESS) {
616 rval = QLA_FUNCTION_FAILED; 613 rval = QLA_FUNCTION_FAILED;
617 } else { 614 } else {
618 DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n", 615 ql_dbg(ql_dbg_disc, vha, 0x204e,
619 vha->host_no)); 616 "RNN_ID exiting normally.\n");
620 } 617 }
621 618
622 return (rval); 619 return (rval);
@@ -647,8 +644,8 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
647 struct ct_sns_rsp *ct_rsp; 644 struct ct_sns_rsp *ct_rsp;
648 645
649 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 646 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
650 DEBUG2(printk("scsi(%ld): RSNN_ID call unsupported on " 647 ql_dbg(ql_dbg_disc, vha, 0x2050,
651 "ISP2100/ISP2200.\n", vha->host_no)); 648 "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
652 return (QLA_SUCCESS); 649 return (QLA_SUCCESS);
653 } 650 }
654 651
@@ -682,14 +679,14 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
682 sizeof(ms_iocb_entry_t)); 679 sizeof(ms_iocb_entry_t));
683 if (rval != QLA_SUCCESS) { 680 if (rval != QLA_SUCCESS) {
684 /*EMPTY*/ 681 /*EMPTY*/
685 DEBUG2_3(printk("scsi(%ld): RSNN_NN issue IOCB failed (%d).\n", 682 ql_dbg(ql_dbg_disc, vha, 0x2051,
686 vha->host_no, rval)); 683 "RSNN_NN issue IOCB failed (%d).\n", rval);
687 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") != 684 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
688 QLA_SUCCESS) { 685 QLA_SUCCESS) {
689 rval = QLA_FUNCTION_FAILED; 686 rval = QLA_FUNCTION_FAILED;
690 } else { 687 } else {
691 DEBUG2(printk("scsi(%ld): RSNN_NN exiting normally.\n", 688 ql_dbg(ql_dbg_disc, vha, 0x2052,
692 vha->host_no)); 689 "RSNN_NN exiting normally.\n");
693 } 690 }
694 691
695 return (rval); 692 return (rval);
@@ -757,13 +754,14 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
757 sizeof(struct sns_cmd_pkt)); 754 sizeof(struct sns_cmd_pkt));
758 if (rval != QLA_SUCCESS) { 755 if (rval != QLA_SUCCESS) {
759 /*EMPTY*/ 756 /*EMPTY*/
760 DEBUG2_3(printk("scsi(%ld): GA_NXT Send SNS failed (%d).\n", 757 ql_dbg(ql_dbg_disc, vha, 0x205f,
761 vha->host_no, rval)); 758 "GA_NXT Send SNS failed (%d).\n", rval);
762 } else if (sns_cmd->p.gan_data[8] != 0x80 || 759 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
763 sns_cmd->p.gan_data[9] != 0x02) { 760 sns_cmd->p.gan_data[9] != 0x02) {
764 DEBUG2_3(printk("scsi(%ld): GA_NXT failed, rejected request, " 761 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207d,
765 "ga_nxt_rsp:\n", vha->host_no)); 762 "GA_NXT failed, rejected request ga_nxt_rsp:\n");
766 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gan_data, 16)); 763 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
764 sns_cmd->p.gan_data, 16);
767 rval = QLA_FUNCTION_FAILED; 765 rval = QLA_FUNCTION_FAILED;
768 } else { 766 } else {
769 /* Populate fc_port_t entry. */ 767 /* Populate fc_port_t entry. */
@@ -778,11 +776,10 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
778 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE) 776 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
779 fcport->d_id.b.domain = 0xf0; 777 fcport->d_id.b.domain = 0xf0;
780 778
781 DEBUG2_3(printk("scsi(%ld): GA_NXT entry - " 779 ql_dbg(ql_dbg_disc, vha, 0x2061,
782 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 780 "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
783 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 781 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
784 "portid=%02x%02x%02x.\n", 782 "port_id=%02x%02x%02x.\n",
785 vha->host_no,
786 fcport->node_name[0], fcport->node_name[1], 783 fcport->node_name[0], fcport->node_name[1],
787 fcport->node_name[2], fcport->node_name[3], 784 fcport->node_name[2], fcport->node_name[3],
788 fcport->node_name[4], fcport->node_name[5], 785 fcport->node_name[4], fcport->node_name[5],
@@ -792,7 +789,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
792 fcport->port_name[4], fcport->port_name[5], 789 fcport->port_name[4], fcport->port_name[5],
793 fcport->port_name[6], fcport->port_name[7], 790 fcport->port_name[6], fcport->port_name[7],
794 fcport->d_id.b.domain, fcport->d_id.b.area, 791 fcport->d_id.b.domain, fcport->d_id.b.area,
795 fcport->d_id.b.al_pa)); 792 fcport->d_id.b.al_pa);
796 } 793 }
797 794
798 return (rval); 795 return (rval);
@@ -831,13 +828,14 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
831 sizeof(struct sns_cmd_pkt)); 828 sizeof(struct sns_cmd_pkt));
832 if (rval != QLA_SUCCESS) { 829 if (rval != QLA_SUCCESS) {
833 /*EMPTY*/ 830 /*EMPTY*/
834 DEBUG2_3(printk("scsi(%ld): GID_PT Send SNS failed (%d).\n", 831 ql_dbg(ql_dbg_disc, vha, 0x206d,
835 vha->host_no, rval)); 832 "GID_PT Send SNS failed (%d).\n", rval);
836 } else if (sns_cmd->p.gid_data[8] != 0x80 || 833 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
837 sns_cmd->p.gid_data[9] != 0x02) { 834 sns_cmd->p.gid_data[9] != 0x02) {
838 DEBUG2_3(printk("scsi(%ld): GID_PT failed, rejected request, " 835 ql_dbg(ql_dbg_disc, vha, 0x202f,
839 "gid_rsp:\n", vha->host_no)); 836 "GID_PT failed, rejected request, gid_rsp:\n");
840 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gid_data, 16)); 837 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
838 sns_cmd->p.gid_data, 16);
841 rval = QLA_FUNCTION_FAILED; 839 rval = QLA_FUNCTION_FAILED;
842 } else { 840 } else {
843 /* Set port IDs in switch info list. */ 841 /* Set port IDs in switch info list. */
@@ -900,13 +898,14 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
900 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); 898 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
901 if (rval != QLA_SUCCESS) { 899 if (rval != QLA_SUCCESS) {
902 /*EMPTY*/ 900 /*EMPTY*/
903 DEBUG2_3(printk("scsi(%ld): GPN_ID Send SNS failed " 901 ql_dbg(ql_dbg_disc, vha, 0x2032,
904 "(%d).\n", vha->host_no, rval)); 902 "GPN_ID Send SNS failed (%d).\n", rval);
905 } else if (sns_cmd->p.gpn_data[8] != 0x80 || 903 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
906 sns_cmd->p.gpn_data[9] != 0x02) { 904 sns_cmd->p.gpn_data[9] != 0x02) {
907 DEBUG2_3(printk("scsi(%ld): GPN_ID failed, rejected " 905 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
908 "request, gpn_rsp:\n", vha->host_no)); 906 "GPN_ID failed, rejected request, gpn_rsp:\n");
909 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gpn_data, 16)); 907 ql_dump_buffer(ql_dbg_disc, vha, 0x207f,
908 sns_cmd->p.gpn_data, 16);
910 rval = QLA_FUNCTION_FAILED; 909 rval = QLA_FUNCTION_FAILED;
911 } else { 910 } else {
912 /* Save portname */ 911 /* Save portname */
@@ -955,24 +954,24 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
955 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); 954 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
956 if (rval != QLA_SUCCESS) { 955 if (rval != QLA_SUCCESS) {
957 /*EMPTY*/ 956 /*EMPTY*/
958 DEBUG2_3(printk("scsi(%ld): GNN_ID Send SNS failed " 957 ql_dbg(ql_dbg_disc, vha, 0x203f,
959 "(%d).\n", vha->host_no, rval)); 958 "GNN_ID Send SNS failed (%d).\n", rval);
960 } else if (sns_cmd->p.gnn_data[8] != 0x80 || 959 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
961 sns_cmd->p.gnn_data[9] != 0x02) { 960 sns_cmd->p.gnn_data[9] != 0x02) {
962 DEBUG2_3(printk("scsi(%ld): GNN_ID failed, rejected " 961 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
963 "request, gnn_rsp:\n", vha->host_no)); 962 "GNN_ID failed, rejected request, gnn_rsp:\n");
964 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gnn_data, 16)); 963 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
964 sns_cmd->p.gnn_data, 16);
965 rval = QLA_FUNCTION_FAILED; 965 rval = QLA_FUNCTION_FAILED;
966 } else { 966 } else {
967 /* Save nodename */ 967 /* Save nodename */
968 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16], 968 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
969 WWN_SIZE); 969 WWN_SIZE);
970 970
971 DEBUG2_3(printk("scsi(%ld): GID_PT entry - " 971 ql_dbg(ql_dbg_disc, vha, 0x206e,
972 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 972 "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
973 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 973 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
974 "portid=%02x%02x%02x.\n", 974 "port_id=%02x%02x%02x.\n",
975 vha->host_no,
976 list[i].node_name[0], list[i].node_name[1], 975 list[i].node_name[0], list[i].node_name[1],
977 list[i].node_name[2], list[i].node_name[3], 976 list[i].node_name[2], list[i].node_name[3],
978 list[i].node_name[4], list[i].node_name[5], 977 list[i].node_name[4], list[i].node_name[5],
@@ -982,7 +981,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
982 list[i].port_name[4], list[i].port_name[5], 981 list[i].port_name[4], list[i].port_name[5],
983 list[i].port_name[6], list[i].port_name[7], 982 list[i].port_name[6], list[i].port_name[7],
984 list[i].d_id.b.domain, list[i].d_id.b.area, 983 list[i].d_id.b.domain, list[i].d_id.b.area,
985 list[i].d_id.b.al_pa)); 984 list[i].d_id.b.al_pa);
986 } 985 }
987 986
988 /* Last device exit. */ 987 /* Last device exit. */
@@ -1025,17 +1024,18 @@ qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1025 sizeof(struct sns_cmd_pkt)); 1024 sizeof(struct sns_cmd_pkt));
1026 if (rval != QLA_SUCCESS) { 1025 if (rval != QLA_SUCCESS) {
1027 /*EMPTY*/ 1026 /*EMPTY*/
1028 DEBUG2_3(printk("scsi(%ld): RFT_ID Send SNS failed (%d).\n", 1027 ql_dbg(ql_dbg_disc, vha, 0x2060,
1029 vha->host_no, rval)); 1028 "RFT_ID Send SNS failed (%d).\n", rval);
1030 } else if (sns_cmd->p.rft_data[8] != 0x80 || 1029 } else if (sns_cmd->p.rft_data[8] != 0x80 ||
1031 sns_cmd->p.rft_data[9] != 0x02) { 1030 sns_cmd->p.rft_data[9] != 0x02) {
1032 DEBUG2_3(printk("scsi(%ld): RFT_ID failed, rejected request, " 1031 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1033 "rft_rsp:\n", vha->host_no)); 1032 "RFT_ID failed, rejected request rft_rsp:\n");
1034 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rft_data, 16)); 1033 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1034 sns_cmd->p.rft_data, 16);
1035 rval = QLA_FUNCTION_FAILED; 1035 rval = QLA_FUNCTION_FAILED;
1036 } else { 1036 } else {
1037 DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n", 1037 ql_dbg(ql_dbg_disc, vha, 0x2073,
1038 vha->host_no)); 1038 "RFT_ID exiting normally.\n");
1039 } 1039 }
1040 1040
1041 return (rval); 1041 return (rval);
@@ -1081,17 +1081,18 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1081 sizeof(struct sns_cmd_pkt)); 1081 sizeof(struct sns_cmd_pkt));
1082 if (rval != QLA_SUCCESS) { 1082 if (rval != QLA_SUCCESS) {
1083 /*EMPTY*/ 1083 /*EMPTY*/
1084 DEBUG2_3(printk("scsi(%ld): RNN_ID Send SNS failed (%d).\n", 1084 ql_dbg(ql_dbg_disc, vha, 0x204a,
1085 vha->host_no, rval)); 1085 "RNN_ID Send SNS failed (%d).\n", rval);
1086 } else if (sns_cmd->p.rnn_data[8] != 0x80 || 1086 } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1087 sns_cmd->p.rnn_data[9] != 0x02) { 1087 sns_cmd->p.rnn_data[9] != 0x02) {
1088 DEBUG2_3(printk("scsi(%ld): RNN_ID failed, rejected request, " 1088 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1089 "rnn_rsp:\n", vha->host_no)); 1089 "RNN_ID failed, rejected request, rnn_rsp:\n");
1090 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rnn_data, 16)); 1090 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1091 sns_cmd->p.rnn_data, 16);
1091 rval = QLA_FUNCTION_FAILED; 1092 rval = QLA_FUNCTION_FAILED;
1092 } else { 1093 } else {
1093 DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n", 1094 ql_dbg(ql_dbg_disc, vha, 0x204c,
1094 vha->host_no)); 1095 "RNN_ID exiting normally.\n");
1095 } 1096 }
1096 1097
1097 return (rval); 1098 return (rval);
@@ -1116,10 +1117,10 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1116 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, 1117 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
1117 mb, BIT_1|BIT_0); 1118 mb, BIT_1|BIT_0);
1118 if (mb[0] != MBS_COMMAND_COMPLETE) { 1119 if (mb[0] != MBS_COMMAND_COMPLETE) {
1119 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: " 1120 ql_dbg(ql_dbg_disc, vha, 0x2024,
1120 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n", 1121 "Failed management_server login: loopid=%x mb[0]=%x "
1121 __func__, vha->host_no, vha->mgmt_svr_loop_id, mb[0], mb[1], 1122 "mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1122 mb[2], mb[6], mb[7])); 1123 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], mb[7]);
1123 ret = QLA_FUNCTION_FAILED; 1124 ret = QLA_FUNCTION_FAILED;
1124 } else 1125 } else
1125 vha->flags.management_server_logged_in = 1; 1126 vha->flags.management_server_logged_in = 1;
@@ -1292,11 +1293,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1292 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE); 1293 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1293 size += 4 + WWN_SIZE; 1294 size += 4 + WWN_SIZE;
1294 1295
1295 DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n", 1296 ql_dbg(ql_dbg_disc, vha, 0x2025,
1296 __func__, vha->host_no, 1297 "NodeName = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
1297 eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2], 1298 eiter->a.node_name[0], eiter->a.node_name[1],
1298 eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5], 1299 eiter->a.node_name[2], eiter->a.node_name[3],
1299 eiter->a.node_name[6], eiter->a.node_name[7])); 1300 eiter->a.node_name[4], eiter->a.node_name[5],
1301 eiter->a.node_name[6], eiter->a.node_name[7]);
1300 1302
1301 /* Manufacturer. */ 1303 /* Manufacturer. */
1302 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1304 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1307,8 +1309,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1307 eiter->len = cpu_to_be16(4 + alen); 1309 eiter->len = cpu_to_be16(4 + alen);
1308 size += 4 + alen; 1310 size += 4 + alen;
1309 1311
1310 DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, vha->host_no, 1312 ql_dbg(ql_dbg_disc, vha, 0x2026,
1311 eiter->a.manufacturer)); 1313 "Manufacturer = %s.\n", eiter->a.manufacturer);
1312 1314
1313 /* Serial number. */ 1315 /* Serial number. */
1314 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1316 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1320,8 +1322,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1320 eiter->len = cpu_to_be16(4 + alen); 1322 eiter->len = cpu_to_be16(4 + alen);
1321 size += 4 + alen; 1323 size += 4 + alen;
1322 1324
1323 DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, vha->host_no, 1325 ql_dbg(ql_dbg_disc, vha, 0x2027,
1324 eiter->a.serial_num)); 1326 "Serial no. = %s.\n", eiter->a.serial_num);
1325 1327
1326 /* Model name. */ 1328 /* Model name. */
1327 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1329 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1332,8 +1334,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1332 eiter->len = cpu_to_be16(4 + alen); 1334 eiter->len = cpu_to_be16(4 + alen);
1333 size += 4 + alen; 1335 size += 4 + alen;
1334 1336
1335 DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, vha->host_no, 1337 ql_dbg(ql_dbg_disc, vha, 0x2028,
1336 eiter->a.model)); 1338 "Model Name = %s.\n", eiter->a.model);
1337 1339
1338 /* Model description. */ 1340 /* Model description. */
1339 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1341 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1345,8 +1347,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1345 eiter->len = cpu_to_be16(4 + alen); 1347 eiter->len = cpu_to_be16(4 + alen);
1346 size += 4 + alen; 1348 size += 4 + alen;
1347 1349
1348 DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, vha->host_no, 1350 ql_dbg(ql_dbg_disc, vha, 0x2029,
1349 eiter->a.model_desc)); 1351 "Model Desc = %s.\n", eiter->a.model_desc);
1350 1352
1351 /* Hardware version. */ 1353 /* Hardware version. */
1352 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1354 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1357,8 +1359,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1357 eiter->len = cpu_to_be16(4 + alen); 1359 eiter->len = cpu_to_be16(4 + alen);
1358 size += 4 + alen; 1360 size += 4 + alen;
1359 1361
1360 DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, vha->host_no, 1362 ql_dbg(ql_dbg_disc, vha, 0x202a,
1361 eiter->a.hw_version)); 1363 "Hardware ver = %s.\n", eiter->a.hw_version);
1362 1364
1363 /* Driver version. */ 1365 /* Driver version. */
1364 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1366 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1369,8 +1371,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1369 eiter->len = cpu_to_be16(4 + alen); 1371 eiter->len = cpu_to_be16(4 + alen);
1370 size += 4 + alen; 1372 size += 4 + alen;
1371 1373
1372 DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, vha->host_no, 1374 ql_dbg(ql_dbg_disc, vha, 0x202b,
1373 eiter->a.driver_version)); 1375 "Driver ver = %s.\n", eiter->a.driver_version);
1374 1376
1375 /* Option ROM version. */ 1377 /* Option ROM version. */
1376 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1378 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1381,8 +1383,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1381 eiter->len = cpu_to_be16(4 + alen); 1383 eiter->len = cpu_to_be16(4 + alen);
1382 size += 4 + alen; 1384 size += 4 + alen;
1383 1385
1384 DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, vha->host_no, 1386 ql_dbg(ql_dbg_disc, vha , 0x202c,
1385 eiter->a.orom_version)); 1387 "Optrom vers = %s.\n", eiter->a.orom_version);
1386 1388
1387 /* Firmware version */ 1389 /* Firmware version */
1388 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1390 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1393,44 +1395,46 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1393 eiter->len = cpu_to_be16(4 + alen); 1395 eiter->len = cpu_to_be16(4 + alen);
1394 size += 4 + alen; 1396 size += 4 + alen;
1395 1397
1396 DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, vha->host_no, 1398 ql_dbg(ql_dbg_disc, vha, 0x202d,
1397 eiter->a.fw_version)); 1399 "Firmware vers = %s.\n", eiter->a.fw_version);
1398 1400
1399 /* Update MS request size. */ 1401 /* Update MS request size. */
1400 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 1402 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1401 1403
1402 DEBUG13(printk("%s(%ld): RHBA identifier=" 1404 ql_dbg(ql_dbg_disc, vha, 0x202e,
1403 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__, 1405 "RHBA identifier = "
1404 vha->host_no, ct_req->req.rhba.hba_identifier[0], 1406 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n",
1407 ct_req->req.rhba.hba_identifier[0],
1405 ct_req->req.rhba.hba_identifier[1], 1408 ct_req->req.rhba.hba_identifier[1],
1406 ct_req->req.rhba.hba_identifier[2], 1409 ct_req->req.rhba.hba_identifier[2],
1407 ct_req->req.rhba.hba_identifier[3], 1410 ct_req->req.rhba.hba_identifier[3],
1408 ct_req->req.rhba.hba_identifier[4], 1411 ct_req->req.rhba.hba_identifier[4],
1409 ct_req->req.rhba.hba_identifier[5], 1412 ct_req->req.rhba.hba_identifier[5],
1410 ct_req->req.rhba.hba_identifier[6], 1413 ct_req->req.rhba.hba_identifier[6],
1411 ct_req->req.rhba.hba_identifier[7], size)); 1414 ct_req->req.rhba.hba_identifier[7], size);
1412 DEBUG13(qla2x00_dump_buffer(entries, size)); 1415 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
1416 entries, size);
1413 1417
1414 /* Execute MS IOCB */ 1418 /* Execute MS IOCB */
1415 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 1419 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1416 sizeof(ms_iocb_entry_t)); 1420 sizeof(ms_iocb_entry_t));
1417 if (rval != QLA_SUCCESS) { 1421 if (rval != QLA_SUCCESS) {
1418 /*EMPTY*/ 1422 /*EMPTY*/
1419 DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n", 1423 ql_dbg(ql_dbg_disc, vha, 0x2030,
1420 vha->host_no, rval)); 1424 "RHBA issue IOCB failed (%d).\n", rval);
1421 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") != 1425 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
1422 QLA_SUCCESS) { 1426 QLA_SUCCESS) {
1423 rval = QLA_FUNCTION_FAILED; 1427 rval = QLA_FUNCTION_FAILED;
1424 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && 1428 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1425 ct_rsp->header.explanation_code == 1429 ct_rsp->header.explanation_code ==
1426 CT_EXPL_ALREADY_REGISTERED) { 1430 CT_EXPL_ALREADY_REGISTERED) {
1427 DEBUG2_13(printk("%s(%ld): HBA already registered.\n", 1431 ql_dbg(ql_dbg_disc, vha, 0x2034,
1428 __func__, vha->host_no)); 1432 "HBA already registered.\n");
1429 rval = QLA_ALREADY_REGISTERED; 1433 rval = QLA_ALREADY_REGISTERED;
1430 } 1434 }
1431 } else { 1435 } else {
1432 DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n", 1436 ql_dbg(ql_dbg_disc, vha, 0x2035,
1433 vha->host_no)); 1437 "RHBA exiting normally.\n");
1434 } 1438 }
1435 1439
1436 return rval; 1440 return rval;
@@ -1464,26 +1468,26 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
1464 /* Prepare FDMI command arguments -- portname. */ 1468 /* Prepare FDMI command arguments -- portname. */
1465 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE); 1469 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
1466 1470
1467 DEBUG13(printk("%s(%ld): DHBA portname=" 1471 ql_dbg(ql_dbg_disc, vha, 0x2036,
1468 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, vha->host_no, 1472 "DHBA portname = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
1469 ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1], 1473 ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
1470 ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3], 1474 ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
1471 ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5], 1475 ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
1472 ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7])); 1476 ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]);
1473 1477
1474 /* Execute MS IOCB */ 1478 /* Execute MS IOCB */
1475 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 1479 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1476 sizeof(ms_iocb_entry_t)); 1480 sizeof(ms_iocb_entry_t));
1477 if (rval != QLA_SUCCESS) { 1481 if (rval != QLA_SUCCESS) {
1478 /*EMPTY*/ 1482 /*EMPTY*/
1479 DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n", 1483 ql_dbg(ql_dbg_disc, vha, 0x2037,
1480 vha->host_no, rval)); 1484 "DHBA issue IOCB failed (%d).\n", rval);
1481 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") != 1485 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
1482 QLA_SUCCESS) { 1486 QLA_SUCCESS) {
1483 rval = QLA_FUNCTION_FAILED; 1487 rval = QLA_FUNCTION_FAILED;
1484 } else { 1488 } else {
1485 DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n", 1489 ql_dbg(ql_dbg_disc, vha, 0x2038,
1486 vha->host_no)); 1490 "DHBA exiting normally.\n");
1487 } 1491 }
1488 1492
1489 return rval; 1493 return rval;
@@ -1534,9 +1538,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1534 eiter->a.fc4_types[2] = 0x01; 1538 eiter->a.fc4_types[2] = 0x01;
1535 size += 4 + 32; 1539 size += 4 + 32;
1536 1540
1537 DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__, 1541 ql_dbg(ql_dbg_disc, vha, 0x2039,
1538 vha->host_no, eiter->a.fc4_types[2], 1542 "FC4_TYPES=%02x %02x.\n",
1539 eiter->a.fc4_types[1])); 1543 eiter->a.fc4_types[2],
1544 eiter->a.fc4_types[1]);
1540 1545
1541 /* Supported speed. */ 1546 /* Supported speed. */
1542 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1547 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1561,8 +1566,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1561 FDMI_PORT_SPEED_1GB); 1566 FDMI_PORT_SPEED_1GB);
1562 size += 4 + 4; 1567 size += 4 + 4;
1563 1568
1564 DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, vha->host_no, 1569 ql_dbg(ql_dbg_disc, vha, 0x203a,
1565 eiter->a.sup_speed)); 1570 "Supported_Speed=%x.\n", eiter->a.sup_speed);
1566 1571
1567 /* Current speed. */ 1572 /* Current speed. */
1568 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1573 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1596,8 +1601,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1596 } 1601 }
1597 size += 4 + 4; 1602 size += 4 + 4;
1598 1603
1599 DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, vha->host_no, 1604 ql_dbg(ql_dbg_disc, vha, 0x203b,
1600 eiter->a.cur_speed)); 1605 "Current_Speed=%x.\n", eiter->a.cur_speed);
1601 1606
1602 /* Max frame size. */ 1607 /* Max frame size. */
1603 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1608 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1609,8 +1614,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1609 eiter->a.max_frame_size = cpu_to_be32(max_frame_size); 1614 eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
1610 size += 4 + 4; 1615 size += 4 + 4;
1611 1616
1612 DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, vha->host_no, 1617 ql_dbg(ql_dbg_disc, vha, 0x203c,
1613 eiter->a.max_frame_size)); 1618 "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
1614 1619
1615 /* OS device name. */ 1620 /* OS device name. */
1616 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1621 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1621,8 +1626,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1621 eiter->len = cpu_to_be16(4 + alen); 1626 eiter->len = cpu_to_be16(4 + alen);
1622 size += 4 + alen; 1627 size += 4 + alen;
1623 1628
1624 DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, vha->host_no, 1629 ql_dbg(ql_dbg_disc, vha, 0x204b,
1625 eiter->a.os_dev_name)); 1630 "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
1626 1631
1627 /* Hostname. */ 1632 /* Hostname. */
1628 if (strlen(fc_host_system_hostname(vha->host))) { 1633 if (strlen(fc_host_system_hostname(vha->host))) {
@@ -1637,35 +1642,36 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1637 eiter->len = cpu_to_be16(4 + alen); 1642 eiter->len = cpu_to_be16(4 + alen);
1638 size += 4 + alen; 1643 size += 4 + alen;
1639 1644
1640 DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__, 1645 ql_dbg(ql_dbg_disc, vha, 0x203d,
1641 vha->host_no, eiter->a.host_name)); 1646 "HostName=%s.\n", eiter->a.host_name);
1642 } 1647 }
1643 1648
1644 /* Update MS request size. */ 1649 /* Update MS request size. */
1645 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 1650 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1646 1651
1647 DEBUG13(printk("%s(%ld): RPA portname=" 1652 ql_dbg(ql_dbg_disc, vha, 0x203e,
1648 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__, 1653 "RPA portname= %02x%02x%02x%02x%02X%02x%02x%02x size=%d.\n",
1649 vha->host_no, ct_req->req.rpa.port_name[0], 1654 ct_req->req.rpa.port_name[0], ct_req->req.rpa.port_name[1],
1650 ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2], 1655 ct_req->req.rpa.port_name[2], ct_req->req.rpa.port_name[3],
1651 ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4], 1656 ct_req->req.rpa.port_name[4], ct_req->req.rpa.port_name[5],
1652 ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6], 1657 ct_req->req.rpa.port_name[6], ct_req->req.rpa.port_name[7],
1653 ct_req->req.rpa.port_name[7], size)); 1658 size);
1654 DEBUG13(qla2x00_dump_buffer(entries, size)); 1659 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
1660 entries, size);
1655 1661
1656 /* Execute MS IOCB */ 1662 /* Execute MS IOCB */
1657 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 1663 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1658 sizeof(ms_iocb_entry_t)); 1664 sizeof(ms_iocb_entry_t));
1659 if (rval != QLA_SUCCESS) { 1665 if (rval != QLA_SUCCESS) {
1660 /*EMPTY*/ 1666 /*EMPTY*/
1661 DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n", 1667 ql_dbg(ql_dbg_disc, vha, 0x2040,
1662 vha->host_no, rval)); 1668 "RPA issue IOCB failed (%d).\n", rval);
1663 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") != 1669 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1664 QLA_SUCCESS) { 1670 QLA_SUCCESS) {
1665 rval = QLA_FUNCTION_FAILED; 1671 rval = QLA_FUNCTION_FAILED;
1666 } else { 1672 } else {
1667 DEBUG2(printk("scsi(%ld): RPA exiting normally.\n", 1673 ql_dbg(ql_dbg_disc, vha, 0x2041,
1668 vha->host_no)); 1674 "RPA exiting nornally.\n");
1669 } 1675 }
1670 1676
1671 return rval; 1677 return rval;
@@ -1749,8 +1755,8 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1749 sizeof(ms_iocb_entry_t)); 1755 sizeof(ms_iocb_entry_t));
1750 if (rval != QLA_SUCCESS) { 1756 if (rval != QLA_SUCCESS) {
1751 /*EMPTY*/ 1757 /*EMPTY*/
1752 DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB " 1758 ql_dbg(ql_dbg_disc, vha, 0x2023,
1753 "failed (%d).\n", vha->host_no, rval)); 1759 "GFPN_ID issue IOCB failed (%d).\n", rval);
1754 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 1760 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1755 "GFPN_ID") != QLA_SUCCESS) { 1761 "GFPN_ID") != QLA_SUCCESS) {
1756 rval = QLA_FUNCTION_FAILED; 1762 rval = QLA_FUNCTION_FAILED;
@@ -1860,8 +1866,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1860 sizeof(ms_iocb_entry_t)); 1866 sizeof(ms_iocb_entry_t));
1861 if (rval != QLA_SUCCESS) { 1867 if (rval != QLA_SUCCESS) {
1862 /*EMPTY*/ 1868 /*EMPTY*/
1863 DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB " 1869 ql_dbg(ql_dbg_disc, vha, 0x2059,
1864 "failed (%d).\n", vha->host_no, rval)); 1870 "GPSC issue IOCB failed (%d).\n", rval);
1865 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 1871 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1866 "GPSC")) != QLA_SUCCESS) { 1872 "GPSC")) != QLA_SUCCESS) {
1867 /* FM command unsupported? */ 1873 /* FM command unsupported? */
@@ -1870,9 +1876,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1870 CT_REASON_INVALID_COMMAND_CODE || 1876 CT_REASON_INVALID_COMMAND_CODE ||
1871 ct_rsp->header.reason_code == 1877 ct_rsp->header.reason_code ==
1872 CT_REASON_COMMAND_UNSUPPORTED)) { 1878 CT_REASON_COMMAND_UNSUPPORTED)) {
1873 DEBUG2(printk("scsi(%ld): GPSC command " 1879 ql_dbg(ql_dbg_disc, vha, 0x205a,
1874 "unsupported, disabling query...\n", 1880 "GPSC command unsupported, disabling "
1875 vha->host_no)); 1881 "query.\n");
1876 ha->flags.gpsc_supported = 0; 1882 ha->flags.gpsc_supported = 0;
1877 rval = QLA_FUNCTION_FAILED; 1883 rval = QLA_FUNCTION_FAILED;
1878 break; 1884 break;
@@ -1898,9 +1904,10 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1898 break; 1904 break;
1899 } 1905 }
1900 1906
1901 DEBUG2_3(printk("scsi(%ld): GPSC ext entry - " 1907 ql_dbg(ql_dbg_disc, vha, 0x205b,
1902 "fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x " 1908 "GPSC ext entry - fpn "
1903 "speed=%04x.\n", vha->host_no, 1909 "%02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
1910 "speed=%04x.\n",
1904 list[i].fabric_port_name[0], 1911 list[i].fabric_port_name[0],
1905 list[i].fabric_port_name[1], 1912 list[i].fabric_port_name[1],
1906 list[i].fabric_port_name[2], 1913 list[i].fabric_port_name[2],
@@ -1910,7 +1917,7 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1910 list[i].fabric_port_name[6], 1917 list[i].fabric_port_name[6],
1911 list[i].fabric_port_name[7], 1918 list[i].fabric_port_name[7],
1912 be16_to_cpu(ct_rsp->rsp.gpsc.speeds), 1919 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
1913 be16_to_cpu(ct_rsp->rsp.gpsc.speed))); 1920 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
1914 } 1921 }
1915 1922
1916 /* Last device exit. */ 1923 /* Last device exit. */
@@ -1968,14 +1975,12 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
1968 sizeof(ms_iocb_entry_t)); 1975 sizeof(ms_iocb_entry_t));
1969 1976
1970 if (rval != QLA_SUCCESS) { 1977 if (rval != QLA_SUCCESS) {
1971 DEBUG2_3(printk(KERN_INFO 1978 ql_dbg(ql_dbg_disc, vha, 0x205c,
1972 "scsi(%ld): GFF_ID issue IOCB failed " 1979 "GFF_ID issue IOCB failed (%d).\n", rval);
1973 "(%d).\n", vha->host_no, rval));
1974 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 1980 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1975 "GFF_ID") != QLA_SUCCESS) { 1981 "GFF_ID") != QLA_SUCCESS) {
1976 DEBUG2_3(printk(KERN_INFO 1982 ql_dbg(ql_dbg_disc, vha, 0x205d,
1977 "scsi(%ld): GFF_ID IOCB status had a " 1983 "GFF_ID IOCB status had a failure status code.\n");
1978 "failure status code\n", vha->host_no));
1979 } else { 1984 } else {
1980 fcp_scsi_features = 1985 fcp_scsi_features =
1981 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; 1986 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 920b76bfbb93..def694271bf7 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -153,11 +153,10 @@ qla2x00_async_iocb_timeout(srb_t *sp)
153 fc_port_t *fcport = sp->fcport; 153 fc_port_t *fcport = sp->fcport;
154 struct srb_ctx *ctx = sp->ctx; 154 struct srb_ctx *ctx = sp->ctx;
155 155
156 DEBUG2(printk(KERN_WARNING 156 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
157 "scsi(%ld:%x): Async-%s timeout - portid=%02x%02x%02x.\n", 157 "Async-%s timeout - portid=%02x%02x%02x.\n",
158 fcport->vha->host_no, sp->handle, 158 ctx->name, fcport->d_id.b.domain, fcport->d_id.b.area,
159 ctx->name, fcport->d_id.b.domain, 159 fcport->d_id.b.al_pa);
160 fcport->d_id.b.area, fcport->d_id.b.al_pa));
161 160
162 fcport->flags &= ~FCF_ASYNC_SENT; 161 fcport->flags &= ~FCF_ASYNC_SENT;
163 if (ctx->type == SRB_LOGIN_CMD) { 162 if (ctx->type == SRB_LOGIN_CMD) {
@@ -211,11 +210,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
211 if (rval != QLA_SUCCESS) 210 if (rval != QLA_SUCCESS)
212 goto done_free_sp; 211 goto done_free_sp;
213 212
214 DEBUG2(printk(KERN_DEBUG 213 ql_dbg(ql_dbg_disc, vha, 0x2072,
215 "scsi(%ld:%x): Async-login - loop-id=%x portid=%02x%02x%02x " 214 "Async-login - loopid=%x portid=%02x%02x%02x retries=%d.\n",
216 "retries=%d.\n", fcport->vha->host_no, sp->handle, fcport->loop_id, 215 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
217 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, 216 fcport->d_id.b.al_pa, fcport->login_retry);
218 fcport->login_retry));
219 return rval; 217 return rval;
220 218
221done_free_sp: 219done_free_sp:
@@ -259,10 +257,10 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
259 if (rval != QLA_SUCCESS) 257 if (rval != QLA_SUCCESS)
260 goto done_free_sp; 258 goto done_free_sp;
261 259
262 DEBUG2(printk(KERN_DEBUG 260 ql_dbg(ql_dbg_disc, vha, 0x2070,
263 "scsi(%ld:%x): Async-logout - loop-id=%x portid=%02x%02x%02x.\n", 261 "Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
264 fcport->vha->host_no, sp->handle, fcport->loop_id, 262 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
265 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa)); 263 fcport->d_id.b.al_pa);
266 return rval; 264 return rval;
267 265
268done_free_sp: 266done_free_sp:
@@ -309,11 +307,10 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
309 if (rval != QLA_SUCCESS) 307 if (rval != QLA_SUCCESS)
310 goto done_free_sp; 308 goto done_free_sp;
311 309
312 DEBUG2(printk(KERN_DEBUG 310 ql_dbg(ql_dbg_disc, vha, 0x206f,
313 "scsi(%ld:%x): Async-adisc - loop-id=%x portid=%02x%02x%02x.\n", 311 "Async-adisc - loopid=%x portid=%02x%02x%02x.\n",
314 fcport->vha->host_no, sp->handle, fcport->loop_id, 312 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
315 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa)); 313 fcport->d_id.b.al_pa);
316
317 return rval; 314 return rval;
318 315
319done_free_sp: 316done_free_sp:
@@ -362,11 +359,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
362 if (rval != QLA_SUCCESS) 359 if (rval != QLA_SUCCESS)
363 goto done_free_sp; 360 goto done_free_sp;
364 361
365 DEBUG2(printk(KERN_DEBUG 362 ql_dbg(ql_dbg_taskm, vha, 0x802f,
366 "scsi(%ld:%x): Async-tmf - loop-id=%x portid=%02x%02x%02x.\n", 363 "Async-tmf loop-id=%x portid=%02x%02x%02x.\n",
367 fcport->vha->host_no, sp->handle, fcport->loop_id, 364 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
368 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa)); 365 fcport->d_id.b.al_pa);
369
370 return rval; 366 return rval;
371 367
372done_free_sp: 368done_free_sp:
@@ -471,9 +467,8 @@ qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
471 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 467 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
472 468
473 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) { 469 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
474 DEBUG2_3_11(printk(KERN_WARNING 470 ql_dbg(ql_dbg_taskm, vha, 0x8030,
475 "%s(%ld): TM IOCB failed (%x).\n", 471 "TM IOCB failed (%x).\n", rval);
476 __func__, vha->host_no, rval));
477 } 472 }
478 473
479 return; 474 return;
@@ -519,11 +514,12 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
519 set_bit(0, ha->req_qid_map); 514 set_bit(0, ha->req_qid_map);
520 set_bit(0, ha->rsp_qid_map); 515 set_bit(0, ha->rsp_qid_map);
521 516
522 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); 517 ql_log(ql_log_info, vha, 0x0040,
518 "Configuring PCI space...\n");
523 rval = ha->isp_ops->pci_config(vha); 519 rval = ha->isp_ops->pci_config(vha);
524 if (rval) { 520 if (rval) {
525 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n", 521 ql_log(ql_log_warn, vha, 0x0044,
526 vha->host_no)); 522 "Unable to configure PCI space.\n");
527 return (rval); 523 return (rval);
528 } 524 }
529 525
@@ -531,20 +527,21 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
531 527
532 rval = qla2xxx_get_flash_info(vha); 528 rval = qla2xxx_get_flash_info(vha);
533 if (rval) { 529 if (rval) {
534 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n", 530 ql_log(ql_log_fatal, vha, 0x004f,
535 vha->host_no)); 531 "Unable to validate FLASH data.\n");
536 return (rval); 532 return (rval);
537 } 533 }
538 534
539 ha->isp_ops->get_flash_version(vha, req->ring); 535 ha->isp_ops->get_flash_version(vha, req->ring);
540 536 ql_log(ql_log_info, vha, 0x0061,
541 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); 537 "Configure NVRAM parameters...\n");
542 538
543 ha->isp_ops->nvram_config(vha); 539 ha->isp_ops->nvram_config(vha);
544 540
545 if (ha->flags.disable_serdes) { 541 if (ha->flags.disable_serdes) {
546 /* Mask HBA via NVRAM settings? */ 542 /* Mask HBA via NVRAM settings? */
547 qla_printk(KERN_INFO, ha, "Masking HBA WWPN " 543 ql_log(ql_log_info, vha, 0x0077,
544 "Masking HBA WWPN "
548 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n", 545 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
549 vha->port_name[0], vha->port_name[1], 546 vha->port_name[0], vha->port_name[1],
550 vha->port_name[2], vha->port_name[3], 547 vha->port_name[2], vha->port_name[3],
@@ -553,7 +550,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
553 return QLA_FUNCTION_FAILED; 550 return QLA_FUNCTION_FAILED;
554 } 551 }
555 552
556 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n"); 553 ql_log(ql_log_info, vha, 0x0078,
554 "Verifying loaded RISC code...\n");
557 555
558 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) { 556 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
559 rval = ha->isp_ops->chip_diag(vha); 557 rval = ha->isp_ops->chip_diag(vha);
@@ -567,7 +565,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
567 if (IS_QLA84XX(ha)) { 565 if (IS_QLA84XX(ha)) {
568 ha->cs84xx = qla84xx_get_chip(vha); 566 ha->cs84xx = qla84xx_get_chip(vha);
569 if (!ha->cs84xx) { 567 if (!ha->cs84xx) {
570 qla_printk(KERN_ERR, ha, 568 ql_log(ql_log_warn, vha, 0x00d0,
571 "Unable to configure ISP84XX.\n"); 569 "Unable to configure ISP84XX.\n");
572 return QLA_FUNCTION_FAILED; 570 return QLA_FUNCTION_FAILED;
573 } 571 }
@@ -579,8 +577,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
579 /* Issue verify 84xx FW IOCB to complete 84xx initialization */ 577 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
580 rval = qla84xx_init_chip(vha); 578 rval = qla84xx_init_chip(vha);
581 if (rval != QLA_SUCCESS) { 579 if (rval != QLA_SUCCESS) {
582 qla_printk(KERN_ERR, ha, 580 ql_log(ql_log_warn, vha, 0x00d4,
583 "Unable to initialize ISP84XX.\n"); 581 "Unable to initialize ISP84XX.\n");
584 qla84xx_put_chip(vha); 582 qla84xx_put_chip(vha);
585 } 583 }
586 } 584 }
@@ -797,9 +795,7 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha)
797 rval = QLA_FUNCTION_FAILED; 795 rval = QLA_FUNCTION_FAILED;
798 796
799 if (ha->flags.disable_risc_code_load) { 797 if (ha->flags.disable_risc_code_load) {
800 DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n", 798 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
801 vha->host_no));
802 qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
803 799
804 /* Verify checksum of loaded RISC code. */ 800 /* Verify checksum of loaded RISC code. */
805 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address); 801 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
@@ -810,10 +806,9 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha)
810 } 806 }
811 } 807 }
812 808
813 if (rval) { 809 if (rval)
814 DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n", 810 ql_dbg(ql_dbg_init, vha, 0x007a,
815 vha->host_no)); 811 "**** Load RISC code ****.\n");
816 }
817 812
818 return (rval); 813 return (rval);
819} 814}
@@ -1105,8 +1100,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1105 /* Assume a failed state */ 1100 /* Assume a failed state */
1106 rval = QLA_FUNCTION_FAILED; 1101 rval = QLA_FUNCTION_FAILED;
1107 1102
1108 DEBUG3(printk("scsi(%ld): Testing device at %lx.\n", 1103 ql_dbg(ql_dbg_init, vha, 0x007b,
1109 vha->host_no, (u_long)&reg->flash_address)); 1104 "Testing device at %lx.\n", (u_long)&reg->flash_address);
1110 1105
1111 spin_lock_irqsave(&ha->hardware_lock, flags); 1106 spin_lock_irqsave(&ha->hardware_lock, flags);
1112 1107
@@ -1128,8 +1123,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1128 if (!cnt) 1123 if (!cnt)
1129 goto chip_diag_failed; 1124 goto chip_diag_failed;
1130 1125
1131 DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n", 1126 ql_dbg(ql_dbg_init, vha, 0x007c,
1132 vha->host_no)); 1127 "Reset register cleared by chip reset.\n");
1133 1128
1134 /* Reset RISC processor. */ 1129 /* Reset RISC processor. */
1135 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 1130 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@@ -1150,7 +1145,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1150 goto chip_diag_failed; 1145 goto chip_diag_failed;
1151 1146
1152 /* Check product ID of chip */ 1147 /* Check product ID of chip */
1153 DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no)); 1148 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product Id of chip.\n");
1154 1149
1155 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 1150 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
1156 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 1151 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
@@ -1158,8 +1153,9 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1158 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4)); 1153 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
1159 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) || 1154 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
1160 mb[3] != PROD_ID_3) { 1155 mb[3] != PROD_ID_3) {
1161 qla_printk(KERN_WARNING, ha, 1156 ql_log(ql_log_warn, vha, 0x0062,
1162 "Wrong product ID = 0x%x,0x%x,0x%x\n", mb[1], mb[2], mb[3]); 1157 "Wrong product ID = 0x%x,0x%x,0x%x.\n",
1158 mb[1], mb[2], mb[3]);
1163 1159
1164 goto chip_diag_failed; 1160 goto chip_diag_failed;
1165 } 1161 }
@@ -1178,8 +1174,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1178 if (IS_QLA2200(ha) && 1174 if (IS_QLA2200(ha) &&
1179 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { 1175 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
1180 /* Limit firmware transfer size with a 2200A */ 1176 /* Limit firmware transfer size with a 2200A */
1181 DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n", 1177 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
1182 vha->host_no));
1183 1178
1184 ha->device_type |= DT_ISP2200A; 1179 ha->device_type |= DT_ISP2200A;
1185 ha->fw_transfer_size = 128; 1180 ha->fw_transfer_size = 128;
@@ -1188,24 +1183,20 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
1188 /* Wrap Incoming Mailboxes Test. */ 1183 /* Wrap Incoming Mailboxes Test. */
1189 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1184 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1190 1185
1191 DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no)); 1186 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
1192 rval = qla2x00_mbx_reg_test(vha); 1187 rval = qla2x00_mbx_reg_test(vha);
1193 if (rval) { 1188 if (rval)
1194 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 1189 ql_log(ql_log_warn, vha, 0x0080,
1195 vha->host_no)); 1190 "Failed mailbox send register test.\n");
1196 qla_printk(KERN_WARNING, ha, 1191 else
1197 "Failed mailbox send register test\n");
1198 }
1199 else {
1200 /* Flag a successful rval */ 1192 /* Flag a successful rval */
1201 rval = QLA_SUCCESS; 1193 rval = QLA_SUCCESS;
1202 }
1203 spin_lock_irqsave(&ha->hardware_lock, flags); 1194 spin_lock_irqsave(&ha->hardware_lock, flags);
1204 1195
1205chip_diag_failed: 1196chip_diag_failed:
1206 if (rval) 1197 if (rval)
1207 DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED " 1198 ql_log(ql_log_info, vha, 0x0081,
1208 "****\n", vha->host_no)); 1199 "Chip diagnostics **** FAILED ****.\n");
1209 1200
1210 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1201 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1211 1202
@@ -1232,10 +1223,8 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
1232 1223
1233 rval = qla2x00_mbx_reg_test(vha); 1224 rval = qla2x00_mbx_reg_test(vha);
1234 if (rval) { 1225 if (rval) {
1235 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 1226 ql_log(ql_log_warn, vha, 0x0082,
1236 vha->host_no)); 1227 "Failed mailbox send register test.\n");
1237 qla_printk(KERN_WARNING, ha,
1238 "Failed mailbox send register test\n");
1239 } else { 1228 } else {
1240 /* Flag a successful rval */ 1229 /* Flag a successful rval */
1241 rval = QLA_SUCCESS; 1230 rval = QLA_SUCCESS;
@@ -1257,8 +1246,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1257 struct rsp_que *rsp = ha->rsp_q_map[0]; 1246 struct rsp_que *rsp = ha->rsp_q_map[0];
1258 1247
1259 if (ha->fw_dump) { 1248 if (ha->fw_dump) {
1260 qla_printk(KERN_WARNING, ha, 1249 ql_dbg(ql_dbg_init, vha, 0x00bd,
1261 "Firmware dump previously allocated.\n"); 1250 "Firmware dump already allocated.\n");
1262 return; 1251 return;
1263 } 1252 }
1264 1253
@@ -1288,8 +1277,9 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1288 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 1277 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
1289 GFP_KERNEL); 1278 GFP_KERNEL);
1290 if (!tc) { 1279 if (!tc) {
1291 qla_printk(KERN_WARNING, ha, "Unable to allocate " 1280 ql_log(ql_log_warn, vha, 0x00be,
1292 "(%d KB) for FCE.\n", FCE_SIZE / 1024); 1281 "Unable to allocate (%d KB) for FCE.\n",
1282 FCE_SIZE / 1024);
1293 goto try_eft; 1283 goto try_eft;
1294 } 1284 }
1295 1285
@@ -1297,16 +1287,15 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1297 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS, 1287 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
1298 ha->fce_mb, &ha->fce_bufs); 1288 ha->fce_mb, &ha->fce_bufs);
1299 if (rval) { 1289 if (rval) {
1300 qla_printk(KERN_WARNING, ha, "Unable to initialize " 1290 ql_log(ql_log_warn, vha, 0x00bf,
1301 "FCE (%d).\n", rval); 1291 "Unable to initialize FCE (%d).\n", rval);
1302 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, 1292 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
1303 tc_dma); 1293 tc_dma);
1304 ha->flags.fce_enabled = 0; 1294 ha->flags.fce_enabled = 0;
1305 goto try_eft; 1295 goto try_eft;
1306 } 1296 }
1307 1297 ql_log(ql_log_info, vha, 0x00c0,
1308 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n", 1298 "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
1309 FCE_SIZE / 1024);
1310 1299
1311 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; 1300 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
1312 ha->flags.fce_enabled = 1; 1301 ha->flags.fce_enabled = 1;
@@ -1317,23 +1306,23 @@ try_eft:
1317 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, 1306 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
1318 GFP_KERNEL); 1307 GFP_KERNEL);
1319 if (!tc) { 1308 if (!tc) {
1320 qla_printk(KERN_WARNING, ha, "Unable to allocate " 1309 ql_log(ql_log_warn, vha, 0x00c1,
1321 "(%d KB) for EFT.\n", EFT_SIZE / 1024); 1310 "Unable to allocate (%d KB) for EFT.\n",
1311 EFT_SIZE / 1024);
1322 goto cont_alloc; 1312 goto cont_alloc;
1323 } 1313 }
1324 1314
1325 memset(tc, 0, EFT_SIZE); 1315 memset(tc, 0, EFT_SIZE);
1326 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); 1316 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
1327 if (rval) { 1317 if (rval) {
1328 qla_printk(KERN_WARNING, ha, "Unable to initialize " 1318 ql_log(ql_log_warn, vha, 0x00c2,
1329 "EFT (%d).\n", rval); 1319 "Unable to initialize EFT (%d).\n", rval);
1330 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, 1320 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
1331 tc_dma); 1321 tc_dma);
1332 goto cont_alloc; 1322 goto cont_alloc;
1333 } 1323 }
1334 1324 ql_log(ql_log_info, vha, 0x00c3,
1335 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n", 1325 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
1336 EFT_SIZE / 1024);
1337 1326
1338 eft_size = EFT_SIZE; 1327 eft_size = EFT_SIZE;
1339 ha->eft_dma = tc_dma; 1328 ha->eft_dma = tc_dma;
@@ -1350,8 +1339,9 @@ cont_alloc:
1350 1339
1351 ha->fw_dump = vmalloc(dump_size); 1340 ha->fw_dump = vmalloc(dump_size);
1352 if (!ha->fw_dump) { 1341 if (!ha->fw_dump) {
1353 qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for " 1342 ql_log(ql_log_warn, vha, 0x00c4,
1354 "firmware dump!!!\n", dump_size / 1024); 1343 "Unable to allocate (%d KB) for firmware dump.\n",
1344 dump_size / 1024);
1355 1345
1356 if (ha->fce) { 1346 if (ha->fce) {
1357 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 1347 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
@@ -1368,8 +1358,8 @@ cont_alloc:
1368 } 1358 }
1369 return; 1359 return;
1370 } 1360 }
1371 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n", 1361 ql_log(ql_log_info, vha, 0x00c5,
1372 dump_size / 1024); 1362 "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
1373 1363
1374 ha->fw_dump_len = dump_size; 1364 ha->fw_dump_len = dump_size;
1375 ha->fw_dump->signature[0] = 'Q'; 1365 ha->fw_dump->signature[0] = 'Q';
@@ -1398,23 +1388,21 @@ qla81xx_mpi_sync(scsi_qla_host_t *vha)
1398 int rval; 1388 int rval;
1399 uint16_t dc; 1389 uint16_t dc;
1400 uint32_t dw; 1390 uint32_t dw;
1401 struct qla_hw_data *ha = vha->hw;
1402 1391
1403 if (!IS_QLA81XX(vha->hw)) 1392 if (!IS_QLA81XX(vha->hw))
1404 return QLA_SUCCESS; 1393 return QLA_SUCCESS;
1405 1394
1406 rval = qla2x00_write_ram_word(vha, 0x7c00, 1); 1395 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
1407 if (rval != QLA_SUCCESS) { 1396 if (rval != QLA_SUCCESS) {
1408 DEBUG2(qla_printk(KERN_WARNING, ha, 1397 ql_log(ql_log_warn, vha, 0x0105,
1409 "Sync-MPI: Unable to acquire semaphore.\n")); 1398 "Unable to acquire semaphore.\n");
1410 goto done; 1399 goto done;
1411 } 1400 }
1412 1401
1413 pci_read_config_word(vha->hw->pdev, 0x54, &dc); 1402 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
1414 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw); 1403 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
1415 if (rval != QLA_SUCCESS) { 1404 if (rval != QLA_SUCCESS) {
1416 DEBUG2(qla_printk(KERN_WARNING, ha, 1405 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
1417 "Sync-MPI: Unable to read sync.\n"));
1418 goto done_release; 1406 goto done_release;
1419 } 1407 }
1420 1408
@@ -1426,15 +1414,14 @@ qla81xx_mpi_sync(scsi_qla_host_t *vha)
1426 dw |= dc; 1414 dw |= dc;
1427 rval = qla2x00_write_ram_word(vha, 0x7a15, dw); 1415 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
1428 if (rval != QLA_SUCCESS) { 1416 if (rval != QLA_SUCCESS) {
1429 DEBUG2(qla_printk(KERN_WARNING, ha, 1417 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
1430 "Sync-MPI: Unable to gain sync.\n"));
1431 } 1418 }
1432 1419
1433done_release: 1420done_release:
1434 rval = qla2x00_write_ram_word(vha, 0x7c00, 0); 1421 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
1435 if (rval != QLA_SUCCESS) { 1422 if (rval != QLA_SUCCESS) {
1436 DEBUG2(qla_printk(KERN_WARNING, ha, 1423 ql_log(ql_log_warn, vha, 0x006d,
1437 "Sync-MPI: Unable to release semaphore.\n")); 1424 "Unable to release semaphore.\n");
1438 } 1425 }
1439 1426
1440done: 1427done:
@@ -1479,14 +1466,14 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
1479 /* Load firmware sequences */ 1466 /* Load firmware sequences */
1480 rval = ha->isp_ops->load_risc(vha, &srisc_address); 1467 rval = ha->isp_ops->load_risc(vha, &srisc_address);
1481 if (rval == QLA_SUCCESS) { 1468 if (rval == QLA_SUCCESS) {
1482 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC " 1469 ql_dbg(ql_dbg_init, vha, 0x00c9,
1483 "code.\n", vha->host_no)); 1470 "Verifying Checksum of loaded RISC code.\n");
1484 1471
1485 rval = qla2x00_verify_checksum(vha, srisc_address); 1472 rval = qla2x00_verify_checksum(vha, srisc_address);
1486 if (rval == QLA_SUCCESS) { 1473 if (rval == QLA_SUCCESS) {
1487 /* Start firmware execution. */ 1474 /* Start firmware execution. */
1488 DEBUG(printk("scsi(%ld): Checksum OK, start " 1475 ql_dbg(ql_dbg_init, vha, 0x00ca,
1489 "firmware.\n", vha->host_no)); 1476 "Starting firmware.\n");
1490 1477
1491 rval = qla2x00_execute_fw(vha, srisc_address); 1478 rval = qla2x00_execute_fw(vha, srisc_address);
1492 /* Retrieve firmware information. */ 1479 /* Retrieve firmware information. */
@@ -1522,9 +1509,9 @@ enable_82xx_npiv:
1522 } 1509 }
1523 } 1510 }
1524 } else { 1511 } else {
1525 DEBUG2(printk(KERN_INFO 1512 ql_log(ql_log_fatal, vha, 0x00cd,
1526 "scsi(%ld): ISP Firmware failed checksum.\n", 1513 "ISP Firmware failed checksum.\n");
1527 vha->host_no)); 1514 goto failed;
1528 } 1515 }
1529 } 1516 }
1530 1517
@@ -1549,7 +1536,7 @@ enable_82xx_npiv:
1549 ha->flags.fac_supported = 1; 1536 ha->flags.fac_supported = 1;
1550 ha->fdt_block_size = size << 2; 1537 ha->fdt_block_size = size << 2;
1551 } else { 1538 } else {
1552 qla_printk(KERN_ERR, ha, 1539 ql_log(ql_log_warn, vha, 0x00ce,
1553 "Unsupported FAC firmware (%d.%02d.%02d).\n", 1540 "Unsupported FAC firmware (%d.%02d.%02d).\n",
1554 ha->fw_major_version, ha->fw_minor_version, 1541 ha->fw_major_version, ha->fw_minor_version,
1555 ha->fw_subminor_version); 1542 ha->fw_subminor_version);
@@ -1557,8 +1544,8 @@ enable_82xx_npiv:
1557 } 1544 }
1558failed: 1545failed:
1559 if (rval) { 1546 if (rval) {
1560 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", 1547 ql_log(ql_log_fatal, vha, 0x00cf,
1561 vha->host_no)); 1548 "Setup chip ****FAILED****.\n");
1562 } 1549 }
1563 1550
1564 return (rval); 1551 return (rval);
@@ -1608,10 +1595,11 @@ qla2x00_update_fw_options(scsi_qla_host_t *vha)
1608 return; 1595 return;
1609 1596
1610 /* Serial Link options. */ 1597 /* Serial Link options. */
1611 DEBUG3(printk("scsi(%ld): Serial link options:\n", 1598 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
1612 vha->host_no)); 1599 "Serial link options.\n");
1613 DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options, 1600 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
1614 sizeof(ha->fw_seriallink_options))); 1601 (uint8_t *)&ha->fw_seriallink_options,
1602 sizeof(ha->fw_seriallink_options));
1615 1603
1616 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 1604 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
1617 if (ha->fw_seriallink_options[3] & BIT_2) { 1605 if (ha->fw_seriallink_options[3] & BIT_2) {
@@ -1688,7 +1676,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
1688 le16_to_cpu(ha->fw_seriallink_options24[2]), 1676 le16_to_cpu(ha->fw_seriallink_options24[2]),
1689 le16_to_cpu(ha->fw_seriallink_options24[3])); 1677 le16_to_cpu(ha->fw_seriallink_options24[3]));
1690 if (rval != QLA_SUCCESS) { 1678 if (rval != QLA_SUCCESS) {
1691 qla_printk(KERN_WARNING, ha, 1679 ql_log(ql_log_warn, vha, 0x0104,
1692 "Unable to update Serial Link options (%x).\n", rval); 1680 "Unable to update Serial Link options (%x).\n", rval);
1693 } 1681 }
1694} 1682}
@@ -1746,8 +1734,9 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1746 icb->rid = __constant_cpu_to_le16(rid); 1734 icb->rid = __constant_cpu_to_le16(rid);
1747 if (ha->flags.msix_enabled) { 1735 if (ha->flags.msix_enabled) {
1748 msix = &ha->msix_entries[1]; 1736 msix = &ha->msix_entries[1];
1749 DEBUG2_17(printk(KERN_INFO 1737 ql_dbg(ql_dbg_init, vha, 0x00fd,
1750 "Registering vector 0x%x for base que\n", msix->entry)); 1738 "Registering vector 0x%x for base que.\n",
1739 msix->entry);
1751 icb->msix = cpu_to_le16(msix->entry); 1740 icb->msix = cpu_to_le16(msix->entry);
1752 } 1741 }
1753 /* Use alternate PCI bus number */ 1742 /* Use alternate PCI bus number */
@@ -1764,8 +1753,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1764 icb->firmware_options_2 &= 1753 icb->firmware_options_2 &=
1765 __constant_cpu_to_le32(~BIT_22); 1754 __constant_cpu_to_le32(~BIT_22);
1766 ha->flags.disable_msix_handshake = 1; 1755 ha->flags.disable_msix_handshake = 1;
1767 qla_printk(KERN_INFO, ha, 1756 ql_dbg(ql_dbg_init, vha, 0x00fe,
1768 "MSIX Handshake Disable Mode turned on\n"); 1757 "MSIX Handshake Disable Mode turned on.\n");
1769 } else { 1758 } else {
1770 icb->firmware_options_2 |= 1759 icb->firmware_options_2 |=
1771 __constant_cpu_to_le32(BIT_22); 1760 __constant_cpu_to_le32(BIT_22);
@@ -1850,7 +1839,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1850 /* Update any ISP specific firmware options before initialization. */ 1839 /* Update any ISP specific firmware options before initialization. */
1851 ha->isp_ops->update_fw_options(vha); 1840 ha->isp_ops->update_fw_options(vha);
1852 1841
1853 DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no)); 1842 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
1854 1843
1855 if (ha->flags.npiv_supported) { 1844 if (ha->flags.npiv_supported) {
1856 if (ha->operating_mode == LOOP) 1845 if (ha->operating_mode == LOOP)
@@ -1866,11 +1855,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1866 1855
1867 rval = qla2x00_init_firmware(vha, ha->init_cb_size); 1856 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
1868 if (rval) { 1857 if (rval) {
1869 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n", 1858 ql_log(ql_log_fatal, vha, 0x00d2,
1870 vha->host_no)); 1859 "Init Firmware **** FAILED ****.\n");
1871 } else { 1860 } else {
1872 DEBUG3(printk("scsi(%ld): Init firmware -- success.\n", 1861 ql_dbg(ql_dbg_init, vha, 0x00d3,
1873 vha->host_no)); 1862 "Init Firmware -- success.\n");
1874 } 1863 }
1875 1864
1876 return (rval); 1865 return (rval);
@@ -1913,10 +1902,8 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1913 1902
1914 /* Wait for ISP to finish LIP */ 1903 /* Wait for ISP to finish LIP */
1915 if (!vha->flags.init_done) 1904 if (!vha->flags.init_done)
1916 qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n"); 1905 ql_log(ql_log_info, vha, 0x801e,
1917 1906 "Waiting for LIP to complete.\n");
1918 DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
1919 vha->host_no));
1920 1907
1921 do { 1908 do {
1922 rval = qla2x00_get_firmware_state(vha, state); 1909 rval = qla2x00_get_firmware_state(vha, state);
@@ -1925,30 +1912,35 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1925 vha->device_flags &= ~DFLG_NO_CABLE; 1912 vha->device_flags &= ~DFLG_NO_CABLE;
1926 } 1913 }
1927 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { 1914 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
1928 DEBUG16(printk("scsi(%ld): fw_state=%x " 1915 ql_dbg(ql_dbg_taskm, vha, 0x801f,
1929 "84xx=%x.\n", vha->host_no, state[0], 1916 "fw_state=%x 84xx=%x.\n", state[0],
1930 state[2])); 1917 state[2]);
1931 if ((state[2] & FSTATE_LOGGED_IN) && 1918 if ((state[2] & FSTATE_LOGGED_IN) &&
1932 (state[2] & FSTATE_WAITING_FOR_VERIFY)) { 1919 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
1933 DEBUG16(printk("scsi(%ld): Sending " 1920 ql_dbg(ql_dbg_taskm, vha, 0x8028,
1934 "verify iocb.\n", vha->host_no)); 1921 "Sending verify iocb.\n");
1935 1922
1936 cs84xx_time = jiffies; 1923 cs84xx_time = jiffies;
1937 rval = qla84xx_init_chip(vha); 1924 rval = qla84xx_init_chip(vha);
1938 if (rval != QLA_SUCCESS) 1925 if (rval != QLA_SUCCESS) {
1926 ql_log(ql_log_warn,
1927 vha, 0x8043,
1928 "Init chip failed.\n");
1939 break; 1929 break;
1930 }
1940 1931
1941 /* Add time taken to initialize. */ 1932 /* Add time taken to initialize. */
1942 cs84xx_time = jiffies - cs84xx_time; 1933 cs84xx_time = jiffies - cs84xx_time;
1943 wtime += cs84xx_time; 1934 wtime += cs84xx_time;
1944 mtime += cs84xx_time; 1935 mtime += cs84xx_time;
1945 DEBUG16(printk("scsi(%ld): Increasing " 1936 ql_dbg(ql_dbg_taskm, vha, 0x8042,
1946 "wait time by %ld. New time %ld\n", 1937 "Increasing wait time by %ld. "
1947 vha->host_no, cs84xx_time, wtime)); 1938 "New time %ld.\n", cs84xx_time,
1939 wtime);
1948 } 1940 }
1949 } else if (state[0] == FSTATE_READY) { 1941 } else if (state[0] == FSTATE_READY) {
1950 DEBUG(printk("scsi(%ld): F/W Ready - OK \n", 1942 ql_dbg(ql_dbg_taskm, vha, 0x8037,
1951 vha->host_no)); 1943 "F/W Ready - OK.\n");
1952 1944
1953 qla2x00_get_retry_cnt(vha, &ha->retry_count, 1945 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1954 &ha->login_timeout, &ha->r_a_tov); 1946 &ha->login_timeout, &ha->r_a_tov);
@@ -1965,7 +1957,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1965 * other than Wait for Login. 1957 * other than Wait for Login.
1966 */ 1958 */
1967 if (time_after_eq(jiffies, mtime)) { 1959 if (time_after_eq(jiffies, mtime)) {
1968 qla_printk(KERN_INFO, ha, 1960 ql_log(ql_log_info, vha, 0x8038,
1969 "Cable is unplugged...\n"); 1961 "Cable is unplugged...\n");
1970 1962
1971 vha->device_flags |= DFLG_NO_CABLE; 1963 vha->device_flags |= DFLG_NO_CABLE;
@@ -1985,17 +1977,17 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1985 /* Delay for a while */ 1977 /* Delay for a while */
1986 msleep(500); 1978 msleep(500);
1987 1979
1988 DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1980 ql_dbg(ql_dbg_taskm, vha, 0x8039,
1989 vha->host_no, state[0], jiffies)); 1981 "fw_state=%x curr time=%lx.\n", state[0], jiffies);
1990 } while (1); 1982 } while (1);
1991 1983
1992 DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n", 1984 ql_dbg(ql_dbg_taskm, vha, 0x803a,
1993 vha->host_no, state[0], state[1], state[2], state[3], state[4], 1985 "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0],
1994 jiffies)); 1986 state[1], state[2], state[3], state[4], jiffies);
1995 1987
1996 if (rval) { 1988 if (rval) {
1997 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", 1989 ql_log(ql_log_warn, vha, 0x803b,
1998 vha->host_no)); 1990 "Firmware ready **** FAILED ****.\n");
1999 } 1991 }
2000 1992
2001 return (rval); 1993 return (rval);
@@ -2034,19 +2026,19 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2034 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || 2026 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
2035 IS_QLA8XXX_TYPE(ha) || 2027 IS_QLA8XXX_TYPE(ha) ||
2036 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { 2028 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
2037 DEBUG2(printk("%s(%ld) Loop is in a transition state\n", 2029 ql_dbg(ql_dbg_disc, vha, 0x2008,
2038 __func__, vha->host_no)); 2030 "Loop is in a transition state.\n");
2039 } else { 2031 } else {
2040 qla_printk(KERN_WARNING, ha, 2032 ql_log(ql_log_warn, vha, 0x2009,
2041 "ERROR -- Unable to get host loop ID.\n"); 2033 "Unable to get host loop ID.\n");
2042 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2034 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2043 } 2035 }
2044 return (rval); 2036 return (rval);
2045 } 2037 }
2046 2038
2047 if (topo == 4) { 2039 if (topo == 4) {
2048 qla_printk(KERN_INFO, ha, 2040 ql_log(ql_log_info, vha, 0x200a,
2049 "Cannot get topology - retrying.\n"); 2041 "Cannot get topology - retrying.\n");
2050 return (QLA_FUNCTION_FAILED); 2042 return (QLA_FUNCTION_FAILED);
2051 } 2043 }
2052 2044
@@ -2059,31 +2051,27 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2059 2051
2060 switch (topo) { 2052 switch (topo) {
2061 case 0: 2053 case 0:
2062 DEBUG3(printk("scsi(%ld): HBA in NL topology.\n", 2054 ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
2063 vha->host_no));
2064 ha->current_topology = ISP_CFG_NL; 2055 ha->current_topology = ISP_CFG_NL;
2065 strcpy(connect_type, "(Loop)"); 2056 strcpy(connect_type, "(Loop)");
2066 break; 2057 break;
2067 2058
2068 case 1: 2059 case 1:
2069 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n", 2060 ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
2070 vha->host_no));
2071 ha->switch_cap = sw_cap; 2061 ha->switch_cap = sw_cap;
2072 ha->current_topology = ISP_CFG_FL; 2062 ha->current_topology = ISP_CFG_FL;
2073 strcpy(connect_type, "(FL_Port)"); 2063 strcpy(connect_type, "(FL_Port)");
2074 break; 2064 break;
2075 2065
2076 case 2: 2066 case 2:
2077 DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n", 2067 ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
2078 vha->host_no));
2079 ha->operating_mode = P2P; 2068 ha->operating_mode = P2P;
2080 ha->current_topology = ISP_CFG_N; 2069 ha->current_topology = ISP_CFG_N;
2081 strcpy(connect_type, "(N_Port-to-N_Port)"); 2070 strcpy(connect_type, "(N_Port-to-N_Port)");
2082 break; 2071 break;
2083 2072
2084 case 3: 2073 case 3:
2085 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n", 2074 ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
2086 vha->host_no));
2087 ha->switch_cap = sw_cap; 2075 ha->switch_cap = sw_cap;
2088 ha->operating_mode = P2P; 2076 ha->operating_mode = P2P;
2089 ha->current_topology = ISP_CFG_F; 2077 ha->current_topology = ISP_CFG_F;
@@ -2091,9 +2079,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2091 break; 2079 break;
2092 2080
2093 default: 2081 default:
2094 DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. " 2082 ql_dbg(ql_dbg_disc, vha, 0x200f,
2095 "Using NL.\n", 2083 "HBA in unknown topology %x, using NL.\n", topo);
2096 vha->host_no, topo));
2097 ha->current_topology = ISP_CFG_NL; 2084 ha->current_topology = ISP_CFG_NL;
2098 strcpy(connect_type, "(Loop)"); 2085 strcpy(connect_type, "(Loop)");
2099 break; 2086 break;
@@ -2106,14 +2093,16 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2106 vha->d_id.b.al_pa = al_pa; 2093 vha->d_id.b.al_pa = al_pa;
2107 2094
2108 if (!vha->flags.init_done) 2095 if (!vha->flags.init_done)
2109 qla_printk(KERN_INFO, ha, 2096 ql_log(ql_log_info, vha, 0x2010,
2110 "Topology - %s, Host Loop address 0x%x\n", 2097 "Topology - %s, Host Loop address 0x%x.\n",
2111 connect_type, vha->loop_id); 2098 connect_type, vha->loop_id);
2112 2099
2113 if (rval) { 2100 if (rval) {
2114 DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no)); 2101 ql_log(ql_log_warn, vha, 0x2011,
2102 "%s FAILED\n", __func__);
2115 } else { 2103 } else {
2116 DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no)); 2104 ql_dbg(ql_dbg_disc, vha, 0x2012,
2105 "%s success\n", __func__);
2117 } 2106 }
2118 2107
2119 return(rval); 2108 return(rval);
@@ -2227,18 +2216,22 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2227 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) 2216 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
2228 chksum += *ptr++; 2217 chksum += *ptr++;
2229 2218
2230 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); 2219 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
2231 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 2220 "Contents of NVRAM.\n");
2221 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
2222 (uint8_t *)nv, ha->nvram_size);
2232 2223
2233 /* Bad NVRAM data, set defaults parameters. */ 2224 /* Bad NVRAM data, set defaults parameters. */
2234 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || 2225 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2235 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) { 2226 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
2236 /* Reset NVRAM data. */ 2227 /* Reset NVRAM data. */
2237 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 2228 ql_log(ql_log_warn, vha, 0x0064,
2238 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 2229 "Inconisistent NVRAM "
2239 nv->nvram_version); 2230 "detected: checksum=0x%x id=%c version=0x%x.\n",
2240 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 2231 chksum, nv->id[0], nv->nvram_version);
2241 "invalid -- WWPN) defaults.\n"); 2232 ql_log(ql_log_warn, vha, 0x0065,
2233 "Falling back to "
2234 "functioning (yet invalid -- WWPN) defaults.\n");
2242 2235
2243 /* 2236 /*
2244 * Set default initialization control block. 2237 * Set default initialization control block.
@@ -2382,8 +2375,13 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2382 /* 2375 /*
2383 * Set host adapter parameters. 2376 * Set host adapter parameters.
2384 */ 2377 */
2378
2379 /*
2380 * BIT_7 in the host-parameters section allows for modification to
2381 * internal driver logging.
2382 */
2385 if (nv->host_p[0] & BIT_7) 2383 if (nv->host_p[0] & BIT_7)
2386 ql2xextended_error_logging = 1; 2384 ql2xextended_error_logging = 0x7fffffff;
2387 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0); 2385 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
2388 /* Always load RISC code on non ISP2[12]00 chips. */ 2386 /* Always load RISC code on non ISP2[12]00 chips. */
2389 if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) 2387 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
@@ -2488,10 +2486,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2488 if (ha->zio_mode != QLA_ZIO_DISABLED) { 2486 if (ha->zio_mode != QLA_ZIO_DISABLED) {
2489 ha->zio_mode = QLA_ZIO_MODE_6; 2487 ha->zio_mode = QLA_ZIO_MODE_6;
2490 2488
2491 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer " 2489 ql_log(ql_log_info, vha, 0x0068,
2492 "delay (%d us).\n", vha->host_no, ha->zio_mode,
2493 ha->zio_timer * 100));
2494 qla_printk(KERN_INFO, ha,
2495 "ZIO mode %d enabled; timer delay (%d us).\n", 2490 "ZIO mode %d enabled; timer delay (%d us).\n",
2496 ha->zio_mode, ha->zio_timer * 100); 2491 ha->zio_mode, ha->zio_timer * 100);
2497 2492
@@ -2502,8 +2497,8 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2502 } 2497 }
2503 2498
2504 if (rval) { 2499 if (rval) {
2505 DEBUG2_3(printk(KERN_WARNING 2500 ql_log(ql_log_warn, vha, 0x0069,
2506 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 2501 "NVRAM configuration failed.\n");
2507 } 2502 }
2508 return (rval); 2503 return (rval);
2509} 2504}
@@ -2574,15 +2569,15 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2574 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) { 2569 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
2575 rval = qla2x00_configure_hba(vha); 2570 rval = qla2x00_configure_hba(vha);
2576 if (rval != QLA_SUCCESS) { 2571 if (rval != QLA_SUCCESS) {
2577 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n", 2572 ql_dbg(ql_dbg_disc, vha, 0x2013,
2578 vha->host_no)); 2573 "Unable to configure HBA.\n");
2579 return (rval); 2574 return (rval);
2580 } 2575 }
2581 } 2576 }
2582 2577
2583 save_flags = flags = vha->dpc_flags; 2578 save_flags = flags = vha->dpc_flags;
2584 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n", 2579 ql_dbg(ql_dbg_disc, vha, 0x2014,
2585 vha->host_no, flags)); 2580 "Configure loop -- dpc flags = 0x%lx.\n", flags);
2586 2581
2587 /* 2582 /*
2588 * If we have both an RSCN and PORT UPDATE pending then handle them 2583 * If we have both an RSCN and PORT UPDATE pending then handle them
@@ -2619,15 +2614,21 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2619 } 2614 }
2620 2615
2621 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) { 2616 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
2622 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 2617 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2618 ql_dbg(ql_dbg_disc, vha, 0x2015,
2619 "Loop resync needed, failing.\n");
2623 rval = QLA_FUNCTION_FAILED; 2620 rval = QLA_FUNCTION_FAILED;
2621 }
2624 else 2622 else
2625 rval = qla2x00_configure_local_loop(vha); 2623 rval = qla2x00_configure_local_loop(vha);
2626 } 2624 }
2627 2625
2628 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) { 2626 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
2629 if (LOOP_TRANSITION(vha)) 2627 if (LOOP_TRANSITION(vha)) {
2628 ql_dbg(ql_dbg_disc, vha, 0x201e,
2629 "Needs RSCN update and loop transition.\n");
2630 rval = QLA_FUNCTION_FAILED; 2630 rval = QLA_FUNCTION_FAILED;
2631 }
2631 else 2632 else
2632 rval = qla2x00_configure_fabric(vha); 2633 rval = qla2x00_configure_fabric(vha);
2633 } 2634 }
@@ -2638,16 +2639,17 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2638 rval = QLA_FUNCTION_FAILED; 2639 rval = QLA_FUNCTION_FAILED;
2639 } else { 2640 } else {
2640 atomic_set(&vha->loop_state, LOOP_READY); 2641 atomic_set(&vha->loop_state, LOOP_READY);
2641 2642 ql_dbg(ql_dbg_disc, vha, 0x2069,
2642 DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no)); 2643 "LOOP READY.\n");
2643 } 2644 }
2644 } 2645 }
2645 2646
2646 if (rval) { 2647 if (rval) {
2647 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n", 2648 ql_dbg(ql_dbg_disc, vha, 0x206a,
2648 __func__, vha->host_no)); 2649 "%s *** FAILED ***.\n", __func__);
2649 } else { 2650 } else {
2650 DEBUG3(printk("%s: exiting normally\n", __func__)); 2651 ql_dbg(ql_dbg_disc, vha, 0x206b,
2652 "%s: exiting normally.\n", __func__);
2651 } 2653 }
2652 2654
2653 /* Restore state if a resync event occurred during processing */ 2655 /* Restore state if a resync event occurred during processing */
@@ -2695,8 +2697,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2695 new_fcport = NULL; 2697 new_fcport = NULL;
2696 entries = MAX_FIBRE_DEVICES; 2698 entries = MAX_FIBRE_DEVICES;
2697 2699
2698 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no)); 2700 ql_dbg(ql_dbg_disc, vha, 0x2016,
2699 DEBUG3(qla2x00_get_fcal_position_map(vha, NULL)); 2701 "Getting FCAL position map.\n");
2702 if (ql2xextended_error_logging & ql_dbg_disc)
2703 qla2x00_get_fcal_position_map(vha, NULL);
2700 2704
2701 /* Get list of logged in devices. */ 2705 /* Get list of logged in devices. */
2702 memset(ha->gid_list, 0, GID_LIST_SIZE); 2706 memset(ha->gid_list, 0, GID_LIST_SIZE);
@@ -2705,14 +2709,17 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2705 if (rval != QLA_SUCCESS) 2709 if (rval != QLA_SUCCESS)
2706 goto cleanup_allocation; 2710 goto cleanup_allocation;
2707 2711
2708 DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n", 2712 ql_dbg(ql_dbg_disc, vha, 0x2017,
2709 vha->host_no, entries)); 2713 "Entries in ID list (%d).\n", entries);
2710 DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list, 2714 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
2711 entries * sizeof(struct gid_list_info))); 2715 (uint8_t *)ha->gid_list,
2716 entries * sizeof(struct gid_list_info));
2712 2717
2713 /* Allocate temporary fcport for any new fcports discovered. */ 2718 /* Allocate temporary fcport for any new fcports discovered. */
2714 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2719 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2715 if (new_fcport == NULL) { 2720 if (new_fcport == NULL) {
2721 ql_log(ql_log_warn, vha, 0x2018,
2722 "Memory allocation failed for fcport.\n");
2716 rval = QLA_MEMORY_ALLOC_FAILED; 2723 rval = QLA_MEMORY_ALLOC_FAILED;
2717 goto cleanup_allocation; 2724 goto cleanup_allocation;
2718 } 2725 }
@@ -2726,9 +2733,9 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2726 fcport->port_type != FCT_BROADCAST && 2733 fcport->port_type != FCT_BROADCAST &&
2727 (fcport->flags & FCF_FABRIC_DEVICE) == 0) { 2734 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2728 2735
2729 DEBUG(printk("scsi(%ld): Marking port lost, " 2736 ql_dbg(ql_dbg_disc, vha, 0x2019,
2730 "loop_id=0x%04x\n", 2737 "Marking port lost loop_id=0x%04x.\n",
2731 vha->host_no, fcport->loop_id)); 2738 fcport->loop_id);
2732 2739
2733 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 2740 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2734 } 2741 }
@@ -2769,12 +2776,12 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2769 new_fcport->vp_idx = vha->vp_idx; 2776 new_fcport->vp_idx = vha->vp_idx;
2770 rval2 = qla2x00_get_port_database(vha, new_fcport, 0); 2777 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2771 if (rval2 != QLA_SUCCESS) { 2778 if (rval2 != QLA_SUCCESS) {
2772 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport " 2779 ql_dbg(ql_dbg_disc, vha, 0x201a,
2773 "information -- get_port_database=%x, " 2780 "Failed to retrieve fcport information "
2774 "loop_id=0x%04x\n", 2781 "-- get_port_database=%x, loop_id=0x%04x.\n",
2775 vha->host_no, rval2, new_fcport->loop_id)); 2782 rval2, new_fcport->loop_id);
2776 DEBUG2(printk("scsi(%ld): Scheduling resync...\n", 2783 ql_dbg(ql_dbg_disc, vha, 0x201b,
2777 vha->host_no)); 2784 "Scheduling resync.\n");
2778 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 2785 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2779 continue; 2786 continue;
2780 } 2787 }
@@ -2810,6 +2817,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2810 fcport = new_fcport; 2817 fcport = new_fcport;
2811 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2818 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2812 if (new_fcport == NULL) { 2819 if (new_fcport == NULL) {
2820 ql_log(ql_log_warn, vha, 0x201c,
2821 "Failed to allocate memory for fcport.\n");
2813 rval = QLA_MEMORY_ALLOC_FAILED; 2822 rval = QLA_MEMORY_ALLOC_FAILED;
2814 goto cleanup_allocation; 2823 goto cleanup_allocation;
2815 } 2824 }
@@ -2828,8 +2837,8 @@ cleanup_allocation:
2828 kfree(new_fcport); 2837 kfree(new_fcport);
2829 2838
2830 if (rval != QLA_SUCCESS) { 2839 if (rval != QLA_SUCCESS) {
2831 DEBUG2(printk("scsi(%ld): Configure local loop error exit: " 2840 ql_dbg(ql_dbg_disc, vha, 0x201d,
2832 "rval=%x\n", vha->host_no, rval)); 2841 "Configure local loop error exit: rval=%x.\n", rval);
2833 } 2842 }
2834 2843
2835 return (rval); 2844 return (rval);
@@ -2858,27 +2867,27 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2858 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed, 2867 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
2859 mb); 2868 mb);
2860 if (rval != QLA_SUCCESS) { 2869 if (rval != QLA_SUCCESS) {
2861 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA " 2870 ql_dbg(ql_dbg_disc, vha, 0x2004,
2862 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n", 2871 "Unable to adjust iIDMA "
2863 vha->host_no, fcport->port_name[0], fcport->port_name[1], 2872 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x "
2873 "%04x.\n", fcport->port_name[0], fcport->port_name[1],
2864 fcport->port_name[2], fcport->port_name[3], 2874 fcport->port_name[2], fcport->port_name[3],
2865 fcport->port_name[4], fcport->port_name[5], 2875 fcport->port_name[4], fcport->port_name[5],
2866 fcport->port_name[6], fcport->port_name[7], rval, 2876 fcport->port_name[6], fcport->port_name[7], rval,
2867 fcport->fp_speed, mb[0], mb[1])); 2877 fcport->fp_speed, mb[0], mb[1]);
2868 } else { 2878 } else {
2869 link_speed = link_speeds[LS_UNKNOWN]; 2879 link_speed = link_speeds[LS_UNKNOWN];
2870 if (fcport->fp_speed < 5) 2880 if (fcport->fp_speed < 5)
2871 link_speed = link_speeds[fcport->fp_speed]; 2881 link_speed = link_speeds[fcport->fp_speed];
2872 else if (fcport->fp_speed == 0x13) 2882 else if (fcport->fp_speed == 0x13)
2873 link_speed = link_speeds[5]; 2883 link_speed = link_speeds[5];
2874 DEBUG2(qla_printk(KERN_INFO, ha, 2884 ql_dbg(ql_dbg_disc, vha, 0x2005,
2875 "iIDMA adjusted to %s GB/s on " 2885 "iIDMA adjusted to %s GB/s "
2876 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", 2886 "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
2877 link_speed, fcport->port_name[0], 2887 fcport->port_name[0], fcport->port_name[1],
2878 fcport->port_name[1], fcport->port_name[2], 2888 fcport->port_name[2], fcport->port_name[3],
2879 fcport->port_name[3], fcport->port_name[4], 2889 fcport->port_name[4], fcport->port_name[5],
2880 fcport->port_name[5], fcport->port_name[6], 2890 fcport->port_name[6], fcport->port_name[7]);
2881 fcport->port_name[7]));
2882 } 2891 }
2883} 2892}
2884 2893
@@ -2887,7 +2896,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2887{ 2896{
2888 struct fc_rport_identifiers rport_ids; 2897 struct fc_rport_identifiers rport_ids;
2889 struct fc_rport *rport; 2898 struct fc_rport *rport;
2890 struct qla_hw_data *ha = vha->hw;
2891 unsigned long flags; 2899 unsigned long flags;
2892 2900
2893 qla2x00_rport_del(fcport); 2901 qla2x00_rport_del(fcport);
@@ -2899,8 +2907,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2899 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2907 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2900 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids); 2908 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
2901 if (!rport) { 2909 if (!rport) {
2902 qla_printk(KERN_WARNING, ha, 2910 ql_log(ql_log_warn, vha, 0x2006,
2903 "Unable to allocate fc remote port!\n"); 2911 "Unable to allocate fc remote port.\n");
2904 return; 2912 return;
2905 } 2913 }
2906 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 2914 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
@@ -2975,8 +2983,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2975 loop_id = SNS_FL_PORT; 2983 loop_id = SNS_FL_PORT;
2976 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1); 2984 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
2977 if (rval != QLA_SUCCESS) { 2985 if (rval != QLA_SUCCESS) {
2978 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL " 2986 ql_dbg(ql_dbg_disc, vha, 0x201f,
2979 "Port\n", vha->host_no)); 2987 "MBX_GET_PORT_NAME failed, No FL Port.\n");
2980 2988
2981 vha->device_flags &= ~SWITCH_FOUND; 2989 vha->device_flags &= ~SWITCH_FOUND;
2982 return (QLA_SUCCESS); 2990 return (QLA_SUCCESS);
@@ -3003,32 +3011,32 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3003 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, 3011 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
3004 0xfc, mb, BIT_1 | BIT_0); 3012 0xfc, mb, BIT_1 | BIT_0);
3005 if (mb[0] != MBS_COMMAND_COMPLETE) { 3013 if (mb[0] != MBS_COMMAND_COMPLETE) {
3006 DEBUG2(qla_printk(KERN_INFO, ha, 3014 ql_dbg(ql_dbg_disc, vha, 0x2042,
3007 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 3015 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
3008 "mb[2]=%x mb[6]=%x mb[7]=%x\n", loop_id, 3016 "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
3009 mb[0], mb[1], mb[2], mb[6], mb[7])); 3017 mb[2], mb[6], mb[7]);
3010 return (QLA_SUCCESS); 3018 return (QLA_SUCCESS);
3011 } 3019 }
3012 3020
3013 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) { 3021 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
3014 if (qla2x00_rft_id(vha)) { 3022 if (qla2x00_rft_id(vha)) {
3015 /* EMPTY */ 3023 /* EMPTY */
3016 DEBUG2(printk("scsi(%ld): Register FC-4 " 3024 ql_dbg(ql_dbg_disc, vha, 0x2045,
3017 "TYPE failed.\n", vha->host_no)); 3025 "Register FC-4 TYPE failed.\n");
3018 } 3026 }
3019 if (qla2x00_rff_id(vha)) { 3027 if (qla2x00_rff_id(vha)) {
3020 /* EMPTY */ 3028 /* EMPTY */
3021 DEBUG2(printk("scsi(%ld): Register FC-4 " 3029 ql_dbg(ql_dbg_disc, vha, 0x2049,
3022 "Features failed.\n", vha->host_no)); 3030 "Register FC-4 Features failed.\n");
3023 } 3031 }
3024 if (qla2x00_rnn_id(vha)) { 3032 if (qla2x00_rnn_id(vha)) {
3025 /* EMPTY */ 3033 /* EMPTY */
3026 DEBUG2(printk("scsi(%ld): Register Node Name " 3034 ql_dbg(ql_dbg_disc, vha, 0x204f,
3027 "failed.\n", vha->host_no)); 3035 "Register Node Name failed.\n");
3028 } else if (qla2x00_rsnn_nn(vha)) { 3036 } else if (qla2x00_rsnn_nn(vha)) {
3029 /* EMPTY */ 3037 /* EMPTY */
3030 DEBUG2(printk("scsi(%ld): Register Symbolic " 3038 ql_dbg(ql_dbg_disc, vha, 0x2053,
3031 "Node Name failed.\n", vha->host_no)); 3039 "Register Symobilic Node Name failed.\n");
3032 } 3040 }
3033 } 3041 }
3034 3042
@@ -3132,8 +3140,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3132 } 3140 }
3133 3141
3134 if (rval) { 3142 if (rval) {
3135 DEBUG2(printk("scsi(%ld): Configure fabric error exit: " 3143 ql_dbg(ql_dbg_disc, vha, 0x2068,
3136 "rval=%d\n", vha->host_no, rval)); 3144 "Configure fabric error exit rval=%d.\n", rval);
3137 } 3145 }
3138 3146
3139 return (rval); 3147 return (rval);
@@ -3175,8 +3183,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3175 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL); 3183 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
3176 if (!swl) { 3184 if (!swl) {
3177 /*EMPTY*/ 3185 /*EMPTY*/
3178 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback " 3186 ql_dbg(ql_dbg_disc, vha, 0x2054,
3179 "on GA_NXT\n", vha->host_no)); 3187 "GID_PT allocations failed, fallback on GA_NXT.\n");
3180 } else { 3188 } else {
3181 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) { 3189 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
3182 kfree(swl); 3190 kfree(swl);
@@ -3201,6 +3209,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3201 /* Allocate temporary fcport for any new fcports discovered. */ 3209 /* Allocate temporary fcport for any new fcports discovered. */
3202 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 3210 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3203 if (new_fcport == NULL) { 3211 if (new_fcport == NULL) {
3212 ql_log(ql_log_warn, vha, 0x205e,
3213 "Failed to allocate memory for fcport.\n");
3204 kfree(swl); 3214 kfree(swl);
3205 return (QLA_MEMORY_ALLOC_FAILED); 3215 return (QLA_MEMORY_ALLOC_FAILED);
3206 } 3216 }
@@ -3247,9 +3257,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3247 /* Send GA_NXT to the switch */ 3257 /* Send GA_NXT to the switch */
3248 rval = qla2x00_ga_nxt(vha, new_fcport); 3258 rval = qla2x00_ga_nxt(vha, new_fcport);
3249 if (rval != QLA_SUCCESS) { 3259 if (rval != QLA_SUCCESS) {
3250 qla_printk(KERN_WARNING, ha, 3260 ql_log(ql_log_warn, vha, 0x2064,
3251 "SNS scan failed -- assuming zero-entry " 3261 "SNS scan failed -- assuming "
3252 "result...\n"); 3262 "zero-entry result.\n");
3253 list_for_each_entry_safe(fcport, fcptemp, 3263 list_for_each_entry_safe(fcport, fcptemp,
3254 new_fcports, list) { 3264 new_fcports, list) {
3255 list_del(&fcport->list); 3265 list_del(&fcport->list);
@@ -3265,9 +3275,11 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3265 wrap.b24 = new_fcport->d_id.b24; 3275 wrap.b24 = new_fcport->d_id.b24;
3266 first_dev = 0; 3276 first_dev = 0;
3267 } else if (new_fcport->d_id.b24 == wrap.b24) { 3277 } else if (new_fcport->d_id.b24 == wrap.b24) {
3268 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n", 3278 ql_dbg(ql_dbg_disc, vha, 0x2065,
3269 vha->host_no, new_fcport->d_id.b.domain, 3279 "Device wrap (%02x%02x%02x).\n",
3270 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa)); 3280 new_fcport->d_id.b.domain,
3281 new_fcport->d_id.b.area,
3282 new_fcport->d_id.b.al_pa);
3271 break; 3283 break;
3272 } 3284 }
3273 3285
@@ -3372,6 +3384,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3372 nxt_d_id.b24 = new_fcport->d_id.b24; 3384 nxt_d_id.b24 = new_fcport->d_id.b24;
3373 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 3385 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
3374 if (new_fcport == NULL) { 3386 if (new_fcport == NULL) {
3387 ql_log(ql_log_warn, vha, 0x2066,
3388 "Memory allocation failed for fcport.\n");
3375 kfree(swl); 3389 kfree(swl);
3376 return (QLA_MEMORY_ALLOC_FAILED); 3390 return (QLA_MEMORY_ALLOC_FAILED);
3377 } 3391 }
@@ -3501,10 +3515,10 @@ qla2x00_device_resync(scsi_qla_host_t *vha)
3501 d_id.b.area = MSB(LSW(rscn_entry)); 3515 d_id.b.area = MSB(LSW(rscn_entry));
3502 d_id.b.al_pa = LSB(LSW(rscn_entry)); 3516 d_id.b.al_pa = LSB(LSW(rscn_entry));
3503 3517
3504 DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = " 3518 ql_dbg(ql_dbg_disc, vha, 0x2020,
3505 "[%02x/%02x%02x%02x].\n", 3519 "RSCN queue entry[%d] = [%02x/%02x%02x%02x].\n",
3506 vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain, 3520 vha->rscn_out_ptr, format, d_id.b.domain, d_id.b.area,
3507 d_id.b.area, d_id.b.al_pa)); 3521 d_id.b.al_pa);
3508 3522
3509 vha->rscn_out_ptr++; 3523 vha->rscn_out_ptr++;
3510 if (vha->rscn_out_ptr == MAX_RSCN_COUNT) 3524 if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
@@ -3520,17 +3534,17 @@ qla2x00_device_resync(scsi_qla_host_t *vha)
3520 if (rscn_entry != vha->rscn_queue[rscn_out_iter]) 3534 if (rscn_entry != vha->rscn_queue[rscn_out_iter])
3521 break; 3535 break;
3522 3536
3523 DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue " 3537 ql_dbg(ql_dbg_disc, vha, 0x2021,
3524 "entry found at [%d].\n", vha->host_no, 3538 "Skipping duplicate RSCN queue entry found at "
3525 rscn_out_iter)); 3539 "[%d].\n", rscn_out_iter);
3526 3540
3527 vha->rscn_out_ptr = rscn_out_iter; 3541 vha->rscn_out_ptr = rscn_out_iter;
3528 } 3542 }
3529 3543
3530 /* Queue overflow, set switch default case. */ 3544 /* Queue overflow, set switch default case. */
3531 if (vha->flags.rscn_queue_overflow) { 3545 if (vha->flags.rscn_queue_overflow) {
3532 DEBUG(printk("scsi(%ld): device_resync: rscn " 3546 ql_dbg(ql_dbg_disc, vha, 0x2022,
3533 "overflow.\n", vha->host_no)); 3547 "device_resync: rscn overflow.\n");
3534 3548
3535 format = 3; 3549 format = 3;
3536 vha->flags.rscn_queue_overflow = 0; 3550 vha->flags.rscn_queue_overflow = 0;
@@ -3659,10 +3673,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3659 tmp_loopid = 0; 3673 tmp_loopid = 0;
3660 3674
3661 for (;;) { 3675 for (;;) {
3662 DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x " 3676 ql_dbg(ql_dbg_disc, vha, 0x2000,
3663 "for port %02x%02x%02x.\n", 3677 "Trying Fabric Login w/loop id 0x%04x for port "
3664 vha->host_no, fcport->loop_id, fcport->d_id.b.domain, 3678 "%02x%02x%02x.\n",
3665 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 3679 fcport->loop_id, fcport->d_id.b.domain,
3680 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3666 3681
3667 /* Login fcport on switch. */ 3682 /* Login fcport on switch. */
3668 ha->isp_ops->fabric_login(vha, fcport->loop_id, 3683 ha->isp_ops->fabric_login(vha, fcport->loop_id,
@@ -3680,10 +3695,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3680 tmp_loopid = fcport->loop_id; 3695 tmp_loopid = fcport->loop_id;
3681 fcport->loop_id = mb[1]; 3696 fcport->loop_id = mb[1];
3682 3697
3683 DEBUG(printk("Fabric Login: port in use - next " 3698 ql_dbg(ql_dbg_disc, vha, 0x2001,
3684 "loop id=0x%04x, port Id=%02x%02x%02x.\n", 3699 "Fabric Login: port in use - next loop "
3700 "id=0x%04x, port id= %02x%02x%02x.\n",
3685 fcport->loop_id, fcport->d_id.b.domain, 3701 fcport->loop_id, fcport->d_id.b.domain,
3686 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 3702 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3687 3703
3688 } else if (mb[0] == MBS_COMMAND_COMPLETE) { 3704 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
3689 /* 3705 /*
@@ -3744,11 +3760,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3744 /* 3760 /*
3745 * unrecoverable / not handled error 3761 * unrecoverable / not handled error
3746 */ 3762 */
3747 DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x " 3763 ql_dbg(ql_dbg_disc, vha, 0x2002,
3748 "loop_id=%x jiffies=%lx.\n", 3764 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
3749 __func__, vha->host_no, mb[0], 3765 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
3750 fcport->d_id.b.domain, fcport->d_id.b.area, 3766 fcport->d_id.b.area, fcport->d_id.b.al_pa,
3751 fcport->d_id.b.al_pa, fcport->loop_id, jiffies)); 3767 fcport->loop_id, jiffies);
3752 3768
3753 *next_loopid = fcport->loop_id; 3769 *next_loopid = fcport->loop_id;
3754 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3770 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
@@ -3852,7 +3868,8 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
3852 return (QLA_FUNCTION_FAILED); 3868 return (QLA_FUNCTION_FAILED);
3853 3869
3854 if (rval) 3870 if (rval)
3855 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__)); 3871 ql_dbg(ql_dbg_disc, vha, 0x206c,
3872 "%s *** FAILED ***.\n", __func__);
3856 3873
3857 return (rval); 3874 return (rval);
3858} 3875}
@@ -3929,8 +3946,8 @@ qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha)
3929 struct qla_hw_data *ha = vha->hw; 3946 struct qla_hw_data *ha = vha->hw;
3930 struct scsi_qla_host *vp; 3947 struct scsi_qla_host *vp;
3931 3948
3932 qla_printk(KERN_INFO, ha, 3949 ql_dbg(ql_dbg_p3p, vha, 0xb002,
3933 "Performing ISP error recovery - ha= %p.\n", ha); 3950 "Performing ISP error recovery - ha=%p.\n", ha);
3934 3951
3935 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 3952 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
3936 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 3953 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -3964,8 +3981,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3964 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3981 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3965 ha->qla_stats.total_isp_aborts++; 3982 ha->qla_stats.total_isp_aborts++;
3966 3983
3967 qla_printk(KERN_INFO, ha, 3984 ql_log(ql_log_info, vha, 0x00af,
3968 "Performing ISP error recovery - ha= %p.\n", ha); 3985 "Performing ISP error recovery - ha=%p.\n", ha);
3969 3986
3970 /* For ISP82XX, reset_chip is just disabling interrupts. 3987 /* For ISP82XX, reset_chip is just disabling interrupts.
3971 * Driver waits for the completion of the commands. 3988 * Driver waits for the completion of the commands.
@@ -4016,6 +4033,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
4016 /* Make sure for ISP 82XX IO DMA is complete */ 4033 /* Make sure for ISP 82XX IO DMA is complete */
4017 if (IS_QLA82XX(ha)) { 4034 if (IS_QLA82XX(ha)) {
4018 qla82xx_chip_reset_cleanup(vha); 4035 qla82xx_chip_reset_cleanup(vha);
4036 ql_log(ql_log_info, vha, 0x00b4,
4037 "Done chip reset cleanup.\n");
4019 4038
4020 /* Done waiting for pending commands. 4039 /* Done waiting for pending commands.
4021 * Reset the online flag. 4040 * Reset the online flag.
@@ -4097,7 +4116,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4097 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 4116 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
4098 &ha->fce_bufs); 4117 &ha->fce_bufs);
4099 if (rval) { 4118 if (rval) {
4100 qla_printk(KERN_WARNING, ha, 4119 ql_log(ql_log_warn, vha, 0x8033,
4101 "Unable to reinitialize FCE " 4120 "Unable to reinitialize FCE "
4102 "(%d).\n", rval); 4121 "(%d).\n", rval);
4103 ha->flags.fce_enabled = 0; 4122 ha->flags.fce_enabled = 0;
@@ -4109,7 +4128,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4109 rval = qla2x00_enable_eft_trace(vha, 4128 rval = qla2x00_enable_eft_trace(vha,
4110 ha->eft_dma, EFT_NUM_BUFFERS); 4129 ha->eft_dma, EFT_NUM_BUFFERS);
4111 if (rval) { 4130 if (rval) {
4112 qla_printk(KERN_WARNING, ha, 4131 ql_log(ql_log_warn, vha, 0x8034,
4113 "Unable to reinitialize EFT " 4132 "Unable to reinitialize EFT "
4114 "(%d).\n", rval); 4133 "(%d).\n", rval);
4115 } 4134 }
@@ -4118,9 +4137,9 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4118 vha->flags.online = 1; 4137 vha->flags.online = 1;
4119 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 4138 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
4120 if (ha->isp_abort_cnt == 0) { 4139 if (ha->isp_abort_cnt == 0) {
4121 qla_printk(KERN_WARNING, ha, 4140 ql_log(ql_log_fatal, vha, 0x8035,
4122 "ISP error recovery failed - " 4141 "ISP error recover failed - "
4123 "board disabled\n"); 4142 "board disabled.\n");
4124 /* 4143 /*
4125 * The next call disables the board 4144 * The next call disables the board
4126 * completely. 4145 * completely.
@@ -4132,16 +4151,16 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4132 status = 0; 4151 status = 0;
4133 } else { /* schedule another ISP abort */ 4152 } else { /* schedule another ISP abort */
4134 ha->isp_abort_cnt--; 4153 ha->isp_abort_cnt--;
4135 DEBUG(printk("qla%ld: ISP abort - " 4154 ql_dbg(ql_dbg_taskm, vha, 0x8020,
4136 "retry remaining %d\n", 4155 "ISP abort - retry remaining %d.\n",
4137 vha->host_no, ha->isp_abort_cnt)); 4156 ha->isp_abort_cnt);
4138 status = 1; 4157 status = 1;
4139 } 4158 }
4140 } else { 4159 } else {
4141 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 4160 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
4142 DEBUG(printk("qla2x00(%ld): ISP error recovery " 4161 ql_dbg(ql_dbg_taskm, vha, 0x8021,
4143 "- retrying (%d) more times\n", 4162 "ISP error recovery - retrying (%d) "
4144 vha->host_no, ha->isp_abort_cnt)); 4163 "more times.\n", ha->isp_abort_cnt);
4145 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 4164 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4146 status = 1; 4165 status = 1;
4147 } 4166 }
@@ -4150,9 +4169,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4150 } 4169 }
4151 4170
4152 if (!status) { 4171 if (!status) {
4153 DEBUG(printk(KERN_INFO 4172 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
4154 "qla2x00_abort_isp(%ld): succeeded.\n",
4155 vha->host_no));
4156 4173
4157 spin_lock_irqsave(&ha->vport_slock, flags); 4174 spin_lock_irqsave(&ha->vport_slock, flags);
4158 list_for_each_entry(vp, &ha->vp_list, list) { 4175 list_for_each_entry(vp, &ha->vp_list, list) {
@@ -4169,8 +4186,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4169 spin_unlock_irqrestore(&ha->vport_slock, flags); 4186 spin_unlock_irqrestore(&ha->vport_slock, flags);
4170 4187
4171 } else { 4188 } else {
4172 qla_printk(KERN_INFO, ha, 4189 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n");
4173 "qla2x00_abort_isp: **** FAILED ****\n");
4174 } 4190 }
4175 4191
4176 return(status); 4192 return(status);
@@ -4211,8 +4227,8 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
4211 4227
4212 status = qla2x00_fw_ready(vha); 4228 status = qla2x00_fw_ready(vha);
4213 if (!status) { 4229 if (!status) {
4214 DEBUG(printk("%s(): Start configure loop, " 4230 ql_dbg(ql_dbg_taskm, vha, 0x8031,
4215 "status = %d\n", __func__, status)); 4231 "Start configure loop status = %d.\n", status);
4216 4232
4217 /* Issue a marker after FW becomes ready. */ 4233 /* Issue a marker after FW becomes ready. */
4218 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 4234 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -4234,9 +4250,8 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
4234 if ((vha->device_flags & DFLG_NO_CABLE)) 4250 if ((vha->device_flags & DFLG_NO_CABLE))
4235 status = 0; 4251 status = 0;
4236 4252
4237 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n", 4253 ql_dbg(ql_dbg_taskm, vha, 0x8032,
4238 __func__, 4254 "Configure loop done, status = 0x%x.\n", status);
4239 status));
4240 } 4255 }
4241 return (status); 4256 return (status);
4242} 4257}
@@ -4256,13 +4271,13 @@ qla25xx_init_queues(struct qla_hw_data *ha)
4256 rsp->options &= ~BIT_0; 4271 rsp->options &= ~BIT_0;
4257 ret = qla25xx_init_rsp_que(base_vha, rsp); 4272 ret = qla25xx_init_rsp_que(base_vha, rsp);
4258 if (ret != QLA_SUCCESS) 4273 if (ret != QLA_SUCCESS)
4259 DEBUG2_17(printk(KERN_WARNING 4274 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
4260 "%s Rsp que:%d init failed\n", __func__, 4275 "%s Rsp que: %d init failed.\n",
4261 rsp->id)); 4276 __func__, rsp->id);
4262 else 4277 else
4263 DEBUG2_17(printk(KERN_INFO 4278 ql_dbg(ql_dbg_init, base_vha, 0x0100,
4264 "%s Rsp que:%d inited\n", __func__, 4279 "%s Rsp que: %d inited.\n",
4265 rsp->id)); 4280 __func__, rsp->id);
4266 } 4281 }
4267 } 4282 }
4268 for (i = 1; i < ha->max_req_queues; i++) { 4283 for (i = 1; i < ha->max_req_queues; i++) {
@@ -4272,13 +4287,13 @@ qla25xx_init_queues(struct qla_hw_data *ha)
4272 req->options &= ~BIT_0; 4287 req->options &= ~BIT_0;
4273 ret = qla25xx_init_req_que(base_vha, req); 4288 ret = qla25xx_init_req_que(base_vha, req);
4274 if (ret != QLA_SUCCESS) 4289 if (ret != QLA_SUCCESS)
4275 DEBUG2_17(printk(KERN_WARNING 4290 ql_dbg(ql_dbg_init, base_vha, 0x0101,
4276 "%s Req que:%d init failed\n", __func__, 4291 "%s Req que: %d init failed.\n",
4277 req->id)); 4292 __func__, req->id);
4278 else 4293 else
4279 DEBUG2_17(printk(KERN_WARNING 4294 ql_dbg(ql_dbg_init, base_vha, 0x0102,
4280 "%s Req que:%d inited\n", __func__, 4295 "%s Req que: %d inited.\n",
4281 req->id)); 4296 __func__, req->id);
4282 } 4297 }
4283 } 4298 }
4284 return ret; 4299 return ret;
@@ -4397,19 +4412,22 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4397 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 4412 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4398 chksum += le32_to_cpu(*dptr++); 4413 chksum += le32_to_cpu(*dptr++);
4399 4414
4400 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); 4415 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
4401 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 4416 "Contents of NVRAM\n");
4417 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
4418 (uint8_t *)nv, ha->nvram_size);
4402 4419
4403 /* Bad NVRAM data, set defaults parameters. */ 4420 /* Bad NVRAM data, set defaults parameters. */
4404 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 4421 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4405 || nv->id[3] != ' ' || 4422 || nv->id[3] != ' ' ||
4406 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 4423 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4407 /* Reset NVRAM data. */ 4424 /* Reset NVRAM data. */
4408 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 4425 ql_log(ql_log_warn, vha, 0x006b,
4409 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 4426 "Inconisistent NVRAM detected: checksum=0x%x id=%c "
4410 le16_to_cpu(nv->nvram_version)); 4427 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
4411 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 4428 ql_log(ql_log_warn, vha, 0x006c,
4412 "invalid -- WWPN) defaults.\n"); 4429 "Falling back to functioning (yet invalid -- WWPN) "
4430 "defaults.\n");
4413 4431
4414 /* 4432 /*
4415 * Set default initialization control block. 4433 * Set default initialization control block.
@@ -4587,10 +4605,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4587 if (ha->zio_mode != QLA_ZIO_DISABLED) { 4605 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4588 ha->zio_mode = QLA_ZIO_MODE_6; 4606 ha->zio_mode = QLA_ZIO_MODE_6;
4589 4607
4590 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " 4608 ql_log(ql_log_info, vha, 0x006f,
4591 "(%d us).\n", vha->host_no, ha->zio_mode,
4592 ha->zio_timer * 100));
4593 qla_printk(KERN_INFO, ha,
4594 "ZIO mode %d enabled; timer delay (%d us).\n", 4609 "ZIO mode %d enabled; timer delay (%d us).\n",
4595 ha->zio_mode, ha->zio_timer * 100); 4610 ha->zio_mode, ha->zio_timer * 100);
4596 4611
@@ -4601,8 +4616,8 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4601 } 4616 }
4602 4617
4603 if (rval) { 4618 if (rval) {
4604 DEBUG2_3(printk(KERN_WARNING 4619 ql_log(ql_log_warn, vha, 0x0070,
4605 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 4620 "NVRAM configuration failed.\n");
4606 } 4621 }
4607 return (rval); 4622 return (rval);
4608} 4623}
@@ -4620,8 +4635,8 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4620 struct qla_hw_data *ha = vha->hw; 4635 struct qla_hw_data *ha = vha->hw;
4621 struct req_que *req = ha->req_q_map[0]; 4636 struct req_que *req = ha->req_q_map[0];
4622 4637
4623 qla_printk(KERN_INFO, ha, 4638 ql_dbg(ql_dbg_init, vha, 0x008b,
4624 "FW: Loading from flash (%x)...\n", faddr); 4639 "Loading firmware from flash (%x).\n", faddr);
4625 4640
4626 rval = QLA_SUCCESS; 4641 rval = QLA_SUCCESS;
4627 4642
@@ -4637,11 +4652,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4637 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 4652 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4638 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 4653 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4639 dcode[3] == 0)) { 4654 dcode[3] == 0)) {
4640 qla_printk(KERN_WARNING, ha, 4655 ql_log(ql_log_fatal, vha, 0x008c,
4641 "Unable to verify integrity of flash firmware image!\n"); 4656 "Unable to verify the integrity of flash firmware "
4642 qla_printk(KERN_WARNING, ha, 4657 "image.\n");
4643 "Firmware data: %08x %08x %08x %08x!\n", dcode[0], 4658 ql_log(ql_log_fatal, vha, 0x008d,
4644 dcode[1], dcode[2], dcode[3]); 4659 "Firmware data: %08x %08x %08x %08x.\n",
4660 dcode[0], dcode[1], dcode[2], dcode[3]);
4645 4661
4646 return QLA_FUNCTION_FAILED; 4662 return QLA_FUNCTION_FAILED;
4647 } 4663 }
@@ -4660,9 +4676,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4660 if (dlen > risc_size) 4676 if (dlen > risc_size)
4661 dlen = risc_size; 4677 dlen = risc_size;
4662 4678
4663 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4679 ql_dbg(ql_dbg_init, vha, 0x008e,
4664 "addr %x, number of dwords 0x%x, offset 0x%x.\n", 4680 "Loading risc segment@ risc addr %x "
4665 vha->host_no, risc_addr, dlen, faddr)); 4681 "number of dwords 0x%x offset 0x%x.\n",
4682 risc_addr, dlen, faddr);
4666 4683
4667 qla24xx_read_flash_data(vha, dcode, faddr, dlen); 4684 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
4668 for (i = 0; i < dlen; i++) 4685 for (i = 0; i < dlen; i++)
@@ -4671,12 +4688,9 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
4671 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4688 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4672 dlen); 4689 dlen);
4673 if (rval) { 4690 if (rval) {
4674 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4691 ql_log(ql_log_fatal, vha, 0x008f,
4675 "segment %d of firmware\n", vha->host_no, 4692 "Failed to load segment %d of firmware.\n",
4676 fragment)); 4693 fragment);
4677 qla_printk(KERN_WARNING, ha,
4678 "[ERROR] Failed to load segment %d of "
4679 "firmware\n", fragment);
4680 break; 4694 break;
4681 } 4695 }
4682 4696
@@ -4709,9 +4723,10 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4709 /* Load firmware blob. */ 4723 /* Load firmware blob. */
4710 blob = qla2x00_request_firmware(vha); 4724 blob = qla2x00_request_firmware(vha);
4711 if (!blob) { 4725 if (!blob) {
4712 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 4726 ql_log(ql_log_info, vha, 0x0083,
4713 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 4727 "Fimware image unavailable.\n");
4714 "from: " QLA_FW_URL ".\n"); 4728 ql_log(ql_log_info, vha, 0x0084,
4729 "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
4715 return QLA_FUNCTION_FAILED; 4730 return QLA_FUNCTION_FAILED;
4716 } 4731 }
4717 4732
@@ -4724,8 +4739,8 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4724 4739
4725 /* Validate firmware image by checking version. */ 4740 /* Validate firmware image by checking version. */
4726 if (blob->fw->size < 8 * sizeof(uint16_t)) { 4741 if (blob->fw->size < 8 * sizeof(uint16_t)) {
4727 qla_printk(KERN_WARNING, ha, 4742 ql_log(ql_log_fatal, vha, 0x0085,
4728 "Unable to verify integrity of firmware image (%Zd)!\n", 4743 "Unable to verify integrity of firmware image (%Zd).\n",
4729 blob->fw->size); 4744 blob->fw->size);
4730 goto fail_fw_integrity; 4745 goto fail_fw_integrity;
4731 } 4746 }
@@ -4734,11 +4749,11 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4734 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff && 4749 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
4735 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 && 4750 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
4736 wcode[2] == 0 && wcode[3] == 0)) { 4751 wcode[2] == 0 && wcode[3] == 0)) {
4737 qla_printk(KERN_WARNING, ha, 4752 ql_log(ql_log_fatal, vha, 0x0086,
4738 "Unable to verify integrity of firmware image!\n"); 4753 "Unable to verify integrity of firmware image.\n");
4739 qla_printk(KERN_WARNING, ha, 4754 ql_log(ql_log_fatal, vha, 0x0087,
4740 "Firmware data: %04x %04x %04x %04x!\n", wcode[0], 4755 "Firmware data: %04x %04x %04x %04x.\n",
4741 wcode[1], wcode[2], wcode[3]); 4756 wcode[0], wcode[1], wcode[2], wcode[3]);
4742 goto fail_fw_integrity; 4757 goto fail_fw_integrity;
4743 } 4758 }
4744 4759
@@ -4751,9 +4766,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4751 /* Validate firmware image size. */ 4766 /* Validate firmware image size. */
4752 fwclen += risc_size * sizeof(uint16_t); 4767 fwclen += risc_size * sizeof(uint16_t);
4753 if (blob->fw->size < fwclen) { 4768 if (blob->fw->size < fwclen) {
4754 qla_printk(KERN_WARNING, ha, 4769 ql_log(ql_log_fatal, vha, 0x0088,
4755 "Unable to verify integrity of firmware image " 4770 "Unable to verify integrity of firmware image "
4756 "(%Zd)!\n", blob->fw->size); 4771 "(%Zd).\n", blob->fw->size);
4757 goto fail_fw_integrity; 4772 goto fail_fw_integrity;
4758 } 4773 }
4759 4774
@@ -4762,10 +4777,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4762 wlen = (uint16_t)(ha->fw_transfer_size >> 1); 4777 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
4763 if (wlen > risc_size) 4778 if (wlen > risc_size)
4764 wlen = risc_size; 4779 wlen = risc_size;
4765 4780 ql_dbg(ql_dbg_init, vha, 0x0089,
4766 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4781 "Loading risc segment@ risc addr %x number of "
4767 "addr %x, number of words 0x%x.\n", vha->host_no, 4782 "words 0x%x.\n", risc_addr, wlen);
4768 risc_addr, wlen));
4769 4783
4770 for (i = 0; i < wlen; i++) 4784 for (i = 0; i < wlen; i++)
4771 wcode[i] = swab16(fwcode[i]); 4785 wcode[i] = swab16(fwcode[i]);
@@ -4773,12 +4787,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4773 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4787 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4774 wlen); 4788 wlen);
4775 if (rval) { 4789 if (rval) {
4776 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4790 ql_log(ql_log_fatal, vha, 0x008a,
4777 "segment %d of firmware\n", vha->host_no, 4791 "Failed to load segment %d of firmware.\n",
4778 fragment)); 4792 fragment);
4779 qla_printk(KERN_WARNING, ha,
4780 "[ERROR] Failed to load segment %d of "
4781 "firmware\n", fragment);
4782 break; 4793 break;
4783 } 4794 }
4784 4795
@@ -4814,15 +4825,17 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4814 /* Load firmware blob. */ 4825 /* Load firmware blob. */
4815 blob = qla2x00_request_firmware(vha); 4826 blob = qla2x00_request_firmware(vha);
4816 if (!blob) { 4827 if (!blob) {
4817 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 4828 ql_log(ql_log_warn, vha, 0x0090,
4818 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 4829 "Fimware image unavailable.\n");
4819 "from: " QLA_FW_URL ".\n"); 4830 ql_log(ql_log_warn, vha, 0x0091,
4831 "Firmware images can be retrieved from: "
4832 QLA_FW_URL ".\n");
4820 4833
4821 return QLA_FUNCTION_FAILED; 4834 return QLA_FUNCTION_FAILED;
4822 } 4835 }
4823 4836
4824 qla_printk(KERN_INFO, ha, 4837 ql_log(ql_log_info, vha, 0x0092,
4825 "FW: Loading via request-firmware...\n"); 4838 "Loading via request-firmware.\n");
4826 4839
4827 rval = QLA_SUCCESS; 4840 rval = QLA_SUCCESS;
4828 4841
@@ -4834,8 +4847,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4834 4847
4835 /* Validate firmware image by checking version. */ 4848 /* Validate firmware image by checking version. */
4836 if (blob->fw->size < 8 * sizeof(uint32_t)) { 4849 if (blob->fw->size < 8 * sizeof(uint32_t)) {
4837 qla_printk(KERN_WARNING, ha, 4850 ql_log(ql_log_fatal, vha, 0x0093,
4838 "Unable to verify integrity of firmware image (%Zd)!\n", 4851 "Unable to verify integrity of firmware image (%Zd).\n",
4839 blob->fw->size); 4852 blob->fw->size);
4840 goto fail_fw_integrity; 4853 goto fail_fw_integrity;
4841 } 4854 }
@@ -4845,11 +4858,12 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4845 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 4858 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
4846 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 4859 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
4847 dcode[3] == 0)) { 4860 dcode[3] == 0)) {
4848 qla_printk(KERN_WARNING, ha, 4861 ql_log(ql_log_fatal, vha, 0x0094,
4849 "Unable to verify integrity of firmware image!\n"); 4862 "Unable to verify integrity of firmware image (%Zd).\n",
4850 qla_printk(KERN_WARNING, ha, 4863 blob->fw->size);
4851 "Firmware data: %08x %08x %08x %08x!\n", dcode[0], 4864 ql_log(ql_log_fatal, vha, 0x0095,
4852 dcode[1], dcode[2], dcode[3]); 4865 "Firmware data: %08x %08x %08x %08x.\n",
4866 dcode[0], dcode[1], dcode[2], dcode[3]);
4853 goto fail_fw_integrity; 4867 goto fail_fw_integrity;
4854 } 4868 }
4855 4869
@@ -4861,9 +4875,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4861 /* Validate firmware image size. */ 4875 /* Validate firmware image size. */
4862 fwclen += risc_size * sizeof(uint32_t); 4876 fwclen += risc_size * sizeof(uint32_t);
4863 if (blob->fw->size < fwclen) { 4877 if (blob->fw->size < fwclen) {
4864 qla_printk(KERN_WARNING, ha, 4878 ql_log(ql_log_fatal, vha, 0x0096,
4865 "Unable to verify integrity of firmware image " 4879 "Unable to verify integrity of firmware image "
4866 "(%Zd)!\n", blob->fw->size); 4880 "(%Zd).\n", blob->fw->size);
4867 4881
4868 goto fail_fw_integrity; 4882 goto fail_fw_integrity;
4869 } 4883 }
@@ -4874,9 +4888,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4874 if (dlen > risc_size) 4888 if (dlen > risc_size)
4875 dlen = risc_size; 4889 dlen = risc_size;
4876 4890
4877 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4891 ql_dbg(ql_dbg_init, vha, 0x0097,
4878 "addr %x, number of dwords 0x%x.\n", vha->host_no, 4892 "Loading risc segment@ risc addr %x "
4879 risc_addr, dlen)); 4893 "number of dwords 0x%x.\n", risc_addr, dlen);
4880 4894
4881 for (i = 0; i < dlen; i++) 4895 for (i = 0; i < dlen; i++)
4882 dcode[i] = swab32(fwcode[i]); 4896 dcode[i] = swab32(fwcode[i]);
@@ -4884,12 +4898,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
4884 rval = qla2x00_load_ram(vha, req->dma, risc_addr, 4898 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
4885 dlen); 4899 dlen);
4886 if (rval) { 4900 if (rval) {
4887 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4901 ql_log(ql_log_fatal, vha, 0x0098,
4888 "segment %d of firmware\n", vha->host_no, 4902 "Failed to load segment %d of firmware.\n",
4889 fragment)); 4903 fragment);
4890 qla_printk(KERN_WARNING, ha,
4891 "[ERROR] Failed to load segment %d of "
4892 "firmware\n", fragment);
4893 break; 4904 break;
4894 } 4905 }
4895 4906
@@ -4953,14 +4964,13 @@ try_blob_fw:
4953 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw) 4964 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
4954 return rval; 4965 return rval;
4955 4966
4956 qla_printk(KERN_ERR, ha, 4967 ql_log(ql_log_info, vha, 0x0099,
4957 "FW: Attempting to fallback to golden firmware...\n"); 4968 "Attempting to fallback to golden firmware.\n");
4958 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); 4969 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
4959 if (rval != QLA_SUCCESS) 4970 if (rval != QLA_SUCCESS)
4960 return rval; 4971 return rval;
4961 4972
4962 qla_printk(KERN_ERR, ha, 4973 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
4963 "FW: Please update operational firmware...\n");
4964 ha->flags.running_gold_fw = 1; 4974 ha->flags.running_gold_fw = 1;
4965 4975
4966 return rval; 4976 return rval;
@@ -4987,8 +4997,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4987 continue; 4997 continue;
4988 if (qla2x00_setup_chip(vha) != QLA_SUCCESS) 4998 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
4989 continue; 4999 continue;
4990 qla_printk(KERN_INFO, ha, 5000 ql_log(ql_log_info, vha, 0x8015,
4991 "Attempting retry of stop-firmware command...\n"); 5001 "Attempting retry of stop-firmware command.\n");
4992 ret = qla2x00_stop_firmware(vha); 5002 ret = qla2x00_stop_firmware(vha);
4993 } 5003 }
4994} 5004}
@@ -5023,10 +5033,10 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
5023 /* Login to SNS first */ 5033 /* Login to SNS first */
5024 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1); 5034 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
5025 if (mb[0] != MBS_COMMAND_COMPLETE) { 5035 if (mb[0] != MBS_COMMAND_COMPLETE) {
5026 DEBUG15(qla_printk(KERN_INFO, ha, 5036 ql_dbg(ql_dbg_init, vha, 0x0103,
5027 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 5037 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
5028 "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS, 5038 "mb[6]=%x mb[7]=%x.\n",
5029 mb[0], mb[1], mb[2], mb[6], mb[7])); 5039 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
5030 return (QLA_FUNCTION_FAILED); 5040 return (QLA_FUNCTION_FAILED);
5031 } 5041 }
5032 5042
@@ -5146,19 +5156,23 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5146 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 5156 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
5147 chksum += le32_to_cpu(*dptr++); 5157 chksum += le32_to_cpu(*dptr++);
5148 5158
5149 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); 5159 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
5150 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 5160 "Contents of NVRAM:\n");
5161 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
5162 (uint8_t *)nv, ha->nvram_size);
5151 5163
5152 /* Bad NVRAM data, set defaults parameters. */ 5164 /* Bad NVRAM data, set defaults parameters. */
5153 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 5165 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
5154 || nv->id[3] != ' ' || 5166 || nv->id[3] != ' ' ||
5155 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 5167 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
5156 /* Reset NVRAM data. */ 5168 /* Reset NVRAM data. */
5157 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " 5169 ql_log(ql_log_info, vha, 0x0073,
5158 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], 5170 "Inconisistent NVRAM detected: checksum=0x%x id=%c "
5171 "version=0x%x.\n", chksum, nv->id[0],
5159 le16_to_cpu(nv->nvram_version)); 5172 le16_to_cpu(nv->nvram_version));
5160 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 5173 ql_log(ql_log_info, vha, 0x0074,
5161 "invalid -- WWPN) defaults.\n"); 5174 "Falling back to functioning (yet invalid -- WWPN) "
5175 "defaults.\n");
5162 5176
5163 /* 5177 /*
5164 * Set default initialization control block. 5178 * Set default initialization control block.
@@ -5350,12 +5364,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5350 if (ha->zio_mode != QLA_ZIO_DISABLED) { 5364 if (ha->zio_mode != QLA_ZIO_DISABLED) {
5351 ha->zio_mode = QLA_ZIO_MODE_6; 5365 ha->zio_mode = QLA_ZIO_MODE_6;
5352 5366
5353 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " 5367 ql_log(ql_log_info, vha, 0x0075,
5354 "(%d us).\n", vha->host_no, ha->zio_mode,
5355 ha->zio_timer * 100));
5356 qla_printk(KERN_INFO, ha,
5357 "ZIO mode %d enabled; timer delay (%d us).\n", 5368 "ZIO mode %d enabled; timer delay (%d us).\n",
5358 ha->zio_mode, ha->zio_timer * 100); 5369 ha->zio_mode,
5370 ha->zio_timer * 100);
5359 5371
5360 icb->firmware_options_2 |= cpu_to_le32( 5372 icb->firmware_options_2 |= cpu_to_le32(
5361 (uint32_t)ha->zio_mode); 5373 (uint32_t)ha->zio_mode);
@@ -5364,8 +5376,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5364 } 5376 }
5365 5377
5366 if (rval) { 5378 if (rval) {
5367 DEBUG2_3(printk(KERN_WARNING 5379 ql_log(ql_log_warn, vha, 0x0076,
5368 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no)); 5380 "NVRAM configuration failed.\n");
5369 } 5381 }
5370 return (rval); 5382 return (rval);
5371} 5383}
@@ -5388,9 +5400,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5388 5400
5389 status = qla2x00_fw_ready(vha); 5401 status = qla2x00_fw_ready(vha);
5390 if (!status) { 5402 if (!status) {
5391 qla_printk(KERN_INFO, ha, 5403 ql_log(ql_log_info, vha, 0x803c,
5392 "%s(): Start configure loop, " 5404 "Start configure loop, status =%d.\n", status);
5393 "status = %d\n", __func__, status);
5394 5405
5395 /* Issue a marker after FW becomes ready. */ 5406 /* Issue a marker after FW becomes ready. */
5396 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 5407 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -5412,9 +5423,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5412 if ((vha->device_flags & DFLG_NO_CABLE)) 5423 if ((vha->device_flags & DFLG_NO_CABLE))
5413 status = 0; 5424 status = 0;
5414 5425
5415 qla_printk(KERN_INFO, ha, 5426 ql_log(ql_log_info, vha, 0x803d,
5416 "%s(): Configure loop done, status = 0x%x\n", 5427 "Configure loop done, status = 0x%x.\n", status);
5417 __func__, status);
5418 } 5428 }
5419 5429
5420 if (!status) { 5430 if (!status) {
@@ -5450,9 +5460,9 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5450 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 5460 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
5451 &ha->fce_bufs); 5461 &ha->fce_bufs);
5452 if (rval) { 5462 if (rval) {
5453 qla_printk(KERN_WARNING, ha, 5463 ql_log(ql_log_warn, vha, 0x803e,
5454 "Unable to reinitialize FCE " 5464 "Unable to reinitialize FCE (%d).\n",
5455 "(%d).\n", rval); 5465 rval);
5456 ha->flags.fce_enabled = 0; 5466 ha->flags.fce_enabled = 0;
5457 } 5467 }
5458 } 5468 }
@@ -5462,17 +5472,16 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5462 rval = qla2x00_enable_eft_trace(vha, 5472 rval = qla2x00_enable_eft_trace(vha,
5463 ha->eft_dma, EFT_NUM_BUFFERS); 5473 ha->eft_dma, EFT_NUM_BUFFERS);
5464 if (rval) { 5474 if (rval) {
5465 qla_printk(KERN_WARNING, ha, 5475 ql_log(ql_log_warn, vha, 0x803f,
5466 "Unable to reinitialize EFT " 5476 "Unable to reinitialize EFT (%d).\n",
5467 "(%d).\n", rval); 5477 rval);
5468 } 5478 }
5469 } 5479 }
5470 } 5480 }
5471 5481
5472 if (!status) { 5482 if (!status) {
5473 DEBUG(printk(KERN_INFO 5483 ql_dbg(ql_dbg_taskm, vha, 0x8040,
5474 "qla82xx_restart_isp(%ld): succeeded.\n", 5484 "qla82xx_restart_isp succeeded.\n");
5475 vha->host_no));
5476 5485
5477 spin_lock_irqsave(&ha->vport_slock, flags); 5486 spin_lock_irqsave(&ha->vport_slock, flags);
5478 list_for_each_entry(vp, &ha->vp_list, list) { 5487 list_for_each_entry(vp, &ha->vp_list, list) {
@@ -5489,8 +5498,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5489 spin_unlock_irqrestore(&ha->vport_slock, flags); 5498 spin_unlock_irqrestore(&ha->vport_slock, flags);
5490 5499
5491 } else { 5500 } else {
5492 qla_printk(KERN_INFO, ha, 5501 ql_log(ql_log_warn, vha, 0x8041,
5493 "qla82xx_restart_isp: **** FAILED ****\n"); 5502 "qla82xx_restart_isp **** FAILED ****.\n");
5494 } 5503 }
5495 5504
5496 return status; 5505 return status;
@@ -5640,9 +5649,8 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5640 if (ret == QLA_SUCCESS) 5649 if (ret == QLA_SUCCESS)
5641 fcport->fcp_prio = priority; 5650 fcport->fcp_prio = priority;
5642 else 5651 else
5643 DEBUG2(printk(KERN_WARNING 5652 ql_dbg(ql_dbg_user, vha, 0x704f,
5644 "scsi(%ld): Unable to activate fcp priority, " 5653 "Unable to activate fcp priority, ret=0x%x.\n", ret);
5645 " ret=0x%x\n", vha->host_no, ret));
5646 5654
5647 return ret; 5655 return ret;
5648} 5656}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 4c8167e11f69..d2e904bc21c0 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -94,11 +94,11 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)
94 94
95 /* Don't print state transitions during initial allocation of fcport */ 95 /* Don't print state transitions during initial allocation of fcport */
96 if (old_state && old_state != state) { 96 if (old_state && old_state != state) {
97 DEBUG(qla_printk(KERN_WARNING, fcport->vha->hw, 97 ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
98 "scsi(%ld): FCPort state transitioned from %s to %s - " 98 "FCPort state transitioned from %s to %s - "
99 "portid=%02x%02x%02x.\n", fcport->vha->host_no, 99 "portid=%02x%02x%02x.\n",
100 port_state_str[old_state], port_state_str[state], 100 port_state_str[old_state], port_state_str[state],
101 fcport->d_id.b.domain, fcport->d_id.b.area, 101 fcport->d_id.b.domain, fcport->d_id.b.area,
102 fcport->d_id.b.al_pa)); 102 fcport->d_id.b.al_pa);
103 } 103 }
104} 104}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 7bac3cd109d6..49d6906af886 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -150,7 +150,8 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
150 150
151 /* We only support T10 DIF right now */ 151 /* We only support T10 DIF right now */
152 if (guard != SHOST_DIX_GUARD_CRC) { 152 if (guard != SHOST_DIX_GUARD_CRC) {
153 DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard)); 153 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
154 "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
154 return 0; 155 return 0;
155 } 156 }
156 157
@@ -343,9 +344,10 @@ qla2x00_start_scsi(srb_t *sp)
343 344
344 /* Send marker if required */ 345 /* Send marker if required */
345 if (vha->marker_needed != 0) { 346 if (vha->marker_needed != 0) {
346 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) 347 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
347 != QLA_SUCCESS) 348 QLA_SUCCESS) {
348 return (QLA_FUNCTION_FAILED); 349 return (QLA_FUNCTION_FAILED);
350 }
349 vha->marker_needed = 0; 351 vha->marker_needed = 0;
350 } 352 }
351 353
@@ -490,8 +492,8 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
490 mrk24 = NULL; 492 mrk24 = NULL;
491 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0); 493 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
492 if (mrk == NULL) { 494 if (mrk == NULL) {
493 DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n", 495 ql_log(ql_log_warn, base_vha, 0x3026,
494 __func__, base_vha->host_no)); 496 "Failed to allocate Marker IOCB.\n");
495 497
496 return (QLA_FUNCTION_FAILED); 498 return (QLA_FUNCTION_FAILED);
497 } 499 }
@@ -547,9 +549,10 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
547 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); 549 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
548 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; 550 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
549 551
550 DEBUG5(printk("%s(): IOCB data:\n", __func__)); 552 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
551 DEBUG5(qla2x00_dump_buffer( 553 "IOCB data:\n");
552 (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE)); 554 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
555 (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);
553 556
554 /* Adjust ring index. */ 557 /* Adjust ring index. */
555 req->ring_index++; 558 req->ring_index++;
@@ -604,7 +607,7 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
604 * Returns the number of IOCB entries needed to store @dsds. 607 * Returns the number of IOCB entries needed to store @dsds.
605 */ 608 */
606inline uint16_t 609inline uint16_t
607qla24xx_calc_iocbs(uint16_t dsds) 610qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
608{ 611{
609 uint16_t iocbs; 612 uint16_t iocbs;
610 613
@@ -614,8 +617,6 @@ qla24xx_calc_iocbs(uint16_t dsds)
614 if ((dsds - 1) % 5) 617 if ((dsds - 1) % 5)
615 iocbs++; 618 iocbs++;
616 } 619 }
617 DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
618 __func__, iocbs));
619 return iocbs; 620 return iocbs;
620} 621}
621 622
@@ -712,6 +713,7 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
712 unsigned int protcnt) 713 unsigned int protcnt)
713{ 714{
714 struct sd_dif_tuple *spt; 715 struct sd_dif_tuple *spt;
716 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
715 unsigned char op = scsi_get_prot_op(cmd); 717 unsigned char op = scsi_get_prot_op(cmd);
716 718
717 switch (scsi_get_prot_type(cmd)) { 719 switch (scsi_get_prot_type(cmd)) {
@@ -768,9 +770,9 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
768 op == SCSI_PROT_WRITE_PASS)) { 770 op == SCSI_PROT_WRITE_PASS)) {
769 spt = page_address(sg_page(scsi_prot_sglist(cmd))) + 771 spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
770 scsi_prot_sglist(cmd)[0].offset; 772 scsi_prot_sglist(cmd)[0].offset;
771 DEBUG18(printk(KERN_DEBUG 773 ql_dbg(ql_dbg_io, vha, 0x3008,
772 "%s(): LBA from user %p, lba = 0x%x\n", 774 "LBA from user %p, lba = 0x%x for cmd=%p.\n",
773 __func__, spt, (int)spt->ref_tag)); 775 spt, (int)spt->ref_tag, cmd);
774 pkt->ref_tag = swab32(spt->ref_tag); 776 pkt->ref_tag = swab32(spt->ref_tag);
775 pkt->app_tag_mask[0] = 0x0; 777 pkt->app_tag_mask[0] = 0x0;
776 pkt->app_tag_mask[1] = 0x0; 778 pkt->app_tag_mask[1] = 0x0;
@@ -789,11 +791,11 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
789 break; 791 break;
790 } 792 }
791 793
792 DEBUG18(printk(KERN_DEBUG 794 ql_dbg(ql_dbg_io, vha, 0x3009,
793 "%s(): Setting protection Tags: (BIG) ref tag = 0x%x," 795 "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
794 " app tag = 0x%x, prot SG count %d , cmd lba 0x%x," 796 "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
795 " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt, 797 pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
796 (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd))); 798 scsi_get_prot_type(cmd), cmd);
797} 799}
798 800
799 801
@@ -809,6 +811,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
809 uint32_t *cur_dsd = dsd; 811 uint32_t *cur_dsd = dsd;
810 int i; 812 int i;
811 uint16_t used_dsds = tot_dsds; 813 uint16_t used_dsds = tot_dsds;
814 scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);
812 815
813 uint8_t *cp; 816 uint8_t *cp;
814 817
@@ -853,9 +856,10 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
853 cur_dsd = (uint32_t *)next_dsd; 856 cur_dsd = (uint32_t *)next_dsd;
854 } 857 }
855 sle_dma = sg_dma_address(sg); 858 sle_dma = sg_dma_address(sg);
856 DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x," 859 ql_dbg(ql_dbg_io, vha, 0x300a,
857 " len =%d\n", __func__ , cur_dsd, i, LSD(sle_dma), 860 "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
858 MSD(sle_dma), sg_dma_len(sg))); 861 cur_dsd, i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
862 sp->cmd);
859 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 863 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
860 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 864 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
861 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 865 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
@@ -863,8 +867,8 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
863 867
864 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 868 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
865 cp = page_address(sg_page(sg)) + sg->offset; 869 cp = page_address(sg_page(sg)) + sg->offset;
866 DEBUG18(printk("%s(): User Data buffer= %p:\n", 870 ql_dbg(ql_dbg_io, vha, 0x300b,
867 __func__ , cp)); 871 "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
868 } 872 }
869 } 873 }
870 /* Null termination */ 874 /* Null termination */
@@ -888,7 +892,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
888 struct scsi_cmnd *cmd; 892 struct scsi_cmnd *cmd;
889 uint32_t *cur_dsd = dsd; 893 uint32_t *cur_dsd = dsd;
890 uint16_t used_dsds = tot_dsds; 894 uint16_t used_dsds = tot_dsds;
891 895 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
892 uint8_t *cp; 896 uint8_t *cp;
893 897
894 898
@@ -935,10 +939,11 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
935 } 939 }
936 sle_dma = sg_dma_address(sg); 940 sle_dma = sg_dma_address(sg);
937 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 941 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
938 DEBUG18(printk(KERN_DEBUG 942 ql_dbg(ql_dbg_io, vha, 0x3027,
939 "%s(): %p, sg entry %d - addr =0x%x" 943 "%s(): %p, sg_entry %d - "
940 "0x%x, len =%d\n", __func__ , cur_dsd, i, 944 "addr=0x%x0x%x, len=%d.\n",
941 LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg))); 945 __func__, cur_dsd, i,
946 LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
942 } 947 }
943 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 948 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
944 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 949 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
@@ -946,8 +951,9 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
946 951
947 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 952 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
948 cp = page_address(sg_page(sg)) + sg->offset; 953 cp = page_address(sg_page(sg)) + sg->offset;
949 DEBUG18(printk("%s(): Protection Data buffer = %p:\n", 954 ql_dbg(ql_dbg_io, vha, 0x3028,
950 __func__ , cp)); 955 "%s(): Protection Data buffer = %p.\n", __func__,
956 cp);
951 } 957 }
952 avail_dsds--; 958 avail_dsds--;
953 } 959 }
@@ -996,22 +1002,16 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
996 *((uint32_t *)(&cmd_pkt->entry_type)) = 1002 *((uint32_t *)(&cmd_pkt->entry_type)) =
997 __constant_cpu_to_le32(COMMAND_TYPE_CRC_2); 1003 __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
998 1004
1005 vha = sp->fcport->vha;
1006 ha = vha->hw;
1007
999 /* No data transfer */ 1008 /* No data transfer */
1000 data_bytes = scsi_bufflen(cmd); 1009 data_bytes = scsi_bufflen(cmd);
1001 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { 1010 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1002 DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
1003 __func__, data_bytes));
1004 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 1011 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1005 return QLA_SUCCESS; 1012 return QLA_SUCCESS;
1006 } 1013 }
1007 1014
1008 vha = sp->fcport->vha;
1009 ha = vha->hw;
1010
1011 DEBUG18(printk(KERN_DEBUG
1012 "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
1013 vha->host_no, sp, scsi_get_prot_op(sp->cmd)));
1014
1015 cmd_pkt->vp_index = sp->fcport->vp_idx; 1015 cmd_pkt->vp_index = sp->fcport->vp_idx;
1016 1016
1017 /* Set transfer direction */ 1017 /* Set transfer direction */
@@ -1056,8 +1056,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1056 1056
1057 /* Determine SCSI command length -- align to 4 byte boundary */ 1057 /* Determine SCSI command length -- align to 4 byte boundary */
1058 if (cmd->cmd_len > 16) { 1058 if (cmd->cmd_len > 16) {
1059 DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
1060 __func__));
1061 additional_fcpcdb_len = cmd->cmd_len - 16; 1059 additional_fcpcdb_len = cmd->cmd_len - 16;
1062 if ((cmd->cmd_len % 4) != 0) { 1060 if ((cmd->cmd_len % 4) != 0) {
1063 /* SCSI cmd > 16 bytes must be multiple of 4 */ 1061 /* SCSI cmd > 16 bytes must be multiple of 4 */
@@ -1108,11 +1106,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1108 1106
1109 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ 1107 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1110 1108
1111 DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
1112 "entries %d, data bytes %d, Protection entries %d\n",
1113 __func__, vha->host_no, tot_dsds, (tot_dsds-tot_prot_dsds),
1114 data_bytes, tot_prot_dsds));
1115
1116 /* Compute dif len and adjust data len to incude protection */ 1109 /* Compute dif len and adjust data len to incude protection */
1117 total_bytes = data_bytes; 1110 total_bytes = data_bytes;
1118 dif_bytes = 0; 1111 dif_bytes = 0;
@@ -1150,14 +1143,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1150 additional_fcpcdb_len); 1143 additional_fcpcdb_len);
1151 *fcp_dl = htonl(total_bytes); 1144 *fcp_dl = htonl(total_bytes);
1152 1145
1153 DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
1154 " = 0x%x (%d), dat block size =0x%x (%d)\n", __func__,
1155 vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
1156 crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));
1157
1158 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { 1146 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1159 DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
1160 __func__, data_bytes));
1161 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 1147 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1162 return QLA_SUCCESS; 1148 return QLA_SUCCESS;
1163 } 1149 }
@@ -1182,8 +1168,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1182 return QLA_SUCCESS; 1168 return QLA_SUCCESS;
1183 1169
1184crc_queuing_error: 1170crc_queuing_error:
1185 DEBUG18(qla_printk(KERN_INFO, ha,
1186 "CMD sent FAILED crc_q error:sp = %p\n", sp));
1187 /* Cleanup will be performed by the caller */ 1171 /* Cleanup will be performed by the caller */
1188 1172
1189 return QLA_FUNCTION_FAILED; 1173 return QLA_FUNCTION_FAILED;
@@ -1225,8 +1209,8 @@ qla24xx_start_scsi(srb_t *sp)
1225 1209
1226 /* Send marker if required */ 1210 /* Send marker if required */
1227 if (vha->marker_needed != 0) { 1211 if (vha->marker_needed != 0) {
1228 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) 1212 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1229 != QLA_SUCCESS) 1213 QLA_SUCCESS)
1230 return QLA_FUNCTION_FAILED; 1214 return QLA_FUNCTION_FAILED;
1231 vha->marker_needed = 0; 1215 vha->marker_needed = 0;
1232 } 1216 }
@@ -1243,8 +1227,9 @@ qla24xx_start_scsi(srb_t *sp)
1243 if (!req->outstanding_cmds[handle]) 1227 if (!req->outstanding_cmds[handle])
1244 break; 1228 break;
1245 } 1229 }
1246 if (index == MAX_OUTSTANDING_COMMANDS) 1230 if (index == MAX_OUTSTANDING_COMMANDS) {
1247 goto queuing_error; 1231 goto queuing_error;
1232 }
1248 1233
1249 /* Map the sg table so we have an accurate count of sg entries needed */ 1234 /* Map the sg table so we have an accurate count of sg entries needed */
1250 if (scsi_sg_count(cmd)) { 1235 if (scsi_sg_count(cmd)) {
@@ -1256,8 +1241,7 @@ qla24xx_start_scsi(srb_t *sp)
1256 nseg = 0; 1241 nseg = 0;
1257 1242
1258 tot_dsds = nseg; 1243 tot_dsds = nseg;
1259 1244 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1260 req_cnt = qla24xx_calc_iocbs(tot_dsds);
1261 if (req->cnt < (req_cnt + 2)) { 1245 if (req->cnt < (req_cnt + 2)) {
1262 cnt = RD_REG_DWORD_RELAXED(req->req_q_out); 1246 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1263 1247
@@ -1322,7 +1306,6 @@ qla24xx_start_scsi(srb_t *sp)
1322 /* Specify response queue number where completion should happen */ 1306 /* Specify response queue number where completion should happen */
1323 cmd_pkt->entry_status = (uint8_t) rsp->id; 1307 cmd_pkt->entry_status = (uint8_t) rsp->id;
1324 wmb(); 1308 wmb();
1325
1326 /* Adjust ring index. */ 1309 /* Adjust ring index. */
1327 req->ring_index++; 1310 req->ring_index++;
1328 if (req->ring_index == req->length) { 1311 if (req->ring_index == req->length) {
@@ -1534,9 +1517,6 @@ queuing_error:
1534 /* Cleanup will be performed by the caller (queuecommand) */ 1517 /* Cleanup will be performed by the caller (queuecommand) */
1535 1518
1536 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1519 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1537
1538 DEBUG18(qla_printk(KERN_INFO, ha,
1539 "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
1540 return QLA_FUNCTION_FAILED; 1520 return QLA_FUNCTION_FAILED;
1541} 1521}
1542 1522
@@ -1581,8 +1561,11 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1581 if (!req->outstanding_cmds[handle]) 1561 if (!req->outstanding_cmds[handle])
1582 break; 1562 break;
1583 } 1563 }
1584 if (index == MAX_OUTSTANDING_COMMANDS) 1564 if (index == MAX_OUTSTANDING_COMMANDS) {
1565 ql_log(ql_log_warn, vha, 0x700b,
1566 "No room on oustanding cmd array.\n");
1585 goto queuing_error; 1567 goto queuing_error;
1568 }
1586 1569
1587 /* Prep command array. */ 1570 /* Prep command array. */
1588 req->current_outstanding_cmd = handle; 1571 req->current_outstanding_cmd = handle;
@@ -1999,8 +1982,11 @@ qla2x00_start_sp(srb_t *sp)
1999 rval = QLA_FUNCTION_FAILED; 1982 rval = QLA_FUNCTION_FAILED;
2000 spin_lock_irqsave(&ha->hardware_lock, flags); 1983 spin_lock_irqsave(&ha->hardware_lock, flags);
2001 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp); 1984 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2002 if (!pkt) 1985 if (!pkt) {
1986 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
1987 "qla2x00_alloc_iocbs failed.\n");
2003 goto done; 1988 goto done;
1989 }
2004 1990
2005 rval = QLA_SUCCESS; 1991 rval = QLA_SUCCESS;
2006 switch (ctx->type) { 1992 switch (ctx->type) {
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ae8e298746ba..b16b7725dee0 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -45,7 +45,7 @@ qla2100_intr_handler(int irq, void *dev_id)
45 rsp = (struct rsp_que *) dev_id; 45 rsp = (struct rsp_que *) dev_id;
46 if (!rsp) { 46 if (!rsp) {
47 printk(KERN_INFO 47 printk(KERN_INFO
48 "%s(): NULL response queue pointer\n", __func__); 48 "%s(): NULL response queue pointer.\n", __func__);
49 return (IRQ_NONE); 49 return (IRQ_NONE);
50 } 50 }
51 51
@@ -91,9 +91,9 @@ qla2100_intr_handler(int irq, void *dev_id)
91 qla2x00_async_event(vha, rsp, mb); 91 qla2x00_async_event(vha, rsp, mb);
92 } else { 92 } else {
93 /*EMPTY*/ 93 /*EMPTY*/
94 DEBUG2(printk("scsi(%ld): Unrecognized " 94 ql_dbg(ql_dbg_async, vha, 0x5025,
95 "interrupt type (%d).\n", 95 "Unrecognized interrupt type (%d).\n",
96 vha->host_no, mb[0])); 96 mb[0]);
97 } 97 }
98 /* Release mailbox registers. */ 98 /* Release mailbox registers. */
99 WRT_REG_WORD(&reg->semaphore, 0); 99 WRT_REG_WORD(&reg->semaphore, 0);
@@ -142,7 +142,7 @@ qla2300_intr_handler(int irq, void *dev_id)
142 rsp = (struct rsp_que *) dev_id; 142 rsp = (struct rsp_que *) dev_id;
143 if (!rsp) { 143 if (!rsp) {
144 printk(KERN_INFO 144 printk(KERN_INFO
145 "%s(): NULL response queue pointer\n", __func__); 145 "%s(): NULL response queue pointer.\n", __func__);
146 return (IRQ_NONE); 146 return (IRQ_NONE);
147 } 147 }
148 148
@@ -160,11 +160,13 @@ qla2300_intr_handler(int irq, void *dev_id)
160 160
161 hccr = RD_REG_WORD(&reg->hccr); 161 hccr = RD_REG_WORD(&reg->hccr);
162 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) 162 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
163 qla_printk(KERN_INFO, ha, "Parity error -- " 163 ql_log(ql_log_warn, vha, 0x5026,
164 "HCCR=%x, Dumping firmware!\n", hccr); 164 "Parity error -- HCCR=%x, Dumping "
165 "firmware.\n", hccr);
165 else 166 else
166 qla_printk(KERN_INFO, ha, "RISC paused -- " 167 ql_log(ql_log_warn, vha, 0x5027,
167 "HCCR=%x, Dumping firmware!\n", hccr); 168 "RISC paused -- HCCR=%x, Dumping "
169 "firmware.\n", hccr);
168 170
169 /* 171 /*
170 * Issue a "HARD" reset in order for the RISC 172 * Issue a "HARD" reset in order for the RISC
@@ -213,9 +215,8 @@ qla2300_intr_handler(int irq, void *dev_id)
213 qla2x00_async_event(vha, rsp, mb); 215 qla2x00_async_event(vha, rsp, mb);
214 break; 216 break;
215 default: 217 default:
216 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 218 ql_dbg(ql_dbg_async, vha, 0x5028,
217 "(%d).\n", 219 "Unrecognized interrupt type (%d).\n", stat & 0xff);
218 vha->host_no, stat & 0xff));
219 break; 220 break;
220 } 221 }
221 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 222 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
@@ -262,11 +263,11 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
262 } 263 }
263 264
264 if (ha->mcp) { 265 if (ha->mcp) {
265 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", 266 ql_dbg(ql_dbg_async, vha, 0x5000,
266 __func__, vha->host_no, ha->mcp->mb[0])); 267 "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]);
267 } else { 268 } else {
268 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", 269 ql_dbg(ql_dbg_async, vha, 0x5001,
269 __func__, vha->host_no)); 270 "MBX pointer ERROR.\n");
270 } 271 }
271} 272}
272 273
@@ -285,22 +286,24 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
285 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) 286 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
286 mb[cnt] = RD_REG_WORD(wptr); 287 mb[cnt] = RD_REG_WORD(wptr);
287 288
288 DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- " 289 ql_dbg(ql_dbg_async, vha, 0x5021,
289 "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no, 290 "Inter-Driver Commucation %s -- "
290 event[aen & 0xff], 291 "%04x %04x %04x %04x %04x %04x %04x.\n",
291 mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6])); 292 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
293 mb[4], mb[5], mb[6]);
292 294
293 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 295 /* Acknowledgement needed? [Notify && non-zero timeout]. */
294 timeout = (descr >> 8) & 0xf; 296 timeout = (descr >> 8) & 0xf;
295 if (aen != MBA_IDC_NOTIFY || !timeout) 297 if (aen != MBA_IDC_NOTIFY || !timeout)
296 return; 298 return;
297 299
298 DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- " 300 ql_dbg(ql_dbg_async, vha, 0x5022,
299 "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout)); 301 "Inter-Driver Commucation %s -- ACK timeout=%d.\n",
302 vha->host_no, event[aen & 0xff], timeout);
300 303
301 rval = qla2x00_post_idc_ack_work(vha, mb); 304 rval = qla2x00_post_idc_ack_work(vha, mb);
302 if (rval != QLA_SUCCESS) 305 if (rval != QLA_SUCCESS)
303 qla_printk(KERN_WARNING, vha->hw, 306 ql_log(ql_log_warn, vha, 0x5023,
304 "IDC failed to post ACK.\n"); 307 "IDC failed to post ACK.\n");
305} 308}
306 309
@@ -393,15 +396,15 @@ skip_rio:
393 break; 396 break;
394 397
395 case MBA_RESET: /* Reset */ 398 case MBA_RESET: /* Reset */
396 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", 399 ql_dbg(ql_dbg_async, vha, 0x5002,
397 vha->host_no)); 400 "Asynchronous RESET.\n");
398 401
399 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 402 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
400 break; 403 break;
401 404
402 case MBA_SYSTEM_ERR: /* System Error */ 405 case MBA_SYSTEM_ERR: /* System Error */
403 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0; 406 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
404 qla_printk(KERN_INFO, ha, 407 ql_log(ql_log_warn, vha, 0x5003,
405 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " 408 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
406 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); 409 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
407 410
@@ -409,7 +412,7 @@ skip_rio:
409 412
410 if (IS_FWI2_CAPABLE(ha)) { 413 if (IS_FWI2_CAPABLE(ha)) {
411 if (mb[1] == 0 && mb[2] == 0) { 414 if (mb[1] == 0 && mb[2] == 0) {
412 qla_printk(KERN_ERR, ha, 415 ql_log(ql_log_fatal, vha, 0x5004,
413 "Unrecoverable Hardware Error: adapter " 416 "Unrecoverable Hardware Error: adapter "
414 "marked OFFLINE!\n"); 417 "marked OFFLINE!\n");
415 vha->flags.online = 0; 418 vha->flags.online = 0;
@@ -422,7 +425,7 @@ skip_rio:
422 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 425 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
423 } 426 }
424 } else if (mb[1] == 0) { 427 } else if (mb[1] == 0) {
425 qla_printk(KERN_INFO, ha, 428 ql_log(ql_log_fatal, vha, 0x5005,
426 "Unrecoverable Hardware Error: adapter marked " 429 "Unrecoverable Hardware Error: adapter marked "
427 "OFFLINE!\n"); 430 "OFFLINE!\n");
428 vha->flags.online = 0; 431 vha->flags.online = 0;
@@ -431,31 +434,27 @@ skip_rio:
431 break; 434 break;
432 435
433 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 436 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
434 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error (%x).\n", 437 ql_log(ql_log_warn, vha, 0x5006,
435 vha->host_no, mb[1])); 438 "ISP Request Transfer Error (%x).\n", mb[1]);
436 qla_printk(KERN_WARNING, ha,
437 "ISP Request Transfer Error (%x).\n", mb[1]);
438 439
439 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 440 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
440 break; 441 break;
441 442
442 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 443 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
443 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n", 444 ql_log(ql_log_warn, vha, 0x5007,
444 vha->host_no)); 445 "ISP Response Transfer Error.\n");
445 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
446 446
447 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 447 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
448 break; 448 break;
449 449
450 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 450 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
451 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n", 451 ql_dbg(ql_dbg_async, vha, 0x5008,
452 vha->host_no)); 452 "Asynchronous WAKEUP_THRES.\n");
453 break; 453 break;
454 454
455 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 455 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
456 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no, 456 ql_log(ql_log_info, vha, 0x5009,
457 mb[1])); 457 "LIP occurred (%x).\n", mb[1]);
458 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
459 458
460 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 459 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
461 atomic_set(&vha->loop_state, LOOP_DOWN); 460 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -488,10 +487,8 @@ skip_rio:
488 ha->link_data_rate = mb[1]; 487 ha->link_data_rate = mb[1];
489 } 488 }
490 489
491 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n", 490 ql_log(ql_log_info, vha, 0x500a,
492 vha->host_no, link_speed)); 491 "LOOP UP detected (%s Gbps).\n", link_speed);
493 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
494 link_speed);
495 492
496 vha->flags.management_server_logged_in = 0; 493 vha->flags.management_server_logged_in = 0;
497 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 494 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@@ -500,12 +497,9 @@ skip_rio:
500 case MBA_LOOP_DOWN: /* Loop Down Event */ 497 case MBA_LOOP_DOWN: /* Loop Down Event */
501 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0; 498 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
502 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx; 499 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
503 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " 500 ql_log(ql_log_info, vha, 0x500b,
504 "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3], 501 "LOOP DOWN detected (%x %x %x %x).\n",
505 mbx)); 502 mb[1], mb[2], mb[3], mbx);
506 qla_printk(KERN_INFO, ha,
507 "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3],
508 mbx);
509 503
510 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 504 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
511 atomic_set(&vha->loop_state, LOOP_DOWN); 505 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -525,9 +519,7 @@ skip_rio:
525 break; 519 break;
526 520
527 case MBA_LIP_RESET: /* LIP reset occurred */ 521 case MBA_LIP_RESET: /* LIP reset occurred */
528 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", 522 ql_log(ql_log_info, vha, 0x500c,
529 vha->host_no, mb[1]));
530 qla_printk(KERN_INFO, ha,
531 "LIP reset occurred (%x).\n", mb[1]); 523 "LIP reset occurred (%x).\n", mb[1]);
532 524
533 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 525 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -554,14 +546,15 @@ skip_rio:
554 break; 546 break;
555 547
556 if (IS_QLA8XXX_TYPE(ha)) { 548 if (IS_QLA8XXX_TYPE(ha)) {
557 DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x " 549 ql_dbg(ql_dbg_async, vha, 0x500d,
558 "%04x\n", vha->host_no, mb[1], mb[2], mb[3])); 550 "DCBX Completed -- %04x %04x %04x.\n",
551 mb[1], mb[2], mb[3]);
559 if (ha->notify_dcbx_comp) 552 if (ha->notify_dcbx_comp)
560 complete(&ha->dcbx_comp); 553 complete(&ha->dcbx_comp);
561 554
562 } else 555 } else
563 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE " 556 ql_dbg(ql_dbg_async, vha, 0x500e,
564 "received.\n", vha->host_no)); 557 "Asynchronous P2P MODE received.\n");
565 558
566 /* 559 /*
567 * Until there's a transition from loop down to loop up, treat 560 * Until there's a transition from loop down to loop up, treat
@@ -594,10 +587,7 @@ skip_rio:
594 if (IS_QLA2100(ha)) 587 if (IS_QLA2100(ha))
595 break; 588 break;
596 589
597 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection " 590 ql_log(ql_log_info, vha, 0x500f,
598 "received.\n",
599 vha->host_no));
600 qla_printk(KERN_INFO, ha,
601 "Configuration change detected: value=%x.\n", mb[1]); 591 "Configuration change detected: value=%x.\n", mb[1]);
602 592
603 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 593 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -640,11 +630,9 @@ skip_rio:
640 630
641 /* Global event -- port logout or port unavailable. */ 631 /* Global event -- port logout or port unavailable. */
642 if (mb[1] == 0xffff && mb[2] == 0x7) { 632 if (mb[1] == 0xffff && mb[2] == 0x7) {
643 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", 633 ql_dbg(ql_dbg_async, vha, 0x5010,
644 vha->host_no)); 634 "Port unavailable %04x %04x %04x.\n",
645 DEBUG(printk(KERN_INFO 635 mb[1], mb[2], mb[3]);
646 "scsi(%ld): Port unavailable %04x %04x %04x.\n",
647 vha->host_no, mb[1], mb[2], mb[3]));
648 636
649 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 637 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
650 atomic_set(&vha->loop_state, LOOP_DOWN); 638 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -674,17 +662,15 @@ skip_rio:
674 atomic_set(&vha->loop_down_timer, 0); 662 atomic_set(&vha->loop_down_timer, 0);
675 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 663 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
676 atomic_read(&vha->loop_state) != LOOP_DEAD) { 664 atomic_read(&vha->loop_state) != LOOP_DEAD) {
677 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE " 665 ql_dbg(ql_dbg_async, vha, 0x5011,
678 "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1], 666 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
679 mb[2], mb[3])); 667 mb[1], mb[2], mb[3]);
680 break; 668 break;
681 } 669 }
682 670
683 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", 671 ql_dbg(ql_dbg_async, vha, 0x5012,
684 vha->host_no)); 672 "Port database changed %04x %04x %04x.\n",
685 DEBUG(printk(KERN_INFO 673 mb[1], mb[2], mb[3]);
686 "scsi(%ld): Port database changed %04x %04x %04x.\n",
687 vha->host_no, mb[1], mb[2], mb[3]));
688 674
689 /* 675 /*
690 * Mark all devices as missing so we will login again. 676 * Mark all devices as missing so we will login again.
@@ -707,20 +693,17 @@ skip_rio:
707 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 693 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
708 break; 694 break;
709 695
710 DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", 696 ql_dbg(ql_dbg_async, vha, 0x5013,
711 vha->host_no)); 697 "RSCN database changed -- %04x %04x %04x.\n",
712 DEBUG(printk(KERN_INFO 698 mb[1], mb[2], mb[3]);
713 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
714 vha->host_no, mb[1], mb[2], mb[3]));
715 699
716 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 700 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
717 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 701 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
718 | vha->d_id.b.al_pa; 702 | vha->d_id.b.al_pa;
719 if (rscn_entry == host_pid) { 703 if (rscn_entry == host_pid) {
720 DEBUG(printk(KERN_INFO 704 ql_dbg(ql_dbg_async, vha, 0x5014,
721 "scsi(%ld): Ignoring RSCN update to local host " 705 "Ignoring RSCN update to local host "
722 "port ID (%06x)\n", 706 "port ID (%06x).\n", host_pid);
723 vha->host_no, host_pid));
724 break; 707 break;
725 } 708 }
726 709
@@ -747,8 +730,8 @@ skip_rio:
747 730
748 /* case MBA_RIO_RESPONSE: */ 731 /* case MBA_RIO_RESPONSE: */
749 case MBA_ZIO_RESPONSE: 732 case MBA_ZIO_RESPONSE:
750 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n", 733 ql_dbg(ql_dbg_async, vha, 0x5015,
751 vha->host_no)); 734 "[R|Z]IO update completion.\n");
752 735
753 if (IS_FWI2_CAPABLE(ha)) 736 if (IS_FWI2_CAPABLE(ha))
754 qla24xx_process_response_queue(vha, rsp); 737 qla24xx_process_response_queue(vha, rsp);
@@ -757,61 +740,68 @@ skip_rio:
757 break; 740 break;
758 741
759 case MBA_DISCARD_RND_FRAME: 742 case MBA_DISCARD_RND_FRAME:
760 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " 743 ql_dbg(ql_dbg_async, vha, 0x5016,
761 "%04x.\n", vha->host_no, mb[1], mb[2], mb[3])); 744 "Discard RND Frame -- %04x %04x %04x.\n",
745 mb[1], mb[2], mb[3]);
762 break; 746 break;
763 747
764 case MBA_TRACE_NOTIFICATION: 748 case MBA_TRACE_NOTIFICATION:
765 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", 749 ql_dbg(ql_dbg_async, vha, 0x5017,
766 vha->host_no, mb[1], mb[2])); 750 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
767 break; 751 break;
768 752
769 case MBA_ISP84XX_ALERT: 753 case MBA_ISP84XX_ALERT:
770 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- " 754 ql_dbg(ql_dbg_async, vha, 0x5018,
771 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); 755 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
756 mb[1], mb[2], mb[3]);
772 757
773 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 758 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
774 switch (mb[1]) { 759 switch (mb[1]) {
775 case A84_PANIC_RECOVERY: 760 case A84_PANIC_RECOVERY:
776 qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery " 761 ql_log(ql_log_info, vha, 0x5019,
777 "%04x %04x\n", mb[2], mb[3]); 762 "Alert 84XX: panic recovery %04x %04x.\n",
763 mb[2], mb[3]);
778 break; 764 break;
779 case A84_OP_LOGIN_COMPLETE: 765 case A84_OP_LOGIN_COMPLETE:
780 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 766 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
781 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:" 767 ql_log(ql_log_info, vha, 0x501a,
782 "firmware version %x\n", ha->cs84xx->op_fw_version)); 768 "Alert 84XX: firmware version %x.\n",
769 ha->cs84xx->op_fw_version);
783 break; 770 break;
784 case A84_DIAG_LOGIN_COMPLETE: 771 case A84_DIAG_LOGIN_COMPLETE:
785 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 772 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
786 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:" 773 ql_log(ql_log_info, vha, 0x501b,
787 "diagnostic firmware version %x\n", 774 "Alert 84XX: diagnostic firmware version %x.\n",
788 ha->cs84xx->diag_fw_version)); 775 ha->cs84xx->diag_fw_version);
789 break; 776 break;
790 case A84_GOLD_LOGIN_COMPLETE: 777 case A84_GOLD_LOGIN_COMPLETE:
791 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 778 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
792 ha->cs84xx->fw_update = 1; 779 ha->cs84xx->fw_update = 1;
793 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold " 780 ql_log(ql_log_info, vha, 0x501c,
794 "firmware version %x\n", 781 "Alert 84XX: gold firmware version %x.\n",
795 ha->cs84xx->gold_fw_version)); 782 ha->cs84xx->gold_fw_version);
796 break; 783 break;
797 default: 784 default:
798 qla_printk(KERN_ERR, ha, 785 ql_log(ql_log_warn, vha, 0x501d,
799 "Alert 84xx: Invalid Alert %04x %04x %04x\n", 786 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
800 mb[1], mb[2], mb[3]); 787 mb[1], mb[2], mb[3]);
801 } 788 }
802 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 789 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
803 break; 790 break;
804 case MBA_DCBX_START: 791 case MBA_DCBX_START:
805 DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n", 792 ql_dbg(ql_dbg_async, vha, 0x501e,
806 vha->host_no, mb[1], mb[2], mb[3])); 793 "DCBX Started -- %04x %04x %04x.\n",
794 mb[1], mb[2], mb[3]);
807 break; 795 break;
808 case MBA_DCBX_PARAM_UPDATE: 796 case MBA_DCBX_PARAM_UPDATE:
809 DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- " 797 ql_dbg(ql_dbg_async, vha, 0x501f,
810 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); 798 "DCBX Parameters Updated -- %04x %04x %04x.\n",
799 mb[1], mb[2], mb[3]);
811 break; 800 break;
812 case MBA_FCF_CONF_ERR: 801 case MBA_FCF_CONF_ERR:
813 DEBUG2(printk("scsi(%ld): FCF Configuration Error -- " 802 ql_dbg(ql_dbg_async, vha, 0x5020,
814 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); 803 "FCF Configuration Error -- %04x %04x %04x.\n",
804 mb[1], mb[2], mb[3]);
815 break; 805 break;
816 case MBA_IDC_COMPLETE: 806 case MBA_IDC_COMPLETE:
817 case MBA_IDC_NOTIFY: 807 case MBA_IDC_NOTIFY:
@@ -838,10 +828,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
838 828
839 /* Validate handle. */ 829 /* Validate handle. */
840 if (index >= MAX_OUTSTANDING_COMMANDS) { 830 if (index >= MAX_OUTSTANDING_COMMANDS) {
841 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n", 831 ql_log(ql_log_warn, vha, 0x3014,
842 vha->host_no, index)); 832 "Invalid SCSI command index (%x).\n", index);
843 qla_printk(KERN_WARNING, ha,
844 "Invalid SCSI completion handle %d.\n", index);
845 833
846 if (IS_QLA82XX(ha)) 834 if (IS_QLA82XX(ha))
847 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 835 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -859,10 +847,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
859 sp->cmd->result = DID_OK << 16; 847 sp->cmd->result = DID_OK << 16;
860 qla2x00_sp_compl(ha, sp); 848 qla2x00_sp_compl(ha, sp);
861 } else { 849 } else {
862 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion" 850 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
863 " handle(0x%x)\n", vha->host_no, req->id, index));
864 qla_printk(KERN_WARNING, ha,
865 "Invalid ISP SCSI completion handle\n");
866 851
867 if (IS_QLA82XX(ha)) 852 if (IS_QLA82XX(ha))
868 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 853 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -882,8 +867,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
882 867
883 index = LSW(pkt->handle); 868 index = LSW(pkt->handle);
884 if (index >= MAX_OUTSTANDING_COMMANDS) { 869 if (index >= MAX_OUTSTANDING_COMMANDS) {
885 qla_printk(KERN_WARNING, ha, 870 ql_log(ql_log_warn, vha, 0x5031,
886 "%s: Invalid completion handle (%x).\n", func, index); 871 "Invalid command index (%x).\n", index);
887 if (IS_QLA82XX(ha)) 872 if (IS_QLA82XX(ha))
888 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 873 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
889 else 874 else
@@ -892,15 +877,13 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
892 } 877 }
893 sp = req->outstanding_cmds[index]; 878 sp = req->outstanding_cmds[index];
894 if (!sp) { 879 if (!sp) {
895 qla_printk(KERN_WARNING, ha, 880 ql_log(ql_log_warn, vha, 0x5032,
896 "%s: Invalid completion handle (%x) -- timed-out.\n", func, 881 "Invalid completion handle (%x) -- timed-out.\n", index);
897 index);
898 return sp; 882 return sp;
899 } 883 }
900 if (sp->handle != index) { 884 if (sp->handle != index) {
901 qla_printk(KERN_WARNING, ha, 885 ql_log(ql_log_warn, vha, 0x5033,
902 "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle, 886 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
903 index);
904 return NULL; 887 return NULL;
905 } 888 }
906 889
@@ -937,17 +920,17 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
937 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 920 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
938 QLA_LOGIO_LOGIN_RETRIED : 0; 921 QLA_LOGIO_LOGIN_RETRIED : 0;
939 if (mbx->entry_status) { 922 if (mbx->entry_status) {
940 DEBUG2(printk(KERN_WARNING 923 ql_dbg(ql_dbg_async, vha, 0x5043,
941 "scsi(%ld:%x): Async-%s error entry - portid=%02x%02x%02x " 924 "Async-%s error entry - portid=%02x%02x%02x "
942 "entry-status=%x status=%x state-flag=%x " 925 "entry-status=%x status=%x state-flag=%x "
943 "status-flags=%x.\n", 926 "status-flags=%x.\n",
944 fcport->vha->host_no, sp->handle, type, 927 type, fcport->d_id.b.domain, fcport->d_id.b.area,
945 fcport->d_id.b.domain, fcport->d_id.b.area,
946 fcport->d_id.b.al_pa, mbx->entry_status, 928 fcport->d_id.b.al_pa, mbx->entry_status,
947 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 929 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
948 le16_to_cpu(mbx->status_flags))); 930 le16_to_cpu(mbx->status_flags));
949 931
950 DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx))); 932 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057,
933 (uint8_t *)mbx, sizeof(*mbx));
951 934
952 goto logio_done; 935 goto logio_done;
953 } 936 }
@@ -957,12 +940,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
957 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 940 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
958 status = 0; 941 status = 0;
959 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 942 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
960 DEBUG2(printk(KERN_DEBUG 943 ql_dbg(ql_dbg_async, vha, 0x5045,
961 "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x " 944 "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n",
962 "mbx1=%x.\n", 945 type, fcport->d_id.b.domain, fcport->d_id.b.area,
963 fcport->vha->host_no, sp->handle, type, 946 fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1));
964 fcport->d_id.b.domain, fcport->d_id.b.area,
965 fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1)));
966 947
967 data[0] = MBS_COMMAND_COMPLETE; 948 data[0] = MBS_COMMAND_COMPLETE;
968 if (ctx->type == SRB_LOGIN_CMD) { 949 if (ctx->type == SRB_LOGIN_CMD) {
@@ -987,14 +968,14 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
987 break; 968 break;
988 } 969 }
989 970
990 DEBUG2(printk(KERN_WARNING 971 ql_log(ql_log_warn, vha, 0x5046,
991 "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x status=%x " 972 "Async-%s failed - portid=%02x%02x%02x status=%x "
992 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", 973 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
993 fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain, 974 type, fcport->d_id.b.domain,
994 fcport->d_id.b.area, fcport->d_id.b.al_pa, status, 975 fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
995 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 976 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
996 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 977 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
997 le16_to_cpu(mbx->mb7))); 978 le16_to_cpu(mbx->mb7));
998 979
999logio_done: 980logio_done:
1000 lio->done(sp); 981 lio->done(sp);
@@ -1025,9 +1006,8 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1025 type = "ct pass-through"; 1006 type = "ct pass-through";
1026 break; 1007 break;
1027 default: 1008 default:
1028 qla_printk(KERN_WARNING, ha, 1009 ql_log(ql_log_warn, vha, 0x5047,
1029 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp, 1010 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
1030 sp_bsg->type);
1031 return; 1011 return;
1032 } 1012 }
1033 1013
@@ -1045,20 +1025,20 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1045 bsg_job->reply->reply_payload_rcv_len = 1025 bsg_job->reply->reply_payload_rcv_len =
1046 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); 1026 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1047 1027
1048 DEBUG2(qla_printk(KERN_WARNING, ha, 1028 ql_log(ql_log_warn, vha, 0x5048,
1049 "scsi(%ld): CT pass-through-%s error " 1029 "CT pass-through-%s error "
1050 "comp_status-status=0x%x total_byte = 0x%x.\n", 1030 "comp_status-status=0x%x total_byte = 0x%x.\n",
1051 vha->host_no, type, comp_status, 1031 type, comp_status,
1052 bsg_job->reply->reply_payload_rcv_len)); 1032 bsg_job->reply->reply_payload_rcv_len);
1053 } else { 1033 } else {
1054 DEBUG2(qla_printk(KERN_WARNING, ha, 1034 ql_log(ql_log_warn, vha, 0x5049,
1055 "scsi(%ld): CT pass-through-%s error " 1035 "CT pass-through-%s error "
1056 "comp_status-status=0x%x.\n", 1036 "comp_status-status=0x%x.\n", type, comp_status);
1057 vha->host_no, type, comp_status));
1058 bsg_job->reply->result = DID_ERROR << 16; 1037 bsg_job->reply->result = DID_ERROR << 16;
1059 bsg_job->reply->reply_payload_rcv_len = 0; 1038 bsg_job->reply->reply_payload_rcv_len = 0;
1060 } 1039 }
1061 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt))); 1040 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058,
1041 (uint8_t *)pkt, sizeof(*pkt));
1062 } else { 1042 } else {
1063 bsg_job->reply->result = DID_OK << 16; 1043 bsg_job->reply->result = DID_OK << 16;
1064 bsg_job->reply->reply_payload_rcv_len = 1044 bsg_job->reply->reply_payload_rcv_len =
@@ -1110,9 +1090,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1110 type = "ct pass-through"; 1090 type = "ct pass-through";
1111 break; 1091 break;
1112 default: 1092 default:
1113 qla_printk(KERN_WARNING, ha, 1093 ql_log(ql_log_warn, vha, 0x503e,
1114 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp, 1094 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
1115 sp_bsg->type);
1116 return; 1095 return;
1117 } 1096 }
1118 1097
@@ -1132,27 +1111,31 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1132 bsg_job->reply->reply_payload_rcv_len = 1111 bsg_job->reply->reply_payload_rcv_len =
1133 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count); 1112 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
1134 1113
1135 DEBUG2(qla_printk(KERN_WARNING, ha, 1114 ql_log(ql_log_info, vha, 0x503f,
1136 "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x " 1115 "ELS-CT pass-through-%s error comp_status-status=0x%x "
1137 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 1116 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1138 vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2], 1117 type, comp_status, fw_status[1], fw_status[2],
1139 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count))); 1118 le16_to_cpu(((struct els_sts_entry_24xx *)
1119 pkt)->total_byte_count));
1140 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1120 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1141 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1121 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1142 } 1122 }
1143 else { 1123 else {
1144 DEBUG2(qla_printk(KERN_WARNING, ha, 1124 ql_log(ql_log_info, vha, 0x5040,
1145 "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x " 1125 "ELS-CT pass-through-%s error comp_status-status=0x%x "
1146 "error subcode 1=0x%x error subcode 2=0x%x.\n", 1126 "error subcode 1=0x%x error subcode 2=0x%x.\n",
1147 vha->host_no, sp->handle, type, comp_status, 1127 type, comp_status,
1148 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1), 1128 le16_to_cpu(((struct els_sts_entry_24xx *)
1149 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2))); 1129 pkt)->error_subcode_1),
1130 le16_to_cpu(((struct els_sts_entry_24xx *)
1131 pkt)->error_subcode_2));
1150 bsg_job->reply->result = DID_ERROR << 16; 1132 bsg_job->reply->result = DID_ERROR << 16;
1151 bsg_job->reply->reply_payload_rcv_len = 0; 1133 bsg_job->reply->reply_payload_rcv_len = 0;
1152 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1134 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1153 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1135 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1154 } 1136 }
1155 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt))); 1137 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056,
1138 (uint8_t *)pkt, sizeof(*pkt));
1156 } 1139 }
1157 else { 1140 else {
1158 bsg_job->reply->result = DID_OK << 16; 1141 bsg_job->reply->result = DID_OK << 16;
@@ -1201,25 +1184,24 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1201 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1184 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1202 QLA_LOGIO_LOGIN_RETRIED : 0; 1185 QLA_LOGIO_LOGIN_RETRIED : 0;
1203 if (logio->entry_status) { 1186 if (logio->entry_status) {
1204 DEBUG2(printk(KERN_WARNING 1187 ql_log(ql_log_warn, vha, 0x5034,
1205 "scsi(%ld:%x): Async-%s error entry - " 1188 "Async-%s error entry - "
1206 "portid=%02x%02x%02x entry-status=%x.\n", 1189 "portid=%02x%02x%02x entry-status=%x.\n",
1207 fcport->vha->host_no, sp->handle, type, 1190 type, fcport->d_id.b.domain, fcport->d_id.b.area,
1208 fcport->d_id.b.domain, fcport->d_id.b.area, 1191 fcport->d_id.b.al_pa, logio->entry_status);
1209 fcport->d_id.b.al_pa, logio->entry_status)); 1192 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059,
1210 DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio))); 1193 (uint8_t *)logio, sizeof(*logio));
1211 1194
1212 goto logio_done; 1195 goto logio_done;
1213 } 1196 }
1214 1197
1215 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1198 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1216 DEBUG2(printk(KERN_DEBUG 1199 ql_dbg(ql_dbg_async, vha, 0x5036,
1217 "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x " 1200 "Async-%s complete - portid=%02x%02x%02x "
1218 "iop0=%x.\n", 1201 "iop0=%x.\n",
1219 fcport->vha->host_no, sp->handle, type, 1202 type, fcport->d_id.b.domain, fcport->d_id.b.area,
1220 fcport->d_id.b.domain, fcport->d_id.b.area,
1221 fcport->d_id.b.al_pa, 1203 fcport->d_id.b.al_pa,
1222 le32_to_cpu(logio->io_parameter[0]))); 1204 le32_to_cpu(logio->io_parameter[0]));
1223 1205
1224 data[0] = MBS_COMMAND_COMPLETE; 1206 data[0] = MBS_COMMAND_COMPLETE;
1225 if (ctx->type != SRB_LOGIN_CMD) 1207 if (ctx->type != SRB_LOGIN_CMD)
@@ -1256,14 +1238,14 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1256 break; 1238 break;
1257 } 1239 }
1258 1240
1259 DEBUG2(printk(KERN_WARNING 1241 ql_dbg(ql_dbg_async, vha, 0x5037,
1260 "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x comp=%x " 1242 "Async-%s failed - portid=%02x%02x%02x comp=%x "
1261 "iop0=%x iop1=%x.\n", 1243 "iop0=%x iop1=%x.\n",
1262 fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain, 1244 type, fcport->d_id.b.domain,
1263 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1245 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1264 le16_to_cpu(logio->comp_status), 1246 le16_to_cpu(logio->comp_status),
1265 le32_to_cpu(logio->io_parameter[0]), 1247 le32_to_cpu(logio->io_parameter[0]),
1266 le32_to_cpu(logio->io_parameter[1]))); 1248 le32_to_cpu(logio->io_parameter[1]));
1267 1249
1268logio_done: 1250logio_done:
1269 lio->done(sp); 1251 lio->done(sp);
@@ -1292,38 +1274,34 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1292 fcport = sp->fcport; 1274 fcport = sp->fcport;
1293 1275
1294 if (sts->entry_status) { 1276 if (sts->entry_status) {
1295 DEBUG2(printk(KERN_WARNING 1277 ql_log(ql_log_warn, vha, 0x5038,
1296 "scsi(%ld:%x): Async-%s error - entry-status(%x).\n", 1278 "Async-%s error - entry-status(%x).\n",
1297 fcport->vha->host_no, sp->handle, type, 1279 type, sts->entry_status);
1298 sts->entry_status));
1299 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1280 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1300 DEBUG2(printk(KERN_WARNING 1281 ql_log(ql_log_warn, vha, 0x5039,
1301 "scsi(%ld:%x): Async-%s error - completion status(%x).\n", 1282 "Async-%s error - completion status(%x).\n",
1302 fcport->vha->host_no, sp->handle, type, 1283 type, sts->comp_status);
1303 sts->comp_status));
1304 } else if (!(le16_to_cpu(sts->scsi_status) & 1284 } else if (!(le16_to_cpu(sts->scsi_status) &
1305 SS_RESPONSE_INFO_LEN_VALID)) { 1285 SS_RESPONSE_INFO_LEN_VALID)) {
1306 DEBUG2(printk(KERN_WARNING 1286 ql_log(ql_log_warn, vha, 0x503a,
1307 "scsi(%ld:%x): Async-%s error - no response info(%x).\n", 1287 "Async-%s error - no response info(%x).\n",
1308 fcport->vha->host_no, sp->handle, type, 1288 type, sts->scsi_status);
1309 sts->scsi_status));
1310 } else if (le32_to_cpu(sts->rsp_data_len) < 4) { 1289 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
1311 DEBUG2(printk(KERN_WARNING 1290 ql_log(ql_log_warn, vha, 0x503b,
1312 "scsi(%ld:%x): Async-%s error - not enough response(%d).\n", 1291 "Async-%s error - not enough response(%d).\n",
1313 fcport->vha->host_no, sp->handle, type, 1292 type, sts->rsp_data_len);
1314 sts->rsp_data_len));
1315 } else if (sts->data[3]) { 1293 } else if (sts->data[3]) {
1316 DEBUG2(printk(KERN_WARNING 1294 ql_log(ql_log_warn, vha, 0x503c,
1317 "scsi(%ld:%x): Async-%s error - response(%x).\n", 1295 "Async-%s error - response(%x).\n",
1318 fcport->vha->host_no, sp->handle, type, 1296 type, sts->data[3]);
1319 sts->data[3]));
1320 } else { 1297 } else {
1321 error = 0; 1298 error = 0;
1322 } 1299 }
1323 1300
1324 if (error) { 1301 if (error) {
1325 iocb->u.tmf.data = error; 1302 iocb->u.tmf.data = error;
1326 DEBUG2(qla2x00_dump_buffer((uint8_t *)sts, sizeof(*sts))); 1303 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1304 (uint8_t *)sts, sizeof(*sts));
1327 } 1305 }
1328 1306
1329 iocb->done(sp); 1307 iocb->done(sp);
@@ -1360,8 +1338,8 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
1360 } 1338 }
1361 1339
1362 if (pkt->entry_status != 0) { 1340 if (pkt->entry_status != 0) {
1363 DEBUG3(printk(KERN_INFO 1341 ql_log(ql_log_warn, vha, 0x5035,
1364 "scsi(%ld): Process error entry.\n", vha->host_no)); 1342 "Process error entry.\n");
1365 1343
1366 qla2x00_error_entry(vha, rsp, pkt); 1344 qla2x00_error_entry(vha, rsp, pkt);
1367 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1345 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1399,10 +1377,10 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
1399 break; 1377 break;
1400 default: 1378 default:
1401 /* Type Not Supported. */ 1379 /* Type Not Supported. */
1402 DEBUG4(printk(KERN_WARNING 1380 ql_log(ql_log_warn, vha, 0x504a,
1403 "scsi(%ld): Received unknown response pkt type %x " 1381 "Received unknown response pkt type %x "
1404 "entry status=%x.\n", 1382 "entry status=%x.\n",
1405 vha->host_no, pkt->entry_type, pkt->entry_status)); 1383 pkt->entry_type, pkt->entry_status);
1406 break; 1384 break;
1407 } 1385 }
1408 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1386 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1418,6 +1396,7 @@ static inline void
1418qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 1396qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1419 uint32_t sense_len, struct rsp_que *rsp) 1397 uint32_t sense_len, struct rsp_que *rsp)
1420{ 1398{
1399 struct scsi_qla_host *vha = sp->fcport->vha;
1421 struct scsi_cmnd *cp = sp->cmd; 1400 struct scsi_cmnd *cp = sp->cmd;
1422 1401
1423 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 1402 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
@@ -1435,11 +1414,13 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1435 if (sp->request_sense_length != 0) 1414 if (sp->request_sense_length != 0)
1436 rsp->status_srb = sp; 1415 rsp->status_srb = sp;
1437 1416
1438 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " 1417 ql_dbg(ql_dbg_io, vha, 0x301c,
1439 "cmd=%p\n", __func__, sp->fcport->vha->host_no, 1418 "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n",
1440 cp->device->channel, cp->device->id, cp->device->lun, cp)); 1419 sp->fcport->vha->host_no, cp->device->channel, cp->device->id,
1420 cp->device->lun, cp);
1441 if (sense_len) 1421 if (sense_len)
1442 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len)); 1422 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1423 cp->sense_buffer, sense_len);
1443} 1424}
1444 1425
1445struct scsi_dif_tuple { 1426struct scsi_dif_tuple {
@@ -1457,6 +1438,7 @@ struct scsi_dif_tuple {
1457static inline void 1438static inline void
1458qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1439qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1459{ 1440{
1441 struct scsi_qla_host *vha = sp->fcport->vha;
1460 struct scsi_cmnd *cmd = sp->cmd; 1442 struct scsi_cmnd *cmd = sp->cmd;
1461 struct scsi_dif_tuple *ep = 1443 struct scsi_dif_tuple *ep =
1462 (struct scsi_dif_tuple *)&sts24->data[20]; 1444 (struct scsi_dif_tuple *)&sts24->data[20];
@@ -1473,15 +1455,15 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1473 e_guard = be16_to_cpu(ep->guard); 1455 e_guard = be16_to_cpu(ep->guard);
1474 a_guard = be16_to_cpu(ap->guard); 1456 a_guard = be16_to_cpu(ap->guard);
1475 1457
1476 DEBUG18(printk(KERN_DEBUG 1458 ql_dbg(ql_dbg_io, vha, 0x3023,
1477 "%s(): iocb(s) %p Returned STATUS\n", __func__, sts24)); 1459 "iocb(s) %p Returned STATUS.\n", sts24);
1478 1460
1479 DEBUG18(printk(KERN_ERR "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 1461 ql_dbg(ql_dbg_io, vha, 0x3024,
1462 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
1480 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 1463 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
1481 " tag=0x%x, act guard=0x%x, exp guard=0x%x\n", 1464 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
1482 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 1465 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1483 a_app_tag, e_app_tag, a_guard, e_guard)); 1466 a_app_tag, e_app_tag, a_guard, e_guard);
1484
1485 1467
1486 /* check guard */ 1468 /* check guard */
1487 if (e_guard != a_guard) { 1469 if (e_guard != a_guard) {
@@ -1569,9 +1551,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1569 sp = NULL; 1551 sp = NULL;
1570 1552
1571 if (sp == NULL) { 1553 if (sp == NULL) {
1572 qla_printk(KERN_WARNING, ha, 1554 ql_log(ql_log_warn, vha, 0x3017,
1573 "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no, 1555 "Invalid status handle (0x%x).\n", sts->handle);
1574 sts->handle);
1575 1556
1576 if (IS_QLA82XX(ha)) 1557 if (IS_QLA82XX(ha))
1577 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1558 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -1582,9 +1563,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1582 } 1563 }
1583 cp = sp->cmd; 1564 cp = sp->cmd;
1584 if (cp == NULL) { 1565 if (cp == NULL) {
1585 qla_printk(KERN_WARNING, ha, 1566 ql_log(ql_log_warn, vha, 0x3018,
1586 "scsi(%ld): Command already returned (0x%x/%p).\n", 1567 "Command already returned (0x%x/%p).\n",
1587 vha->host_no, sts->handle, sp); 1568 sts->handle, sp);
1588 1569
1589 return; 1570 return;
1590 } 1571 }
@@ -1629,10 +1610,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1629 par_sense_len -= rsp_info_len; 1610 par_sense_len -= rsp_info_len;
1630 } 1611 }
1631 if (rsp_info_len > 3 && rsp_info[3]) { 1612 if (rsp_info_len > 3 && rsp_info[3]) {
1632 DEBUG2(qla_printk(KERN_INFO, ha, 1613 ql_log(ql_log_warn, vha, 0x3019,
1633 "scsi(%ld:%d:%d): FCP I/O protocol failure " 1614 "FCP I/O protocol failure (0x%x/0x%x).\n",
1634 "(0x%x/0x%x).\n", vha->host_no, cp->device->id, 1615 rsp_info_len, rsp_info[3]);
1635 cp->device->lun, rsp_info_len, rsp_info[3]));
1636 1616
1637 cp->result = DID_BUS_BUSY << 16; 1617 cp->result = DID_BUS_BUSY << 16;
1638 goto out; 1618 goto out;
@@ -1661,11 +1641,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1661 if (!lscsi_status && 1641 if (!lscsi_status &&
1662 ((unsigned)(scsi_bufflen(cp) - resid) < 1642 ((unsigned)(scsi_bufflen(cp) - resid) <
1663 cp->underflow)) { 1643 cp->underflow)) {
1664 qla_printk(KERN_INFO, ha, 1644 ql_log(ql_log_warn, vha, 0x301a,
1665 "scsi(%ld:%d:%d): Mid-layer underflow " 1645 "Mid-layer underflow "
1666 "detected (0x%x of 0x%x bytes).\n", 1646 "detected (0x%x of 0x%x bytes).\n",
1667 vha->host_no, cp->device->id, 1647 resid, scsi_bufflen(cp));
1668 cp->device->lun, resid, scsi_bufflen(cp));
1669 1648
1670 cp->result = DID_ERROR << 16; 1649 cp->result = DID_ERROR << 16;
1671 break; 1650 break;
@@ -1674,9 +1653,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1674 cp->result = DID_OK << 16 | lscsi_status; 1653 cp->result = DID_OK << 16 | lscsi_status;
1675 1654
1676 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1655 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1677 DEBUG2(qla_printk(KERN_INFO, ha, 1656 ql_log(ql_log_warn, vha, 0x301b,
1678 "scsi(%ld:%d:%d) QUEUE FULL detected.\n", 1657 "QUEUE FULL detected.\n");
1679 vha->host_no, cp->device->id, cp->device->lun));
1680 break; 1658 break;
1681 } 1659 }
1682 logit = 0; 1660 logit = 0;
@@ -1697,11 +1675,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1697 scsi_set_resid(cp, resid); 1675 scsi_set_resid(cp, resid);
1698 if (scsi_status & SS_RESIDUAL_UNDER) { 1676 if (scsi_status & SS_RESIDUAL_UNDER) {
1699 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 1677 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
1700 DEBUG2(qla_printk(KERN_INFO, ha, 1678 ql_log(ql_log_warn, vha, 0x301d,
1701 "scsi(%ld:%d:%d) Dropped frame(s) detected " 1679 "Dropped frame(s) detected "
1702 "(0x%x of 0x%x bytes).\n", vha->host_no, 1680 "(0x%x of 0x%x bytes).\n",
1703 cp->device->id, cp->device->lun, resid, 1681 resid, scsi_bufflen(cp));
1704 scsi_bufflen(cp)));
1705 1682
1706 cp->result = DID_ERROR << 16 | lscsi_status; 1683 cp->result = DID_ERROR << 16 | lscsi_status;
1707 break; 1684 break;
@@ -1710,20 +1687,18 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1710 if (!lscsi_status && 1687 if (!lscsi_status &&
1711 ((unsigned)(scsi_bufflen(cp) - resid) < 1688 ((unsigned)(scsi_bufflen(cp) - resid) <
1712 cp->underflow)) { 1689 cp->underflow)) {
1713 qla_printk(KERN_INFO, ha, 1690 ql_log(ql_log_warn, vha, 0x301e,
1714 "scsi(%ld:%d:%d): Mid-layer underflow " 1691 "Mid-layer underflow "
1715 "detected (0x%x of 0x%x bytes).\n", 1692 "detected (0x%x of 0x%x bytes).\n",
1716 vha->host_no, cp->device->id, 1693 resid, scsi_bufflen(cp));
1717 cp->device->lun, resid, scsi_bufflen(cp));
1718 1694
1719 cp->result = DID_ERROR << 16; 1695 cp->result = DID_ERROR << 16;
1720 break; 1696 break;
1721 } 1697 }
1722 } else { 1698 } else {
1723 DEBUG2(qla_printk(KERN_INFO, ha, 1699 ql_log(ql_log_warn, vha, 0x301f,
1724 "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x " 1700 "Dropped frame(s) detected (0x%x "
1725 "of 0x%x bytes).\n", vha->host_no, cp->device->id, 1701 "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
1726 cp->device->lun, resid, scsi_bufflen(cp)));
1727 1702
1728 cp->result = DID_ERROR << 16 | lscsi_status; 1703 cp->result = DID_ERROR << 16 | lscsi_status;
1729 goto check_scsi_status; 1704 goto check_scsi_status;
@@ -1739,10 +1714,8 @@ check_scsi_status:
1739 */ 1714 */
1740 if (lscsi_status != 0) { 1715 if (lscsi_status != 0) {
1741 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1716 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1742 DEBUG2(qla_printk(KERN_INFO, ha, 1717 ql_log(ql_log_warn, vha, 0x3020,
1743 "scsi(%ld:%d:%d) QUEUE FULL detected.\n", 1718 "QUEUE FULL detected.\n");
1744 vha->host_no, cp->device->id,
1745 cp->device->lun));
1746 logit = 1; 1719 logit = 1;
1747 break; 1720 break;
1748 } 1721 }
@@ -1781,10 +1754,9 @@ check_scsi_status:
1781 break; 1754 break;
1782 } 1755 }
1783 1756
1784 DEBUG2(qla_printk(KERN_INFO, ha, 1757 ql_dbg(ql_dbg_io, vha, 0x3021,
1785 "scsi(%ld:%d:%d) Port down status: port-state=0x%x\n", 1758 "Port down status: port-state=0x%x.\n",
1786 vha->host_no, cp->device->id, cp->device->lun, 1759 atomic_read(&fcport->state));
1787 atomic_read(&fcport->state)));
1788 1760
1789 if (atomic_read(&fcport->state) == FCS_ONLINE) 1761 if (atomic_read(&fcport->state) == FCS_ONLINE)
1790 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 1762 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
@@ -1804,15 +1776,13 @@ check_scsi_status:
1804 1776
1805out: 1777out:
1806 if (logit) 1778 if (logit)
1807 DEBUG2(qla_printk(KERN_INFO, ha, 1779 ql_dbg(ql_dbg_io, vha, 0x3022,
1808 "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) " 1780 "FCP command status: 0x%x-0x%x (0x%x) "
1809 "portid=%02x%02x%02x oxid=0x%x cdb=%02x%02x%02x len=0x%x " 1781 "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
1810 "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no, 1782 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
1811 cp->device->id, cp->device->lun, comp_status, scsi_status, 1783 comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0],
1812 cp->result, fcport->d_id.b.domain, fcport->d_id.b.area, 1784 cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
1813 fcport->d_id.b.al_pa, ox_id, cp->cmnd[0], cp->cmnd[1], 1785 resid_len, fw_resid_len);
1814 cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, resid_len,
1815 fw_resid_len));
1816 1786
1817 if (rsp->status_srb == NULL) 1787 if (rsp->status_srb == NULL)
1818 qla2x00_sp_compl(ha, sp); 1788 qla2x00_sp_compl(ha, sp);
@@ -1830,16 +1800,15 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1830{ 1800{
1831 uint8_t sense_sz = 0; 1801 uint8_t sense_sz = 0;
1832 struct qla_hw_data *ha = rsp->hw; 1802 struct qla_hw_data *ha = rsp->hw;
1803 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
1833 srb_t *sp = rsp->status_srb; 1804 srb_t *sp = rsp->status_srb;
1834 struct scsi_cmnd *cp; 1805 struct scsi_cmnd *cp;
1835 1806
1836 if (sp != NULL && sp->request_sense_length != 0) { 1807 if (sp != NULL && sp->request_sense_length != 0) {
1837 cp = sp->cmd; 1808 cp = sp->cmd;
1838 if (cp == NULL) { 1809 if (cp == NULL) {
1839 DEBUG2(printk("%s(): Cmd already returned back to OS " 1810 ql_log(ql_log_warn, vha, 0x3025,
1840 "sp=%p.\n", __func__, sp)); 1811 "cmd is NULL: already returned to OS (sp=%p).\n",
1841 qla_printk(KERN_INFO, ha,
1842 "cmd is NULL: already returned to OS (sp=%p)\n",
1843 sp); 1812 sp);
1844 1813
1845 rsp->status_srb = NULL; 1814 rsp->status_srb = NULL;
@@ -1856,7 +1825,8 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1856 if (IS_FWI2_CAPABLE(ha)) 1825 if (IS_FWI2_CAPABLE(ha))
1857 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 1826 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1858 memcpy(sp->request_sense_ptr, pkt->data, sense_sz); 1827 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1859 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz)); 1828 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
1829 sp->request_sense_ptr, sense_sz);
1860 1830
1861 sp->request_sense_ptr += sense_sz; 1831 sp->request_sense_ptr += sense_sz;
1862 sp->request_sense_length -= sense_sz; 1832 sp->request_sense_length -= sense_sz;
@@ -1882,21 +1852,25 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1882 uint32_t handle = LSW(pkt->handle); 1852 uint32_t handle = LSW(pkt->handle);
1883 uint16_t que = MSW(pkt->handle); 1853 uint16_t que = MSW(pkt->handle);
1884 struct req_que *req = ha->req_q_map[que]; 1854 struct req_que *req = ha->req_q_map[que];
1885#if defined(QL_DEBUG_LEVEL_2) 1855
1886 if (pkt->entry_status & RF_INV_E_ORDER) 1856 if (pkt->entry_status & RF_INV_E_ORDER)
1887 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); 1857 ql_dbg(ql_dbg_async, vha, 0x502a,
1858 "Invalid Entry Order.\n");
1888 else if (pkt->entry_status & RF_INV_E_COUNT) 1859 else if (pkt->entry_status & RF_INV_E_COUNT)
1889 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__); 1860 ql_dbg(ql_dbg_async, vha, 0x502b,
1861 "Invalid Entry Count.\n");
1890 else if (pkt->entry_status & RF_INV_E_PARAM) 1862 else if (pkt->entry_status & RF_INV_E_PARAM)
1891 qla_printk(KERN_ERR, ha, 1863 ql_dbg(ql_dbg_async, vha, 0x502c,
1892 "%s: Invalid Entry Parameter\n", __func__); 1864 "Invalid Entry Parameter.\n");
1893 else if (pkt->entry_status & RF_INV_E_TYPE) 1865 else if (pkt->entry_status & RF_INV_E_TYPE)
1894 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__); 1866 ql_dbg(ql_dbg_async, vha, 0x502d,
1867 "Invalid Entry Type.\n");
1895 else if (pkt->entry_status & RF_BUSY) 1868 else if (pkt->entry_status & RF_BUSY)
1896 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__); 1869 ql_dbg(ql_dbg_async, vha, 0x502e,
1870 "Busy.\n");
1897 else 1871 else
1898 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__); 1872 ql_dbg(ql_dbg_async, vha, 0x502f,
1899#endif 1873 "UNKNOWN flag error.\n");
1900 1874
1901 /* Validate handle. */ 1875 /* Validate handle. */
1902 if (handle < MAX_OUTSTANDING_COMMANDS) 1876 if (handle < MAX_OUTSTANDING_COMMANDS)
@@ -1923,10 +1897,8 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1923 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == 1897 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1924 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7 1898 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
1925 || pkt->entry_type == COMMAND_TYPE_6) { 1899 || pkt->entry_type == COMMAND_TYPE_6) {
1926 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n", 1900 ql_log(ql_log_warn, vha, 0x5030,
1927 vha->host_no)); 1901 "Error entry - invalid handle.\n");
1928 qla_printk(KERN_WARNING, ha,
1929 "Error entry - invalid handle\n");
1930 1902
1931 if (IS_QLA82XX(ha)) 1903 if (IS_QLA82XX(ha))
1932 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1904 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -1960,11 +1932,11 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1960 } 1932 }
1961 1933
1962 if (ha->mcp) { 1934 if (ha->mcp) {
1963 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", 1935 ql_dbg(ql_dbg_async, vha, 0x504d,
1964 __func__, vha->host_no, ha->mcp->mb[0])); 1936 "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
1965 } else { 1937 } else {
1966 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", 1938 ql_dbg(ql_dbg_async, vha, 0x504e,
1967 __func__, vha->host_no)); 1939 "MBX pointer ERROR.\n");
1968 } 1940 }
1969} 1941}
1970 1942
@@ -1993,8 +1965,8 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1993 } 1965 }
1994 1966
1995 if (pkt->entry_status != 0) { 1967 if (pkt->entry_status != 0) {
1996 DEBUG3(printk(KERN_INFO 1968 ql_dbg(ql_dbg_async, vha, 0x5029,
1997 "scsi(%ld): Process error entry.\n", vha->host_no)); 1969 "Process error entry.\n");
1998 1970
1999 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 1971 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2000 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1972 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -2030,10 +2002,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2030 break; 2002 break;
2031 default: 2003 default:
2032 /* Type Not Supported. */ 2004 /* Type Not Supported. */
2033 DEBUG4(printk(KERN_WARNING 2005 ql_dbg(ql_dbg_async, vha, 0x5042,
2034 "scsi(%ld): Received unknown response pkt type %x " 2006 "Received unknown response pkt type %x "
2035 "entry status=%x.\n", 2007 "entry status=%x.\n",
2036 vha->host_no, pkt->entry_type, pkt->entry_status)); 2008 pkt->entry_type, pkt->entry_status);
2037 break; 2009 break;
2038 } 2010 }
2039 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2011 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -2088,7 +2060,8 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2088 2060
2089next_test: 2061next_test:
2090 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3) 2062 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
2091 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n"); 2063 ql_log(ql_log_info, vha, 0x504c,
2064 "Additional code -- 0x55AA.\n");
2092 2065
2093done: 2066done:
2094 WRT_REG_DWORD(&reg->iobase_window, 0x0000); 2067 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
@@ -2121,7 +2094,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
2121 rsp = (struct rsp_que *) dev_id; 2094 rsp = (struct rsp_que *) dev_id;
2122 if (!rsp) { 2095 if (!rsp) {
2123 printk(KERN_INFO 2096 printk(KERN_INFO
2124 "%s(): NULL response queue pointer\n", __func__); 2097 "%s(): NULL response queue pointer.\n", __func__);
2125 return IRQ_NONE; 2098 return IRQ_NONE;
2126 } 2099 }
2127 2100
@@ -2142,8 +2115,9 @@ qla24xx_intr_handler(int irq, void *dev_id)
2142 2115
2143 hccr = RD_REG_DWORD(&reg->hccr); 2116 hccr = RD_REG_DWORD(&reg->hccr);
2144 2117
2145 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 2118 ql_log(ql_log_warn, vha, 0x504b,
2146 "Dumping firmware!\n", hccr); 2119 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2120 hccr);
2147 2121
2148 qla2xxx_check_risc_status(vha); 2122 qla2xxx_check_risc_status(vha);
2149 2123
@@ -2174,9 +2148,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
2174 qla24xx_process_response_queue(vha, rsp); 2148 qla24xx_process_response_queue(vha, rsp);
2175 break; 2149 break;
2176 default: 2150 default:
2177 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 2151 ql_dbg(ql_dbg_async, vha, 0x504f,
2178 "(%d).\n", 2152 "Unrecognized interrupt type (%d).\n", stat * 0xff);
2179 vha->host_no, stat & 0xff));
2180 break; 2153 break;
2181 } 2154 }
2182 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2155 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -2205,7 +2178,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
2205 rsp = (struct rsp_que *) dev_id; 2178 rsp = (struct rsp_que *) dev_id;
2206 if (!rsp) { 2179 if (!rsp) {
2207 printk(KERN_INFO 2180 printk(KERN_INFO
2208 "%s(): NULL response queue pointer\n", __func__); 2181 "%s(): NULL response queue pointer.\n", __func__);
2209 return IRQ_NONE; 2182 return IRQ_NONE;
2210 } 2183 }
2211 ha = rsp->hw; 2184 ha = rsp->hw;
@@ -2235,7 +2208,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
2235 rsp = (struct rsp_que *) dev_id; 2208 rsp = (struct rsp_que *) dev_id;
2236 if (!rsp) { 2209 if (!rsp) {
2237 printk(KERN_INFO 2210 printk(KERN_INFO
2238 "%s(): NULL response queue pointer\n", __func__); 2211 "%s(): NULL response queue pointer.\n", __func__);
2239 return IRQ_NONE; 2212 return IRQ_NONE;
2240 } 2213 }
2241 ha = rsp->hw; 2214 ha = rsp->hw;
@@ -2268,8 +2241,8 @@ qla24xx_msix_default(int irq, void *dev_id)
2268 2241
2269 rsp = (struct rsp_que *) dev_id; 2242 rsp = (struct rsp_que *) dev_id;
2270 if (!rsp) { 2243 if (!rsp) {
2271 DEBUG(printk( 2244 printk(KERN_INFO
2272 "%s(): NULL response queue pointer\n", __func__)); 2245 "%s(): NULL response queue pointer.\n", __func__);
2273 return IRQ_NONE; 2246 return IRQ_NONE;
2274 } 2247 }
2275 ha = rsp->hw; 2248 ha = rsp->hw;
@@ -2286,8 +2259,9 @@ qla24xx_msix_default(int irq, void *dev_id)
2286 2259
2287 hccr = RD_REG_DWORD(&reg->hccr); 2260 hccr = RD_REG_DWORD(&reg->hccr);
2288 2261
2289 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 2262 ql_log(ql_log_info, vha, 0x5050,
2290 "Dumping firmware!\n", hccr); 2263 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2264 hccr);
2291 2265
2292 qla2xxx_check_risc_status(vha); 2266 qla2xxx_check_risc_status(vha);
2293 2267
@@ -2318,9 +2292,8 @@ qla24xx_msix_default(int irq, void *dev_id)
2318 qla24xx_process_response_queue(vha, rsp); 2292 qla24xx_process_response_queue(vha, rsp);
2319 break; 2293 break;
2320 default: 2294 default:
2321 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 2295 ql_dbg(ql_dbg_async, vha, 0x5051,
2322 "(%d).\n", 2296 "Unrecognized interrupt type (%d).\n", stat & 0xff);
2323 vha->host_no, stat & 0xff));
2324 break; 2297 break;
2325 } 2298 }
2326 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2299 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -2358,6 +2331,7 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
2358{ 2331{
2359 int i; 2332 int i;
2360 struct qla_msix_entry *qentry; 2333 struct qla_msix_entry *qentry;
2334 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2361 2335
2362 for (i = 0; i < ha->msix_count; i++) { 2336 for (i = 0; i < ha->msix_count; i++) {
2363 qentry = &ha->msix_entries[i]; 2337 qentry = &ha->msix_entries[i];
@@ -2368,6 +2342,8 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
2368 kfree(ha->msix_entries); 2342 kfree(ha->msix_entries);
2369 ha->msix_entries = NULL; 2343 ha->msix_entries = NULL;
2370 ha->flags.msix_enabled = 0; 2344 ha->flags.msix_enabled = 0;
2345 ql_dbg(ql_dbg_init, vha, 0x0042,
2346 "Disabled the MSI.\n");
2371} 2347}
2372 2348
2373static int 2349static int
@@ -2377,11 +2353,15 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2377 int i, ret; 2353 int i, ret;
2378 struct msix_entry *entries; 2354 struct msix_entry *entries;
2379 struct qla_msix_entry *qentry; 2355 struct qla_msix_entry *qentry;
2356 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2380 2357
2381 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 2358 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2382 GFP_KERNEL); 2359 GFP_KERNEL);
2383 if (!entries) 2360 if (!entries) {
2361 ql_log(ql_log_warn, vha, 0x00bc,
2362 "Failed to allocate memory for msix_entry.\n");
2384 return -ENOMEM; 2363 return -ENOMEM;
2364 }
2385 2365
2386 for (i = 0; i < ha->msix_count; i++) 2366 for (i = 0; i < ha->msix_count; i++)
2387 entries[i].entry = i; 2367 entries[i].entry = i;
@@ -2391,16 +2371,18 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2391 if (ret < MIN_MSIX_COUNT) 2371 if (ret < MIN_MSIX_COUNT)
2392 goto msix_failed; 2372 goto msix_failed;
2393 2373
2394 qla_printk(KERN_WARNING, ha, 2374 ql_log(ql_log_warn, vha, 0x00c6,
2395 "MSI-X: Failed to enable support -- %d/%d\n" 2375 "MSI-X: Failed to enable support "
2396 " Retry with %d vectors\n", ha->msix_count, ret, ret); 2376 "-- %d/%d\n Retry with %d vectors.\n",
2377 ha->msix_count, ret, ret);
2397 ha->msix_count = ret; 2378 ha->msix_count = ret;
2398 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 2379 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2399 if (ret) { 2380 if (ret) {
2400msix_failed: 2381msix_failed:
2401 qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable" 2382 ql_log(ql_log_fatal, vha, 0x00c7,
2402 " support, giving up -- %d/%d\n", 2383 "MSI-X: Failed to enable support, "
2403 ha->msix_count, ret); 2384 "giving up -- %d/%d.\n",
2385 ha->msix_count, ret);
2404 goto msix_out; 2386 goto msix_out;
2405 } 2387 }
2406 ha->max_rsp_queues = ha->msix_count - 1; 2388 ha->max_rsp_queues = ha->msix_count - 1;
@@ -2408,6 +2390,8 @@ msix_failed:
2408 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 2390 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2409 ha->msix_count, GFP_KERNEL); 2391 ha->msix_count, GFP_KERNEL);
2410 if (!ha->msix_entries) { 2392 if (!ha->msix_entries) {
2393 ql_log(ql_log_fatal, vha, 0x00c8,
2394 "Failed to allocate memory for ha->msix_entries.\n");
2411 ret = -ENOMEM; 2395 ret = -ENOMEM;
2412 goto msix_out; 2396 goto msix_out;
2413 } 2397 }
@@ -2434,9 +2418,9 @@ msix_failed:
2434 0, msix_entries[i].name, rsp); 2418 0, msix_entries[i].name, rsp);
2435 } 2419 }
2436 if (ret) { 2420 if (ret) {
2437 qla_printk(KERN_WARNING, ha, 2421 ql_log(ql_log_fatal, vha, 0x00cb,
2438 "MSI-X: Unable to register handler -- %x/%d.\n", 2422 "MSI-X: unable to register handler -- %x/%d.\n",
2439 qentry->vector, ret); 2423 qentry->vector, ret);
2440 qla24xx_disable_msix(ha); 2424 qla24xx_disable_msix(ha);
2441 ha->mqenable = 0; 2425 ha->mqenable = 0;
2442 goto msix_out; 2426 goto msix_out;
@@ -2449,6 +2433,12 @@ msix_failed:
2449 /* Enable MSI-X vector for response queue update for queue 0 */ 2433 /* Enable MSI-X vector for response queue update for queue 0 */
2450 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 2434 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2451 ha->mqenable = 1; 2435 ha->mqenable = 1;
2436 ql_dbg(ql_dbg_multiq, vha, 0xc005,
2437 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2438 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2439 ql_dbg(ql_dbg_init, vha, 0x0055,
2440 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2441 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2452 2442
2453msix_out: 2443msix_out:
2454 kfree(entries); 2444 kfree(entries);
@@ -2460,6 +2450,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2460{ 2450{
2461 int ret; 2451 int ret;
2462 device_reg_t __iomem *reg = ha->iobase; 2452 device_reg_t __iomem *reg = ha->iobase;
2453 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2463 2454
2464 /* If possible, enable MSI-X. */ 2455 /* If possible, enable MSI-X. */
2465 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && 2456 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
@@ -2470,30 +2461,30 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2470 (ha->pdev->subsystem_device == 0x7040 || 2461 (ha->pdev->subsystem_device == 0x7040 ||
2471 ha->pdev->subsystem_device == 0x7041 || 2462 ha->pdev->subsystem_device == 0x7041 ||
2472 ha->pdev->subsystem_device == 0x1705)) { 2463 ha->pdev->subsystem_device == 0x1705)) {
2473 DEBUG2(qla_printk(KERN_WARNING, ha, 2464 ql_log(ql_log_warn, vha, 0x0034,
2474 "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n", 2465 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
2475 ha->pdev->subsystem_vendor, 2466 ha->pdev->subsystem_vendor,
2476 ha->pdev->subsystem_device)); 2467 ha->pdev->subsystem_device);
2477 goto skip_msi; 2468 goto skip_msi;
2478 } 2469 }
2479 2470
2480 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || 2471 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
2481 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { 2472 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
2482 DEBUG2(qla_printk(KERN_WARNING, ha, 2473 ql_log(ql_log_warn, vha, 0x0035,
2483 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", 2474 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
2484 ha->pdev->revision, ha->fw_attributes)); 2475 ha->pdev->revision, ha->fw_attributes);
2485 goto skip_msix; 2476 goto skip_msix;
2486 } 2477 }
2487 2478
2488 ret = qla24xx_enable_msix(ha, rsp); 2479 ret = qla24xx_enable_msix(ha, rsp);
2489 if (!ret) { 2480 if (!ret) {
2490 DEBUG2(qla_printk(KERN_INFO, ha, 2481 ql_dbg(ql_dbg_init, vha, 0x0036,
2491 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision, 2482 "MSI-X: Enabled (0x%X, 0x%X).\n",
2492 ha->fw_attributes)); 2483 ha->chip_revision, ha->fw_attributes);
2493 goto clear_risc_ints; 2484 goto clear_risc_ints;
2494 } 2485 }
2495 qla_printk(KERN_WARNING, ha, 2486 ql_log(ql_log_info, vha, 0x0037,
2496 "MSI-X: Falling back-to MSI mode -- %d.\n", ret); 2487 "MSI-X Falling back-to MSI mode -%d.\n", ret);
2497skip_msix: 2488skip_msix:
2498 2489
2499 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 2490 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
@@ -2502,18 +2493,19 @@ skip_msix:
2502 2493
2503 ret = pci_enable_msi(ha->pdev); 2494 ret = pci_enable_msi(ha->pdev);
2504 if (!ret) { 2495 if (!ret) {
2505 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n")); 2496 ql_dbg(ql_dbg_init, vha, 0x0038,
2497 "MSI: Enabled.\n");
2506 ha->flags.msi_enabled = 1; 2498 ha->flags.msi_enabled = 1;
2507 } else 2499 } else
2508 qla_printk(KERN_WARNING, ha, 2500 ql_log(ql_log_warn, vha, 0x0039,
2509 "MSI-X: Falling back-to INTa mode -- %d.\n", ret); 2501 "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
2510skip_msi: 2502skip_msi:
2511 2503
2512 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 2504 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2513 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 2505 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2514 QLA2XXX_DRIVER_NAME, rsp); 2506 QLA2XXX_DRIVER_NAME, rsp);
2515 if (ret) { 2507 if (ret) {
2516 qla_printk(KERN_WARNING, ha, 2508 ql_log(ql_log_warn, vha, 0x003a,
2517 "Failed to reserve interrupt %d already in use.\n", 2509 "Failed to reserve interrupt %d already in use.\n",
2518 ha->pdev->irq); 2510 ha->pdev->irq);
2519 goto fail; 2511 goto fail;
@@ -2563,13 +2555,14 @@ int qla25xx_request_irq(struct rsp_que *rsp)
2563 struct qla_hw_data *ha = rsp->hw; 2555 struct qla_hw_data *ha = rsp->hw;
2564 struct qla_init_msix_entry *intr = &msix_entries[2]; 2556 struct qla_init_msix_entry *intr = &msix_entries[2];
2565 struct qla_msix_entry *msix = rsp->msix; 2557 struct qla_msix_entry *msix = rsp->msix;
2558 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2566 int ret; 2559 int ret;
2567 2560
2568 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); 2561 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2569 if (ret) { 2562 if (ret) {
2570 qla_printk(KERN_WARNING, ha, 2563 ql_log(ql_log_fatal, vha, 0x00e6,
2571 "MSI-X: Unable to register handler -- %x/%d.\n", 2564 "MSI-X: Unable to register handler -- %x/%d.\n",
2572 msix->vector, ret); 2565 msix->vector, ret);
2573 return ret; 2566 return ret;
2574 } 2567 }
2575 msix->have_irq = 1; 2568 msix->have_irq = 1;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index c26f0acdfecc..f7604ea1af83 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -46,14 +46,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
46 struct qla_hw_data *ha = vha->hw; 46 struct qla_hw_data *ha = vha->hw;
47 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 47 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
48 48
49 if (ha->pdev->error_state > pci_channel_io_frozen) 49 ql_dbg(ql_dbg_mbx, base_vha, 0x1000, "Entered %s.\n", __func__);
50
51 if (ha->pdev->error_state > pci_channel_io_frozen) {
52 ql_log(ql_log_warn, base_vha, 0x1001,
53 "error_state is greater than pci_channel_io_frozen, "
54 "exiting.\n");
50 return QLA_FUNCTION_TIMEOUT; 55 return QLA_FUNCTION_TIMEOUT;
56 }
51 57
52 if (vha->device_flags & DFLG_DEV_FAILED) { 58 if (vha->device_flags & DFLG_DEV_FAILED) {
53 DEBUG2_3_11(qla_printk(KERN_WARNING, ha, 59 ql_log(ql_log_warn, base_vha, 0x1002,
54 "%s(%ld): Device in failed state, " 60 "Device in failed state, exiting.\n");
55 "timeout MBX Exiting.\n",
56 __func__, base_vha->host_no));
57 return QLA_FUNCTION_TIMEOUT; 61 return QLA_FUNCTION_TIMEOUT;
58 } 62 }
59 63
@@ -63,17 +67,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
63 rval = QLA_SUCCESS; 67 rval = QLA_SUCCESS;
64 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 68 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
65 69
66 DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
67 70
68 if (ha->flags.pci_channel_io_perm_failure) { 71 if (ha->flags.pci_channel_io_perm_failure) {
69 DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX " 72 ql_log(ql_log_warn, base_vha, 0x1003,
70 "Exiting.\n", __func__, vha->host_no)); 73 "Perm failure on EEH timeout MBX, exiting.\n");
71 return QLA_FUNCTION_TIMEOUT; 74 return QLA_FUNCTION_TIMEOUT;
72 } 75 }
73 76
74 if (ha->flags.isp82xx_fw_hung) { 77 if (ha->flags.isp82xx_fw_hung) {
75 /* Setting Link-Down error */ 78 /* Setting Link-Down error */
76 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 79 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
80 ql_log(ql_log_warn, base_vha, 0x1004,
81 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
77 rval = QLA_FUNCTION_FAILED; 82 rval = QLA_FUNCTION_FAILED;
78 goto premature_exit; 83 goto premature_exit;
79 } 84 }
@@ -85,8 +90,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
85 */ 90 */
86 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { 91 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
87 /* Timeout occurred. Return error. */ 92 /* Timeout occurred. Return error. */
88 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. " 93 ql_log(ql_log_warn, base_vha, 0x1005,
89 "Exiting.\n", __func__, base_vha->host_no)); 94 "Cmd access timeout, Exiting.\n");
90 return QLA_FUNCTION_TIMEOUT; 95 return QLA_FUNCTION_TIMEOUT;
91 } 96 }
92 97
@@ -94,8 +99,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
94 /* Save mailbox command for debug */ 99 /* Save mailbox command for debug */
95 ha->mcp = mcp; 100 ha->mcp = mcp;
96 101
97 DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n", 102 ql_dbg(ql_dbg_mbx, base_vha, 0x1006,
98 base_vha->host_no, mcp->mb[0])); 103 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
99 104
100 spin_lock_irqsave(&ha->hardware_lock, flags); 105 spin_lock_irqsave(&ha->hardware_lock, flags);
101 106
@@ -123,27 +128,30 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
123 iptr++; 128 iptr++;
124 } 129 }
125 130
126#if defined(QL_DEBUG_LEVEL_1) 131 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1111,
127 printk("%s(%ld): Loaded MBX registers (displayed in bytes) = \n", 132 "Loaded MBX registers (displayed in bytes) =.\n");
128 __func__, base_vha->host_no); 133 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1112,
129 qla2x00_dump_buffer((uint8_t *)mcp->mb, 16); 134 (uint8_t *)mcp->mb, 16);
130 printk("\n"); 135 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1113,
131 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x10), 16); 136 ".\n");
132 printk("\n"); 137 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1114,
133 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x20), 8); 138 ((uint8_t *)mcp->mb + 0x10), 16);
134 printk("\n"); 139 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1115,
135 printk("%s(%ld): I/O address = %p.\n", __func__, base_vha->host_no, 140 ".\n");
136 optr); 141 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1116,
137 qla2x00_dump_regs(base_vha); 142 ((uint8_t *)mcp->mb + 0x20), 8);
138#endif 143 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1117,
144 "I/O Address = %p.\n", optr);
145 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x100e);
139 146
140 /* Issue set host interrupt command to send cmd out. */ 147 /* Issue set host interrupt command to send cmd out. */
141 ha->flags.mbox_int = 0; 148 ha->flags.mbox_int = 0;
142 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 149 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
143 150
144 /* Unlock mbx registers and wait for interrupt */ 151 /* Unlock mbx registers and wait for interrupt */
145 DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. " 152 ql_dbg(ql_dbg_mbx, base_vha, 0x100f,
146 "jiffies=%lx.\n", __func__, base_vha->host_no, jiffies)); 153 "Going to unlock irq & waiting for interrupts. "
154 "jiffies=%lx.\n", jiffies);
147 155
148 /* Wait for mbx cmd completion until timeout */ 156 /* Wait for mbx cmd completion until timeout */
149 157
@@ -155,9 +163,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
155 HINT_MBX_INT_PENDING) { 163 HINT_MBX_INT_PENDING) {
156 spin_unlock_irqrestore(&ha->hardware_lock, 164 spin_unlock_irqrestore(&ha->hardware_lock,
157 flags); 165 flags);
158 DEBUG2_3_11(printk(KERN_INFO 166 ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
159 "%s(%ld): Pending Mailbox timeout. " 167 "Pending mailbox timeout, exiting.\n");
160 "Exiting.\n", __func__, base_vha->host_no));
161 rval = QLA_FUNCTION_TIMEOUT; 168 rval = QLA_FUNCTION_TIMEOUT;
162 goto premature_exit; 169 goto premature_exit;
163 } 170 }
@@ -173,17 +180,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
173 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 180 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
174 181
175 } else { 182 } else {
176 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__, 183 ql_dbg(ql_dbg_mbx, base_vha, 0x1011,
177 base_vha->host_no, command)); 184 "Cmd=%x Polling Mode.\n", command);
178 185
179 if (IS_QLA82XX(ha)) { 186 if (IS_QLA82XX(ha)) {
180 if (RD_REG_DWORD(&reg->isp82.hint) & 187 if (RD_REG_DWORD(&reg->isp82.hint) &
181 HINT_MBX_INT_PENDING) { 188 HINT_MBX_INT_PENDING) {
182 spin_unlock_irqrestore(&ha->hardware_lock, 189 spin_unlock_irqrestore(&ha->hardware_lock,
183 flags); 190 flags);
184 DEBUG2_3_11(printk(KERN_INFO 191 ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
185 "%s(%ld): Pending Mailbox timeout. " 192 "Pending mailbox timeout, exiting.\n");
186 "Exiting.\n", __func__, base_vha->host_no));
187 rval = QLA_FUNCTION_TIMEOUT; 193 rval = QLA_FUNCTION_TIMEOUT;
188 goto premature_exit; 194 goto premature_exit;
189 } 195 }
@@ -207,17 +213,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
207 command == MBC_LOAD_RISC_RAM_EXTENDED)) 213 command == MBC_LOAD_RISC_RAM_EXTENDED))
208 msleep(10); 214 msleep(10);
209 } /* while */ 215 } /* while */
210 DEBUG17(qla_printk(KERN_WARNING, ha, 216 ql_dbg(ql_dbg_mbx, base_vha, 0x1013,
211 "Waited %d sec\n", 217 "Waited %d sec.\n",
212 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ))); 218 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
213 } 219 }
214 220
215 /* Check whether we timed out */ 221 /* Check whether we timed out */
216 if (ha->flags.mbox_int) { 222 if (ha->flags.mbox_int) {
217 uint16_t *iptr2; 223 uint16_t *iptr2;
218 224
219 DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__, 225 ql_dbg(ql_dbg_mbx, base_vha, 0x1014,
220 base_vha->host_no, command)); 226 "Cmd=%x completed.\n", command);
221 227
222 /* Got interrupt. Clear the flag. */ 228 /* Got interrupt. Clear the flag. */
223 ha->flags.mbox_int = 0; 229 ha->flags.mbox_int = 0;
@@ -229,6 +235,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
229 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 235 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
230 ha->mcp = NULL; 236 ha->mcp = NULL;
231 rval = QLA_FUNCTION_FAILED; 237 rval = QLA_FUNCTION_FAILED;
238 ql_log(ql_log_warn, base_vha, 0x1015,
239 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
232 goto premature_exit; 240 goto premature_exit;
233 } 241 }
234 242
@@ -249,8 +257,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
249 } 257 }
250 } else { 258 } else {
251 259
252#if defined(QL_DEBUG_LEVEL_2) || defined(QL_DEBUG_LEVEL_3) || \
253 defined(QL_DEBUG_LEVEL_11)
254 uint16_t mb0; 260 uint16_t mb0;
255 uint32_t ictrl; 261 uint32_t ictrl;
256 262
@@ -261,14 +267,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
261 mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0); 267 mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
262 ictrl = RD_REG_WORD(&reg->isp.ictrl); 268 ictrl = RD_REG_WORD(&reg->isp.ictrl);
263 } 269 }
264 printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n", 270 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1119,
265 __func__, base_vha->host_no, command); 271 "MBX Command timeout for cmd %x.\n", command);
266 printk("%s(%ld): icontrol=%x jiffies=%lx\n", __func__, 272 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111a,
267 base_vha->host_no, ictrl, jiffies); 273 "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
268 printk("%s(%ld): *** mailbox[0] = 0x%x ***\n", __func__, 274 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111b,
269 base_vha->host_no, mb0); 275 "mb[0] = 0x%x.\n", mb0);
270 qla2x00_dump_regs(base_vha); 276 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1019);
271#endif
272 277
273 rval = QLA_FUNCTION_TIMEOUT; 278 rval = QLA_FUNCTION_TIMEOUT;
274 } 279 }
@@ -279,8 +284,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
279 ha->mcp = NULL; 284 ha->mcp = NULL;
280 285
281 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { 286 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
282 DEBUG11(printk("%s(%ld): checking for additional resp " 287 ql_dbg(ql_dbg_mbx, base_vha, 0x101a,
283 "interrupt.\n", __func__, base_vha->host_no)); 288 "Checking for additional resp interrupt.\n");
284 289
285 /* polling mode for non isp_abort commands. */ 290 /* polling mode for non isp_abort commands. */
286 qla2x00_poll(ha->rsp_q_map[0]); 291 qla2x00_poll(ha->rsp_q_map[0]);
@@ -291,38 +296,32 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
291 if (!io_lock_on || (mcp->flags & IOCTL_CMD) || 296 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
292 ha->flags.eeh_busy) { 297 ha->flags.eeh_busy) {
293 /* not in dpc. schedule it for dpc to take over. */ 298 /* not in dpc. schedule it for dpc to take over. */
294 DEBUG(printk("%s(%ld): timeout schedule " 299 ql_dbg(ql_dbg_mbx, base_vha, 0x101b,
295 "isp_abort_needed.\n", __func__, 300 "Timeout, schedule isp_abort_needed.\n");
296 base_vha->host_no));
297 DEBUG2_3_11(printk("%s(%ld): timeout schedule "
298 "isp_abort_needed.\n", __func__,
299 base_vha->host_no));
300 301
301 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && 302 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
302 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && 303 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
303 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 304 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
304 305
305 qla_printk(KERN_WARNING, ha, 306 ql_log(ql_log_info, base_vha, 0x101c,
306 "Mailbox command timeout occurred. " 307 "Mailbox cmd timeout occured. "
307 "Scheduling ISP " "abort. eeh_busy: 0x%x\n", 308 "Scheduling ISP abort eeh_busy=0x%x.\n",
308 ha->flags.eeh_busy); 309 ha->flags.eeh_busy);
309 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 310 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
310 qla2xxx_wake_dpc(vha); 311 qla2xxx_wake_dpc(vha);
311 } 312 }
312 } else if (!abort_active) { 313 } else if (!abort_active) {
313 /* call abort directly since we are in the DPC thread */ 314 /* call abort directly since we are in the DPC thread */
314 DEBUG(printk("%s(%ld): timeout calling abort_isp\n", 315 ql_dbg(ql_dbg_mbx, base_vha, 0x101d,
315 __func__, base_vha->host_no)); 316 "Timeout, calling abort_isp.\n");
316 DEBUG2_3_11(printk("%s(%ld): timeout calling "
317 "abort_isp\n", __func__, base_vha->host_no));
318 317
319 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && 318 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
320 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && 319 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
321 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 320 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
322 321
323 qla_printk(KERN_WARNING, ha, 322 ql_log(ql_log_info, base_vha, 0x101e,
324 "Mailbox command timeout occurred. " 323 "Mailbox cmd timeout occured. "
325 "Issuing ISP abort.\n"); 324 "Scheduling ISP abort.\n");
326 325
327 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 326 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
328 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 327 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -332,11 +331,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
332 &vha->dpc_flags); 331 &vha->dpc_flags);
333 } 332 }
334 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 333 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
335 DEBUG(printk("%s(%ld): finished abort_isp\n", 334 ql_dbg(ql_dbg_mbx, base_vha, 0x101f,
336 __func__, vha->host_no)); 335 "Finished abort_isp.\n");
337 DEBUG2_3_11(printk(
338 "%s(%ld): finished abort_isp\n",
339 __func__, vha->host_no));
340 } 336 }
341 } 337 }
342 } 338 }
@@ -346,12 +342,11 @@ premature_exit:
346 complete(&ha->mbx_cmd_comp); 342 complete(&ha->mbx_cmd_comp);
347 343
348 if (rval) { 344 if (rval) {
349 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, " 345 ql_dbg(ql_dbg_mbx, base_vha, 0x1020,
350 "mbx2=%x, cmd=%x ****\n", __func__, base_vha->host_no, 346 "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, cmd=%x ****.\n",
351 mcp->mb[0], mcp->mb[1], mcp->mb[2], command)); 347 mcp->mb[0], mcp->mb[1], mcp->mb[2], command);
352 } else { 348 } else {
353 DEBUG11(printk("%s(%ld): done.\n", __func__, 349 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
354 base_vha->host_no));
355 } 350 }
356 351
357 return rval; 352 return rval;
@@ -366,7 +361,7 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
366 mbx_cmd_t mc; 361 mbx_cmd_t mc;
367 mbx_cmd_t *mcp = &mc; 362 mbx_cmd_t *mcp = &mc;
368 363
369 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 364 ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__);
370 365
371 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { 366 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
372 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; 367 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -397,10 +392,10 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
397 rval = qla2x00_mailbox_command(vha, mcp); 392 rval = qla2x00_mailbox_command(vha, mcp);
398 393
399 if (rval != QLA_SUCCESS) { 394 if (rval != QLA_SUCCESS) {
400 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 395 ql_dbg(ql_dbg_mbx, vha, 0x1023,
401 vha->host_no, rval, mcp->mb[0])); 396 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
402 } else { 397 } else {
403 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 398 ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__);
404 } 399 }
405 400
406 return rval; 401 return rval;
@@ -430,7 +425,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
430 mbx_cmd_t mc; 425 mbx_cmd_t mc;
431 mbx_cmd_t *mcp = &mc; 426 mbx_cmd_t *mcp = &mc;
432 427
433 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 428 ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__);
434 429
435 mcp->mb[0] = MBC_EXECUTE_FIRMWARE; 430 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
436 mcp->out_mb = MBX_0; 431 mcp->out_mb = MBX_0;
@@ -461,15 +456,14 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
461 rval = qla2x00_mailbox_command(vha, mcp); 456 rval = qla2x00_mailbox_command(vha, mcp);
462 457
463 if (rval != QLA_SUCCESS) { 458 if (rval != QLA_SUCCESS) {
464 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 459 ql_dbg(ql_dbg_mbx, vha, 0x1026,
465 vha->host_no, rval, mcp->mb[0])); 460 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
466 } else { 461 } else {
467 if (IS_FWI2_CAPABLE(ha)) { 462 if (IS_FWI2_CAPABLE(ha)) {
468 DEBUG11(printk("%s(%ld): done exchanges=%x.\n", 463 ql_dbg(ql_dbg_mbx, vha, 0x1027,
469 __func__, vha->host_no, mcp->mb[1])); 464 "Done exchanges=%x.\n", mcp->mb[1]);
470 } else { 465 } else {
471 DEBUG11(printk("%s(%ld): done.\n", __func__, 466 ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__);
472 vha->host_no));
473 } 467 }
474 } 468 }
475 469
@@ -501,7 +495,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
501 mbx_cmd_t mc; 495 mbx_cmd_t mc;
502 mbx_cmd_t *mcp = &mc; 496 mbx_cmd_t *mcp = &mc;
503 497
504 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 498 ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
505 499
506 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 500 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
507 mcp->out_mb = MBX_0; 501 mcp->out_mb = MBX_0;
@@ -535,11 +529,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
535failed: 529failed:
536 if (rval != QLA_SUCCESS) { 530 if (rval != QLA_SUCCESS) {
537 /*EMPTY*/ 531 /*EMPTY*/
538 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 532 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
539 vha->host_no, rval));
540 } else { 533 } else {
541 /*EMPTY*/ 534 /*EMPTY*/
542 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 535 ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__);
543 } 536 }
544 return rval; 537 return rval;
545} 538}
@@ -565,7 +558,7 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
565 mbx_cmd_t mc; 558 mbx_cmd_t mc;
566 mbx_cmd_t *mcp = &mc; 559 mbx_cmd_t *mcp = &mc;
567 560
568 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 561 ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__);
569 562
570 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; 563 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
571 mcp->out_mb = MBX_0; 564 mcp->out_mb = MBX_0;
@@ -576,15 +569,14 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
576 569
577 if (rval != QLA_SUCCESS) { 570 if (rval != QLA_SUCCESS) {
578 /*EMPTY*/ 571 /*EMPTY*/
579 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 572 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
580 vha->host_no, rval));
581 } else { 573 } else {
582 fwopts[0] = mcp->mb[0]; 574 fwopts[0] = mcp->mb[0];
583 fwopts[1] = mcp->mb[1]; 575 fwopts[1] = mcp->mb[1];
584 fwopts[2] = mcp->mb[2]; 576 fwopts[2] = mcp->mb[2];
585 fwopts[3] = mcp->mb[3]; 577 fwopts[3] = mcp->mb[3];
586 578
587 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 579 ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__);
588 } 580 }
589 581
590 return rval; 582 return rval;
@@ -612,7 +604,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
612 mbx_cmd_t mc; 604 mbx_cmd_t mc;
613 mbx_cmd_t *mcp = &mc; 605 mbx_cmd_t *mcp = &mc;
614 606
615 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 607 ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__);
616 608
617 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; 609 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
618 mcp->mb[1] = fwopts[1]; 610 mcp->mb[1] = fwopts[1];
@@ -636,11 +628,11 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
636 628
637 if (rval != QLA_SUCCESS) { 629 if (rval != QLA_SUCCESS) {
638 /*EMPTY*/ 630 /*EMPTY*/
639 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x/%x).\n", __func__, 631 ql_dbg(ql_dbg_mbx, vha, 0x1030,
640 vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 632 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
641 } else { 633 } else {
642 /*EMPTY*/ 634 /*EMPTY*/
643 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 635 ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__);
644 } 636 }
645 637
646 return rval; 638 return rval;
@@ -668,7 +660,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
668 mbx_cmd_t mc; 660 mbx_cmd_t mc;
669 mbx_cmd_t *mcp = &mc; 661 mbx_cmd_t *mcp = &mc;
670 662
671 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", vha->host_no)); 663 ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__);
672 664
673 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; 665 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
674 mcp->mb[1] = 0xAAAA; 666 mcp->mb[1] = 0xAAAA;
@@ -695,12 +687,10 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
695 687
696 if (rval != QLA_SUCCESS) { 688 if (rval != QLA_SUCCESS) {
697 /*EMPTY*/ 689 /*EMPTY*/
698 DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n", 690 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
699 vha->host_no, rval));
700 } else { 691 } else {
701 /*EMPTY*/ 692 /*EMPTY*/
702 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n", 693 ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__);
703 vha->host_no));
704 } 694 }
705 695
706 return rval; 696 return rval;
@@ -728,7 +718,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
728 mbx_cmd_t mc; 718 mbx_cmd_t mc;
729 mbx_cmd_t *mcp = &mc; 719 mbx_cmd_t *mcp = &mc;
730 720
731 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 721 ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__);
732 722
733 mcp->mb[0] = MBC_VERIFY_CHECKSUM; 723 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
734 mcp->out_mb = MBX_0; 724 mcp->out_mb = MBX_0;
@@ -749,11 +739,11 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
749 rval = qla2x00_mailbox_command(vha, mcp); 739 rval = qla2x00_mailbox_command(vha, mcp);
750 740
751 if (rval != QLA_SUCCESS) { 741 if (rval != QLA_SUCCESS) {
752 DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__, 742 ql_dbg(ql_dbg_mbx, vha, 0x1036,
753 vha->host_no, rval, IS_FWI2_CAPABLE(vha->hw) ? 743 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
754 (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1])); 744 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
755 } else { 745 } else {
756 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 746 ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__);
757 } 747 }
758 748
759 return rval; 749 return rval;
@@ -785,6 +775,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
785 mbx_cmd_t mc; 775 mbx_cmd_t mc;
786 mbx_cmd_t *mcp = &mc; 776 mbx_cmd_t *mcp = &mc;
787 777
778 ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__);
779
788 mcp->mb[0] = MBC_IOCB_COMMAND_A64; 780 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
789 mcp->mb[1] = 0; 781 mcp->mb[1] = 0;
790 mcp->mb[2] = MSW(phys_addr); 782 mcp->mb[2] = MSW(phys_addr);
@@ -799,14 +791,14 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
799 791
800 if (rval != QLA_SUCCESS) { 792 if (rval != QLA_SUCCESS) {
801 /*EMPTY*/ 793 /*EMPTY*/
802 DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n", 794 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
803 vha->host_no, rval));
804 } else { 795 } else {
805 sts_entry_t *sts_entry = (sts_entry_t *) buffer; 796 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
806 797
807 /* Mask reserved bits. */ 798 /* Mask reserved bits. */
808 sts_entry->entry_status &= 799 sts_entry->entry_status &=
809 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK; 800 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
801 ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__);
810 } 802 }
811 803
812 return rval; 804 return rval;
@@ -847,7 +839,7 @@ qla2x00_abort_command(srb_t *sp)
847 struct qla_hw_data *ha = vha->hw; 839 struct qla_hw_data *ha = vha->hw;
848 struct req_que *req = vha->req; 840 struct req_que *req = vha->req;
849 841
850 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no)); 842 ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
851 843
852 spin_lock_irqsave(&ha->hardware_lock, flags); 844 spin_lock_irqsave(&ha->hardware_lock, flags);
853 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 845 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -876,11 +868,9 @@ qla2x00_abort_command(srb_t *sp)
876 rval = qla2x00_mailbox_command(vha, mcp); 868 rval = qla2x00_mailbox_command(vha, mcp);
877 869
878 if (rval != QLA_SUCCESS) { 870 if (rval != QLA_SUCCESS) {
879 DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n", 871 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
880 vha->host_no, rval));
881 } else { 872 } else {
882 DEBUG11(printk("qla2x00_abort_command(%ld): done.\n", 873 ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__);
883 vha->host_no));
884 } 874 }
885 875
886 return rval; 876 return rval;
@@ -896,10 +886,11 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
896 struct req_que *req; 886 struct req_que *req;
897 struct rsp_que *rsp; 887 struct rsp_que *rsp;
898 888
899 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
900
901 l = l; 889 l = l;
902 vha = fcport->vha; 890 vha = fcport->vha;
891
892 ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__);
893
903 req = vha->hw->req_q_map[0]; 894 req = vha->hw->req_q_map[0];
904 rsp = req->rsp; 895 rsp = req->rsp;
905 mcp->mb[0] = MBC_ABORT_TARGET; 896 mcp->mb[0] = MBC_ABORT_TARGET;
@@ -919,18 +910,17 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
919 mcp->flags = 0; 910 mcp->flags = 0;
920 rval = qla2x00_mailbox_command(vha, mcp); 911 rval = qla2x00_mailbox_command(vha, mcp);
921 if (rval != QLA_SUCCESS) { 912 if (rval != QLA_SUCCESS) {
922 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 913 ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval);
923 vha->host_no, rval));
924 } 914 }
925 915
926 /* Issue marker IOCB. */ 916 /* Issue marker IOCB. */
927 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0, 917 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
928 MK_SYNC_ID); 918 MK_SYNC_ID);
929 if (rval2 != QLA_SUCCESS) { 919 if (rval2 != QLA_SUCCESS) {
930 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 920 ql_dbg(ql_dbg_mbx, vha, 0x1040,
931 "(%x).\n", __func__, vha->host_no, rval2)); 921 "Failed to issue marker IOCB (%x).\n", rval2);
932 } else { 922 } else {
933 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 923 ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__);
934 } 924 }
935 925
936 return rval; 926 return rval;
@@ -946,9 +936,10 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
946 struct req_que *req; 936 struct req_que *req;
947 struct rsp_que *rsp; 937 struct rsp_que *rsp;
948 938
949 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
950
951 vha = fcport->vha; 939 vha = fcport->vha;
940
941 ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__);
942
952 req = vha->hw->req_q_map[0]; 943 req = vha->hw->req_q_map[0];
953 rsp = req->rsp; 944 rsp = req->rsp;
954 mcp->mb[0] = MBC_LUN_RESET; 945 mcp->mb[0] = MBC_LUN_RESET;
@@ -966,18 +957,17 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
966 mcp->flags = 0; 957 mcp->flags = 0;
967 rval = qla2x00_mailbox_command(vha, mcp); 958 rval = qla2x00_mailbox_command(vha, mcp);
968 if (rval != QLA_SUCCESS) { 959 if (rval != QLA_SUCCESS) {
969 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 960 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
970 vha->host_no, rval));
971 } 961 }
972 962
973 /* Issue marker IOCB. */ 963 /* Issue marker IOCB. */
974 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, 964 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
975 MK_SYNC_ID_LUN); 965 MK_SYNC_ID_LUN);
976 if (rval2 != QLA_SUCCESS) { 966 if (rval2 != QLA_SUCCESS) {
977 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 967 ql_dbg(ql_dbg_mbx, vha, 0x1044,
978 "(%x).\n", __func__, vha->host_no, rval2)); 968 "Failed to issue marker IOCB (%x).\n", rval2);
979 } else { 969 } else {
980 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 970 ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__);
981 } 971 }
982 972
983 return rval; 973 return rval;
@@ -1011,8 +1001,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1011 mbx_cmd_t mc; 1001 mbx_cmd_t mc;
1012 mbx_cmd_t *mcp = &mc; 1002 mbx_cmd_t *mcp = &mc;
1013 1003
1014 DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n", 1004 ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__);
1015 vha->host_no));
1016 1005
1017 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 1006 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1018 mcp->mb[9] = vha->vp_idx; 1007 mcp->mb[9] = vha->vp_idx;
@@ -1038,11 +1027,9 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1038 1027
1039 if (rval != QLA_SUCCESS) { 1028 if (rval != QLA_SUCCESS) {
1040 /*EMPTY*/ 1029 /*EMPTY*/
1041 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n", 1030 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1042 vha->host_no, rval));
1043 } else { 1031 } else {
1044 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", 1032 ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
1045 vha->host_no));
1046 1033
1047 if (IS_QLA8XXX_TYPE(vha->hw)) { 1034 if (IS_QLA8XXX_TYPE(vha->hw)) {
1048 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; 1035 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
@@ -1083,8 +1070,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1083 mbx_cmd_t mc; 1070 mbx_cmd_t mc;
1084 mbx_cmd_t *mcp = &mc; 1071 mbx_cmd_t *mcp = &mc;
1085 1072
1086 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n", 1073 ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__);
1087 vha->host_no));
1088 1074
1089 mcp->mb[0] = MBC_GET_RETRY_COUNT; 1075 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1090 mcp->out_mb = MBX_0; 1076 mcp->out_mb = MBX_0;
@@ -1095,8 +1081,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1095 1081
1096 if (rval != QLA_SUCCESS) { 1082 if (rval != QLA_SUCCESS) {
1097 /*EMPTY*/ 1083 /*EMPTY*/
1098 DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n", 1084 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1099 vha->host_no, mcp->mb[0])); 1085 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1100 } else { 1086 } else {
1101 /* Convert returned data and check our values. */ 1087 /* Convert returned data and check our values. */
1102 *r_a_tov = mcp->mb[3] / 2; 1088 *r_a_tov = mcp->mb[3] / 2;
@@ -1107,8 +1093,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1107 *tov = ratov; 1093 *tov = ratov;
1108 } 1094 }
1109 1095
1110 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d " 1096 ql_dbg(ql_dbg_mbx, vha, 0x104b,
1111 "ratov=%d.\n", vha->host_no, mcp->mb[3], ratov)); 1097 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1112 } 1098 }
1113 1099
1114 return rval; 1100 return rval;
@@ -1139,8 +1125,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1139 mbx_cmd_t *mcp = &mc; 1125 mbx_cmd_t *mcp = &mc;
1140 struct qla_hw_data *ha = vha->hw; 1126 struct qla_hw_data *ha = vha->hw;
1141 1127
1142 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n", 1128 ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__);
1143 vha->host_no));
1144 1129
1145 if (IS_QLA82XX(ha) && ql2xdbwr) 1130 if (IS_QLA82XX(ha) && ql2xdbwr)
1146 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, 1131 qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
@@ -1174,13 +1159,11 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1174 1159
1175 if (rval != QLA_SUCCESS) { 1160 if (rval != QLA_SUCCESS) {
1176 /*EMPTY*/ 1161 /*EMPTY*/
1177 DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x " 1162 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1178 "mb0=%x.\n", 1163 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1179 vha->host_no, rval, mcp->mb[0]));
1180 } else { 1164 } else {
1181 /*EMPTY*/ 1165 /*EMPTY*/
1182 DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n", 1166 ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
1183 vha->host_no));
1184 } 1167 }
1185 1168
1186 return rval; 1169 return rval;
@@ -1213,13 +1196,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1213 dma_addr_t pd_dma; 1196 dma_addr_t pd_dma;
1214 struct qla_hw_data *ha = vha->hw; 1197 struct qla_hw_data *ha = vha->hw;
1215 1198
1216 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1199 ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__);
1217 1200
1218 pd24 = NULL; 1201 pd24 = NULL;
1219 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1202 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1220 if (pd == NULL) { 1203 if (pd == NULL) {
1221 DEBUG2_3(printk("%s(%ld): failed to allocate Port Database " 1204 ql_log(ql_log_warn, vha, 0x1050,
1222 "structure.\n", __func__, vha->host_no)); 1205 "Failed to allocate port database structure.\n");
1223 return QLA_MEMORY_ALLOC_FAILED; 1206 return QLA_MEMORY_ALLOC_FAILED;
1224 } 1207 }
1225 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); 1208 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
@@ -1261,12 +1244,10 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1261 /* Check for logged in state. */ 1244 /* Check for logged in state. */
1262 if (pd24->current_login_state != PDS_PRLI_COMPLETE && 1245 if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
1263 pd24->last_login_state != PDS_PRLI_COMPLETE) { 1246 pd24->last_login_state != PDS_PRLI_COMPLETE) {
1264 DEBUG2(qla_printk(KERN_WARNING, ha, 1247 ql_dbg(ql_dbg_mbx, vha, 0x1051,
1265 "scsi(%ld): Unable to verify login-state (%x/%x) " 1248 "Unable to verify login-state (%x/%x) for "
1266 " - portid=%02x%02x%02x.\n", vha->host_no, 1249 "loop_id %x.\n", pd24->current_login_state,
1267 pd24->current_login_state, pd24->last_login_state, 1250 pd24->last_login_state, fcport->loop_id);
1268 fcport->d_id.b.domain, fcport->d_id.b.area,
1269 fcport->d_id.b.al_pa));
1270 rval = QLA_FUNCTION_FAILED; 1251 rval = QLA_FUNCTION_FAILED;
1271 goto gpd_error_out; 1252 goto gpd_error_out;
1272 } 1253 }
@@ -1290,12 +1271,11 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1290 /* Check for logged in state. */ 1271 /* Check for logged in state. */
1291 if (pd->master_state != PD_STATE_PORT_LOGGED_IN && 1272 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1292 pd->slave_state != PD_STATE_PORT_LOGGED_IN) { 1273 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1293 DEBUG2(qla_printk(KERN_WARNING, ha, 1274 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1294 "scsi(%ld): Unable to verify login-state (%x/%x) " 1275 "Unable to verify login-state (%x/%x) - "
1295 " - portid=%02x%02x%02x.\n", vha->host_no, 1276 "portid=%02x%02x%02x.\n", pd->master_state,
1296 pd->master_state, pd->slave_state, 1277 pd->slave_state, fcport->d_id.b.domain,
1297 fcport->d_id.b.domain, fcport->d_id.b.area, 1278 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1298 fcport->d_id.b.al_pa));
1299 rval = QLA_FUNCTION_FAILED; 1279 rval = QLA_FUNCTION_FAILED;
1300 goto gpd_error_out; 1280 goto gpd_error_out;
1301 } 1281 }
@@ -1325,10 +1305,11 @@ gpd_error_out:
1325 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 1305 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1326 1306
1327 if (rval != QLA_SUCCESS) { 1307 if (rval != QLA_SUCCESS) {
1328 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 1308 ql_dbg(ql_dbg_mbx, vha, 0x1052,
1329 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 1309 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1310 mcp->mb[0], mcp->mb[1]);
1330 } else { 1311 } else {
1331 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1312 ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__);
1332 } 1313 }
1333 1314
1334 return rval; 1315 return rval;
@@ -1357,8 +1338,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1357 mbx_cmd_t mc; 1338 mbx_cmd_t mc;
1358 mbx_cmd_t *mcp = &mc; 1339 mbx_cmd_t *mcp = &mc;
1359 1340
1360 DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n", 1341 ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__);
1361 vha->host_no));
1362 1342
1363 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1343 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1364 mcp->out_mb = MBX_0; 1344 mcp->out_mb = MBX_0;
@@ -1381,12 +1361,10 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1381 1361
1382 if (rval != QLA_SUCCESS) { 1362 if (rval != QLA_SUCCESS) {
1383 /*EMPTY*/ 1363 /*EMPTY*/
1384 DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): " 1364 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
1385 "failed=%x.\n", vha->host_no, rval));
1386 } else { 1365 } else {
1387 /*EMPTY*/ 1366 /*EMPTY*/
1388 DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n", 1367 ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__);
1389 vha->host_no));
1390 } 1368 }
1391 1369
1392 return rval; 1370 return rval;
@@ -1418,8 +1396,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1418 mbx_cmd_t mc; 1396 mbx_cmd_t mc;
1419 mbx_cmd_t *mcp = &mc; 1397 mbx_cmd_t *mcp = &mc;
1420 1398
1421 DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n", 1399 ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__);
1422 vha->host_no));
1423 1400
1424 mcp->mb[0] = MBC_GET_PORT_NAME; 1401 mcp->mb[0] = MBC_GET_PORT_NAME;
1425 mcp->mb[9] = vha->vp_idx; 1402 mcp->mb[9] = vha->vp_idx;
@@ -1439,8 +1416,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1439 1416
1440 if (rval != QLA_SUCCESS) { 1417 if (rval != QLA_SUCCESS) {
1441 /*EMPTY*/ 1418 /*EMPTY*/
1442 DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n", 1419 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
1443 vha->host_no, rval));
1444 } else { 1420 } else {
1445 if (name != NULL) { 1421 if (name != NULL) {
1446 /* This function returns name in big endian. */ 1422 /* This function returns name in big endian. */
@@ -1454,8 +1430,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1454 name[7] = LSB(mcp->mb[7]); 1430 name[7] = LSB(mcp->mb[7]);
1455 } 1431 }
1456 1432
1457 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n", 1433 ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__);
1458 vha->host_no));
1459 } 1434 }
1460 1435
1461 return rval; 1436 return rval;
@@ -1483,7 +1458,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1483 mbx_cmd_t mc; 1458 mbx_cmd_t mc;
1484 mbx_cmd_t *mcp = &mc; 1459 mbx_cmd_t *mcp = &mc;
1485 1460
1486 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1461 ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
1487 1462
1488 if (IS_QLA8XXX_TYPE(vha->hw)) { 1463 if (IS_QLA8XXX_TYPE(vha->hw)) {
1489 /* Logout across all FCFs. */ 1464 /* Logout across all FCFs. */
@@ -1517,11 +1492,10 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1517 1492
1518 if (rval != QLA_SUCCESS) { 1493 if (rval != QLA_SUCCESS) {
1519 /*EMPTY*/ 1494 /*EMPTY*/
1520 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", 1495 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
1521 __func__, vha->host_no, rval));
1522 } else { 1496 } else {
1523 /*EMPTY*/ 1497 /*EMPTY*/
1524 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1498 ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__);
1525 } 1499 }
1526 1500
1527 return rval; 1501 return rval;
@@ -1553,12 +1527,11 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1553 mbx_cmd_t mc; 1527 mbx_cmd_t mc;
1554 mbx_cmd_t *mcp = &mc; 1528 mbx_cmd_t *mcp = &mc;
1555 1529
1556 DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n", 1530 ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__);
1557 vha->host_no));
1558 1531
1559 DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total " 1532 ql_dbg(ql_dbg_mbx, vha, 0x105e,
1560 "tov=%d.\n", vha->hw->retry_count, vha->hw->login_timeout, 1533 "Retry cnt=%d ratov=%d total tov=%d.\n",
1561 mcp->tov)); 1534 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
1562 1535
1563 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 1536 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
1564 mcp->mb[1] = cmd_size; 1537 mcp->mb[1] = cmd_size;
@@ -1575,13 +1548,12 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1575 1548
1576 if (rval != QLA_SUCCESS) { 1549 if (rval != QLA_SUCCESS) {
1577 /*EMPTY*/ 1550 /*EMPTY*/
1578 DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1551 ql_dbg(ql_dbg_mbx, vha, 0x105f,
1579 "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 1552 "Failed=%x mb[0]=%x mb[1]=%x.\n",
1580 DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1553 rval, mcp->mb[0], mcp->mb[1]);
1581 "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
1582 } else { 1554 } else {
1583 /*EMPTY*/ 1555 /*EMPTY*/
1584 DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", vha->host_no)); 1556 ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__);
1585 } 1557 }
1586 1558
1587 return rval; 1559 return rval;
@@ -1600,7 +1572,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1600 struct req_que *req; 1572 struct req_que *req;
1601 struct rsp_que *rsp; 1573 struct rsp_que *rsp;
1602 1574
1603 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1575 ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__);
1604 1576
1605 if (ha->flags.cpu_affinity_enabled) 1577 if (ha->flags.cpu_affinity_enabled)
1606 req = ha->req_q_map[0]; 1578 req = ha->req_q_map[0];
@@ -1610,8 +1582,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1610 1582
1611 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1583 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1612 if (lg == NULL) { 1584 if (lg == NULL) {
1613 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", 1585 ql_log(ql_log_warn, vha, 0x1062,
1614 __func__, vha->host_no)); 1586 "Failed to allocate login IOCB.\n");
1615 return QLA_MEMORY_ALLOC_FAILED; 1587 return QLA_MEMORY_ALLOC_FAILED;
1616 } 1588 }
1617 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1589 memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1631,21 +1603,21 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1631 lg->vp_index = vha->vp_idx; 1603 lg->vp_index = vha->vp_idx;
1632 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); 1604 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1633 if (rval != QLA_SUCCESS) { 1605 if (rval != QLA_SUCCESS) {
1634 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB " 1606 ql_dbg(ql_dbg_mbx, vha, 0x1063,
1635 "(%x).\n", __func__, vha->host_no, rval)); 1607 "Failed to issue login IOCB (%x).\n", rval);
1636 } else if (lg->entry_status != 0) { 1608 } else if (lg->entry_status != 0) {
1637 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1609 ql_dbg(ql_dbg_mbx, vha, 0x1064,
1638 "-- error status (%x).\n", __func__, vha->host_no, 1610 "Failed to complete IOCB -- error status (%x).\n",
1639 lg->entry_status)); 1611 lg->entry_status);
1640 rval = QLA_FUNCTION_FAILED; 1612 rval = QLA_FUNCTION_FAILED;
1641 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1613 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1642 iop[0] = le32_to_cpu(lg->io_parameter[0]); 1614 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1643 iop[1] = le32_to_cpu(lg->io_parameter[1]); 1615 iop[1] = le32_to_cpu(lg->io_parameter[1]);
1644 1616
1645 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1617 ql_dbg(ql_dbg_mbx, vha, 0x1065,
1646 "-- completion status (%x) ioparam=%x/%x.\n", __func__, 1618 "Failed to complete IOCB -- completion status (%x) "
1647 vha->host_no, le16_to_cpu(lg->comp_status), iop[0], 1619 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
1648 iop[1])); 1620 iop[0], iop[1]);
1649 1621
1650 switch (iop[0]) { 1622 switch (iop[0]) {
1651 case LSC_SCODE_PORTID_USED: 1623 case LSC_SCODE_PORTID_USED:
@@ -1673,7 +1645,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1673 break; 1645 break;
1674 } 1646 }
1675 } else { 1647 } else {
1676 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1648 ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__);
1677 1649
1678 iop[0] = le32_to_cpu(lg->io_parameter[0]); 1650 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1679 1651
@@ -1728,7 +1700,7 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1728 mbx_cmd_t *mcp = &mc; 1700 mbx_cmd_t *mcp = &mc;
1729 struct qla_hw_data *ha = vha->hw; 1701 struct qla_hw_data *ha = vha->hw;
1730 1702
1731 DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", vha->host_no)); 1703 ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__);
1732 1704
1733 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 1705 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
1734 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1706 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1771,13 +1743,12 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1771 rval = QLA_SUCCESS; 1743 rval = QLA_SUCCESS;
1772 1744
1773 /*EMPTY*/ 1745 /*EMPTY*/
1774 DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x " 1746 ql_dbg(ql_dbg_mbx, vha, 0x1068,
1775 "mb[0]=%x mb[1]=%x mb[2]=%x.\n", vha->host_no, rval, 1747 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
1776 mcp->mb[0], mcp->mb[1], mcp->mb[2])); 1748 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
1777 } else { 1749 } else {
1778 /*EMPTY*/ 1750 /*EMPTY*/
1779 DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n", 1751 ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__);
1780 vha->host_no));
1781 } 1752 }
1782 1753
1783 return rval; 1754 return rval;
@@ -1808,13 +1779,13 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1808 mbx_cmd_t *mcp = &mc; 1779 mbx_cmd_t *mcp = &mc;
1809 struct qla_hw_data *ha = vha->hw; 1780 struct qla_hw_data *ha = vha->hw;
1810 1781
1782 ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__);
1783
1811 if (IS_FWI2_CAPABLE(ha)) 1784 if (IS_FWI2_CAPABLE(ha))
1812 return qla24xx_login_fabric(vha, fcport->loop_id, 1785 return qla24xx_login_fabric(vha, fcport->loop_id,
1813 fcport->d_id.b.domain, fcport->d_id.b.area, 1786 fcport->d_id.b.domain, fcport->d_id.b.area,
1814 fcport->d_id.b.al_pa, mb_ret, opt); 1787 fcport->d_id.b.al_pa, mb_ret, opt);
1815 1788
1816 DEBUG3(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1817
1818 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 1789 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
1819 if (HAS_EXTENDED_IDS(ha)) 1790 if (HAS_EXTENDED_IDS(ha))
1820 mcp->mb[1] = fcport->loop_id; 1791 mcp->mb[1] = fcport->loop_id;
@@ -1845,15 +1816,12 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1845 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) 1816 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
1846 rval = QLA_SUCCESS; 1817 rval = QLA_SUCCESS;
1847 1818
1848 DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 1819 ql_dbg(ql_dbg_mbx, vha, 0x106b,
1849 "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval, 1820 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
1850 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7])); 1821 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
1851 DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
1852 "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
1853 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
1854 } else { 1822 } else {
1855 /*EMPTY*/ 1823 /*EMPTY*/
1856 DEBUG3(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1824 ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__);
1857 } 1825 }
1858 1826
1859 return (rval); 1827 return (rval);
@@ -1870,12 +1838,12 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1870 struct req_que *req; 1838 struct req_que *req;
1871 struct rsp_que *rsp; 1839 struct rsp_que *rsp;
1872 1840
1873 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1841 ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__);
1874 1842
1875 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1843 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1876 if (lg == NULL) { 1844 if (lg == NULL) {
1877 DEBUG2_3(printk("%s(%ld): failed to allocate Logout IOCB.\n", 1845 ql_log(ql_log_warn, vha, 0x106e,
1878 __func__, vha->host_no)); 1846 "Failed to allocate logout IOCB.\n");
1879 return QLA_MEMORY_ALLOC_FAILED; 1847 return QLA_MEMORY_ALLOC_FAILED;
1880 } 1848 }
1881 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1849 memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1899,22 +1867,22 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1899 1867
1900 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); 1868 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1901 if (rval != QLA_SUCCESS) { 1869 if (rval != QLA_SUCCESS) {
1902 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB " 1870 ql_dbg(ql_dbg_mbx, vha, 0x106f,
1903 "(%x).\n", __func__, vha->host_no, rval)); 1871 "Failed to issue logout IOCB (%x).\n", rval);
1904 } else if (lg->entry_status != 0) { 1872 } else if (lg->entry_status != 0) {
1905 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1873 ql_dbg(ql_dbg_mbx, vha, 0x1070,
1906 "-- error status (%x).\n", __func__, vha->host_no, 1874 "Failed to complete IOCB -- error status (%x).\n",
1907 lg->entry_status)); 1875 lg->entry_status);
1908 rval = QLA_FUNCTION_FAILED; 1876 rval = QLA_FUNCTION_FAILED;
1909 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1877 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1910 DEBUG2_3_11(printk("%s(%ld %d): failed to complete IOCB " 1878 ql_dbg(ql_dbg_mbx, vha, 0x1071,
1911 "-- completion status (%x) ioparam=%x/%x.\n", __func__, 1879 "Failed to complete IOCB -- completion status (%x) "
1912 vha->host_no, vha->vp_idx, le16_to_cpu(lg->comp_status), 1880 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
1913 le32_to_cpu(lg->io_parameter[0]), 1881 le32_to_cpu(lg->io_parameter[0]),
1914 le32_to_cpu(lg->io_parameter[1]))); 1882 le32_to_cpu(lg->io_parameter[1]));
1915 } else { 1883 } else {
1916 /*EMPTY*/ 1884 /*EMPTY*/
1917 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 1885 ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__);
1918 } 1886 }
1919 1887
1920 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 1888 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1946,8 +1914,7 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1946 mbx_cmd_t mc; 1914 mbx_cmd_t mc;
1947 mbx_cmd_t *mcp = &mc; 1915 mbx_cmd_t *mcp = &mc;
1948 1916
1949 DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n", 1917 ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__);
1950 vha->host_no));
1951 1918
1952 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 1919 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
1953 mcp->out_mb = MBX_1|MBX_0; 1920 mcp->out_mb = MBX_1|MBX_0;
@@ -1966,12 +1933,11 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1966 1933
1967 if (rval != QLA_SUCCESS) { 1934 if (rval != QLA_SUCCESS) {
1968 /*EMPTY*/ 1935 /*EMPTY*/
1969 DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x " 1936 ql_dbg(ql_dbg_mbx, vha, 0x1074,
1970 "mbx1=%x.\n", vha->host_no, rval, mcp->mb[1])); 1937 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
1971 } else { 1938 } else {
1972 /*EMPTY*/ 1939 /*EMPTY*/
1973 DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n", 1940 ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__);
1974 vha->host_no));
1975 } 1941 }
1976 1942
1977 return rval; 1943 return rval;
@@ -1999,8 +1965,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
1999 mbx_cmd_t mc; 1965 mbx_cmd_t mc;
2000 mbx_cmd_t *mcp = &mc; 1966 mbx_cmd_t *mcp = &mc;
2001 1967
2002 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", 1968 ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__);
2003 vha->host_no));
2004 1969
2005 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1970 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2006 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0; 1971 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
@@ -2014,12 +1979,10 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
2014 1979
2015 if (rval != QLA_SUCCESS) { 1980 if (rval != QLA_SUCCESS) {
2016 /*EMPTY*/ 1981 /*EMPTY*/
2017 DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n", 1982 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2018 vha->host_no, rval));
2019 } else { 1983 } else {
2020 /*EMPTY*/ 1984 /*EMPTY*/
2021 DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n", 1985 ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__);
2022 vha->host_no));
2023 } 1986 }
2024 1987
2025 return rval; 1988 return rval;
@@ -2045,8 +2008,7 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2045 mbx_cmd_t mc; 2008 mbx_cmd_t mc;
2046 mbx_cmd_t *mcp = &mc; 2009 mbx_cmd_t *mcp = &mc;
2047 2010
2048 DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n", 2011 ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__);
2049 vha->host_no));
2050 2012
2051 if (id_list == NULL) 2013 if (id_list == NULL)
2052 return QLA_FUNCTION_FAILED; 2014 return QLA_FUNCTION_FAILED;
@@ -2075,12 +2037,10 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2075 2037
2076 if (rval != QLA_SUCCESS) { 2038 if (rval != QLA_SUCCESS) {
2077 /*EMPTY*/ 2039 /*EMPTY*/
2078 DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n", 2040 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2079 vha->host_no, rval));
2080 } else { 2041 } else {
2081 *entries = mcp->mb[1]; 2042 *entries = mcp->mb[1];
2082 DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n", 2043 ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__);
2083 vha->host_no));
2084 } 2044 }
2085 2045
2086 return rval; 2046 return rval;
@@ -2108,7 +2068,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2108 mbx_cmd_t mc; 2068 mbx_cmd_t mc;
2109 mbx_cmd_t *mcp = &mc; 2069 mbx_cmd_t *mcp = &mc;
2110 2070
2111 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2071 ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__);
2112 2072
2113 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2073 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2114 mcp->out_mb = MBX_0; 2074 mcp->out_mb = MBX_0;
@@ -2121,14 +2081,14 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2121 2081
2122 if (rval != QLA_SUCCESS) { 2082 if (rval != QLA_SUCCESS) {
2123 /*EMPTY*/ 2083 /*EMPTY*/
2124 DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__, 2084 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2125 vha->host_no, mcp->mb[0])); 2085 "Failed mb[0]=%x.\n", mcp->mb[0]);
2126 } else { 2086 } else {
2127 DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x " 2087 ql_dbg(ql_dbg_mbx, vha, 0x107e,
2128 "mb7=%x mb10=%x mb11=%x mb12=%x.\n", __func__, 2088 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2129 vha->host_no, mcp->mb[1], mcp->mb[2], mcp->mb[3], 2089 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2130 mcp->mb[6], mcp->mb[7], mcp->mb[10], mcp->mb[11], 2090 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2131 mcp->mb[12])); 2091 mcp->mb[11], mcp->mb[12]);
2132 2092
2133 if (cur_xchg_cnt) 2093 if (cur_xchg_cnt)
2134 *cur_xchg_cnt = mcp->mb[3]; 2094 *cur_xchg_cnt = mcp->mb[3];
@@ -2147,7 +2107,6 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2147 return (rval); 2107 return (rval);
2148} 2108}
2149 2109
2150#if defined(QL_DEBUG_LEVEL_3)
2151/* 2110/*
2152 * qla2x00_get_fcal_position_map 2111 * qla2x00_get_fcal_position_map
2153 * Get FCAL (LILP) position map using mailbox command 2112 * Get FCAL (LILP) position map using mailbox command
@@ -2172,10 +2131,12 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2172 dma_addr_t pmap_dma; 2131 dma_addr_t pmap_dma;
2173 struct qla_hw_data *ha = vha->hw; 2132 struct qla_hw_data *ha = vha->hw;
2174 2133
2134 ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__);
2135
2175 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 2136 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2176 if (pmap == NULL) { 2137 if (pmap == NULL) {
2177 DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****", 2138 ql_log(ql_log_warn, vha, 0x1080,
2178 __func__, vha->host_no)); 2139 "Memory alloc failed.\n");
2179 return QLA_MEMORY_ALLOC_FAILED; 2140 return QLA_MEMORY_ALLOC_FAILED;
2180 } 2141 }
2181 memset(pmap, 0, FCAL_MAP_SIZE); 2142 memset(pmap, 0, FCAL_MAP_SIZE);
@@ -2193,10 +2154,11 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2193 rval = qla2x00_mailbox_command(vha, mcp); 2154 rval = qla2x00_mailbox_command(vha, mcp);
2194 2155
2195 if (rval == QLA_SUCCESS) { 2156 if (rval == QLA_SUCCESS) {
2196 DEBUG11(printk("%s(%ld): (mb0=%x/mb1=%x) FC/AL Position Map " 2157 ql_dbg(ql_dbg_mbx, vha, 0x1081,
2197 "size (%x)\n", __func__, vha->host_no, mcp->mb[0], 2158 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2198 mcp->mb[1], (unsigned)pmap[0])); 2159 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2199 DEBUG11(qla2x00_dump_buffer(pmap, pmap[0] + 1)); 2160 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2161 pmap, pmap[0] + 1);
2200 2162
2201 if (pos_map) 2163 if (pos_map)
2202 memcpy(pos_map, pmap, FCAL_MAP_SIZE); 2164 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
@@ -2204,15 +2166,13 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2204 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); 2166 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2205 2167
2206 if (rval != QLA_SUCCESS) { 2168 if (rval != QLA_SUCCESS) {
2207 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2169 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2208 vha->host_no, rval));
2209 } else { 2170 } else {
2210 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2171 ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__);
2211 } 2172 }
2212 2173
2213 return rval; 2174 return rval;
2214} 2175}
2215#endif
2216 2176
2217/* 2177/*
2218 * qla2x00_get_link_status 2178 * qla2x00_get_link_status
@@ -2237,7 +2197,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2237 uint32_t *siter, *diter, dwords; 2197 uint32_t *siter, *diter, dwords;
2238 struct qla_hw_data *ha = vha->hw; 2198 struct qla_hw_data *ha = vha->hw;
2239 2199
2240 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2200 ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__);
2241 2201
2242 mcp->mb[0] = MBC_GET_LINK_STATUS; 2202 mcp->mb[0] = MBC_GET_LINK_STATUS;
2243 mcp->mb[2] = MSW(stats_dma); 2203 mcp->mb[2] = MSW(stats_dma);
@@ -2266,11 +2226,12 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2266 2226
2267 if (rval == QLA_SUCCESS) { 2227 if (rval == QLA_SUCCESS) {
2268 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2228 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2269 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2229 ql_dbg(ql_dbg_mbx, vha, 0x1085,
2270 __func__, vha->host_no, mcp->mb[0])); 2230 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2271 rval = QLA_FUNCTION_FAILED; 2231 rval = QLA_FUNCTION_FAILED;
2272 } else { 2232 } else {
2273 /* Copy over data -- firmware data is LE. */ 2233 /* Copy over data -- firmware data is LE. */
2234 ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__);
2274 dwords = offsetof(struct link_statistics, unused1) / 4; 2235 dwords = offsetof(struct link_statistics, unused1) / 4;
2275 siter = diter = &stats->link_fail_cnt; 2236 siter = diter = &stats->link_fail_cnt;
2276 while (dwords--) 2237 while (dwords--)
@@ -2278,8 +2239,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2278 } 2239 }
2279 } else { 2240 } else {
2280 /* Failed. */ 2241 /* Failed. */
2281 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2242 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
2282 vha->host_no, rval));
2283 } 2243 }
2284 2244
2285 return rval; 2245 return rval;
@@ -2294,7 +2254,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2294 mbx_cmd_t *mcp = &mc; 2254 mbx_cmd_t *mcp = &mc;
2295 uint32_t *siter, *diter, dwords; 2255 uint32_t *siter, *diter, dwords;
2296 2256
2297 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2257 ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__);
2298 2258
2299 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; 2259 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
2300 mcp->mb[2] = MSW(stats_dma); 2260 mcp->mb[2] = MSW(stats_dma);
@@ -2312,10 +2272,11 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2312 2272
2313 if (rval == QLA_SUCCESS) { 2273 if (rval == QLA_SUCCESS) {
2314 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2274 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2315 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2275 ql_dbg(ql_dbg_mbx, vha, 0x1089,
2316 __func__, vha->host_no, mcp->mb[0])); 2276 "Failed mb[0]=%x.\n", mcp->mb[0]);
2317 rval = QLA_FUNCTION_FAILED; 2277 rval = QLA_FUNCTION_FAILED;
2318 } else { 2278 } else {
2279 ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__);
2319 /* Copy over data -- firmware data is LE. */ 2280 /* Copy over data -- firmware data is LE. */
2320 dwords = sizeof(struct link_statistics) / 4; 2281 dwords = sizeof(struct link_statistics) / 4;
2321 siter = diter = &stats->link_fail_cnt; 2282 siter = diter = &stats->link_fail_cnt;
@@ -2324,8 +2285,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2324 } 2285 }
2325 } else { 2286 } else {
2326 /* Failed. */ 2287 /* Failed. */
2327 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2288 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
2328 vha->host_no, rval));
2329 } 2289 }
2330 2290
2331 return rval; 2291 return rval;
@@ -2345,7 +2305,7 @@ qla24xx_abort_command(srb_t *sp)
2345 struct qla_hw_data *ha = vha->hw; 2305 struct qla_hw_data *ha = vha->hw;
2346 struct req_que *req = vha->req; 2306 struct req_que *req = vha->req;
2347 2307
2348 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2308 ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__);
2349 2309
2350 spin_lock_irqsave(&ha->hardware_lock, flags); 2310 spin_lock_irqsave(&ha->hardware_lock, flags);
2351 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2311 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -2360,8 +2320,8 @@ qla24xx_abort_command(srb_t *sp)
2360 2320
2361 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 2321 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
2362 if (abt == NULL) { 2322 if (abt == NULL) {
2363 DEBUG2_3(printk("%s(%ld): failed to allocate Abort IOCB.\n", 2323 ql_log(ql_log_warn, vha, 0x108d,
2364 __func__, vha->host_no)); 2324 "Failed to allocate abort IOCB.\n");
2365 return QLA_MEMORY_ALLOC_FAILED; 2325 return QLA_MEMORY_ALLOC_FAILED;
2366 } 2326 }
2367 memset(abt, 0, sizeof(struct abort_entry_24xx)); 2327 memset(abt, 0, sizeof(struct abort_entry_24xx));
@@ -2380,20 +2340,20 @@ qla24xx_abort_command(srb_t *sp)
2380 2340
2381 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); 2341 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
2382 if (rval != QLA_SUCCESS) { 2342 if (rval != QLA_SUCCESS) {
2383 DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n", 2343 ql_dbg(ql_dbg_mbx, vha, 0x108e,
2384 __func__, vha->host_no, rval)); 2344 "Failed to issue IOCB (%x).\n", rval);
2385 } else if (abt->entry_status != 0) { 2345 } else if (abt->entry_status != 0) {
2386 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2346 ql_dbg(ql_dbg_mbx, vha, 0x108f,
2387 "-- error status (%x).\n", __func__, vha->host_no, 2347 "Failed to complete IOCB -- error status (%x).\n",
2388 abt->entry_status)); 2348 abt->entry_status);
2389 rval = QLA_FUNCTION_FAILED; 2349 rval = QLA_FUNCTION_FAILED;
2390 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) { 2350 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
2391 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2351 ql_dbg(ql_dbg_mbx, vha, 0x1090,
2392 "-- completion status (%x).\n", __func__, vha->host_no, 2352 "Failed to complete IOCB -- completion status (%x).\n",
2393 le16_to_cpu(abt->nport_handle))); 2353 le16_to_cpu(abt->nport_handle));
2394 rval = QLA_FUNCTION_FAILED; 2354 rval = QLA_FUNCTION_FAILED;
2395 } else { 2355 } else {
2396 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2356 ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__);
2397 } 2357 }
2398 2358
2399 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 2359 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2421,19 +2381,20 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2421 struct req_que *req; 2381 struct req_que *req;
2422 struct rsp_que *rsp; 2382 struct rsp_que *rsp;
2423 2383
2424 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
2425
2426 vha = fcport->vha; 2384 vha = fcport->vha;
2427 ha = vha->hw; 2385 ha = vha->hw;
2428 req = vha->req; 2386 req = vha->req;
2387
2388 ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__);
2389
2429 if (ha->flags.cpu_affinity_enabled) 2390 if (ha->flags.cpu_affinity_enabled)
2430 rsp = ha->rsp_q_map[tag + 1]; 2391 rsp = ha->rsp_q_map[tag + 1];
2431 else 2392 else
2432 rsp = req->rsp; 2393 rsp = req->rsp;
2433 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 2394 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2434 if (tsk == NULL) { 2395 if (tsk == NULL) {
2435 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " 2396 ql_log(ql_log_warn, vha, 0x1093,
2436 "IOCB.\n", __func__, vha->host_no)); 2397 "Failed to allocate task management IOCB.\n");
2437 return QLA_MEMORY_ALLOC_FAILED; 2398 return QLA_MEMORY_ALLOC_FAILED;
2438 } 2399 }
2439 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd)); 2400 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
@@ -2457,30 +2418,30 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2457 sts = &tsk->p.sts; 2418 sts = &tsk->p.sts;
2458 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); 2419 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
2459 if (rval != QLA_SUCCESS) { 2420 if (rval != QLA_SUCCESS) {
2460 DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB " 2421 ql_dbg(ql_dbg_mbx, vha, 0x1094,
2461 "(%x).\n", __func__, vha->host_no, name, rval)); 2422 "Failed to issue %s reset IOCB (%x).\n", name, rval);
2462 } else if (sts->entry_status != 0) { 2423 } else if (sts->entry_status != 0) {
2463 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2424 ql_dbg(ql_dbg_mbx, vha, 0x1095,
2464 "-- error status (%x).\n", __func__, vha->host_no, 2425 "Failed to complete IOCB -- error status (%x).\n",
2465 sts->entry_status)); 2426 sts->entry_status);
2466 rval = QLA_FUNCTION_FAILED; 2427 rval = QLA_FUNCTION_FAILED;
2467 } else if (sts->comp_status != 2428 } else if (sts->comp_status !=
2468 __constant_cpu_to_le16(CS_COMPLETE)) { 2429 __constant_cpu_to_le16(CS_COMPLETE)) {
2469 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2430 ql_dbg(ql_dbg_mbx, vha, 0x1096,
2470 "-- completion status (%x).\n", __func__, 2431 "Failed to complete IOCB -- completion status (%x).\n",
2471 vha->host_no, le16_to_cpu(sts->comp_status))); 2432 le16_to_cpu(sts->comp_status));
2472 rval = QLA_FUNCTION_FAILED; 2433 rval = QLA_FUNCTION_FAILED;
2473 } else if (le16_to_cpu(sts->scsi_status) & 2434 } else if (le16_to_cpu(sts->scsi_status) &
2474 SS_RESPONSE_INFO_LEN_VALID) { 2435 SS_RESPONSE_INFO_LEN_VALID) {
2475 if (le32_to_cpu(sts->rsp_data_len) < 4) { 2436 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2476 DEBUG2_3_11(printk("%s(%ld): ignoring inconsistent " 2437 ql_dbg(ql_dbg_mbx, vha, 0x1097,
2477 "data length -- not enough response info (%d).\n", 2438 "Ignoring inconsistent data length -- not enough "
2478 __func__, vha->host_no, 2439 "response info (%d).\n",
2479 le32_to_cpu(sts->rsp_data_len))); 2440 le32_to_cpu(sts->rsp_data_len));
2480 } else if (sts->data[3]) { 2441 } else if (sts->data[3]) {
2481 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2442 ql_dbg(ql_dbg_mbx, vha, 0x1098,
2482 "-- response (%x).\n", __func__, 2443 "Failed to complete IOCB -- response (%x).\n",
2483 vha->host_no, sts->data[3])); 2444 sts->data[3]);
2484 rval = QLA_FUNCTION_FAILED; 2445 rval = QLA_FUNCTION_FAILED;
2485 } 2446 }
2486 } 2447 }
@@ -2489,10 +2450,10 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2489 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, 2450 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
2490 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID); 2451 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
2491 if (rval2 != QLA_SUCCESS) { 2452 if (rval2 != QLA_SUCCESS) {
2492 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 2453 ql_dbg(ql_dbg_mbx, vha, 0x1099,
2493 "(%x).\n", __func__, vha->host_no, rval2)); 2454 "Failed to issue marker IOCB (%x).\n", rval2);
2494 } else { 2455 } else {
2495 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2456 ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__);
2496 } 2457 }
2497 2458
2498 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 2459 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
@@ -2533,7 +2494,7 @@ qla2x00_system_error(scsi_qla_host_t *vha)
2533 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 2494 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
2534 return QLA_FUNCTION_FAILED; 2495 return QLA_FUNCTION_FAILED;
2535 2496
2536 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2497 ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__);
2537 2498
2538 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 2499 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
2539 mcp->out_mb = MBX_0; 2500 mcp->out_mb = MBX_0;
@@ -2543,10 +2504,9 @@ qla2x00_system_error(scsi_qla_host_t *vha)
2543 rval = qla2x00_mailbox_command(vha, mcp); 2504 rval = qla2x00_mailbox_command(vha, mcp);
2544 2505
2545 if (rval != QLA_SUCCESS) { 2506 if (rval != QLA_SUCCESS) {
2546 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2507 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
2547 vha->host_no, rval));
2548 } else { 2508 } else {
2549 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2509 ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__);
2550 } 2510 }
2551 2511
2552 return rval; 2512 return rval;
@@ -2566,7 +2526,7 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2566 mbx_cmd_t mc; 2526 mbx_cmd_t mc;
2567 mbx_cmd_t *mcp = &mc; 2527 mbx_cmd_t *mcp = &mc;
2568 2528
2569 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2529 ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__);
2570 2530
2571 mcp->mb[0] = MBC_SERDES_PARAMS; 2531 mcp->mb[0] = MBC_SERDES_PARAMS;
2572 mcp->mb[1] = BIT_0; 2532 mcp->mb[1] = BIT_0;
@@ -2581,11 +2541,11 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2581 2541
2582 if (rval != QLA_SUCCESS) { 2542 if (rval != QLA_SUCCESS) {
2583 /*EMPTY*/ 2543 /*EMPTY*/
2584 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 2544 ql_dbg(ql_dbg_mbx, vha, 0x109f,
2585 vha->host_no, rval, mcp->mb[0])); 2545 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2586 } else { 2546 } else {
2587 /*EMPTY*/ 2547 /*EMPTY*/
2588 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2548 ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__);
2589 } 2549 }
2590 2550
2591 return rval; 2551 return rval;
@@ -2601,7 +2561,7 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2601 if (!IS_FWI2_CAPABLE(vha->hw)) 2561 if (!IS_FWI2_CAPABLE(vha->hw))
2602 return QLA_FUNCTION_FAILED; 2562 return QLA_FUNCTION_FAILED;
2603 2563
2604 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2564 ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__);
2605 2565
2606 mcp->mb[0] = MBC_STOP_FIRMWARE; 2566 mcp->mb[0] = MBC_STOP_FIRMWARE;
2607 mcp->out_mb = MBX_0; 2567 mcp->out_mb = MBX_0;
@@ -2611,12 +2571,11 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2611 rval = qla2x00_mailbox_command(vha, mcp); 2571 rval = qla2x00_mailbox_command(vha, mcp);
2612 2572
2613 if (rval != QLA_SUCCESS) { 2573 if (rval != QLA_SUCCESS) {
2614 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2574 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
2615 vha->host_no, rval));
2616 if (mcp->mb[0] == MBS_INVALID_COMMAND) 2575 if (mcp->mb[0] == MBS_INVALID_COMMAND)
2617 rval = QLA_INVALID_COMMAND; 2576 rval = QLA_INVALID_COMMAND;
2618 } else { 2577 } else {
2619 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2578 ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__);
2620 } 2579 }
2621 2580
2622 return rval; 2581 return rval;
@@ -2630,14 +2589,14 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2630 mbx_cmd_t mc; 2589 mbx_cmd_t mc;
2631 mbx_cmd_t *mcp = &mc; 2590 mbx_cmd_t *mcp = &mc;
2632 2591
2592 ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__);
2593
2633 if (!IS_FWI2_CAPABLE(vha->hw)) 2594 if (!IS_FWI2_CAPABLE(vha->hw))
2634 return QLA_FUNCTION_FAILED; 2595 return QLA_FUNCTION_FAILED;
2635 2596
2636 if (unlikely(pci_channel_offline(vha->hw->pdev))) 2597 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2637 return QLA_FUNCTION_FAILED; 2598 return QLA_FUNCTION_FAILED;
2638 2599
2639 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2640
2641 mcp->mb[0] = MBC_TRACE_CONTROL; 2600 mcp->mb[0] = MBC_TRACE_CONTROL;
2642 mcp->mb[1] = TC_EFT_ENABLE; 2601 mcp->mb[1] = TC_EFT_ENABLE;
2643 mcp->mb[2] = LSW(eft_dma); 2602 mcp->mb[2] = LSW(eft_dma);
@@ -2652,10 +2611,11 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2652 mcp->flags = 0; 2611 mcp->flags = 0;
2653 rval = qla2x00_mailbox_command(vha, mcp); 2612 rval = qla2x00_mailbox_command(vha, mcp);
2654 if (rval != QLA_SUCCESS) { 2613 if (rval != QLA_SUCCESS) {
2655 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2614 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
2656 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2615 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2616 rval, mcp->mb[0], mcp->mb[1]);
2657 } else { 2617 } else {
2658 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2618 ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__);
2659 } 2619 }
2660 2620
2661 return rval; 2621 return rval;
@@ -2668,14 +2628,14 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2668 mbx_cmd_t mc; 2628 mbx_cmd_t mc;
2669 mbx_cmd_t *mcp = &mc; 2629 mbx_cmd_t *mcp = &mc;
2670 2630
2631 ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__);
2632
2671 if (!IS_FWI2_CAPABLE(vha->hw)) 2633 if (!IS_FWI2_CAPABLE(vha->hw))
2672 return QLA_FUNCTION_FAILED; 2634 return QLA_FUNCTION_FAILED;
2673 2635
2674 if (unlikely(pci_channel_offline(vha->hw->pdev))) 2636 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2675 return QLA_FUNCTION_FAILED; 2637 return QLA_FUNCTION_FAILED;
2676 2638
2677 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2678
2679 mcp->mb[0] = MBC_TRACE_CONTROL; 2639 mcp->mb[0] = MBC_TRACE_CONTROL;
2680 mcp->mb[1] = TC_EFT_DISABLE; 2640 mcp->mb[1] = TC_EFT_DISABLE;
2681 mcp->out_mb = MBX_1|MBX_0; 2641 mcp->out_mb = MBX_1|MBX_0;
@@ -2684,10 +2644,11 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2684 mcp->flags = 0; 2644 mcp->flags = 0;
2685 rval = qla2x00_mailbox_command(vha, mcp); 2645 rval = qla2x00_mailbox_command(vha, mcp);
2686 if (rval != QLA_SUCCESS) { 2646 if (rval != QLA_SUCCESS) {
2687 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2647 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
2688 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2648 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2649 rval, mcp->mb[0], mcp->mb[1]);
2689 } else { 2650 } else {
2690 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2651 ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__);
2691 } 2652 }
2692 2653
2693 return rval; 2654 return rval;
@@ -2701,14 +2662,14 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2701 mbx_cmd_t mc; 2662 mbx_cmd_t mc;
2702 mbx_cmd_t *mcp = &mc; 2663 mbx_cmd_t *mcp = &mc;
2703 2664
2665 ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
2666
2704 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw)) 2667 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
2705 return QLA_FUNCTION_FAILED; 2668 return QLA_FUNCTION_FAILED;
2706 2669
2707 if (unlikely(pci_channel_offline(vha->hw->pdev))) 2670 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2708 return QLA_FUNCTION_FAILED; 2671 return QLA_FUNCTION_FAILED;
2709 2672
2710 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2711
2712 mcp->mb[0] = MBC_TRACE_CONTROL; 2673 mcp->mb[0] = MBC_TRACE_CONTROL;
2713 mcp->mb[1] = TC_FCE_ENABLE; 2674 mcp->mb[1] = TC_FCE_ENABLE;
2714 mcp->mb[2] = LSW(fce_dma); 2675 mcp->mb[2] = LSW(fce_dma);
@@ -2727,10 +2688,11 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2727 mcp->flags = 0; 2688 mcp->flags = 0;
2728 rval = qla2x00_mailbox_command(vha, mcp); 2689 rval = qla2x00_mailbox_command(vha, mcp);
2729 if (rval != QLA_SUCCESS) { 2690 if (rval != QLA_SUCCESS) {
2730 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2691 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
2731 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2692 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2693 rval, mcp->mb[0], mcp->mb[1]);
2732 } else { 2694 } else {
2733 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2695 ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__);
2734 2696
2735 if (mb) 2697 if (mb)
2736 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 2698 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2748,14 +2710,14 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2748 mbx_cmd_t mc; 2710 mbx_cmd_t mc;
2749 mbx_cmd_t *mcp = &mc; 2711 mbx_cmd_t *mcp = &mc;
2750 2712
2713 ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__);
2714
2751 if (!IS_FWI2_CAPABLE(vha->hw)) 2715 if (!IS_FWI2_CAPABLE(vha->hw))
2752 return QLA_FUNCTION_FAILED; 2716 return QLA_FUNCTION_FAILED;
2753 2717
2754 if (unlikely(pci_channel_offline(vha->hw->pdev))) 2718 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2755 return QLA_FUNCTION_FAILED; 2719 return QLA_FUNCTION_FAILED;
2756 2720
2757 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2758
2759 mcp->mb[0] = MBC_TRACE_CONTROL; 2721 mcp->mb[0] = MBC_TRACE_CONTROL;
2760 mcp->mb[1] = TC_FCE_DISABLE; 2722 mcp->mb[1] = TC_FCE_DISABLE;
2761 mcp->mb[2] = TC_FCE_DISABLE_TRACE; 2723 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
@@ -2766,10 +2728,11 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2766 mcp->flags = 0; 2728 mcp->flags = 0;
2767 rval = qla2x00_mailbox_command(vha, mcp); 2729 rval = qla2x00_mailbox_command(vha, mcp);
2768 if (rval != QLA_SUCCESS) { 2730 if (rval != QLA_SUCCESS) {
2769 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2731 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
2770 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2732 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2733 rval, mcp->mb[0], mcp->mb[1]);
2771 } else { 2734 } else {
2772 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2735 ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__);
2773 2736
2774 if (wr) 2737 if (wr)
2775 *wr = (uint64_t) mcp->mb[5] << 48 | 2738 *wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2794,11 +2757,11 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2794 mbx_cmd_t mc; 2757 mbx_cmd_t mc;
2795 mbx_cmd_t *mcp = &mc; 2758 mbx_cmd_t *mcp = &mc;
2796 2759
2760 ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__);
2761
2797 if (!IS_IIDMA_CAPABLE(vha->hw)) 2762 if (!IS_IIDMA_CAPABLE(vha->hw))
2798 return QLA_FUNCTION_FAILED; 2763 return QLA_FUNCTION_FAILED;
2799 2764
2800 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2801
2802 mcp->mb[0] = MBC_PORT_PARAMS; 2765 mcp->mb[0] = MBC_PORT_PARAMS;
2803 mcp->mb[1] = loop_id; 2766 mcp->mb[1] = loop_id;
2804 mcp->mb[2] = mcp->mb[3] = 0; 2767 mcp->mb[2] = mcp->mb[3] = 0;
@@ -2817,10 +2780,9 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2817 } 2780 }
2818 2781
2819 if (rval != QLA_SUCCESS) { 2782 if (rval != QLA_SUCCESS) {
2820 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2783 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
2821 vha->host_no, rval));
2822 } else { 2784 } else {
2823 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2785 ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__);
2824 if (port_speed) 2786 if (port_speed)
2825 *port_speed = mcp->mb[3]; 2787 *port_speed = mcp->mb[3];
2826 } 2788 }
@@ -2836,11 +2798,11 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2836 mbx_cmd_t mc; 2798 mbx_cmd_t mc;
2837 mbx_cmd_t *mcp = &mc; 2799 mbx_cmd_t *mcp = &mc;
2838 2800
2801 ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__);
2802
2839 if (!IS_IIDMA_CAPABLE(vha->hw)) 2803 if (!IS_IIDMA_CAPABLE(vha->hw))
2840 return QLA_FUNCTION_FAILED; 2804 return QLA_FUNCTION_FAILED;
2841 2805
2842 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2843
2844 mcp->mb[0] = MBC_PORT_PARAMS; 2806 mcp->mb[0] = MBC_PORT_PARAMS;
2845 mcp->mb[1] = loop_id; 2807 mcp->mb[1] = loop_id;
2846 mcp->mb[2] = BIT_0; 2808 mcp->mb[2] = BIT_0;
@@ -2863,10 +2825,9 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2863 } 2825 }
2864 2826
2865 if (rval != QLA_SUCCESS) { 2827 if (rval != QLA_SUCCESS) {
2866 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2828 ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval);
2867 vha->host_no, rval));
2868 } else { 2829 } else {
2869 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 2830 ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__);
2870 } 2831 }
2871 2832
2872 return rval; 2833 return rval;
@@ -2882,33 +2843,36 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2882 scsi_qla_host_t *vp; 2843 scsi_qla_host_t *vp;
2883 unsigned long flags; 2844 unsigned long flags;
2884 2845
2846 ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__);
2847
2885 if (rptid_entry->entry_status != 0) 2848 if (rptid_entry->entry_status != 0)
2886 return; 2849 return;
2887 2850
2888 if (rptid_entry->format == 0) { 2851 if (rptid_entry->format == 0) {
2889 DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d," 2852 ql_dbg(ql_dbg_mbx, vha, 0x10b7,
2890 " number of VPs acquired %d\n", __func__, vha->host_no, 2853 "Format 0 : Number of VPs setup %d, number of "
2891 MSB(le16_to_cpu(rptid_entry->vp_count)), 2854 "VPs acquired %d.\n",
2892 LSB(le16_to_cpu(rptid_entry->vp_count)))); 2855 MSB(le16_to_cpu(rptid_entry->vp_count)),
2893 DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__, 2856 LSB(le16_to_cpu(rptid_entry->vp_count)));
2894 rptid_entry->port_id[2], rptid_entry->port_id[1], 2857 ql_dbg(ql_dbg_mbx, vha, 0x10b8,
2895 rptid_entry->port_id[0])); 2858 "Primary port id %02x%02x%02x.\n",
2859 rptid_entry->port_id[2], rptid_entry->port_id[1],
2860 rptid_entry->port_id[0]);
2896 } else if (rptid_entry->format == 1) { 2861 } else if (rptid_entry->format == 1) {
2897 vp_idx = LSB(stat); 2862 vp_idx = LSB(stat);
2898 DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled " 2863 ql_dbg(ql_dbg_mbx, vha, 0x10b9,
2899 "- status %d - " 2864 "Format 1: VP[%d] enabled - status %d - with "
2900 "with port id %02x%02x%02x\n", __func__, vha->host_no, 2865 "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
2901 vp_idx, MSB(stat),
2902 rptid_entry->port_id[2], rptid_entry->port_id[1], 2866 rptid_entry->port_id[2], rptid_entry->port_id[1],
2903 rptid_entry->port_id[0])); 2867 rptid_entry->port_id[0]);
2904 2868
2905 vp = vha; 2869 vp = vha;
2906 if (vp_idx == 0 && (MSB(stat) != 1)) 2870 if (vp_idx == 0 && (MSB(stat) != 1))
2907 goto reg_needed; 2871 goto reg_needed;
2908 2872
2909 if (MSB(stat) == 1) { 2873 if (MSB(stat) == 1) {
2910 DEBUG2(printk("scsi(%ld): Could not acquire ID for " 2874 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
2911 "VP[%d].\n", vha->host_no, vp_idx)); 2875 "Could not acquire ID for VP[%d].\n", vp_idx);
2912 return; 2876 return;
2913 } 2877 }
2914 2878
@@ -2963,10 +2927,12 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2963 2927
2964 /* This can be called by the parent */ 2928 /* This can be called by the parent */
2965 2929
2930 ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__);
2931
2966 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 2932 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
2967 if (!vpmod) { 2933 if (!vpmod) {
2968 DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP " 2934 ql_log(ql_log_warn, vha, 0x10bc,
2969 "IOCB.\n", __func__, vha->host_no)); 2935 "Failed to allocate modify VP IOCB.\n");
2970 return QLA_MEMORY_ALLOC_FAILED; 2936 return QLA_MEMORY_ALLOC_FAILED;
2971 } 2937 }
2972 2938
@@ -2983,22 +2949,21 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2983 2949
2984 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0); 2950 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
2985 if (rval != QLA_SUCCESS) { 2951 if (rval != QLA_SUCCESS) {
2986 DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB" 2952 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
2987 "(%x).\n", __func__, base_vha->host_no, rval)); 2953 "Failed to issue VP config IOCB (%x).\n", rval);
2988 } else if (vpmod->comp_status != 0) { 2954 } else if (vpmod->comp_status != 0) {
2989 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2955 ql_dbg(ql_dbg_mbx, vha, 0x10be,
2990 "-- error status (%x).\n", __func__, base_vha->host_no, 2956 "Failed to complete IOCB -- error status (%x).\n",
2991 vpmod->comp_status)); 2957 vpmod->comp_status);
2992 rval = QLA_FUNCTION_FAILED; 2958 rval = QLA_FUNCTION_FAILED;
2993 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 2959 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
2994 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2960 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
2995 "-- completion status (%x).\n", __func__, base_vha->host_no, 2961 "Failed to complete IOCB -- completion status (%x).\n",
2996 le16_to_cpu(vpmod->comp_status))); 2962 le16_to_cpu(vpmod->comp_status));
2997 rval = QLA_FUNCTION_FAILED; 2963 rval = QLA_FUNCTION_FAILED;
2998 } else { 2964 } else {
2999 /* EMPTY */ 2965 /* EMPTY */
3000 DEBUG11(printk("%s(%ld): done.\n", __func__, 2966 ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__);
3001 base_vha->host_no));
3002 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 2967 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
3003 } 2968 }
3004 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 2969 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
@@ -3032,17 +2997,16 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3032 int vp_index = vha->vp_idx; 2997 int vp_index = vha->vp_idx;
3033 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2998 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3034 2999
3035 DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__, 3000 ql_dbg(ql_dbg_mbx, vha, 0x10c1,
3036 vha->host_no, vp_index)); 3001 "Entered %s enabling index %d.\n", __func__, vp_index);
3037 3002
3038 if (vp_index == 0 || vp_index >= ha->max_npiv_vports) 3003 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
3039 return QLA_PARAMETER_ERROR; 3004 return QLA_PARAMETER_ERROR;
3040 3005
3041 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma); 3006 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
3042 if (!vce) { 3007 if (!vce) {
3043 DEBUG2_3(printk("%s(%ld): " 3008 ql_log(ql_log_warn, vha, 0x10c2,
3044 "failed to allocate VP Control IOCB.\n", __func__, 3009 "Failed to allocate VP control IOCB.\n");
3045 base_vha->host_no));
3046 return QLA_MEMORY_ALLOC_FAILED; 3010 return QLA_MEMORY_ALLOC_FAILED;
3047 } 3011 }
3048 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx)); 3012 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
@@ -3063,28 +3027,20 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3063 3027
3064 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0); 3028 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
3065 if (rval != QLA_SUCCESS) { 3029 if (rval != QLA_SUCCESS) {
3066 DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB" 3030 ql_dbg(ql_dbg_mbx, vha, 0x10c3,
3067 "(%x).\n", __func__, base_vha->host_no, rval)); 3031 "Failed to issue VP control IOCB (%x).\n", rval);
3068 printk("%s(%ld): failed to issue VP control IOCB"
3069 "(%x).\n", __func__, base_vha->host_no, rval);
3070 } else if (vce->entry_status != 0) { 3032 } else if (vce->entry_status != 0) {
3071 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 3033 ql_dbg(ql_dbg_mbx, vha, 0x10c4,
3072 "-- error status (%x).\n", __func__, base_vha->host_no, 3034 "Failed to complete IOCB -- error status (%x).\n",
3073 vce->entry_status));
3074 printk("%s(%ld): failed to complete IOCB "
3075 "-- error status (%x).\n", __func__, base_vha->host_no,
3076 vce->entry_status); 3035 vce->entry_status);
3077 rval = QLA_FUNCTION_FAILED; 3036 rval = QLA_FUNCTION_FAILED;
3078 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 3037 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
3079 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 3038 ql_dbg(ql_dbg_mbx, vha, 0x10c5,
3080 "-- completion status (%x).\n", __func__, base_vha->host_no, 3039 "Failed to complet IOCB -- completion status (%x).\n",
3081 le16_to_cpu(vce->comp_status)));
3082 printk("%s(%ld): failed to complete IOCB "
3083 "-- completion status (%x).\n", __func__, base_vha->host_no,
3084 le16_to_cpu(vce->comp_status)); 3040 le16_to_cpu(vce->comp_status));
3085 rval = QLA_FUNCTION_FAILED; 3041 rval = QLA_FUNCTION_FAILED;
3086 } else { 3042 } else {
3087 DEBUG2(printk("%s(%ld): done.\n", __func__, base_vha->host_no)); 3043 ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__);
3088 } 3044 }
3089 3045
3090 dma_pool_free(ha->s_dma_pool, vce, vce_dma); 3046 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -3121,6 +3077,8 @@ qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
3121 mbx_cmd_t mc; 3077 mbx_cmd_t mc;
3122 mbx_cmd_t *mcp = &mc; 3078 mbx_cmd_t *mcp = &mc;
3123 3079
3080 ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__);
3081
3124 /* 3082 /*
3125 * This command is implicitly executed by firmware during login for the 3083 * This command is implicitly executed by firmware during login for the
3126 * physical hosts 3084 * physical hosts
@@ -3155,7 +3113,7 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3155 mbx_cmd_t mc; 3113 mbx_cmd_t mc;
3156 mbx_cmd_t *mcp = &mc; 3114 mbx_cmd_t *mcp = &mc;
3157 3115
3158 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3116 ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__);
3159 3117
3160 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 3118 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
3161 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 3119 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
@@ -3186,10 +3144,10 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3186 rval = qla2x00_mailbox_command(vha, mcp); 3144 rval = qla2x00_mailbox_command(vha, mcp);
3187 3145
3188 if (rval != QLA_SUCCESS) { 3146 if (rval != QLA_SUCCESS) {
3189 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 3147 ql_dbg(ql_dbg_mbx, vha, 0x1008,
3190 vha->host_no, rval, mcp->mb[0])); 3148 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3191 } else { 3149 } else {
3192 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3150 ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__);
3193 } 3151 }
3194 3152
3195 return rval; 3153 return rval;
@@ -3214,12 +3172,10 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3214 unsigned long flags; 3172 unsigned long flags;
3215 struct qla_hw_data *ha = vha->hw; 3173 struct qla_hw_data *ha = vha->hw;
3216 3174
3217 DEBUG16(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3175 ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__);
3218 3176
3219 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 3177 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
3220 if (mn == NULL) { 3178 if (mn == NULL) {
3221 DEBUG2_3(printk("%s(%ld): failed to allocate Verify ISP84XX "
3222 "IOCB.\n", __func__, vha->host_no));
3223 return QLA_MEMORY_ALLOC_FAILED; 3179 return QLA_MEMORY_ALLOC_FAILED;
3224 } 3180 }
3225 3181
@@ -3237,43 +3193,43 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3237 mn->p.req.entry_count = 1; 3193 mn->p.req.entry_count = 1;
3238 mn->p.req.options = cpu_to_le16(options); 3194 mn->p.req.options = cpu_to_le16(options);
3239 3195
3240 DEBUG16(printk("%s(%ld): Dump of Verify Request.\n", __func__, 3196 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
3241 vha->host_no)); 3197 "Dump of Verify Request.\n");
3242 DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, 3198 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
3243 sizeof(*mn))); 3199 (uint8_t *)mn, sizeof(*mn));
3244 3200
3245 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 3201 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
3246 if (rval != QLA_SUCCESS) { 3202 if (rval != QLA_SUCCESS) {
3247 DEBUG2_16(printk("%s(%ld): failed to issue Verify " 3203 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
3248 "IOCB (%x).\n", __func__, vha->host_no, rval)); 3204 "Failed to issue verify IOCB (%x).\n", rval);
3249 goto verify_done; 3205 goto verify_done;
3250 } 3206 }
3251 3207
3252 DEBUG16(printk("%s(%ld): Dump of Verify Response.\n", __func__, 3208 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
3253 vha->host_no)); 3209 "Dump of Verify Response.\n");
3254 DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, 3210 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
3255 sizeof(*mn))); 3211 (uint8_t *)mn, sizeof(*mn));
3256 3212
3257 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 3213 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
3258 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 3214 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
3259 le16_to_cpu(mn->p.rsp.failure_code) : 0; 3215 le16_to_cpu(mn->p.rsp.failure_code) : 0;
3260 DEBUG2_16(printk("%s(%ld): cs=%x fc=%x\n", __func__, 3216 ql_dbg(ql_dbg_mbx, vha, 0x10ce,
3261 vha->host_no, status[0], status[1])); 3217 "cs=%x fc=%x.\n", status[0], status[1]);
3262 3218
3263 if (status[0] != CS_COMPLETE) { 3219 if (status[0] != CS_COMPLETE) {
3264 rval = QLA_FUNCTION_FAILED; 3220 rval = QLA_FUNCTION_FAILED;
3265 if (!(options & VCO_DONT_UPDATE_FW)) { 3221 if (!(options & VCO_DONT_UPDATE_FW)) {
3266 DEBUG2_16(printk("%s(%ld): Firmware update " 3222 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
3267 "failed. Retrying without update " 3223 "Firmware update failed. Retrying "
3268 "firmware.\n", __func__, vha->host_no)); 3224 "without update firmware.\n");
3269 options |= VCO_DONT_UPDATE_FW; 3225 options |= VCO_DONT_UPDATE_FW;
3270 options &= ~VCO_FORCE_UPDATE; 3226 options &= ~VCO_FORCE_UPDATE;
3271 retry = 1; 3227 retry = 1;
3272 } 3228 }
3273 } else { 3229 } else {
3274 DEBUG2_16(printk("%s(%ld): firmware updated to %x.\n", 3230 ql_dbg(ql_dbg_mbx, vha, 0x10d0,
3275 __func__, vha->host_no, 3231 "Firmware updated to %x.\n",
3276 le32_to_cpu(mn->p.rsp.fw_ver))); 3232 le32_to_cpu(mn->p.rsp.fw_ver));
3277 3233
3278 /* NOTE: we only update OP firmware. */ 3234 /* NOTE: we only update OP firmware. */
3279 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 3235 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
@@ -3288,10 +3244,9 @@ verify_done:
3288 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 3244 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
3289 3245
3290 if (rval != QLA_SUCCESS) { 3246 if (rval != QLA_SUCCESS) {
3291 DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__, 3247 ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval);
3292 vha->host_no, rval));
3293 } else { 3248 } else {
3294 DEBUG16(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3249 ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__);
3295 } 3250 }
3296 3251
3297 return rval; 3252 return rval;
@@ -3307,6 +3262,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3307 struct device_reg_25xxmq __iomem *reg; 3262 struct device_reg_25xxmq __iomem *reg;
3308 struct qla_hw_data *ha = vha->hw; 3263 struct qla_hw_data *ha = vha->hw;
3309 3264
3265 ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__);
3266
3310 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 3267 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3311 mcp->mb[1] = req->options; 3268 mcp->mb[1] = req->options;
3312 mcp->mb[2] = MSW(LSD(req->dma)); 3269 mcp->mb[2] = MSW(LSD(req->dma));
@@ -3344,9 +3301,13 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3344 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3301 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3345 3302
3346 rval = qla2x00_mailbox_command(vha, mcp); 3303 rval = qla2x00_mailbox_command(vha, mcp);
3347 if (rval != QLA_SUCCESS) 3304 if (rval != QLA_SUCCESS) {
3348 DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x mb0=%x.\n", 3305 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
3349 __func__, vha->host_no, rval, mcp->mb[0])); 3306 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3307 } else {
3308 ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__);
3309 }
3310
3350 return rval; 3311 return rval;
3351} 3312}
3352 3313
@@ -3360,6 +3321,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3360 struct device_reg_25xxmq __iomem *reg; 3321 struct device_reg_25xxmq __iomem *reg;
3361 struct qla_hw_data *ha = vha->hw; 3322 struct qla_hw_data *ha = vha->hw;
3362 3323
3324 ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__);
3325
3363 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 3326 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3364 mcp->mb[1] = rsp->options; 3327 mcp->mb[1] = rsp->options;
3365 mcp->mb[2] = MSW(LSD(rsp->dma)); 3328 mcp->mb[2] = MSW(LSD(rsp->dma));
@@ -3393,10 +3356,13 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3393 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3356 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3394 3357
3395 rval = qla2x00_mailbox_command(vha, mcp); 3358 rval = qla2x00_mailbox_command(vha, mcp);
3396 if (rval != QLA_SUCCESS) 3359 if (rval != QLA_SUCCESS) {
3397 DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x " 3360 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
3398 "mb0=%x.\n", __func__, 3361 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3399 vha->host_no, rval, mcp->mb[0])); 3362 } else {
3363 ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__);
3364 }
3365
3400 return rval; 3366 return rval;
3401} 3367}
3402 3368
@@ -3407,7 +3373,7 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3407 mbx_cmd_t mc; 3373 mbx_cmd_t mc;
3408 mbx_cmd_t *mcp = &mc; 3374 mbx_cmd_t *mcp = &mc;
3409 3375
3410 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3376 ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__);
3411 3377
3412 mcp->mb[0] = MBC_IDC_ACK; 3378 mcp->mb[0] = MBC_IDC_ACK;
3413 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 3379 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
@@ -3418,10 +3384,10 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3418 rval = qla2x00_mailbox_command(vha, mcp); 3384 rval = qla2x00_mailbox_command(vha, mcp);
3419 3385
3420 if (rval != QLA_SUCCESS) { 3386 if (rval != QLA_SUCCESS) {
3421 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3387 ql_dbg(ql_dbg_mbx, vha, 0x10da,
3422 vha->host_no, rval, mcp->mb[0])); 3388 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3423 } else { 3389 } else {
3424 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3390 ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__);
3425 } 3391 }
3426 3392
3427 return rval; 3393 return rval;
@@ -3434,11 +3400,11 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3434 mbx_cmd_t mc; 3400 mbx_cmd_t mc;
3435 mbx_cmd_t *mcp = &mc; 3401 mbx_cmd_t *mcp = &mc;
3436 3402
3403 ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
3404
3437 if (!IS_QLA81XX(vha->hw)) 3405 if (!IS_QLA81XX(vha->hw))
3438 return QLA_FUNCTION_FAILED; 3406 return QLA_FUNCTION_FAILED;
3439 3407
3440 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3441
3442 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3408 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3443 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; 3409 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
3444 mcp->out_mb = MBX_1|MBX_0; 3410 mcp->out_mb = MBX_1|MBX_0;
@@ -3448,10 +3414,11 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3448 rval = qla2x00_mailbox_command(vha, mcp); 3414 rval = qla2x00_mailbox_command(vha, mcp);
3449 3415
3450 if (rval != QLA_SUCCESS) { 3416 if (rval != QLA_SUCCESS) {
3451 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 3417 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
3452 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 3418 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3419 rval, mcp->mb[0], mcp->mb[1]);
3453 } else { 3420 } else {
3454 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3421 ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__);
3455 *sector_size = mcp->mb[1]; 3422 *sector_size = mcp->mb[1];
3456 } 3423 }
3457 3424
@@ -3468,7 +3435,7 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3468 if (!IS_QLA81XX(vha->hw)) 3435 if (!IS_QLA81XX(vha->hw))
3469 return QLA_FUNCTION_FAILED; 3436 return QLA_FUNCTION_FAILED;
3470 3437
3471 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3438 ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
3472 3439
3473 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3440 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3474 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 3441 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
@@ -3480,10 +3447,11 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3480 rval = qla2x00_mailbox_command(vha, mcp); 3447 rval = qla2x00_mailbox_command(vha, mcp);
3481 3448
3482 if (rval != QLA_SUCCESS) { 3449 if (rval != QLA_SUCCESS) {
3483 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 3450 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
3484 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 3451 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3452 rval, mcp->mb[0], mcp->mb[1]);
3485 } else { 3453 } else {
3486 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3454 ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__);
3487 } 3455 }
3488 3456
3489 return rval; 3457 return rval;
@@ -3499,7 +3467,7 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3499 if (!IS_QLA81XX(vha->hw)) 3467 if (!IS_QLA81XX(vha->hw))
3500 return QLA_FUNCTION_FAILED; 3468 return QLA_FUNCTION_FAILED;
3501 3469
3502 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3470 ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
3503 3471
3504 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3472 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3505 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 3473 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
@@ -3514,11 +3482,11 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3514 rval = qla2x00_mailbox_command(vha, mcp); 3482 rval = qla2x00_mailbox_command(vha, mcp);
3515 3483
3516 if (rval != QLA_SUCCESS) { 3484 if (rval != QLA_SUCCESS) {
3517 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 3485 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
3518 "mb[2]=%x.\n", __func__, vha->host_no, rval, mcp->mb[0], 3486 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3519 mcp->mb[1], mcp->mb[2])); 3487 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3520 } else { 3488 } else {
3521 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3489 ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__);
3522 } 3490 }
3523 3491
3524 return rval; 3492 return rval;
@@ -3531,7 +3499,7 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3531 mbx_cmd_t mc; 3499 mbx_cmd_t mc;
3532 mbx_cmd_t *mcp = &mc; 3500 mbx_cmd_t *mcp = &mc;
3533 3501
3534 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3502 ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__);
3535 3503
3536 mcp->mb[0] = MBC_RESTART_MPI_FW; 3504 mcp->mb[0] = MBC_RESTART_MPI_FW;
3537 mcp->out_mb = MBX_0; 3505 mcp->out_mb = MBX_0;
@@ -3541,10 +3509,11 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3541 rval = qla2x00_mailbox_command(vha, mcp); 3509 rval = qla2x00_mailbox_command(vha, mcp);
3542 3510
3543 if (rval != QLA_SUCCESS) { 3511 if (rval != QLA_SUCCESS) {
3544 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n", 3512 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
3545 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 3513 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3514 rval, mcp->mb[0], mcp->mb[1]);
3546 } else { 3515 } else {
3547 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3516 ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__);
3548 } 3517 }
3549 3518
3550 return rval; 3519 return rval;
@@ -3559,11 +3528,11 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3559 mbx_cmd_t *mcp = &mc; 3528 mbx_cmd_t *mcp = &mc;
3560 struct qla_hw_data *ha = vha->hw; 3529 struct qla_hw_data *ha = vha->hw;
3561 3530
3531 ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__);
3532
3562 if (!IS_FWI2_CAPABLE(ha)) 3533 if (!IS_FWI2_CAPABLE(ha))
3563 return QLA_FUNCTION_FAILED; 3534 return QLA_FUNCTION_FAILED;
3564 3535
3565 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3566
3567 if (len == 1) 3536 if (len == 1)
3568 opt |= BIT_0; 3537 opt |= BIT_0;
3569 3538
@@ -3586,10 +3555,10 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3586 *sfp = mcp->mb[1]; 3555 *sfp = mcp->mb[1];
3587 3556
3588 if (rval != QLA_SUCCESS) { 3557 if (rval != QLA_SUCCESS) {
3589 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3558 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
3590 vha->host_no, rval, mcp->mb[0])); 3559 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3591 } else { 3560 } else {
3592 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3561 ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__);
3593 } 3562 }
3594 3563
3595 return rval; 3564 return rval;
@@ -3604,11 +3573,11 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3604 mbx_cmd_t *mcp = &mc; 3573 mbx_cmd_t *mcp = &mc;
3605 struct qla_hw_data *ha = vha->hw; 3574 struct qla_hw_data *ha = vha->hw;
3606 3575
3576 ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__);
3577
3607 if (!IS_FWI2_CAPABLE(ha)) 3578 if (!IS_FWI2_CAPABLE(ha))
3608 return QLA_FUNCTION_FAILED; 3579 return QLA_FUNCTION_FAILED;
3609 3580
3610 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3611
3612 if (len == 1) 3581 if (len == 1)
3613 opt |= BIT_0; 3582 opt |= BIT_0;
3614 3583
@@ -3631,10 +3600,10 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3631 rval = qla2x00_mailbox_command(vha, mcp); 3600 rval = qla2x00_mailbox_command(vha, mcp);
3632 3601
3633 if (rval != QLA_SUCCESS) { 3602 if (rval != QLA_SUCCESS) {
3634 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 3603 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
3635 vha->host_no, rval, mcp->mb[0])); 3604 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3636 } else { 3605 } else {
3637 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3606 ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__);
3638 } 3607 }
3639 3608
3640 return rval; 3609 return rval;
@@ -3648,11 +3617,11 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3648 mbx_cmd_t mc; 3617 mbx_cmd_t mc;
3649 mbx_cmd_t *mcp = &mc; 3618 mbx_cmd_t *mcp = &mc;
3650 3619
3620 ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
3621
3651 if (!IS_QLA8XXX_TYPE(vha->hw)) 3622 if (!IS_QLA8XXX_TYPE(vha->hw))
3652 return QLA_FUNCTION_FAILED; 3623 return QLA_FUNCTION_FAILED;
3653 3624
3654 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3655
3656 mcp->mb[0] = MBC_GET_XGMAC_STATS; 3625 mcp->mb[0] = MBC_GET_XGMAC_STATS;
3657 mcp->mb[2] = MSW(stats_dma); 3626 mcp->mb[2] = MSW(stats_dma);
3658 mcp->mb[3] = LSW(stats_dma); 3627 mcp->mb[3] = LSW(stats_dma);
@@ -3666,11 +3635,12 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3666 rval = qla2x00_mailbox_command(vha, mcp); 3635 rval = qla2x00_mailbox_command(vha, mcp);
3667 3636
3668 if (rval != QLA_SUCCESS) { 3637 if (rval != QLA_SUCCESS) {
3669 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x " 3638 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
3670 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval, 3639 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3671 mcp->mb[0], mcp->mb[1], mcp->mb[2])); 3640 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3672 } else { 3641 } else {
3673 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3642 ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__);
3643
3674 3644
3675 *actual_size = mcp->mb[2] << 2; 3645 *actual_size = mcp->mb[2] << 2;
3676 } 3646 }
@@ -3686,11 +3656,11 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3686 mbx_cmd_t mc; 3656 mbx_cmd_t mc;
3687 mbx_cmd_t *mcp = &mc; 3657 mbx_cmd_t *mcp = &mc;
3688 3658
3659 ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
3660
3689 if (!IS_QLA8XXX_TYPE(vha->hw)) 3661 if (!IS_QLA8XXX_TYPE(vha->hw))
3690 return QLA_FUNCTION_FAILED; 3662 return QLA_FUNCTION_FAILED;
3691 3663
3692 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3693
3694 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 3664 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
3695 mcp->mb[1] = 0; 3665 mcp->mb[1] = 0;
3696 mcp->mb[2] = MSW(tlv_dma); 3666 mcp->mb[2] = MSW(tlv_dma);
@@ -3705,11 +3675,11 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3705 rval = qla2x00_mailbox_command(vha, mcp); 3675 rval = qla2x00_mailbox_command(vha, mcp);
3706 3676
3707 if (rval != QLA_SUCCESS) { 3677 if (rval != QLA_SUCCESS) {
3708 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x " 3678 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
3709 "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval, 3679 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3710 mcp->mb[0], mcp->mb[1], mcp->mb[2])); 3680 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3711 } else { 3681 } else {
3712 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3682 ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__);
3713 } 3683 }
3714 3684
3715 return rval; 3685 return rval;
@@ -3722,11 +3692,11 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3722 mbx_cmd_t mc; 3692 mbx_cmd_t mc;
3723 mbx_cmd_t *mcp = &mc; 3693 mbx_cmd_t *mcp = &mc;
3724 3694
3695 ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__);
3696
3725 if (!IS_FWI2_CAPABLE(vha->hw)) 3697 if (!IS_FWI2_CAPABLE(vha->hw))
3726 return QLA_FUNCTION_FAILED; 3698 return QLA_FUNCTION_FAILED;
3727 3699
3728 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3729
3730 mcp->mb[0] = MBC_READ_RAM_EXTENDED; 3700 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
3731 mcp->mb[1] = LSW(risc_addr); 3701 mcp->mb[1] = LSW(risc_addr);
3732 mcp->mb[8] = MSW(risc_addr); 3702 mcp->mb[8] = MSW(risc_addr);
@@ -3736,10 +3706,10 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3736 mcp->flags = 0; 3706 mcp->flags = 0;
3737 rval = qla2x00_mailbox_command(vha, mcp); 3707 rval = qla2x00_mailbox_command(vha, mcp);
3738 if (rval != QLA_SUCCESS) { 3708 if (rval != QLA_SUCCESS) {
3739 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 3709 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
3740 vha->host_no, rval, mcp->mb[0])); 3710 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3741 } else { 3711 } else {
3742 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3712 ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__);
3743 *data = mcp->mb[3] << 16 | mcp->mb[2]; 3713 *data = mcp->mb[3] << 16 | mcp->mb[2];
3744 } 3714 }
3745 3715
@@ -3755,7 +3725,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3755 mbx_cmd_t *mcp = &mc; 3725 mbx_cmd_t *mcp = &mc;
3756 uint32_t iter_cnt = 0x1; 3726 uint32_t iter_cnt = 0x1;
3757 3727
3758 DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no)); 3728 ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__);
3759 3729
3760 memset(mcp->mb, 0 , sizeof(mcp->mb)); 3730 memset(mcp->mb, 0 , sizeof(mcp->mb));
3761 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 3731 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
@@ -3794,15 +3764,12 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3794 rval = qla2x00_mailbox_command(vha, mcp); 3764 rval = qla2x00_mailbox_command(vha, mcp);
3795 3765
3796 if (rval != QLA_SUCCESS) { 3766 if (rval != QLA_SUCCESS) {
3797 DEBUG2(printk(KERN_WARNING 3767 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
3798 "(%ld): failed=%x mb[0]=0x%x " 3768 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
3799 "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x " 3769 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
3800 "mb[19]=0x%x.\n", 3770 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
3801 vha->host_no, rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
3802 mcp->mb[3], mcp->mb[18], mcp->mb[19]));
3803 } else { 3771 } else {
3804 DEBUG2(printk(KERN_WARNING 3772 ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__);
3805 "scsi(%ld): done.\n", vha->host_no));
3806 } 3773 }
3807 3774
3808 /* Copy mailbox information */ 3775 /* Copy mailbox information */
@@ -3819,7 +3786,7 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3819 mbx_cmd_t *mcp = &mc; 3786 mbx_cmd_t *mcp = &mc;
3820 struct qla_hw_data *ha = vha->hw; 3787 struct qla_hw_data *ha = vha->hw;
3821 3788
3822 DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no)); 3789 ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__);
3823 3790
3824 memset(mcp->mb, 0 , sizeof(mcp->mb)); 3791 memset(mcp->mb, 0 , sizeof(mcp->mb));
3825 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 3792 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
@@ -3858,12 +3825,11 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3858 rval = qla2x00_mailbox_command(vha, mcp); 3825 rval = qla2x00_mailbox_command(vha, mcp);
3859 3826
3860 if (rval != QLA_SUCCESS) { 3827 if (rval != QLA_SUCCESS) {
3861 DEBUG2(printk(KERN_WARNING 3828 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
3862 "(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n", 3829 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3863 vha->host_no, rval, mcp->mb[0], mcp->mb[1])); 3830 rval, mcp->mb[0], mcp->mb[1]);
3864 } else { 3831 } else {
3865 DEBUG2(printk(KERN_WARNING 3832 ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__);
3866 "scsi(%ld): done.\n", vha->host_no));
3867 } 3833 }
3868 3834
3869 /* Copy mailbox information */ 3835 /* Copy mailbox information */
@@ -3872,14 +3838,14 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3872} 3838}
3873 3839
3874int 3840int
3875qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic) 3841qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
3876{ 3842{
3877 int rval; 3843 int rval;
3878 mbx_cmd_t mc; 3844 mbx_cmd_t mc;
3879 mbx_cmd_t *mcp = &mc; 3845 mbx_cmd_t *mcp = &mc;
3880 3846
3881 DEBUG16(printk("%s(%ld): enable_diag=%d entered.\n", __func__, 3847 ql_dbg(ql_dbg_mbx, vha, 0x10fd,
3882 ha->host_no, enable_diagnostic)); 3848 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
3883 3849
3884 mcp->mb[0] = MBC_ISP84XX_RESET; 3850 mcp->mb[0] = MBC_ISP84XX_RESET;
3885 mcp->mb[1] = enable_diagnostic; 3851 mcp->mb[1] = enable_diagnostic;
@@ -3887,13 +3853,12 @@ qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic)
3887 mcp->in_mb = MBX_1|MBX_0; 3853 mcp->in_mb = MBX_1|MBX_0;
3888 mcp->tov = MBX_TOV_SECONDS; 3854 mcp->tov = MBX_TOV_SECONDS;
3889 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 3855 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3890 rval = qla2x00_mailbox_command(ha, mcp); 3856 rval = qla2x00_mailbox_command(vha, mcp);
3891 3857
3892 if (rval != QLA_SUCCESS) 3858 if (rval != QLA_SUCCESS)
3893 DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no, 3859 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
3894 rval));
3895 else 3860 else
3896 DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no)); 3861 ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__);
3897 3862
3898 return rval; 3863 return rval;
3899} 3864}
@@ -3905,11 +3870,11 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3905 mbx_cmd_t mc; 3870 mbx_cmd_t mc;
3906 mbx_cmd_t *mcp = &mc; 3871 mbx_cmd_t *mcp = &mc;
3907 3872
3873 ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__);
3874
3908 if (!IS_FWI2_CAPABLE(vha->hw)) 3875 if (!IS_FWI2_CAPABLE(vha->hw))
3909 return QLA_FUNCTION_FAILED; 3876 return QLA_FUNCTION_FAILED;
3910 3877
3911 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3912
3913 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; 3878 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
3914 mcp->mb[1] = LSW(risc_addr); 3879 mcp->mb[1] = LSW(risc_addr);
3915 mcp->mb[2] = LSW(data); 3880 mcp->mb[2] = LSW(data);
@@ -3921,10 +3886,10 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3921 mcp->flags = 0; 3886 mcp->flags = 0;
3922 rval = qla2x00_mailbox_command(vha, mcp); 3887 rval = qla2x00_mailbox_command(vha, mcp);
3923 if (rval != QLA_SUCCESS) { 3888 if (rval != QLA_SUCCESS) {
3924 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 3889 ql_dbg(ql_dbg_mbx, vha, 0x1101,
3925 vha->host_no, rval, mcp->mb[0])); 3890 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3926 } else { 3891 } else {
3927 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); 3892 ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__);
3928 } 3893 }
3929 3894
3930 return rval; 3895 return rval;
@@ -3941,8 +3906,7 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
3941 3906
3942 rval = QLA_SUCCESS; 3907 rval = QLA_SUCCESS;
3943 3908
3944 DEBUG11(qla_printk(KERN_INFO, ha, 3909 ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__);
3945 "%s(%ld): entered.\n", __func__, vha->host_no));
3946 3910
3947 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 3911 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
3948 3912
@@ -3982,11 +3946,10 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
3982 rval = QLA_FUNCTION_FAILED; 3946 rval = QLA_FUNCTION_FAILED;
3983 3947
3984 if (rval != QLA_SUCCESS) { 3948 if (rval != QLA_SUCCESS) {
3985 DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n", 3949 ql_dbg(ql_dbg_mbx, vha, 0x1104,
3986 __func__, vha->host_no, rval, mb[0])); 3950 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
3987 } else { 3951 } else {
3988 DEBUG11(printk(KERN_INFO 3952 ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__);
3989 "%s(%ld): done.\n", __func__, vha->host_no));
3990 } 3953 }
3991 3954
3992 return rval; 3955 return rval;
@@ -3999,12 +3962,11 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
3999 mbx_cmd_t *mcp = &mc; 3962 mbx_cmd_t *mcp = &mc;
4000 struct qla_hw_data *ha = vha->hw; 3963 struct qla_hw_data *ha = vha->hw;
4001 3964
3965 ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__);
3966
4002 if (!IS_FWI2_CAPABLE(ha)) 3967 if (!IS_FWI2_CAPABLE(ha))
4003 return QLA_FUNCTION_FAILED; 3968 return QLA_FUNCTION_FAILED;
4004 3969
4005 DEBUG11(qla_printk(KERN_INFO, ha,
4006 "%s(%ld): entered.\n", __func__, vha->host_no));
4007
4008 mcp->mb[0] = MBC_DATA_RATE; 3970 mcp->mb[0] = MBC_DATA_RATE;
4009 mcp->mb[1] = 0; 3971 mcp->mb[1] = 0;
4010 mcp->out_mb = MBX_1|MBX_0; 3972 mcp->out_mb = MBX_1|MBX_0;
@@ -4013,11 +3975,10 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
4013 mcp->flags = 0; 3975 mcp->flags = 0;
4014 rval = qla2x00_mailbox_command(vha, mcp); 3976 rval = qla2x00_mailbox_command(vha, mcp);
4015 if (rval != QLA_SUCCESS) { 3977 if (rval != QLA_SUCCESS) {
4016 DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n", 3978 ql_dbg(ql_dbg_mbx, vha, 0x1107,
4017 __func__, vha->host_no, rval, mcp->mb[0])); 3979 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4018 } else { 3980 } else {
4019 DEBUG11(printk(KERN_INFO 3981 ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__);
4020 "%s(%ld): done.\n", __func__, vha->host_no));
4021 if (mcp->mb[1] != 0x7) 3982 if (mcp->mb[1] != 0x7)
4022 ha->link_data_rate = mcp->mb[1]; 3983 ha->link_data_rate = mcp->mb[1];
4023 } 3984 }
@@ -4033,8 +3994,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4033 mbx_cmd_t *mcp = &mc; 3994 mbx_cmd_t *mcp = &mc;
4034 struct qla_hw_data *ha = vha->hw; 3995 struct qla_hw_data *ha = vha->hw;
4035 3996
4036 DEBUG11(printk(KERN_INFO 3997 ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
4037 "%s(%ld): entered.\n", __func__, vha->host_no));
4038 3998
4039 if (!IS_QLA81XX(ha)) 3999 if (!IS_QLA81XX(ha))
4040 return QLA_FUNCTION_FAILED; 4000 return QLA_FUNCTION_FAILED;
@@ -4047,15 +4007,13 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4047 rval = qla2x00_mailbox_command(vha, mcp); 4007 rval = qla2x00_mailbox_command(vha, mcp);
4048 4008
4049 if (rval != QLA_SUCCESS) { 4009 if (rval != QLA_SUCCESS) {
4050 DEBUG2_3_11(printk(KERN_WARNING 4010 ql_dbg(ql_dbg_mbx, vha, 0x110a,
4051 "%s(%ld): failed=%x (%x).\n", __func__, 4011 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4052 vha->host_no, rval, mcp->mb[0]));
4053 } else { 4012 } else {
4054 /* Copy all bits to preserve original value */ 4013 /* Copy all bits to preserve original value */
4055 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 4014 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
4056 4015
4057 DEBUG11(printk(KERN_INFO 4016 ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__);
4058 "%s(%ld): done.\n", __func__, vha->host_no));
4059 } 4017 }
4060 return rval; 4018 return rval;
4061} 4019}
@@ -4067,8 +4025,7 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4067 mbx_cmd_t mc; 4025 mbx_cmd_t mc;
4068 mbx_cmd_t *mcp = &mc; 4026 mbx_cmd_t *mcp = &mc;
4069 4027
4070 DEBUG11(printk(KERN_INFO 4028 ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__);
4071 "%s(%ld): entered.\n", __func__, vha->host_no));
4072 4029
4073 mcp->mb[0] = MBC_SET_PORT_CONFIG; 4030 mcp->mb[0] = MBC_SET_PORT_CONFIG;
4074 /* Copy all bits to preserve original setting */ 4031 /* Copy all bits to preserve original setting */
@@ -4080,12 +4037,10 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4080 rval = qla2x00_mailbox_command(vha, mcp); 4037 rval = qla2x00_mailbox_command(vha, mcp);
4081 4038
4082 if (rval != QLA_SUCCESS) { 4039 if (rval != QLA_SUCCESS) {
4083 DEBUG2_3_11(printk(KERN_WARNING 4040 ql_dbg(ql_dbg_mbx, vha, 0x110d,
4084 "%s(%ld): failed=%x (%x).\n", __func__, 4041 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4085 vha->host_no, rval, mcp->mb[0]));
4086 } else 4042 } else
4087 DEBUG11(printk(KERN_INFO 4043 ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__);
4088 "%s(%ld): done.\n", __func__, vha->host_no));
4089 4044
4090 return rval; 4045 return rval;
4091} 4046}
@@ -4100,12 +4055,11 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4100 mbx_cmd_t *mcp = &mc; 4055 mbx_cmd_t *mcp = &mc;
4101 struct qla_hw_data *ha = vha->hw; 4056 struct qla_hw_data *ha = vha->hw;
4102 4057
4058 ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__);
4059
4103 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 4060 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
4104 return QLA_FUNCTION_FAILED; 4061 return QLA_FUNCTION_FAILED;
4105 4062
4106 DEBUG11(printk(KERN_INFO
4107 "%s(%ld): entered.\n", __func__, vha->host_no));
4108
4109 mcp->mb[0] = MBC_PORT_PARAMS; 4063 mcp->mb[0] = MBC_PORT_PARAMS;
4110 mcp->mb[1] = loop_id; 4064 mcp->mb[1] = loop_id;
4111 if (ha->flags.fcp_prio_enabled) 4065 if (ha->flags.fcp_prio_enabled)
@@ -4127,12 +4081,9 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4127 } 4081 }
4128 4082
4129 if (rval != QLA_SUCCESS) { 4083 if (rval != QLA_SUCCESS) {
4130 DEBUG2_3_11(printk(KERN_WARNING 4084 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
4131 "%s(%ld): failed=%x.\n", __func__,
4132 vha->host_no, rval));
4133 } else { 4085 } else {
4134 DEBUG11(printk(KERN_INFO 4086 ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__);
4135 "%s(%ld): done.\n", __func__, vha->host_no));
4136 } 4087 }
4137 4088
4138 return rval; 4089 return rval;
@@ -4145,13 +4096,12 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4145 uint8_t byte; 4096 uint8_t byte;
4146 struct qla_hw_data *ha = vha->hw; 4097 struct qla_hw_data *ha = vha->hw;
4147 4098
4148 DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no)); 4099 ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__);
4149 4100
4150 /* Integer part */ 4101 /* Integer part */
4151 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0); 4102 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
4152 if (rval != QLA_SUCCESS) { 4103 if (rval != QLA_SUCCESS) {
4153 DEBUG2_3_11(printk(KERN_WARNING 4104 ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
4154 "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval));
4155 ha->flags.thermal_supported = 0; 4105 ha->flags.thermal_supported = 0;
4156 goto fail; 4106 goto fail;
4157 } 4107 }
@@ -4160,14 +4110,13 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4160 /* Fraction part */ 4110 /* Fraction part */
4161 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0); 4111 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0);
4162 if (rval != QLA_SUCCESS) { 4112 if (rval != QLA_SUCCESS) {
4163 DEBUG2_3_11(printk(KERN_WARNING 4113 ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
4164 "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval));
4165 ha->flags.thermal_supported = 0; 4114 ha->flags.thermal_supported = 0;
4166 goto fail; 4115 goto fail;
4167 } 4116 }
4168 *frac = (byte >> 6) * 25; 4117 *frac = (byte >> 6) * 25;
4169 4118
4170 DEBUG11(printk(KERN_INFO "%s(%ld): done.\n", __func__, vha->host_no)); 4119 ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__);
4171fail: 4120fail:
4172 return rval; 4121 return rval;
4173} 4122}
@@ -4180,12 +4129,11 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4180 mbx_cmd_t mc; 4129 mbx_cmd_t mc;
4181 mbx_cmd_t *mcp = &mc; 4130 mbx_cmd_t *mcp = &mc;
4182 4131
4132 ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__);
4133
4183 if (!IS_FWI2_CAPABLE(ha)) 4134 if (!IS_FWI2_CAPABLE(ha))
4184 return QLA_FUNCTION_FAILED; 4135 return QLA_FUNCTION_FAILED;
4185 4136
4186 DEBUG11(qla_printk(KERN_INFO, ha,
4187 "%s(%ld): entered.\n", __func__, vha->host_no));
4188
4189 memset(mcp, 0, sizeof(mbx_cmd_t)); 4137 memset(mcp, 0, sizeof(mbx_cmd_t));
4190 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 4138 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4191 mcp->mb[1] = 1; 4139 mcp->mb[1] = 1;
@@ -4197,12 +4145,10 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4197 4145
4198 rval = qla2x00_mailbox_command(vha, mcp); 4146 rval = qla2x00_mailbox_command(vha, mcp);
4199 if (rval != QLA_SUCCESS) { 4147 if (rval != QLA_SUCCESS) {
4200 DEBUG2_3_11(qla_printk(KERN_WARNING, ha, 4148 ql_dbg(ql_dbg_mbx, vha, 0x1016,
4201 "%s(%ld): failed=%x mb[0]=%x.\n", __func__, 4149 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4202 vha->host_no, rval, mcp->mb[0]));
4203 } else { 4150 } else {
4204 DEBUG11(qla_printk(KERN_INFO, ha, 4151 ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__);
4205 "%s(%ld): done.\n", __func__, vha->host_no));
4206 } 4152 }
4207 4153
4208 return rval; 4154 return rval;
@@ -4216,12 +4162,11 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4216 mbx_cmd_t mc; 4162 mbx_cmd_t mc;
4217 mbx_cmd_t *mcp = &mc; 4163 mbx_cmd_t *mcp = &mc;
4218 4164
4165 ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__);
4166
4219 if (!IS_QLA82XX(ha)) 4167 if (!IS_QLA82XX(ha))
4220 return QLA_FUNCTION_FAILED; 4168 return QLA_FUNCTION_FAILED;
4221 4169
4222 DEBUG11(qla_printk(KERN_INFO, ha,
4223 "%s(%ld): entered.\n", __func__, vha->host_no));
4224
4225 memset(mcp, 0, sizeof(mbx_cmd_t)); 4170 memset(mcp, 0, sizeof(mbx_cmd_t));
4226 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 4171 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4227 mcp->mb[1] = 0; 4172 mcp->mb[1] = 0;
@@ -4233,12 +4178,10 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4233 4178
4234 rval = qla2x00_mailbox_command(vha, mcp); 4179 rval = qla2x00_mailbox_command(vha, mcp);
4235 if (rval != QLA_SUCCESS) { 4180 if (rval != QLA_SUCCESS) {
4236 DEBUG2_3_11(qla_printk(KERN_WARNING, ha, 4181 ql_dbg(ql_dbg_mbx, vha, 0x100c,
4237 "%s(%ld): failed=%x mb[0]=%x.\n", __func__, 4182 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4238 vha->host_no, rval, mcp->mb[0]));
4239 } else { 4183 } else {
4240 DEBUG11(qla_printk(KERN_INFO, ha, 4184 ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__);
4241 "%s(%ld): done.\n", __func__, vha->host_no));
4242 } 4185 }
4243 4186
4244 return rval; 4187 return rval;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 5e343919acad..c706ed370000 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -36,8 +36,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
36 mutex_lock(&ha->vport_lock); 36 mutex_lock(&ha->vport_lock);
37 vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1); 37 vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
38 if (vp_id > ha->max_npiv_vports) { 38 if (vp_id > ha->max_npiv_vports) {
39 DEBUG15(printk ("vp_id %d is bigger than max-supported %d.\n", 39 ql_dbg(ql_dbg_vport, vha, 0xa000,
40 vp_id, ha->max_npiv_vports)); 40 "vp_id %d is bigger than max-supported %d.\n",
41 vp_id, ha->max_npiv_vports);
41 mutex_unlock(&ha->vport_lock); 42 mutex_unlock(&ha->vport_lock);
42 return vp_id; 43 return vp_id;
43 } 44 }
@@ -131,9 +132,9 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
131 fc_port_t *fcport; 132 fc_port_t *fcport;
132 133
133 list_for_each_entry(fcport, &vha->vp_fcports, list) { 134 list_for_each_entry(fcport, &vha->vp_fcports, list) {
134 DEBUG15(printk("scsi(%ld): Marking port dead, " 135 ql_dbg(ql_dbg_vport, vha, 0xa001,
135 "loop_id=0x%04x :%x\n", 136 "Marking port dead, loop_id=0x%04x : %x.\n",
136 vha->host_no, fcport->loop_id, fcport->vp_idx)); 137 fcport->loop_id, fcport->vp_idx);
137 138
138 qla2x00_mark_device_lost(vha, fcport, 0, 0); 139 qla2x00_mark_device_lost(vha, fcport, 0, 0);
139 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); 140 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
@@ -187,13 +188,13 @@ qla24xx_enable_vp(scsi_qla_host_t *vha)
187 goto enable_failed; 188 goto enable_failed;
188 } 189 }
189 190
190 DEBUG15(qla_printk(KERN_INFO, ha, 191 ql_dbg(ql_dbg_taskm, vha, 0x801a,
191 "Virtual port with id: %d - Enabled\n", vha->vp_idx)); 192 "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
192 return 0; 193 return 0;
193 194
194enable_failed: 195enable_failed:
195 DEBUG15(qla_printk(KERN_INFO, ha, 196 ql_dbg(ql_dbg_taskm, vha, 0x801b,
196 "Virtual port with id: %d - Disabled\n", vha->vp_idx)); 197 "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
197 return 1; 198 return 1;
198} 199}
199 200
@@ -205,12 +206,12 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
205 206
206 fc_vport = vha->fc_vport; 207 fc_vport = vha->fc_vport;
207 208
208 DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n", 209 ql_dbg(ql_dbg_vport, vha, 0xa002,
209 vha->host_no, __func__)); 210 "%s: change request #3.\n", __func__);
210 ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx); 211 ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
211 if (ret != QLA_SUCCESS) { 212 if (ret != QLA_SUCCESS) {
212 DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable " 213 ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
213 "receiving of RSCN requests: 0x%x\n", ret)); 214 "receiving of RSCN requests: 0x%x.\n", ret);
214 return; 215 return;
215 } else { 216 } else {
216 /* Corresponds to SCR enabled */ 217 /* Corresponds to SCR enabled */
@@ -248,9 +249,9 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
248 case MBA_CHG_IN_CONNECTION: 249 case MBA_CHG_IN_CONNECTION:
249 case MBA_PORT_UPDATE: 250 case MBA_PORT_UPDATE:
250 case MBA_RSCN_UPDATE: 251 case MBA_RSCN_UPDATE:
251 DEBUG15(printk("scsi(%ld)%s: Async_event for" 252 ql_dbg(ql_dbg_async, vha, 0x5024,
252 " VP[%d], mb = 0x%x, vha=%p\n", 253 "Async_event for VP[%d], mb=0x%x vha=%p.\n",
253 vha->host_no, __func__, i, *mb, vha)); 254 i, *mb, vha);
254 qla2x00_async_event(vha, rsp, mb); 255 qla2x00_async_event(vha, rsp, mb);
255 break; 256 break;
256 } 257 }
@@ -286,37 +287,49 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
286 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) 287 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
287 qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); 288 qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
288 289
289 DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n", 290 ql_dbg(ql_dbg_taskm, vha, 0x801d,
290 vha->host_no, vha->vp_idx)); 291 "Scheduling enable of Vport %d.\n", vha->vp_idx);
291 return qla24xx_enable_vp(vha); 292 return qla24xx_enable_vp(vha);
292} 293}
293 294
294static int 295static int
295qla2x00_do_dpc_vp(scsi_qla_host_t *vha) 296qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
296{ 297{
298 ql_dbg(ql_dbg_dpc, vha, 0x4012,
299 "Entering %s.\n", __func__);
300 ql_dbg(ql_dbg_dpc, vha, 0x4013,
301 "vp_flags: 0x%lx.\n", vha->vp_flags);
302
297 qla2x00_do_work(vha); 303 qla2x00_do_work(vha);
298 304
299 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { 305 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
300 /* VP acquired. complete port configuration */ 306 /* VP acquired. complete port configuration */
307 ql_dbg(ql_dbg_dpc, vha, 0x4014,
308 "Configure VP scheduled.\n");
301 qla24xx_configure_vp(vha); 309 qla24xx_configure_vp(vha);
310 ql_dbg(ql_dbg_dpc, vha, 0x4015,
311 "Configure VP end.\n");
302 return 0; 312 return 0;
303 } 313 }
304 314
305 if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) { 315 if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
316 ql_dbg(ql_dbg_dpc, vha, 0x4016,
317 "FCPort update scheduled.\n");
306 qla2x00_update_fcports(vha); 318 qla2x00_update_fcports(vha);
307 clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags); 319 clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
320 ql_dbg(ql_dbg_dpc, vha, 0x4017,
321 "FCPort update end.\n");
308 } 322 }
309 323
310 if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) && 324 if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
311 !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) && 325 !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
312 atomic_read(&vha->loop_state) != LOOP_DOWN) { 326 atomic_read(&vha->loop_state) != LOOP_DOWN) {
313 327
314 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n", 328 ql_dbg(ql_dbg_dpc, vha, 0x4018,
315 vha->host_no)); 329 "Relogin needed scheduled.\n");
316 qla2x00_relogin(vha); 330 qla2x00_relogin(vha);
317 331 ql_dbg(ql_dbg_dpc, vha, 0x4019,
318 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n", 332 "Relogin needed end.\n");
319 vha->host_no));
320 } 333 }
321 334
322 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) && 335 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
@@ -326,11 +339,17 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
326 339
327 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 340 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
328 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { 341 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
342 ql_dbg(ql_dbg_dpc, vha, 0x401a,
343 "Loop resync scheduled.\n");
329 qla2x00_loop_resync(vha); 344 qla2x00_loop_resync(vha);
330 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); 345 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
346 ql_dbg(ql_dbg_dpc, vha, 0x401b,
347 "Loop resync end.\n");
331 } 348 }
332 } 349 }
333 350
351 ql_dbg(ql_dbg_dpc, vha, 0x401c,
352 "Exiting %s.\n", __func__);
334 return 0; 353 return 0;
335} 354}
336 355
@@ -396,9 +415,10 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
396 415
397 /* Check up max-npiv-supports */ 416 /* Check up max-npiv-supports */
398 if (ha->num_vhosts > ha->max_npiv_vports) { 417 if (ha->num_vhosts > ha->max_npiv_vports) {
399 DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than " 418 ql_dbg(ql_dbg_vport, vha, 0xa004,
400 "max_npv_vports %ud.\n", base_vha->host_no, 419 "num_vhosts %ud is bigger "
401 ha->num_vhosts, ha->max_npiv_vports)); 420 "than max_npiv_vports %ud.\n",
421 ha->num_vhosts, ha->max_npiv_vports);
402 return VPCERR_UNSUPPORTED; 422 return VPCERR_UNSUPPORTED;
403 } 423 }
404 return 0; 424 return 0;
@@ -415,7 +435,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
415 435
416 vha = qla2x00_create_host(sht, ha); 436 vha = qla2x00_create_host(sht, ha);
417 if (!vha) { 437 if (!vha) {
418 DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n")); 438 ql_log(ql_log_warn, vha, 0xa005,
439 "scsi_host_alloc() failed for vport.\n");
419 return(NULL); 440 return(NULL);
420 } 441 }
421 442
@@ -429,8 +450,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
429 vha->device_flags = 0; 450 vha->device_flags = 0;
430 vha->vp_idx = qla24xx_allocate_vp_id(vha); 451 vha->vp_idx = qla24xx_allocate_vp_id(vha);
431 if (vha->vp_idx > ha->max_npiv_vports) { 452 if (vha->vp_idx > ha->max_npiv_vports) {
432 DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n", 453 ql_dbg(ql_dbg_vport, vha, 0xa006,
433 vha->host_no)); 454 "Couldn't allocate vp_id.\n");
434 goto create_vhost_failed; 455 goto create_vhost_failed;
435 } 456 }
436 vha->mgmt_svr_loop_id = 10 + vha->vp_idx; 457 vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
@@ -461,8 +482,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
461 host->max_id = MAX_TARGETS_2200; 482 host->max_id = MAX_TARGETS_2200;
462 host->transportt = qla2xxx_transport_vport_template; 483 host->transportt = qla2xxx_transport_vport_template;
463 484
464 DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n", 485 ql_dbg(ql_dbg_vport, vha, 0xa007,
465 vha->host_no, vha)); 486 "Detect vport hba %ld at address = %p.\n",
487 vha->host_no, vha);
466 488
467 vha->flags.init_done = 1; 489 vha->flags.init_done = 1;
468 490
@@ -567,9 +589,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
567 if (req) { 589 if (req) {
568 ret = qla25xx_delete_req_que(vha, req); 590 ret = qla25xx_delete_req_que(vha, req);
569 if (ret != QLA_SUCCESS) { 591 if (ret != QLA_SUCCESS) {
570 qla_printk(KERN_WARNING, ha, 592 ql_log(ql_log_warn, vha, 0x00ea,
571 "Couldn't delete req que %d\n", 593 "Couldn't delete req que %d.\n",
572 req->id); 594 req->id);
573 return ret; 595 return ret;
574 } 596 }
575 } 597 }
@@ -581,9 +603,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
581 if (rsp) { 603 if (rsp) {
582 ret = qla25xx_delete_rsp_que(vha, rsp); 604 ret = qla25xx_delete_rsp_que(vha, rsp);
583 if (ret != QLA_SUCCESS) { 605 if (ret != QLA_SUCCESS) {
584 qla_printk(KERN_WARNING, ha, 606 ql_log(ql_log_warn, vha, 0x00eb,
585 "Couldn't delete rsp que %d\n", 607 "Couldn't delete rsp que %d.\n",
586 rsp->id); 608 rsp->id);
587 return ret; 609 return ret;
588 } 610 }
589 } 611 }
@@ -604,8 +626,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
604 626
605 req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 627 req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
606 if (req == NULL) { 628 if (req == NULL) {
607 qla_printk(KERN_WARNING, ha, "could not allocate memory" 629 ql_log(ql_log_fatal, base_vha, 0x00d9,
608 "for request que\n"); 630 "Failed to allocate memory for request queue.\n");
609 goto failed; 631 goto failed;
610 } 632 }
611 633
@@ -614,8 +636,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
614 (req->length + 1) * sizeof(request_t), 636 (req->length + 1) * sizeof(request_t),
615 &req->dma, GFP_KERNEL); 637 &req->dma, GFP_KERNEL);
616 if (req->ring == NULL) { 638 if (req->ring == NULL) {
617 qla_printk(KERN_WARNING, ha, 639 ql_log(ql_log_fatal, base_vha, 0x00da,
618 "Memory Allocation failed - request_ring\n"); 640 "Failed to allocte memory for request_ring.\n");
619 goto que_failed; 641 goto que_failed;
620 } 642 }
621 643
@@ -623,8 +645,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
623 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues); 645 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
624 if (que_id >= ha->max_req_queues) { 646 if (que_id >= ha->max_req_queues) {
625 mutex_unlock(&ha->vport_lock); 647 mutex_unlock(&ha->vport_lock);
626 qla_printk(KERN_INFO, ha, "No resources to create " 648 ql_log(ql_log_warn, base_vha, 0x00db,
627 "additional request queue\n"); 649 "No resources to create additional request queue.\n");
628 goto que_failed; 650 goto que_failed;
629 } 651 }
630 set_bit(que_id, ha->req_qid_map); 652 set_bit(que_id, ha->req_qid_map);
@@ -633,6 +655,12 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
633 req->vp_idx = vp_idx; 655 req->vp_idx = vp_idx;
634 req->qos = qos; 656 req->qos = qos;
635 657
658 ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
659 "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
660 que_id, req->rid, req->vp_idx, req->qos);
661 ql_dbg(ql_dbg_init, base_vha, 0x00dc,
662 "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
663 que_id, req->rid, req->vp_idx, req->qos);
636 if (rsp_que < 0) 664 if (rsp_que < 0)
637 req->rsp = NULL; 665 req->rsp = NULL;
638 else 666 else
@@ -645,6 +673,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
645 options |= BIT_5; 673 options |= BIT_5;
646 req->options = options; 674 req->options = options;
647 675
676 ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
677 "options=0x%x.\n", req->options);
678 ql_dbg(ql_dbg_init, base_vha, 0x00dd,
679 "options=0x%x.\n", req->options);
648 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) 680 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
649 req->outstanding_cmds[cnt] = NULL; 681 req->outstanding_cmds[cnt] = NULL;
650 req->current_outstanding_cmd = 1; 682 req->current_outstanding_cmd = 1;
@@ -656,10 +688,21 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
656 reg = ISP_QUE_REG(ha, que_id); 688 reg = ISP_QUE_REG(ha, que_id);
657 req->max_q_depth = ha->req_q_map[0]->max_q_depth; 689 req->max_q_depth = ha->req_q_map[0]->max_q_depth;
658 mutex_unlock(&ha->vport_lock); 690 mutex_unlock(&ha->vport_lock);
691 ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
692 "ring_ptr=%p ring_index=%d, "
693 "cnt=%d id=%d max_q_depth=%d.\n",
694 req->ring_ptr, req->ring_index,
695 req->cnt, req->id, req->max_q_depth);
696 ql_dbg(ql_dbg_init, base_vha, 0x00de,
697 "ring_ptr=%p ring_index=%d, "
698 "cnt=%d id=%d max_q_depth=%d.\n",
699 req->ring_ptr, req->ring_index, req->cnt,
700 req->id, req->max_q_depth);
659 701
660 ret = qla25xx_init_req_que(base_vha, req); 702 ret = qla25xx_init_req_que(base_vha, req);
661 if (ret != QLA_SUCCESS) { 703 if (ret != QLA_SUCCESS) {
662 qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); 704 ql_log(ql_log_fatal, base_vha, 0x00df,
705 "%s failed.\n", __func__);
663 mutex_lock(&ha->vport_lock); 706 mutex_lock(&ha->vport_lock);
664 clear_bit(que_id, ha->req_qid_map); 707 clear_bit(que_id, ha->req_qid_map);
665 mutex_unlock(&ha->vport_lock); 708 mutex_unlock(&ha->vport_lock);
@@ -700,8 +743,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
700 743
701 rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 744 rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
702 if (rsp == NULL) { 745 if (rsp == NULL) {
703 qla_printk(KERN_WARNING, ha, "could not allocate memory for" 746 ql_log(ql_log_warn, base_vha, 0x0066,
704 " response que\n"); 747 "Failed to allocate memory for response queue.\n");
705 goto failed; 748 goto failed;
706 } 749 }
707 750
@@ -710,8 +753,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
710 (rsp->length + 1) * sizeof(response_t), 753 (rsp->length + 1) * sizeof(response_t),
711 &rsp->dma, GFP_KERNEL); 754 &rsp->dma, GFP_KERNEL);
712 if (rsp->ring == NULL) { 755 if (rsp->ring == NULL) {
713 qla_printk(KERN_WARNING, ha, 756 ql_log(ql_log_warn, base_vha, 0x00e1,
714 "Memory Allocation failed - response_ring\n"); 757 "Failed to allocate memory for response ring.\n");
715 goto que_failed; 758 goto que_failed;
716 } 759 }
717 760
@@ -719,8 +762,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
719 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues); 762 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
720 if (que_id >= ha->max_rsp_queues) { 763 if (que_id >= ha->max_rsp_queues) {
721 mutex_unlock(&ha->vport_lock); 764 mutex_unlock(&ha->vport_lock);
722 qla_printk(KERN_INFO, ha, "No resources to create " 765 ql_log(ql_log_warn, base_vha, 0x00e2,
723 "additional response queue\n"); 766 "No resources to create additional request queue.\n");
724 goto que_failed; 767 goto que_failed;
725 } 768 }
726 set_bit(que_id, ha->rsp_qid_map); 769 set_bit(que_id, ha->rsp_qid_map);
@@ -728,12 +771,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
728 if (ha->flags.msix_enabled) 771 if (ha->flags.msix_enabled)
729 rsp->msix = &ha->msix_entries[que_id + 1]; 772 rsp->msix = &ha->msix_entries[que_id + 1];
730 else 773 else
731 qla_printk(KERN_WARNING, ha, "msix not enabled\n"); 774 ql_log(ql_log_warn, base_vha, 0x00e3,
775 "MSIX not enalbled.\n");
732 776
733 ha->rsp_q_map[que_id] = rsp; 777 ha->rsp_q_map[que_id] = rsp;
734 rsp->rid = rid; 778 rsp->rid = rid;
735 rsp->vp_idx = vp_idx; 779 rsp->vp_idx = vp_idx;
736 rsp->hw = ha; 780 rsp->hw = ha;
781 ql_dbg(ql_dbg_init, base_vha, 0x00e4,
782 "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
783 que_id, rsp->rid, rsp->vp_idx, rsp->hw);
737 /* Use alternate PCI bus number */ 784 /* Use alternate PCI bus number */
738 if (MSB(rsp->rid)) 785 if (MSB(rsp->rid))
739 options |= BIT_4; 786 options |= BIT_4;
@@ -750,6 +797,14 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
750 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; 797 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
751 rsp->rsp_q_out = &reg->isp25mq.rsp_q_out; 798 rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
752 mutex_unlock(&ha->vport_lock); 799 mutex_unlock(&ha->vport_lock);
800 ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
801 "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
802 rsp->options, rsp->id, rsp->rsp_q_in,
803 rsp->rsp_q_out);
804 ql_dbg(ql_dbg_init, base_vha, 0x00e5,
805 "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
806 rsp->options, rsp->id, rsp->rsp_q_in,
807 rsp->rsp_q_out);
753 808
754 ret = qla25xx_request_irq(rsp); 809 ret = qla25xx_request_irq(rsp);
755 if (ret) 810 if (ret)
@@ -757,7 +812,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
757 812
758 ret = qla25xx_init_rsp_que(base_vha, rsp); 813 ret = qla25xx_init_rsp_que(base_vha, rsp);
759 if (ret != QLA_SUCCESS) { 814 if (ret != QLA_SUCCESS) {
760 qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); 815 ql_log(ql_log_fatal, base_vha, 0x00e7,
816 "%s failed.\n", __func__);
761 mutex_lock(&ha->vport_lock); 817 mutex_lock(&ha->vport_lock);
762 clear_bit(que_id, ha->rsp_qid_map); 818 clear_bit(que_id, ha->rsp_qid_map);
763 mutex_unlock(&ha->vport_lock); 819 mutex_unlock(&ha->vport_lock);
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index e1138bcc834c..5cbf33a50b14 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -348,6 +348,7 @@ static void
348qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off) 348qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
349{ 349{
350 u32 win_read; 350 u32 win_read;
351 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
351 352
352 ha->crb_win = CRB_HI(*off); 353 ha->crb_win = CRB_HI(*off);
353 writel(ha->crb_win, 354 writel(ha->crb_win,
@@ -358,9 +359,10 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
358 */ 359 */
359 win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase)); 360 win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
360 if (win_read != ha->crb_win) { 361 if (win_read != ha->crb_win) {
361 DEBUG2(qla_printk(KERN_INFO, ha, 362 ql_dbg(ql_dbg_p3p, vha, 0xb000,
362 "%s: Written crbwin (0x%x) != Read crbwin (0x%x), " 363 "%s: Written crbwin (0x%x) "
363 "off=0x%lx\n", __func__, ha->crb_win, win_read, *off)); 364 "!= Read crbwin (0x%x), off=0x%lx.\n",
365 ha->crb_win, win_read, *off);
364 } 366 }
365 *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; 367 *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
366} 368}
@@ -368,6 +370,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
368static inline unsigned long 370static inline unsigned long
369qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off) 371qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
370{ 372{
373 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
371 /* See if we are currently pointing to the region we want to use next */ 374 /* See if we are currently pointing to the region we want to use next */
372 if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) { 375 if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
373 /* No need to change window. PCIX and PCIEregs are in both 376 /* No need to change window. PCIX and PCIEregs are in both
@@ -398,9 +401,10 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
398 return off; 401 return off;
399 } 402 }
400 /* strange address given */ 403 /* strange address given */
401 qla_printk(KERN_WARNING, ha, 404 ql_dbg(ql_dbg_p3p, vha, 0xb001,
402 "%s: Warning: unm_nic_pci_set_crbwindow called with" 405 "%x: Warning: unm_nic_pci_set_crbwindow "
403 " an unknown address(%llx)\n", QLA2XXX_DRIVER_NAME, off); 406 "called with an unknown address(%llx).\n",
407 QLA2XXX_DRIVER_NAME, off);
404 return off; 408 return off;
405} 409}
406 410
@@ -563,6 +567,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
563{ 567{
564 int window; 568 int window;
565 u32 win_read; 569 u32 win_read;
570 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
566 571
567 if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, 572 if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
568 QLA82XX_ADDR_DDR_NET_MAX)) { 573 QLA82XX_ADDR_DDR_NET_MAX)) {
@@ -574,8 +579,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
574 win_read = qla82xx_rd_32(ha, 579 win_read = qla82xx_rd_32(ha,
575 ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); 580 ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
576 if ((win_read << 17) != window) { 581 if ((win_read << 17) != window) {
577 qla_printk(KERN_WARNING, ha, 582 ql_dbg(ql_dbg_p3p, vha, 0xb003,
578 "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n", 583 "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n",
579 __func__, window, win_read); 584 __func__, window, win_read);
580 } 585 }
581 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET; 586 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
@@ -583,7 +588,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
583 QLA82XX_ADDR_OCM0_MAX)) { 588 QLA82XX_ADDR_OCM0_MAX)) {
584 unsigned int temp1; 589 unsigned int temp1;
585 if ((addr & 0x00ff800) == 0xff800) { 590 if ((addr & 0x00ff800) == 0xff800) {
586 qla_printk(KERN_WARNING, ha, 591 ql_log(ql_log_warn, vha, 0xb004,
587 "%s: QM access not handled.\n", __func__); 592 "%s: QM access not handled.\n", __func__);
588 addr = -1UL; 593 addr = -1UL;
589 } 594 }
@@ -596,8 +601,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
596 temp1 = ((window & 0x1FF) << 7) | 601 temp1 = ((window & 0x1FF) << 7) |
597 ((window & 0x0FFFE0000) >> 17); 602 ((window & 0x0FFFE0000) >> 17);
598 if (win_read != temp1) { 603 if (win_read != temp1) {
599 qla_printk(KERN_WARNING, ha, 604 ql_log(ql_log_warn, vha, 0xb005,
600 "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n", 605 "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n",
601 __func__, temp1, win_read); 606 __func__, temp1, win_read);
602 } 607 }
603 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M; 608 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
@@ -612,8 +617,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
612 win_read = qla82xx_rd_32(ha, 617 win_read = qla82xx_rd_32(ha,
613 ha->ms_win_crb | QLA82XX_PCI_CRBSPACE); 618 ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
614 if (win_read != window) { 619 if (win_read != window) {
615 qla_printk(KERN_WARNING, ha, 620 ql_log(ql_log_warn, vha, 0xb006,
616 "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n", 621 "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n",
617 __func__, window, win_read); 622 __func__, window, win_read);
618 } 623 }
619 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET; 624 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
@@ -624,9 +629,9 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
624 */ 629 */
625 if ((qla82xx_pci_set_window_warning_count++ < 8) || 630 if ((qla82xx_pci_set_window_warning_count++ < 8) ||
626 (qla82xx_pci_set_window_warning_count%64 == 0)) { 631 (qla82xx_pci_set_window_warning_count%64 == 0)) {
627 qla_printk(KERN_WARNING, ha, 632 ql_log(ql_log_warn, vha, 0xb007,
628 "%s: Warning:%s Unknown address range!\n", __func__, 633 "%s: Warning:%s Unknown address range!.\n",
629 QLA2XXX_DRIVER_NAME); 634 __func__, QLA2XXX_DRIVER_NAME);
630 } 635 }
631 addr = -1UL; 636 addr = -1UL;
632 } 637 }
@@ -671,6 +676,7 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
671 uint8_t *mem_ptr = NULL; 676 uint8_t *mem_ptr = NULL;
672 unsigned long mem_base; 677 unsigned long mem_base;
673 unsigned long mem_page; 678 unsigned long mem_page;
679 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
674 680
675 write_lock_irqsave(&ha->hw_lock, flags); 681 write_lock_irqsave(&ha->hw_lock, flags);
676 682
@@ -682,9 +688,10 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
682 if ((start == -1UL) || 688 if ((start == -1UL) ||
683 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { 689 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
684 write_unlock_irqrestore(&ha->hw_lock, flags); 690 write_unlock_irqrestore(&ha->hw_lock, flags);
685 qla_printk(KERN_ERR, ha, 691 ql_log(ql_log_fatal, vha, 0xb008,
686 "%s out of bound pci memory access. " 692 "%s out of bound pci memory "
687 "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off); 693 "access, offset is 0x%llx.\n",
694 QLA2XXX_DRIVER_NAME, off);
688 return -1; 695 return -1;
689 } 696 }
690 697
@@ -741,6 +748,7 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
741 uint8_t *mem_ptr = NULL; 748 uint8_t *mem_ptr = NULL;
742 unsigned long mem_base; 749 unsigned long mem_base;
743 unsigned long mem_page; 750 unsigned long mem_page;
751 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
744 752
745 write_lock_irqsave(&ha->hw_lock, flags); 753 write_lock_irqsave(&ha->hw_lock, flags);
746 754
@@ -752,9 +760,10 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
752 if ((start == -1UL) || 760 if ((start == -1UL) ||
753 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { 761 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
754 write_unlock_irqrestore(&ha->hw_lock, flags); 762 write_unlock_irqrestore(&ha->hw_lock, flags);
755 qla_printk(KERN_ERR, ha, 763 ql_log(ql_log_fatal, vha, 0xb009,
756 "%s out of bound pci memory access. " 764 "%s out of bount memory "
757 "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off); 765 "access, offset is 0x%llx.\n",
766 QLA2XXX_DRIVER_NAME, off);
758 return -1; 767 return -1;
759 } 768 }
760 769
@@ -855,15 +864,16 @@ qla82xx_wait_rom_busy(struct qla_hw_data *ha)
855{ 864{
856 long timeout = 0; 865 long timeout = 0;
857 long done = 0 ; 866 long done = 0 ;
867 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
858 868
859 while (done == 0) { 869 while (done == 0) {
860 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); 870 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
861 done &= 4; 871 done &= 4;
862 timeout++; 872 timeout++;
863 if (timeout >= rom_max_timeout) { 873 if (timeout >= rom_max_timeout) {
864 DEBUG(qla_printk(KERN_INFO, ha, 874 ql_dbg(ql_dbg_p3p, vha, 0xb00a,
865 "%s: Timeout reached waiting for rom busy", 875 "%s: Timeout reached waiting for rom busy.\n",
866 QLA2XXX_DRIVER_NAME)); 876 QLA2XXX_DRIVER_NAME);
867 return -1; 877 return -1;
868 } 878 }
869 } 879 }
@@ -875,15 +885,16 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
875{ 885{
876 long timeout = 0; 886 long timeout = 0;
877 long done = 0 ; 887 long done = 0 ;
888 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
878 889
879 while (done == 0) { 890 while (done == 0) {
880 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); 891 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
881 done &= 2; 892 done &= 2;
882 timeout++; 893 timeout++;
883 if (timeout >= rom_max_timeout) { 894 if (timeout >= rom_max_timeout) {
884 DEBUG(qla_printk(KERN_INFO, ha, 895 ql_dbg(ql_dbg_p3p, vha, 0xb00b,
885 "%s: Timeout reached waiting for rom done", 896 "%s: Timeout reached waiting for rom done.\n",
886 QLA2XXX_DRIVER_NAME)); 897 QLA2XXX_DRIVER_NAME);
887 return -1; 898 return -1;
888 } 899 }
889 } 900 }
@@ -893,15 +904,16 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
893static int 904static int
894qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) 905qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
895{ 906{
907 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
908
896 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr); 909 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
897 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 910 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
898 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); 911 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
899 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb); 912 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
900 qla82xx_wait_rom_busy(ha); 913 qla82xx_wait_rom_busy(ha);
901 if (qla82xx_wait_rom_done(ha)) { 914 if (qla82xx_wait_rom_done(ha)) {
902 qla_printk(KERN_WARNING, ha, 915 ql_log(ql_log_fatal, vha, 0x00ba,
903 "%s: Error waiting for rom done\n", 916 "Error waiting for rom done.\n");
904 QLA2XXX_DRIVER_NAME);
905 return -1; 917 return -1;
906 } 918 }
907 /* Reset abyte_cnt and dummy_byte_cnt */ 919 /* Reset abyte_cnt and dummy_byte_cnt */
@@ -917,6 +929,7 @@ static int
917qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) 929qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
918{ 930{
919 int ret, loops = 0; 931 int ret, loops = 0;
932 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
920 933
921 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { 934 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
922 udelay(100); 935 udelay(100);
@@ -924,9 +937,8 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
924 loops++; 937 loops++;
925 } 938 }
926 if (loops >= 50000) { 939 if (loops >= 50000) {
927 qla_printk(KERN_INFO, ha, 940 ql_log(ql_log_fatal, vha, 0x00b9,
928 "%s: qla82xx_rom_lock failed\n", 941 "Failed to aquire SEM2 lock.\n");
929 QLA2XXX_DRIVER_NAME);
930 return -1; 942 return -1;
931 } 943 }
932 ret = qla82xx_do_rom_fast_read(ha, addr, valp); 944 ret = qla82xx_do_rom_fast_read(ha, addr, valp);
@@ -937,11 +949,12 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
937static int 949static int
938qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val) 950qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
939{ 951{
952 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
940 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR); 953 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
941 qla82xx_wait_rom_busy(ha); 954 qla82xx_wait_rom_busy(ha);
942 if (qla82xx_wait_rom_done(ha)) { 955 if (qla82xx_wait_rom_done(ha)) {
943 qla_printk(KERN_WARNING, ha, 956 ql_log(ql_log_warn, vha, 0xb00c,
944 "Error waiting for rom done\n"); 957 "Error waiting for rom done.\n");
945 return -1; 958 return -1;
946 } 959 }
947 *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA); 960 *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
@@ -955,6 +968,7 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
955 uint32_t done = 1 ; 968 uint32_t done = 1 ;
956 uint32_t val; 969 uint32_t val;
957 int ret = 0; 970 int ret = 0;
971 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
958 972
959 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); 973 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
960 while ((done != 0) && (ret == 0)) { 974 while ((done != 0) && (ret == 0)) {
@@ -964,8 +978,8 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
964 udelay(10); 978 udelay(10);
965 cond_resched(); 979 cond_resched();
966 if (timeout >= 50000) { 980 if (timeout >= 50000) {
967 qla_printk(KERN_WARNING, ha, 981 ql_log(ql_log_warn, vha, 0xb00d,
968 "Timeout reached waiting for write finish"); 982 "Timeout reached waiting for write finish.\n");
969 return -1; 983 return -1;
970 } 984 }
971 } 985 }
@@ -992,13 +1006,14 @@ qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
992static int 1006static int
993qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val) 1007qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
994{ 1008{
1009 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
995 if (qla82xx_flash_set_write_enable(ha)) 1010 if (qla82xx_flash_set_write_enable(ha))
996 return -1; 1011 return -1;
997 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val); 1012 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
998 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1); 1013 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
999 if (qla82xx_wait_rom_done(ha)) { 1014 if (qla82xx_wait_rom_done(ha)) {
1000 qla_printk(KERN_WARNING, ha, 1015 ql_log(ql_log_warn, vha, 0xb00e,
1001 "Error waiting for rom done\n"); 1016 "Error waiting for rom done.\n");
1002 return -1; 1017 return -1;
1003 } 1018 }
1004 return qla82xx_flash_wait_write_finish(ha); 1019 return qla82xx_flash_wait_write_finish(ha);
@@ -1007,10 +1022,11 @@ qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
1007static int 1022static int
1008qla82xx_write_disable_flash(struct qla_hw_data *ha) 1023qla82xx_write_disable_flash(struct qla_hw_data *ha)
1009{ 1024{
1025 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1010 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI); 1026 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
1011 if (qla82xx_wait_rom_done(ha)) { 1027 if (qla82xx_wait_rom_done(ha)) {
1012 qla_printk(KERN_WARNING, ha, 1028 ql_log(ql_log_warn, vha, 0xb00f,
1013 "Error waiting for rom done\n"); 1029 "Error waiting for rom done.\n");
1014 return -1; 1030 return -1;
1015 } 1031 }
1016 return 0; 1032 return 0;
@@ -1020,13 +1036,16 @@ static int
1020ql82xx_rom_lock_d(struct qla_hw_data *ha) 1036ql82xx_rom_lock_d(struct qla_hw_data *ha)
1021{ 1037{
1022 int loops = 0; 1038 int loops = 0;
1039 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1040
1023 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { 1041 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
1024 udelay(100); 1042 udelay(100);
1025 cond_resched(); 1043 cond_resched();
1026 loops++; 1044 loops++;
1027 } 1045 }
1028 if (loops >= 50000) { 1046 if (loops >= 50000) {
1029 qla_printk(KERN_WARNING, ha, "ROM lock failed\n"); 1047 ql_log(ql_log_warn, vha, 0xb010,
1048 "ROM lock failed.\n");
1030 return -1; 1049 return -1;
1031 } 1050 }
1032 return 0;; 1051 return 0;;
@@ -1037,10 +1056,12 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1037 uint32_t data) 1056 uint32_t data)
1038{ 1057{
1039 int ret = 0; 1058 int ret = 0;
1059 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1040 1060
1041 ret = ql82xx_rom_lock_d(ha); 1061 ret = ql82xx_rom_lock_d(ha);
1042 if (ret < 0) { 1062 if (ret < 0) {
1043 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 1063 ql_log(ql_log_warn, vha, 0xb011,
1064 "ROM lock failed.\n");
1044 return ret; 1065 return ret;
1045 } 1066 }
1046 1067
@@ -1053,8 +1074,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1053 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP); 1074 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
1054 qla82xx_wait_rom_busy(ha); 1075 qla82xx_wait_rom_busy(ha);
1055 if (qla82xx_wait_rom_done(ha)) { 1076 if (qla82xx_wait_rom_done(ha)) {
1056 qla_printk(KERN_WARNING, ha, 1077 ql_log(ql_log_warn, vha, 0xb012,
1057 "Error waiting for rom done\n"); 1078 "Error waiting for rom done.\n");
1058 ret = -1; 1079 ret = -1;
1059 goto done_write; 1080 goto done_write;
1060 } 1081 }
@@ -1159,8 +1180,8 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1159 */ 1180 */
1160 if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || 1181 if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
1161 qla82xx_rom_fast_read(ha, 4, &n) != 0) { 1182 qla82xx_rom_fast_read(ha, 4, &n) != 0) {
1162 qla_printk(KERN_WARNING, ha, 1183 ql_log(ql_log_fatal, vha, 0x006e,
1163 "[ERROR] Reading crb_init area: n: %08x\n", n); 1184 "Error Reading crb_init area: n: %08x.\n", n);
1164 return -1; 1185 return -1;
1165 } 1186 }
1166 1187
@@ -1172,20 +1193,18 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1172 1193
1173 /* number of addr/value pair should not exceed 1024 enteries */ 1194 /* number of addr/value pair should not exceed 1024 enteries */
1174 if (n >= 1024) { 1195 if (n >= 1024) {
1175 qla_printk(KERN_WARNING, ha, 1196 ql_log(ql_log_fatal, vha, 0x0071,
1176 "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n", 1197 "Card flash not initialized:n=0x%x.\n", n);
1177 QLA2XXX_DRIVER_NAME, __func__, n);
1178 return -1; 1198 return -1;
1179 } 1199 }
1180 1200
1181 qla_printk(KERN_INFO, ha, 1201 ql_log(ql_log_info, vha, 0x0072,
1182 "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n); 1202 "%d CRB init values found in ROM.\n", n);
1183 1203
1184 buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL); 1204 buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
1185 if (buf == NULL) { 1205 if (buf == NULL) {
1186 qla_printk(KERN_WARNING, ha, 1206 ql_log(ql_log_fatal, vha, 0x010c,
1187 "%s: [ERROR] Unable to malloc memory.\n", 1207 "Unable to allocate memory.\n");
1188 QLA2XXX_DRIVER_NAME);
1189 return -1; 1208 return -1;
1190 } 1209 }
1191 1210
@@ -1236,9 +1255,8 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1236 continue; 1255 continue;
1237 1256
1238 if (off == ADDR_ERROR) { 1257 if (off == ADDR_ERROR) {
1239 qla_printk(KERN_WARNING, ha, 1258 ql_log(ql_log_fatal, vha, 0x0116,
1240 "%s: [ERROR] Unknown addr: 0x%08lx\n", 1259 "Unknow addr: 0x%08lx.\n", buf[i].addr);
1241 QLA2XXX_DRIVER_NAME, buf[i].addr);
1242 continue; 1260 continue;
1243 } 1261 }
1244 1262
@@ -1370,7 +1388,7 @@ qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1370 if (j >= MAX_CTL_CHECK) { 1388 if (j >= MAX_CTL_CHECK) {
1371 if (printk_ratelimit()) 1389 if (printk_ratelimit())
1372 dev_err(&ha->pdev->dev, 1390 dev_err(&ha->pdev->dev,
1373 "failed to write through agent\n"); 1391 "failed to write through agent.\n");
1374 ret = -1; 1392 ret = -1;
1375 break; 1393 break;
1376 } 1394 }
@@ -1460,7 +1478,7 @@ qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
1460 if (j >= MAX_CTL_CHECK) { 1478 if (j >= MAX_CTL_CHECK) {
1461 if (printk_ratelimit()) 1479 if (printk_ratelimit())
1462 dev_err(&ha->pdev->dev, 1480 dev_err(&ha->pdev->dev,
1463 "failed to read through agent\n"); 1481 "failed to read through agent.\n");
1464 break; 1482 break;
1465 } 1483 }
1466 1484
@@ -1633,17 +1651,15 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1633 uint32_t len = 0; 1651 uint32_t len = 0;
1634 1652
1635 if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) { 1653 if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
1636 qla_printk(KERN_WARNING, ha, 1654 ql_log_pci(ql_log_fatal, ha->pdev, 0x000c,
1637 "Failed to reserve selected regions (%s)\n", 1655 "Failed to reserver selected regions.\n");
1638 pci_name(ha->pdev));
1639 goto iospace_error_exit; 1656 goto iospace_error_exit;
1640 } 1657 }
1641 1658
1642 /* Use MMIO operations for all accesses. */ 1659 /* Use MMIO operations for all accesses. */
1643 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { 1660 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
1644 qla_printk(KERN_ERR, ha, 1661 ql_log_pci(ql_log_fatal, ha->pdev, 0x000d,
1645 "region #0 not an MMIO resource (%s), aborting\n", 1662 "Region #0 not an MMIO resource, aborting.\n");
1646 pci_name(ha->pdev));
1647 goto iospace_error_exit; 1663 goto iospace_error_exit;
1648 } 1664 }
1649 1665
@@ -1651,9 +1667,8 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1651 ha->nx_pcibase = 1667 ha->nx_pcibase =
1652 (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len); 1668 (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
1653 if (!ha->nx_pcibase) { 1669 if (!ha->nx_pcibase) {
1654 qla_printk(KERN_ERR, ha, 1670 ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
1655 "cannot remap pcibase MMIO (%s), aborting\n", 1671 "Cannot remap pcibase MMIO, aborting.\n");
1656 pci_name(ha->pdev));
1657 pci_release_regions(ha->pdev); 1672 pci_release_regions(ha->pdev);
1658 goto iospace_error_exit; 1673 goto iospace_error_exit;
1659 } 1674 }
@@ -1667,9 +1682,8 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1667 (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) + 1682 (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
1668 (ha->pdev->devfn << 12)), 4); 1683 (ha->pdev->devfn << 12)), 4);
1669 if (!ha->nxdb_wr_ptr) { 1684 if (!ha->nxdb_wr_ptr) {
1670 qla_printk(KERN_ERR, ha, 1685 ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
1671 "cannot remap MMIO (%s), aborting\n", 1686 "Cannot remap MMIO, aborting.\n");
1672 pci_name(ha->pdev));
1673 pci_release_regions(ha->pdev); 1687 pci_release_regions(ha->pdev);
1674 goto iospace_error_exit; 1688 goto iospace_error_exit;
1675 } 1689 }
@@ -1687,6 +1701,16 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1687 1701
1688 ha->max_req_queues = ha->max_rsp_queues = 1; 1702 ha->max_req_queues = ha->max_rsp_queues = 1;
1689 ha->msix_count = ha->max_rsp_queues + 1; 1703 ha->msix_count = ha->max_rsp_queues + 1;
1704 ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
1705 "nx_pci_base=%p iobase=%p "
1706 "max_req_queues=%d msix_count=%d.\n",
1707 ha->nx_pcibase, ha->iobase,
1708 ha->max_req_queues, ha->msix_count);
1709 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
1710 "nx_pci_base=%p iobase=%p "
1711 "max_req_queues=%d msix_count=%d.\n",
1712 ha->nx_pcibase, ha->iobase,
1713 ha->max_req_queues, ha->msix_count);
1690 return 0; 1714 return 0;
1691 1715
1692iospace_error_exit: 1716iospace_error_exit:
@@ -1712,6 +1736,9 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
1712 pci_set_master(ha->pdev); 1736 pci_set_master(ha->pdev);
1713 ret = pci_set_mwi(ha->pdev); 1737 ret = pci_set_mwi(ha->pdev);
1714 ha->chip_revision = ha->pdev->revision; 1738 ha->chip_revision = ha->pdev->revision;
1739 ql_dbg(ql_dbg_init, vha, 0x0043,
1740 "Chip revision:%ld.\n",
1741 ha->chip_revision);
1715 return 0; 1742 return 0;
1716} 1743}
1717 1744
@@ -1877,6 +1904,7 @@ qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1877{ 1904{
1878 u32 val = 0; 1905 u32 val = 0;
1879 int retries = 60; 1906 int retries = 60;
1907 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1880 1908
1881 do { 1909 do {
1882 read_lock(&ha->hw_lock); 1910 read_lock(&ha->hw_lock);
@@ -1892,15 +1920,15 @@ qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1892 default: 1920 default:
1893 break; 1921 break;
1894 } 1922 }
1895 qla_printk(KERN_WARNING, ha, 1923 ql_log(ql_log_info, vha, 0x00a8,
1896 "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n", 1924 "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n",
1897 val, retries); 1925 val, retries);
1898 1926
1899 msleep(500); 1927 msleep(500);
1900 1928
1901 } while (--retries); 1929 } while (--retries);
1902 1930
1903 qla_printk(KERN_INFO, ha, 1931 ql_log(ql_log_fatal, vha, 0x00a9,
1904 "Cmd Peg initialization failed: 0x%x.\n", val); 1932 "Cmd Peg initialization failed: 0x%x.\n", val);
1905 1933
1906 val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); 1934 val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
@@ -1915,6 +1943,7 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1915{ 1943{
1916 u32 val = 0; 1944 u32 val = 0;
1917 int retries = 60; 1945 int retries = 60;
1946 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1918 1947
1919 do { 1948 do {
1920 read_lock(&ha->hw_lock); 1949 read_lock(&ha->hw_lock);
@@ -1930,17 +1959,16 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1930 default: 1959 default:
1931 break; 1960 break;
1932 } 1961 }
1933 1962 ql_log(ql_log_info, vha, 0x00ab,
1934 qla_printk(KERN_WARNING, ha, 1963 "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n",
1935 "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n", 1964 val, retries);
1936 val, retries);
1937 1965
1938 msleep(500); 1966 msleep(500);
1939 1967
1940 } while (--retries); 1968 } while (--retries);
1941 1969
1942 qla_printk(KERN_INFO, ha, 1970 ql_log(ql_log_fatal, vha, 0x00ac,
1943 "Rcv Peg initialization failed: 0x%x.\n", val); 1971 "Rcv Peg initializatin failed: 0x%x.\n", val);
1944 read_lock(&ha->hw_lock); 1972 read_lock(&ha->hw_lock);
1945 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED); 1973 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
1946 read_unlock(&ha->hw_lock); 1974 read_unlock(&ha->hw_lock);
@@ -1989,13 +2017,11 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1989 } 2017 }
1990 2018
1991 if (ha->mcp) { 2019 if (ha->mcp) {
1992 DEBUG3_11(printk(KERN_INFO "%s(%ld): " 2020 ql_dbg(ql_dbg_async, vha, 0x5052,
1993 "Got mailbox completion. cmd=%x.\n", 2021 "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
1994 __func__, vha->host_no, ha->mcp->mb[0]));
1995 } else { 2022 } else {
1996 qla_printk(KERN_INFO, ha, 2023 ql_dbg(ql_dbg_async, vha, 0x5053,
1997 "%s(%ld): MBX pointer ERROR!\n", 2024 "MBX pointer ERROR.\n");
1998 __func__, vha->host_no);
1999 } 2025 }
2000} 2026}
2001 2027
@@ -2019,13 +2045,13 @@ qla82xx_intr_handler(int irq, void *dev_id)
2019 int status = 0, status1 = 0; 2045 int status = 0, status1 = 0;
2020 unsigned long flags; 2046 unsigned long flags;
2021 unsigned long iter; 2047 unsigned long iter;
2022 uint32_t stat; 2048 uint32_t stat = 0;
2023 uint16_t mb[4]; 2049 uint16_t mb[4];
2024 2050
2025 rsp = (struct rsp_que *) dev_id; 2051 rsp = (struct rsp_que *) dev_id;
2026 if (!rsp) { 2052 if (!rsp) {
2027 printk(KERN_INFO 2053 printk(KERN_INFO
2028 "%s(): NULL response queue pointer\n", __func__); 2054 "%s(): NULL response queue pointer.\n", __func__);
2029 return IRQ_NONE; 2055 return IRQ_NONE;
2030 } 2056 }
2031 ha = rsp->hw; 2057 ha = rsp->hw;
@@ -2075,9 +2101,9 @@ qla82xx_intr_handler(int irq, void *dev_id)
2075 qla24xx_process_response_queue(vha, rsp); 2101 qla24xx_process_response_queue(vha, rsp);
2076 break; 2102 break;
2077 default: 2103 default:
2078 DEBUG2(printk("scsi(%ld): " 2104 ql_dbg(ql_dbg_async, vha, 0x5054,
2079 " Unrecognized interrupt type (%d).\n", 2105 "Unrecognized interrupt type (%d).\n",
2080 vha->host_no, stat & 0xff)); 2106 stat & 0xff);
2081 break; 2107 break;
2082 } 2108 }
2083 } 2109 }
@@ -2089,8 +2115,8 @@ qla82xx_intr_handler(int irq, void *dev_id)
2089 2115
2090#ifdef QL_DEBUG_LEVEL_17 2116#ifdef QL_DEBUG_LEVEL_17
2091 if (!irq && ha->flags.eeh_busy) 2117 if (!irq && ha->flags.eeh_busy)
2092 qla_printk(KERN_WARNING, ha, 2118 ql_log(ql_log_warn, vha, 0x503d,
2093 "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n", 2119 "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2094 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); 2120 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2095#endif 2121#endif
2096 2122
@@ -2111,13 +2137,13 @@ qla82xx_msix_default(int irq, void *dev_id)
2111 struct device_reg_82xx __iomem *reg; 2137 struct device_reg_82xx __iomem *reg;
2112 int status = 0; 2138 int status = 0;
2113 unsigned long flags; 2139 unsigned long flags;
2114 uint32_t stat; 2140 uint32_t stat = 0;
2115 uint16_t mb[4]; 2141 uint16_t mb[4];
2116 2142
2117 rsp = (struct rsp_que *) dev_id; 2143 rsp = (struct rsp_que *) dev_id;
2118 if (!rsp) { 2144 if (!rsp) {
2119 printk(KERN_INFO 2145 printk(KERN_INFO
2120 "%s(): NULL response queue pointer\n", __func__); 2146 "%s(): NULL response queue pointer.\n", __func__);
2121 return IRQ_NONE; 2147 return IRQ_NONE;
2122 } 2148 }
2123 ha = rsp->hw; 2149 ha = rsp->hw;
@@ -2149,9 +2175,9 @@ qla82xx_msix_default(int irq, void *dev_id)
2149 qla24xx_process_response_queue(vha, rsp); 2175 qla24xx_process_response_queue(vha, rsp);
2150 break; 2176 break;
2151 default: 2177 default:
2152 DEBUG2(printk("scsi(%ld): " 2178 ql_dbg(ql_dbg_async, vha, 0x5041,
2153 " Unrecognized interrupt type (%d).\n", 2179 "Unrecognized interrupt type (%d).\n",
2154 vha->host_no, stat & 0xff)); 2180 stat & 0xff);
2155 break; 2181 break;
2156 } 2182 }
2157 } 2183 }
@@ -2162,9 +2188,9 @@ qla82xx_msix_default(int irq, void *dev_id)
2162 2188
2163#ifdef QL_DEBUG_LEVEL_17 2189#ifdef QL_DEBUG_LEVEL_17
2164 if (!irq && ha->flags.eeh_busy) 2190 if (!irq && ha->flags.eeh_busy)
2165 qla_printk(KERN_WARNING, ha, 2191 ql_log(ql_log_warn, vha, 0x5044,
2166 "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n", 2192 "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
2167 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); 2193 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2168#endif 2194#endif
2169 2195
2170 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2196 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
@@ -2186,7 +2212,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
2186 rsp = (struct rsp_que *) dev_id; 2212 rsp = (struct rsp_que *) dev_id;
2187 if (!rsp) { 2213 if (!rsp) {
2188 printk(KERN_INFO 2214 printk(KERN_INFO
2189 "%s(): NULL response queue pointer\n", __func__); 2215 "%s(): NULL response queue pointer.\n", __func__);
2190 return IRQ_NONE; 2216 return IRQ_NONE;
2191 } 2217 }
2192 2218
@@ -2215,7 +2241,7 @@ qla82xx_poll(int irq, void *dev_id)
2215 rsp = (struct rsp_que *) dev_id; 2241 rsp = (struct rsp_que *) dev_id;
2216 if (!rsp) { 2242 if (!rsp) {
2217 printk(KERN_INFO 2243 printk(KERN_INFO
2218 "%s(): NULL response queue pointer\n", __func__); 2244 "%s(): NULL response queue pointer.\n", __func__);
2219 return; 2245 return;
2220 } 2246 }
2221 ha = rsp->hw; 2247 ha = rsp->hw;
@@ -2245,9 +2271,9 @@ qla82xx_poll(int irq, void *dev_id)
2245 qla24xx_process_response_queue(vha, rsp); 2271 qla24xx_process_response_queue(vha, rsp);
2246 break; 2272 break;
2247 default: 2273 default:
2248 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 2274 ql_dbg(ql_dbg_p3p, vha, 0xb013,
2249 "(%d).\n", 2275 "Unrecognized interrupt type (%d).\n",
2250 vha->host_no, stat & 0xff)); 2276 stat * 0xff);
2251 break; 2277 break;
2252 } 2278 }
2253 } 2279 }
@@ -2347,9 +2373,8 @@ qla82xx_set_rst_ready(struct qla_hw_data *ha)
2347 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2373 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2348 } 2374 }
2349 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2375 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2350 qla_printk(KERN_INFO, ha, 2376 ql_log(ql_log_info, vha, 0x00bb,
2351 "%s(%ld):drv_state = 0x%x\n", 2377 "drv_state = 0x%x.\n", drv_state);
2352 __func__, vha->host_no, drv_state);
2353 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 2378 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2354} 2379}
2355 2380
@@ -2392,8 +2417,8 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
2392 struct qla_hw_data *ha = vha->hw; 2417 struct qla_hw_data *ha = vha->hw;
2393 2418
2394 if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) { 2419 if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
2395 qla_printk(KERN_ERR, ha, 2420 ql_log(ql_log_fatal, vha, 0x009f,
2396 "%s: Error during CRB Initialization\n", __func__); 2421 "Error during CRB initialization.\n");
2397 return QLA_FUNCTION_FAILED; 2422 return QLA_FUNCTION_FAILED;
2398 } 2423 }
2399 udelay(500); 2424 udelay(500);
@@ -2411,27 +2436,27 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
2411 if (ql2xfwloadbin == 2) 2436 if (ql2xfwloadbin == 2)
2412 goto try_blob_fw; 2437 goto try_blob_fw;
2413 2438
2414 qla_printk(KERN_INFO, ha, 2439 ql_log(ql_log_info, vha, 0x00a0,
2415 "Attempting to load firmware from flash\n"); 2440 "Attempting to load firmware from flash.\n");
2416 2441
2417 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { 2442 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2418 qla_printk(KERN_ERR, ha, 2443 ql_log(ql_log_info, vha, 0x00a1,
2419 "Firmware loaded successfully from flash\n"); 2444 "Firmware loaded successully from flash.\n");
2420 return QLA_SUCCESS; 2445 return QLA_SUCCESS;
2421 } else { 2446 } else {
2422 qla_printk(KERN_ERR, ha, 2447 ql_log(ql_log_warn, vha, 0x0108,
2423 "Firmware load from flash failed\n"); 2448 "Firmware load from flash failed.\n");
2424 } 2449 }
2425 2450
2426try_blob_fw: 2451try_blob_fw:
2427 qla_printk(KERN_INFO, ha, 2452 ql_log(ql_log_info, vha, 0x00a2,
2428 "Attempting to load firmware from blob\n"); 2453 "Attempting to load firmware from blob.\n");
2429 2454
2430 /* Load firmware blob. */ 2455 /* Load firmware blob. */
2431 blob = ha->hablob = qla2x00_request_firmware(vha); 2456 blob = ha->hablob = qla2x00_request_firmware(vha);
2432 if (!blob) { 2457 if (!blob) {
2433 qla_printk(KERN_ERR, ha, 2458 ql_log(ql_log_fatal, vha, 0x00a3,
2434 "Firmware image not present.\n"); 2459 "Firmware image not preset.\n");
2435 goto fw_load_failed; 2460 goto fw_load_failed;
2436 } 2461 }
2437 2462
@@ -2441,20 +2466,19 @@ try_blob_fw:
2441 /* Fallback to URI format */ 2466 /* Fallback to URI format */
2442 if (qla82xx_validate_firmware_blob(vha, 2467 if (qla82xx_validate_firmware_blob(vha,
2443 QLA82XX_UNIFIED_ROMIMAGE)) { 2468 QLA82XX_UNIFIED_ROMIMAGE)) {
2444 qla_printk(KERN_ERR, ha, 2469 ql_log(ql_log_fatal, vha, 0x00a4,
2445 "No valid firmware image found!!!"); 2470 "No valid firmware image found.\n");
2446 return QLA_FUNCTION_FAILED; 2471 return QLA_FUNCTION_FAILED;
2447 } 2472 }
2448 } 2473 }
2449 2474
2450 if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) { 2475 if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
2451 qla_printk(KERN_ERR, ha, 2476 ql_log(ql_log_info, vha, 0x00a5,
2452 "%s: Firmware loaded successfully " 2477 "Firmware loaded successfully from binary blob.\n");
2453 " from binary blob\n", __func__);
2454 return QLA_SUCCESS; 2478 return QLA_SUCCESS;
2455 } else { 2479 } else {
2456 qla_printk(KERN_ERR, ha, 2480 ql_log(ql_log_fatal, vha, 0x00a6,
2457 "Firmware load failed from binary blob\n"); 2481 "Firmware load failed for binary blob.\n");
2458 blob->fw = NULL; 2482 blob->fw = NULL;
2459 blob = NULL; 2483 blob = NULL;
2460 goto fw_load_failed; 2484 goto fw_load_failed;
@@ -2486,15 +2510,15 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
2486 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0); 2510 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
2487 2511
2488 if (qla82xx_load_fw(vha) != QLA_SUCCESS) { 2512 if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
2489 qla_printk(KERN_INFO, ha, 2513 ql_log(ql_log_fatal, vha, 0x00a7,
2490 "%s: Error trying to start fw!\n", __func__); 2514 "Error trying to start fw.\n");
2491 return QLA_FUNCTION_FAILED; 2515 return QLA_FUNCTION_FAILED;
2492 } 2516 }
2493 2517
2494 /* Handshake with the card before we register the devices. */ 2518 /* Handshake with the card before we register the devices. */
2495 if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) { 2519 if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
2496 qla_printk(KERN_INFO, ha, 2520 ql_log(ql_log_fatal, vha, 0x00aa,
2497 "%s: Error during card handshake!\n", __func__); 2521 "Error during card handshake.\n");
2498 return QLA_FUNCTION_FAILED; 2522 return QLA_FUNCTION_FAILED;
2499 } 2523 }
2500 2524
@@ -2663,8 +2687,11 @@ qla82xx_start_scsi(srb_t *sp)
2663 /* Send marker if required */ 2687 /* Send marker if required */
2664 if (vha->marker_needed != 0) { 2688 if (vha->marker_needed != 0) {
2665 if (qla2x00_marker(vha, req, 2689 if (qla2x00_marker(vha, req,
2666 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) 2690 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2691 ql_log(ql_log_warn, vha, 0x300c,
2692 "qla2x00_marker failed for cmd=%p.\n", cmd);
2667 return QLA_FUNCTION_FAILED; 2693 return QLA_FUNCTION_FAILED;
2694 }
2668 vha->marker_needed = 0; 2695 vha->marker_needed = 0;
2669 } 2696 }
2670 2697
@@ -2701,8 +2728,13 @@ qla82xx_start_scsi(srb_t *sp)
2701 uint16_t i; 2728 uint16_t i;
2702 2729
2703 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds); 2730 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
2704 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) 2731 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2732 ql_dbg(ql_dbg_io, vha, 0x300d,
2733 "Num of DSD list %d is than %d for cmd=%p.\n",
2734 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2735 cmd);
2705 goto queuing_error; 2736 goto queuing_error;
2737 }
2706 2738
2707 if (more_dsd_lists <= ha->gbl_dsd_avail) 2739 if (more_dsd_lists <= ha->gbl_dsd_avail)
2708 goto sufficient_dsds; 2740 goto sufficient_dsds;
@@ -2711,13 +2743,20 @@ qla82xx_start_scsi(srb_t *sp)
2711 2743
2712 for (i = 0; i < more_dsd_lists; i++) { 2744 for (i = 0; i < more_dsd_lists; i++) {
2713 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 2745 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2714 if (!dsd_ptr) 2746 if (!dsd_ptr) {
2747 ql_log(ql_log_fatal, vha, 0x300e,
2748 "Failed to allocate memory for dsd_dma "
2749 "for cmd=%p.\n", cmd);
2715 goto queuing_error; 2750 goto queuing_error;
2751 }
2716 2752
2717 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, 2753 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2718 GFP_ATOMIC, &dsd_ptr->dsd_list_dma); 2754 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2719 if (!dsd_ptr->dsd_addr) { 2755 if (!dsd_ptr->dsd_addr) {
2720 kfree(dsd_ptr); 2756 kfree(dsd_ptr);
2757 ql_log(ql_log_fatal, vha, 0x300f,
2758 "Failed to allocate memory for dsd_addr "
2759 "for cmd=%p.\n", cmd);
2721 goto queuing_error; 2760 goto queuing_error;
2722 } 2761 }
2723 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list); 2762 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
@@ -2742,17 +2781,16 @@ sufficient_dsds:
2742 2781
2743 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 2782 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2744 if (!sp->ctx) { 2783 if (!sp->ctx) {
2745 DEBUG(printk(KERN_INFO 2784 ql_log(ql_log_fatal, vha, 0x3010,
2746 "%s(%ld): failed to allocate" 2785 "Failed to allocate ctx for cmd=%p.\n", cmd);
2747 " ctx.\n", __func__, vha->host_no));
2748 goto queuing_error; 2786 goto queuing_error;
2749 } 2787 }
2750 memset(ctx, 0, sizeof(struct ct6_dsd)); 2788 memset(ctx, 0, sizeof(struct ct6_dsd));
2751 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool, 2789 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2752 GFP_ATOMIC, &ctx->fcp_cmnd_dma); 2790 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2753 if (!ctx->fcp_cmnd) { 2791 if (!ctx->fcp_cmnd) {
2754 DEBUG2_3(printk("%s(%ld): failed to allocate" 2792 ql_log(ql_log_fatal, vha, 0x3011,
2755 " fcp_cmnd.\n", __func__, vha->host_no)); 2793 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2756 goto queuing_error_fcp_cmnd; 2794 goto queuing_error_fcp_cmnd;
2757 } 2795 }
2758 2796
@@ -2766,6 +2804,9 @@ sufficient_dsds:
2766 /* SCSI command bigger than 16 bytes must be 2804 /* SCSI command bigger than 16 bytes must be
2767 * multiple of 4 2805 * multiple of 4
2768 */ 2806 */
2807 ql_log(ql_log_warn, vha, 0x3012,
2808 "scsi cmd len %d not multiple of 4 "
2809 "for cmd=%p.\n", cmd->cmd_len, cmd);
2769 goto queuing_error_fcp_cmnd; 2810 goto queuing_error_fcp_cmnd;
2770 } 2811 }
2771 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; 2812 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
@@ -2845,7 +2886,7 @@ sufficient_dsds:
2845 cmd_pkt->entry_status = (uint8_t) rsp->id; 2886 cmd_pkt->entry_status = (uint8_t) rsp->id;
2846 } else { 2887 } else {
2847 struct cmd_type_7 *cmd_pkt; 2888 struct cmd_type_7 *cmd_pkt;
2848 req_cnt = qla24xx_calc_iocbs(tot_dsds); 2889 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2849 if (req->cnt < (req_cnt + 2)) { 2890 if (req->cnt < (req_cnt + 2)) {
2850 cnt = (uint16_t)RD_REG_DWORD_RELAXED( 2891 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2851 &reg->req_q_out[0]); 2892 &reg->req_q_out[0]);
@@ -2979,8 +3020,8 @@ qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
2979 /* Dword reads to flash. */ 3020 /* Dword reads to flash. */
2980 for (i = 0; i < length/4; i++, faddr += 4) { 3021 for (i = 0; i < length/4; i++, faddr += 4) {
2981 if (qla82xx_rom_fast_read(ha, faddr, &val)) { 3022 if (qla82xx_rom_fast_read(ha, faddr, &val)) {
2982 qla_printk(KERN_WARNING, ha, 3023 ql_log(ql_log_warn, vha, 0x0106,
2983 "Do ROM fast read failed\n"); 3024 "Do ROM fast read failed.\n");
2984 goto done_read; 3025 goto done_read;
2985 } 3026 }
2986 dwptr[i] = __constant_cpu_to_le32(val); 3027 dwptr[i] = __constant_cpu_to_le32(val);
@@ -2994,10 +3035,12 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha)
2994{ 3035{
2995 int ret; 3036 int ret;
2996 uint32_t val; 3037 uint32_t val;
3038 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2997 3039
2998 ret = ql82xx_rom_lock_d(ha); 3040 ret = ql82xx_rom_lock_d(ha);
2999 if (ret < 0) { 3041 if (ret < 0) {
3000 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 3042 ql_log(ql_log_warn, vha, 0xb014,
3043 "ROM Lock failed.\n");
3001 return ret; 3044 return ret;
3002 } 3045 }
3003 3046
@@ -3013,7 +3056,8 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha)
3013 } 3056 }
3014 3057
3015 if (qla82xx_write_disable_flash(ha) != 0) 3058 if (qla82xx_write_disable_flash(ha) != 0)
3016 qla_printk(KERN_WARNING, ha, "Write disable failed\n"); 3059 ql_log(ql_log_warn, vha, 0xb015,
3060 "Write disable failed.\n");
3017 3061
3018done_unprotect: 3062done_unprotect:
3019 qla82xx_rom_unlock(ha); 3063 qla82xx_rom_unlock(ha);
@@ -3025,10 +3069,12 @@ qla82xx_protect_flash(struct qla_hw_data *ha)
3025{ 3069{
3026 int ret; 3070 int ret;
3027 uint32_t val; 3071 uint32_t val;
3072 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3028 3073
3029 ret = ql82xx_rom_lock_d(ha); 3074 ret = ql82xx_rom_lock_d(ha);
3030 if (ret < 0) { 3075 if (ret < 0) {
3031 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 3076 ql_log(ql_log_warn, vha, 0xb016,
3077 "ROM Lock failed.\n");
3032 return ret; 3078 return ret;
3033 } 3079 }
3034 3080
@@ -3040,10 +3086,12 @@ qla82xx_protect_flash(struct qla_hw_data *ha)
3040 /* LOCK all sectors */ 3086 /* LOCK all sectors */
3041 ret = qla82xx_write_status_reg(ha, val); 3087 ret = qla82xx_write_status_reg(ha, val);
3042 if (ret < 0) 3088 if (ret < 0)
3043 qla_printk(KERN_WARNING, ha, "Write status register failed\n"); 3089 ql_log(ql_log_warn, vha, 0xb017,
3090 "Write status register failed.\n");
3044 3091
3045 if (qla82xx_write_disable_flash(ha) != 0) 3092 if (qla82xx_write_disable_flash(ha) != 0)
3046 qla_printk(KERN_WARNING, ha, "Write disable failed\n"); 3093 ql_log(ql_log_warn, vha, 0xb018,
3094 "Write disable failed.\n");
3047done_protect: 3095done_protect:
3048 qla82xx_rom_unlock(ha); 3096 qla82xx_rom_unlock(ha);
3049 return ret; 3097 return ret;
@@ -3053,10 +3101,12 @@ static int
3053qla82xx_erase_sector(struct qla_hw_data *ha, int addr) 3101qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
3054{ 3102{
3055 int ret = 0; 3103 int ret = 0;
3104 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3056 3105
3057 ret = ql82xx_rom_lock_d(ha); 3106 ret = ql82xx_rom_lock_d(ha);
3058 if (ret < 0) { 3107 if (ret < 0) {
3059 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n"); 3108 ql_log(ql_log_warn, vha, 0xb019,
3109 "ROM Lock failed.\n");
3060 return ret; 3110 return ret;
3061 } 3111 }
3062 3112
@@ -3066,8 +3116,8 @@ qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
3066 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE); 3116 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
3067 3117
3068 if (qla82xx_wait_rom_done(ha)) { 3118 if (qla82xx_wait_rom_done(ha)) {
3069 qla_printk(KERN_WARNING, ha, 3119 ql_log(ql_log_warn, vha, 0xb01a,
3070 "Error waiting for rom done\n"); 3120 "Error waiting for rom done.\n");
3071 ret = -1; 3121 ret = -1;
3072 goto done; 3122 goto done;
3073 } 3123 }
@@ -3110,10 +3160,10 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3110 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 3160 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
3111 &optrom_dma, GFP_KERNEL); 3161 &optrom_dma, GFP_KERNEL);
3112 if (!optrom) { 3162 if (!optrom) {
3113 qla_printk(KERN_DEBUG, ha, 3163 ql_log(ql_log_warn, vha, 0xb01b,
3114 "Unable to allocate memory for optrom " 3164 "Unable to allocate memory "
3115 "burst write (%x KB).\n", 3165 "for optron burst write (%x KB).\n",
3116 OPTROM_BURST_SIZE / 1024); 3166 OPTROM_BURST_SIZE / 1024);
3117 } 3167 }
3118 } 3168 }
3119 3169
@@ -3122,8 +3172,8 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3122 3172
3123 ret = qla82xx_unprotect_flash(ha); 3173 ret = qla82xx_unprotect_flash(ha);
3124 if (ret) { 3174 if (ret) {
3125 qla_printk(KERN_WARNING, ha, 3175 ql_log(ql_log_warn, vha, 0xb01c,
3126 "Unable to unprotect flash for update.\n"); 3176 "Unable to unprotect flash for update.\n");
3127 goto write_done; 3177 goto write_done;
3128 } 3178 }
3129 3179
@@ -3133,9 +3183,9 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3133 3183
3134 ret = qla82xx_erase_sector(ha, faddr); 3184 ret = qla82xx_erase_sector(ha, faddr);
3135 if (ret) { 3185 if (ret) {
3136 DEBUG9(qla_printk(KERN_ERR, ha, 3186 ql_log(ql_log_warn, vha, 0xb01d,
3137 "Unable to erase sector: " 3187 "Unable to erase sector: address=%x.\n",
3138 "address=%x.\n", faddr)); 3188 faddr);
3139 break; 3189 break;
3140 } 3190 }
3141 } 3191 }
@@ -3149,12 +3199,12 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3149 (ha->flash_data_off | faddr), 3199 (ha->flash_data_off | faddr),
3150 OPTROM_BURST_DWORDS); 3200 OPTROM_BURST_DWORDS);
3151 if (ret != QLA_SUCCESS) { 3201 if (ret != QLA_SUCCESS) {
3152 qla_printk(KERN_WARNING, ha, 3202 ql_log(ql_log_warn, vha, 0xb01e,
3153 "Unable to burst-write optrom segment " 3203 "Unable to burst-write optrom segment "
3154 "(%x/%x/%llx).\n", ret, 3204 "(%x/%x/%llx).\n", ret,
3155 (ha->flash_data_off | faddr), 3205 (ha->flash_data_off | faddr),
3156 (unsigned long long)optrom_dma); 3206 (unsigned long long)optrom_dma);
3157 qla_printk(KERN_WARNING, ha, 3207 ql_log(ql_log_warn, vha, 0xb01f,
3158 "Reverting to slow-write.\n"); 3208 "Reverting to slow-write.\n");
3159 3209
3160 dma_free_coherent(&ha->pdev->dev, 3210 dma_free_coherent(&ha->pdev->dev,
@@ -3171,16 +3221,16 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3171 ret = qla82xx_write_flash_dword(ha, faddr, 3221 ret = qla82xx_write_flash_dword(ha, faddr,
3172 cpu_to_le32(*dwptr)); 3222 cpu_to_le32(*dwptr));
3173 if (ret) { 3223 if (ret) {
3174 DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program" 3224 ql_dbg(ql_dbg_p3p, vha, 0xb020,
3175 "flash address=%x data=%x.\n", __func__, 3225 "Unable to program flash address=%x data=%x.\n",
3176 ha->host_no, faddr, *dwptr)); 3226 faddr, *dwptr);
3177 break; 3227 break;
3178 } 3228 }
3179 } 3229 }
3180 3230
3181 ret = qla82xx_protect_flash(ha); 3231 ret = qla82xx_protect_flash(ha);
3182 if (ret) 3232 if (ret)
3183 qla_printk(KERN_WARNING, ha, 3233 ql_log(ql_log_warn, vha, 0xb021,
3184 "Unable to protect flash after update.\n"); 3234 "Unable to protect flash after update.\n");
3185write_done: 3235write_done:
3186 if (optrom) 3236 if (optrom)
@@ -3244,9 +3294,12 @@ qla82xx_start_iocbs(srb_t *sp)
3244 3294
3245void qla82xx_rom_lock_recovery(struct qla_hw_data *ha) 3295void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
3246{ 3296{
3297 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3298
3247 if (qla82xx_rom_lock(ha)) 3299 if (qla82xx_rom_lock(ha))
3248 /* Someone else is holding the lock. */ 3300 /* Someone else is holding the lock. */
3249 qla_printk(KERN_INFO, ha, "Resetting rom_lock\n"); 3301 ql_log(ql_log_info, vha, 0xb022,
3302 "Resetting rom_lock.\n");
3250 3303
3251 /* 3304 /*
3252 * Either we got the lock, or someone 3305 * Either we got the lock, or someone
@@ -3313,7 +3366,8 @@ qla82xx_device_bootstrap(scsi_qla_host_t *vha)
3313 3366
3314dev_initialize: 3367dev_initialize:
3315 /* set to DEV_INITIALIZING */ 3368 /* set to DEV_INITIALIZING */
3316 qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n"); 3369 ql_log(ql_log_info, vha, 0x009e,
3370 "HW State: INITIALIZING.\n");
3317 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING); 3371 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
3318 3372
3319 /* Driver that sets device state to initializating sets IDC version */ 3373 /* Driver that sets device state to initializating sets IDC version */
@@ -3324,14 +3378,16 @@ dev_initialize:
3324 qla82xx_idc_lock(ha); 3378 qla82xx_idc_lock(ha);
3325 3379
3326 if (rval != QLA_SUCCESS) { 3380 if (rval != QLA_SUCCESS) {
3327 qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); 3381 ql_log(ql_log_fatal, vha, 0x00ad,
3382 "HW State: FAILED.\n");
3328 qla82xx_clear_drv_active(ha); 3383 qla82xx_clear_drv_active(ha);
3329 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED); 3384 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
3330 return rval; 3385 return rval;
3331 } 3386 }
3332 3387
3333dev_ready: 3388dev_ready:
3334 qla_printk(KERN_INFO, ha, "HW State: READY\n"); 3389 ql_log(ql_log_info, vha, 0x00ae,
3390 "HW State: READY.\n");
3335 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY); 3391 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
3336 3392
3337 return QLA_SUCCESS; 3393 return QLA_SUCCESS;
@@ -3376,15 +3432,15 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
3376 /* quiescence timeout, other functions didn't ack 3432 /* quiescence timeout, other functions didn't ack
3377 * changing the state to DEV_READY 3433 * changing the state to DEV_READY
3378 */ 3434 */
3379 qla_printk(KERN_INFO, ha, 3435 ql_log(ql_log_info, vha, 0xb023,
3380 "%s: QUIESCENT TIMEOUT\n", QLA2XXX_DRIVER_NAME); 3436 "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME);
3381 qla_printk(KERN_INFO, ha, 3437 ql_log(ql_log_info, vha, 0xb024,
3382 "DRV_ACTIVE:%d DRV_STATE:%d\n", drv_active, 3438 "DRV_ACTIVE:%d DRV_STATE:%d.\n",
3383 drv_state); 3439 drv_active, drv_state);
3384 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3440 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3385 QLA82XX_DEV_READY); 3441 QLA82XX_DEV_READY);
3386 qla_printk(KERN_INFO, ha, 3442 ql_log(ql_log_info, vha, 0xb025,
3387 "HW State: DEV_READY\n"); 3443 "HW State: DEV_READY.\n");
3388 qla82xx_idc_unlock(ha); 3444 qla82xx_idc_unlock(ha);
3389 qla2x00_perform_loop_resync(vha); 3445 qla2x00_perform_loop_resync(vha);
3390 qla82xx_idc_lock(ha); 3446 qla82xx_idc_lock(ha);
@@ -3404,7 +3460,8 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
3404 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3460 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3405 /* everyone acked so set the state to DEV_QUIESCENCE */ 3461 /* everyone acked so set the state to DEV_QUIESCENCE */
3406 if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) { 3462 if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
3407 qla_printk(KERN_INFO, ha, "HW State: DEV_QUIESCENT\n"); 3463 ql_log(ql_log_info, vha, 0xb026,
3464 "HW State: DEV_QUIESCENT.\n");
3408 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT); 3465 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
3409 } 3466 }
3410} 3467}
@@ -3441,7 +3498,8 @@ qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3441 struct qla_hw_data *ha = vha->hw; 3498 struct qla_hw_data *ha = vha->hw;
3442 3499
3443 /* Disable the board */ 3500 /* Disable the board */
3444 qla_printk(KERN_INFO, ha, "Disabling the board\n"); 3501 ql_log(ql_log_fatal, vha, 0x00b8,
3502 "Disabling the board.\n");
3445 3503
3446 qla82xx_idc_lock(ha); 3504 qla82xx_idc_lock(ha);
3447 qla82xx_clear_drv_active(ha); 3505 qla82xx_clear_drv_active(ha);
@@ -3492,8 +3550,8 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3492 3550
3493 while (drv_state != drv_active) { 3551 while (drv_state != drv_active) {
3494 if (time_after_eq(jiffies, reset_timeout)) { 3552 if (time_after_eq(jiffies, reset_timeout)) {
3495 qla_printk(KERN_INFO, ha, 3553 ql_log(ql_log_warn, vha, 0x00b5,
3496 "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME); 3554 "Reset timeout.\n");
3497 break; 3555 break;
3498 } 3556 }
3499 qla82xx_idc_unlock(ha); 3557 qla82xx_idc_unlock(ha);
@@ -3504,12 +3562,15 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3504 } 3562 }
3505 3563
3506 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3564 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3507 qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state, 3565 ql_log(ql_log_info, vha, 0x00b6,
3508 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3566 "Device state is 0x%x = %s.\n",
3567 dev_state,
3568 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3509 3569
3510 /* Force to DEV_COLD unless someone else is starting a reset */ 3570 /* Force to DEV_COLD unless someone else is starting a reset */
3511 if (dev_state != QLA82XX_DEV_INITIALIZING) { 3571 if (dev_state != QLA82XX_DEV_INITIALIZING) {
3512 qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n"); 3572 ql_log(ql_log_info, vha, 0x00b7,
3573 "HW State: COLD/RE-INIT.\n");
3513 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 3574 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3514 } 3575 }
3515} 3576}
@@ -3523,8 +3584,12 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3523 fw_heartbeat_counter = qla82xx_rd_32(vha->hw, 3584 fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
3524 QLA82XX_PEG_ALIVE_COUNTER); 3585 QLA82XX_PEG_ALIVE_COUNTER);
3525 /* all 0xff, assume AER/EEH in progress, ignore */ 3586 /* all 0xff, assume AER/EEH in progress, ignore */
3526 if (fw_heartbeat_counter == 0xffffffff) 3587 if (fw_heartbeat_counter == 0xffffffff) {
3588 ql_dbg(ql_dbg_timer, vha, 0x6003,
3589 "FW heartbeat counter is 0xffffffff, "
3590 "returning status=%d.\n", status);
3527 return status; 3591 return status;
3592 }
3528 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { 3593 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
3529 vha->seconds_since_last_heartbeat++; 3594 vha->seconds_since_last_heartbeat++;
3530 /* FW not alive after 2 seconds */ 3595 /* FW not alive after 2 seconds */
@@ -3535,6 +3600,9 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3535 } else 3600 } else
3536 vha->seconds_since_last_heartbeat = 0; 3601 vha->seconds_since_last_heartbeat = 0;
3537 vha->fw_heartbeat_counter = fw_heartbeat_counter; 3602 vha->fw_heartbeat_counter = fw_heartbeat_counter;
3603 if (status)
3604 ql_dbg(ql_dbg_timer, vha, 0x6004,
3605 "Returning status=%d.\n", status);
3538 return status; 3606 return status;
3539} 3607}
3540 3608
@@ -3565,8 +3633,10 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3565 3633
3566 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3634 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3567 old_dev_state = dev_state; 3635 old_dev_state = dev_state;
3568 qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state, 3636 ql_log(ql_log_info, vha, 0x009b,
3569 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3637 "Device state is 0x%x = %s.\n",
3638 dev_state,
3639 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3570 3640
3571 /* wait for 30 seconds for device to go ready */ 3641 /* wait for 30 seconds for device to go ready */
3572 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); 3642 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -3574,9 +3644,8 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3574 while (1) { 3644 while (1) {
3575 3645
3576 if (time_after_eq(jiffies, dev_init_timeout)) { 3646 if (time_after_eq(jiffies, dev_init_timeout)) {
3577 DEBUG(qla_printk(KERN_INFO, ha, 3647 ql_log(ql_log_fatal, vha, 0x009c,
3578 "%s: device init failed!\n", 3648 "Device init failed.\n");
3579 QLA2XXX_DRIVER_NAME));
3580 rval = QLA_FUNCTION_FAILED; 3649 rval = QLA_FUNCTION_FAILED;
3581 break; 3650 break;
3582 } 3651 }
@@ -3586,10 +3655,11 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3586 old_dev_state = dev_state; 3655 old_dev_state = dev_state;
3587 } 3656 }
3588 if (loopcount < 5) { 3657 if (loopcount < 5) {
3589 qla_printk(KERN_INFO, ha, 3658 ql_log(ql_log_info, vha, 0x009d,
3590 "2:Device state is 0x%x = %s\n", dev_state, 3659 "Device state is 0x%x = %s.\n",
3591 dev_state < MAX_STATES ? 3660 dev_state,
3592 qdev_state[dev_state] : "Unknown"); 3661 dev_state < MAX_STATES ? qdev_state[dev_state] :
3662 "Unknown");
3593 } 3663 }
3594 3664
3595 switch (dev_state) { 3665 switch (dev_state) {
@@ -3656,29 +3726,26 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3656 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3726 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3657 if (dev_state == QLA82XX_DEV_NEED_RESET && 3727 if (dev_state == QLA82XX_DEV_NEED_RESET &&
3658 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { 3728 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
3659 qla_printk(KERN_WARNING, ha, 3729 ql_log(ql_log_warn, vha, 0x6001,
3660 "scsi(%ld) %s: Adapter reset needed!\n", 3730 "Adapter reset needed.\n");
3661 vha->host_no, __func__);
3662 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3731 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3663 qla2xxx_wake_dpc(vha); 3732 qla2xxx_wake_dpc(vha);
3664 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && 3733 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
3665 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { 3734 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
3666 DEBUG(qla_printk(KERN_INFO, ha, 3735 ql_log(ql_log_warn, vha, 0x6002,
3667 "scsi(%ld) %s - detected quiescence needed\n", 3736 "Quiescent needed.\n");
3668 vha->host_no, __func__));
3669 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 3737 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
3670 qla2xxx_wake_dpc(vha); 3738 qla2xxx_wake_dpc(vha);
3671 } else { 3739 } else {
3672 if (qla82xx_check_fw_alive(vha)) { 3740 if (qla82xx_check_fw_alive(vha)) {
3673 halt_status = qla82xx_rd_32(ha, 3741 halt_status = qla82xx_rd_32(ha,
3674 QLA82XX_PEG_HALT_STATUS1); 3742 QLA82XX_PEG_HALT_STATUS1);
3675 qla_printk(KERN_INFO, ha, 3743 ql_dbg(ql_dbg_timer, vha, 0x6005,
3676 "scsi(%ld): %s, Dumping hw/fw registers:\n " 3744 "dumping hw/fw registers:.\n "
3677 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n " 3745 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
3678 " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n " 3746 " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
3679 " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n " 3747 " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n "
3680 " PEG_NET_4_PC: 0x%x\n", 3748 " PEG_NET_4_PC: 0x%x.\n", halt_status,
3681 vha->host_no, __func__, halt_status,
3682 qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2), 3749 qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2),
3683 qla82xx_rd_32(ha, 3750 qla82xx_rd_32(ha,
3684 QLA82XX_CRB_PEG_NET_0 + 0x3c), 3751 QLA82XX_CRB_PEG_NET_0 + 0x3c),
@@ -3694,9 +3761,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3694 set_bit(ISP_UNRECOVERABLE, 3761 set_bit(ISP_UNRECOVERABLE,
3695 &vha->dpc_flags); 3762 &vha->dpc_flags);
3696 } else { 3763 } else {
3697 qla_printk(KERN_INFO, ha, 3764 ql_log(ql_log_info, vha, 0x6006,
3698 "scsi(%ld): %s - detect abort needed\n", 3765 "Detect abort needed.\n");
3699 vha->host_no, __func__);
3700 set_bit(ISP_ABORT_NEEDED, 3766 set_bit(ISP_ABORT_NEEDED,
3701 &vha->dpc_flags); 3767 &vha->dpc_flags);
3702 } 3768 }
@@ -3704,10 +3770,10 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3704 ha->flags.isp82xx_fw_hung = 1; 3770 ha->flags.isp82xx_fw_hung = 1;
3705 if (ha->flags.mbox_busy) { 3771 if (ha->flags.mbox_busy) {
3706 ha->flags.mbox_int = 1; 3772 ha->flags.mbox_int = 1;
3707 DEBUG2(qla_printk(KERN_ERR, ha, 3773 ql_log(ql_log_warn, vha, 0x6007,
3708 "scsi(%ld) Due to fw hung, doing " 3774 "Due to FW hung, doing "
3709 "premature completion of mbx " 3775 "premature completion of mbx "
3710 "command\n", vha->host_no)); 3776 "command.\n");
3711 if (test_bit(MBX_INTR_WAIT, 3777 if (test_bit(MBX_INTR_WAIT,
3712 &ha->mbx_cmd_flags)) 3778 &ha->mbx_cmd_flags))
3713 complete(&ha->mbx_intr_comp); 3779 complete(&ha->mbx_intr_comp);
@@ -3742,9 +3808,8 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3742 uint32_t dev_state; 3808 uint32_t dev_state;
3743 3809
3744 if (vha->device_flags & DFLG_DEV_FAILED) { 3810 if (vha->device_flags & DFLG_DEV_FAILED) {
3745 qla_printk(KERN_WARNING, ha, 3811 ql_log(ql_log_warn, vha, 0x8024,
3746 "%s(%ld): Device in failed state, " 3812 "Device in failed state, exiting.\n");
3747 "Exiting.\n", __func__, vha->host_no);
3748 return QLA_SUCCESS; 3813 return QLA_SUCCESS;
3749 } 3814 }
3750 ha->flags.isp82xx_reset_hdlr_active = 1; 3815 ha->flags.isp82xx_reset_hdlr_active = 1;
@@ -3752,13 +3817,14 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3752 qla82xx_idc_lock(ha); 3817 qla82xx_idc_lock(ha);
3753 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3818 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3754 if (dev_state == QLA82XX_DEV_READY) { 3819 if (dev_state == QLA82XX_DEV_READY) {
3755 qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n"); 3820 ql_log(ql_log_info, vha, 0x8025,
3821 "HW State: NEED RESET.\n");
3756 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3822 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3757 QLA82XX_DEV_NEED_RESET); 3823 QLA82XX_DEV_NEED_RESET);
3758 } else 3824 } else
3759 qla_printk(KERN_INFO, ha, "HW State: %s\n", 3825 ql_log(ql_log_info, vha, 0x8026,
3760 dev_state < MAX_STATES ? 3826 "Hw State: %s.\n", dev_state < MAX_STATES ?
3761 qdev_state[dev_state] : "Unknown"); 3827 qdev_state[dev_state] : "Unknown");
3762 qla82xx_idc_unlock(ha); 3828 qla82xx_idc_unlock(ha);
3763 3829
3764 rval = qla82xx_device_state_handler(vha); 3830 rval = qla82xx_device_state_handler(vha);
@@ -3777,9 +3843,9 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3777 vha->flags.online = 1; 3843 vha->flags.online = 1;
3778 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 3844 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3779 if (ha->isp_abort_cnt == 0) { 3845 if (ha->isp_abort_cnt == 0) {
3780 qla_printk(KERN_WARNING, ha, 3846 ql_log(ql_log_warn, vha, 0x8027,
3781 "ISP error recovery failed - " 3847 "ISP error recover failed - board "
3782 "board disabled\n"); 3848 "disabled.\n");
3783 /* 3849 /*
3784 * The next call disables the board 3850 * The next call disables the board
3785 * completely. 3851 * completely.
@@ -3791,16 +3857,16 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3791 rval = QLA_SUCCESS; 3857 rval = QLA_SUCCESS;
3792 } else { /* schedule another ISP abort */ 3858 } else { /* schedule another ISP abort */
3793 ha->isp_abort_cnt--; 3859 ha->isp_abort_cnt--;
3794 DEBUG(qla_printk(KERN_INFO, ha, 3860 ql_log(ql_log_warn, vha, 0x8036,
3795 "qla%ld: ISP abort - retry remaining %d\n", 3861 "ISP abort - retry remaining %d.\n",
3796 vha->host_no, ha->isp_abort_cnt)); 3862 ha->isp_abort_cnt);
3797 rval = QLA_FUNCTION_FAILED; 3863 rval = QLA_FUNCTION_FAILED;
3798 } 3864 }
3799 } else { 3865 } else {
3800 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 3866 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3801 DEBUG(qla_printk(KERN_INFO, ha, 3867 ql_dbg(ql_dbg_taskm, vha, 0x8029,
3802 "(%ld): ISP error recovery - retrying (%d) " 3868 "ISP error recovery - retrying (%d) more times.\n",
3803 "more times\n", vha->host_no, ha->isp_abort_cnt)); 3869 ha->isp_abort_cnt);
3804 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3870 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3805 rval = QLA_FUNCTION_FAILED; 3871 rval = QLA_FUNCTION_FAILED;
3806 } 3872 }
@@ -3872,8 +3938,8 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
3872 break; 3938 break;
3873 } 3939 }
3874 } 3940 }
3875 DEBUG2(printk(KERN_INFO 3941 ql_dbg(ql_dbg_p3p, vha, 0xb027,
3876 "%s status=%d\n", __func__, status)); 3942 "%s status=%d.\n", status);
3877 3943
3878 return status; 3944 return status;
3879} 3945}
@@ -3902,6 +3968,9 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3902 } 3968 }
3903 } 3969 }
3904 } 3970 }
3971 ql_dbg(ql_dbg_init, vha, 0x00b0,
3972 "Entered %s fw_hung=%d.\n",
3973 __func__, ha->flags.isp82xx_fw_hung);
3905 3974
3906 /* Abort all commands gracefully if fw NOT hung */ 3975 /* Abort all commands gracefully if fw NOT hung */
3907 if (!ha->flags.isp82xx_fw_hung) { 3976 if (!ha->flags.isp82xx_fw_hung) {
@@ -3922,13 +3991,13 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3922 spin_unlock_irqrestore( 3991 spin_unlock_irqrestore(
3923 &ha->hardware_lock, flags); 3992 &ha->hardware_lock, flags);
3924 if (ha->isp_ops->abort_command(sp)) { 3993 if (ha->isp_ops->abort_command(sp)) {
3925 qla_printk(KERN_INFO, ha, 3994 ql_log(ql_log_info, vha,
3926 "scsi(%ld): mbx abort command failed in %s\n", 3995 0x00b1,
3927 vha->host_no, __func__); 3996 "mbx abort failed.\n");
3928 } else { 3997 } else {
3929 qla_printk(KERN_INFO, ha, 3998 ql_log(ql_log_info, vha,
3930 "scsi(%ld): mbx abort command success in %s\n", 3999 0x00b2,
3931 vha->host_no, __func__); 4000 "mbx abort success.\n");
3932 } 4001 }
3933 spin_lock_irqsave(&ha->hardware_lock, flags); 4002 spin_lock_irqsave(&ha->hardware_lock, flags);
3934 } 4003 }
@@ -3940,8 +4009,9 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3940 /* Wait for pending cmds (physical and virtual) to complete */ 4009 /* Wait for pending cmds (physical and virtual) to complete */
3941 if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0, 4010 if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
3942 WAIT_HOST) == QLA_SUCCESS) { 4011 WAIT_HOST) == QLA_SUCCESS) {
3943 DEBUG2(qla_printk(KERN_INFO, ha, 4012 ql_dbg(ql_dbg_init, vha, 0x00b3,
3944 "Done wait for pending commands\n")); 4013 "Done wait for "
4014 "pending commands.\n");
3945 } 4015 }
3946 } 4016 }
3947} 4017}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f461925a9dfc..e02df276804e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -35,6 +35,10 @@ static struct kmem_cache *srb_cachep;
35 * CT6 CTX allocation cache 35 * CT6 CTX allocation cache
36 */ 36 */
37static struct kmem_cache *ctx_cachep; 37static struct kmem_cache *ctx_cachep;
38/*
39 * error level for logging
40 */
41int ql_errlev = ql_log_all;
38 42
39int ql2xlogintimeout = 20; 43int ql2xlogintimeout = 20;
40module_param(ql2xlogintimeout, int, S_IRUGO); 44module_param(ql2xlogintimeout, int, S_IRUGO);
@@ -69,8 +73,17 @@ MODULE_PARM_DESC(ql2xallocfwdump,
69int ql2xextended_error_logging; 73int ql2xextended_error_logging;
70module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR); 74module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
71MODULE_PARM_DESC(ql2xextended_error_logging, 75MODULE_PARM_DESC(ql2xextended_error_logging,
72 "Option to enable extended error logging, " 76 "Option to enable extended error logging,\n"
73 "Default is 0 - no logging. 1 - log errors."); 77 "\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
78 "\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
79 "\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
80 "\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
81 "\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
82 "\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
83 "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
84 "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
85 "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
86 "\t\tDo LOGICAL OR of the value to enable more than one level");
74 87
75int ql2xshiftctondsd = 6; 88int ql2xshiftctondsd = 6;
76module_param(ql2xshiftctondsd, int, S_IRUGO); 89module_param(ql2xshiftctondsd, int, S_IRUGO);
@@ -128,8 +141,8 @@ MODULE_PARM_DESC(ql2xmultique_tag,
128int ql2xfwloadbin; 141int ql2xfwloadbin;
129module_param(ql2xfwloadbin, int, S_IRUGO); 142module_param(ql2xfwloadbin, int, S_IRUGO);
130MODULE_PARM_DESC(ql2xfwloadbin, 143MODULE_PARM_DESC(ql2xfwloadbin,
131 "Option to specify location from which to load ISP firmware:\n" 144 "Option to specify location from which to load ISP firmware:.\n"
132 " 2 -- load firmware via the request_firmware() (hotplug)\n" 145 " 2 -- load firmware via the request_firmware() (hotplug).\n"
133 " interface.\n" 146 " interface.\n"
134 " 1 -- load firmware from flash.\n" 147 " 1 -- load firmware from flash.\n"
135 " 0 -- use default semantics.\n"); 148 " 0 -- use default semantics.\n");
@@ -143,7 +156,7 @@ MODULE_PARM_DESC(ql2xetsenable,
143int ql2xdbwr = 1; 156int ql2xdbwr = 1;
144module_param(ql2xdbwr, int, S_IRUGO); 157module_param(ql2xdbwr, int, S_IRUGO);
145MODULE_PARM_DESC(ql2xdbwr, 158MODULE_PARM_DESC(ql2xdbwr,
146 "Option to specify scheme for request queue posting\n" 159 "Option to specify scheme for request queue posting.\n"
147 " 0 -- Regular doorbell.\n" 160 " 0 -- Regular doorbell.\n"
148 " 1 -- CAMRAM doorbell (faster).\n"); 161 " 1 -- CAMRAM doorbell (faster).\n");
149 162
@@ -168,7 +181,7 @@ MODULE_PARM_DESC(ql2xasynctmfenable,
168int ql2xdontresethba; 181int ql2xdontresethba;
169module_param(ql2xdontresethba, int, S_IRUGO); 182module_param(ql2xdontresethba, int, S_IRUGO);
170MODULE_PARM_DESC(ql2xdontresethba, 183MODULE_PARM_DESC(ql2xdontresethba,
171 "Option to specify reset behaviour\n" 184 "Option to specify reset behaviour.\n"
172 " 0 (Default) -- Reset on failure.\n" 185 " 0 (Default) -- Reset on failure.\n"
173 " 1 -- Do not reset on failure.\n"); 186 " 1 -- Do not reset on failure.\n");
174 187
@@ -247,8 +260,11 @@ static inline void
247qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval) 260qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
248{ 261{
249 /* Currently used for 82XX only. */ 262 /* Currently used for 82XX only. */
250 if (vha->device_flags & DFLG_DEV_FAILED) 263 if (vha->device_flags & DFLG_DEV_FAILED) {
264 ql_dbg(ql_dbg_timer, vha, 0x600d,
265 "Device in a failed state, returning.\n");
251 return; 266 return;
267 }
252 268
253 mod_timer(&vha->timer, jiffies + interval * HZ); 269 mod_timer(&vha->timer, jiffies + interval * HZ);
254} 270}
@@ -273,19 +289,20 @@ static void qla2x00_sp_free_dma(srb_t *);
273/* -------------------------------------------------------------------------- */ 289/* -------------------------------------------------------------------------- */
274static int qla2x00_alloc_queues(struct qla_hw_data *ha) 290static int qla2x00_alloc_queues(struct qla_hw_data *ha)
275{ 291{
292 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
276 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues, 293 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
277 GFP_KERNEL); 294 GFP_KERNEL);
278 if (!ha->req_q_map) { 295 if (!ha->req_q_map) {
279 qla_printk(KERN_WARNING, ha, 296 ql_log(ql_log_fatal, vha, 0x003b,
280 "Unable to allocate memory for request queue ptrs\n"); 297 "Unable to allocate memory for request queue ptrs.\n");
281 goto fail_req_map; 298 goto fail_req_map;
282 } 299 }
283 300
284 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues, 301 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
285 GFP_KERNEL); 302 GFP_KERNEL);
286 if (!ha->rsp_q_map) { 303 if (!ha->rsp_q_map) {
287 qla_printk(KERN_WARNING, ha, 304 ql_log(ql_log_fatal, vha, 0x003c,
288 "Unable to allocate memory for response queue ptrs\n"); 305 "Unable to allocate memory for response queue ptrs.\n");
289 goto fail_rsp_map; 306 goto fail_rsp_map;
290 } 307 }
291 set_bit(0, ha->rsp_qid_map); 308 set_bit(0, ha->rsp_qid_map);
@@ -349,8 +366,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
349 struct qla_hw_data *ha = vha->hw; 366 struct qla_hw_data *ha = vha->hw;
350 367
351 if (!(ha->fw_attributes & BIT_6)) { 368 if (!(ha->fw_attributes & BIT_6)) {
352 qla_printk(KERN_INFO, ha, 369 ql_log(ql_log_warn, vha, 0x00d8,
353 "Firmware is not multi-queue capable\n"); 370 "Firmware is not multi-queue capable.\n");
354 goto fail; 371 goto fail;
355 } 372 }
356 if (ql2xmultique_tag) { 373 if (ql2xmultique_tag) {
@@ -359,8 +376,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
359 req = qla25xx_create_req_que(ha, options, 0, 0, -1, 376 req = qla25xx_create_req_que(ha, options, 0, 0, -1,
360 QLA_DEFAULT_QUE_QOS); 377 QLA_DEFAULT_QUE_QOS);
361 if (!req) { 378 if (!req) {
362 qla_printk(KERN_WARNING, ha, 379 ql_log(ql_log_warn, vha, 0x00e0,
363 "Can't create request queue\n"); 380 "Failed to create request queue.\n");
364 goto fail; 381 goto fail;
365 } 382 }
366 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1); 383 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
@@ -369,17 +386,20 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
369 for (ques = 1; ques < ha->max_rsp_queues; ques++) { 386 for (ques = 1; ques < ha->max_rsp_queues; ques++) {
370 ret = qla25xx_create_rsp_que(ha, options, 0, 0, req); 387 ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
371 if (!ret) { 388 if (!ret) {
372 qla_printk(KERN_WARNING, ha, 389 ql_log(ql_log_warn, vha, 0x00e8,
373 "Response Queue create failed\n"); 390 "Failed to create response queue.\n");
374 goto fail2; 391 goto fail2;
375 } 392 }
376 } 393 }
377 ha->flags.cpu_affinity_enabled = 1; 394 ha->flags.cpu_affinity_enabled = 1;
378 395 ql_dbg(ql_dbg_multiq, vha, 0xc007,
379 DEBUG2(qla_printk(KERN_INFO, ha, 396 "CPU affinity mode enalbed, "
380 "CPU affinity mode enabled, no. of response" 397 "no. of response queues:%d no. of request queues:%d.\n",
381 " queues:%d, no. of request queues:%d\n", 398 ha->max_rsp_queues, ha->max_req_queues);
382 ha->max_rsp_queues, ha->max_req_queues)); 399 ql_dbg(ql_dbg_init, vha, 0x00e9,
400 "CPU affinity mode enalbed, "
401 "no. of response queues:%d no. of request queues:%d.\n",
402 ha->max_rsp_queues, ha->max_req_queues);
383 } 403 }
384 return 0; 404 return 0;
385fail2: 405fail2:
@@ -526,8 +546,11 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
526 struct qla_hw_data *ha = vha->hw; 546 struct qla_hw_data *ha = vha->hw;
527 547
528 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 548 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
529 if (!sp) 549 if (!sp) {
550 ql_log(ql_log_warn, vha, 0x3006,
551 "Memory allocation failed for sp.\n");
530 return sp; 552 return sp;
553 }
531 554
532 atomic_set(&sp->ref_count, 1); 555 atomic_set(&sp->ref_count, 1);
533 sp->fcport = fcport; 556 sp->fcport = fcport;
@@ -551,30 +574,43 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
551 int rval; 574 int rval;
552 575
553 if (ha->flags.eeh_busy) { 576 if (ha->flags.eeh_busy) {
554 if (ha->flags.pci_channel_io_perm_failure) 577 if (ha->flags.pci_channel_io_perm_failure) {
578 ql_dbg(ql_dbg_io, vha, 0x3001,
579 "PCI Channel IO permanent failure, exiting "
580 "cmd=%p.\n", cmd);
555 cmd->result = DID_NO_CONNECT << 16; 581 cmd->result = DID_NO_CONNECT << 16;
556 else 582 } else {
583 ql_dbg(ql_dbg_io, vha, 0x3002,
584 "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
557 cmd->result = DID_REQUEUE << 16; 585 cmd->result = DID_REQUEUE << 16;
586 }
558 goto qc24_fail_command; 587 goto qc24_fail_command;
559 } 588 }
560 589
561 rval = fc_remote_port_chkready(rport); 590 rval = fc_remote_port_chkready(rport);
562 if (rval) { 591 if (rval) {
563 cmd->result = rval; 592 cmd->result = rval;
593 ql_dbg(ql_dbg_io, vha, 0x3003,
594 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
595 cmd, rval);
564 goto qc24_fail_command; 596 goto qc24_fail_command;
565 } 597 }
566 598
567 if (!vha->flags.difdix_supported && 599 if (!vha->flags.difdix_supported &&
568 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 600 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
569 DEBUG2(qla_printk(KERN_ERR, ha, 601 ql_dbg(ql_dbg_io, vha, 0x3004,
570 "DIF Cap Not Reg, fail DIF capable cmd's:%x\n", 602 "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
571 cmd->cmnd[0])); 603 cmd);
572 cmd->result = DID_NO_CONNECT << 16; 604 cmd->result = DID_NO_CONNECT << 16;
573 goto qc24_fail_command; 605 goto qc24_fail_command;
574 } 606 }
575 if (atomic_read(&fcport->state) != FCS_ONLINE) { 607 if (atomic_read(&fcport->state) != FCS_ONLINE) {
576 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 608 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
577 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 609 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
610 ql_dbg(ql_dbg_io, vha, 0x3005,
611 "Returning DNC, fcport_state=%d loop_state=%d.\n",
612 atomic_read(&fcport->state),
613 atomic_read(&base_vha->loop_state));
578 cmd->result = DID_NO_CONNECT << 16; 614 cmd->result = DID_NO_CONNECT << 16;
579 goto qc24_fail_command; 615 goto qc24_fail_command;
580 } 616 }
@@ -586,8 +622,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
586 goto qc24_host_busy; 622 goto qc24_host_busy;
587 623
588 rval = ha->isp_ops->start_scsi(sp); 624 rval = ha->isp_ops->start_scsi(sp);
589 if (rval != QLA_SUCCESS) 625 if (rval != QLA_SUCCESS) {
626 ql_dbg(ql_dbg_io, vha, 0x3013,
627 "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
590 goto qc24_host_busy_free_sp; 628 goto qc24_host_busy_free_sp;
629 }
591 630
592 return 0; 631 return 0;
593 632
@@ -630,7 +669,8 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
630 int ret = QLA_SUCCESS; 669 int ret = QLA_SUCCESS;
631 670
632 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) { 671 if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
633 DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n")); 672 ql_dbg(ql_dbg_taskm, vha, 0x8005,
673 "Return:eh_wait.\n");
634 return ret; 674 return ret;
635 } 675 }
636 676
@@ -723,7 +763,8 @@ qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
723 else 763 else
724 return_status = QLA_FUNCTION_FAILED; 764 return_status = QLA_FUNCTION_FAILED;
725 765
726 DEBUG2(printk("%s return_status=%d\n", __func__, return_status)); 766 ql_dbg(ql_dbg_taskm, vha, 0x8019,
767 "%s return status=%d.\n", __func__, return_status);
727 768
728 return return_status; 769 return return_status;
729} 770}
@@ -831,10 +872,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
831 int wait = 0; 872 int wait = 0;
832 struct qla_hw_data *ha = vha->hw; 873 struct qla_hw_data *ha = vha->hw;
833 874
875 ql_dbg(ql_dbg_taskm, vha, 0x8000,
876 "Entered %s for cmd=%p.\n", __func__, cmd);
834 if (!CMD_SP(cmd)) 877 if (!CMD_SP(cmd))
835 return SUCCESS; 878 return SUCCESS;
836 879
837 ret = fc_block_scsi_eh(cmd); 880 ret = fc_block_scsi_eh(cmd);
881 ql_dbg(ql_dbg_taskm, vha, 0x8001,
882 "Return value of fc_block_scsi_eh=%d.\n", ret);
838 if (ret != 0) 883 if (ret != 0)
839 return ret; 884 return ret;
840 ret = SUCCESS; 885 ret = SUCCESS;
@@ -849,20 +894,19 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
849 return SUCCESS; 894 return SUCCESS;
850 } 895 }
851 896
852 DEBUG2(printk("%s(%ld): aborting sp %p from RISC.", 897 ql_dbg(ql_dbg_taskm, vha, 0x8002,
853 __func__, vha->host_no, sp)); 898 "Aborting sp=%p cmd=%p from RISC ", sp, cmd);
854 899
855 /* Get a reference to the sp and drop the lock.*/ 900 /* Get a reference to the sp and drop the lock.*/
856 sp_get(sp); 901 sp_get(sp);
857 902
858 spin_unlock_irqrestore(&ha->hardware_lock, flags); 903 spin_unlock_irqrestore(&ha->hardware_lock, flags);
859 if (ha->isp_ops->abort_command(sp)) { 904 if (ha->isp_ops->abort_command(sp)) {
860 DEBUG2(printk("%s(%ld): abort_command " 905 ql_dbg(ql_dbg_taskm, vha, 0x8003,
861 "mbx failed.\n", __func__, vha->host_no)); 906 "Abort command mbx failed for cmd=%p.\n", cmd);
862 ret = FAILED;
863 } else { 907 } else {
864 DEBUG3(printk("%s(%ld): abort_command " 908 ql_dbg(ql_dbg_taskm, vha, 0x8004,
865 "mbx success.\n", __func__, vha->host_no)); 909 "Abort command mbx success.\n");
866 wait = 1; 910 wait = 1;
867 } 911 }
868 qla2x00_sp_compl(ha, sp); 912 qla2x00_sp_compl(ha, sp);
@@ -870,16 +914,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
870 /* Wait for the command to be returned. */ 914 /* Wait for the command to be returned. */
871 if (wait) { 915 if (wait) {
872 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) { 916 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
873 qla_printk(KERN_ERR, ha, 917 ql_log(ql_log_warn, vha, 0x8006,
874 "scsi(%ld:%d:%d): Abort handler timed out -- %x.\n", 918 "Abort handler timed out for cmd=%p.\n", cmd);
875 vha->host_no, id, lun, ret);
876 ret = FAILED; 919 ret = FAILED;
877 } 920 }
878 } 921 }
879 922
880 qla_printk(KERN_INFO, ha, 923 ql_log(ql_log_info, vha, 0x801c,
881 "scsi(%ld:%d:%d): Abort command issued -- %d %x.\n", 924 "Abort command issued -- %d %x.\n", wait, ret);
882 vha->host_no, id, lun, wait, ret);
883 925
884 return ret; 926 return ret;
885} 927}
@@ -947,40 +989,59 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
947 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 989 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
948 int err; 990 int err;
949 991
950 if (!fcport) 992 if (!fcport) {
993 ql_log(ql_log_warn, vha, 0x8007,
994 "fcport is NULL.\n");
951 return FAILED; 995 return FAILED;
996 }
952 997
953 err = fc_block_scsi_eh(cmd); 998 err = fc_block_scsi_eh(cmd);
999 ql_dbg(ql_dbg_taskm, vha, 0x8008,
1000 "fc_block_scsi_eh ret=%d.\n", err);
954 if (err != 0) 1001 if (err != 0)
955 return err; 1002 return err;
956 1003
957 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n", 1004 ql_log(ql_log_info, vha, 0x8009,
958 vha->host_no, cmd->device->id, cmd->device->lun, name); 1005 "%s RESET ISSUED for id %d lun %d cmd=%p.\n", name,
1006 cmd->device->id, cmd->device->lun, cmd);
959 1007
960 err = 0; 1008 err = 0;
961 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) 1009 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1010 ql_log(ql_log_warn, vha, 0x800a,
1011 "Wait for hba online failed for cmd=%p.\n", cmd);
962 goto eh_reset_failed; 1012 goto eh_reset_failed;
1013 }
963 err = 1; 1014 err = 1;
964 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) 1015 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
1016 ql_log(ql_log_warn, vha, 0x800b,
1017 "Wait for loop ready failed for cmd=%p.\n", cmd);
965 goto eh_reset_failed; 1018 goto eh_reset_failed;
1019 }
966 err = 2; 1020 err = 2;
967 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1) 1021 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
968 != QLA_SUCCESS) 1022 != QLA_SUCCESS) {
1023 ql_log(ql_log_warn, vha, 0x800c,
1024 "do_reset failed for cmd=%p.\n", cmd);
969 goto eh_reset_failed; 1025 goto eh_reset_failed;
1026 }
970 err = 3; 1027 err = 3;
971 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, 1028 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
972 cmd->device->lun, type) != QLA_SUCCESS) 1029 cmd->device->lun, type) != QLA_SUCCESS) {
1030 ql_log(ql_log_warn, vha, 0x800d,
1031 "wait for peding cmds failed for cmd=%p.\n", cmd);
973 goto eh_reset_failed; 1032 goto eh_reset_failed;
1033 }
974 1034
975 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n", 1035 ql_log(ql_log_info, vha, 0x800e,
976 vha->host_no, cmd->device->id, cmd->device->lun, name); 1036 "%s RESET SUCCEEDED for id %d lun %d cmd=%p.\n", name,
1037 cmd->device->id, cmd->device->lun, cmd);
977 1038
978 return SUCCESS; 1039 return SUCCESS;
979 1040
980eh_reset_failed: 1041eh_reset_failed:
981 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n" 1042 ql_log(ql_log_info, vha, 0x800f,
982 , vha->host_no, cmd->device->id, cmd->device->lun, name, 1043 "%s RESET FAILED: %s for id %d lun %d cmd=%p.\n", name,
983 reset_errors[err]); 1044 reset_errors[err], cmd->device->id, cmd->device->lun);
984 return FAILED; 1045 return FAILED;
985} 1046}
986 1047
@@ -1030,19 +1091,25 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1030 id = cmd->device->id; 1091 id = cmd->device->id;
1031 lun = cmd->device->lun; 1092 lun = cmd->device->lun;
1032 1093
1033 if (!fcport) 1094 if (!fcport) {
1095 ql_log(ql_log_warn, vha, 0x8010,
1096 "fcport is NULL.\n");
1034 return ret; 1097 return ret;
1098 }
1035 1099
1036 ret = fc_block_scsi_eh(cmd); 1100 ret = fc_block_scsi_eh(cmd);
1101 ql_dbg(ql_dbg_taskm, vha, 0x8011,
1102 "fc_block_scsi_eh ret=%d.\n", ret);
1037 if (ret != 0) 1103 if (ret != 0)
1038 return ret; 1104 return ret;
1039 ret = FAILED; 1105 ret = FAILED;
1040 1106
1041 qla_printk(KERN_INFO, vha->hw, 1107 ql_log(ql_log_info, vha, 0x8012,
1042 "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun); 1108 "BUS RESET ISSUED for id %d lun %d.\n", id, lun);
1043 1109
1044 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 1110 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1045 DEBUG2(printk("%s failed:board disabled\n",__func__)); 1111 ql_log(ql_log_fatal, vha, 0x8013,
1112 "Wait for hba online failed board disabled.\n");
1046 goto eh_bus_reset_done; 1113 goto eh_bus_reset_done;
1047 } 1114 }
1048 1115
@@ -1055,12 +1122,15 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1055 1122
1056 /* Flush outstanding commands. */ 1123 /* Flush outstanding commands. */
1057 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) != 1124 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
1058 QLA_SUCCESS) 1125 QLA_SUCCESS) {
1126 ql_log(ql_log_warn, vha, 0x8014,
1127 "Wait for pending commands failed.\n");
1059 ret = FAILED; 1128 ret = FAILED;
1129 }
1060 1130
1061eh_bus_reset_done: 1131eh_bus_reset_done:
1062 qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__, 1132 ql_log(ql_log_warn, vha, 0x802b,
1063 (ret == FAILED) ? "failed" : "succeeded"); 1133 "BUS RESET %s.\n", (ret == FAILED) ? "FAILED" : "SUCCEDED");
1064 1134
1065 return ret; 1135 return ret;
1066} 1136}
@@ -1093,16 +1163,21 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1093 id = cmd->device->id; 1163 id = cmd->device->id;
1094 lun = cmd->device->lun; 1164 lun = cmd->device->lun;
1095 1165
1096 if (!fcport) 1166 if (!fcport) {
1167 ql_log(ql_log_warn, vha, 0x8016,
1168 "fcport is NULL.\n");
1097 return ret; 1169 return ret;
1170 }
1098 1171
1099 ret = fc_block_scsi_eh(cmd); 1172 ret = fc_block_scsi_eh(cmd);
1173 ql_dbg(ql_dbg_taskm, vha, 0x8017,
1174 "fc_block_scsi_eh ret=%d.\n", ret);
1100 if (ret != 0) 1175 if (ret != 0)
1101 return ret; 1176 return ret;
1102 ret = FAILED; 1177 ret = FAILED;
1103 1178
1104 qla_printk(KERN_INFO, ha, 1179 ql_log(ql_log_info, vha, 0x8018,
1105 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun); 1180 "ADAPTER RESET ISSUED for id %d lun %d.\n", id, lun);
1106 1181
1107 if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS) 1182 if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
1108 goto eh_host_reset_lock; 1183 goto eh_host_reset_lock;
@@ -1137,8 +1212,11 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1137 /* failed. schedule dpc to try */ 1212 /* failed. schedule dpc to try */
1138 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 1213 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1139 1214
1140 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) 1215 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1216 ql_log(ql_log_warn, vha, 0x802a,
1217 "wait for hba online failed.\n");
1141 goto eh_host_reset_lock; 1218 goto eh_host_reset_lock;
1219 }
1142 } 1220 }
1143 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 1221 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1144 } 1222 }
@@ -1149,7 +1227,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1149 ret = SUCCESS; 1227 ret = SUCCESS;
1150 1228
1151eh_host_reset_lock: 1229eh_host_reset_lock:
1152 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 1230 qla_printk(KERN_INFO, ha, "%s: reset %s.\n", __func__,
1153 (ret == FAILED) ? "failed" : "succeeded"); 1231 (ret == FAILED) ? "failed" : "succeeded");
1154 1232
1155 return ret; 1233 return ret;
@@ -1179,9 +1257,9 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1179 1257
1180 ret = ha->isp_ops->target_reset(fcport, 0, 0); 1258 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1181 if (ret != QLA_SUCCESS) { 1259 if (ret != QLA_SUCCESS) {
1182 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1260 ql_dbg(ql_dbg_taskm, vha, 0x802c,
1183 "target_reset=%d d_id=%x.\n", __func__, 1261 "Bus Reset failed: Target Reset=%d "
1184 vha->host_no, ret, fcport->d_id.b24)); 1262 "d_id=%x.\n", ret, fcport->d_id.b24);
1185 } 1263 }
1186 } 1264 }
1187 } 1265 }
@@ -1189,9 +1267,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1189 if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) { 1267 if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
1190 ret = qla2x00_full_login_lip(vha); 1268 ret = qla2x00_full_login_lip(vha);
1191 if (ret != QLA_SUCCESS) { 1269 if (ret != QLA_SUCCESS) {
1192 DEBUG2_3(printk("%s(%ld): failed: " 1270 ql_dbg(ql_dbg_taskm, vha, 0x802d,
1193 "full_login_lip=%d.\n", __func__, vha->host_no, 1271 "full_login_lip=%d.\n", ret);
1194 ret));
1195 } 1272 }
1196 atomic_set(&vha->loop_state, LOOP_DOWN); 1273 atomic_set(&vha->loop_state, LOOP_DOWN);
1197 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1274 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -1202,8 +1279,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1202 if (ha->flags.enable_lip_reset) { 1279 if (ha->flags.enable_lip_reset) {
1203 ret = qla2x00_lip_reset(vha); 1280 ret = qla2x00_lip_reset(vha);
1204 if (ret != QLA_SUCCESS) { 1281 if (ret != QLA_SUCCESS) {
1205 DEBUG2_3(printk("%s(%ld): failed: " 1282 ql_dbg(ql_dbg_taskm, vha, 0x802e,
1206 "lip_reset=%d.\n", __func__, vha->host_no, ret)); 1283 "lip_reset failed (%d).\n", ret);
1207 } else 1284 } else
1208 qla2x00_wait_for_loop_ready(vha); 1285 qla2x00_wait_for_loop_ready(vha);
1209 } 1286 }
@@ -1302,17 +1379,17 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
1302 if (!scsi_track_queue_full(sdev, qdepth)) 1379 if (!scsi_track_queue_full(sdev, qdepth))
1303 return; 1380 return;
1304 1381
1305 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw, 1382 ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
1306 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n", 1383 "Queue depth adjusted-down "
1307 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun, 1384 "to %d for scsi(%ld:%d:%d:%d).\n",
1308 sdev->queue_depth)); 1385 sdev->queue_depth, fcport->vha->host_no,
1386 sdev->channel, sdev->id, sdev->lun);
1309} 1387}
1310 1388
1311static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth) 1389static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1312{ 1390{
1313 fc_port_t *fcport = sdev->hostdata; 1391 fc_port_t *fcport = sdev->hostdata;
1314 struct scsi_qla_host *vha = fcport->vha; 1392 struct scsi_qla_host *vha = fcport->vha;
1315 struct qla_hw_data *ha = vha->hw;
1316 struct req_que *req = NULL; 1393 struct req_que *req = NULL;
1317 1394
1318 req = vha->req; 1395 req = vha->req;
@@ -1327,10 +1404,11 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1327 else 1404 else
1328 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth); 1405 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
1329 1406
1330 DEBUG2(qla_printk(KERN_INFO, ha, 1407 ql_dbg(ql_dbg_io, vha, 0x302a,
1331 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n", 1408 "Queue depth adjusted-up to %d for "
1332 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun, 1409 "scsi(%ld:%d:%d:%d).\n",
1333 sdev->queue_depth)); 1410 sdev->queue_depth, fcport->vha->host_no,
1411 sdev->channel, sdev->id, sdev->lun);
1334} 1412}
1335 1413
1336static int 1414static int
@@ -1776,6 +1854,9 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
1776 ha->flags.port0 = 1; 1854 ha->flags.port0 = 1;
1777 else 1855 else
1778 ha->flags.port0 = 0; 1856 ha->flags.port0 = 0;
1857 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
1858 "device_type=0x%x port=%d fw_srisc_address=%p.\n",
1859 ha->device_type, ha->flags.port0, ha->fw_srisc_address);
1779} 1860}
1780 1861
1781static int 1862static int
@@ -1790,10 +1871,9 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
1790 1871
1791 if (pci_request_selected_regions(ha->pdev, ha->bars, 1872 if (pci_request_selected_regions(ha->pdev, ha->bars,
1792 QLA2XXX_DRIVER_NAME)) { 1873 QLA2XXX_DRIVER_NAME)) {
1793 qla_printk(KERN_WARNING, ha, 1874 ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
1794 "Failed to reserve PIO/MMIO regions (%s)\n", 1875 "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
1795 pci_name(ha->pdev)); 1876 pci_name(ha->pdev));
1796
1797 goto iospace_error_exit; 1877 goto iospace_error_exit;
1798 } 1878 }
1799 if (!(ha->bars & 1)) 1879 if (!(ha->bars & 1))
@@ -1803,39 +1883,42 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
1803 pio = pci_resource_start(ha->pdev, 0); 1883 pio = pci_resource_start(ha->pdev, 0);
1804 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) { 1884 if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
1805 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) { 1885 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
1806 qla_printk(KERN_WARNING, ha, 1886 ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
1807 "Invalid PCI I/O region size (%s)...\n", 1887 "Invalid pci I/O region size (%s).\n",
1808 pci_name(ha->pdev)); 1888 pci_name(ha->pdev));
1809 pio = 0; 1889 pio = 0;
1810 } 1890 }
1811 } else { 1891 } else {
1812 qla_printk(KERN_WARNING, ha, 1892 ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
1813 "region #0 not a PIO resource (%s)...\n", 1893 "Region #0 no a PIO resource (%s).\n",
1814 pci_name(ha->pdev)); 1894 pci_name(ha->pdev));
1815 pio = 0; 1895 pio = 0;
1816 } 1896 }
1817 ha->pio_address = pio; 1897 ha->pio_address = pio;
1898 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
1899 "PIO address=%p.\n",
1900 ha->pio_address);
1818 1901
1819skip_pio: 1902skip_pio:
1820 /* Use MMIO operations for all accesses. */ 1903 /* Use MMIO operations for all accesses. */
1821 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) { 1904 if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
1822 qla_printk(KERN_ERR, ha, 1905 ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
1823 "region #1 not an MMIO resource (%s), aborting\n", 1906 "Region #1 not an MMIO resource (%s), aborting.\n",
1824 pci_name(ha->pdev)); 1907 pci_name(ha->pdev));
1825 goto iospace_error_exit; 1908 goto iospace_error_exit;
1826 } 1909 }
1827 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) { 1910 if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
1828 qla_printk(KERN_ERR, ha, 1911 ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
1829 "Invalid PCI mem region size (%s), aborting\n", 1912 "Invalid PCI mem region size (%s), aborting.\n",
1830 pci_name(ha->pdev)); 1913 pci_name(ha->pdev));
1831 goto iospace_error_exit; 1914 goto iospace_error_exit;
1832 } 1915 }
1833 1916
1834 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN); 1917 ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
1835 if (!ha->iobase) { 1918 if (!ha->iobase) {
1836 qla_printk(KERN_ERR, ha, 1919 ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
1837 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); 1920 "Cannot remap MMIO (%s), aborting.\n",
1838 1921 pci_name(ha->pdev));
1839 goto iospace_error_exit; 1922 goto iospace_error_exit;
1840 } 1923 }
1841 1924
@@ -1849,6 +1932,8 @@ skip_pio:
1849 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 1932 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1850 pci_resource_len(ha->pdev, 3)); 1933 pci_resource_len(ha->pdev, 3));
1851 if (ha->mqiobase) { 1934 if (ha->mqiobase) {
1935 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
1936 "MQIO Base=%p.\n", ha->mqiobase);
1852 /* Read MSIX vector size of the board */ 1937 /* Read MSIX vector size of the board */
1853 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); 1938 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1854 ha->msix_count = msix; 1939 ha->msix_count = msix;
@@ -1861,17 +1946,24 @@ skip_pio:
1861 ha->max_req_queues = 2; 1946 ha->max_req_queues = 2;
1862 } else if (ql2xmaxqueues > 1) { 1947 } else if (ql2xmaxqueues > 1) {
1863 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ? 1948 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1864 QLA_MQ_SIZE : ql2xmaxqueues; 1949 QLA_MQ_SIZE : ql2xmaxqueues;
1865 DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no" 1950 ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
1866 " of request queues:%d\n", ha->max_req_queues)); 1951 "QoS mode set, max no of request queues:%d.\n",
1952 ha->max_req_queues);
1953 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
1954 "QoS mode set, max no of request queues:%d.\n",
1955 ha->max_req_queues);
1867 } 1956 }
1868 qla_printk(KERN_INFO, ha, 1957 ql_log_pci(ql_log_info, ha->pdev, 0x001a,
1869 "MSI-X vector count: %d\n", msix); 1958 "MSI-X vector count: %d.\n", msix);
1870 } else 1959 } else
1871 qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n"); 1960 ql_log_pci(ql_log_info, ha->pdev, 0x001b,
1961 "BAR 3 not enabled.\n");
1872 1962
1873mqiobase_exit: 1963mqiobase_exit:
1874 ha->msix_count = ha->max_rsp_queues + 1; 1964 ha->msix_count = ha->max_rsp_queues + 1;
1965 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
1966 "MSIX Count:%d.\n", ha->msix_count);
1875 return (0); 1967 return (0);
1876 1968
1877iospace_error_exit: 1969iospace_error_exit:
@@ -1935,7 +2027,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1935 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) { 2027 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
1936 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2028 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1937 mem_only = 1; 2029 mem_only = 1;
2030 ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
2031 "Mem only adapter.\n");
1938 } 2032 }
2033 ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
2034 "Bars=%d.\n", bars);
1939 2035
1940 if (mem_only) { 2036 if (mem_only) {
1941 if (pci_enable_device_mem(pdev)) 2037 if (pci_enable_device_mem(pdev))
@@ -1950,9 +2046,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1950 2046
1951 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); 2047 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1952 if (!ha) { 2048 if (!ha) {
1953 DEBUG(printk("Unable to allocate memory for ha\n")); 2049 ql_log_pci(ql_log_fatal, pdev, 0x0009,
2050 "Unable to allocate memory for ha.\n");
1954 goto probe_out; 2051 goto probe_out;
1955 } 2052 }
2053 ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
2054 "Memory allocated for ha=%p.\n", ha);
1956 ha->pdev = pdev; 2055 ha->pdev = pdev;
1957 2056
1958 /* Clear our data area */ 2057 /* Clear our data area */
@@ -1974,10 +2073,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1974 if (ret) 2073 if (ret)
1975 goto probe_hw_failed; 2074 goto probe_hw_failed;
1976 2075
1977 qla_printk(KERN_INFO, ha, 2076 ql_log_pci(ql_log_info, pdev, 0x001d,
1978 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, 2077 "Found an ISP%04X irq %d iobase 0x%p.\n",
1979 ha->iobase); 2078 pdev->device, pdev->irq, ha->iobase);
1980
1981 ha->prev_topology = 0; 2079 ha->prev_topology = 0;
1982 ha->init_cb_size = sizeof(init_cb_t); 2080 ha->init_cb_size = sizeof(init_cb_t);
1983 ha->link_data_rate = PORT_SPEED_UNKNOWN; 2081 ha->link_data_rate = PORT_SPEED_UNKNOWN;
@@ -2078,7 +2176,18 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2078 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2176 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2079 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2177 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2080 } 2178 }
2081 2179 ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
2180 "mbx_count=%d, req_length=%d, "
2181 "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
2182 "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, .\n",
2183 ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
2184 ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
2185 ha->nvram_npiv_size);
2186 ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
2187 "isp_ops=%p, flash_conf_off=%d, "
2188 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
2189 ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
2190 ha->nvram_conf_off, ha->nvram_data_off);
2082 mutex_init(&ha->vport_lock); 2191 mutex_init(&ha->vport_lock);
2083 init_completion(&ha->mbx_cmd_comp); 2192 init_completion(&ha->mbx_cmd_comp);
2084 complete(&ha->mbx_cmd_comp); 2193 complete(&ha->mbx_cmd_comp);
@@ -2088,10 +2197,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2088 set_bit(0, (unsigned long *) ha->vp_idx_map); 2197 set_bit(0, (unsigned long *) ha->vp_idx_map);
2089 2198
2090 qla2x00_config_dma_addressing(ha); 2199 qla2x00_config_dma_addressing(ha);
2200 ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
2201 "64 Bit addressing is %s.\n",
2202 ha->flags.enable_64bit_addressing ? "enable" :
2203 "disable");
2091 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); 2204 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
2092 if (!ret) { 2205 if (!ret) {
2093 qla_printk(KERN_WARNING, ha, 2206 ql_log_pci(ql_log_fatal, pdev, 0x0031,
2094 "[ERROR] Failed to allocate memory for adapter\n"); 2207 "Failed to allocate memory for adapter, aborting.\n");
2095 2208
2096 goto probe_hw_failed; 2209 goto probe_hw_failed;
2097 } 2210 }
@@ -2103,9 +2216,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2103 2216
2104 base_vha = qla2x00_create_host(sht, ha); 2217 base_vha = qla2x00_create_host(sht, ha);
2105 if (!base_vha) { 2218 if (!base_vha) {
2106 qla_printk(KERN_WARNING, ha,
2107 "[ERROR] Failed to allocate memory for scsi_host\n");
2108
2109 ret = -ENOMEM; 2219 ret = -ENOMEM;
2110 qla2x00_mem_free(ha); 2220 qla2x00_mem_free(ha);
2111 qla2x00_free_req_que(ha, req); 2221 qla2x00_free_req_que(ha, req);
@@ -2132,7 +2242,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2132 if (!IS_QLA82XX(ha)) 2242 if (!IS_QLA82XX(ha))
2133 host->sg_tablesize = QLA_SG_ALL; 2243 host->sg_tablesize = QLA_SG_ALL;
2134 } 2244 }
2135 2245 ql_dbg(ql_dbg_init, base_vha, 0x0032,
2246 "can_queue=%d, req=%p, "
2247 "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
2248 host->can_queue, base_vha->req,
2249 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
2136 host->max_id = max_id; 2250 host->max_id = max_id;
2137 host->this_id = 255; 2251 host->this_id = 255;
2138 host->cmd_per_lun = 3; 2252 host->cmd_per_lun = 3;
@@ -2146,6 +2260,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2146 host->transportt = qla2xxx_transport_template; 2260 host->transportt = qla2xxx_transport_template;
2147 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC); 2261 sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
2148 2262
2263 ql_dbg(ql_dbg_init, base_vha, 0x0033,
2264 "max_id=%d this_id=%d "
2265 "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
2266 "max_lun=%d transportt=%p, vendor_id=%d.\n", host->max_id,
2267 host->this_id, host->cmd_per_lun, host->unique_id,
2268 host->max_cmd_len, host->max_channel, host->max_lun,
2269 host->transportt, sht->vendor_id);
2270
2149 /* Set up the irqs */ 2271 /* Set up the irqs */
2150 ret = qla2x00_request_irqs(ha, rsp); 2272 ret = qla2x00_request_irqs(ha, rsp);
2151 if (ret) 2273 if (ret)
@@ -2156,9 +2278,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2156 /* Alloc arrays of request and response ring ptrs */ 2278 /* Alloc arrays of request and response ring ptrs */
2157que_init: 2279que_init:
2158 if (!qla2x00_alloc_queues(ha)) { 2280 if (!qla2x00_alloc_queues(ha)) {
2159 qla_printk(KERN_WARNING, ha, 2281 ql_log(ql_log_fatal, base_vha, 0x003d,
2160 "[ERROR] Failed to allocate memory for queue" 2282 "Failed to allocate memory for queue pointers.. aborting.\n");
2161 " pointers\n");
2162 goto probe_init_failed; 2283 goto probe_init_failed;
2163 } 2284 }
2164 2285
@@ -2186,20 +2307,33 @@ que_init:
2186 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; 2307 rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
2187 } 2308 }
2188 2309
2189 if (qla2x00_initialize_adapter(base_vha)) { 2310 ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
2190 qla_printk(KERN_WARNING, ha, 2311 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
2191 "Failed to initialize adapter\n"); 2312 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
2313 ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
2314 "req->req_q_in=%p req->req_q_out=%p "
2315 "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
2316 req->req_q_in, req->req_q_out,
2317 rsp->rsp_q_in, rsp->rsp_q_out);
2318 ql_dbg(ql_dbg_init, base_vha, 0x003e,
2319 "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
2320 ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
2321 ql_dbg(ql_dbg_init, base_vha, 0x003f,
2322 "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
2323 req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
2192 2324
2193 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - " 2325 if (qla2x00_initialize_adapter(base_vha)) {
2194 "Adapter flags %x.\n", 2326 ql_log(ql_log_fatal, base_vha, 0x00d6,
2195 base_vha->host_no, base_vha->device_flags)); 2327 "Failed to initialize adapter - Adapter flags %x.\n",
2328 base_vha->device_flags);
2196 2329
2197 if (IS_QLA82XX(ha)) { 2330 if (IS_QLA82XX(ha)) {
2198 qla82xx_idc_lock(ha); 2331 qla82xx_idc_lock(ha);
2199 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2332 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2200 QLA82XX_DEV_FAILED); 2333 QLA82XX_DEV_FAILED);
2201 qla82xx_idc_unlock(ha); 2334 qla82xx_idc_unlock(ha);
2202 qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); 2335 ql_log(ql_log_fatal, base_vha, 0x00d7,
2336 "HW State: FAILED.\n");
2203 } 2337 }
2204 2338
2205 ret = -ENODEV; 2339 ret = -ENODEV;
@@ -2208,9 +2342,8 @@ que_init:
2208 2342
2209 if (ha->mqenable) { 2343 if (ha->mqenable) {
2210 if (qla25xx_setup_mode(base_vha)) { 2344 if (qla25xx_setup_mode(base_vha)) {
2211 qla_printk(KERN_WARNING, ha, 2345 ql_log(ql_log_warn, base_vha, 0x00ec,
2212 "Can't create queues, falling back to single" 2346 "Failed to create queues, falling back to single queue mode.\n");
2213 " queue mode\n");
2214 goto que_init; 2347 goto que_init;
2215 } 2348 }
2216 } 2349 }
@@ -2222,13 +2355,15 @@ que_init:
2222 * Startup the kernel thread for this host adapter 2355 * Startup the kernel thread for this host adapter
2223 */ 2356 */
2224 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, 2357 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
2225 "%s_dpc", base_vha->host_str); 2358 "%s_dpc", base_vha->host_str);
2226 if (IS_ERR(ha->dpc_thread)) { 2359 if (IS_ERR(ha->dpc_thread)) {
2227 qla_printk(KERN_WARNING, ha, 2360 ql_log(ql_log_fatal, base_vha, 0x00ed,
2228 "Unable to start DPC thread!\n"); 2361 "Failed to start DPC thread.\n");
2229 ret = PTR_ERR(ha->dpc_thread); 2362 ret = PTR_ERR(ha->dpc_thread);
2230 goto probe_failed; 2363 goto probe_failed;
2231 } 2364 }
2365 ql_dbg(ql_dbg_init, base_vha, 0x00ee,
2366 "DPC thread started successfully.\n");
2232 2367
2233skip_dpc: 2368skip_dpc:
2234 list_add_tail(&base_vha->list, &ha->vp_list); 2369 list_add_tail(&base_vha->list, &ha->vp_list);
@@ -2236,16 +2371,18 @@ skip_dpc:
2236 2371
2237 /* Initialized the timer */ 2372 /* Initialized the timer */
2238 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL); 2373 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
2239 2374 ql_dbg(ql_dbg_init, base_vha, 0x00ef,
2240 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 2375 "Started qla2x00_timer with "
2241 base_vha->host_no, ha)); 2376 "interval=%d.\n", WATCH_INTERVAL);
2377 ql_dbg(ql_dbg_init, base_vha, 0x00f0,
2378 "Detected hba at address=%p.\n",
2379 ha);
2242 2380
2243 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { 2381 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
2244 if (ha->fw_attributes & BIT_4) { 2382 if (ha->fw_attributes & BIT_4) {
2245 base_vha->flags.difdix_supported = 1; 2383 base_vha->flags.difdix_supported = 1;
2246 DEBUG18(qla_printk(KERN_INFO, ha, 2384 ql_dbg(ql_dbg_init, base_vha, 0x00f1,
2247 "Registering for DIF/DIX type 1 and 3" 2385 "Registering for DIF/DIX type 1 and 3 protection.\n");
2248 " protection.\n"));
2249 scsi_host_set_prot(host, 2386 scsi_host_set_prot(host,
2250 SHOST_DIF_TYPE1_PROTECTION 2387 SHOST_DIF_TYPE1_PROTECTION
2251 | SHOST_DIF_TYPE2_PROTECTION 2388 | SHOST_DIF_TYPE2_PROTECTION
@@ -2267,6 +2404,9 @@ skip_dpc:
2267 base_vha->flags.init_done = 1; 2404 base_vha->flags.init_done = 1;
2268 base_vha->flags.online = 1; 2405 base_vha->flags.online = 1;
2269 2406
2407 ql_dbg(ql_dbg_init, base_vha, 0x00f2,
2408 "Init done and hba is online.\n");
2409
2270 scsi_scan_host(host); 2410 scsi_scan_host(host);
2271 2411
2272 qla2x00_alloc_sysfs_attr(base_vha); 2412 qla2x00_alloc_sysfs_attr(base_vha);
@@ -2275,14 +2415,17 @@ skip_dpc:
2275 2415
2276 qla2x00_dfs_setup(base_vha); 2416 qla2x00_dfs_setup(base_vha);
2277 2417
2278 qla_printk(KERN_INFO, ha, "\n" 2418 ql_log(ql_log_info, base_vha, 0x00fa,
2279 " QLogic Fibre Channel HBA Driver: %s\n" 2419 "QLogic Fibre Channed HBA Driver: %s.\n",
2280 " QLogic %s - %s\n" 2420 qla2x00_version_str);
2281 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n", 2421 ql_log(ql_log_info, base_vha, 0x00fb,
2282 qla2x00_version_str, ha->model_number, 2422 "QLogic %s - %s.\n",
2283 ha->model_desc ? ha->model_desc : "", pdev->device, 2423 ha->model_number, ha->model_desc ? ha->model_desc : "");
2284 ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev), 2424 ql_log(ql_log_info, base_vha, 0x00fc,
2285 ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no, 2425 "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
2426 pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
2427 pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
2428 base_vha->host_no,
2286 ha->isp_ops->fw_version_str(base_vha, fw_str)); 2429 ha->isp_ops->fw_version_str(base_vha, fw_str));
2287 2430
2288 return 0; 2431 return 0;
@@ -2580,20 +2723,15 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
2580 fcport->login_retry = vha->hw->login_retry_count; 2723 fcport->login_retry = vha->hw->login_retry_count;
2581 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 2724 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2582 2725
2583 DEBUG(printk("scsi(%ld): Port login retry: " 2726 ql_dbg(ql_dbg_disc, vha, 0x2067,
2727 "Port login retry "
2584 "%02x%02x%02x%02x%02x%02x%02x%02x, " 2728 "%02x%02x%02x%02x%02x%02x%02x%02x, "
2585 "id = 0x%04x retry cnt=%d\n", 2729 "id = 0x%04x retry cnt=%d.\n",
2586 vha->host_no, 2730 fcport->port_name[0], fcport->port_name[1],
2587 fcport->port_name[0], 2731 fcport->port_name[2], fcport->port_name[3],
2588 fcport->port_name[1], 2732 fcport->port_name[4], fcport->port_name[5],
2589 fcport->port_name[2], 2733 fcport->port_name[6], fcport->port_name[7],
2590 fcport->port_name[3], 2734 fcport->loop_id, fcport->login_retry);
2591 fcport->port_name[4],
2592 fcport->port_name[5],
2593 fcport->port_name[6],
2594 fcport->port_name[7],
2595 fcport->loop_id,
2596 fcport->login_retry));
2597 } 2735 }
2598} 2736}
2599 2737
@@ -2676,6 +2814,9 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2676 ctx_cachep); 2814 ctx_cachep);
2677 if (!ha->ctx_mempool) 2815 if (!ha->ctx_mempool)
2678 goto fail_free_srb_mempool; 2816 goto fail_free_srb_mempool;
2817 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
2818 "ctx_cachep=%p ctx_mempool=%p.\n",
2819 ctx_cachep, ha->ctx_mempool);
2679 } 2820 }
2680 2821
2681 /* Get memory for cached NVRAM */ 2822 /* Get memory for cached NVRAM */
@@ -2690,22 +2831,29 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2690 if (!ha->s_dma_pool) 2831 if (!ha->s_dma_pool)
2691 goto fail_free_nvram; 2832 goto fail_free_nvram;
2692 2833
2834 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
2835 "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
2836 ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
2837
2693 if (IS_QLA82XX(ha) || ql2xenabledif) { 2838 if (IS_QLA82XX(ha) || ql2xenabledif) {
2694 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, 2839 ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2695 DSD_LIST_DMA_POOL_SIZE, 8, 0); 2840 DSD_LIST_DMA_POOL_SIZE, 8, 0);
2696 if (!ha->dl_dma_pool) { 2841 if (!ha->dl_dma_pool) {
2697 qla_printk(KERN_WARNING, ha, 2842 ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
2698 "Memory Allocation failed - dl_dma_pool\n"); 2843 "Failed to allocate memory for dl_dma_pool.\n");
2699 goto fail_s_dma_pool; 2844 goto fail_s_dma_pool;
2700 } 2845 }
2701 2846
2702 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev, 2847 ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2703 FCP_CMND_DMA_POOL_SIZE, 8, 0); 2848 FCP_CMND_DMA_POOL_SIZE, 8, 0);
2704 if (!ha->fcp_cmnd_dma_pool) { 2849 if (!ha->fcp_cmnd_dma_pool) {
2705 qla_printk(KERN_WARNING, ha, 2850 ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
2706 "Memory Allocation failed - fcp_cmnd_dma_pool\n"); 2851 "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
2707 goto fail_dl_dma_pool; 2852 goto fail_dl_dma_pool;
2708 } 2853 }
2854 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
2855 "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
2856 ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
2709 } 2857 }
2710 2858
2711 /* Allocate memory for SNS commands */ 2859 /* Allocate memory for SNS commands */
@@ -2715,6 +2863,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2715 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 2863 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
2716 if (!ha->sns_cmd) 2864 if (!ha->sns_cmd)
2717 goto fail_dma_pool; 2865 goto fail_dma_pool;
2866 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
2867 "sns_cmd.\n", ha->sns_cmd);
2718 } else { 2868 } else {
2719 /* Get consistent memory allocated for MS IOCB */ 2869 /* Get consistent memory allocated for MS IOCB */
2720 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 2870 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -2726,12 +2876,16 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2726 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 2876 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
2727 if (!ha->ct_sns) 2877 if (!ha->ct_sns)
2728 goto fail_free_ms_iocb; 2878 goto fail_free_ms_iocb;
2879 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
2880 "ms_iocb=%p ct_sns=%p.\n",
2881 ha->ms_iocb, ha->ct_sns);
2729 } 2882 }
2730 2883
2731 /* Allocate memory for request ring */ 2884 /* Allocate memory for request ring */
2732 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 2885 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
2733 if (!*req) { 2886 if (!*req) {
2734 DEBUG(printk("Unable to allocate memory for req\n")); 2887 ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
2888 "Failed to allocate memory for req.\n");
2735 goto fail_req; 2889 goto fail_req;
2736 } 2890 }
2737 (*req)->length = req_len; 2891 (*req)->length = req_len;
@@ -2739,14 +2893,15 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2739 ((*req)->length + 1) * sizeof(request_t), 2893 ((*req)->length + 1) * sizeof(request_t),
2740 &(*req)->dma, GFP_KERNEL); 2894 &(*req)->dma, GFP_KERNEL);
2741 if (!(*req)->ring) { 2895 if (!(*req)->ring) {
2742 DEBUG(printk("Unable to allocate memory for req_ring\n")); 2896 ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
2897 "Failed to allocate memory for req_ring.\n");
2743 goto fail_req_ring; 2898 goto fail_req_ring;
2744 } 2899 }
2745 /* Allocate memory for response ring */ 2900 /* Allocate memory for response ring */
2746 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 2901 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
2747 if (!*rsp) { 2902 if (!*rsp) {
2748 qla_printk(KERN_WARNING, ha, 2903 ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
2749 "Unable to allocate memory for rsp\n"); 2904 "Failed to allocate memory for rsp.\n");
2750 goto fail_rsp; 2905 goto fail_rsp;
2751 } 2906 }
2752 (*rsp)->hw = ha; 2907 (*rsp)->hw = ha;
@@ -2755,19 +2910,24 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2755 ((*rsp)->length + 1) * sizeof(response_t), 2910 ((*rsp)->length + 1) * sizeof(response_t),
2756 &(*rsp)->dma, GFP_KERNEL); 2911 &(*rsp)->dma, GFP_KERNEL);
2757 if (!(*rsp)->ring) { 2912 if (!(*rsp)->ring) {
2758 qla_printk(KERN_WARNING, ha, 2913 ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
2759 "Unable to allocate memory for rsp_ring\n"); 2914 "Failed to allocate memory for rsp_ring.\n");
2760 goto fail_rsp_ring; 2915 goto fail_rsp_ring;
2761 } 2916 }
2762 (*req)->rsp = *rsp; 2917 (*req)->rsp = *rsp;
2763 (*rsp)->req = *req; 2918 (*rsp)->req = *req;
2919 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
2920 "req=%p req->length=%d req->ring=%p rsp=%p "
2921 "rsp->length=%d rsp->ring=%p.\n",
2922 *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
2923 (*rsp)->ring);
2764 /* Allocate memory for NVRAM data for vports */ 2924 /* Allocate memory for NVRAM data for vports */
2765 if (ha->nvram_npiv_size) { 2925 if (ha->nvram_npiv_size) {
2766 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) * 2926 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
2767 ha->nvram_npiv_size, GFP_KERNEL); 2927 ha->nvram_npiv_size, GFP_KERNEL);
2768 if (!ha->npiv_info) { 2928 if (!ha->npiv_info) {
2769 qla_printk(KERN_WARNING, ha, 2929 ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
2770 "Unable to allocate memory for npiv info\n"); 2930 "Failed to allocate memory for npiv_info.\n");
2771 goto fail_npiv_info; 2931 goto fail_npiv_info;
2772 } 2932 }
2773 } else 2933 } else
@@ -2779,6 +2939,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2779 &ha->ex_init_cb_dma); 2939 &ha->ex_init_cb_dma);
2780 if (!ha->ex_init_cb) 2940 if (!ha->ex_init_cb)
2781 goto fail_ex_init_cb; 2941 goto fail_ex_init_cb;
2942 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
2943 "ex_init_cb=%p.\n", ha->ex_init_cb);
2782 } 2944 }
2783 2945
2784 INIT_LIST_HEAD(&ha->gbl_dsd_list); 2946 INIT_LIST_HEAD(&ha->gbl_dsd_list);
@@ -2789,6 +2951,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2789 &ha->async_pd_dma); 2951 &ha->async_pd_dma);
2790 if (!ha->async_pd) 2952 if (!ha->async_pd)
2791 goto fail_async_pd; 2953 goto fail_async_pd;
2954 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
2955 "async_pd=%p.\n", ha->async_pd);
2792 } 2956 }
2793 2957
2794 INIT_LIST_HEAD(&ha->vp_list); 2958 INIT_LIST_HEAD(&ha->vp_list);
@@ -2854,7 +3018,8 @@ fail_free_init_cb:
2854 ha->init_cb = NULL; 3018 ha->init_cb = NULL;
2855 ha->init_cb_dma = 0; 3019 ha->init_cb_dma = 0;
2856fail: 3020fail:
2857 DEBUG(printk("%s: Memory allocation failure\n", __func__)); 3021 ql_log(ql_log_fatal, NULL, 0x0030,
3022 "Memory allocation failure.\n");
2858 return -ENOMEM; 3023 return -ENOMEM;
2859} 3024}
2860 3025
@@ -3003,8 +3168,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
3003 3168
3004 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 3169 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
3005 if (host == NULL) { 3170 if (host == NULL) {
3006 printk(KERN_WARNING 3171 ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
3007 "qla2xxx: Couldn't allocate host from scsi layer!\n"); 3172 "Failed to allocate host from the scsi layer, aborting.\n");
3008 goto fail; 3173 goto fail;
3009 } 3174 }
3010 3175
@@ -3023,6 +3188,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
3023 spin_lock_init(&vha->work_lock); 3188 spin_lock_init(&vha->work_lock);
3024 3189
3025 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 3190 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
3191 ql_dbg(ql_dbg_init, vha, 0x0041,
3192 "Allocated the host=%p hw=%p vha=%p dev_name=%s",
3193 vha->host, vha->hw, vha,
3194 dev_name(&(ha->pdev->dev)));
3195
3026 return vha; 3196 return vha;
3027 3197
3028fail: 3198fail:
@@ -3264,18 +3434,18 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
3264 if (status == QLA_SUCCESS) { 3434 if (status == QLA_SUCCESS) {
3265 fcport->old_loop_id = fcport->loop_id; 3435 fcport->old_loop_id = fcport->loop_id;
3266 3436
3267 DEBUG(printk("scsi(%ld): port login OK: logged " 3437 ql_dbg(ql_dbg_disc, vha, 0x2003,
3268 "in ID 0x%x\n", vha->host_no, fcport->loop_id)); 3438 "Port login OK: logged in ID 0x%x.\n",
3439 fcport->loop_id);
3269 3440
3270 qla2x00_update_fcport(vha, fcport); 3441 qla2x00_update_fcport(vha, fcport);
3271 3442
3272 } else if (status == 1) { 3443 } else if (status == 1) {
3273 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 3444 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3274 /* retry the login again */ 3445 /* retry the login again */
3275 DEBUG(printk("scsi(%ld): Retrying" 3446 ql_dbg(ql_dbg_disc, vha, 0x2007,
3276 " %d login again loop_id 0x%x\n", 3447 "Retrying %d login again loop_id 0x%x.\n",
3277 vha->host_no, fcport->login_retry, 3448 fcport->login_retry, fcport->loop_id);
3278 fcport->loop_id));
3279 } else { 3449 } else {
3280 fcport->login_retry = 0; 3450 fcport->login_retry = 0;
3281 } 3451 }
@@ -3315,26 +3485,27 @@ qla2x00_do_dpc(void *data)
3315 3485
3316 set_current_state(TASK_INTERRUPTIBLE); 3486 set_current_state(TASK_INTERRUPTIBLE);
3317 while (!kthread_should_stop()) { 3487 while (!kthread_should_stop()) {
3318 DEBUG3(printk("qla2x00: DPC handler sleeping\n")); 3488 ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
3489 "DPC handler sleeping.\n");
3319 3490
3320 schedule(); 3491 schedule();
3321 __set_current_state(TASK_RUNNING); 3492 __set_current_state(TASK_RUNNING);
3322 3493
3323 DEBUG3(printk("qla2x00: DPC handler waking up\n")); 3494 ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
3495 "DPC handler waking up.\n");
3496 ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
3497 "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
3324 3498
3325 /* Initialization not yet finished. Don't do anything yet. */ 3499 /* Initialization not yet finished. Don't do anything yet. */
3326 if (!base_vha->flags.init_done) 3500 if (!base_vha->flags.init_done)
3327 continue; 3501 continue;
3328 3502
3329 if (ha->flags.eeh_busy) { 3503 if (ha->flags.eeh_busy) {
3330 DEBUG17(qla_printk(KERN_WARNING, ha, 3504 ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
3331 "qla2x00_do_dpc: dpc_flags: %lx\n", 3505 "eeh_busy=%d.\n", ha->flags.eeh_busy);
3332 base_vha->dpc_flags));
3333 continue; 3506 continue;
3334 } 3507 }
3335 3508
3336 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
3337
3338 ha->dpc_active = 1; 3509 ha->dpc_active = 1;
3339 3510
3340 if (ha->flags.mbox_busy) { 3511 if (ha->flags.mbox_busy) {
@@ -3351,8 +3522,8 @@ qla2x00_do_dpc(void *data)
3351 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3522 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3352 QLA82XX_DEV_FAILED); 3523 QLA82XX_DEV_FAILED);
3353 qla82xx_idc_unlock(ha); 3524 qla82xx_idc_unlock(ha);
3354 qla_printk(KERN_INFO, ha, 3525 ql_log(ql_log_info, base_vha, 0x4004,
3355 "HW State: FAILED\n"); 3526 "HW State: FAILED.\n");
3356 qla82xx_device_state_handler(base_vha); 3527 qla82xx_device_state_handler(base_vha);
3357 continue; 3528 continue;
3358 } 3529 }
@@ -3360,10 +3531,8 @@ qla2x00_do_dpc(void *data)
3360 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, 3531 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
3361 &base_vha->dpc_flags)) { 3532 &base_vha->dpc_flags)) {
3362 3533
3363 DEBUG(printk(KERN_INFO 3534 ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
3364 "scsi(%ld): dpc: sched " 3535 "FCoE context reset scheduled.\n");
3365 "qla82xx_fcoe_ctx_reset ha = %p\n",
3366 base_vha->host_no, ha));
3367 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 3536 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3368 &base_vha->dpc_flags))) { 3537 &base_vha->dpc_flags))) {
3369 if (qla82xx_fcoe_ctx_reset(base_vha)) { 3538 if (qla82xx_fcoe_ctx_reset(base_vha)) {
@@ -3377,18 +3546,16 @@ qla2x00_do_dpc(void *data)
3377 &base_vha->dpc_flags); 3546 &base_vha->dpc_flags);
3378 } 3547 }
3379 3548
3380 DEBUG(printk("scsi(%ld): dpc:" 3549 ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
3381 " qla82xx_fcoe_ctx_reset end\n", 3550 "FCoE context reset end.\n");
3382 base_vha->host_no));
3383 } 3551 }
3384 } 3552 }
3385 3553
3386 if (test_and_clear_bit(ISP_ABORT_NEEDED, 3554 if (test_and_clear_bit(ISP_ABORT_NEEDED,
3387 &base_vha->dpc_flags)) { 3555 &base_vha->dpc_flags)) {
3388 3556
3389 DEBUG(printk("scsi(%ld): dpc: sched " 3557 ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
3390 "qla2x00_abort_isp ha = %p\n", 3558 "ISP abort scheduled.\n");
3391 base_vha->host_no, ha));
3392 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 3559 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3393 &base_vha->dpc_flags))) { 3560 &base_vha->dpc_flags))) {
3394 3561
@@ -3401,8 +3568,8 @@ qla2x00_do_dpc(void *data)
3401 &base_vha->dpc_flags); 3568 &base_vha->dpc_flags);
3402 } 3569 }
3403 3570
3404 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n", 3571 ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
3405 base_vha->host_no)); 3572 "ISP abort end.\n");
3406 } 3573 }
3407 3574
3408 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) { 3575 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
@@ -3411,9 +3578,8 @@ qla2x00_do_dpc(void *data)
3411 } 3578 }
3412 3579
3413 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 3580 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
3414 DEBUG(printk(KERN_INFO "scsi(%ld): dpc: sched " 3581 ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
3415 "qla2x00_quiesce_needed ha = %p\n", 3582 "Quiescence mode scheduled.\n");
3416 base_vha->host_no, ha));
3417 qla82xx_device_state_handler(base_vha); 3583 qla82xx_device_state_handler(base_vha);
3418 clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags); 3584 clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags);
3419 if (!ha->flags.quiesce_owner) { 3585 if (!ha->flags.quiesce_owner) {
@@ -3423,17 +3589,20 @@ qla2x00_do_dpc(void *data)
3423 qla82xx_clear_qsnt_ready(base_vha); 3589 qla82xx_clear_qsnt_ready(base_vha);
3424 qla82xx_idc_unlock(ha); 3590 qla82xx_idc_unlock(ha);
3425 } 3591 }
3592 ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
3593 "Quiescence mode end.\n");
3426 } 3594 }
3427 3595
3428 if (test_and_clear_bit(RESET_MARKER_NEEDED, 3596 if (test_and_clear_bit(RESET_MARKER_NEEDED,
3429 &base_vha->dpc_flags) && 3597 &base_vha->dpc_flags) &&
3430 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { 3598 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
3431 3599
3432 DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n", 3600 ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
3433 base_vha->host_no)); 3601 "Reset marker scheduled.\n");
3434
3435 qla2x00_rst_aen(base_vha); 3602 qla2x00_rst_aen(base_vha);
3436 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags); 3603 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
3604 ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
3605 "Reset marker end.\n");
3437 } 3606 }
3438 3607
3439 /* Retry each device up to login retry count */ 3608 /* Retry each device up to login retry count */
@@ -3442,19 +3611,18 @@ qla2x00_do_dpc(void *data)
3442 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && 3611 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
3443 atomic_read(&base_vha->loop_state) != LOOP_DOWN) { 3612 atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
3444 3613
3445 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n", 3614 ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
3446 base_vha->host_no)); 3615 "Relogin scheduled.\n");
3447 qla2x00_relogin(base_vha); 3616 qla2x00_relogin(base_vha);
3448 3617 ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
3449 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n", 3618 "Relogin end.\n");
3450 base_vha->host_no));
3451 } 3619 }
3452 3620
3453 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, 3621 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
3454 &base_vha->dpc_flags)) { 3622 &base_vha->dpc_flags)) {
3455 3623
3456 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n", 3624 ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
3457 base_vha->host_no)); 3625 "Loop resync scheduled.\n");
3458 3626
3459 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 3627 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
3460 &base_vha->dpc_flags))) { 3628 &base_vha->dpc_flags))) {
@@ -3465,8 +3633,8 @@ qla2x00_do_dpc(void *data)
3465 &base_vha->dpc_flags); 3633 &base_vha->dpc_flags);
3466 } 3634 }
3467 3635
3468 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n", 3636 ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
3469 base_vha->host_no)); 3637 "Loop resync end.\n");
3470 } 3638 }
3471 3639
3472 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) && 3640 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
@@ -3489,7 +3657,8 @@ qla2x00_do_dpc(void *data)
3489 } /* End of while(1) */ 3657 } /* End of while(1) */
3490 __set_current_state(TASK_RUNNING); 3658 __set_current_state(TASK_RUNNING);
3491 3659
3492 DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no)); 3660 ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
3661 "DPC handler exiting.\n");
3493 3662
3494 /* 3663 /*
3495 * Make sure that nobody tries to wake us up again. 3664 * Make sure that nobody tries to wake us up again.
@@ -3596,9 +3765,11 @@ void
3596qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp) 3765qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
3597{ 3766{
3598 if (atomic_read(&sp->ref_count) == 0) { 3767 if (atomic_read(&sp->ref_count) == 0) {
3599 DEBUG2(qla_printk(KERN_WARNING, ha, 3768 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
3600 "SP reference-count to ZERO -- sp=%p\n", sp)); 3769 "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
3601 DEBUG2(BUG()); 3770 sp, sp->cmd);
3771 if (ql2xextended_error_logging & ql_dbg_io)
3772 BUG();
3602 return; 3773 return;
3603 } 3774 }
3604 if (!atomic_dec_and_test(&sp->ref_count)) 3775 if (!atomic_dec_and_test(&sp->ref_count))
@@ -3626,6 +3797,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
3626 struct req_que *req; 3797 struct req_que *req;
3627 3798
3628 if (ha->flags.eeh_busy) { 3799 if (ha->flags.eeh_busy) {
3800 ql_dbg(ql_dbg_timer, vha, 0x6000,
3801 "EEH = %d, restarting timer.\n",
3802 ha->flags.eeh_busy);
3629 qla2x00_restart_timer(vha, WATCH_INTERVAL); 3803 qla2x00_restart_timer(vha, WATCH_INTERVAL);
3630 return; 3804 return;
3631 } 3805 }
@@ -3650,9 +3824,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
3650 if (atomic_read(&vha->loop_down_timer) == 3824 if (atomic_read(&vha->loop_down_timer) ==
3651 vha->loop_down_abort_time) { 3825 vha->loop_down_abort_time) {
3652 3826
3653 DEBUG(printk("scsi(%ld): Loop Down - aborting the " 3827 ql_log(ql_log_info, vha, 0x6008,
3654 "queues before time expire\n", 3828 "Loop down - aborting the queues before time expires.\n");
3655 vha->host_no));
3656 3829
3657 if (!IS_QLA2100(ha) && vha->link_down_timeout) 3830 if (!IS_QLA2100(ha) && vha->link_down_timeout)
3658 atomic_set(&vha->loop_state, LOOP_DEAD); 3831 atomic_set(&vha->loop_state, LOOP_DEAD);
@@ -3697,10 +3870,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
3697 /* if the loop has been down for 4 minutes, reinit adapter */ 3870 /* if the loop has been down for 4 minutes, reinit adapter */
3698 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { 3871 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
3699 if (!(vha->device_flags & DFLG_NO_CABLE)) { 3872 if (!(vha->device_flags & DFLG_NO_CABLE)) {
3700 DEBUG(printk("scsi(%ld): Loop down - " 3873 ql_log(ql_log_warn, vha, 0x6009,
3701 "aborting ISP.\n",
3702 vha->host_no));
3703 qla_printk(KERN_WARNING, ha,
3704 "Loop down - aborting ISP.\n"); 3874 "Loop down - aborting ISP.\n");
3705 3875
3706 if (IS_QLA82XX(ha)) 3876 if (IS_QLA82XX(ha))
@@ -3711,9 +3881,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
3711 &vha->dpc_flags); 3881 &vha->dpc_flags);
3712 } 3882 }
3713 } 3883 }
3714 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n", 3884 ql_dbg(ql_dbg_timer, vha, 0x600a,
3715 vha->host_no, 3885 "Loop down - seconds remaining %d.\n",
3716 atomic_read(&vha->loop_down_timer))); 3886 atomic_read(&vha->loop_down_timer));
3717 } 3887 }
3718 3888
3719 /* Check if beacon LED needs to be blinked for physical host only */ 3889 /* Check if beacon LED needs to be blinked for physical host only */
@@ -3736,8 +3906,27 @@ qla2x00_timer(scsi_qla_host_t *vha)
3736 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || 3906 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
3737 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || 3907 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
3738 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || 3908 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
3739 test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) 3909 test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
3910 ql_dbg(ql_dbg_timer, vha, 0x600b,
3911 "isp_abort_needed=%d loop_resync_needed=%d "
3912 "fcport_update_needed=%d start_dpc=%d "
3913 "reset_marker_needed=%d",
3914 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
3915 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
3916 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
3917 start_dpc,
3918 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
3919 ql_dbg(ql_dbg_timer, vha, 0x600c,
3920 "beacon_blink_needed=%d isp_unrecoverable=%d "
3921 "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
3922 "relogin_needed=%d.\n",
3923 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
3924 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
3925 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
3926 test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
3927 test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
3740 qla2xxx_wake_dpc(vha); 3928 qla2xxx_wake_dpc(vha);
3929 }
3741 3930
3742 qla2x00_restart_timer(vha, WATCH_INTERVAL); 3931 qla2x00_restart_timer(vha, WATCH_INTERVAL);
3743} 3932}
@@ -3806,8 +3995,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
3806 goto out; 3995 goto out;
3807 3996
3808 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { 3997 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
3809 DEBUG2(printk("scsi(%ld): Failed to load firmware image " 3998 ql_log(ql_log_warn, vha, 0x0063,
3810 "(%s).\n", vha->host_no, blob->name)); 3999 "Failed to load firmware image (%s).\n", blob->name);
3811 blob->fw = NULL; 4000 blob->fw = NULL;
3812 blob = NULL; 4001 blob = NULL;
3813 goto out; 4002 goto out;
@@ -3836,8 +4025,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3836 scsi_qla_host_t *vha = pci_get_drvdata(pdev); 4025 scsi_qla_host_t *vha = pci_get_drvdata(pdev);
3837 struct qla_hw_data *ha = vha->hw; 4026 struct qla_hw_data *ha = vha->hw;
3838 4027
3839 DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n", 4028 ql_dbg(ql_dbg_aer, vha, 0x9000,
3840 state)); 4029 "PCI error detected, state %x.\n", state);
3841 4030
3842 switch (state) { 4031 switch (state) {
3843 case pci_channel_io_normal: 4032 case pci_channel_io_normal:
@@ -3850,9 +4039,9 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3850 ha->flags.isp82xx_fw_hung = 1; 4039 ha->flags.isp82xx_fw_hung = 1;
3851 if (ha->flags.mbox_busy) { 4040 if (ha->flags.mbox_busy) {
3852 ha->flags.mbox_int = 1; 4041 ha->flags.mbox_int = 1;
3853 DEBUG2(qla_printk(KERN_ERR, ha, 4042 ql_dbg(ql_dbg_aer, vha, 0x9001,
3854 "Due to pci channel io frozen, doing premature " 4043 "Due to pci channel io frozen, doing premature "
3855 "completion of mbx command\n")); 4044 "completion of mbx command.\n");
3856 complete(&ha->mbx_intr_comp); 4045 complete(&ha->mbx_intr_comp);
3857 } 4046 }
3858 } 4047 }
@@ -3900,8 +4089,8 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
3900 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4089 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3901 4090
3902 if (risc_paused) { 4091 if (risc_paused) {
3903 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, " 4092 ql_log(ql_log_info, base_vha, 0x9003,
3904 "Dumping firmware!\n"); 4093 "RISC paused -- mmio_enabled, Dumping firmware.\n");
3905 ha->isp_ops->fw_dump(base_vha, 0); 4094 ha->isp_ops->fw_dump(base_vha, 0);
3906 4095
3907 return PCI_ERS_RESULT_NEED_RESET; 4096 return PCI_ERS_RESULT_NEED_RESET;
@@ -3917,8 +4106,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3917 int fn; 4106 int fn;
3918 struct pci_dev *other_pdev = NULL; 4107 struct pci_dev *other_pdev = NULL;
3919 4108
3920 DEBUG17(qla_printk(KERN_INFO, ha, 4109 ql_dbg(ql_dbg_aer, base_vha, 0x9006,
3921 "scsi(%ld): In qla82xx_error_recovery\n", base_vha->host_no)); 4110 "Entered %s.\n", __func__);
3922 4111
3923 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 4112 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3924 4113
@@ -3932,8 +4121,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3932 fn = PCI_FUNC(ha->pdev->devfn); 4121 fn = PCI_FUNC(ha->pdev->devfn);
3933 while (fn > 0) { 4122 while (fn > 0) {
3934 fn--; 4123 fn--;
3935 DEBUG17(qla_printk(KERN_INFO, ha, 4124 ql_dbg(ql_dbg_aer, base_vha, 0x9007,
3936 "Finding pci device at function = 0x%x\n", fn)); 4125 "Finding pci device at function = 0x%x.\n", fn);
3937 other_pdev = 4126 other_pdev =
3938 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), 4127 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
3939 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), 4128 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
@@ -3942,9 +4131,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3942 if (!other_pdev) 4131 if (!other_pdev)
3943 continue; 4132 continue;
3944 if (atomic_read(&other_pdev->enable_cnt)) { 4133 if (atomic_read(&other_pdev->enable_cnt)) {
3945 DEBUG17(qla_printk(KERN_INFO, ha, 4134 ql_dbg(ql_dbg_aer, base_vha, 0x9008,
3946 "Found PCI func available and enabled at 0x%x\n", 4135 "Found PCI func available and enable at 0x%x.\n",
3947 fn)); 4136 fn);
3948 pci_dev_put(other_pdev); 4137 pci_dev_put(other_pdev);
3949 break; 4138 break;
3950 } 4139 }
@@ -3953,8 +4142,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3953 4142
3954 if (!fn) { 4143 if (!fn) {
3955 /* Reset owner */ 4144 /* Reset owner */
3956 DEBUG17(qla_printk(KERN_INFO, ha, 4145 ql_dbg(ql_dbg_aer, base_vha, 0x9009,
3957 "This devfn is reset owner = 0x%x\n", ha->pdev->devfn)); 4146 "This devfn is reset owner = 0x%x.\n",
4147 ha->pdev->devfn);
3958 qla82xx_idc_lock(ha); 4148 qla82xx_idc_lock(ha);
3959 4149
3960 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4150 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
@@ -3964,8 +4154,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3964 QLA82XX_IDC_VERSION); 4154 QLA82XX_IDC_VERSION);
3965 4155
3966 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 4156 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3967 DEBUG17(qla_printk(KERN_INFO, ha, 4157 ql_dbg(ql_dbg_aer, base_vha, 0x900a,
3968 "drv_active = 0x%x\n", drv_active)); 4158 "drv_active = 0x%x.\n", drv_active);
3969 4159
3970 qla82xx_idc_unlock(ha); 4160 qla82xx_idc_unlock(ha);
3971 /* Reset if device is not already reset 4161 /* Reset if device is not already reset
@@ -3978,12 +4168,14 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3978 qla82xx_idc_lock(ha); 4168 qla82xx_idc_lock(ha);
3979 4169
3980 if (rval != QLA_SUCCESS) { 4170 if (rval != QLA_SUCCESS) {
3981 qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); 4171 ql_log(ql_log_info, base_vha, 0x900b,
4172 "HW State: FAILED.\n");
3982 qla82xx_clear_drv_active(ha); 4173 qla82xx_clear_drv_active(ha);
3983 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4174 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3984 QLA82XX_DEV_FAILED); 4175 QLA82XX_DEV_FAILED);
3985 } else { 4176 } else {
3986 qla_printk(KERN_INFO, ha, "HW State: READY\n"); 4177 ql_log(ql_log_info, base_vha, 0x900c,
4178 "HW State: READY.\n");
3987 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 4179 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3988 QLA82XX_DEV_READY); 4180 QLA82XX_DEV_READY);
3989 qla82xx_idc_unlock(ha); 4181 qla82xx_idc_unlock(ha);
@@ -3996,8 +4188,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
3996 } 4188 }
3997 qla82xx_idc_unlock(ha); 4189 qla82xx_idc_unlock(ha);
3998 } else { 4190 } else {
3999 DEBUG17(qla_printk(KERN_INFO, ha, 4191 ql_dbg(ql_dbg_aer, base_vha, 0x900d,
4000 "This devfn is not reset owner = 0x%x\n", ha->pdev->devfn)); 4192 "This devfn is not reset owner = 0x%x.\n",
4193 ha->pdev->devfn);
4001 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == 4194 if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
4002 QLA82XX_DEV_READY)) { 4195 QLA82XX_DEV_READY)) {
4003 ha->flags.isp82xx_fw_hung = 0; 4196 ha->flags.isp82xx_fw_hung = 0;
@@ -4021,7 +4214,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
4021 struct rsp_que *rsp; 4214 struct rsp_que *rsp;
4022 int rc, retries = 10; 4215 int rc, retries = 10;
4023 4216
4024 DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n")); 4217 ql_dbg(ql_dbg_aer, base_vha, 0x9004,
4218 "Slot Reset.\n");
4025 4219
4026 /* Workaround: qla2xxx driver which access hardware earlier 4220 /* Workaround: qla2xxx driver which access hardware earlier
4027 * needs error state to be pci_channel_io_online. 4221 * needs error state to be pci_channel_io_online.
@@ -4042,7 +4236,7 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
4042 rc = pci_enable_device(pdev); 4236 rc = pci_enable_device(pdev);
4043 4237
4044 if (rc) { 4238 if (rc) {
4045 qla_printk(KERN_WARNING, ha, 4239 ql_log(ql_log_warn, base_vha, 0x9005,
4046 "Can't re-enable PCI device after reset.\n"); 4240 "Can't re-enable PCI device after reset.\n");
4047 goto exit_slot_reset; 4241 goto exit_slot_reset;
4048 } 4242 }
@@ -4072,8 +4266,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
4072 4266
4073 4267
4074exit_slot_reset: 4268exit_slot_reset:
4075 DEBUG17(qla_printk(KERN_WARNING, ha, 4269 ql_dbg(ql_dbg_aer, base_vha, 0x900e,
4076 "slot_reset-return:ret=%x\n", ret)); 4270 "slot_reset return %x.\n", ret);
4077 4271
4078 return ret; 4272 return ret;
4079} 4273}
@@ -4085,13 +4279,13 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
4085 struct qla_hw_data *ha = base_vha->hw; 4279 struct qla_hw_data *ha = base_vha->hw;
4086 int ret; 4280 int ret;
4087 4281
4088 DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n")); 4282 ql_dbg(ql_dbg_aer, base_vha, 0x900f,
4283 "pci_resume.\n");
4089 4284
4090 ret = qla2x00_wait_for_hba_online(base_vha); 4285 ret = qla2x00_wait_for_hba_online(base_vha);
4091 if (ret != QLA_SUCCESS) { 4286 if (ret != QLA_SUCCESS) {
4092 qla_printk(KERN_ERR, ha, 4287 ql_log(ql_log_fatal, base_vha, 0x9002,
4093 "the device failed to resume I/O " 4288 "The device failed to resume I/O from slot/link_reset.\n");
4094 "from slot/link_reset");
4095 } 4289 }
4096 4290
4097 pci_cleanup_aer_uncorrect_error_status(pdev); 4291 pci_cleanup_aer_uncorrect_error_status(pdev);
@@ -4155,8 +4349,8 @@ qla2x00_module_init(void)
4155 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, 4349 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
4156 SLAB_HWCACHE_ALIGN, NULL); 4350 SLAB_HWCACHE_ALIGN, NULL);
4157 if (srb_cachep == NULL) { 4351 if (srb_cachep == NULL) {
4158 printk(KERN_ERR 4352 ql_log(ql_log_fatal, NULL, 0x0001,
4159 "qla2xxx: Unable to allocate SRB cache...Failing load!\n"); 4353 "Unable to allocate SRB cache...Failing load!.\n");
4160 return -ENOMEM; 4354 return -ENOMEM;
4161 } 4355 }
4162 4356
@@ -4169,13 +4363,15 @@ qla2x00_module_init(void)
4169 fc_attach_transport(&qla2xxx_transport_functions); 4363 fc_attach_transport(&qla2xxx_transport_functions);
4170 if (!qla2xxx_transport_template) { 4364 if (!qla2xxx_transport_template) {
4171 kmem_cache_destroy(srb_cachep); 4365 kmem_cache_destroy(srb_cachep);
4366 ql_log(ql_log_fatal, NULL, 0x0002,
4367 "fc_attach_transport failed...Failing load!.\n");
4172 return -ENODEV; 4368 return -ENODEV;
4173 } 4369 }
4174 4370
4175 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops); 4371 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
4176 if (apidev_major < 0) { 4372 if (apidev_major < 0) {
4177 printk(KERN_WARNING "qla2xxx: Unable to register char device " 4373 ql_log(ql_log_fatal, NULL, 0x0003,
4178 "%s\n", QLA2XXX_APIDEV); 4374 "Unable to register char device %s.\n", QLA2XXX_APIDEV);
4179 } 4375 }
4180 4376
4181 qla2xxx_transport_vport_template = 4377 qla2xxx_transport_vport_template =
@@ -4183,16 +4379,21 @@ qla2x00_module_init(void)
4183 if (!qla2xxx_transport_vport_template) { 4379 if (!qla2xxx_transport_vport_template) {
4184 kmem_cache_destroy(srb_cachep); 4380 kmem_cache_destroy(srb_cachep);
4185 fc_release_transport(qla2xxx_transport_template); 4381 fc_release_transport(qla2xxx_transport_template);
4382 ql_log(ql_log_fatal, NULL, 0x0004,
4383 "fc_attach_transport vport failed...Failing load!.\n");
4186 return -ENODEV; 4384 return -ENODEV;
4187 } 4385 }
4188 4386 ql_log(ql_log_info, NULL, 0x0005,
4189 printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n", 4387 "QLogic Fibre Channel HBA Driver: %s.\n",
4190 qla2x00_version_str); 4388 qla2x00_version_str);
4191 ret = pci_register_driver(&qla2xxx_pci_driver); 4389 ret = pci_register_driver(&qla2xxx_pci_driver);
4192 if (ret) { 4390 if (ret) {
4193 kmem_cache_destroy(srb_cachep); 4391 kmem_cache_destroy(srb_cachep);
4194 fc_release_transport(qla2xxx_transport_template); 4392 fc_release_transport(qla2xxx_transport_template);
4195 fc_release_transport(qla2xxx_transport_vport_template); 4393 fc_release_transport(qla2xxx_transport_vport_template);
4394 ql_log(ql_log_fatal, NULL, 0x0006,
4395 "pci_register_driver failed...ret=%d Failing load!.\n",
4396 ret);
4196 } 4397 }
4197 return ret; 4398 return ret;
4198} 4399}
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 693647661ed1..eff13563c82d 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -189,6 +189,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
189 uint16_t word; 189 uint16_t word;
190 uint32_t nv_cmd, wait_cnt; 190 uint32_t nv_cmd, wait_cnt;
191 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 191 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
192 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
192 193
193 qla2x00_nv_write(ha, NVR_DATA_OUT); 194 qla2x00_nv_write(ha, NVR_DATA_OUT);
194 qla2x00_nv_write(ha, 0); 195 qla2x00_nv_write(ha, 0);
@@ -220,8 +221,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
220 wait_cnt = NVR_WAIT_CNT; 221 wait_cnt = NVR_WAIT_CNT;
221 do { 222 do {
222 if (!--wait_cnt) { 223 if (!--wait_cnt) {
223 DEBUG9_10(qla_printk(KERN_WARNING, ha, 224 ql_dbg(ql_dbg_user, vha, 0x708d,
224 "NVRAM didn't go ready...\n")); 225 "NVRAM didn't go ready...\n");
225 break; 226 break;
226 } 227 }
227 NVRAM_DELAY(); 228 NVRAM_DELAY();
@@ -308,6 +309,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
308 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 309 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
309 uint32_t word, wait_cnt; 310 uint32_t word, wait_cnt;
310 uint16_t wprot, wprot_old; 311 uint16_t wprot, wprot_old;
312 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
311 313
312 /* Clear NVRAM write protection. */ 314 /* Clear NVRAM write protection. */
313 ret = QLA_FUNCTION_FAILED; 315 ret = QLA_FUNCTION_FAILED;
@@ -350,8 +352,8 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
350 wait_cnt = NVR_WAIT_CNT; 352 wait_cnt = NVR_WAIT_CNT;
351 do { 353 do {
352 if (!--wait_cnt) { 354 if (!--wait_cnt) {
353 DEBUG9_10(qla_printk(KERN_WARNING, ha, 355 ql_dbg(ql_dbg_user, vha, 0x708e,
354 "NVRAM didn't go ready...\n")); 356 "NVRAM didn't go ready...\n");
355 break; 357 break;
356 } 358 }
357 NVRAM_DELAY(); 359 NVRAM_DELAY();
@@ -371,6 +373,7 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
371{ 373{
372 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 374 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
373 uint32_t word, wait_cnt; 375 uint32_t word, wait_cnt;
376 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
374 377
375 if (stat != QLA_SUCCESS) 378 if (stat != QLA_SUCCESS)
376 return; 379 return;
@@ -409,8 +412,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
409 wait_cnt = NVR_WAIT_CNT; 412 wait_cnt = NVR_WAIT_CNT;
410 do { 413 do {
411 if (!--wait_cnt) { 414 if (!--wait_cnt) {
412 DEBUG9_10(qla_printk(KERN_WARNING, ha, 415 ql_dbg(ql_dbg_user, vha, 0x708f,
413 "NVRAM didn't go ready...\n")); 416 "NVRAM didn't go ready...\n");
414 break; 417 break;
415 } 418 }
416 NVRAM_DELAY(); 419 NVRAM_DELAY();
@@ -607,9 +610,10 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
607 for (chksum = 0; cnt; cnt--) 610 for (chksum = 0; cnt; cnt--)
608 chksum += le16_to_cpu(*wptr++); 611 chksum += le16_to_cpu(*wptr++);
609 if (chksum) { 612 if (chksum) {
610 qla_printk(KERN_ERR, ha, 613 ql_log(ql_log_fatal, vha, 0x0045,
611 "Inconsistent FLTL detected: checksum=0x%x.\n", chksum); 614 "Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
612 qla2x00_dump_buffer(buf, sizeof(struct qla_flt_location)); 615 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010e,
616 buf, sizeof(struct qla_flt_location));
613 return QLA_FUNCTION_FAILED; 617 return QLA_FUNCTION_FAILED;
614 } 618 }
615 619
@@ -618,7 +622,9 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
618 *start = (le16_to_cpu(fltl->start_hi) << 16 | 622 *start = (le16_to_cpu(fltl->start_hi) << 16 |
619 le16_to_cpu(fltl->start_lo)) >> 2; 623 le16_to_cpu(fltl->start_lo)) >> 2;
620end: 624end:
621 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start)); 625 ql_dbg(ql_dbg_init, vha, 0x0046,
626 "FLTL[%s] = 0x%x.\n",
627 loc, *start);
622 return QLA_SUCCESS; 628 return QLA_SUCCESS;
623} 629}
624 630
@@ -685,10 +691,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
685 if (*wptr == __constant_cpu_to_le16(0xffff)) 691 if (*wptr == __constant_cpu_to_le16(0xffff))
686 goto no_flash_data; 692 goto no_flash_data;
687 if (flt->version != __constant_cpu_to_le16(1)) { 693 if (flt->version != __constant_cpu_to_le16(1)) {
688 DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported FLT detected: " 694 ql_log(ql_log_warn, vha, 0x0047,
689 "version=0x%x length=0x%x checksum=0x%x.\n", 695 "Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
690 le16_to_cpu(flt->version), le16_to_cpu(flt->length), 696 le16_to_cpu(flt->version), le16_to_cpu(flt->length),
691 le16_to_cpu(flt->checksum))); 697 le16_to_cpu(flt->checksum));
692 goto no_flash_data; 698 goto no_flash_data;
693 } 699 }
694 700
@@ -696,10 +702,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
696 for (chksum = 0; cnt; cnt--) 702 for (chksum = 0; cnt; cnt--)
697 chksum += le16_to_cpu(*wptr++); 703 chksum += le16_to_cpu(*wptr++);
698 if (chksum) { 704 if (chksum) {
699 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FLT detected: " 705 ql_log(ql_log_fatal, vha, 0x0048,
700 "version=0x%x length=0x%x checksum=0x%x.\n", 706 "Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
701 le16_to_cpu(flt->version), le16_to_cpu(flt->length), 707 le16_to_cpu(flt->version), le16_to_cpu(flt->length),
702 chksum)); 708 le16_to_cpu(flt->checksum));
703 goto no_flash_data; 709 goto no_flash_data;
704 } 710 }
705 711
@@ -708,10 +714,11 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
708 for ( ; cnt; cnt--, region++) { 714 for ( ; cnt; cnt--, region++) {
709 /* Store addresses as DWORD offsets. */ 715 /* Store addresses as DWORD offsets. */
710 start = le32_to_cpu(region->start) >> 2; 716 start = le32_to_cpu(region->start) >> 2;
711 717 ql_dbg(ql_dbg_init, vha, 0x0049,
712 DEBUG3(qla_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x " 718 "FLT[%02x]: start=0x%x "
713 "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start, 719 "end=0x%x size=0x%x.\n", le32_to_cpu(region->code),
714 le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size))); 720 start, le32_to_cpu(region->end) >> 2,
721 le32_to_cpu(region->size));
715 722
716 switch (le32_to_cpu(region->code) & 0xff) { 723 switch (le32_to_cpu(region->code) & 0xff) {
717 case FLT_REG_FW: 724 case FLT_REG_FW:
@@ -796,12 +803,16 @@ no_flash_data:
796 ha->flt_region_npiv_conf = ha->flags.port0 ? 803 ha->flt_region_npiv_conf = ha->flags.port0 ?
797 def_npiv_conf0[def] : def_npiv_conf1[def]; 804 def_npiv_conf0[def] : def_npiv_conf1[def];
798done: 805done:
799 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x " 806 ql_dbg(ql_dbg_init, vha, 0x004a,
800 "vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x " 807 "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x.\n",
801 "npiv=0x%x. fcp_prio_cfg=0x%x\n", loc, ha->flt_region_boot, 808 loc, ha->flt_region_boot,
802 ha->flt_region_fw, ha->flt_region_vpd_nvram, ha->flt_region_vpd, 809 ha->flt_region_fw, ha->flt_region_vpd_nvram,
803 ha->flt_region_nvram, ha->flt_region_fdt, ha->flt_region_flt, 810 ha->flt_region_vpd);
804 ha->flt_region_npiv_conf, ha->flt_region_fcp_prio)); 811 ql_dbg(ql_dbg_init, vha, 0x004b,
812 "nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n",
813 ha->flt_region_nvram,
814 ha->flt_region_fdt, ha->flt_region_flt,
815 ha->flt_region_npiv_conf, ha->flt_region_fcp_prio);
805} 816}
806 817
807static void 818static void
@@ -833,10 +844,12 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
833 cnt++) 844 cnt++)
834 chksum += le16_to_cpu(*wptr++); 845 chksum += le16_to_cpu(*wptr++);
835 if (chksum) { 846 if (chksum) {
836 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FDT detected: " 847 ql_dbg(ql_dbg_init, vha, 0x004c,
837 "checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0], 848 "Inconsistent FDT detected:"
838 le16_to_cpu(fdt->version))); 849 " checksum=0x%x id=%c version0x%x.\n", chksum,
839 DEBUG9(qla2x00_dump_buffer((uint8_t *)fdt, sizeof(*fdt))); 850 fdt->sig[0], le16_to_cpu(fdt->version));
851 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0113,
852 (uint8_t *)fdt, sizeof(*fdt));
840 goto no_flash_data; 853 goto no_flash_data;
841 } 854 }
842 855
@@ -890,11 +903,12 @@ no_flash_data:
890 break; 903 break;
891 } 904 }
892done: 905done:
893 DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x " 906 ql_dbg(ql_dbg_init, vha, 0x004d,
894 "pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid, 907 "FDT[%x]: (0x%x/0x%x) erase=0x%x "
908 "pr=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
895 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, 909 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
896 ha->fdt_unprotect_sec_cmd, ha->fdt_wrt_disable, 910 ha->fdt_wrt_disable, ha->fdt_block_size);
897 ha->fdt_block_size)); 911
898} 912}
899 913
900static void 914static void
@@ -919,6 +933,10 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
919 ha->nx_dev_init_timeout = le32_to_cpu(*wptr++); 933 ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
920 ha->nx_reset_timeout = le32_to_cpu(*wptr); 934 ha->nx_reset_timeout = le32_to_cpu(*wptr);
921 } 935 }
936 ql_dbg(ql_dbg_init, vha, 0x004e,
937 "nx_dev_init_timeout=%d "
938 "nx_reset_timeout=%d.\n", ha->nx_dev_init_timeout,
939 ha->nx_reset_timeout);
922 return; 940 return;
923} 941}
924 942
@@ -963,17 +981,18 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
963 if (hdr.version == __constant_cpu_to_le16(0xffff)) 981 if (hdr.version == __constant_cpu_to_le16(0xffff))
964 return; 982 return;
965 if (hdr.version != __constant_cpu_to_le16(1)) { 983 if (hdr.version != __constant_cpu_to_le16(1)) {
966 DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported NPIV-Config " 984 ql_dbg(ql_dbg_user, vha, 0x7090,
985 "Unsupported NPIV-Config "
967 "detected: version=0x%x entries=0x%x checksum=0x%x.\n", 986 "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
968 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), 987 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
969 le16_to_cpu(hdr.checksum))); 988 le16_to_cpu(hdr.checksum));
970 return; 989 return;
971 } 990 }
972 991
973 data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL); 992 data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL);
974 if (!data) { 993 if (!data) {
975 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV-Config: Unable to " 994 ql_log(ql_log_warn, vha, 0x7091,
976 "allocate memory.\n")); 995 "Unable to allocate memory for data.\n");
977 return; 996 return;
978 } 997 }
979 998
@@ -985,10 +1004,11 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
985 for (wptr = data, chksum = 0; cnt; cnt--) 1004 for (wptr = data, chksum = 0; cnt; cnt--)
986 chksum += le16_to_cpu(*wptr++); 1005 chksum += le16_to_cpu(*wptr++);
987 if (chksum) { 1006 if (chksum) {
988 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent NPIV-Config " 1007 ql_dbg(ql_dbg_user, vha, 0x7092,
1008 "Inconsistent NPIV-Config "
989 "detected: version=0x%x entries=0x%x checksum=0x%x.\n", 1009 "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
990 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), 1010 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
991 chksum)); 1011 le16_to_cpu(hdr.checksum));
992 goto done; 1012 goto done;
993 } 1013 }
994 1014
@@ -1014,21 +1034,22 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
1014 vid.port_name = wwn_to_u64(entry->port_name); 1034 vid.port_name = wwn_to_u64(entry->port_name);
1015 vid.node_name = wwn_to_u64(entry->node_name); 1035 vid.node_name = wwn_to_u64(entry->node_name);
1016 1036
1017 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx " 1037 ql_dbg(ql_dbg_user, vha, 0x7093,
1018 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, 1038 "NPIV[%02x]: wwpn=%llx "
1019 (unsigned long long)vid.port_name, 1039 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
1020 (unsigned long long)vid.node_name, 1040 (unsigned long long)vid.port_name,
1021 le16_to_cpu(entry->vf_id), 1041 (unsigned long long)vid.node_name,
1022 entry->q_qos, entry->f_qos)); 1042 le16_to_cpu(entry->vf_id),
1043 entry->q_qos, entry->f_qos);
1023 1044
1024 if (i < QLA_PRECONFIG_VPORTS) { 1045 if (i < QLA_PRECONFIG_VPORTS) {
1025 vport = fc_vport_create(vha->host, 0, &vid); 1046 vport = fc_vport_create(vha->host, 0, &vid);
1026 if (!vport) 1047 if (!vport)
1027 qla_printk(KERN_INFO, ha, 1048 ql_log(ql_log_warn, vha, 0x7094,
1028 "NPIV-Config: Failed to create vport [%02x]: " 1049 "NPIV-Config Failed to create vport [%02x]: "
1029 "wwpn=%llx wwnn=%llx.\n", cnt, 1050 "wwpn=%llx wwnn=%llx.\n", cnt,
1030 (unsigned long long)vid.port_name, 1051 (unsigned long long)vid.port_name,
1031 (unsigned long long)vid.node_name); 1052 (unsigned long long)vid.node_name);
1032 } 1053 }
1033 } 1054 }
1034done: 1055done:
@@ -1127,9 +1148,10 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1127 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 1148 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
1128 &optrom_dma, GFP_KERNEL); 1149 &optrom_dma, GFP_KERNEL);
1129 if (!optrom) { 1150 if (!optrom) {
1130 qla_printk(KERN_DEBUG, ha, 1151 ql_log(ql_log_warn, vha, 0x7095,
1131 "Unable to allocate memory for optrom burst write " 1152 "Unable to allocate "
1132 "(%x KB).\n", OPTROM_BURST_SIZE / 1024); 1153 "memory for optrom burst write (%x KB).\n",
1154 OPTROM_BURST_SIZE / 1024);
1133 } 1155 }
1134 } 1156 }
1135 1157
@@ -1138,7 +1160,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1138 1160
1139 ret = qla24xx_unprotect_flash(vha); 1161 ret = qla24xx_unprotect_flash(vha);
1140 if (ret != QLA_SUCCESS) { 1162 if (ret != QLA_SUCCESS) {
1141 qla_printk(KERN_WARNING, ha, 1163 ql_log(ql_log_warn, vha, 0x7096,
1142 "Unable to unprotect flash for update.\n"); 1164 "Unable to unprotect flash for update.\n");
1143 goto done; 1165 goto done;
1144 } 1166 }
@@ -1156,9 +1178,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1156 0xff0000) | ((fdata >> 16) & 0xff)); 1178 0xff0000) | ((fdata >> 16) & 0xff));
1157 ret = qla24xx_erase_sector(vha, fdata); 1179 ret = qla24xx_erase_sector(vha, fdata);
1158 if (ret != QLA_SUCCESS) { 1180 if (ret != QLA_SUCCESS) {
1159 DEBUG9(qla_printk(KERN_WARNING, ha, 1181 ql_dbg(ql_dbg_user, vha, 0x7007,
1160 "Unable to erase sector: address=%x.\n", 1182 "Unable to erase erase sector: address=%x.\n",
1161 faddr)); 1183 faddr);
1162 break; 1184 break;
1163 } 1185 }
1164 } 1186 }
@@ -1172,12 +1194,12 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1172 flash_data_addr(ha, faddr), 1194 flash_data_addr(ha, faddr),
1173 OPTROM_BURST_DWORDS); 1195 OPTROM_BURST_DWORDS);
1174 if (ret != QLA_SUCCESS) { 1196 if (ret != QLA_SUCCESS) {
1175 qla_printk(KERN_WARNING, ha, 1197 ql_log(ql_log_warn, vha, 0x7097,
1176 "Unable to burst-write optrom segment " 1198 "Unable to burst-write optrom segment "
1177 "(%x/%x/%llx).\n", ret, 1199 "(%x/%x/%llx).\n", ret,
1178 flash_data_addr(ha, faddr), 1200 flash_data_addr(ha, faddr),
1179 (unsigned long long)optrom_dma); 1201 (unsigned long long)optrom_dma);
1180 qla_printk(KERN_WARNING, ha, 1202 ql_log(ql_log_warn, vha, 0x7098,
1181 "Reverting to slow-write.\n"); 1203 "Reverting to slow-write.\n");
1182 1204
1183 dma_free_coherent(&ha->pdev->dev, 1205 dma_free_coherent(&ha->pdev->dev,
@@ -1194,9 +1216,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1194 ret = qla24xx_write_flash_dword(ha, 1216 ret = qla24xx_write_flash_dword(ha,
1195 flash_data_addr(ha, faddr), cpu_to_le32(*dwptr)); 1217 flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
1196 if (ret != QLA_SUCCESS) { 1218 if (ret != QLA_SUCCESS) {
1197 DEBUG9(printk("%s(%ld) Unable to program flash " 1219 ql_dbg(ql_dbg_user, vha, 0x7006,
1198 "address=%x data=%x.\n", __func__, 1220 "Unable to program flash address=%x data=%x.\n",
1199 vha->host_no, faddr, *dwptr)); 1221 faddr, *dwptr);
1200 break; 1222 break;
1201 } 1223 }
1202 1224
@@ -1211,7 +1233,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1211 1233
1212 ret = qla24xx_protect_flash(vha); 1234 ret = qla24xx_protect_flash(vha);
1213 if (ret != QLA_SUCCESS) 1235 if (ret != QLA_SUCCESS)
1214 qla_printk(KERN_WARNING, ha, 1236 ql_log(ql_log_warn, vha, 0x7099,
1215 "Unable to protect flash after update.\n"); 1237 "Unable to protect flash after update.\n");
1216done: 1238done:
1217 if (optrom) 1239 if (optrom)
@@ -1324,9 +1346,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1324 ret = qla24xx_write_flash_dword(ha, 1346 ret = qla24xx_write_flash_dword(ha,
1325 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr)); 1347 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
1326 if (ret != QLA_SUCCESS) { 1348 if (ret != QLA_SUCCESS) {
1327 DEBUG9(qla_printk(KERN_WARNING, ha, 1349 ql_dbg(ql_dbg_user, vha, 0x709a,
1328 "Unable to program nvram address=%x data=%x.\n", 1350 "Unable to program nvram address=%x data=%x.\n",
1329 naddr, *dwptr)); 1351 naddr, *dwptr);
1330 break; 1352 break;
1331 } 1353 }
1332 } 1354 }
@@ -1476,7 +1498,7 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
1476 ha->fw_options[1] |= FO1_DISABLE_GPIO6_7; 1498 ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
1477 1499
1478 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { 1500 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1479 qla_printk(KERN_WARNING, ha, 1501 ql_log(ql_log_warn, vha, 0x709b,
1480 "Unable to update fw options (beacon on).\n"); 1502 "Unable to update fw options (beacon on).\n");
1481 return QLA_FUNCTION_FAILED; 1503 return QLA_FUNCTION_FAILED;
1482 } 1504 }
@@ -1541,7 +1563,7 @@ qla2x00_beacon_off(struct scsi_qla_host *vha)
1541 1563
1542 rval = qla2x00_set_fw_options(vha, ha->fw_options); 1564 rval = qla2x00_set_fw_options(vha, ha->fw_options);
1543 if (rval != QLA_SUCCESS) 1565 if (rval != QLA_SUCCESS)
1544 qla_printk(KERN_WARNING, ha, 1566 ql_log(ql_log_warn, vha, 0x709c,
1545 "Unable to update fw options (beacon off).\n"); 1567 "Unable to update fw options (beacon off).\n");
1546 return rval; 1568 return rval;
1547} 1569}
@@ -1616,7 +1638,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
1616 1638
1617 if (qla2x00_get_fw_options(vha, ha->fw_options) != 1639 if (qla2x00_get_fw_options(vha, ha->fw_options) !=
1618 QLA_SUCCESS) { 1640 QLA_SUCCESS) {
1619 qla_printk(KERN_WARNING, ha, 1641 ql_log(ql_log_warn, vha, 0x7009,
1620 "Unable to update fw options (beacon on).\n"); 1642 "Unable to update fw options (beacon on).\n");
1621 return QLA_FUNCTION_FAILED; 1643 return QLA_FUNCTION_FAILED;
1622 } 1644 }
@@ -1670,14 +1692,14 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
1670 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL; 1692 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
1671 1693
1672 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { 1694 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1673 qla_printk(KERN_WARNING, ha, 1695 ql_log(ql_log_warn, vha, 0x704d,
1674 "Unable to update fw options (beacon off).\n"); 1696 "Unable to update fw options (beacon on).\n");
1675 return QLA_FUNCTION_FAILED; 1697 return QLA_FUNCTION_FAILED;
1676 } 1698 }
1677 1699
1678 if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { 1700 if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1679 qla_printk(KERN_WARNING, ha, 1701 ql_log(ql_log_warn, vha, 0x704e,
1680 "Unable to get fw options (beacon off).\n"); 1702 "Unable to update fw options (beacon on).\n");
1681 return QLA_FUNCTION_FAILED; 1703 return QLA_FUNCTION_FAILED;
1682 } 1704 }
1683 1705
@@ -2389,10 +2411,9 @@ try_fast:
2389 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 2411 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
2390 &optrom_dma, GFP_KERNEL); 2412 &optrom_dma, GFP_KERNEL);
2391 if (!optrom) { 2413 if (!optrom) {
2392 qla_printk(KERN_DEBUG, ha, 2414 ql_log(ql_log_warn, vha, 0x00cc,
2393 "Unable to allocate memory for optrom burst read " 2415 "Unable to allocate memory for optrom burst read (%x KB).\n",
2394 "(%x KB).\n", OPTROM_BURST_SIZE / 1024); 2416 OPTROM_BURST_SIZE / 1024);
2395
2396 goto slow_read; 2417 goto slow_read;
2397 } 2418 }
2398 2419
@@ -2407,12 +2428,11 @@ try_fast:
2407 rval = qla2x00_dump_ram(vha, optrom_dma, 2428 rval = qla2x00_dump_ram(vha, optrom_dma,
2408 flash_data_addr(ha, faddr), burst); 2429 flash_data_addr(ha, faddr), burst);
2409 if (rval) { 2430 if (rval) {
2410 qla_printk(KERN_WARNING, ha, 2431 ql_log(ql_log_warn, vha, 0x00f5,
2411 "Unable to burst-read optrom segment " 2432 "Unable to burst-read optrom segment (%x/%x/%llx).\n",
2412 "(%x/%x/%llx).\n", rval, 2433 rval, flash_data_addr(ha, faddr),
2413 flash_data_addr(ha, faddr),
2414 (unsigned long long)optrom_dma); 2434 (unsigned long long)optrom_dma);
2415 qla_printk(KERN_WARNING, ha, 2435 ql_log(ql_log_warn, vha, 0x00f6,
2416 "Reverting to slow-read.\n"); 2436 "Reverting to slow-read.\n");
2417 2437
2418 dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 2438 dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
@@ -2556,8 +2576,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2556 if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 || 2576 if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 ||
2557 qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) { 2577 qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) {
2558 /* No signature */ 2578 /* No signature */
2559 DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM " 2579 ql_log(ql_log_fatal, vha, 0x0050,
2560 "signature.\n")); 2580 "No matching ROM signature.\n");
2561 ret = QLA_FUNCTION_FAILED; 2581 ret = QLA_FUNCTION_FAILED;
2562 break; 2582 break;
2563 } 2583 }
@@ -2573,8 +2593,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2573 qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' || 2593 qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' ||
2574 qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') { 2594 qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') {
2575 /* Incorrect header. */ 2595 /* Incorrect header. */
2576 DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not " 2596 ql_log(ql_log_fatal, vha, 0x0051,
2577 "found pcir_adr=%x.\n", pcids)); 2597 "PCI data struct not found pcir_adr=%x.\n", pcids);
2578 ret = QLA_FUNCTION_FAILED; 2598 ret = QLA_FUNCTION_FAILED;
2579 break; 2599 break;
2580 } 2600 }
@@ -2588,8 +2608,9 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2588 qla2x00_read_flash_byte(ha, pcids + 0x12); 2608 qla2x00_read_flash_byte(ha, pcids + 0x12);
2589 ha->bios_revision[1] = 2609 ha->bios_revision[1] =
2590 qla2x00_read_flash_byte(ha, pcids + 0x13); 2610 qla2x00_read_flash_byte(ha, pcids + 0x13);
2591 DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n", 2611 ql_dbg(ql_dbg_init, vha, 0x0052,
2592 ha->bios_revision[1], ha->bios_revision[0])); 2612 "Read BIOS %d.%d.\n",
2613 ha->bios_revision[1], ha->bios_revision[0]);
2593 break; 2614 break;
2594 case ROM_CODE_TYPE_FCODE: 2615 case ROM_CODE_TYPE_FCODE:
2595 /* Open Firmware standard for PCI (FCode). */ 2616 /* Open Firmware standard for PCI (FCode). */
@@ -2602,12 +2623,14 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2602 qla2x00_read_flash_byte(ha, pcids + 0x12); 2623 qla2x00_read_flash_byte(ha, pcids + 0x12);
2603 ha->efi_revision[1] = 2624 ha->efi_revision[1] =
2604 qla2x00_read_flash_byte(ha, pcids + 0x13); 2625 qla2x00_read_flash_byte(ha, pcids + 0x13);
2605 DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n", 2626 ql_dbg(ql_dbg_init, vha, 0x0053,
2606 ha->efi_revision[1], ha->efi_revision[0])); 2627 "Read EFI %d.%d.\n",
2628 ha->efi_revision[1], ha->efi_revision[0]);
2607 break; 2629 break;
2608 default: 2630 default:
2609 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code " 2631 ql_log(ql_log_warn, vha, 0x0054,
2610 "type %x at pcids %x.\n", code_type, pcids)); 2632 "Unrecognized code type %x at pcids %x.\n",
2633 code_type, pcids);
2611 break; 2634 break;
2612 } 2635 }
2613 2636
@@ -2627,21 +2650,28 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2627 2650
2628 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10, 2651 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
2629 8); 2652 8);
2630 DEBUG3(qla_printk(KERN_DEBUG, ha, "dumping fw ver from " 2653 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010a,
2631 "flash:\n")); 2654 "Dumping fw "
2632 DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8)); 2655 "ver from flash:.\n");
2656 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010b,
2657 (uint8_t *)dbyte, 8);
2633 2658
2634 if ((dcode[0] == 0xffff && dcode[1] == 0xffff && 2659 if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
2635 dcode[2] == 0xffff && dcode[3] == 0xffff) || 2660 dcode[2] == 0xffff && dcode[3] == 0xffff) ||
2636 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2661 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2637 dcode[3] == 0)) { 2662 dcode[3] == 0)) {
2638 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw " 2663 ql_log(ql_log_warn, vha, 0x0057,
2639 "revision at %x.\n", ha->flt_region_fw * 4)); 2664 "Unrecognized fw revision at %x.\n",
2665 ha->flt_region_fw * 4);
2640 } else { 2666 } else {
2641 /* values are in big endian */ 2667 /* values are in big endian */
2642 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1]; 2668 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
2643 ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3]; 2669 ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3];
2644 ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5]; 2670 ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5];
2671 ql_dbg(ql_dbg_init, vha, 0x0058,
2672 "FW Version: "
2673 "%d.%d.%d.\n", ha->fw_revision[0],
2674 ha->fw_revision[1], ha->fw_revision[2]);
2645 } 2675 }
2646 } 2676 }
2647 2677
@@ -2683,8 +2713,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2683 bcode = mbuf + (pcihdr % 4); 2713 bcode = mbuf + (pcihdr % 4);
2684 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) { 2714 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
2685 /* No signature */ 2715 /* No signature */
2686 DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM " 2716 ql_log(ql_log_fatal, vha, 0x0059,
2687 "signature.\n")); 2717 "No matching ROM signature.\n");
2688 ret = QLA_FUNCTION_FAILED; 2718 ret = QLA_FUNCTION_FAILED;
2689 break; 2719 break;
2690 } 2720 }
@@ -2699,8 +2729,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2699 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' || 2729 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
2700 bcode[0x2] != 'I' || bcode[0x3] != 'R') { 2730 bcode[0x2] != 'I' || bcode[0x3] != 'R') {
2701 /* Incorrect header. */ 2731 /* Incorrect header. */
2702 DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not " 2732 ql_log(ql_log_fatal, vha, 0x005a,
2703 "found pcir_adr=%x.\n", pcids)); 2733 "PCI data struct not found pcir_adr=%x.\n", pcids);
2704 ret = QLA_FUNCTION_FAILED; 2734 ret = QLA_FUNCTION_FAILED;
2705 break; 2735 break;
2706 } 2736 }
@@ -2712,26 +2742,30 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2712 /* Intel x86, PC-AT compatible. */ 2742 /* Intel x86, PC-AT compatible. */
2713 ha->bios_revision[0] = bcode[0x12]; 2743 ha->bios_revision[0] = bcode[0x12];
2714 ha->bios_revision[1] = bcode[0x13]; 2744 ha->bios_revision[1] = bcode[0x13];
2715 DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n", 2745 ql_dbg(ql_dbg_init, vha, 0x005b,
2716 ha->bios_revision[1], ha->bios_revision[0])); 2746 "Read BIOS %d.%d.\n",
2747 ha->bios_revision[1], ha->bios_revision[0]);
2717 break; 2748 break;
2718 case ROM_CODE_TYPE_FCODE: 2749 case ROM_CODE_TYPE_FCODE:
2719 /* Open Firmware standard for PCI (FCode). */ 2750 /* Open Firmware standard for PCI (FCode). */
2720 ha->fcode_revision[0] = bcode[0x12]; 2751 ha->fcode_revision[0] = bcode[0x12];
2721 ha->fcode_revision[1] = bcode[0x13]; 2752 ha->fcode_revision[1] = bcode[0x13];
2722 DEBUG3(qla_printk(KERN_DEBUG, ha, "read FCODE %d.%d.\n", 2753 ql_dbg(ql_dbg_init, vha, 0x005c,
2723 ha->fcode_revision[1], ha->fcode_revision[0])); 2754 "Read FCODE %d.%d.\n",
2755 ha->fcode_revision[1], ha->fcode_revision[0]);
2724 break; 2756 break;
2725 case ROM_CODE_TYPE_EFI: 2757 case ROM_CODE_TYPE_EFI:
2726 /* Extensible Firmware Interface (EFI). */ 2758 /* Extensible Firmware Interface (EFI). */
2727 ha->efi_revision[0] = bcode[0x12]; 2759 ha->efi_revision[0] = bcode[0x12];
2728 ha->efi_revision[1] = bcode[0x13]; 2760 ha->efi_revision[1] = bcode[0x13];
2729 DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n", 2761 ql_dbg(ql_dbg_init, vha, 0x005d,
2730 ha->efi_revision[1], ha->efi_revision[0])); 2762 "Read EFI %d.%d.\n",
2763 ha->efi_revision[1], ha->efi_revision[0]);
2731 break; 2764 break;
2732 default: 2765 default:
2733 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code " 2766 ql_log(ql_log_warn, vha, 0x005e,
2734 "type %x at pcids %x.\n", code_type, pcids)); 2767 "Unrecognized code type %x at pcids %x.\n",
2768 code_type, pcids);
2735 break; 2769 break;
2736 } 2770 }
2737 2771
@@ -2753,13 +2787,18 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2753 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 2787 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
2754 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2788 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2755 dcode[3] == 0)) { 2789 dcode[3] == 0)) {
2756 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw " 2790 ql_log(ql_log_warn, vha, 0x005f,
2757 "revision at %x.\n", ha->flt_region_fw * 4)); 2791 "Unrecognized fw revision at %x.\n",
2792 ha->flt_region_fw * 4);
2758 } else { 2793 } else {
2759 ha->fw_revision[0] = dcode[0]; 2794 ha->fw_revision[0] = dcode[0];
2760 ha->fw_revision[1] = dcode[1]; 2795 ha->fw_revision[1] = dcode[1];
2761 ha->fw_revision[2] = dcode[2]; 2796 ha->fw_revision[2] = dcode[2];
2762 ha->fw_revision[3] = dcode[3]; 2797 ha->fw_revision[3] = dcode[3];
2798 ql_dbg(ql_dbg_init, vha, 0x0060,
2799 "Firmware revision %d.%d.%d.%d.\n",
2800 ha->fw_revision[0], ha->fw_revision[1],
2801 ha->fw_revision[2], ha->fw_revision[3]);
2763 } 2802 }
2764 2803
2765 /* Check for golden firmware and get version if available */ 2804 /* Check for golden firmware and get version if available */
@@ -2775,9 +2814,9 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2775 2814
2776 if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF && 2815 if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF &&
2777 dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) { 2816 dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) {
2778 DEBUG2(qla_printk(KERN_INFO, ha, 2817 ql_log(ql_log_warn, vha, 0x0056,
2779 "%s(%ld): Unrecognized golden fw at 0x%x.\n", 2818 "Unrecognized golden fw at 0x%x.\n",
2780 __func__, vha->host_no, ha->flt_region_gold_fw * 4)); 2819 ha->flt_region_gold_fw * 4);
2781 return ret; 2820 return ret;
2782 } 2821 }
2783 2822
@@ -2843,9 +2882,9 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
2843 if (!ha->fcp_prio_cfg) { 2882 if (!ha->fcp_prio_cfg) {
2844 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE); 2883 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
2845 if (!ha->fcp_prio_cfg) { 2884 if (!ha->fcp_prio_cfg) {
2846 qla_printk(KERN_WARNING, ha, 2885 ql_log(ql_log_warn, vha, 0x00d5,
2847 "Unable to allocate memory for fcp priority data " 2886 "Unable to allocate memory for fcp priorty data (%x).\n",
2848 "(%x).\n", FCP_PRIO_CFG_SIZE); 2887 FCP_PRIO_CFG_SIZE);
2849 return QLA_FUNCTION_FAILED; 2888 return QLA_FUNCTION_FAILED;
2850 } 2889 }
2851 } 2890 }
@@ -2857,7 +2896,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
2857 ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg, 2896 ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg,
2858 fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE); 2897 fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE);
2859 2898
2860 if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 0)) 2899 if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0))
2861 goto fail; 2900 goto fail;
2862 2901
2863 /* read remaining FCP CMD config data from flash */ 2902 /* read remaining FCP CMD config data from flash */
@@ -2869,7 +2908,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
2869 fcp_prio_addr << 2, (len < max_len ? len : max_len)); 2908 fcp_prio_addr << 2, (len < max_len ? len : max_len));
2870 2909
2871 /* revalidate the entire FCP priority config data, including entries */ 2910 /* revalidate the entire FCP priority config data, including entries */
2872 if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 1)) 2911 if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1))
2873 goto fail; 2912 goto fail;
2874 2913
2875 ha->flags.fcp_prio_enabled = 1; 2914 ha->flags.fcp_prio_enabled = 1;
diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig
index 2c33ce6eac1e..0f5599e0abf6 100644
--- a/drivers/scsi/qla4xxx/Kconfig
+++ b/drivers/scsi/qla4xxx/Kconfig
@@ -1,6 +1,6 @@
1config SCSI_QLA_ISCSI 1config SCSI_QLA_ISCSI
2 tristate "QLogic ISP4XXX and ISP82XX host adapter family support" 2 tristate "QLogic ISP4XXX and ISP82XX host adapter family support"
3 depends on PCI && SCSI 3 depends on PCI && SCSI && NET
4 select SCSI_ISCSI_ATTRS 4 select SCSI_ISCSI_ATTRS
5 ---help--- 5 ---help---
6 This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX) 6 This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 28d9c9d6b4b4..fc3f168decb4 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -137,6 +137,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
137 host->host_blocked = host->max_host_blocked; 137 host->host_blocked = host->max_host_blocked;
138 break; 138 break;
139 case SCSI_MLQUEUE_DEVICE_BUSY: 139 case SCSI_MLQUEUE_DEVICE_BUSY:
140 case SCSI_MLQUEUE_EH_RETRY:
140 device->device_blocked = device->max_device_blocked; 141 device->device_blocked = device->max_device_blocked;
141 break; 142 break;
142 case SCSI_MLQUEUE_TARGET_BUSY: 143 case SCSI_MLQUEUE_TARGET_BUSY:
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 8a172d4f4564..5fbeadd96819 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -597,6 +597,28 @@ static DEVICE_ATTR(signalling, S_IRUGO,
597 show_spi_host_signalling, 597 show_spi_host_signalling,
598 store_spi_host_signalling); 598 store_spi_host_signalling);
599 599
600static ssize_t show_spi_host_width(struct device *cdev,
601 struct device_attribute *attr,
602 char *buf)
603{
604 struct Scsi_Host *shost = transport_class_to_shost(cdev);
605
606 return sprintf(buf, "%s\n", shost->max_id == 16 ? "wide" : "narrow");
607}
608static DEVICE_ATTR(host_width, S_IRUGO,
609 show_spi_host_width, NULL);
610
611static ssize_t show_spi_host_hba_id(struct device *cdev,
612 struct device_attribute *attr,
613 char *buf)
614{
615 struct Scsi_Host *shost = transport_class_to_shost(cdev);
616
617 return sprintf(buf, "%d\n", shost->this_id);
618}
619static DEVICE_ATTR(hba_id, S_IRUGO,
620 show_spi_host_hba_id, NULL);
621
600#define DV_SET(x, y) \ 622#define DV_SET(x, y) \
601 if(i->f->set_##x) \ 623 if(i->f->set_##x) \
602 i->f->set_##x(sdev->sdev_target, y) 624 i->f->set_##x(sdev->sdev_target, y)
@@ -1380,6 +1402,8 @@ static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class,
1380 1402
1381static struct attribute *host_attributes[] = { 1403static struct attribute *host_attributes[] = {
1382 &dev_attr_signalling.attr, 1404 &dev_attr_signalling.attr,
1405 &dev_attr_host_width.attr,
1406 &dev_attr_hba_id.attr,
1383 NULL 1407 NULL
1384}; 1408};
1385 1409
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index d6702e57d428..dc8d022c07a1 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -34,6 +34,9 @@ static LIST_HEAD(clock_list);
34static DEFINE_SPINLOCK(clock_lock); 34static DEFINE_SPINLOCK(clock_lock);
35static DEFINE_MUTEX(clock_list_sem); 35static DEFINE_MUTEX(clock_list_sem);
36 36
37/* clock disable operations are not passed on to hardware during boot */
38static int allow_disable;
39
37void clk_rate_table_build(struct clk *clk, 40void clk_rate_table_build(struct clk *clk,
38 struct cpufreq_frequency_table *freq_table, 41 struct cpufreq_frequency_table *freq_table,
39 int nr_freqs, 42 int nr_freqs,
@@ -228,7 +231,7 @@ static void __clk_disable(struct clk *clk)
228 return; 231 return;
229 232
230 if (!(--clk->usecount)) { 233 if (!(--clk->usecount)) {
231 if (likely(clk->ops && clk->ops->disable)) 234 if (likely(allow_disable && clk->ops && clk->ops->disable))
232 clk->ops->disable(clk); 235 clk->ops->disable(clk);
233 if (likely(clk->parent)) 236 if (likely(clk->parent))
234 __clk_disable(clk->parent); 237 __clk_disable(clk->parent);
@@ -393,7 +396,7 @@ int clk_register(struct clk *clk)
393{ 396{
394 int ret; 397 int ret;
395 398
396 if (clk == NULL || IS_ERR(clk)) 399 if (IS_ERR_OR_NULL(clk))
397 return -EINVAL; 400 return -EINVAL;
398 401
399 /* 402 /*
@@ -744,3 +747,25 @@ err_out:
744 return err; 747 return err;
745} 748}
746late_initcall(clk_debugfs_init); 749late_initcall(clk_debugfs_init);
750
751static int __init clk_late_init(void)
752{
753 unsigned long flags;
754 struct clk *clk;
755
756 /* disable all clocks with zero use count */
757 mutex_lock(&clock_list_sem);
758 spin_lock_irqsave(&clock_lock, flags);
759
760 list_for_each_entry(clk, &clock_list, node)
761 if (!clk->usecount && clk->ops && clk->ops->disable)
762 clk->ops->disable(clk);
763
764 /* from now on allow clock disable operations */
765 allow_disable = 1;
766
767 spin_unlock_irqrestore(&clock_lock, flags);
768 mutex_unlock(&clock_list_sem);
769 return 0;
770}
771late_initcall(clk_late_init);
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
index f33e2dd97934..33b2ed451e09 100644
--- a/drivers/sh/intc/chip.c
+++ b/drivers/sh/intc/chip.c
@@ -186,6 +186,9 @@ static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
186 !defined(CONFIG_CPU_SUBTYPE_SH7709) 186 !defined(CONFIG_CPU_SUBTYPE_SH7709)
187 [IRQ_TYPE_LEVEL_HIGH] = VALID(3), 187 [IRQ_TYPE_LEVEL_HIGH] = VALID(3),
188#endif 188#endif
189#if defined(CONFIG_ARCH_SH7372)
190 [IRQ_TYPE_EDGE_BOTH] = VALID(4),
191#endif
189}; 192};
190 193
191static int intc_set_type(struct irq_data *data, unsigned int type) 194static int intc_set_type(struct irq_data *data, unsigned int type)
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index eba88c749fb1..730b4a37b823 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2267,17 +2267,13 @@ static int __devexit
2267pl022_remove(struct amba_device *adev) 2267pl022_remove(struct amba_device *adev)
2268{ 2268{
2269 struct pl022 *pl022 = amba_get_drvdata(adev); 2269 struct pl022 *pl022 = amba_get_drvdata(adev);
2270 int status = 0; 2270
2271 if (!pl022) 2271 if (!pl022)
2272 return 0; 2272 return 0;
2273 2273
2274 /* Remove the queue */ 2274 /* Remove the queue */
2275 status = destroy_queue(pl022); 2275 if (destroy_queue(pl022) != 0)
2276 if (status != 0) { 2276 dev_err(&adev->dev, "queue remove failed\n");
2277 dev_err(&adev->dev,
2278 "queue remove failed (%d)\n", status);
2279 return status;
2280 }
2281 load_ssp_default_config(pl022); 2277 load_ssp_default_config(pl022);
2282 pl022_dma_remove(pl022); 2278 pl022_dma_remove(pl022);
2283 free_irq(adev->irq[0], pl022); 2279 free_irq(adev->irq[0], pl022);
@@ -2289,7 +2285,6 @@ pl022_remove(struct amba_device *adev)
2289 spi_unregister_master(pl022->master); 2285 spi_unregister_master(pl022->master);
2290 spi_master_put(pl022->master); 2286 spi_master_put(pl022->master);
2291 amba_set_drvdata(adev, NULL); 2287 amba_set_drvdata(adev, NULL);
2292 dev_dbg(&adev->dev, "remove succeeded\n");
2293 return 0; 2288 return 0;
2294} 2289}
2295 2290
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c
index c01c0cb0af4e..b99a11a9dd69 100644
--- a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c
+++ b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c
@@ -812,7 +812,7 @@ int AthCreateCommandList(struct ps_cmd_packet **HciPacketList, u32 *numPackets)
812 for(count = 0; count < Patch_Count; count++) { 812 for(count = 0; count < Patch_Count; count++) {
813 813
814 AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Freeing Patch Buffer %d \r\n",count)); 814 AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Freeing Patch Buffer %d \r\n",count));
815 kfree(RamPatch[Patch_Count].Data); 815 kfree(RamPatch[count].Data);
816 } 816 }
817 817
818 for(count = 0; count < Tag_Count; count++) { 818 for(count = 0; count < Tag_Count; count++) {
diff --git a/drivers/staging/brcm80211/brcmsmac/otp.c b/drivers/staging/brcm80211/brcmsmac/otp.c
index 34253cf37812..4a70180eba5d 100644
--- a/drivers/staging/brcm80211/brcmsmac/otp.c
+++ b/drivers/staging/brcm80211/brcmsmac/otp.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/errno.h> 18#include <linux/errno.h>
19#include <linux/string.h>
19 20
20#include <brcm_hw_ids.h> 21#include <brcm_hw_ids.h>
21#include <chipcommon.h> 22#include <chipcommon.h>
diff --git a/drivers/staging/brcm80211/brcmsmac/types.h b/drivers/staging/brcm80211/brcmsmac/types.h
index bbf21897ae0e..823b5e4672e2 100644
--- a/drivers/staging/brcm80211/brcmsmac/types.h
+++ b/drivers/staging/brcm80211/brcmsmac/types.h
@@ -18,6 +18,7 @@
18#define _BRCM_TYPES_H_ 18#define _BRCM_TYPES_H_
19 19
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/io.h>
21 22
22/* Bus types */ 23/* Bus types */
23#define SI_BUS 0 /* SOC Interconnect */ 24#define SI_BUS 0 /* SOC Interconnect */
diff --git a/drivers/staging/dt3155v4l/dt3155v4l.c b/drivers/staging/dt3155v4l/dt3155v4l.c
index fe02d22274b4..05aa41cf875b 100644
--- a/drivers/staging/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/dt3155v4l/dt3155v4l.c
@@ -22,6 +22,7 @@
22#include <linux/stringify.h> 22#include <linux/stringify.h>
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/kthread.h> 24#include <linux/kthread.h>
25#include <linux/slab.h>
25#include <media/v4l2-dev.h> 26#include <media/v4l2-dev.h>
26#include <media/v4l2-ioctl.h> 27#include <media/v4l2-ioctl.h>
27#include <media/videobuf2-dma-contig.h> 28#include <media/videobuf2-dma-contig.h>
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
index 627a98b4ec30..9e728b3415e3 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
@@ -22,6 +22,7 @@
22#include <linux/string.h> 22#include <linux/string.h>
23#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
24#include <linux/netdevice.h> 24#include <linux/netdevice.h>
25#include <asm/io.h>
25#include <asm/uaccess.h> 26#include <asm/uaccess.h>
26#include "ft1000.h" 27#include "ft1000.h"
27 28
diff --git a/drivers/staging/gma500/gem_glue.c b/drivers/staging/gma500/gem_glue.c
index 779ac1a12d24..daac12120653 100644
--- a/drivers/staging/gma500/gem_glue.c
+++ b/drivers/staging/gma500/gem_glue.c
@@ -20,26 +20,6 @@
20#include <drm/drmP.h> 20#include <drm/drmP.h>
21#include <drm/drm.h> 21#include <drm/drm.h>
22 22
23/**
24 * Initialize an already allocated GEM object of the specified size with
25 * no GEM provided backing store. Instead the caller is responsible for
26 * backing the object and handling it.
27 */
28int drm_gem_private_object_init(struct drm_device *dev,
29 struct drm_gem_object *obj, size_t size)
30{
31 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
32
33 obj->dev = dev;
34 obj->filp = NULL;
35
36 kref_init(&obj->refcount);
37 atomic_set(&obj->handle_count, 0);
38 obj->size = size;
39
40 return 0;
41}
42
43void drm_gem_object_release_wrap(struct drm_gem_object *obj) 23void drm_gem_object_release_wrap(struct drm_gem_object *obj)
44{ 24{
45 /* Remove the list map if one is present */ 25 /* Remove the list map if one is present */
@@ -51,8 +31,7 @@ void drm_gem_object_release_wrap(struct drm_gem_object *obj)
51 kfree(list->map); 31 kfree(list->map);
52 list->map = NULL; 32 list->map = NULL;
53 } 33 }
54 if (obj->filp) 34 drm_gem_object_release(obj);
55 drm_gem_object_release(obj);
56} 35}
57 36
58/** 37/**
diff --git a/drivers/staging/gma500/gem_glue.h b/drivers/staging/gma500/gem_glue.h
index a0f2bc4e4ae7..ce5ce30f74db 100644
--- a/drivers/staging/gma500/gem_glue.h
+++ b/drivers/staging/gma500/gem_glue.h
@@ -1,4 +1,2 @@
1extern void drm_gem_object_release_wrap(struct drm_gem_object *obj); 1extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
2extern int drm_gem_private_object_init(struct drm_device *dev,
3 struct drm_gem_object *obj, size_t size);
4extern int gem_create_mmap_offset(struct drm_gem_object *obj); 2extern int gem_create_mmap_offset(struct drm_gem_object *obj);
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.c b/drivers/staging/gma500/mdfld_dsi_dbi.c
index 02e17c9c8637..fd211f3467c4 100644
--- a/drivers/staging/gma500/mdfld_dsi_dbi.c
+++ b/drivers/staging/gma500/mdfld_dsi_dbi.c
@@ -711,10 +711,11 @@ struct mdfld_dsi_encoder *mdfld_dsi_dbi_init(struct drm_device *dev,
711 /* Create drm encoder object */ 711 /* Create drm encoder object */
712 connector = &dsi_connector->base.base; 712 connector = &dsi_connector->base.base;
713 encoder = &dbi_output->base.base; 713 encoder = &dbi_output->base.base;
714 /* Review this if we ever get MIPI-HDMI bridges or similar */
714 drm_encoder_init(dev, 715 drm_encoder_init(dev,
715 encoder, 716 encoder,
716 p_funcs->encoder_funcs, 717 p_funcs->encoder_funcs,
717 DRM_MODE_ENCODER_MIPI); 718 DRM_MODE_ENCODER_LVDS);
718 drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs); 719 drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs);
719 720
720 /* Attach to given connector */ 721 /* Attach to given connector */
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.h b/drivers/staging/gma500/mdfld_dsi_dbi.h
index dc6242c51d0b..f0fa986fd934 100644
--- a/drivers/staging/gma500/mdfld_dsi_dbi.h
+++ b/drivers/staging/gma500/mdfld_dsi_dbi.h
@@ -42,9 +42,6 @@
42#include "mdfld_dsi_output.h" 42#include "mdfld_dsi_output.h"
43#include "mdfld_output.h" 43#include "mdfld_output.h"
44 44
45#define DRM_MODE_ENCODER_MIPI 5
46
47
48/* 45/*
49 * DBI encoder which inherits from mdfld_dsi_encoder 46 * DBI encoder which inherits from mdfld_dsi_encoder
50 */ 47 */
diff --git a/drivers/staging/gma500/mdfld_dsi_dpi.c b/drivers/staging/gma500/mdfld_dsi_dpi.c
index 6e03a91e947e..e685f1217baa 100644
--- a/drivers/staging/gma500/mdfld_dsi_dpi.c
+++ b/drivers/staging/gma500/mdfld_dsi_dpi.c
@@ -777,10 +777,15 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
777 /* Create drm encoder object */ 777 /* Create drm encoder object */
778 connector = &dsi_connector->base.base; 778 connector = &dsi_connector->base.base;
779 encoder = &dpi_output->base.base; 779 encoder = &dpi_output->base.base;
780 /*
781 * On existing hardware this will be a panel of some form,
782 * if future devices also have HDMI bridges this will need
783 * revisiting
784 */
780 drm_encoder_init(dev, 785 drm_encoder_init(dev,
781 encoder, 786 encoder,
782 p_funcs->encoder_funcs, 787 p_funcs->encoder_funcs,
783 DRM_MODE_ENCODER_MIPI); 788 DRM_MODE_ENCODER_LVDS);
784 drm_encoder_helper_add(encoder, 789 drm_encoder_helper_add(encoder,
785 p_funcs->encoder_helper_funcs); 790 p_funcs->encoder_helper_funcs);
786 791
diff --git a/drivers/staging/gma500/mdfld_dsi_output.c b/drivers/staging/gma500/mdfld_dsi_output.c
index 7536095c30a0..9050c0f78b15 100644
--- a/drivers/staging/gma500/mdfld_dsi_output.c
+++ b/drivers/staging/gma500/mdfld_dsi_output.c
@@ -955,7 +955,9 @@ void mdfld_dsi_output_init(struct drm_device *dev,
955 psb_output->type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2; 955 psb_output->type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2;
956 956
957 connector = &psb_output->base; 957 connector = &psb_output->base;
958 drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs, DRM_MODE_CONNECTOR_MIPI); 958 /* Revisit type if MIPI/HDMI bridges ever appear on Medfield */
959 drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
960 DRM_MODE_CONNECTOR_LVDS);
959 drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs); 961 drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
960 962
961 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 963 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/staging/gma500/medfield.h b/drivers/staging/gma500/medfield.h
index 38165e8367e5..09e9687431f1 100644
--- a/drivers/staging/gma500/medfield.h
+++ b/drivers/staging/gma500/medfield.h
@@ -21,8 +21,6 @@
21 * DEALINGS IN THE SOFTWARE. 21 * DEALINGS IN THE SOFTWARE.
22 */ 22 */
23 23
24#define DRM_MODE_ENCODER_MIPI 5
25
26/* Medfield DSI controller registers */ 24/* Medfield DSI controller registers */
27 25
28#define MIPIA_DEVICE_READY_REG 0xb000 26#define MIPIA_DEVICE_READY_REG 0xb000
diff --git a/drivers/staging/gma500/psb_drv.h b/drivers/staging/gma500/psb_drv.h
index 72f487a2a1b7..fd4732dd783a 100644
--- a/drivers/staging/gma500/psb_drv.h
+++ b/drivers/staging/gma500/psb_drv.h
@@ -35,7 +35,6 @@
35 35
36/* Append new drm mode definition here, align with libdrm definition */ 36/* Append new drm mode definition here, align with libdrm definition */
37#define DRM_MODE_SCALE_NO_SCALE 2 37#define DRM_MODE_SCALE_NO_SCALE 2
38#define DRM_MODE_CONNECTOR_MIPI 15
39 38
40enum { 39enum {
41 CHIP_PSB_8108 = 0, /* Poulsbo */ 40 CHIP_PSB_8108 = 0, /* Poulsbo */
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
index 3612574ca520..d286b2223181 100644
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -325,7 +325,7 @@ static int blkvsc_do_operation(struct block_device_context *blkdev,
325 325
326 page_buf = alloc_page(GFP_KERNEL); 326 page_buf = alloc_page(GFP_KERNEL);
327 if (!page_buf) { 327 if (!page_buf) {
328 kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); 328 kmem_cache_free(blkdev->request_pool, blkvsc_req);
329 return -ENOMEM; 329 return -ENOMEM;
330 } 330 }
331 331
@@ -422,7 +422,7 @@ cleanup:
422 422
423 __free_page(page_buf); 423 __free_page(page_buf);
424 424
425 kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); 425 kmem_cache_free(blkdev->request_pool, blkvsc_req);
426 426
427 return ret; 427 return ret;
428} 428}
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
index bf1988884e93..cf5d15da76ad 100644
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -311,13 +311,17 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
311 mutex_lock(&indio_dev->mlock); 311 mutex_lock(&indio_dev->mlock);
312 addr = adis16203_addresses[chan->address][0]; 312 addr = adis16203_addresses[chan->address][0];
313 ret = adis16203_spi_read_reg_16(indio_dev, addr, &val16); 313 ret = adis16203_spi_read_reg_16(indio_dev, addr, &val16);
314 if (ret) 314 if (ret) {
315 mutex_unlock(&indio_dev->mlock);
315 return ret; 316 return ret;
317 }
316 318
317 if (val16 & ADIS16203_ERROR_ACTIVE) { 319 if (val16 & ADIS16203_ERROR_ACTIVE) {
318 ret = adis16203_check_status(indio_dev); 320 ret = adis16203_check_status(indio_dev);
319 if (ret) 321 if (ret) {
322 mutex_unlock(&indio_dev->mlock);
320 return ret; 323 return ret;
324 }
321 } 325 }
322 val16 = val16 & ((1 << chan->scan_type.realbits) - 1); 326 val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
323 if (chan->scan_type.sign == 's') 327 if (chan->scan_type.sign == 's')
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
index cfd09b3b9937..3e2b62654b7d 100644
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ b/drivers/staging/iio/accel/adis16204_core.c
@@ -341,13 +341,17 @@ static int adis16204_read_raw(struct iio_dev *indio_dev,
341 mutex_lock(&indio_dev->mlock); 341 mutex_lock(&indio_dev->mlock);
342 addr = adis16204_addresses[chan->address][0]; 342 addr = adis16204_addresses[chan->address][0];
343 ret = adis16204_spi_read_reg_16(indio_dev, addr, &val16); 343 ret = adis16204_spi_read_reg_16(indio_dev, addr, &val16);
344 if (ret) 344 if (ret) {
345 mutex_unlock(&indio_dev->mlock);
345 return ret; 346 return ret;
347 }
346 348
347 if (val16 & ADIS16204_ERROR_ACTIVE) { 349 if (val16 & ADIS16204_ERROR_ACTIVE) {
348 ret = adis16204_check_status(indio_dev); 350 ret = adis16204_check_status(indio_dev);
349 if (ret) 351 if (ret) {
352 mutex_unlock(&indio_dev->mlock);
350 return ret; 353 return ret;
354 }
351 } 355 }
352 val16 = val16 & ((1 << chan->scan_type.realbits) - 1); 356 val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
353 if (chan->scan_type.sign == 's') 357 if (chan->scan_type.sign == 's')
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index 55f3a7bcaf0a..bec1fa8de9b9 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -337,13 +337,17 @@ static int adis16209_read_raw(struct iio_dev *indio_dev,
337 mutex_lock(&indio_dev->mlock); 337 mutex_lock(&indio_dev->mlock);
338 addr = adis16209_addresses[chan->address][0]; 338 addr = adis16209_addresses[chan->address][0];
339 ret = adis16209_spi_read_reg_16(indio_dev, addr, &val16); 339 ret = adis16209_spi_read_reg_16(indio_dev, addr, &val16);
340 if (ret) 340 if (ret) {
341 mutex_unlock(&indio_dev->mlock);
341 return ret; 342 return ret;
343 }
342 344
343 if (val16 & ADIS16209_ERROR_ACTIVE) { 345 if (val16 & ADIS16209_ERROR_ACTIVE) {
344 ret = adis16209_check_status(indio_dev); 346 ret = adis16209_check_status(indio_dev);
345 if (ret) 347 if (ret) {
348 mutex_unlock(&indio_dev->mlock);
346 return ret; 349 return ret;
350 }
347 } 351 }
348 val16 = val16 & ((1 << chan->scan_type.realbits) - 1); 352 val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
349 if (chan->scan_type.sign == 's') 353 if (chan->scan_type.sign == 's')
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index 4a4eafc58630..aee8b69173c4 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -370,13 +370,17 @@ static int adis16240_read_raw(struct iio_dev *indio_dev,
370 mutex_lock(&indio_dev->mlock); 370 mutex_lock(&indio_dev->mlock);
371 addr = adis16240_addresses[chan->address][0]; 371 addr = adis16240_addresses[chan->address][0];
372 ret = adis16240_spi_read_reg_16(indio_dev, addr, &val16); 372 ret = adis16240_spi_read_reg_16(indio_dev, addr, &val16);
373 if (ret) 373 if (ret) {
374 mutex_unlock(&indio_dev->mlock);
374 return ret; 375 return ret;
376 }
375 377
376 if (val16 & ADIS16240_ERROR_ACTIVE) { 378 if (val16 & ADIS16240_ERROR_ACTIVE) {
377 ret = adis16240_check_status(indio_dev); 379 ret = adis16240_check_status(indio_dev);
378 if (ret) 380 if (ret) {
381 mutex_unlock(&indio_dev->mlock);
379 return ret; 382 return ret;
383 }
380 } 384 }
381 val16 = val16 & ((1 << chan->scan_type.realbits) - 1); 385 val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
382 if (chan->scan_type.sign == 's') 386 if (chan->scan_type.sign == 's')
diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/staging/iio/gyro/adis16260_core.c
index 05797f404bea..f2d43cfcc493 100644
--- a/drivers/staging/iio/gyro/adis16260_core.c
+++ b/drivers/staging/iio/gyro/adis16260_core.c
@@ -446,13 +446,17 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
446 mutex_lock(&indio_dev->mlock); 446 mutex_lock(&indio_dev->mlock);
447 addr = adis16260_addresses[chan->address][0]; 447 addr = adis16260_addresses[chan->address][0];
448 ret = adis16260_spi_read_reg_16(indio_dev, addr, &val16); 448 ret = adis16260_spi_read_reg_16(indio_dev, addr, &val16);
449 if (ret) 449 if (ret) {
450 mutex_unlock(&indio_dev->mlock);
450 return ret; 451 return ret;
452 }
451 453
452 if (val16 & ADIS16260_ERROR_ACTIVE) { 454 if (val16 & ADIS16260_ERROR_ACTIVE) {
453 ret = adis16260_check_status(indio_dev); 455 ret = adis16260_check_status(indio_dev);
454 if (ret) 456 if (ret) {
457 mutex_unlock(&indio_dev->mlock);
455 return ret; 458 return ret;
459 }
456 } 460 }
457 val16 = val16 & ((1 << chan->scan_type.realbits) - 1); 461 val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
458 if (chan->scan_type.sign == 's') 462 if (chan->scan_type.sign == 's')
diff --git a/drivers/staging/nvec/TODO b/drivers/staging/nvec/TODO
index 77b47f763f22..649d6b70deaa 100644
--- a/drivers/staging/nvec/TODO
+++ b/drivers/staging/nvec/TODO
@@ -4,5 +4,7 @@ ToDo list (incomplete, unordered)
4 - add compile as module support 4 - add compile as module support
5 - move nvec devices to mfd cells? 5 - move nvec devices to mfd cells?
6 - adjust to kernel style 6 - adjust to kernel style
7 7 - fix clk usage
8 8 should not be using clk_get_sys(), but clk_get(&pdev->dev, conn)
9 where conn is either NULL if the device only has one clock, or
10 the device specific name if it has multiple clocks.
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index 9c0d2936e486..c3d73f8431ae 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -26,6 +26,7 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/interrupt.h>
29#include <linux/phy.h> 30#include <linux/phy.h>
30#include <linux/ratelimit.h> 31#include <linux/ratelimit.h>
31#include <net/dst.h> 32#include <net/dst.h>
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index 970825421884..d0e2d514968a 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -26,6 +26,7 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/interrupt.h>
29#include <net/dst.h> 30#include <net/dst.h>
30 31
31#include <asm/octeon/octeon.h> 32#include <asm/octeon/octeon.h>
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index 6766f468639f..4bb5fffca5b9 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -399,10 +399,7 @@ download_firmware_fail:
399 399
400} 400}
401 401
402 402MODULE_FIRMWARE("RTL8192U/boot.img");
403 403MODULE_FIRMWARE("RTL8192U/main.img");
404 404MODULE_FIRMWARE("RTL8192U/data.img");
405
406
407
408 405
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 5ff59f27d101..16c73fbff51f 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -66,12 +66,6 @@ static int msi_en;
66module_param(msi_en, int, S_IRUGO | S_IWUSR); 66module_param(msi_en, int, S_IRUGO | S_IWUSR);
67MODULE_PARM_DESC(msi_en, "enable msi"); 67MODULE_PARM_DESC(msi_en, "enable msi");
68 68
69/* These are used to make sure the module doesn't unload before all the
70 * threads have exited.
71 */
72static atomic_t total_threads = ATOMIC_INIT(0);
73static DECLARE_COMPLETION(threads_gone);
74
75static irqreturn_t rtsx_interrupt(int irq, void *dev_id); 69static irqreturn_t rtsx_interrupt(int irq, void *dev_id);
76 70
77/*********************************************************************** 71/***********************************************************************
@@ -192,7 +186,7 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
192 /* enqueue the command and wake up the control thread */ 186 /* enqueue the command and wake up the control thread */
193 srb->scsi_done = done; 187 srb->scsi_done = done;
194 chip->srb = srb; 188 chip->srb = srb;
195 up(&(dev->sema)); 189 complete(&dev->cmnd_ready);
196 190
197 return 0; 191 return 0;
198} 192}
@@ -475,7 +469,7 @@ static int rtsx_control_thread(void *__dev)
475 current->flags |= PF_NOFREEZE; 469 current->flags |= PF_NOFREEZE;
476 470
477 for (;;) { 471 for (;;) {
478 if (down_interruptible(&dev->sema)) 472 if (wait_for_completion_interruptible(&dev->cmnd_ready))
479 break; 473 break;
480 474
481 /* lock the device pointers */ 475 /* lock the device pointers */
@@ -557,8 +551,6 @@ SkipForAbort:
557 mutex_unlock(&dev->dev_mutex); 551 mutex_unlock(&dev->dev_mutex);
558 } /* for (;;) */ 552 } /* for (;;) */
559 553
560 scsi_host_put(host);
561
562 /* notify the exit routine that we're actually exiting now 554 /* notify the exit routine that we're actually exiting now
563 * 555 *
564 * complete()/wait_for_completion() is similar to up()/down(), 556 * complete()/wait_for_completion() is similar to up()/down(),
@@ -573,7 +565,7 @@ SkipForAbort:
573 * This is important in preemption kernels, which transfer the flow 565 * This is important in preemption kernels, which transfer the flow
574 * of execution immediately upon a complete(). 566 * of execution immediately upon a complete().
575 */ 567 */
576 complete_and_exit(&threads_gone, 0); 568 complete_and_exit(&dev->control_exit, 0);
577} 569}
578 570
579 571
@@ -581,7 +573,6 @@ static int rtsx_polling_thread(void *__dev)
581{ 573{
582 struct rtsx_dev *dev = (struct rtsx_dev *)__dev; 574 struct rtsx_dev *dev = (struct rtsx_dev *)__dev;
583 struct rtsx_chip *chip = dev->chip; 575 struct rtsx_chip *chip = dev->chip;
584 struct Scsi_Host *host = rtsx_to_host(dev);
585 struct sd_info *sd_card = &(chip->sd_card); 576 struct sd_info *sd_card = &(chip->sd_card);
586 struct xd_info *xd_card = &(chip->xd_card); 577 struct xd_info *xd_card = &(chip->xd_card);
587 struct ms_info *ms_card = &(chip->ms_card); 578 struct ms_info *ms_card = &(chip->ms_card);
@@ -621,8 +612,7 @@ static int rtsx_polling_thread(void *__dev)
621 mutex_unlock(&dev->dev_mutex); 612 mutex_unlock(&dev->dev_mutex);
622 } 613 }
623 614
624 scsi_host_put(host); 615 complete_and_exit(&dev->polling_exit, 0);
625 complete_and_exit(&threads_gone, 0);
626} 616}
627 617
628/* 618/*
@@ -699,29 +689,38 @@ static void rtsx_release_resources(struct rtsx_dev *dev)
699{ 689{
700 printk(KERN_INFO "-- %s\n", __func__); 690 printk(KERN_INFO "-- %s\n", __func__);
701 691
692 /* Tell the control thread to exit. The SCSI host must
693 * already have been removed so it won't try to queue
694 * any more commands.
695 */
696 printk(KERN_INFO "-- sending exit command to thread\n");
697 complete(&dev->cmnd_ready);
698 if (dev->ctl_thread)
699 wait_for_completion(&dev->control_exit);
700 if (dev->polling_thread)
701 wait_for_completion(&dev->polling_exit);
702
703 wait_timeout(200);
704
702 if (dev->rtsx_resv_buf) { 705 if (dev->rtsx_resv_buf) {
703 dma_free_coherent(&(dev->pci->dev), HOST_CMDS_BUF_LEN, 706 dma_free_coherent(&(dev->pci->dev), RTSX_RESV_BUF_LEN,
704 dev->rtsx_resv_buf, dev->rtsx_resv_buf_addr); 707 dev->rtsx_resv_buf, dev->rtsx_resv_buf_addr);
705 dev->chip->host_cmds_ptr = NULL; 708 dev->chip->host_cmds_ptr = NULL;
706 dev->chip->host_sg_tbl_ptr = NULL; 709 dev->chip->host_sg_tbl_ptr = NULL;
707 } 710 }
708 711
709 pci_disable_device(dev->pci); 712 if (dev->irq > 0)
710 pci_release_regions(dev->pci);
711
712 if (dev->irq > 0) {
713 free_irq(dev->irq, (void *)dev); 713 free_irq(dev->irq, (void *)dev);
714 } 714 if (dev->chip->msi_en)
715 if (dev->chip->msi_en) {
716 pci_disable_msi(dev->pci); 715 pci_disable_msi(dev->pci);
717 } 716 if (dev->remap_addr)
717 iounmap(dev->remap_addr);
718 718
719 /* Tell the control thread to exit. The SCSI host must 719 pci_disable_device(dev->pci);
720 * already have been removed so it won't try to queue 720 pci_release_regions(dev->pci);
721 * any more commands. 721
722 */ 722 rtsx_release_chip(dev->chip);
723 printk(KERN_INFO "-- sending exit command to thread\n"); 723 kfree(dev->chip);
724 up(&dev->sema);
725} 724}
726 725
727/* First stage of disconnect processing: stop all commands and remove 726/* First stage of disconnect processing: stop all commands and remove
@@ -739,6 +738,7 @@ static void quiesce_and_remove_host(struct rtsx_dev *dev)
739 scsi_unlock(host); 738 scsi_unlock(host);
740 mutex_unlock(&dev->dev_mutex); 739 mutex_unlock(&dev->dev_mutex);
741 wake_up(&dev->delay_wait); 740 wake_up(&dev->delay_wait);
741 wait_for_completion(&dev->scanning_done);
742 742
743 /* Wait some time to let other threads exist */ 743 /* Wait some time to let other threads exist */
744 wait_timeout(100); 744 wait_timeout(100);
@@ -793,8 +793,7 @@ static int rtsx_scan_thread(void *__dev)
793 /* Should we unbind if no devices were detected? */ 793 /* Should we unbind if no devices were detected? */
794 } 794 }
795 795
796 scsi_host_put(rtsx_to_host(dev)); 796 complete_and_exit(&dev->scanning_done, 0);
797 complete_and_exit(&threads_gone, 0);
798} 797}
799 798
800static void rtsx_init_options(struct rtsx_chip *chip) 799static void rtsx_init_options(struct rtsx_chip *chip)
@@ -941,8 +940,11 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
941 940
942 spin_lock_init(&dev->reg_lock); 941 spin_lock_init(&dev->reg_lock);
943 mutex_init(&(dev->dev_mutex)); 942 mutex_init(&(dev->dev_mutex));
944 sema_init(&(dev->sema), 0); 943 init_completion(&dev->cmnd_ready);
944 init_completion(&dev->control_exit);
945 init_completion(&dev->polling_exit);
945 init_completion(&(dev->notify)); 946 init_completion(&(dev->notify));
947 init_completion(&dev->scanning_done);
946 init_waitqueue_head(&dev->delay_wait); 948 init_waitqueue_head(&dev->delay_wait);
947 949
948 dev->pci = pci; 950 dev->pci = pci;
@@ -992,28 +994,22 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
992 pci_set_master(pci); 994 pci_set_master(pci);
993 synchronize_irq(dev->irq); 995 synchronize_irq(dev->irq);
994 996
995 err = scsi_add_host(host, &pci->dev);
996 if (err) {
997 printk(KERN_ERR "Unable to add the scsi host\n");
998 goto errout;
999 }
1000
1001 rtsx_init_chip(dev->chip); 997 rtsx_init_chip(dev->chip);
1002 998
1003 /* Start up our control thread */ 999 /* Start up our control thread */
1004 th = kthread_create(rtsx_control_thread, dev, CR_DRIVER_NAME); 1000 th = kthread_run(rtsx_control_thread, dev, CR_DRIVER_NAME);
1005 if (IS_ERR(th)) { 1001 if (IS_ERR(th)) {
1006 printk(KERN_ERR "Unable to start control thread\n"); 1002 printk(KERN_ERR "Unable to start control thread\n");
1007 err = PTR_ERR(th); 1003 err = PTR_ERR(th);
1008 goto errout; 1004 goto errout;
1009 } 1005 }
1006 dev->ctl_thread = th;
1010 1007
1011 /* Take a reference to the host for the control thread and 1008 err = scsi_add_host(host, &pci->dev);
1012 * count it among all the threads we have launched. Then 1009 if (err) {
1013 * start it up. */ 1010 printk(KERN_ERR "Unable to add the scsi host\n");
1014 scsi_host_get(rtsx_to_host(dev)); 1011 goto errout;
1015 atomic_inc(&total_threads); 1012 }
1016 wake_up_process(th);
1017 1013
1018 /* Start up the thread for delayed SCSI-device scanning */ 1014 /* Start up the thread for delayed SCSI-device scanning */
1019 th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan"); 1015 th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
@@ -1024,28 +1020,17 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
1024 goto errout; 1020 goto errout;
1025 } 1021 }
1026 1022
1027 /* Take a reference to the host for the scanning thread and
1028 * count it among all the threads we have launched. Then
1029 * start it up. */
1030 scsi_host_get(rtsx_to_host(dev));
1031 atomic_inc(&total_threads);
1032 wake_up_process(th); 1023 wake_up_process(th);
1033 1024
1034 /* Start up the thread for polling thread */ 1025 /* Start up the thread for polling thread */
1035 th = kthread_create(rtsx_polling_thread, dev, "rtsx-polling"); 1026 th = kthread_run(rtsx_polling_thread, dev, "rtsx-polling");
1036 if (IS_ERR(th)) { 1027 if (IS_ERR(th)) {
1037 printk(KERN_ERR "Unable to start the device-polling thread\n"); 1028 printk(KERN_ERR "Unable to start the device-polling thread\n");
1038 quiesce_and_remove_host(dev); 1029 quiesce_and_remove_host(dev);
1039 err = PTR_ERR(th); 1030 err = PTR_ERR(th);
1040 goto errout; 1031 goto errout;
1041 } 1032 }
1042 1033 dev->polling_thread = th;
1043 /* Take a reference to the host for the polling thread and
1044 * count it among all the threads we have launched. Then
1045 * start it up. */
1046 scsi_host_get(rtsx_to_host(dev));
1047 atomic_inc(&total_threads);
1048 wake_up_process(th);
1049 1034
1050 pci_set_drvdata(pci, dev); 1035 pci_set_drvdata(pci, dev);
1051 1036
@@ -1108,16 +1093,6 @@ static void __exit rtsx_exit(void)
1108 1093
1109 pci_unregister_driver(&driver); 1094 pci_unregister_driver(&driver);
1110 1095
1111 /* Don't return until all of our control and scanning threads
1112 * have exited. Since each thread signals threads_gone as its
1113 * last act, we have to call wait_for_completion the right number
1114 * of times.
1115 */
1116 while (atomic_read(&total_threads) > 0) {
1117 wait_for_completion(&threads_gone);
1118 atomic_dec(&total_threads);
1119 }
1120
1121 printk(KERN_INFO "%s module exit\n", CR_DRIVER_NAME); 1096 printk(KERN_INFO "%s module exit\n", CR_DRIVER_NAME);
1122} 1097}
1123 1098
diff --git a/drivers/staging/rts_pstor/rtsx.h b/drivers/staging/rts_pstor/rtsx.h
index 247615ba1d2a..86e47c2e3e3c 100644
--- a/drivers/staging/rts_pstor/rtsx.h
+++ b/drivers/staging/rts_pstor/rtsx.h
@@ -112,9 +112,16 @@ struct rtsx_dev {
112 /* locks */ 112 /* locks */
113 spinlock_t reg_lock; 113 spinlock_t reg_lock;
114 114
115 struct task_struct *ctl_thread; /* the control thread */
116 struct task_struct *polling_thread; /* the polling thread */
117
115 /* mutual exclusion and synchronization structures */ 118 /* mutual exclusion and synchronization structures */
116 struct semaphore sema; /* to sleep thread on */ 119 struct completion cmnd_ready; /* to sleep thread on */
120 struct completion control_exit; /* control thread exit */
121 struct completion polling_exit; /* polling thread exit */
117 struct completion notify; /* thread begin/end */ 122 struct completion notify; /* thread begin/end */
123 struct completion scanning_done; /* wait for scan thread */
124
118 wait_queue_head_t delay_wait; /* wait during scan, reset */ 125 wait_queue_head_t delay_wait; /* wait during scan, reset */
119 struct mutex dev_mutex; 126 struct mutex dev_mutex;
120 127
diff --git a/drivers/staging/solo6x10/core.c b/drivers/staging/solo6x10/core.c
index 76779949f141..f974f6412ad7 100644
--- a/drivers/staging/solo6x10/core.c
+++ b/drivers/staging/solo6x10/core.c
@@ -21,6 +21,7 @@
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/slab.h>
24#include <linux/videodev2.h> 25#include <linux/videodev2.h>
25#include "solo6x10.h" 26#include "solo6x10.h"
26#include "tw28.h" 27#include "tw28.h"
diff --git a/drivers/staging/solo6x10/enc.c b/drivers/staging/solo6x10/enc.c
index 285f7f350062..de502599bb19 100644
--- a/drivers/staging/solo6x10/enc.c
+++ b/drivers/staging/solo6x10/enc.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/slab.h>
21#include "solo6x10.h" 22#include "solo6x10.h"
22#include "osd-font.h" 23#include "osd-font.h"
23 24
diff --git a/drivers/staging/solo6x10/g723.c b/drivers/staging/solo6x10/g723.c
index bd8eb92c94b1..59274bfca95b 100644
--- a/drivers/staging/solo6x10/g723.c
+++ b/drivers/staging/solo6x10/g723.c
@@ -21,6 +21,7 @@
21#include <linux/mempool.h> 21#include <linux/mempool.h>
22#include <linux/poll.h> 22#include <linux/poll.h>
23#include <linux/kthread.h> 23#include <linux/kthread.h>
24#include <linux/slab.h>
24#include <linux/freezer.h> 25#include <linux/freezer.h>
25#include <sound/core.h> 26#include <sound/core.h>
26#include <sound/initval.h> 27#include <sound/initval.h>
diff --git a/drivers/staging/solo6x10/p2m.c b/drivers/staging/solo6x10/p2m.c
index 5717eabb04a4..56210f0fc5ec 100644
--- a/drivers/staging/solo6x10/p2m.c
+++ b/drivers/staging/solo6x10/p2m.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/slab.h>
21#include <linux/scatterlist.h> 22#include <linux/scatterlist.h>
22#include "solo6x10.h" 23#include "solo6x10.h"
23 24
diff --git a/drivers/staging/solo6x10/solo6x10.h b/drivers/staging/solo6x10/solo6x10.h
index 17c06bd6cc91..abee7213202f 100644
--- a/drivers/staging/solo6x10/solo6x10.h
+++ b/drivers/staging/solo6x10/solo6x10.h
@@ -28,6 +28,7 @@
28#include <linux/list.h> 28#include <linux/list.h>
29#include <linux/wait.h> 29#include <linux/wait.h>
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/slab.h>
31#include <asm/io.h> 32#include <asm/io.h>
32#include <linux/atomic.h> 33#include <linux/atomic.h>
33#include <linux/videodev2.h> 34#include <linux/videodev2.h>
diff --git a/drivers/staging/speakup/devsynth.c b/drivers/staging/speakup/devsynth.c
index 39dc586fc8bb..940769ef883f 100644
--- a/drivers/staging/speakup/devsynth.c
+++ b/drivers/staging/speakup/devsynth.c
@@ -18,13 +18,14 @@ static ssize_t speakup_file_write(struct file *fp, const char *buffer,
18{ 18{
19 size_t count = nbytes; 19 size_t count = nbytes;
20 const char *ptr = buffer; 20 const char *ptr = buffer;
21 int bytes; 21 size_t bytes;
22 unsigned long flags; 22 unsigned long flags;
23 u_char buf[256]; 23 u_char buf[256];
24
24 if (synth == NULL) 25 if (synth == NULL)
25 return -ENODEV; 26 return -ENODEV;
26 while (count > 0) { 27 while (count > 0) {
27 bytes = min_t(size_t, count, sizeof(buf)); 28 bytes = min(count, sizeof(buf));
28 if (copy_from_user(buf, ptr, bytes)) 29 if (copy_from_user(buf, ptr, bytes))
29 return -EFAULT; 30 return -EFAULT;
30 count -= bytes; 31 count -= bytes;
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 589a0554332e..3d1279c424a8 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -209,7 +209,6 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
209 break; 209 break;
210#ifdef CONFIG_OMAP_MCBSP 210#ifdef CONFIG_OMAP_MCBSP
211 case MCBSP_CLK: 211 case MCBSP_CLK:
212 omap_mcbsp_set_io_type(MCBSP_ID(clk_id), OMAP_MCBSP_POLL_IO);
213 omap_mcbsp_request(MCBSP_ID(clk_id)); 212 omap_mcbsp_request(MCBSP_ID(clk_id));
214 omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC); 213 omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC);
215 break; 214 break;
diff --git a/drivers/staging/zcache/Makefile b/drivers/staging/zcache/Makefile
index f5ec64f94470..60daa272c204 100644
--- a/drivers/staging/zcache/Makefile
+++ b/drivers/staging/zcache/Makefile
@@ -1,3 +1,3 @@
1zcache-y := tmem.o 1zcache-y := zcache-main.o tmem.o
2 2
3obj-$(CONFIG_ZCACHE) += zcache.o 3obj-$(CONFIG_ZCACHE) += zcache.o
diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
index 975e34bcd722..1ca66ea9b281 100644
--- a/drivers/staging/zcache/tmem.c
+++ b/drivers/staging/zcache/tmem.c
@@ -604,7 +604,7 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
604 struct tmem_obj *obj; 604 struct tmem_obj *obj;
605 void *pampd; 605 void *pampd;
606 bool ephemeral = is_ephemeral(pool); 606 bool ephemeral = is_ephemeral(pool);
607 uint32_t ret = -1; 607 int ret = -1;
608 struct tmem_hashbucket *hb; 608 struct tmem_hashbucket *hb;
609 bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral); 609 bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);
610 bool lock_held = false; 610 bool lock_held = false;
diff --git a/drivers/staging/zcache/zcache.c b/drivers/staging/zcache/zcache-main.c
index 65a81a0d7c49..a3f5162bfedc 100644
--- a/drivers/staging/zcache/zcache.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -19,6 +19,7 @@
19 * http://marc.info/?l=linux-mm&m=127811271605009 19 * http://marc.info/?l=linux-mm&m=127811271605009
20 */ 20 */
21 21
22#include <linux/module.h>
22#include <linux/cpu.h> 23#include <linux/cpu.h>
23#include <linux/highmem.h> 24#include <linux/highmem.h>
24#include <linux/list.h> 25#include <linux/list.h>
@@ -27,6 +28,7 @@
27#include <linux/spinlock.h> 28#include <linux/spinlock.h>
28#include <linux/types.h> 29#include <linux/types.h>
29#include <linux/atomic.h> 30#include <linux/atomic.h>
31#include <linux/math64.h>
30#include "tmem.h" 32#include "tmem.h"
31 33
32#include "../zram/xvmalloc.h" /* if built in drivers/staging */ 34#include "../zram/xvmalloc.h" /* if built in drivers/staging */
@@ -53,6 +55,9 @@
53 55
54#define MAX_CLIENTS 16 56#define MAX_CLIENTS 16
55#define LOCAL_CLIENT ((uint16_t)-1) 57#define LOCAL_CLIENT ((uint16_t)-1)
58
59MODULE_LICENSE("GPL");
60
56struct zcache_client { 61struct zcache_client {
57 struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT]; 62 struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
58 struct xv_pool *xvpool; 63 struct xv_pool *xvpool;
@@ -1153,11 +1158,12 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
1153 size_t clen; 1158 size_t clen;
1154 int ret; 1159 int ret;
1155 unsigned long count; 1160 unsigned long count;
1156 struct page *page = virt_to_page(data); 1161 struct page *page = (struct page *)(data);
1157 struct zcache_client *cli = pool->client; 1162 struct zcache_client *cli = pool->client;
1158 uint16_t client_id = get_client_id_from_client(cli); 1163 uint16_t client_id = get_client_id_from_client(cli);
1159 unsigned long zv_mean_zsize; 1164 unsigned long zv_mean_zsize;
1160 unsigned long curr_pers_pampd_count; 1165 unsigned long curr_pers_pampd_count;
1166 u64 total_zsize;
1161 1167
1162 if (eph) { 1168 if (eph) {
1163 ret = zcache_compress(page, &cdata, &clen); 1169 ret = zcache_compress(page, &cdata, &clen);
@@ -1190,8 +1196,9 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
1190 } 1196 }
1191 /* reject if mean compression is too poor */ 1197 /* reject if mean compression is too poor */
1192 if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) { 1198 if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
1193 zv_mean_zsize = xv_get_total_size_bytes(cli->xvpool) / 1199 total_zsize = xv_get_total_size_bytes(cli->xvpool);
1194 curr_pers_pampd_count; 1200 zv_mean_zsize = div_u64(total_zsize,
1201 curr_pers_pampd_count);
1195 if (zv_mean_zsize > zv_max_mean_zsize) { 1202 if (zv_mean_zsize > zv_max_mean_zsize) {
1196 zcache_mean_compress_poor++; 1203 zcache_mean_compress_poor++;
1197 goto out; 1204 goto out;
@@ -1220,7 +1227,7 @@ static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
1220 int ret = 0; 1227 int ret = 0;
1221 1228
1222 BUG_ON(is_ephemeral(pool)); 1229 BUG_ON(is_ephemeral(pool));
1223 zv_decompress(virt_to_page(data), pampd); 1230 zv_decompress((struct page *)(data), pampd);
1224 return ret; 1231 return ret;
1225} 1232}
1226 1233
@@ -1532,7 +1539,7 @@ static int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
1532 goto out; 1539 goto out;
1533 if (!zcache_freeze && zcache_do_preload(pool) == 0) { 1540 if (!zcache_freeze && zcache_do_preload(pool) == 0) {
1534 /* preload does preempt_disable on success */ 1541 /* preload does preempt_disable on success */
1535 ret = tmem_put(pool, oidp, index, page_address(page), 1542 ret = tmem_put(pool, oidp, index, (char *)(page),
1536 PAGE_SIZE, 0, is_ephemeral(pool)); 1543 PAGE_SIZE, 0, is_ephemeral(pool));
1537 if (ret < 0) { 1544 if (ret < 0) {
1538 if (is_ephemeral(pool)) 1545 if (is_ephemeral(pool))
@@ -1565,7 +1572,7 @@ static int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
1565 pool = zcache_get_pool_by_id(cli_id, pool_id); 1572 pool = zcache_get_pool_by_id(cli_id, pool_id);
1566 if (likely(pool != NULL)) { 1573 if (likely(pool != NULL)) {
1567 if (atomic_read(&pool->obj_count) > 0) 1574 if (atomic_read(&pool->obj_count) > 0)
1568 ret = tmem_get(pool, oidp, index, page_address(page), 1575 ret = tmem_get(pool, oidp, index, (char *)(page),
1569 &size, 0, is_ephemeral(pool)); 1576 &size, 0, is_ephemeral(pool));
1570 zcache_put_pool(pool); 1577 zcache_put_pool(pool);
1571 } 1578 }
@@ -1929,9 +1936,9 @@ __setup("nofrontswap", no_frontswap);
1929 1936
1930static int __init zcache_init(void) 1937static int __init zcache_init(void)
1931{ 1938{
1932#ifdef CONFIG_SYSFS
1933 int ret = 0; 1939 int ret = 0;
1934 1940
1941#ifdef CONFIG_SYSFS
1935 ret = sysfs_create_group(mm_kobj, &zcache_attr_group); 1942 ret = sysfs_create_group(mm_kobj, &zcache_attr_group);
1936 if (ret) { 1943 if (ret) {
1937 pr_err("zcache: can't create sysfs\n"); 1944 pr_err("zcache: can't create sysfs\n");
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
index 564ff4e0dbc4..8345fb457a40 100644
--- a/drivers/target/iscsi/Kconfig
+++ b/drivers/target/iscsi/Kconfig
@@ -1,5 +1,6 @@
1config ISCSI_TARGET 1config ISCSI_TARGET
2 tristate "Linux-iSCSI.org iSCSI Target Mode Stack" 2 tristate "Linux-iSCSI.org iSCSI Target Mode Stack"
3 depends on NET
3 select CRYPTO 4 select CRYPTO
4 select CRYPTO_CRC32C 5 select CRYPTO_CRC32C
5 select CRYPTO_CRC32C_INTEL if X86 6 select CRYPTO_CRC32C_INTEL if X86
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 14c81c4265bd..6a4ea29c2f36 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -120,7 +120,7 @@ struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
120 struct iscsi_tiqn *tiqn = NULL; 120 struct iscsi_tiqn *tiqn = NULL;
121 int ret; 121 int ret;
122 122
123 if (strlen(buf) > ISCSI_IQN_LEN) { 123 if (strlen(buf) >= ISCSI_IQN_LEN) {
124 pr_err("Target IQN exceeds %d bytes\n", 124 pr_err("Target IQN exceeds %d bytes\n",
125 ISCSI_IQN_LEN); 125 ISCSI_IQN_LEN);
126 return ERR_PTR(-EINVAL); 126 return ERR_PTR(-EINVAL);
@@ -1857,7 +1857,7 @@ static int iscsit_handle_text_cmd(
1857 char *text_ptr, *text_in; 1857 char *text_ptr, *text_in;
1858 int cmdsn_ret, niov = 0, rx_got, rx_size; 1858 int cmdsn_ret, niov = 0, rx_got, rx_size;
1859 u32 checksum = 0, data_crc = 0, payload_length; 1859 u32 checksum = 0, data_crc = 0, payload_length;
1860 u32 padding = 0, text_length = 0; 1860 u32 padding = 0, pad_bytes = 0, text_length = 0;
1861 struct iscsi_cmd *cmd; 1861 struct iscsi_cmd *cmd;
1862 struct kvec iov[3]; 1862 struct kvec iov[3];
1863 struct iscsi_text *hdr; 1863 struct iscsi_text *hdr;
@@ -1896,7 +1896,7 @@ static int iscsit_handle_text_cmd(
1896 1896
1897 padding = ((-payload_length) & 3); 1897 padding = ((-payload_length) & 3);
1898 if (padding != 0) { 1898 if (padding != 0) {
1899 iov[niov].iov_base = cmd->pad_bytes; 1899 iov[niov].iov_base = &pad_bytes;
1900 iov[niov++].iov_len = padding; 1900 iov[niov++].iov_len = padding;
1901 rx_size += padding; 1901 rx_size += padding;
1902 pr_debug("Receiving %u additional bytes" 1902 pr_debug("Receiving %u additional bytes"
@@ -1917,7 +1917,7 @@ static int iscsit_handle_text_cmd(
1917 if (conn->conn_ops->DataDigest) { 1917 if (conn->conn_ops->DataDigest) {
1918 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash, 1918 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
1919 text_in, text_length, 1919 text_in, text_length,
1920 padding, cmd->pad_bytes, 1920 padding, (u8 *)&pad_bytes,
1921 (u8 *)&data_crc); 1921 (u8 *)&data_crc);
1922 1922
1923 if (checksum != data_crc) { 1923 if (checksum != data_crc) {
@@ -2243,7 +2243,6 @@ static int iscsit_handle_snack(
2243 case 0: 2243 case 0:
2244 return iscsit_handle_recovery_datain_or_r2t(conn, buf, 2244 return iscsit_handle_recovery_datain_or_r2t(conn, buf,
2245 hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength); 2245 hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength);
2246 return 0;
2247 case ISCSI_FLAG_SNACK_TYPE_STATUS: 2246 case ISCSI_FLAG_SNACK_TYPE_STATUS:
2248 return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt, 2247 return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt,
2249 hdr->begrun, hdr->runlength); 2248 hdr->begrun, hdr->runlength);
@@ -3468,7 +3467,12 @@ static inline void iscsit_thread_check_cpumask(
3468} 3467}
3469 3468
3470#else 3469#else
3471#define iscsit_thread_get_cpumask(X) ({}) 3470
3471void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
3472{
3473 return;
3474}
3475
3472#define iscsit_thread_check_cpumask(X, Y, Z) ({}) 3476#define iscsit_thread_check_cpumask(X, Y, Z) ({})
3473#endif /* CONFIG_SMP */ 3477#endif /* CONFIG_SMP */
3474 3478
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 32bb92c44450..f1643dbf6a92 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -181,7 +181,7 @@ struct se_tpg_np *lio_target_call_addnptotpg(
181 return ERR_PTR(-EOVERFLOW); 181 return ERR_PTR(-EOVERFLOW);
182 } 182 }
183 memset(buf, 0, MAX_PORTAL_LEN + 1); 183 memset(buf, 0, MAX_PORTAL_LEN + 1);
184 snprintf(buf, MAX_PORTAL_LEN, "%s", name); 184 snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name);
185 185
186 memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage)); 186 memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage));
187 187
@@ -268,7 +268,7 @@ struct se_tpg_np *lio_target_call_addnptotpg(
268 ISCSI_TCP); 268 ISCSI_TCP);
269 if (IS_ERR(tpg_np)) { 269 if (IS_ERR(tpg_np)) {
270 iscsit_put_tpg(tpg); 270 iscsit_put_tpg(tpg);
271 return ERR_PTR(PTR_ERR(tpg_np)); 271 return ERR_CAST(tpg_np);
272 } 272 }
273 pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n"); 273 pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n");
274 274
@@ -1285,7 +1285,7 @@ struct se_wwn *lio_target_call_coreaddtiqn(
1285 1285
1286 tiqn = iscsit_add_tiqn((unsigned char *)name); 1286 tiqn = iscsit_add_tiqn((unsigned char *)name);
1287 if (IS_ERR(tiqn)) 1287 if (IS_ERR(tiqn))
1288 return ERR_PTR(PTR_ERR(tiqn)); 1288 return ERR_CAST(tiqn);
1289 /* 1289 /*
1290 * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group. 1290 * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.
1291 */ 1291 */
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 980650792cf6..c4c68da3e500 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -834,7 +834,7 @@ static int iscsit_attach_ooo_cmdsn(
834 */ 834 */
835 list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, 835 list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
836 ooo_list) { 836 ooo_list) {
837 while (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) 837 if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
838 continue; 838 continue;
839 839
840 list_add(&ooo_cmdsn->ooo_list, 840 list_add(&ooo_cmdsn->ooo_list,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index bcaf82f47037..daad362a93ce 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1013,19 +1013,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1013 ISCSI_LOGIN_STATUS_TARGET_ERROR); 1013 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1014 goto new_sess_out; 1014 goto new_sess_out;
1015 } 1015 }
1016#if 0 1016 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
1017 if (!iscsi_ntop6((const unsigned char *) 1017 &sock_in6.sin6_addr.in6_u);
1018 &sock_in6.sin6_addr.in6_u, 1018 conn->login_port = ntohs(sock_in6.sin6_port);
1019 (char *)&conn->ipv6_login_ip[0],
1020 IPV6_ADDRESS_SPACE)) {
1021 pr_err("iscsi_ntop6() failed\n");
1022 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1023 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1024 goto new_sess_out;
1025 }
1026#else
1027 pr_debug("Skipping iscsi_ntop6()\n");
1028#endif
1029 } else { 1019 } else {
1030 memset(&sock_in, 0, sizeof(struct sockaddr_in)); 1020 memset(&sock_in, 0, sizeof(struct sockaddr_in));
1031 1021
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 713a4d23557a..4d087ac11067 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -978,7 +978,7 @@ struct iscsi_login *iscsi_target_init_negotiation(
978 pr_err("Unable to allocate memory for struct iscsi_login.\n"); 978 pr_err("Unable to allocate memory for struct iscsi_login.\n");
979 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 979 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
980 ISCSI_LOGIN_STATUS_NO_RESOURCES); 980 ISCSI_LOGIN_STATUS_NO_RESOURCES);
981 goto out; 981 return NULL;
982 } 982 }
983 983
984 login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); 984 login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 252e246cf51e..497b2e718a76 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -545,13 +545,13 @@ int iscsi_copy_param_list(
545 struct iscsi_param_list *src_param_list, 545 struct iscsi_param_list *src_param_list,
546 int leading) 546 int leading)
547{ 547{
548 struct iscsi_param *new_param = NULL, *param = NULL; 548 struct iscsi_param *param = NULL;
549 struct iscsi_param *new_param = NULL;
549 struct iscsi_param_list *param_list = NULL; 550 struct iscsi_param_list *param_list = NULL;
550 551
551 param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); 552 param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
552 if (!param_list) { 553 if (!param_list) {
553 pr_err("Unable to allocate memory for" 554 pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
554 " struct iscsi_param_list.\n");
555 goto err_out; 555 goto err_out;
556 } 556 }
557 INIT_LIST_HEAD(&param_list->param_list); 557 INIT_LIST_HEAD(&param_list->param_list);
@@ -567,8 +567,17 @@ int iscsi_copy_param_list(
567 567
568 new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL); 568 new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
569 if (!new_param) { 569 if (!new_param) {
570 pr_err("Unable to allocate memory for" 570 pr_err("Unable to allocate memory for struct iscsi_param.\n");
571 " struct iscsi_param.\n"); 571 goto err_out;
572 }
573
574 new_param->name = kstrdup(param->name, GFP_KERNEL);
575 new_param->value = kstrdup(param->value, GFP_KERNEL);
576 if (!new_param->value || !new_param->name) {
577 kfree(new_param->value);
578 kfree(new_param->name);
579 kfree(new_param);
580 pr_err("Unable to allocate memory for parameter name/value.\n");
572 goto err_out; 581 goto err_out;
573 } 582 }
574 583
@@ -580,32 +589,12 @@ int iscsi_copy_param_list(
580 new_param->use = param->use; 589 new_param->use = param->use;
581 new_param->type_range = param->type_range; 590 new_param->type_range = param->type_range;
582 591
583 new_param->name = kzalloc(strlen(param->name) + 1, GFP_KERNEL);
584 if (!new_param->name) {
585 pr_err("Unable to allocate memory for"
586 " parameter name.\n");
587 goto err_out;
588 }
589
590 new_param->value = kzalloc(strlen(param->value) + 1,
591 GFP_KERNEL);
592 if (!new_param->value) {
593 pr_err("Unable to allocate memory for"
594 " parameter value.\n");
595 goto err_out;
596 }
597
598 memcpy(new_param->name, param->name, strlen(param->name));
599 new_param->name[strlen(param->name)] = '\0';
600 memcpy(new_param->value, param->value, strlen(param->value));
601 new_param->value[strlen(param->value)] = '\0';
602
603 list_add_tail(&new_param->p_list, &param_list->param_list); 592 list_add_tail(&new_param->p_list, &param_list->param_list);
604 } 593 }
605 594
606 if (!list_empty(&param_list->param_list)) 595 if (!list_empty(&param_list->param_list)) {
607 *dst_param_list = param_list; 596 *dst_param_list = param_list;
608 else { 597 } else {
609 pr_err("No parameters allocated.\n"); 598 pr_err("No parameters allocated.\n");
610 goto err_out; 599 goto err_out;
611 } 600 }
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index a1acb0167902..a0d23bc0fc98 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -243,7 +243,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
243 if (!cmd->tmr_req) { 243 if (!cmd->tmr_req) {
244 pr_err("Unable to allocate memory for" 244 pr_err("Unable to allocate memory for"
245 " Task Management command!\n"); 245 " Task Management command!\n");
246 return NULL; 246 goto out;
247 } 247 }
248 /* 248 /*
249 * TASK_REASSIGN for ERL=2 / connection stays inside of 249 * TASK_REASSIGN for ERL=2 / connection stays inside of
@@ -298,8 +298,6 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
298 return cmd; 298 return cmd;
299out: 299out:
300 iscsit_release_cmd(cmd); 300 iscsit_release_cmd(cmd);
301 if (se_cmd)
302 transport_free_se_cmd(se_cmd);
303 return NULL; 301 return NULL;
304} 302}
305 303
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 8ae09a1bdf74..89ae923c5da6 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -67,6 +67,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
67{ 67{
68 struct se_lun *lun = cmd->se_lun; 68 struct se_lun *lun = cmd->se_lun;
69 struct se_device *dev = cmd->se_dev; 69 struct se_device *dev = cmd->se_dev;
70 struct se_portal_group *tpg = lun->lun_sep->sep_tpg;
70 unsigned char *buf; 71 unsigned char *buf;
71 72
72 /* 73 /*
@@ -81,9 +82,13 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
81 82
82 buf = transport_kmap_first_data_page(cmd); 83 buf = transport_kmap_first_data_page(cmd);
83 84
84 buf[0] = dev->transport->get_device_type(dev); 85 if (dev == tpg->tpg_virt_lun0.lun_se_dev) {
85 if (buf[0] == TYPE_TAPE) 86 buf[0] = 0x3f; /* Not connected */
86 buf[1] = 0x80; 87 } else {
88 buf[0] = dev->transport->get_device_type(dev);
89 if (buf[0] == TYPE_TAPE)
90 buf[1] = 0x80;
91 }
87 buf[2] = dev->transport->get_device_rev(dev); 92 buf[2] = dev->transport->get_device_rev(dev);
88 93
89 /* 94 /*
@@ -915,8 +920,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
915 length += target_modesense_control(dev, &buf[offset+length]); 920 length += target_modesense_control(dev, &buf[offset+length]);
916 break; 921 break;
917 default: 922 default:
918 pr_err("Got Unknown Mode Page: 0x%02x\n", 923 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
919 cdb[2] & 0x3f); 924 cdb[2] & 0x3f, cdb[3]);
920 return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; 925 return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
921 } 926 }
922 offset += length; 927 offset += length;
@@ -1072,8 +1077,6 @@ target_emulate_unmap(struct se_task *task)
1072 size -= 16; 1077 size -= 16;
1073 } 1078 }
1074 1079
1075 task->task_scsi_status = GOOD;
1076 transport_complete_task(task, 1);
1077err: 1080err:
1078 transport_kunmap_first_data_page(cmd); 1081 transport_kunmap_first_data_page(cmd);
1079 1082
@@ -1085,24 +1088,17 @@ err:
1085 * Note this is not used for TCM/pSCSI passthrough 1088 * Note this is not used for TCM/pSCSI passthrough
1086 */ 1089 */
1087static int 1090static int
1088target_emulate_write_same(struct se_task *task, int write_same32) 1091target_emulate_write_same(struct se_task *task, u32 num_blocks)
1089{ 1092{
1090 struct se_cmd *cmd = task->task_se_cmd; 1093 struct se_cmd *cmd = task->task_se_cmd;
1091 struct se_device *dev = cmd->se_dev; 1094 struct se_device *dev = cmd->se_dev;
1092 sector_t range; 1095 sector_t range;
1093 sector_t lba = cmd->t_task_lba; 1096 sector_t lba = cmd->t_task_lba;
1094 unsigned int num_blocks;
1095 int ret; 1097 int ret;
1096 /* 1098 /*
1097 * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explict 1099 * Use the explicit range when non zero is supplied, otherwise calculate
1098 * range when non zero is supplied, otherwise calculate the remaining 1100 * the remaining range based on ->get_blocks() - starting LBA.
1099 * range based on ->get_blocks() - starting LBA.
1100 */ 1101 */
1101 if (write_same32)
1102 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
1103 else
1104 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
1105
1106 if (num_blocks != 0) 1102 if (num_blocks != 0)
1107 range = num_blocks; 1103 range = num_blocks;
1108 else 1104 else
@@ -1117,8 +1113,6 @@ target_emulate_write_same(struct se_task *task, int write_same32)
1117 return ret; 1113 return ret;
1118 } 1114 }
1119 1115
1120 task->task_scsi_status = GOOD;
1121 transport_complete_task(task, 1);
1122 return 0; 1116 return 0;
1123} 1117}
1124 1118
@@ -1165,13 +1159,23 @@ transport_emulate_control_cdb(struct se_task *task)
1165 } 1159 }
1166 ret = target_emulate_unmap(task); 1160 ret = target_emulate_unmap(task);
1167 break; 1161 break;
1162 case WRITE_SAME:
1163 if (!dev->transport->do_discard) {
1164 pr_err("WRITE_SAME emulation not supported"
1165 " for: %s\n", dev->transport->name);
1166 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1167 }
1168 ret = target_emulate_write_same(task,
1169 get_unaligned_be16(&cmd->t_task_cdb[7]));
1170 break;
1168 case WRITE_SAME_16: 1171 case WRITE_SAME_16:
1169 if (!dev->transport->do_discard) { 1172 if (!dev->transport->do_discard) {
1170 pr_err("WRITE_SAME_16 emulation not supported" 1173 pr_err("WRITE_SAME_16 emulation not supported"
1171 " for: %s\n", dev->transport->name); 1174 " for: %s\n", dev->transport->name);
1172 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1175 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1173 } 1176 }
1174 ret = target_emulate_write_same(task, 0); 1177 ret = target_emulate_write_same(task,
1178 get_unaligned_be32(&cmd->t_task_cdb[10]));
1175 break; 1179 break;
1176 case VARIABLE_LENGTH_CMD: 1180 case VARIABLE_LENGTH_CMD:
1177 service_action = 1181 service_action =
@@ -1184,7 +1188,8 @@ transport_emulate_control_cdb(struct se_task *task)
1184 dev->transport->name); 1188 dev->transport->name);
1185 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; 1189 return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
1186 } 1190 }
1187 ret = target_emulate_write_same(task, 1); 1191 ret = target_emulate_write_same(task,
1192 get_unaligned_be32(&cmd->t_task_cdb[28]));
1188 break; 1193 break;
1189 default: 1194 default:
1190 pr_err("Unsupported VARIABLE_LENGTH_CMD SA:" 1195 pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
@@ -1219,8 +1224,14 @@ transport_emulate_control_cdb(struct se_task *task)
1219 1224
1220 if (ret < 0) 1225 if (ret < 0)
1221 return ret; 1226 return ret;
1222 task->task_scsi_status = GOOD; 1227 /*
1223 transport_complete_task(task, 1); 1228 * Handle the successful completion here unless a caller
1229 * has explictly requested an asychronous completion.
1230 */
1231 if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
1232 task->task_scsi_status = GOOD;
1233 transport_complete_task(task, 1);
1234 }
1224 1235
1225 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 1236 return PYX_TRANSPORT_SENT_TO_TRANSPORT;
1226} 1237}
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index b38b6c993e65..ca6e4a4df134 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -472,9 +472,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
472 struct se_dev_entry *deve; 472 struct se_dev_entry *deve;
473 u32 i; 473 u32 i;
474 474
475 spin_lock_bh(&tpg->acl_node_lock); 475 spin_lock_irq(&tpg->acl_node_lock);
476 list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { 476 list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
477 spin_unlock_bh(&tpg->acl_node_lock); 477 spin_unlock_irq(&tpg->acl_node_lock);
478 478
479 spin_lock_irq(&nacl->device_list_lock); 479 spin_lock_irq(&nacl->device_list_lock);
480 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 480 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
@@ -491,9 +491,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
491 } 491 }
492 spin_unlock_irq(&nacl->device_list_lock); 492 spin_unlock_irq(&nacl->device_list_lock);
493 493
494 spin_lock_bh(&tpg->acl_node_lock); 494 spin_lock_irq(&tpg->acl_node_lock);
495 } 495 }
496 spin_unlock_bh(&tpg->acl_node_lock); 496 spin_unlock_irq(&tpg->acl_node_lock);
497} 497}
498 498
499static struct se_port *core_alloc_port(struct se_device *dev) 499static struct se_port *core_alloc_port(struct se_device *dev)
@@ -839,6 +839,24 @@ int se_dev_check_shutdown(struct se_device *dev)
839 return ret; 839 return ret;
840} 840}
841 841
842u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
843{
844 u32 tmp, aligned_max_sectors;
845 /*
846 * Limit max_sectors to a PAGE_SIZE aligned value for modern
847 * transport_allocate_data_tasks() operation.
848 */
849 tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
850 aligned_max_sectors = (tmp / block_size);
851 if (max_sectors != aligned_max_sectors) {
852 printk(KERN_INFO "Rounding down aligned max_sectors from %u"
853 " to %u\n", max_sectors, aligned_max_sectors);
854 return aligned_max_sectors;
855 }
856
857 return max_sectors;
858}
859
842void se_dev_set_default_attribs( 860void se_dev_set_default_attribs(
843 struct se_device *dev, 861 struct se_device *dev,
844 struct se_dev_limits *dev_limits) 862 struct se_dev_limits *dev_limits)
@@ -878,6 +896,11 @@ void se_dev_set_default_attribs(
878 * max_sectors is based on subsystem plugin dependent requirements. 896 * max_sectors is based on subsystem plugin dependent requirements.
879 */ 897 */
880 dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; 898 dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
899 /*
900 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
901 */
902 limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
903 limits->logical_block_size);
881 dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; 904 dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
882 /* 905 /*
883 * Set optimal_sectors from max_sectors, which can be lowered via 906 * Set optimal_sectors from max_sectors, which can be lowered via
@@ -1242,6 +1265,11 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
1242 return -EINVAL; 1265 return -EINVAL;
1243 } 1266 }
1244 } 1267 }
1268 /*
1269 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
1270 */
1271 max_sectors = se_dev_align_max_sectors(max_sectors,
1272 dev->se_sub_dev->se_dev_attrib.block_size);
1245 1273
1246 dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; 1274 dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
1247 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", 1275 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
@@ -1344,15 +1372,17 @@ struct se_lun *core_dev_add_lun(
1344 */ 1372 */
1345 if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { 1373 if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
1346 struct se_node_acl *acl; 1374 struct se_node_acl *acl;
1347 spin_lock_bh(&tpg->acl_node_lock); 1375 spin_lock_irq(&tpg->acl_node_lock);
1348 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 1376 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
1349 if (acl->dynamic_node_acl) { 1377 if (acl->dynamic_node_acl &&
1350 spin_unlock_bh(&tpg->acl_node_lock); 1378 (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
1379 !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
1380 spin_unlock_irq(&tpg->acl_node_lock);
1351 core_tpg_add_node_to_devs(acl, tpg); 1381 core_tpg_add_node_to_devs(acl, tpg);
1352 spin_lock_bh(&tpg->acl_node_lock); 1382 spin_lock_irq(&tpg->acl_node_lock);
1353 } 1383 }
1354 } 1384 }
1355 spin_unlock_bh(&tpg->acl_node_lock); 1385 spin_unlock_irq(&tpg->acl_node_lock);
1356 } 1386 }
1357 1387
1358 return lun_p; 1388 return lun_p;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index f1654694f4ea..55bbe0847a6d 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -481,7 +481,7 @@ static struct config_group *target_fabric_make_nodeacl(
481 481
482 se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name); 482 se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
483 if (IS_ERR(se_nacl)) 483 if (IS_ERR(se_nacl))
484 return ERR_PTR(PTR_ERR(se_nacl)); 484 return ERR_CAST(se_nacl);
485 485
486 nacl_cg = &se_nacl->acl_group; 486 nacl_cg = &se_nacl->acl_group;
487 nacl_cg->default_groups = se_nacl->acl_default_groups; 487 nacl_cg->default_groups = se_nacl->acl_default_groups;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 1c1b849cd4fb..7fd3a161f7cc 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1598,14 +1598,14 @@ static int core_scsi3_decode_spec_i_port(
1598 * from the decoded fabric module specific TransportID 1598 * from the decoded fabric module specific TransportID
1599 * at *i_str. 1599 * at *i_str.
1600 */ 1600 */
1601 spin_lock_bh(&tmp_tpg->acl_node_lock); 1601 spin_lock_irq(&tmp_tpg->acl_node_lock);
1602 dest_node_acl = __core_tpg_get_initiator_node_acl( 1602 dest_node_acl = __core_tpg_get_initiator_node_acl(
1603 tmp_tpg, i_str); 1603 tmp_tpg, i_str);
1604 if (dest_node_acl) { 1604 if (dest_node_acl) {
1605 atomic_inc(&dest_node_acl->acl_pr_ref_count); 1605 atomic_inc(&dest_node_acl->acl_pr_ref_count);
1606 smp_mb__after_atomic_inc(); 1606 smp_mb__after_atomic_inc();
1607 } 1607 }
1608 spin_unlock_bh(&tmp_tpg->acl_node_lock); 1608 spin_unlock_irq(&tmp_tpg->acl_node_lock);
1609 1609
1610 if (!dest_node_acl) { 1610 if (!dest_node_acl) {
1611 core_scsi3_tpg_undepend_item(tmp_tpg); 1611 core_scsi3_tpg_undepend_item(tmp_tpg);
@@ -3496,14 +3496,14 @@ after_iport_check:
3496 /* 3496 /*
3497 * Locate the destination struct se_node_acl from the received Transport ID 3497 * Locate the destination struct se_node_acl from the received Transport ID
3498 */ 3498 */
3499 spin_lock_bh(&dest_se_tpg->acl_node_lock); 3499 spin_lock_irq(&dest_se_tpg->acl_node_lock);
3500 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, 3500 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
3501 initiator_str); 3501 initiator_str);
3502 if (dest_node_acl) { 3502 if (dest_node_acl) {
3503 atomic_inc(&dest_node_acl->acl_pr_ref_count); 3503 atomic_inc(&dest_node_acl->acl_pr_ref_count);
3504 smp_mb__after_atomic_inc(); 3504 smp_mb__after_atomic_inc();
3505 } 3505 }
3506 spin_unlock_bh(&dest_se_tpg->acl_node_lock); 3506 spin_unlock_irq(&dest_se_tpg->acl_node_lock);
3507 3507
3508 if (!dest_node_acl) { 3508 if (!dest_node_acl) {
3509 pr_err("Unable to locate %s dest_node_acl for" 3509 pr_err("Unable to locate %s dest_node_acl for"
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 3dd81d24d9a9..e567e129c697 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -390,12 +390,10 @@ static int rd_MEMCPY_read(struct rd_request *req)
390 length = req->rd_size; 390 length = req->rd_size;
391 391
392 dst = sg_virt(&sg_d[i++]) + dst_offset; 392 dst = sg_virt(&sg_d[i++]) + dst_offset;
393 if (!dst) 393 BUG_ON(!dst);
394 BUG();
395 394
396 src = sg_virt(&sg_s[j]) + src_offset; 395 src = sg_virt(&sg_s[j]) + src_offset;
397 if (!src) 396 BUG_ON(!src);
398 BUG();
399 397
400 dst_offset = 0; 398 dst_offset = 0;
401 src_offset = length; 399 src_offset = length;
@@ -415,8 +413,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
415 length = req->rd_size; 413 length = req->rd_size;
416 414
417 dst = sg_virt(&sg_d[i]) + dst_offset; 415 dst = sg_virt(&sg_d[i]) + dst_offset;
418 if (!dst) 416 BUG_ON(!dst);
419 BUG();
420 417
421 if (sg_d[i].length == length) { 418 if (sg_d[i].length == length) {
422 i++; 419 i++;
@@ -425,8 +422,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
425 dst_offset = length; 422 dst_offset = length;
426 423
427 src = sg_virt(&sg_s[j++]) + src_offset; 424 src = sg_virt(&sg_s[j++]) + src_offset;
428 if (!src) 425 BUG_ON(!src);
429 BUG();
430 426
431 src_offset = 0; 427 src_offset = 0;
432 page_end = 1; 428 page_end = 1;
@@ -510,12 +506,10 @@ static int rd_MEMCPY_write(struct rd_request *req)
510 length = req->rd_size; 506 length = req->rd_size;
511 507
512 src = sg_virt(&sg_s[i++]) + src_offset; 508 src = sg_virt(&sg_s[i++]) + src_offset;
513 if (!src) 509 BUG_ON(!src);
514 BUG();
515 510
516 dst = sg_virt(&sg_d[j]) + dst_offset; 511 dst = sg_virt(&sg_d[j]) + dst_offset;
517 if (!dst) 512 BUG_ON(!dst);
518 BUG();
519 513
520 src_offset = 0; 514 src_offset = 0;
521 dst_offset = length; 515 dst_offset = length;
@@ -535,8 +529,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
535 length = req->rd_size; 529 length = req->rd_size;
536 530
537 src = sg_virt(&sg_s[i]) + src_offset; 531 src = sg_virt(&sg_s[i]) + src_offset;
538 if (!src) 532 BUG_ON(!src);
539 BUG();
540 533
541 if (sg_s[i].length == length) { 534 if (sg_s[i].length == length) {
542 i++; 535 i++;
@@ -545,8 +538,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
545 src_offset = length; 538 src_offset = length;
546 539
547 dst = sg_virt(&sg_d[j++]) + dst_offset; 540 dst = sg_virt(&sg_d[j++]) + dst_offset;
548 if (!dst) 541 BUG_ON(!dst);
549 BUG();
550 542
551 dst_offset = 0; 543 dst_offset = 0;
552 page_end = 1; 544 page_end = 1;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 4f1ba4c5ef11..162b736c7342 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -137,15 +137,15 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
137{ 137{
138 struct se_node_acl *acl; 138 struct se_node_acl *acl;
139 139
140 spin_lock_bh(&tpg->acl_node_lock); 140 spin_lock_irq(&tpg->acl_node_lock);
141 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 141 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
142 if (!strcmp(acl->initiatorname, initiatorname) && 142 if (!strcmp(acl->initiatorname, initiatorname) &&
143 !acl->dynamic_node_acl) { 143 !acl->dynamic_node_acl) {
144 spin_unlock_bh(&tpg->acl_node_lock); 144 spin_unlock_irq(&tpg->acl_node_lock);
145 return acl; 145 return acl;
146 } 146 }
147 } 147 }
148 spin_unlock_bh(&tpg->acl_node_lock); 148 spin_unlock_irq(&tpg->acl_node_lock);
149 149
150 return NULL; 150 return NULL;
151} 151}
@@ -298,13 +298,21 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
298 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); 298 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
299 return NULL; 299 return NULL;
300 } 300 }
301 /*
302 * Here we only create demo-mode MappedLUNs from the active
303 * TPG LUNs if the fabric is not explictly asking for
304 * tpg_check_demo_mode_login_only() == 1.
305 */
306 if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) &&
307 (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1))
308 do { ; } while (0);
309 else
310 core_tpg_add_node_to_devs(acl, tpg);
301 311
302 core_tpg_add_node_to_devs(acl, tpg); 312 spin_lock_irq(&tpg->acl_node_lock);
303
304 spin_lock_bh(&tpg->acl_node_lock);
305 list_add_tail(&acl->acl_list, &tpg->acl_node_list); 313 list_add_tail(&acl->acl_list, &tpg->acl_node_list);
306 tpg->num_node_acls++; 314 tpg->num_node_acls++;
307 spin_unlock_bh(&tpg->acl_node_lock); 315 spin_unlock_irq(&tpg->acl_node_lock);
308 316
309 pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" 317 pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
310 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 318 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -354,7 +362,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
354{ 362{
355 struct se_node_acl *acl = NULL; 363 struct se_node_acl *acl = NULL;
356 364
357 spin_lock_bh(&tpg->acl_node_lock); 365 spin_lock_irq(&tpg->acl_node_lock);
358 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 366 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
359 if (acl) { 367 if (acl) {
360 if (acl->dynamic_node_acl) { 368 if (acl->dynamic_node_acl) {
@@ -362,7 +370,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
362 pr_debug("%s_TPG[%u] - Replacing dynamic ACL" 370 pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
363 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 371 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
364 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); 372 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
365 spin_unlock_bh(&tpg->acl_node_lock); 373 spin_unlock_irq(&tpg->acl_node_lock);
366 /* 374 /*
367 * Release the locally allocated struct se_node_acl 375 * Release the locally allocated struct se_node_acl
368 * because * core_tpg_add_initiator_node_acl() returned 376 * because * core_tpg_add_initiator_node_acl() returned
@@ -378,10 +386,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
378 " Node %s already exists for TPG %u, ignoring" 386 " Node %s already exists for TPG %u, ignoring"
379 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 387 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
380 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 388 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
381 spin_unlock_bh(&tpg->acl_node_lock); 389 spin_unlock_irq(&tpg->acl_node_lock);
382 return ERR_PTR(-EEXIST); 390 return ERR_PTR(-EEXIST);
383 } 391 }
384 spin_unlock_bh(&tpg->acl_node_lock); 392 spin_unlock_irq(&tpg->acl_node_lock);
385 393
386 if (!se_nacl) { 394 if (!se_nacl) {
387 pr_err("struct se_node_acl pointer is NULL\n"); 395 pr_err("struct se_node_acl pointer is NULL\n");
@@ -418,10 +426,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
418 return ERR_PTR(-EINVAL); 426 return ERR_PTR(-EINVAL);
419 } 427 }
420 428
421 spin_lock_bh(&tpg->acl_node_lock); 429 spin_lock_irq(&tpg->acl_node_lock);
422 list_add_tail(&acl->acl_list, &tpg->acl_node_list); 430 list_add_tail(&acl->acl_list, &tpg->acl_node_list);
423 tpg->num_node_acls++; 431 tpg->num_node_acls++;
424 spin_unlock_bh(&tpg->acl_node_lock); 432 spin_unlock_irq(&tpg->acl_node_lock);
425 433
426done: 434done:
427 pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" 435 pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
@@ -445,14 +453,14 @@ int core_tpg_del_initiator_node_acl(
445 struct se_session *sess, *sess_tmp; 453 struct se_session *sess, *sess_tmp;
446 int dynamic_acl = 0; 454 int dynamic_acl = 0;
447 455
448 spin_lock_bh(&tpg->acl_node_lock); 456 spin_lock_irq(&tpg->acl_node_lock);
449 if (acl->dynamic_node_acl) { 457 if (acl->dynamic_node_acl) {
450 acl->dynamic_node_acl = 0; 458 acl->dynamic_node_acl = 0;
451 dynamic_acl = 1; 459 dynamic_acl = 1;
452 } 460 }
453 list_del(&acl->acl_list); 461 list_del(&acl->acl_list);
454 tpg->num_node_acls--; 462 tpg->num_node_acls--;
455 spin_unlock_bh(&tpg->acl_node_lock); 463 spin_unlock_irq(&tpg->acl_node_lock);
456 464
457 spin_lock_bh(&tpg->session_lock); 465 spin_lock_bh(&tpg->session_lock);
458 list_for_each_entry_safe(sess, sess_tmp, 466 list_for_each_entry_safe(sess, sess_tmp,
@@ -503,21 +511,21 @@ int core_tpg_set_initiator_node_queue_depth(
503 struct se_node_acl *acl; 511 struct se_node_acl *acl;
504 int dynamic_acl = 0; 512 int dynamic_acl = 0;
505 513
506 spin_lock_bh(&tpg->acl_node_lock); 514 spin_lock_irq(&tpg->acl_node_lock);
507 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 515 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
508 if (!acl) { 516 if (!acl) {
509 pr_err("Access Control List entry for %s Initiator" 517 pr_err("Access Control List entry for %s Initiator"
510 " Node %s does not exists for TPG %hu, ignoring" 518 " Node %s does not exists for TPG %hu, ignoring"
511 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 519 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
512 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 520 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
513 spin_unlock_bh(&tpg->acl_node_lock); 521 spin_unlock_irq(&tpg->acl_node_lock);
514 return -ENODEV; 522 return -ENODEV;
515 } 523 }
516 if (acl->dynamic_node_acl) { 524 if (acl->dynamic_node_acl) {
517 acl->dynamic_node_acl = 0; 525 acl->dynamic_node_acl = 0;
518 dynamic_acl = 1; 526 dynamic_acl = 1;
519 } 527 }
520 spin_unlock_bh(&tpg->acl_node_lock); 528 spin_unlock_irq(&tpg->acl_node_lock);
521 529
522 spin_lock_bh(&tpg->session_lock); 530 spin_lock_bh(&tpg->session_lock);
523 list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { 531 list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
@@ -533,10 +541,10 @@ int core_tpg_set_initiator_node_queue_depth(
533 tpg->se_tpg_tfo->get_fabric_name(), initiatorname); 541 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
534 spin_unlock_bh(&tpg->session_lock); 542 spin_unlock_bh(&tpg->session_lock);
535 543
536 spin_lock_bh(&tpg->acl_node_lock); 544 spin_lock_irq(&tpg->acl_node_lock);
537 if (dynamic_acl) 545 if (dynamic_acl)
538 acl->dynamic_node_acl = 1; 546 acl->dynamic_node_acl = 1;
539 spin_unlock_bh(&tpg->acl_node_lock); 547 spin_unlock_irq(&tpg->acl_node_lock);
540 return -EEXIST; 548 return -EEXIST;
541 } 549 }
542 /* 550 /*
@@ -571,10 +579,10 @@ int core_tpg_set_initiator_node_queue_depth(
571 if (init_sess) 579 if (init_sess)
572 tpg->se_tpg_tfo->close_session(init_sess); 580 tpg->se_tpg_tfo->close_session(init_sess);
573 581
574 spin_lock_bh(&tpg->acl_node_lock); 582 spin_lock_irq(&tpg->acl_node_lock);
575 if (dynamic_acl) 583 if (dynamic_acl)
576 acl->dynamic_node_acl = 1; 584 acl->dynamic_node_acl = 1;
577 spin_unlock_bh(&tpg->acl_node_lock); 585 spin_unlock_irq(&tpg->acl_node_lock);
578 return -EINVAL; 586 return -EINVAL;
579 } 587 }
580 spin_unlock_bh(&tpg->session_lock); 588 spin_unlock_bh(&tpg->session_lock);
@@ -590,10 +598,10 @@ int core_tpg_set_initiator_node_queue_depth(
590 initiatorname, tpg->se_tpg_tfo->get_fabric_name(), 598 initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
591 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 599 tpg->se_tpg_tfo->tpg_get_tag(tpg));
592 600
593 spin_lock_bh(&tpg->acl_node_lock); 601 spin_lock_irq(&tpg->acl_node_lock);
594 if (dynamic_acl) 602 if (dynamic_acl)
595 acl->dynamic_node_acl = 1; 603 acl->dynamic_node_acl = 1;
596 spin_unlock_bh(&tpg->acl_node_lock); 604 spin_unlock_irq(&tpg->acl_node_lock);
597 605
598 return 0; 606 return 0;
599} 607}
@@ -717,20 +725,20 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
717 * not been released because of TFO->tpg_check_demo_mode_cache() == 1 725 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
718 * in transport_deregister_session(). 726 * in transport_deregister_session().
719 */ 727 */
720 spin_lock_bh(&se_tpg->acl_node_lock); 728 spin_lock_irq(&se_tpg->acl_node_lock);
721 list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, 729 list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
722 acl_list) { 730 acl_list) {
723 list_del(&nacl->acl_list); 731 list_del(&nacl->acl_list);
724 se_tpg->num_node_acls--; 732 se_tpg->num_node_acls--;
725 spin_unlock_bh(&se_tpg->acl_node_lock); 733 spin_unlock_irq(&se_tpg->acl_node_lock);
726 734
727 core_tpg_wait_for_nacl_pr_ref(nacl); 735 core_tpg_wait_for_nacl_pr_ref(nacl);
728 core_free_device_list_for_node(nacl, se_tpg); 736 core_free_device_list_for_node(nacl, se_tpg);
729 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); 737 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
730 738
731 spin_lock_bh(&se_tpg->acl_node_lock); 739 spin_lock_irq(&se_tpg->acl_node_lock);
732 } 740 }
733 spin_unlock_bh(&se_tpg->acl_node_lock); 741 spin_unlock_irq(&se_tpg->acl_node_lock);
734 742
735 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) 743 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
736 core_tpg_release_virtual_lun0(se_tpg); 744 core_tpg_release_virtual_lun0(se_tpg);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index c75a01a1c475..8d0c58ea6316 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -389,17 +389,18 @@ void transport_deregister_session(struct se_session *se_sess)
389{ 389{
390 struct se_portal_group *se_tpg = se_sess->se_tpg; 390 struct se_portal_group *se_tpg = se_sess->se_tpg;
391 struct se_node_acl *se_nacl; 391 struct se_node_acl *se_nacl;
392 unsigned long flags;
392 393
393 if (!se_tpg) { 394 if (!se_tpg) {
394 transport_free_session(se_sess); 395 transport_free_session(se_sess);
395 return; 396 return;
396 } 397 }
397 398
398 spin_lock_bh(&se_tpg->session_lock); 399 spin_lock_irqsave(&se_tpg->session_lock, flags);
399 list_del(&se_sess->sess_list); 400 list_del(&se_sess->sess_list);
400 se_sess->se_tpg = NULL; 401 se_sess->se_tpg = NULL;
401 se_sess->fabric_sess_ptr = NULL; 402 se_sess->fabric_sess_ptr = NULL;
402 spin_unlock_bh(&se_tpg->session_lock); 403 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
403 404
404 /* 405 /*
405 * Determine if we need to do extra work for this initiator node's 406 * Determine if we need to do extra work for this initiator node's
@@ -407,22 +408,22 @@ void transport_deregister_session(struct se_session *se_sess)
407 */ 408 */
408 se_nacl = se_sess->se_node_acl; 409 se_nacl = se_sess->se_node_acl;
409 if (se_nacl) { 410 if (se_nacl) {
410 spin_lock_bh(&se_tpg->acl_node_lock); 411 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
411 if (se_nacl->dynamic_node_acl) { 412 if (se_nacl->dynamic_node_acl) {
412 if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( 413 if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
413 se_tpg)) { 414 se_tpg)) {
414 list_del(&se_nacl->acl_list); 415 list_del(&se_nacl->acl_list);
415 se_tpg->num_node_acls--; 416 se_tpg->num_node_acls--;
416 spin_unlock_bh(&se_tpg->acl_node_lock); 417 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
417 418
418 core_tpg_wait_for_nacl_pr_ref(se_nacl); 419 core_tpg_wait_for_nacl_pr_ref(se_nacl);
419 core_free_device_list_for_node(se_nacl, se_tpg); 420 core_free_device_list_for_node(se_nacl, se_tpg);
420 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, 421 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
421 se_nacl); 422 se_nacl);
422 spin_lock_bh(&se_tpg->acl_node_lock); 423 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
423 } 424 }
424 } 425 }
425 spin_unlock_bh(&se_tpg->acl_node_lock); 426 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
426 } 427 }
427 428
428 transport_free_session(se_sess); 429 transport_free_session(se_sess);
@@ -1747,6 +1748,8 @@ int transport_generic_handle_cdb(
1747} 1748}
1748EXPORT_SYMBOL(transport_generic_handle_cdb); 1749EXPORT_SYMBOL(transport_generic_handle_cdb);
1749 1750
1751static void transport_generic_request_failure(struct se_cmd *,
1752 struct se_device *, int, int);
1750/* 1753/*
1751 * Used by fabric module frontends to queue tasks directly. 1754 * Used by fabric module frontends to queue tasks directly.
1752 * Many only be used from process context only 1755 * Many only be used from process context only
@@ -1754,6 +1757,8 @@ EXPORT_SYMBOL(transport_generic_handle_cdb);
1754int transport_handle_cdb_direct( 1757int transport_handle_cdb_direct(
1755 struct se_cmd *cmd) 1758 struct se_cmd *cmd)
1756{ 1759{
1760 int ret;
1761
1757 if (!cmd->se_lun) { 1762 if (!cmd->se_lun) {
1758 dump_stack(); 1763 dump_stack();
1759 pr_err("cmd->se_lun is NULL\n"); 1764 pr_err("cmd->se_lun is NULL\n");
@@ -1765,8 +1770,31 @@ int transport_handle_cdb_direct(
1765 " from interrupt context\n"); 1770 " from interrupt context\n");
1766 return -EINVAL; 1771 return -EINVAL;
1767 } 1772 }
1768 1773 /*
1769 return transport_generic_new_cmd(cmd); 1774 * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
1775 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
1776 * in existing usage to ensure that outstanding descriptors are handled
1777 * correctly during shutdown via transport_generic_wait_for_tasks()
1778 *
1779 * Also, we don't take cmd->t_state_lock here as we only expect
1780 * this to be called for initial descriptor submission.
1781 */
1782 cmd->t_state = TRANSPORT_NEW_CMD;
1783 atomic_set(&cmd->t_transport_active, 1);
1784 /*
1785 * transport_generic_new_cmd() is already handling QUEUE_FULL,
1786 * so follow TRANSPORT_NEW_CMD processing thread context usage
1787 * and call transport_generic_request_failure() if necessary..
1788 */
1789 ret = transport_generic_new_cmd(cmd);
1790 if (ret == -EAGAIN)
1791 return 0;
1792 else if (ret < 0) {
1793 cmd->transport_error_status = ret;
1794 transport_generic_request_failure(cmd, NULL, 0,
1795 (cmd->data_direction != DMA_TO_DEVICE));
1796 }
1797 return 0;
1770} 1798}
1771EXPORT_SYMBOL(transport_handle_cdb_direct); 1799EXPORT_SYMBOL(transport_handle_cdb_direct);
1772 1800
@@ -2026,8 +2054,14 @@ static void transport_generic_request_failure(
2026 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 2054 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2027 break; 2055 break;
2028 } 2056 }
2029 2057 /*
2030 if (!sc) 2058 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
2059 * make the call to transport_send_check_condition_and_sense()
2060 * directly. Otherwise expect the fabric to make the call to
2061 * transport_send_check_condition_and_sense() after handling
2062 * possible unsoliticied write data payloads.
2063 */
2064 if (!sc && !cmd->se_tfo->new_cmd_map)
2031 transport_new_cmd_failure(cmd); 2065 transport_new_cmd_failure(cmd);
2032 else { 2066 else {
2033 ret = transport_send_check_condition_and_sense(cmd, 2067 ret = transport_send_check_condition_and_sense(cmd,
@@ -2820,12 +2854,42 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
2820 " transport_dev_end_lba(): %llu\n", 2854 " transport_dev_end_lba(): %llu\n",
2821 cmd->t_task_lba, sectors, 2855 cmd->t_task_lba, sectors,
2822 transport_dev_end_lba(dev)); 2856 transport_dev_end_lba(dev));
2823 pr_err(" We should return CHECK_CONDITION" 2857 return -EINVAL;
2824 " but we don't yet\n");
2825 return 0;
2826 } 2858 }
2827 2859
2828 return sectors; 2860 return 0;
2861}
2862
2863static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
2864{
2865 /*
2866 * Determine if the received WRITE_SAME is used to for direct
2867 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
2868 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
2869 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK code.
2870 */
2871 int passthrough = (dev->transport->transport_type ==
2872 TRANSPORT_PLUGIN_PHBA_PDEV);
2873
2874 if (!passthrough) {
2875 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
2876 pr_err("WRITE_SAME PBDATA and LBDATA"
2877 " bits not supported for Block Discard"
2878 " Emulation\n");
2879 return -ENOSYS;
2880 }
2881 /*
2882 * Currently for the emulated case we only accept
2883 * tpws with the UNMAP=1 bit set.
2884 */
2885 if (!(flags[0] & 0x08)) {
2886 pr_err("WRITE_SAME w/o UNMAP bit not"
2887 " supported for Block Discard Emulation\n");
2888 return -ENOSYS;
2889 }
2890 }
2891
2892 return 0;
2829} 2893}
2830 2894
2831/* transport_generic_cmd_sequencer(): 2895/* transport_generic_cmd_sequencer():
@@ -3038,7 +3102,7 @@ static int transport_generic_cmd_sequencer(
3038 goto out_unsupported_cdb; 3102 goto out_unsupported_cdb;
3039 3103
3040 if (sectors) 3104 if (sectors)
3041 size = transport_get_size(sectors, cdb, cmd); 3105 size = transport_get_size(1, cdb, cmd);
3042 else { 3106 else {
3043 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" 3107 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
3044 " supported\n"); 3108 " supported\n");
@@ -3048,27 +3112,9 @@ static int transport_generic_cmd_sequencer(
3048 cmd->t_task_lba = get_unaligned_be64(&cdb[12]); 3112 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
3049 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3113 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3050 3114
3051 /* 3115 if (target_check_write_same_discard(&cdb[10], dev) < 0)
3052 * Skip the remaining assignments for TCM/PSCSI passthrough
3053 */
3054 if (passthrough)
3055 break;
3056
3057 if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
3058 pr_err("WRITE_SAME PBDATA and LBDATA"
3059 " bits not supported for Block Discard"
3060 " Emulation\n");
3061 goto out_invalid_cdb_field;
3062 }
3063 /*
3064 * Currently for the emulated case we only accept
3065 * tpws with the UNMAP=1 bit set.
3066 */
3067 if (!(cdb[10] & 0x08)) {
3068 pr_err("WRITE_SAME w/o UNMAP bit not"
3069 " supported for Block Discard Emulation\n");
3070 goto out_invalid_cdb_field; 3116 goto out_invalid_cdb_field;
3071 } 3117
3072 break; 3118 break;
3073 default: 3119 default:
3074 pr_err("VARIABLE_LENGTH_CMD service action" 3120 pr_err("VARIABLE_LENGTH_CMD service action"
@@ -3303,10 +3349,12 @@ static int transport_generic_cmd_sequencer(
3303 cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; 3349 cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
3304 /* 3350 /*
3305 * Check to ensure that LBA + Range does not exceed past end of 3351 * Check to ensure that LBA + Range does not exceed past end of
3306 * device. 3352 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
3307 */ 3353 */
3308 if (!transport_cmd_get_valid_sectors(cmd)) 3354 if ((cmd->t_task_lba != 0) || (sectors != 0)) {
3309 goto out_invalid_cdb_field; 3355 if (transport_cmd_get_valid_sectors(cmd) < 0)
3356 goto out_invalid_cdb_field;
3357 }
3310 break; 3358 break;
3311 case UNMAP: 3359 case UNMAP:
3312 size = get_unaligned_be16(&cdb[7]); 3360 size = get_unaligned_be16(&cdb[7]);
@@ -3318,40 +3366,38 @@ static int transport_generic_cmd_sequencer(
3318 goto out_unsupported_cdb; 3366 goto out_unsupported_cdb;
3319 3367
3320 if (sectors) 3368 if (sectors)
3321 size = transport_get_size(sectors, cdb, cmd); 3369 size = transport_get_size(1, cdb, cmd);
3322 else { 3370 else {
3323 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 3371 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3324 goto out_invalid_cdb_field; 3372 goto out_invalid_cdb_field;
3325 } 3373 }
3326 3374
3327 cmd->t_task_lba = get_unaligned_be16(&cdb[2]); 3375 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
3328 passthrough = (dev->transport->transport_type == 3376 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3329 TRANSPORT_PLUGIN_PHBA_PDEV); 3377
3330 /* 3378 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3331 * Determine if the received WRITE_SAME_16 is used to for direct 3379 goto out_invalid_cdb_field;
3332 * passthrough into Linux/SCSI with struct request via TCM/pSCSI 3380 break;
3333 * or we are signaling the use of internal WRITE_SAME + UNMAP=1 3381 case WRITE_SAME:
3334 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and 3382 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
3335 * TCM/FILEIO subsystem plugin backstores. 3383 if (sector_ret)
3336 */ 3384 goto out_unsupported_cdb;
3337 if (!passthrough) { 3385
3338 if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { 3386 if (sectors)
3339 pr_err("WRITE_SAME PBDATA and LBDATA" 3387 size = transport_get_size(1, cdb, cmd);
3340 " bits not supported for Block Discard" 3388 else {
3341 " Emulation\n"); 3389 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
3342 goto out_invalid_cdb_field; 3390 goto out_invalid_cdb_field;
3343 }
3344 /*
3345 * Currently for the emulated case we only accept
3346 * tpws with the UNMAP=1 bit set.
3347 */
3348 if (!(cdb[1] & 0x08)) {
3349 pr_err("WRITE_SAME w/o UNMAP bit not "
3350 " supported for Block Discard Emulation\n");
3351 goto out_invalid_cdb_field;
3352 }
3353 } 3391 }
3392
3393 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
3354 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3394 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3395 /*
3396 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
3397 * of byte 1 bit 3 UNMAP instead of original reserved field
3398 */
3399 if (target_check_write_same_discard(&cdb[1], dev) < 0)
3400 goto out_invalid_cdb_field;
3355 break; 3401 break;
3356 case ALLOW_MEDIUM_REMOVAL: 3402 case ALLOW_MEDIUM_REMOVAL:
3357 case GPCMD_CLOSE_TRACK: 3403 case GPCMD_CLOSE_TRACK:
@@ -3846,9 +3892,7 @@ EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
3846static int transport_new_cmd_obj(struct se_cmd *cmd) 3892static int transport_new_cmd_obj(struct se_cmd *cmd)
3847{ 3893{
3848 struct se_device *dev = cmd->se_dev; 3894 struct se_device *dev = cmd->se_dev;
3849 u32 task_cdbs; 3895 int set_counts = 1, rc, task_cdbs;
3850 u32 rc;
3851 int set_counts = 1;
3852 3896
3853 /* 3897 /*
3854 * Setup any BIDI READ tasks and memory from 3898 * Setup any BIDI READ tasks and memory from
@@ -3866,7 +3910,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
3866 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3910 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3867 cmd->scsi_sense_reason = 3911 cmd->scsi_sense_reason =
3868 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3912 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3869 return PYX_TRANSPORT_LU_COMM_FAILURE; 3913 return -EINVAL;
3870 } 3914 }
3871 atomic_inc(&cmd->t_fe_count); 3915 atomic_inc(&cmd->t_fe_count);
3872 atomic_inc(&cmd->t_se_count); 3916 atomic_inc(&cmd->t_se_count);
@@ -3885,7 +3929,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
3885 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3929 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3886 cmd->scsi_sense_reason = 3930 cmd->scsi_sense_reason =
3887 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3931 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3888 return PYX_TRANSPORT_LU_COMM_FAILURE; 3932 return -EINVAL;
3889 } 3933 }
3890 3934
3891 if (set_counts) { 3935 if (set_counts) {
@@ -4001,8 +4045,6 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4001 if (!task->task_sg) 4045 if (!task->task_sg)
4002 continue; 4046 continue;
4003 4047
4004 BUG_ON(!task->task_padded_sg);
4005
4006 if (!sg_first) { 4048 if (!sg_first) {
4007 sg_first = task->task_sg; 4049 sg_first = task->task_sg;
4008 chained_nents = task->task_sg_nents; 4050 chained_nents = task->task_sg_nents;
@@ -4010,9 +4052,19 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
4010 sg_chain(sg_prev, sg_prev_nents, task->task_sg); 4052 sg_chain(sg_prev, sg_prev_nents, task->task_sg);
4011 chained_nents += task->task_sg_nents; 4053 chained_nents += task->task_sg_nents;
4012 } 4054 }
4055 /*
4056 * For the padded tasks, use the extra SGL vector allocated
4057 * in transport_allocate_data_tasks() for the sg_prev_nents
4058 * offset into sg_chain() above.. The last task of a
4059 * multi-task list, or a single task will not have
4060 * task->task_sg_padded set..
4061 */
4062 if (task->task_padded_sg)
4063 sg_prev_nents = (task->task_sg_nents + 1);
4064 else
4065 sg_prev_nents = task->task_sg_nents;
4013 4066
4014 sg_prev = task->task_sg; 4067 sg_prev = task->task_sg;
4015 sg_prev_nents = task->task_sg_nents;
4016 } 4068 }
4017 /* 4069 /*
4018 * Setup the starting pointer and total t_tasks_sg_linked_no including 4070 * Setup the starting pointer and total t_tasks_sg_linked_no including
@@ -4064,7 +4116,7 @@ static int transport_allocate_data_tasks(
4064 4116
4065 cmd_sg = sgl; 4117 cmd_sg = sgl;
4066 for (i = 0; i < task_count; i++) { 4118 for (i = 0; i < task_count; i++) {
4067 unsigned int task_size; 4119 unsigned int task_size, task_sg_nents_padded;
4068 int count; 4120 int count;
4069 4121
4070 task = transport_generic_get_task(cmd, data_direction); 4122 task = transport_generic_get_task(cmd, data_direction);
@@ -4083,30 +4135,33 @@ static int transport_allocate_data_tasks(
4083 4135
4084 /* Update new cdb with updated lba/sectors */ 4136 /* Update new cdb with updated lba/sectors */
4085 cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); 4137 cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
4086 4138 /*
4139 * This now assumes that passed sg_ents are in PAGE_SIZE chunks
4140 * in order to calculate the number per task SGL entries
4141 */
4142 task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
4087 /* 4143 /*
4088 * Check if the fabric module driver is requesting that all 4144 * Check if the fabric module driver is requesting that all
4089 * struct se_task->task_sg[] be chained together.. If so, 4145 * struct se_task->task_sg[] be chained together.. If so,
4090 * then allocate an extra padding SG entry for linking and 4146 * then allocate an extra padding SG entry for linking and
4091 * marking the end of the chained SGL. 4147 * marking the end of the chained SGL for every task except
4092 * Possibly over-allocate task sgl size by using cmd sgl size. 4148 * the last one for (task_count > 1) operation, or skipping
4093 * It's so much easier and only a waste when task_count > 1. 4149 * the extra padding for the (task_count == 1) case.
4094 * That is extremely rare.
4095 */ 4150 */
4096 task->task_sg_nents = sgl_nents; 4151 if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
4097 if (cmd->se_tfo->task_sg_chaining) { 4152 task_sg_nents_padded = (task->task_sg_nents + 1);
4098 task->task_sg_nents++;
4099 task->task_padded_sg = 1; 4153 task->task_padded_sg = 1;
4100 } 4154 } else
4155 task_sg_nents_padded = task->task_sg_nents;
4101 4156
4102 task->task_sg = kmalloc(sizeof(struct scatterlist) * 4157 task->task_sg = kmalloc(sizeof(struct scatterlist) *
4103 task->task_sg_nents, GFP_KERNEL); 4158 task_sg_nents_padded, GFP_KERNEL);
4104 if (!task->task_sg) { 4159 if (!task->task_sg) {
4105 cmd->se_dev->transport->free_task(task); 4160 cmd->se_dev->transport->free_task(task);
4106 return -ENOMEM; 4161 return -ENOMEM;
4107 } 4162 }
4108 4163
4109 sg_init_table(task->task_sg, task->task_sg_nents); 4164 sg_init_table(task->task_sg, task_sg_nents_padded);
4110 4165
4111 task_size = task->task_size; 4166 task_size = task->task_size;
4112 4167
@@ -4203,10 +4258,13 @@ static u32 transport_allocate_tasks(
4203 struct scatterlist *sgl, 4258 struct scatterlist *sgl,
4204 unsigned int sgl_nents) 4259 unsigned int sgl_nents)
4205{ 4260{
4206 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) 4261 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
4262 if (transport_cmd_get_valid_sectors(cmd) < 0)
4263 return -EINVAL;
4264
4207 return transport_allocate_data_tasks(cmd, lba, data_direction, 4265 return transport_allocate_data_tasks(cmd, lba, data_direction,
4208 sgl, sgl_nents); 4266 sgl, sgl_nents);
4209 else 4267 } else
4210 return transport_allocate_control_task(cmd); 4268 return transport_allocate_control_task(cmd);
4211 4269
4212} 4270}
@@ -4699,6 +4757,13 @@ int transport_send_check_condition_and_sense(
4699 */ 4757 */
4700 switch (reason) { 4758 switch (reason) {
4701 case TCM_NON_EXISTENT_LUN: 4759 case TCM_NON_EXISTENT_LUN:
4760 /* CURRENT ERROR */
4761 buffer[offset] = 0x70;
4762 /* ILLEGAL REQUEST */
4763 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4764 /* LOGICAL UNIT NOT SUPPORTED */
4765 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
4766 break;
4702 case TCM_UNSUPPORTED_SCSI_OPCODE: 4767 case TCM_UNSUPPORTED_SCSI_OPCODE:
4703 case TCM_SECTOR_COUNT_TOO_MANY: 4768 case TCM_SECTOR_COUNT_TOO_MANY:
4704 /* CURRENT ERROR */ 4769 /* CURRENT ERROR */
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index f7fff7ed63c3..bd4fe21a23b8 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -187,4 +187,9 @@ void ft_dump_cmd(struct ft_cmd *, const char *caller);
187 187
188ssize_t ft_format_wwn(char *, size_t, u64); 188ssize_t ft_format_wwn(char *, size_t, u64);
189 189
190/*
191 * Underlying HW specific helper function
192 */
193void ft_invl_hw_context(struct ft_cmd *);
194
190#endif /* __TCM_FC_H__ */ 195#endif /* __TCM_FC_H__ */
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 09df38b4610c..5654dc22f7ae 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -320,6 +320,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
320 default: 320 default:
321 pr_debug("%s: unhandled frame r_ctl %x\n", 321 pr_debug("%s: unhandled frame r_ctl %x\n",
322 __func__, fh->fh_r_ctl); 322 __func__, fh->fh_r_ctl);
323 ft_invl_hw_context(cmd);
323 fc_frame_free(fp); 324 fc_frame_free(fp);
324 transport_generic_free_cmd(&cmd->se_cmd, 0, 0); 325 transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
325 break; 326 break;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 8781d1e423df..b15879d43e22 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -256,7 +256,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
256 struct se_portal_group *se_tpg = &tpg->se_tpg; 256 struct se_portal_group *se_tpg = &tpg->se_tpg;
257 struct se_node_acl *se_acl; 257 struct se_node_acl *se_acl;
258 258
259 spin_lock_bh(&se_tpg->acl_node_lock); 259 spin_lock_irq(&se_tpg->acl_node_lock);
260 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { 260 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
261 acl = container_of(se_acl, struct ft_node_acl, se_node_acl); 261 acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
262 pr_debug("acl %p port_name %llx\n", 262 pr_debug("acl %p port_name %llx\n",
@@ -270,7 +270,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
270 break; 270 break;
271 } 271 }
272 } 272 }
273 spin_unlock_bh(&se_tpg->acl_node_lock); 273 spin_unlock_irq(&se_tpg->acl_node_lock);
274 return found; 274 return found;
275} 275}
276 276
@@ -655,9 +655,7 @@ static void __exit ft_exit(void)
655 synchronize_rcu(); 655 synchronize_rcu();
656} 656}
657 657
658#ifdef MODULE
659MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION); 658MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
660MODULE_LICENSE("GPL"); 659MODULE_LICENSE("GPL");
661module_init(ft_init); 660module_init(ft_init);
662module_exit(ft_exit); 661module_exit(ft_exit);
663#endif /* MODULE */
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 8e2a46ddcccb..c37f4cd96452 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -213,62 +213,49 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
213 if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF)) 213 if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
214 goto drop; 214 goto drop;
215 215
216 f_ctl = ntoh24(fh->fh_f_ctl);
217 ep = fc_seq_exch(seq);
218 lport = ep->lp;
219 if (cmd->was_ddp_setup) {
220 BUG_ON(!ep);
221 BUG_ON(!lport);
222 }
223
216 /* 224 /*
217 * Doesn't expect even single byte of payload. Payload 225 * Doesn't expect payload if DDP is setup. Payload
218 * is expected to be copied directly to user buffers 226 * is expected to be copied directly to user buffers
219 * due to DDP (Large Rx offload) feature, hence 227 * due to DDP (Large Rx offload),
220 * BUG_ON if BUF is non-NULL
221 */ 228 */
222 buf = fc_frame_payload_get(fp, 1); 229 buf = fc_frame_payload_get(fp, 1);
223 if (cmd->was_ddp_setup && buf) { 230 if (buf)
224 pr_debug("%s: When DDP was setup, not expected to" 231 pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
225 "receive frame with payload, Payload shall be" 232 "cmd->sg_cnt 0x%x. DDP was setup"
226 "copied directly to buffer instead of coming " 233 " hence not expected to receive frame with "
227 "via. legacy receive queues\n", __func__); 234 "payload, Frame will be dropped if "
228 BUG_ON(buf); 235 "'Sequence Initiative' bit in f_ctl is "
229 } 236 "not set\n", __func__, ep->xid, f_ctl,
237 cmd->sg, cmd->sg_cnt);
238 /*
239 * Invalidate HW DDP context if it was setup for respective
240 * command. Invalidation of HW DDP context is requited in both
241 * situation (success and error).
242 */
243 ft_invl_hw_context(cmd);
230 244
231 /* 245 /*
232 * If ft_cmd indicated 'ddp_setup', in that case only the last frame 246 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
233 * should come with 'TSI bit being set'. If 'TSI bit is not set and if 247 * write data frame is received successfully where payload is
234 * data frame appears here, means error condition. In both the cases 248 * posted directly to user buffer and only the last frame's
235 * release the DDP context (ddp_put) and in error case, as well 249 * header is posted in receive queue.
236 * initiate error recovery mechanism. 250 *
251 * If "Sequence Initiative (TSI)" bit is not set, means error
252 * condition w.r.t. DDP, hence drop the packet and let explict
253 * ABORTS from other end of exchange timer trigger the recovery.
237 */ 254 */
238 ep = fc_seq_exch(seq); 255 if (f_ctl & FC_FC_SEQ_INIT)
239 if (cmd->was_ddp_setup) { 256 goto last_frame;
240 BUG_ON(!ep); 257 else
241 lport = ep->lp; 258 goto drop;
242 BUG_ON(!lport);
243 }
244 if (cmd->was_ddp_setup && ep->xid != FC_XID_UNKNOWN) {
245 f_ctl = ntoh24(fh->fh_f_ctl);
246 /*
247 * If TSI bit set in f_ctl, means last write data frame is
248 * received successfully where payload is posted directly
249 * to user buffer and only the last frame's header is posted
250 * in legacy receive queue
251 */
252 if (f_ctl & FC_FC_SEQ_INIT) { /* TSI bit set in FC frame */
253 cmd->write_data_len = lport->tt.ddp_done(lport,
254 ep->xid);
255 goto last_frame;
256 } else {
257 /*
258 * Updating the write_data_len may be meaningless at
259 * this point, but just in case if required in future
260 * for debugging or any other purpose
261 */
262 pr_err("%s: Received frame with TSI bit not"
263 " being SET, dropping the frame, "
264 "cmd->sg <%p>, cmd->sg_cnt <0x%x>\n",
265 __func__, cmd->sg, cmd->sg_cnt);
266 cmd->write_data_len = lport->tt.ddp_done(lport,
267 ep->xid);
268 lport->tt.seq_exch_abort(cmd->seq, 0);
269 goto drop;
270 }
271 }
272 259
273 rel_off = ntohl(fh->fh_parm_offset); 260 rel_off = ntohl(fh->fh_parm_offset);
274 frame_len = fr_len(fp); 261 frame_len = fr_len(fp);
@@ -331,3 +318,39 @@ last_frame:
331drop: 318drop:
332 fc_frame_free(fp); 319 fc_frame_free(fp);
333} 320}
321
322/*
323 * Handle and cleanup any HW specific resources if
324 * received ABORTS, errors, timeouts.
325 */
326void ft_invl_hw_context(struct ft_cmd *cmd)
327{
328 struct fc_seq *seq = cmd->seq;
329 struct fc_exch *ep = NULL;
330 struct fc_lport *lport = NULL;
331
332 BUG_ON(!cmd);
333
334 /* Cleanup the DDP context in HW if DDP was setup */
335 if (cmd->was_ddp_setup && seq) {
336 ep = fc_seq_exch(seq);
337 if (ep) {
338 lport = ep->lp;
339 if (lport && (ep->xid <= lport->lro_xid))
340 /*
341 * "ddp_done" trigger invalidation of HW
342 * specific DDP context
343 */
344 cmd->write_data_len = lport->tt.ddp_done(lport,
345 ep->xid);
346
347 /*
348 * Resetting same variable to indicate HW's
349 * DDP context has been invalidated to avoid
350 * re_invalidation of same context (context is
351 * identified using ep->xid)
352 */
353 cmd->was_ddp_setup = 0;
354 }
355 }
356}
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index bf7c687519ef..f7f71b2d3101 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -14,11 +14,7 @@ menuconfig THERMAL
14 If you want this support, you should say Y or M here. 14 If you want this support, you should say Y or M here.
15 15
16config THERMAL_HWMON 16config THERMAL_HWMON
17 bool "Hardware monitoring support" 17 bool
18 depends on THERMAL 18 depends on THERMAL
19 depends on HWMON=y || HWMON=THERMAL 19 depends on HWMON=y || HWMON=THERMAL
20 help 20 default y
21 The generic thermal sysfs driver's hardware monitoring support
22 requires a 2.10.7/3.0.2 or later lm-sensors userspace.
23
24 Say Y if your user-space is new enough.
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 0b1c82ad6805..708f8e92771a 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -420,6 +420,29 @@ thermal_cooling_device_trip_point_show(struct device *dev,
420 420
421/* hwmon sys I/F */ 421/* hwmon sys I/F */
422#include <linux/hwmon.h> 422#include <linux/hwmon.h>
423
424/* thermal zone devices with the same type share one hwmon device */
425struct thermal_hwmon_device {
426 char type[THERMAL_NAME_LENGTH];
427 struct device *device;
428 int count;
429 struct list_head tz_list;
430 struct list_head node;
431};
432
433struct thermal_hwmon_attr {
434 struct device_attribute attr;
435 char name[16];
436};
437
438/* one temperature input for each thermal zone */
439struct thermal_hwmon_temp {
440 struct list_head hwmon_node;
441 struct thermal_zone_device *tz;
442 struct thermal_hwmon_attr temp_input; /* hwmon sys attr */
443 struct thermal_hwmon_attr temp_crit; /* hwmon sys attr */
444};
445
423static LIST_HEAD(thermal_hwmon_list); 446static LIST_HEAD(thermal_hwmon_list);
424 447
425static ssize_t 448static ssize_t
@@ -437,9 +460,10 @@ temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
437 int ret; 460 int ret;
438 struct thermal_hwmon_attr *hwmon_attr 461 struct thermal_hwmon_attr *hwmon_attr
439 = container_of(attr, struct thermal_hwmon_attr, attr); 462 = container_of(attr, struct thermal_hwmon_attr, attr);
440 struct thermal_zone_device *tz 463 struct thermal_hwmon_temp *temp
441 = container_of(hwmon_attr, struct thermal_zone_device, 464 = container_of(hwmon_attr, struct thermal_hwmon_temp,
442 temp_input); 465 temp_input);
466 struct thermal_zone_device *tz = temp->tz;
443 467
444 ret = tz->ops->get_temp(tz, &temperature); 468 ret = tz->ops->get_temp(tz, &temperature);
445 469
@@ -455,9 +479,10 @@ temp_crit_show(struct device *dev, struct device_attribute *attr,
455{ 479{
456 struct thermal_hwmon_attr *hwmon_attr 480 struct thermal_hwmon_attr *hwmon_attr
457 = container_of(attr, struct thermal_hwmon_attr, attr); 481 = container_of(attr, struct thermal_hwmon_attr, attr);
458 struct thermal_zone_device *tz 482 struct thermal_hwmon_temp *temp
459 = container_of(hwmon_attr, struct thermal_zone_device, 483 = container_of(hwmon_attr, struct thermal_hwmon_temp,
460 temp_crit); 484 temp_crit);
485 struct thermal_zone_device *tz = temp->tz;
461 long temperature; 486 long temperature;
462 int ret; 487 int ret;
463 488
@@ -469,22 +494,54 @@ temp_crit_show(struct device *dev, struct device_attribute *attr,
469} 494}
470 495
471 496
472static int 497static struct thermal_hwmon_device *
473thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) 498thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz)
474{ 499{
475 struct thermal_hwmon_device *hwmon; 500 struct thermal_hwmon_device *hwmon;
476 int new_hwmon_device = 1;
477 int result;
478 501
479 mutex_lock(&thermal_list_lock); 502 mutex_lock(&thermal_list_lock);
480 list_for_each_entry(hwmon, &thermal_hwmon_list, node) 503 list_for_each_entry(hwmon, &thermal_hwmon_list, node)
481 if (!strcmp(hwmon->type, tz->type)) { 504 if (!strcmp(hwmon->type, tz->type)) {
482 new_hwmon_device = 0;
483 mutex_unlock(&thermal_list_lock); 505 mutex_unlock(&thermal_list_lock);
484 goto register_sys_interface; 506 return hwmon;
507 }
508 mutex_unlock(&thermal_list_lock);
509
510 return NULL;
511}
512
513/* Find the temperature input matching a given thermal zone */
514static struct thermal_hwmon_temp *
515thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
516 const struct thermal_zone_device *tz)
517{
518 struct thermal_hwmon_temp *temp;
519
520 mutex_lock(&thermal_list_lock);
521 list_for_each_entry(temp, &hwmon->tz_list, hwmon_node)
522 if (temp->tz == tz) {
523 mutex_unlock(&thermal_list_lock);
524 return temp;
485 } 525 }
486 mutex_unlock(&thermal_list_lock); 526 mutex_unlock(&thermal_list_lock);
487 527
528 return NULL;
529}
530
531static int
532thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
533{
534 struct thermal_hwmon_device *hwmon;
535 struct thermal_hwmon_temp *temp;
536 int new_hwmon_device = 1;
537 int result;
538
539 hwmon = thermal_hwmon_lookup_by_type(tz);
540 if (hwmon) {
541 new_hwmon_device = 0;
542 goto register_sys_interface;
543 }
544
488 hwmon = kzalloc(sizeof(struct thermal_hwmon_device), GFP_KERNEL); 545 hwmon = kzalloc(sizeof(struct thermal_hwmon_device), GFP_KERNEL);
489 if (!hwmon) 546 if (!hwmon)
490 return -ENOMEM; 547 return -ENOMEM;
@@ -502,30 +559,36 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
502 goto free_mem; 559 goto free_mem;
503 560
504 register_sys_interface: 561 register_sys_interface:
505 tz->hwmon = hwmon; 562 temp = kzalloc(sizeof(struct thermal_hwmon_temp), GFP_KERNEL);
563 if (!temp) {
564 result = -ENOMEM;
565 goto unregister_name;
566 }
567
568 temp->tz = tz;
506 hwmon->count++; 569 hwmon->count++;
507 570
508 snprintf(tz->temp_input.name, THERMAL_NAME_LENGTH, 571 snprintf(temp->temp_input.name, THERMAL_NAME_LENGTH,
509 "temp%d_input", hwmon->count); 572 "temp%d_input", hwmon->count);
510 tz->temp_input.attr.attr.name = tz->temp_input.name; 573 temp->temp_input.attr.attr.name = temp->temp_input.name;
511 tz->temp_input.attr.attr.mode = 0444; 574 temp->temp_input.attr.attr.mode = 0444;
512 tz->temp_input.attr.show = temp_input_show; 575 temp->temp_input.attr.show = temp_input_show;
513 sysfs_attr_init(&tz->temp_input.attr.attr); 576 sysfs_attr_init(&temp->temp_input.attr.attr);
514 result = device_create_file(hwmon->device, &tz->temp_input.attr); 577 result = device_create_file(hwmon->device, &temp->temp_input.attr);
515 if (result) 578 if (result)
516 goto unregister_name; 579 goto free_temp_mem;
517 580
518 if (tz->ops->get_crit_temp) { 581 if (tz->ops->get_crit_temp) {
519 unsigned long temperature; 582 unsigned long temperature;
520 if (!tz->ops->get_crit_temp(tz, &temperature)) { 583 if (!tz->ops->get_crit_temp(tz, &temperature)) {
521 snprintf(tz->temp_crit.name, THERMAL_NAME_LENGTH, 584 snprintf(temp->temp_crit.name, THERMAL_NAME_LENGTH,
522 "temp%d_crit", hwmon->count); 585 "temp%d_crit", hwmon->count);
523 tz->temp_crit.attr.attr.name = tz->temp_crit.name; 586 temp->temp_crit.attr.attr.name = temp->temp_crit.name;
524 tz->temp_crit.attr.attr.mode = 0444; 587 temp->temp_crit.attr.attr.mode = 0444;
525 tz->temp_crit.attr.show = temp_crit_show; 588 temp->temp_crit.attr.show = temp_crit_show;
526 sysfs_attr_init(&tz->temp_crit.attr.attr); 589 sysfs_attr_init(&temp->temp_crit.attr.attr);
527 result = device_create_file(hwmon->device, 590 result = device_create_file(hwmon->device,
528 &tz->temp_crit.attr); 591 &temp->temp_crit.attr);
529 if (result) 592 if (result)
530 goto unregister_input; 593 goto unregister_input;
531 } 594 }
@@ -534,13 +597,15 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
534 mutex_lock(&thermal_list_lock); 597 mutex_lock(&thermal_list_lock);
535 if (new_hwmon_device) 598 if (new_hwmon_device)
536 list_add_tail(&hwmon->node, &thermal_hwmon_list); 599 list_add_tail(&hwmon->node, &thermal_hwmon_list);
537 list_add_tail(&tz->hwmon_node, &hwmon->tz_list); 600 list_add_tail(&temp->hwmon_node, &hwmon->tz_list);
538 mutex_unlock(&thermal_list_lock); 601 mutex_unlock(&thermal_list_lock);
539 602
540 return 0; 603 return 0;
541 604
542 unregister_input: 605 unregister_input:
543 device_remove_file(hwmon->device, &tz->temp_input.attr); 606 device_remove_file(hwmon->device, &temp->temp_input.attr);
607 free_temp_mem:
608 kfree(temp);
544 unregister_name: 609 unregister_name:
545 if (new_hwmon_device) { 610 if (new_hwmon_device) {
546 device_remove_file(hwmon->device, &dev_attr_name); 611 device_remove_file(hwmon->device, &dev_attr_name);
@@ -556,15 +621,30 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
556static void 621static void
557thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) 622thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
558{ 623{
559 struct thermal_hwmon_device *hwmon = tz->hwmon; 624 struct thermal_hwmon_device *hwmon;
625 struct thermal_hwmon_temp *temp;
626
627 hwmon = thermal_hwmon_lookup_by_type(tz);
628 if (unlikely(!hwmon)) {
629 /* Should never happen... */
630 dev_dbg(&tz->device, "hwmon device lookup failed!\n");
631 return;
632 }
633
634 temp = thermal_hwmon_lookup_temp(hwmon, tz);
635 if (unlikely(!temp)) {
636 /* Should never happen... */
637 dev_dbg(&tz->device, "temperature input lookup failed!\n");
638 return;
639 }
560 640
561 tz->hwmon = NULL; 641 device_remove_file(hwmon->device, &temp->temp_input.attr);
562 device_remove_file(hwmon->device, &tz->temp_input.attr);
563 if (tz->ops->get_crit_temp) 642 if (tz->ops->get_crit_temp)
564 device_remove_file(hwmon->device, &tz->temp_crit.attr); 643 device_remove_file(hwmon->device, &temp->temp_crit.attr);
565 644
566 mutex_lock(&thermal_list_lock); 645 mutex_lock(&thermal_list_lock);
567 list_del(&tz->hwmon_node); 646 list_del(&temp->hwmon_node);
647 kfree(temp);
568 if (!list_empty(&hwmon->tz_list)) { 648 if (!list_empty(&hwmon->tz_list)) {
569 mutex_unlock(&thermal_list_lock); 649 mutex_unlock(&thermal_list_lock);
570 return; 650 return;
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 98b6e3bdb000..e809e9d4683c 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -446,8 +446,19 @@ static inline void legacy_pty_init(void) { }
446int pty_limit = NR_UNIX98_PTY_DEFAULT; 446int pty_limit = NR_UNIX98_PTY_DEFAULT;
447static int pty_limit_min; 447static int pty_limit_min;
448static int pty_limit_max = NR_UNIX98_PTY_MAX; 448static int pty_limit_max = NR_UNIX98_PTY_MAX;
449static int tty_count;
449static int pty_count; 450static int pty_count;
450 451
452static inline void pty_inc_count(void)
453{
454 pty_count = (++tty_count) / 2;
455}
456
457static inline void pty_dec_count(void)
458{
459 pty_count = (--tty_count) / 2;
460}
461
451static struct cdev ptmx_cdev; 462static struct cdev ptmx_cdev;
452 463
453static struct ctl_table pty_table[] = { 464static struct ctl_table pty_table[] = {
@@ -542,6 +553,7 @@ static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
542 553
543static void pty_unix98_shutdown(struct tty_struct *tty) 554static void pty_unix98_shutdown(struct tty_struct *tty)
544{ 555{
556 tty_driver_remove_tty(tty->driver, tty);
545 /* We have our own method as we don't use the tty index */ 557 /* We have our own method as we don't use the tty index */
546 kfree(tty->termios); 558 kfree(tty->termios);
547} 559}
@@ -588,7 +600,8 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
588 */ 600 */
589 tty_driver_kref_get(driver); 601 tty_driver_kref_get(driver);
590 tty->count++; 602 tty->count++;
591 pty_count++; 603 pty_inc_count(); /* tty */
604 pty_inc_count(); /* tty->link */
592 return 0; 605 return 0;
593err_free_mem: 606err_free_mem:
594 deinitialize_tty_struct(o_tty); 607 deinitialize_tty_struct(o_tty);
@@ -602,7 +615,7 @@ err_free_tty:
602 615
603static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) 616static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
604{ 617{
605 pty_count--; 618 pty_dec_count();
606} 619}
607 620
608static const struct tty_operations ptm_unix98_ops = { 621static const struct tty_operations ptm_unix98_ops = {
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
index f2dfec82faf8..7f50999eebc2 100644
--- a/drivers/tty/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -1819,6 +1819,8 @@ static void serial8250_backup_timeout(unsigned long data)
1819 unsigned int iir, ier = 0, lsr; 1819 unsigned int iir, ier = 0, lsr;
1820 unsigned long flags; 1820 unsigned long flags;
1821 1821
1822 spin_lock_irqsave(&up->port.lock, flags);
1823
1822 /* 1824 /*
1823 * Must disable interrupts or else we risk racing with the interrupt 1825 * Must disable interrupts or else we risk racing with the interrupt
1824 * based handler. 1826 * based handler.
@@ -1836,10 +1838,8 @@ static void serial8250_backup_timeout(unsigned long data)
1836 * the "Diva" UART used on the management processor on many HP 1838 * the "Diva" UART used on the management processor on many HP
1837 * ia64 and parisc boxes. 1839 * ia64 and parisc boxes.
1838 */ 1840 */
1839 spin_lock_irqsave(&up->port.lock, flags);
1840 lsr = serial_in(up, UART_LSR); 1841 lsr = serial_in(up, UART_LSR);
1841 up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; 1842 up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
1842 spin_unlock_irqrestore(&up->port.lock, flags);
1843 if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) && 1843 if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) &&
1844 (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) && 1844 (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) &&
1845 (lsr & UART_LSR_THRE)) { 1845 (lsr & UART_LSR_THRE)) {
@@ -1848,11 +1848,13 @@ static void serial8250_backup_timeout(unsigned long data)
1848 } 1848 }
1849 1849
1850 if (!(iir & UART_IIR_NO_INT)) 1850 if (!(iir & UART_IIR_NO_INT))
1851 serial8250_handle_port(up); 1851 transmit_chars(up);
1852 1852
1853 if (is_real_interrupt(up->port.irq)) 1853 if (is_real_interrupt(up->port.irq))
1854 serial_out(up, UART_IER, ier); 1854 serial_out(up, UART_IER, ier);
1855 1855
1856 spin_unlock_irqrestore(&up->port.lock, flags);
1857
1856 /* Standard timer interval plus 0.2s to keep the port running */ 1858 /* Standard timer interval plus 0.2s to keep the port running */
1857 mod_timer(&up->timer, 1859 mod_timer(&up->timer,
1858 jiffies + uart_poll_timeout(&up->port) + HZ / 5); 1860 jiffies + uart_poll_timeout(&up->port) + HZ / 5);
diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
index 6b887d90a205..3abeca2a2a1b 100644
--- a/drivers/tty/serial/8250_pci.c
+++ b/drivers/tty/serial/8250_pci.c
@@ -1599,11 +1599,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
1599 .device = 0x800D, 1599 .device = 0x800D,
1600 .init = pci_eg20t_init, 1600 .init = pci_eg20t_init,
1601 }, 1601 },
1602 {
1603 .vendor = 0x10DB,
1604 .device = 0x800D,
1605 .init = pci_eg20t_init,
1606 },
1607 /* 1602 /*
1608 * Cronyx Omega PCI (PLX-chip based) 1603 * Cronyx Omega PCI (PLX-chip based)
1609 */ 1604 */
@@ -4021,7 +4016,7 @@ static struct pci_device_id serial_pci_tbl[] = {
4021 0, 0, pbn_NETMOS9900_2s_115200 }, 4016 0, 0, pbn_NETMOS9900_2s_115200 },
4022 4017
4023 /* 4018 /*
4024 * Best Connectivity PCI Multi I/O cards 4019 * Best Connectivity and Rosewill PCI Multi I/O cards
4025 */ 4020 */
4026 4021
4027 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, 4022 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
@@ -4029,6 +4024,10 @@ static struct pci_device_id serial_pci_tbl[] = {
4029 0, 0, pbn_b0_1_115200 }, 4024 0, 0, pbn_b0_1_115200 },
4030 4025
4031 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, 4026 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
4027 0xA000, 0x3002,
4028 0, 0, pbn_b0_bt_2_115200 },
4029
4030 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
4032 0xA000, 0x3004, 4031 0xA000, 0x3004,
4033 0, 0, pbn_b0_bt_4_115200 }, 4032 0, 0, pbn_b0_bt_4_115200 },
4034 /* Intel CE4100 */ 4033 /* Intel CE4100 */
diff --git a/drivers/tty/serial/8250_pnp.c b/drivers/tty/serial/8250_pnp.c
index fc301f6722e1..a2f236510ff1 100644
--- a/drivers/tty/serial/8250_pnp.c
+++ b/drivers/tty/serial/8250_pnp.c
@@ -109,6 +109,9 @@ static const struct pnp_device_id pnp_dev_table[] = {
109 /* IBM */ 109 /* IBM */
110 /* IBM Thinkpad 701 Internal Modem Voice */ 110 /* IBM Thinkpad 701 Internal Modem Voice */
111 { "IBM0033", 0 }, 111 { "IBM0033", 0 },
112 /* Intermec */
113 /* Intermec CV60 touchscreen port */
114 { "PNP4972", 0 },
112 /* Intertex */ 115 /* Intertex */
113 /* Intertex 28k8 33k6 Voice EXT PnP */ 116 /* Intertex 28k8 33k6 Voice EXT PnP */
114 { "IXDC801", 0 }, 117 { "IXDC801", 0 },
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index cb40b82daf36..4dcb37bbdf92 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -959,7 +959,7 @@ config SERIAL_IP22_ZILOG_CONSOLE
959 959
960config SERIAL_SH_SCI 960config SERIAL_SH_SCI
961 tristate "SuperH SCI(F) serial port support" 961 tristate "SuperH SCI(F) serial port support"
962 depends on HAVE_CLK && (SUPERH || H8300 || ARCH_SHMOBILE) 962 depends on HAVE_CLK && (SUPERH || ARCH_SHMOBILE)
963 select SERIAL_CORE 963 select SERIAL_CORE
964 964
965config SERIAL_SH_SCI_NR_UARTS 965config SERIAL_SH_SCI_NR_UARTS
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index af9b7814965a..b922f5d2e61e 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1609,9 +1609,11 @@ static struct console atmel_console = {
1609static int __init atmel_console_init(void) 1609static int __init atmel_console_init(void)
1610{ 1610{
1611 if (atmel_default_console_device) { 1611 if (atmel_default_console_device) {
1612 add_preferred_console(ATMEL_DEVICENAME, 1612 struct atmel_uart_data *pdata =
1613 atmel_default_console_device->id, NULL); 1613 atmel_default_console_device->dev.platform_data;
1614 atmel_init_port(&atmel_ports[atmel_default_console_device->id], 1614
1615 add_preferred_console(ATMEL_DEVICENAME, pdata->num, NULL);
1616 atmel_init_port(&atmel_ports[pdata->num],
1615 atmel_default_console_device); 1617 atmel_default_console_device);
1616 register_console(&atmel_console); 1618 register_console(&atmel_console);
1617 } 1619 }
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 827db7654594..7e91b3d368cd 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1286,22 +1286,17 @@ static int serial_imx_resume(struct platform_device *dev)
1286static int serial_imx_probe_dt(struct imx_port *sport, 1286static int serial_imx_probe_dt(struct imx_port *sport,
1287 struct platform_device *pdev) 1287 struct platform_device *pdev)
1288{ 1288{
1289 static int portnum = 0;
1289 struct device_node *np = pdev->dev.of_node; 1290 struct device_node *np = pdev->dev.of_node;
1290 const struct of_device_id *of_id = 1291 const struct of_device_id *of_id =
1291 of_match_device(imx_uart_dt_ids, &pdev->dev); 1292 of_match_device(imx_uart_dt_ids, &pdev->dev);
1292 int ret;
1293 1293
1294 if (!np) 1294 if (!np)
1295 return -ENODEV; 1295 return -ENODEV;
1296 1296
1297 ret = of_alias_get_id(np, "serial"); 1297 sport->port.line = portnum++;
1298 if (ret < 0) { 1298 if (sport->port.line >= UART_NR)
1299 pr_err("%s: failed to get alias id, errno %d\n", 1299 return -EINVAL;
1300 __func__, ret);
1301 return -ENODEV;
1302 } else {
1303 sport->port.line = ret;
1304 }
1305 1300
1306 if (of_get_property(np, "fsl,uart-has-rtscts", NULL)) 1301 if (of_get_property(np, "fsl,uart-has-rtscts", NULL))
1307 sport->have_rtscts = 1; 1302 sport->have_rtscts = 1;
diff --git a/drivers/tty/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c
index a1fe304f2f52..d73aadd7a9ad 100644
--- a/drivers/tty/serial/max3107-aava.c
+++ b/drivers/tty/serial/max3107-aava.c
@@ -340,5 +340,5 @@ module_exit(max3107_exit);
340 340
341MODULE_DESCRIPTION("MAX3107 driver"); 341MODULE_DESCRIPTION("MAX3107 driver");
342MODULE_AUTHOR("Aavamobile"); 342MODULE_AUTHOR("Aavamobile");
343MODULE_ALIAS("aava-max3107-spi"); 343MODULE_ALIAS("spi:aava-max3107");
344MODULE_LICENSE("GPL v2"); 344MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
index 750b4f627315..a8164601c0ea 100644
--- a/drivers/tty/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -1209,5 +1209,5 @@ module_exit(max3107_exit);
1209 1209
1210MODULE_DESCRIPTION("MAX3107 driver"); 1210MODULE_DESCRIPTION("MAX3107 driver");
1211MODULE_AUTHOR("Aavamobile"); 1211MODULE_AUTHOR("Aavamobile");
1212MODULE_ALIAS("max3107-spi"); 1212MODULE_ALIAS("spi:max3107");
1213MODULE_LICENSE("GPL v2"); 1213MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index a764bf99743b..23bc743f2a22 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -917,4 +917,4 @@ module_init(serial_m3110_init);
917module_exit(serial_m3110_exit); 917module_exit(serial_m3110_exit);
918 918
919MODULE_LICENSE("GPL v2"); 919MODULE_LICENSE("GPL v2");
920MODULE_ALIAS("max3110-uart"); 920MODULE_ALIAS("spi:max3110-uart");
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index c37df8d0fa28..5e713d3ef1f4 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -806,8 +806,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
806 806
807 serial_omap_set_mctrl(&up->port, up->port.mctrl); 807 serial_omap_set_mctrl(&up->port, up->port.mctrl);
808 /* Software Flow Control Configuration */ 808 /* Software Flow Control Configuration */
809 if (termios->c_iflag & (IXON | IXOFF)) 809 serial_omap_configure_xonxoff(up, termios);
810 serial_omap_configure_xonxoff(up, termios);
811 810
812 spin_unlock_irqrestore(&up->port.lock, flags); 811 spin_unlock_irqrestore(&up->port.lock, flags);
813 dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id); 812 dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id);
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 846dfcd3ce0d..b46218d679e2 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -598,7 +598,8 @@ static void pch_request_dma(struct uart_port *port)
598 dma_cap_zero(mask); 598 dma_cap_zero(mask);
599 dma_cap_set(DMA_SLAVE, mask); 599 dma_cap_set(DMA_SLAVE, mask);
600 600
601 dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(0xa, 0)); /* Get DMA's dev 601 dma_dev = pci_get_bus_and_slot(priv->pdev->bus->number,
602 PCI_DEVFN(0xa, 0)); /* Get DMA's dev
602 information */ 603 information */
603 /* Set Tx DMA */ 604 /* Set Tx DMA */
604 param = &priv->param_tx; 605 param = &priv->param_tx;
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index afc629423152..6edafb5ace18 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1225,15 +1225,19 @@ static const struct dev_pm_ops s3c24xx_serial_pm_ops = {
1225 .suspend = s3c24xx_serial_suspend, 1225 .suspend = s3c24xx_serial_suspend,
1226 .resume = s3c24xx_serial_resume, 1226 .resume = s3c24xx_serial_resume,
1227}; 1227};
1228#define SERIAL_SAMSUNG_PM_OPS (&s3c24xx_serial_pm_ops)
1229
1228#else /* !CONFIG_PM_SLEEP */ 1230#else /* !CONFIG_PM_SLEEP */
1229#define s3c24xx_serial_pm_ops NULL 1231
1232#define SERIAL_SAMSUNG_PM_OPS NULL
1230#endif /* CONFIG_PM_SLEEP */ 1233#endif /* CONFIG_PM_SLEEP */
1231 1234
1232int s3c24xx_serial_init(struct platform_driver *drv, 1235int s3c24xx_serial_init(struct platform_driver *drv,
1233 struct s3c24xx_uart_info *info) 1236 struct s3c24xx_uart_info *info)
1234{ 1237{
1235 dbg("s3c24xx_serial_init(%p,%p)\n", drv, info); 1238 dbg("s3c24xx_serial_init(%p,%p)\n", drv, info);
1236 drv->driver.pm = &s3c24xx_serial_pm_ops; 1239
1240 drv->driver.pm = SERIAL_SAMSUNG_PM_OPS;
1237 1241
1238 return platform_driver_register(drv); 1242 return platform_driver_register(drv);
1239} 1243}
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index db7912cb7ae0..a3efbea5dbba 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -200,6 +200,11 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state, int in
200 clear_bit(TTY_IO_ERROR, &tty->flags); 200 clear_bit(TTY_IO_ERROR, &tty->flags);
201 } 201 }
202 202
203 /*
204 * This is to allow setserial on this port. People may want to set
205 * port/irq/type and then reconfigure the port properly if it failed
206 * now.
207 */
203 if (retval && capable(CAP_SYS_ADMIN)) 208 if (retval && capable(CAP_SYS_ADMIN))
204 retval = 0; 209 retval = 0;
205 210
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ebd8629c108d..5ea6ec3442e6 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -47,6 +47,7 @@
47#include <linux/ctype.h> 47#include <linux/ctype.h>
48#include <linux/err.h> 48#include <linux/err.h>
49#include <linux/dmaengine.h> 49#include <linux/dmaengine.h>
50#include <linux/dma-mapping.h>
50#include <linux/scatterlist.h> 51#include <linux/scatterlist.h>
51#include <linux/slab.h> 52#include <linux/slab.h>
52 53
@@ -54,10 +55,6 @@
54#include <asm/sh_bios.h> 55#include <asm/sh_bios.h>
55#endif 56#endif
56 57
57#ifdef CONFIG_H8300
58#include <asm/gpio.h>
59#endif
60
61#include "sh-sci.h" 58#include "sh-sci.h"
62 59
63struct sci_port { 60struct sci_port {
@@ -66,12 +63,6 @@ struct sci_port {
66 /* Platform configuration */ 63 /* Platform configuration */
67 struct plat_sci_port *cfg; 64 struct plat_sci_port *cfg;
68 65
69 /* Port enable callback */
70 void (*enable)(struct uart_port *port);
71
72 /* Port disable callback */
73 void (*disable)(struct uart_port *port);
74
75 /* Break timer */ 66 /* Break timer */
76 struct timer_list break_timer; 67 struct timer_list break_timer;
77 int break_flag; 68 int break_flag;
@@ -81,6 +72,8 @@ struct sci_port {
81 /* Function clock */ 72 /* Function clock */
82 struct clk *fclk; 73 struct clk *fclk;
83 74
75 char *irqstr[SCIx_NR_IRQS];
76
84 struct dma_chan *chan_tx; 77 struct dma_chan *chan_tx;
85 struct dma_chan *chan_rx; 78 struct dma_chan *chan_rx;
86 79
@@ -103,6 +96,12 @@ struct sci_port {
103#endif 96#endif
104 97
105 struct notifier_block freq_transition; 98 struct notifier_block freq_transition;
99
100#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
101 unsigned short saved_smr;
102 unsigned short saved_fcr;
103 unsigned char saved_brr;
104#endif
106}; 105};
107 106
108/* Function prototypes */ 107/* Function prototypes */
@@ -121,6 +120,278 @@ to_sci_port(struct uart_port *uart)
121 return container_of(uart, struct sci_port, port); 120 return container_of(uart, struct sci_port, port);
122} 121}
123 122
123struct plat_sci_reg {
124 u8 offset, size;
125};
126
127/* Helper for invalidating specific entries of an inherited map. */
128#define sci_reg_invalid { .offset = 0, .size = 0 }
129
130static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
131 [SCIx_PROBE_REGTYPE] = {
132 [0 ... SCIx_NR_REGS - 1] = sci_reg_invalid,
133 },
134
135 /*
136 * Common SCI definitions, dependent on the port's regshift
137 * value.
138 */
139 [SCIx_SCI_REGTYPE] = {
140 [SCSMR] = { 0x00, 8 },
141 [SCBRR] = { 0x01, 8 },
142 [SCSCR] = { 0x02, 8 },
143 [SCxTDR] = { 0x03, 8 },
144 [SCxSR] = { 0x04, 8 },
145 [SCxRDR] = { 0x05, 8 },
146 [SCFCR] = sci_reg_invalid,
147 [SCFDR] = sci_reg_invalid,
148 [SCTFDR] = sci_reg_invalid,
149 [SCRFDR] = sci_reg_invalid,
150 [SCSPTR] = sci_reg_invalid,
151 [SCLSR] = sci_reg_invalid,
152 },
153
154 /*
155 * Common definitions for legacy IrDA ports, dependent on
156 * regshift value.
157 */
158 [SCIx_IRDA_REGTYPE] = {
159 [SCSMR] = { 0x00, 8 },
160 [SCBRR] = { 0x01, 8 },
161 [SCSCR] = { 0x02, 8 },
162 [SCxTDR] = { 0x03, 8 },
163 [SCxSR] = { 0x04, 8 },
164 [SCxRDR] = { 0x05, 8 },
165 [SCFCR] = { 0x06, 8 },
166 [SCFDR] = { 0x07, 16 },
167 [SCTFDR] = sci_reg_invalid,
168 [SCRFDR] = sci_reg_invalid,
169 [SCSPTR] = sci_reg_invalid,
170 [SCLSR] = sci_reg_invalid,
171 },
172
173 /*
174 * Common SCIFA definitions.
175 */
176 [SCIx_SCIFA_REGTYPE] = {
177 [SCSMR] = { 0x00, 16 },
178 [SCBRR] = { 0x04, 8 },
179 [SCSCR] = { 0x08, 16 },
180 [SCxTDR] = { 0x20, 8 },
181 [SCxSR] = { 0x14, 16 },
182 [SCxRDR] = { 0x24, 8 },
183 [SCFCR] = { 0x18, 16 },
184 [SCFDR] = { 0x1c, 16 },
185 [SCTFDR] = sci_reg_invalid,
186 [SCRFDR] = sci_reg_invalid,
187 [SCSPTR] = sci_reg_invalid,
188 [SCLSR] = sci_reg_invalid,
189 },
190
191 /*
192 * Common SCIFB definitions.
193 */
194 [SCIx_SCIFB_REGTYPE] = {
195 [SCSMR] = { 0x00, 16 },
196 [SCBRR] = { 0x04, 8 },
197 [SCSCR] = { 0x08, 16 },
198 [SCxTDR] = { 0x40, 8 },
199 [SCxSR] = { 0x14, 16 },
200 [SCxRDR] = { 0x60, 8 },
201 [SCFCR] = { 0x18, 16 },
202 [SCFDR] = { 0x1c, 16 },
203 [SCTFDR] = sci_reg_invalid,
204 [SCRFDR] = sci_reg_invalid,
205 [SCSPTR] = sci_reg_invalid,
206 [SCLSR] = sci_reg_invalid,
207 },
208
209 /*
210 * Common SH-3 SCIF definitions.
211 */
212 [SCIx_SH3_SCIF_REGTYPE] = {
213 [SCSMR] = { 0x00, 8 },
214 [SCBRR] = { 0x02, 8 },
215 [SCSCR] = { 0x04, 8 },
216 [SCxTDR] = { 0x06, 8 },
217 [SCxSR] = { 0x08, 16 },
218 [SCxRDR] = { 0x0a, 8 },
219 [SCFCR] = { 0x0c, 8 },
220 [SCFDR] = { 0x0e, 16 },
221 [SCTFDR] = sci_reg_invalid,
222 [SCRFDR] = sci_reg_invalid,
223 [SCSPTR] = sci_reg_invalid,
224 [SCLSR] = sci_reg_invalid,
225 },
226
227 /*
228 * Common SH-4(A) SCIF(B) definitions.
229 */
230 [SCIx_SH4_SCIF_REGTYPE] = {
231 [SCSMR] = { 0x00, 16 },
232 [SCBRR] = { 0x04, 8 },
233 [SCSCR] = { 0x08, 16 },
234 [SCxTDR] = { 0x0c, 8 },
235 [SCxSR] = { 0x10, 16 },
236 [SCxRDR] = { 0x14, 8 },
237 [SCFCR] = { 0x18, 16 },
238 [SCFDR] = { 0x1c, 16 },
239 [SCTFDR] = sci_reg_invalid,
240 [SCRFDR] = sci_reg_invalid,
241 [SCSPTR] = { 0x20, 16 },
242 [SCLSR] = { 0x24, 16 },
243 },
244
245 /*
246 * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR
247 * register.
248 */
249 [SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = {
250 [SCSMR] = { 0x00, 16 },
251 [SCBRR] = { 0x04, 8 },
252 [SCSCR] = { 0x08, 16 },
253 [SCxTDR] = { 0x0c, 8 },
254 [SCxSR] = { 0x10, 16 },
255 [SCxRDR] = { 0x14, 8 },
256 [SCFCR] = { 0x18, 16 },
257 [SCFDR] = { 0x1c, 16 },
258 [SCTFDR] = sci_reg_invalid,
259 [SCRFDR] = sci_reg_invalid,
260 [SCSPTR] = sci_reg_invalid,
261 [SCLSR] = { 0x24, 16 },
262 },
263
264 /*
265 * Common SH-4(A) SCIF(B) definitions for ports with FIFO data
266 * count registers.
267 */
268 [SCIx_SH4_SCIF_FIFODATA_REGTYPE] = {
269 [SCSMR] = { 0x00, 16 },
270 [SCBRR] = { 0x04, 8 },
271 [SCSCR] = { 0x08, 16 },
272 [SCxTDR] = { 0x0c, 8 },
273 [SCxSR] = { 0x10, 16 },
274 [SCxRDR] = { 0x14, 8 },
275 [SCFCR] = { 0x18, 16 },
276 [SCFDR] = { 0x1c, 16 },
277 [SCTFDR] = { 0x1c, 16 }, /* aliased to SCFDR */
278 [SCRFDR] = { 0x20, 16 },
279 [SCSPTR] = { 0x24, 16 },
280 [SCLSR] = { 0x28, 16 },
281 },
282
283 /*
284 * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR
285 * registers.
286 */
287 [SCIx_SH7705_SCIF_REGTYPE] = {
288 [SCSMR] = { 0x00, 16 },
289 [SCBRR] = { 0x04, 8 },
290 [SCSCR] = { 0x08, 16 },
291 [SCxTDR] = { 0x20, 8 },
292 [SCxSR] = { 0x14, 16 },
293 [SCxRDR] = { 0x24, 8 },
294 [SCFCR] = { 0x18, 16 },
295 [SCFDR] = { 0x1c, 16 },
296 [SCTFDR] = sci_reg_invalid,
297 [SCRFDR] = sci_reg_invalid,
298 [SCSPTR] = sci_reg_invalid,
299 [SCLSR] = sci_reg_invalid,
300 },
301};
302
303#define sci_getreg(up, offset) (sci_regmap[to_sci_port(up)->cfg->regtype] + offset)
304
305/*
306 * The "offset" here is rather misleading, in that it refers to an enum
307 * value relative to the port mapping rather than the fixed offset
308 * itself, which needs to be manually retrieved from the platform's
309 * register map for the given port.
310 */
311static unsigned int sci_serial_in(struct uart_port *p, int offset)
312{
313 struct plat_sci_reg *reg = sci_getreg(p, offset);
314
315 if (reg->size == 8)
316 return ioread8(p->membase + (reg->offset << p->regshift));
317 else if (reg->size == 16)
318 return ioread16(p->membase + (reg->offset << p->regshift));
319 else
320 WARN(1, "Invalid register access\n");
321
322 return 0;
323}
324
325static void sci_serial_out(struct uart_port *p, int offset, int value)
326{
327 struct plat_sci_reg *reg = sci_getreg(p, offset);
328
329 if (reg->size == 8)
330 iowrite8(value, p->membase + (reg->offset << p->regshift));
331 else if (reg->size == 16)
332 iowrite16(value, p->membase + (reg->offset << p->regshift));
333 else
334 WARN(1, "Invalid register access\n");
335}
336
337#define sci_in(up, offset) (up->serial_in(up, offset))
338#define sci_out(up, offset, value) (up->serial_out(up, offset, value))
339
340static int sci_probe_regmap(struct plat_sci_port *cfg)
341{
342 switch (cfg->type) {
343 case PORT_SCI:
344 cfg->regtype = SCIx_SCI_REGTYPE;
345 break;
346 case PORT_IRDA:
347 cfg->regtype = SCIx_IRDA_REGTYPE;
348 break;
349 case PORT_SCIFA:
350 cfg->regtype = SCIx_SCIFA_REGTYPE;
351 break;
352 case PORT_SCIFB:
353 cfg->regtype = SCIx_SCIFB_REGTYPE;
354 break;
355 case PORT_SCIF:
356 /*
357 * The SH-4 is a bit of a misnomer here, although that's
358 * where this particular port layout originated. This
359 * configuration (or some slight variation thereof)
360 * remains the dominant model for all SCIFs.
361 */
362 cfg->regtype = SCIx_SH4_SCIF_REGTYPE;
363 break;
364 default:
365 printk(KERN_ERR "Can't probe register map for given port\n");
366 return -EINVAL;
367 }
368
369 return 0;
370}
371
372static void sci_port_enable(struct sci_port *sci_port)
373{
374 if (!sci_port->port.dev)
375 return;
376
377 pm_runtime_get_sync(sci_port->port.dev);
378
379 clk_enable(sci_port->iclk);
380 sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
381 clk_enable(sci_port->fclk);
382}
383
384static void sci_port_disable(struct sci_port *sci_port)
385{
386 if (!sci_port->port.dev)
387 return;
388
389 clk_disable(sci_port->fclk);
390 clk_disable(sci_port->iclk);
391
392 pm_runtime_put_sync(sci_port->port.dev);
393}
394
124#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) 395#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
125 396
126#ifdef CONFIG_CONSOLE_POLL 397#ifdef CONFIG_CONSOLE_POLL
@@ -164,223 +435,76 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c)
164} 435}
165#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ 436#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
166 437
167#if defined(__H8300H__) || defined(__H8300S__)
168static void sci_init_pins(struct uart_port *port, unsigned int cflag) 438static void sci_init_pins(struct uart_port *port, unsigned int cflag)
169{ 439{
170 int ch = (port->mapbase - SMR0) >> 3; 440 struct sci_port *s = to_sci_port(port);
171 441 struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
172 /* set DDR regs */
173 H8300_GPIO_DDR(h8300_sci_pins[ch].port,
174 h8300_sci_pins[ch].rx,
175 H8300_GPIO_INPUT);
176 H8300_GPIO_DDR(h8300_sci_pins[ch].port,
177 h8300_sci_pins[ch].tx,
178 H8300_GPIO_OUTPUT);
179
180 /* tx mark output*/
181 H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx;
182}
183#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
184static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
185{
186 if (port->mapbase == 0xA4400000) {
187 __raw_writew(__raw_readw(PACR) & 0xffc0, PACR);
188 __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR);
189 } else if (port->mapbase == 0xA4410000)
190 __raw_writew(__raw_readw(PBCR) & 0xf003, PBCR);
191}
192#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721)
193static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
194{
195 unsigned short data;
196
197 if (cflag & CRTSCTS) {
198 /* enable RTS/CTS */
199 if (port->mapbase == 0xa4430000) { /* SCIF0 */
200 /* Clear PTCR bit 9-2; enable all scif pins but sck */
201 data = __raw_readw(PORT_PTCR);
202 __raw_writew((data & 0xfc03), PORT_PTCR);
203 } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
204 /* Clear PVCR bit 9-2 */
205 data = __raw_readw(PORT_PVCR);
206 __raw_writew((data & 0xfc03), PORT_PVCR);
207 }
208 } else {
209 if (port->mapbase == 0xa4430000) { /* SCIF0 */
210 /* Clear PTCR bit 5-2; enable only tx and rx */
211 data = __raw_readw(PORT_PTCR);
212 __raw_writew((data & 0xffc3), PORT_PTCR);
213 } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
214 /* Clear PVCR bit 5-2 */
215 data = __raw_readw(PORT_PVCR);
216 __raw_writew((data & 0xffc3), PORT_PVCR);
217 }
218 }
219}
220#elif defined(CONFIG_CPU_SH3)
221/* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */
222static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
223{
224 unsigned short data;
225
226 /* We need to set SCPCR to enable RTS/CTS */
227 data = __raw_readw(SCPCR);
228 /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/
229 __raw_writew(data & 0x0fcf, SCPCR);
230
231 if (!(cflag & CRTSCTS)) {
232 /* We need to set SCPCR to enable RTS/CTS */
233 data = __raw_readw(SCPCR);
234 /* Clear out SCP7MD1,0, SCP4MD1,0,
235 Set SCP6MD1,0 = {01} (output) */
236 __raw_writew((data & 0x0fcf) | 0x1000, SCPCR);
237 442
238 data = __raw_readb(SCPDR); 443 /*
239 /* Set /RTS2 (bit6) = 0 */ 444 * Use port-specific handler if provided.
240 __raw_writeb(data & 0xbf, SCPDR); 445 */
446 if (s->cfg->ops && s->cfg->ops->init_pins) {
447 s->cfg->ops->init_pins(port, cflag);
448 return;
241 } 449 }
242}
243#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
244static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
245{
246 unsigned short data;
247 450
248 if (port->mapbase == 0xffe00000) { 451 /*
249 data = __raw_readw(PSCR); 452 * For the generic path SCSPTR is necessary. Bail out if that's
250 data &= ~0x03cf; 453 * unavailable, too.
251 if (!(cflag & CRTSCTS)) 454 */
252 data |= 0x0340; 455 if (!reg->size)
456 return;
253 457
254 __raw_writew(data, PSCR);
255 }
256}
257#elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \
258 defined(CONFIG_CPU_SUBTYPE_SH7763) || \
259 defined(CONFIG_CPU_SUBTYPE_SH7780) || \
260 defined(CONFIG_CPU_SUBTYPE_SH7785) || \
261 defined(CONFIG_CPU_SUBTYPE_SH7786) || \
262 defined(CONFIG_CPU_SUBTYPE_SHX3)
263static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
264{
265 if (!(cflag & CRTSCTS))
266 __raw_writew(0x0080, SCSPTR0); /* Set RTS = 1 */
267}
268#elif defined(CONFIG_CPU_SH4) && !defined(CONFIG_CPU_SH4A)
269static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
270{
271 if (!(cflag & CRTSCTS)) 458 if (!(cflag & CRTSCTS))
272 __raw_writew(0x0080, SCSPTR2); /* Set RTS = 1 */ 459 sci_out(port, SCSPTR, 0x0080); /* Set RTS = 1 */
273} 460}
274#else
275static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
276{
277 /* Nothing to do */
278}
279#endif
280 461
281#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ 462static int sci_txfill(struct uart_port *port)
282 defined(CONFIG_CPU_SUBTYPE_SH7780) || \
283 defined(CONFIG_CPU_SUBTYPE_SH7785) || \
284 defined(CONFIG_CPU_SUBTYPE_SH7786)
285static int scif_txfill(struct uart_port *port)
286{
287 return sci_in(port, SCTFDR) & 0xff;
288}
289
290static int scif_txroom(struct uart_port *port)
291{ 463{
292 return SCIF_TXROOM_MAX - scif_txfill(port); 464 struct plat_sci_reg *reg;
293}
294 465
295static int scif_rxfill(struct uart_port *port) 466 reg = sci_getreg(port, SCTFDR);
296{ 467 if (reg->size)
297 return sci_in(port, SCRFDR) & 0xff;
298}
299#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
300static int scif_txfill(struct uart_port *port)
301{
302 if (port->mapbase == 0xffe00000 ||
303 port->mapbase == 0xffe08000)
304 /* SCIF0/1*/
305 return sci_in(port, SCTFDR) & 0xff; 468 return sci_in(port, SCTFDR) & 0xff;
306 else 469
307 /* SCIF2 */ 470 reg = sci_getreg(port, SCFDR);
471 if (reg->size)
308 return sci_in(port, SCFDR) >> 8; 472 return sci_in(port, SCFDR) >> 8;
309}
310 473
311static int scif_txroom(struct uart_port *port) 474 return !(sci_in(port, SCxSR) & SCI_TDRE);
312{
313 if (port->mapbase == 0xffe00000 ||
314 port->mapbase == 0xffe08000)
315 /* SCIF0/1*/
316 return SCIF_TXROOM_MAX - scif_txfill(port);
317 else
318 /* SCIF2 */
319 return SCIF2_TXROOM_MAX - scif_txfill(port);
320} 475}
321 476
322static int scif_rxfill(struct uart_port *port) 477static int sci_txroom(struct uart_port *port)
323{
324 if ((port->mapbase == 0xffe00000) ||
325 (port->mapbase == 0xffe08000)) {
326 /* SCIF0/1*/
327 return sci_in(port, SCRFDR) & 0xff;
328 } else {
329 /* SCIF2 */
330 return sci_in(port, SCFDR) & SCIF2_RFDC_MASK;
331 }
332}
333#elif defined(CONFIG_ARCH_SH7372)
334static int scif_txfill(struct uart_port *port)
335{ 478{
336 if (port->type == PORT_SCIFA) 479 return port->fifosize - sci_txfill(port);
337 return sci_in(port, SCFDR) >> 8;
338 else
339 return sci_in(port, SCTFDR);
340} 480}
341 481
342static int scif_txroom(struct uart_port *port) 482static int sci_rxfill(struct uart_port *port)
343{ 483{
344 return port->fifosize - scif_txfill(port); 484 struct plat_sci_reg *reg;
345}
346 485
347static int scif_rxfill(struct uart_port *port) 486 reg = sci_getreg(port, SCRFDR);
348{ 487 if (reg->size)
349 if (port->type == PORT_SCIFA) 488 return sci_in(port, SCRFDR) & 0xff;
350 return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
351 else
352 return sci_in(port, SCRFDR);
353}
354#else
355static int scif_txfill(struct uart_port *port)
356{
357 return sci_in(port, SCFDR) >> 8;
358}
359 489
360static int scif_txroom(struct uart_port *port) 490 reg = sci_getreg(port, SCFDR);
361{ 491 if (reg->size)
362 return SCIF_TXROOM_MAX - scif_txfill(port); 492 return sci_in(port, SCFDR) & ((port->fifosize << 1) - 1);
363}
364 493
365static int scif_rxfill(struct uart_port *port) 494 return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
366{
367 return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
368} 495}
369#endif
370 496
371static int sci_txfill(struct uart_port *port) 497/*
498 * SCI helper for checking the state of the muxed port/RXD pins.
499 */
500static inline int sci_rxd_in(struct uart_port *port)
372{ 501{
373 return !(sci_in(port, SCxSR) & SCI_TDRE); 502 struct sci_port *s = to_sci_port(port);
374}
375 503
376static int sci_txroom(struct uart_port *port) 504 if (s->cfg->port_reg <= 0)
377{ 505 return 1;
378 return !sci_txfill(port);
379}
380 506
381static int sci_rxfill(struct uart_port *port) 507 return !!__raw_readb(s->cfg->port_reg);
382{
383 return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
384} 508}
385 509
386/* ********************************************************************** * 510/* ********************************************************************** *
@@ -406,10 +530,7 @@ static void sci_transmit_chars(struct uart_port *port)
406 return; 530 return;
407 } 531 }
408 532
409 if (port->type == PORT_SCI) 533 count = sci_txroom(port);
410 count = sci_txroom(port);
411 else
412 count = scif_txroom(port);
413 534
414 do { 535 do {
415 unsigned char c; 536 unsigned char c;
@@ -464,13 +585,8 @@ static void sci_receive_chars(struct uart_port *port)
464 return; 585 return;
465 586
466 while (1) { 587 while (1) {
467 if (port->type == PORT_SCI)
468 count = sci_rxfill(port);
469 else
470 count = scif_rxfill(port);
471
472 /* Don't copy more bytes than there is room for in the buffer */ 588 /* Don't copy more bytes than there is room for in the buffer */
473 count = tty_buffer_request_room(tty, count); 589 count = tty_buffer_request_room(tty, sci_rxfill(port));
474 590
475 /* If for any reason we can't copy more data, we're done! */ 591 /* If for any reason we can't copy more data, we're done! */
476 if (count == 0) 592 if (count == 0)
@@ -561,8 +677,7 @@ static void sci_break_timer(unsigned long data)
561{ 677{
562 struct sci_port *port = (struct sci_port *)data; 678 struct sci_port *port = (struct sci_port *)data;
563 679
564 if (port->enable) 680 sci_port_enable(port);
565 port->enable(&port->port);
566 681
567 if (sci_rxd_in(&port->port) == 0) { 682 if (sci_rxd_in(&port->port) == 0) {
568 port->break_flag = 1; 683 port->break_flag = 1;
@@ -574,8 +689,7 @@ static void sci_break_timer(unsigned long data)
574 } else 689 } else
575 port->break_flag = 0; 690 port->break_flag = 0;
576 691
577 if (port->disable) 692 sci_port_disable(port);
578 port->disable(&port->port);
579} 693}
580 694
581static int sci_handle_errors(struct uart_port *port) 695static int sci_handle_errors(struct uart_port *port)
@@ -583,13 +697,19 @@ static int sci_handle_errors(struct uart_port *port)
583 int copied = 0; 697 int copied = 0;
584 unsigned short status = sci_in(port, SCxSR); 698 unsigned short status = sci_in(port, SCxSR);
585 struct tty_struct *tty = port->state->port.tty; 699 struct tty_struct *tty = port->state->port.tty;
700 struct sci_port *s = to_sci_port(port);
586 701
587 if (status & SCxSR_ORER(port)) { 702 /*
588 /* overrun error */ 703 * Handle overruns, if supported.
589 if (tty_insert_flip_char(tty, 0, TTY_OVERRUN)) 704 */
590 copied++; 705 if (s->cfg->overrun_bit != SCIx_NOT_SUPPORTED) {
706 if (status & (1 << s->cfg->overrun_bit)) {
707 /* overrun error */
708 if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
709 copied++;
591 710
592 dev_notice(port->dev, "overrun error"); 711 dev_notice(port->dev, "overrun error");
712 }
593 } 713 }
594 714
595 if (status & SCxSR_FER(port)) { 715 if (status & SCxSR_FER(port)) {
@@ -637,12 +757,15 @@ static int sci_handle_errors(struct uart_port *port)
637static int sci_handle_fifo_overrun(struct uart_port *port) 757static int sci_handle_fifo_overrun(struct uart_port *port)
638{ 758{
639 struct tty_struct *tty = port->state->port.tty; 759 struct tty_struct *tty = port->state->port.tty;
760 struct sci_port *s = to_sci_port(port);
761 struct plat_sci_reg *reg;
640 int copied = 0; 762 int copied = 0;
641 763
642 if (port->type != PORT_SCIF) 764 reg = sci_getreg(port, SCLSR);
765 if (!reg->size)
643 return 0; 766 return 0;
644 767
645 if ((sci_in(port, SCLSR) & SCIF_ORER) != 0) { 768 if ((sci_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) {
646 sci_out(port, SCLSR, 0); 769 sci_out(port, SCLSR, 0);
647 770
648 tty_insert_flip_char(tty, 0, TTY_OVERRUN); 771 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
@@ -840,74 +963,102 @@ static int sci_notifier(struct notifier_block *self,
840 return NOTIFY_OK; 963 return NOTIFY_OK;
841} 964}
842 965
843static void sci_clk_enable(struct uart_port *port) 966static struct sci_irq_desc {
844{ 967 const char *desc;
845 struct sci_port *sci_port = to_sci_port(port); 968 irq_handler_t handler;
846 969} sci_irq_desc[] = {
847 pm_runtime_get_sync(port->dev); 970 /*
971 * Split out handlers, the default case.
972 */
973 [SCIx_ERI_IRQ] = {
974 .desc = "rx err",
975 .handler = sci_er_interrupt,
976 },
848 977
849 clk_enable(sci_port->iclk); 978 [SCIx_RXI_IRQ] = {
850 sci_port->port.uartclk = clk_get_rate(sci_port->iclk); 979 .desc = "rx full",
851 clk_enable(sci_port->fclk); 980 .handler = sci_rx_interrupt,
852} 981 },
853 982
854static void sci_clk_disable(struct uart_port *port) 983 [SCIx_TXI_IRQ] = {
855{ 984 .desc = "tx empty",
856 struct sci_port *sci_port = to_sci_port(port); 985 .handler = sci_tx_interrupt,
986 },
857 987
858 clk_disable(sci_port->fclk); 988 [SCIx_BRI_IRQ] = {
859 clk_disable(sci_port->iclk); 989 .desc = "break",
990 .handler = sci_br_interrupt,
991 },
860 992
861 pm_runtime_put_sync(port->dev); 993 /*
862} 994 * Special muxed handler.
995 */
996 [SCIx_MUX_IRQ] = {
997 .desc = "mux",
998 .handler = sci_mpxed_interrupt,
999 },
1000};
863 1001
864static int sci_request_irq(struct sci_port *port) 1002static int sci_request_irq(struct sci_port *port)
865{ 1003{
866 int i; 1004 struct uart_port *up = &port->port;
867 irqreturn_t (*handlers[4])(int irq, void *ptr) = { 1005 int i, j, ret = 0;
868 sci_er_interrupt, sci_rx_interrupt, sci_tx_interrupt, 1006
869 sci_br_interrupt, 1007 for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) {
870 }; 1008 struct sci_irq_desc *desc;
871 const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full", 1009 unsigned int irq;
872 "SCI Transmit Data Empty", "SCI Break" }; 1010
873 1011 if (SCIx_IRQ_IS_MUXED(port)) {
874 if (port->cfg->irqs[0] == port->cfg->irqs[1]) { 1012 i = SCIx_MUX_IRQ;
875 if (unlikely(!port->cfg->irqs[0])) 1013 irq = up->irq;
876 return -ENODEV; 1014 } else
877 1015 irq = port->cfg->irqs[i];
878 if (request_irq(port->cfg->irqs[0], sci_mpxed_interrupt, 1016
879 IRQF_DISABLED, "sci", port)) { 1017 desc = sci_irq_desc + i;
880 dev_err(port->port.dev, "Can't allocate IRQ\n"); 1018 port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s",
881 return -ENODEV; 1019 dev_name(up->dev), desc->desc);
1020 if (!port->irqstr[j]) {
1021 dev_err(up->dev, "Failed to allocate %s IRQ string\n",
1022 desc->desc);
1023 goto out_nomem;
882 } 1024 }
883 } else { 1025
884 for (i = 0; i < ARRAY_SIZE(handlers); i++) { 1026 ret = request_irq(irq, desc->handler, up->irqflags,
885 if (unlikely(!port->cfg->irqs[i])) 1027 port->irqstr[j], port);
886 continue; 1028 if (unlikely(ret)) {
887 1029 dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc);
888 if (request_irq(port->cfg->irqs[i], handlers[i], 1030 goto out_noirq;
889 IRQF_DISABLED, desc[i], port)) {
890 dev_err(port->port.dev, "Can't allocate IRQ\n");
891 return -ENODEV;
892 }
893 } 1031 }
894 } 1032 }
895 1033
896 return 0; 1034 return 0;
1035
1036out_noirq:
1037 while (--i >= 0)
1038 free_irq(port->cfg->irqs[i], port);
1039
1040out_nomem:
1041 while (--j >= 0)
1042 kfree(port->irqstr[j]);
1043
1044 return ret;
897} 1045}
898 1046
899static void sci_free_irq(struct sci_port *port) 1047static void sci_free_irq(struct sci_port *port)
900{ 1048{
901 int i; 1049 int i;
902 1050
903 if (port->cfg->irqs[0] == port->cfg->irqs[1]) 1051 /*
904 free_irq(port->cfg->irqs[0], port); 1052 * Intentionally in reverse order so we iterate over the muxed
905 else { 1053 * IRQ first.
906 for (i = 0; i < ARRAY_SIZE(port->cfg->irqs); i++) { 1054 */
907 if (!port->cfg->irqs[i]) 1055 for (i = 0; i < SCIx_NR_IRQS; i++) {
908 continue; 1056 free_irq(port->cfg->irqs[i], port);
1057 kfree(port->irqstr[i]);
909 1058
910 free_irq(port->cfg->irqs[i], port); 1059 if (SCIx_IRQ_IS_MUXED(port)) {
1060 /* If there's only one IRQ, we're done. */
1061 return;
911 } 1062 }
912 } 1063 }
913} 1064}
@@ -915,7 +1066,7 @@ static void sci_free_irq(struct sci_port *port)
915static unsigned int sci_tx_empty(struct uart_port *port) 1066static unsigned int sci_tx_empty(struct uart_port *port)
916{ 1067{
917 unsigned short status = sci_in(port, SCxSR); 1068 unsigned short status = sci_in(port, SCxSR);
918 unsigned short in_tx_fifo = scif_txfill(port); 1069 unsigned short in_tx_fifo = sci_txfill(port);
919 1070
920 return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0; 1071 return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
921} 1072}
@@ -932,7 +1083,7 @@ static unsigned int sci_get_mctrl(struct uart_port *port)
932 /* This routine is used for getting signals of: DTR, DCD, DSR, RI, 1083 /* This routine is used for getting signals of: DTR, DCD, DSR, RI,
933 and CTS/RTS */ 1084 and CTS/RTS */
934 1085
935 return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR; 1086 return TIOCM_DTR | TIOCM_RTS | TIOCM_CTS | TIOCM_DSR;
936} 1087}
937 1088
938#ifdef CONFIG_SERIAL_SH_SCI_DMA 1089#ifdef CONFIG_SERIAL_SH_SCI_DMA
@@ -1438,8 +1589,7 @@ static int sci_startup(struct uart_port *port)
1438 1589
1439 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); 1590 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
1440 1591
1441 if (s->enable) 1592 sci_port_enable(s);
1442 s->enable(port);
1443 1593
1444 ret = sci_request_irq(s); 1594 ret = sci_request_irq(s);
1445 if (unlikely(ret < 0)) 1595 if (unlikely(ret < 0))
@@ -1465,8 +1615,7 @@ static void sci_shutdown(struct uart_port *port)
1465 sci_free_dma(port); 1615 sci_free_dma(port);
1466 sci_free_irq(s); 1616 sci_free_irq(s);
1467 1617
1468 if (s->disable) 1618 sci_port_disable(s);
1469 s->disable(port);
1470} 1619}
1471 1620
1472static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps, 1621static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
@@ -1491,11 +1640,25 @@ static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
1491 return ((freq + 16 * bps) / (32 * bps) - 1); 1640 return ((freq + 16 * bps) / (32 * bps) - 1);
1492} 1641}
1493 1642
1643static void sci_reset(struct uart_port *port)
1644{
1645 unsigned int status;
1646
1647 do {
1648 status = sci_in(port, SCxSR);
1649 } while (!(status & SCxSR_TEND(port)));
1650
1651 sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
1652
1653 if (port->type != PORT_SCI)
1654 sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
1655}
1656
1494static void sci_set_termios(struct uart_port *port, struct ktermios *termios, 1657static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
1495 struct ktermios *old) 1658 struct ktermios *old)
1496{ 1659{
1497 struct sci_port *s = to_sci_port(port); 1660 struct sci_port *s = to_sci_port(port);
1498 unsigned int status, baud, smr_val, max_baud; 1661 unsigned int baud, smr_val, max_baud;
1499 int t = -1; 1662 int t = -1;
1500 u16 scfcr = 0; 1663 u16 scfcr = 0;
1501 1664
@@ -1513,17 +1676,9 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
1513 if (likely(baud && port->uartclk)) 1676 if (likely(baud && port->uartclk))
1514 t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk); 1677 t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk);
1515 1678
1516 if (s->enable) 1679 sci_port_enable(s);
1517 s->enable(port);
1518
1519 do {
1520 status = sci_in(port, SCxSR);
1521 } while (!(status & SCxSR_TEND(port)));
1522
1523 sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
1524 1680
1525 if (port->type != PORT_SCI) 1681 sci_reset(port);
1526 sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
1527 1682
1528 smr_val = sci_in(port, SCSMR) & 3; 1683 smr_val = sci_in(port, SCSMR) & 3;
1529 1684
@@ -1584,8 +1739,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
1584 if ((termios->c_cflag & CREAD) != 0) 1739 if ((termios->c_cflag & CREAD) != 0)
1585 sci_start_rx(port); 1740 sci_start_rx(port);
1586 1741
1587 if (s->disable) 1742 sci_port_disable(s);
1588 s->disable(port);
1589} 1743}
1590 1744
1591static const char *sci_type(struct uart_port *port) 1745static const char *sci_type(struct uart_port *port)
@@ -1726,6 +1880,7 @@ static int __devinit sci_init_single(struct platform_device *dev,
1726 struct plat_sci_port *p) 1880 struct plat_sci_port *p)
1727{ 1881{
1728 struct uart_port *port = &sci_port->port; 1882 struct uart_port *port = &sci_port->port;
1883 int ret;
1729 1884
1730 port->ops = &sci_uart_ops; 1885 port->ops = &sci_uart_ops;
1731 port->iotype = UPIO_MEM; 1886 port->iotype = UPIO_MEM;
@@ -1746,6 +1901,12 @@ static int __devinit sci_init_single(struct platform_device *dev,
1746 break; 1901 break;
1747 } 1902 }
1748 1903
1904 if (p->regtype == SCIx_PROBE_REGTYPE) {
1905 ret = sci_probe_regmap(p);
1906 if (unlikely(ret))
1907 return ret;
1908 }
1909
1749 if (dev) { 1910 if (dev) {
1750 sci_port->iclk = clk_get(&dev->dev, "sci_ick"); 1911 sci_port->iclk = clk_get(&dev->dev, "sci_ick");
1751 if (IS_ERR(sci_port->iclk)) { 1912 if (IS_ERR(sci_port->iclk)) {
@@ -1764,10 +1925,9 @@ static int __devinit sci_init_single(struct platform_device *dev,
1764 if (IS_ERR(sci_port->fclk)) 1925 if (IS_ERR(sci_port->fclk))
1765 sci_port->fclk = NULL; 1926 sci_port->fclk = NULL;
1766 1927
1767 sci_port->enable = sci_clk_enable;
1768 sci_port->disable = sci_clk_disable;
1769 port->dev = &dev->dev; 1928 port->dev = &dev->dev;
1770 1929
1930 pm_runtime_irq_safe(&dev->dev);
1771 pm_runtime_enable(&dev->dev); 1931 pm_runtime_enable(&dev->dev);
1772 } 1932 }
1773 1933
@@ -1775,20 +1935,51 @@ static int __devinit sci_init_single(struct platform_device *dev,
1775 sci_port->break_timer.function = sci_break_timer; 1935 sci_port->break_timer.function = sci_break_timer;
1776 init_timer(&sci_port->break_timer); 1936 init_timer(&sci_port->break_timer);
1777 1937
1938 /*
1939 * Establish some sensible defaults for the error detection.
1940 */
1941 if (!p->error_mask)
1942 p->error_mask = (p->type == PORT_SCI) ?
1943 SCI_DEFAULT_ERROR_MASK : SCIF_DEFAULT_ERROR_MASK;
1944
1945 /*
1946 * Establish sensible defaults for the overrun detection, unless
1947 * the part has explicitly disabled support for it.
1948 */
1949 if (p->overrun_bit != SCIx_NOT_SUPPORTED) {
1950 if (p->type == PORT_SCI)
1951 p->overrun_bit = 5;
1952 else if (p->scbrr_algo_id == SCBRR_ALGO_4)
1953 p->overrun_bit = 9;
1954 else
1955 p->overrun_bit = 0;
1956
1957 /*
1958 * Make the error mask inclusive of overrun detection, if
1959 * supported.
1960 */
1961 p->error_mask |= (1 << p->overrun_bit);
1962 }
1963
1778 sci_port->cfg = p; 1964 sci_port->cfg = p;
1779 1965
1780 port->mapbase = p->mapbase; 1966 port->mapbase = p->mapbase;
1781 port->type = p->type; 1967 port->type = p->type;
1782 port->flags = p->flags; 1968 port->flags = p->flags;
1969 port->regshift = p->regshift;
1783 1970
1784 /* 1971 /*
1785 * The UART port needs an IRQ value, so we peg this to the TX IRQ 1972 * The UART port needs an IRQ value, so we peg this to the RX IRQ
1786 * for the multi-IRQ ports, which is where we are primarily 1973 * for the multi-IRQ ports, which is where we are primarily
1787 * concerned with the shutdown path synchronization. 1974 * concerned with the shutdown path synchronization.
1788 * 1975 *
1789 * For the muxed case there's nothing more to do. 1976 * For the muxed case there's nothing more to do.
1790 */ 1977 */
1791 port->irq = p->irqs[SCIx_RXI_IRQ]; 1978 port->irq = p->irqs[SCIx_RXI_IRQ];
1979 port->irqflags = IRQF_DISABLED;
1980
1981 port->serial_in = sci_serial_in;
1982 port->serial_out = sci_serial_out;
1792 1983
1793 if (p->dma_dev) 1984 if (p->dma_dev)
1794 dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n", 1985 dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
@@ -1814,8 +2005,7 @@ static void serial_console_write(struct console *co, const char *s,
1814 struct uart_port *port = &sci_port->port; 2005 struct uart_port *port = &sci_port->port;
1815 unsigned short bits; 2006 unsigned short bits;
1816 2007
1817 if (sci_port->enable) 2008 sci_port_enable(sci_port);
1818 sci_port->enable(port);
1819 2009
1820 uart_console_write(port, s, count, serial_console_putchar); 2010 uart_console_write(port, s, count, serial_console_putchar);
1821 2011
@@ -1824,8 +2014,7 @@ static void serial_console_write(struct console *co, const char *s,
1824 while ((sci_in(port, SCxSR) & bits) != bits) 2014 while ((sci_in(port, SCxSR) & bits) != bits)
1825 cpu_relax(); 2015 cpu_relax();
1826 2016
1827 if (sci_port->disable) 2017 sci_port_disable(sci_port);
1828 sci_port->disable(port);
1829} 2018}
1830 2019
1831static int __devinit serial_console_setup(struct console *co, char *options) 2020static int __devinit serial_console_setup(struct console *co, char *options)
@@ -1857,20 +2046,14 @@ static int __devinit serial_console_setup(struct console *co, char *options)
1857 if (unlikely(ret != 0)) 2046 if (unlikely(ret != 0))
1858 return ret; 2047 return ret;
1859 2048
1860 if (sci_port->enable) 2049 sci_port_enable(sci_port);
1861 sci_port->enable(port);
1862 2050
1863 if (options) 2051 if (options)
1864 uart_parse_options(options, &baud, &parity, &bits, &flow); 2052 uart_parse_options(options, &baud, &parity, &bits, &flow);
1865 2053
1866 ret = uart_set_options(port, co, baud, parity, bits, flow); 2054 sci_port_disable(sci_port);
1867#if defined(__H8300H__) || defined(__H8300S__) 2055
1868 /* disable rx interrupt */ 2056 return uart_set_options(port, co, baud, parity, bits, flow);
1869 if (ret == 0)
1870 sci_stop_rx(port);
1871#endif
1872 /* TODO: disable clock */
1873 return ret;
1874} 2057}
1875 2058
1876static struct console serial_console = { 2059static struct console serial_console = {
@@ -1912,6 +2095,36 @@ static int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
1912 return 0; 2095 return 0;
1913} 2096}
1914 2097
2098#define uart_console(port) ((port)->cons->index == (port)->line)
2099
2100static int sci_runtime_suspend(struct device *dev)
2101{
2102 struct sci_port *sci_port = dev_get_drvdata(dev);
2103 struct uart_port *port = &sci_port->port;
2104
2105 if (uart_console(port)) {
2106 sci_port->saved_smr = sci_in(port, SCSMR);
2107 sci_port->saved_brr = sci_in(port, SCBRR);
2108 sci_port->saved_fcr = sci_in(port, SCFCR);
2109 }
2110 return 0;
2111}
2112
2113static int sci_runtime_resume(struct device *dev)
2114{
2115 struct sci_port *sci_port = dev_get_drvdata(dev);
2116 struct uart_port *port = &sci_port->port;
2117
2118 if (uart_console(port)) {
2119 sci_reset(port);
2120 sci_out(port, SCSMR, sci_port->saved_smr);
2121 sci_out(port, SCBRR, sci_port->saved_brr);
2122 sci_out(port, SCFCR, sci_port->saved_fcr);
2123 sci_out(port, SCSCR, sci_port->cfg->scscr);
2124 }
2125 return 0;
2126}
2127
1915#define SCI_CONSOLE (&serial_console) 2128#define SCI_CONSOLE (&serial_console)
1916 2129
1917#else 2130#else
@@ -1921,6 +2134,8 @@ static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
1921} 2134}
1922 2135
1923#define SCI_CONSOLE NULL 2136#define SCI_CONSOLE NULL
2137#define sci_runtime_suspend NULL
2138#define sci_runtime_resume NULL
1924 2139
1925#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ 2140#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
1926 2141
@@ -2036,6 +2251,8 @@ static int sci_resume(struct device *dev)
2036} 2251}
2037 2252
2038static const struct dev_pm_ops sci_dev_pm_ops = { 2253static const struct dev_pm_ops sci_dev_pm_ops = {
2254 .runtime_suspend = sci_runtime_suspend,
2255 .runtime_resume = sci_runtime_resume,
2039 .suspend = sci_suspend, 2256 .suspend = sci_suspend,
2040 .resume = sci_resume, 2257 .resume = sci_resume,
2041}; 2258};
@@ -2081,3 +2298,5 @@ module_exit(sci_exit);
2081 2298
2082MODULE_LICENSE("GPL"); 2299MODULE_LICENSE("GPL");
2083MODULE_ALIAS("platform:sh-sci"); 2300MODULE_ALIAS("platform:sh-sci");
2301MODULE_AUTHOR("Paul Mundt");
2302MODULE_DESCRIPTION("SuperH SCI(F) serial driver");
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index b04d937c9110..e9bed038aa1f 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -2,169 +2,14 @@
2#include <linux/io.h> 2#include <linux/io.h>
3#include <linux/gpio.h> 3#include <linux/gpio.h>
4 4
5#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
6#include <asm/regs306x.h>
7#endif
8#if defined(CONFIG_H8S2678)
9#include <asm/regs267x.h>
10#endif
11
12#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
13 defined(CONFIG_CPU_SUBTYPE_SH7707) || \
14 defined(CONFIG_CPU_SUBTYPE_SH7708) || \
15 defined(CONFIG_CPU_SUBTYPE_SH7709)
16# define SCPCR 0xA4000116 /* 16 bit SCI and SCIF */
17# define SCPDR 0xA4000136 /* 8 bit SCI and SCIF */
18#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
19# define SCIF0 0xA4400000
20# define SCIF2 0xA4410000
21# define SCPCR 0xA4000116
22# define SCPDR 0xA4000136
23#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \
24 defined(CONFIG_CPU_SUBTYPE_SH7721) || \
25 defined(CONFIG_ARCH_SH73A0) || \
26 defined(CONFIG_ARCH_SH7367) || \
27 defined(CONFIG_ARCH_SH7377) || \
28 defined(CONFIG_ARCH_SH7372)
29# define PORT_PTCR 0xA405011EUL
30# define PORT_PVCR 0xA4050122UL
31# define SCIF_ORER 0x0200 /* overrun error bit */
32#elif defined(CONFIG_SH_RTS7751R2D)
33# define SCSPTR1 0xFFE0001C /* 8 bit SCIF */
34# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
35# define SCIF_ORER 0x0001 /* overrun error bit */
36#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
37 defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
38 defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
39 defined(CONFIG_CPU_SUBTYPE_SH7091) || \
40 defined(CONFIG_CPU_SUBTYPE_SH7751) || \
41 defined(CONFIG_CPU_SUBTYPE_SH7751R)
42# define SCSPTR1 0xffe0001c /* 8 bit SCI */
43# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
44# define SCIF_ORER 0x0001 /* overrun error bit */
45#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
46# define SCSPTR0 0xfe600024 /* 16 bit SCIF */
47# define SCSPTR1 0xfe610024 /* 16 bit SCIF */
48# define SCSPTR2 0xfe620024 /* 16 bit SCIF */
49# define SCIF_ORER 0x0001 /* overrun error bit */
50#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
51# define SCSPTR0 0xA4400000 /* 16 bit SCIF */
52# define SCIF_ORER 0x0001 /* overrun error bit */
53# define PACR 0xa4050100
54# define PBCR 0xa4050102
55#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
56# define SCSPTR0 0xffe00010 /* 16 bit SCIF */
57#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
58# define PADR 0xA4050120
59# define PSDR 0xA405013e
60# define PWDR 0xA4050166
61# define PSCR 0xA405011E
62# define SCIF_ORER 0x0001 /* overrun error bit */
63#elif defined(CONFIG_CPU_SUBTYPE_SH7366)
64# define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */
65# define SCSPTR0 SCPDR0
66# define SCIF_ORER 0x0001 /* overrun error bit */
67#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
68# define SCSPTR0 0xa4050160
69# define SCIF_ORER 0x0001 /* overrun error bit */
70#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
71# define SCIF_ORER 0x0001 /* overrun error bit */
72#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
73# define SCSPTR2 0xffe80020 /* 16 bit SCIF */
74# define SCIF_ORER 0x0001 /* overrun error bit */
75#elif defined(CONFIG_H83007) || defined(CONFIG_H83068)
76# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
77#elif defined(CONFIG_H8S2678)
78# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
79#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
80# define SCSPTR0 0xfe4b0020
81# define SCIF_ORER 0x0001
82#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
83# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
84# define SCIF_ORER 0x0001 /* overrun error bit */
85#elif defined(CONFIG_CPU_SUBTYPE_SH7770)
86# define SCSPTR0 0xff923020 /* 16 bit SCIF */
87# define SCIF_ORER 0x0001 /* overrun error bit */
88#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
89# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
90# define SCIF_ORER 0x0001 /* Overrun error bit */
91#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
92 defined(CONFIG_CPU_SUBTYPE_SH7786)
93# define SCSPTR0 0xffea0024 /* 16 bit SCIF */
94# define SCIF_ORER 0x0001 /* Overrun error bit */
95#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
96 defined(CONFIG_CPU_SUBTYPE_SH7203) || \
97 defined(CONFIG_CPU_SUBTYPE_SH7206) || \
98 defined(CONFIG_CPU_SUBTYPE_SH7263)
99# define SCSPTR0 0xfffe8020 /* 16 bit SCIF */
100#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
101# define SCSPTR0 0xf8400020 /* 16 bit SCIF */
102# define SCIF_ORER 0x0001 /* overrun error bit */
103#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
104# define SCSPTR0 0xffc30020 /* 16 bit SCIF */
105# define SCIF_ORER 0x0001 /* Overrun error bit */
106#else
107# error CPU subtype not defined
108#endif
109
110/* SCxSR SCI */
111#define SCI_TDRE 0x80 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
112#define SCI_RDRF 0x40 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
113#define SCI_ORER 0x20 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
114#define SCI_FER 0x10 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
115#define SCI_PER 0x08 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
116#define SCI_TEND 0x04 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
117/* SCI_MPB 0x02 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
118/* SCI_MPBT 0x01 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
119
120#define SCI_ERRORS ( SCI_PER | SCI_FER | SCI_ORER)
121
122/* SCxSR SCIF */
123#define SCIF_ER 0x0080 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
124#define SCIF_TEND 0x0040 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
125#define SCIF_TDFE 0x0020 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
126#define SCIF_BRK 0x0010 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
127#define SCIF_FER 0x0008 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
128#define SCIF_PER 0x0004 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
129#define SCIF_RDF 0x0002 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
130#define SCIF_DR 0x0001 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
131
132#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
133 defined(CONFIG_CPU_SUBTYPE_SH7720) || \
134 defined(CONFIG_CPU_SUBTYPE_SH7721) || \
135 defined(CONFIG_ARCH_SH73A0) || \
136 defined(CONFIG_ARCH_SH7367) || \
137 defined(CONFIG_ARCH_SH7377) || \
138 defined(CONFIG_ARCH_SH7372)
139# define SCIF_ORER 0x0200
140# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK | SCIF_ORER)
141# define SCIF_RFDC_MASK 0x007f
142# define SCIF_TXROOM_MAX 64
143#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
144# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK )
145# define SCIF_RFDC_MASK 0x007f
146# define SCIF_TXROOM_MAX 64
147/* SH7763 SCIF2 support */
148# define SCIF2_RFDC_MASK 0x001f
149# define SCIF2_TXROOM_MAX 16
150#else
151# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK)
152# define SCIF_RFDC_MASK 0x001f
153# define SCIF_TXROOM_MAX 16
154#endif
155
156#ifndef SCIF_ORER
157#define SCIF_ORER 0x0000
158#endif
159
160#define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND) 5#define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND)
161#define SCxSR_ERRORS(port) (((port)->type == PORT_SCI) ? SCI_ERRORS : SCIF_ERRORS)
162#define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_RDF) 6#define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_RDF)
163#define SCxSR_TDxE(port) (((port)->type == PORT_SCI) ? SCI_TDRE : SCIF_TDFE) 7#define SCxSR_TDxE(port) (((port)->type == PORT_SCI) ? SCI_TDRE : SCIF_TDFE)
164#define SCxSR_FER(port) (((port)->type == PORT_SCI) ? SCI_FER : SCIF_FER) 8#define SCxSR_FER(port) (((port)->type == PORT_SCI) ? SCI_FER : SCIF_FER)
165#define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER) 9#define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER)
166#define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK) 10#define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK)
167#define SCxSR_ORER(port) (((port)->type == PORT_SCI) ? SCI_ORER : SCIF_ORER) 11
12#define SCxSR_ERRORS(port) (to_sci_port(port)->cfg->error_mask)
168 13
169#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ 14#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
170 defined(CONFIG_CPU_SUBTYPE_SH7720) || \ 15 defined(CONFIG_CPU_SUBTYPE_SH7720) || \
@@ -191,278 +36,3 @@
191 36
192#define SCI_MAJOR 204 37#define SCI_MAJOR 204
193#define SCI_MINOR_START 8 38#define SCI_MINOR_START 8
194
195#define SCI_IN(size, offset) \
196 if ((size) == 8) { \
197 return ioread8(port->membase + (offset)); \
198 } else { \
199 return ioread16(port->membase + (offset)); \
200 }
201#define SCI_OUT(size, offset, value) \
202 if ((size) == 8) { \
203 iowrite8(value, port->membase + (offset)); \
204 } else if ((size) == 16) { \
205 iowrite16(value, port->membase + (offset)); \
206 }
207
208#define CPU_SCIx_FNS(name, sci_offset, sci_size, scif_offset, scif_size)\
209 static inline unsigned int sci_##name##_in(struct uart_port *port) \
210 { \
211 if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \
212 SCI_IN(scif_size, scif_offset) \
213 } else { /* PORT_SCI or PORT_SCIFA */ \
214 SCI_IN(sci_size, sci_offset); \
215 } \
216 } \
217 static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
218 { \
219 if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \
220 SCI_OUT(scif_size, scif_offset, value) \
221 } else { /* PORT_SCI or PORT_SCIFA */ \
222 SCI_OUT(sci_size, sci_offset, value); \
223 } \
224 }
225
226#ifdef CONFIG_H8300
227/* h8300 don't have SCIF */
228#define CPU_SCIF_FNS(name) \
229 static inline unsigned int sci_##name##_in(struct uart_port *port) \
230 { \
231 return 0; \
232 } \
233 static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
234 { \
235 }
236#else
237#define CPU_SCIF_FNS(name, scif_offset, scif_size) \
238 static inline unsigned int sci_##name##_in(struct uart_port *port) \
239 { \
240 SCI_IN(scif_size, scif_offset); \
241 } \
242 static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
243 { \
244 SCI_OUT(scif_size, scif_offset, value); \
245 }
246#endif
247
248#define CPU_SCI_FNS(name, sci_offset, sci_size) \
249 static inline unsigned int sci_##name##_in(struct uart_port* port) \
250 { \
251 SCI_IN(sci_size, sci_offset); \
252 } \
253 static inline void sci_##name##_out(struct uart_port* port, unsigned int value) \
254 { \
255 SCI_OUT(sci_size, sci_offset, value); \
256 }
257
258#if defined(CONFIG_CPU_SH3) || \
259 defined(CONFIG_ARCH_SH73A0) || \
260 defined(CONFIG_ARCH_SH7367) || \
261 defined(CONFIG_ARCH_SH7377) || \
262 defined(CONFIG_ARCH_SH7372)
263#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
264#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
265 sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
266 h8_sci_offset, h8_sci_size) \
267 CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size)
268#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
269 CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size)
270#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
271 defined(CONFIG_CPU_SUBTYPE_SH7720) || \
272 defined(CONFIG_CPU_SUBTYPE_SH7721) || \
273 defined(CONFIG_ARCH_SH7367)
274#define SCIF_FNS(name, scif_offset, scif_size) \
275 CPU_SCIF_FNS(name, scif_offset, scif_size)
276#elif defined(CONFIG_ARCH_SH7377) || \
277 defined(CONFIG_ARCH_SH7372) || \
278 defined(CONFIG_ARCH_SH73A0)
279#define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size) \
280 CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size)
281#define SCIF_FNS(name, scif_offset, scif_size) \
282 CPU_SCIF_FNS(name, scif_offset, scif_size)
283#else
284#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
285 sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
286 h8_sci_offset, h8_sci_size) \
287 CPU_SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh3_scif_offset, sh3_scif_size)
288#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
289 CPU_SCIF_FNS(name, sh3_scif_offset, sh3_scif_size)
290#endif
291#elif defined(__H8300H__) || defined(__H8300S__)
292#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
293 sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
294 h8_sci_offset, h8_sci_size) \
295 CPU_SCI_FNS(name, h8_sci_offset, h8_sci_size)
296#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
297 CPU_SCIF_FNS(name)
298#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
299 defined(CONFIG_CPU_SUBTYPE_SH7724)
300 #define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) \
301 CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size)
302 #define SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) \
303 CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size)
304#else
305#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
306 sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
307 h8_sci_offset, h8_sci_size) \
308 CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size)
309#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
310 CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size)
311#endif
312
313#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
314 defined(CONFIG_CPU_SUBTYPE_SH7720) || \
315 defined(CONFIG_CPU_SUBTYPE_SH7721) || \
316 defined(CONFIG_ARCH_SH7367)
317
318SCIF_FNS(SCSMR, 0x00, 16)
319SCIF_FNS(SCBRR, 0x04, 8)
320SCIF_FNS(SCSCR, 0x08, 16)
321SCIF_FNS(SCxSR, 0x14, 16)
322SCIF_FNS(SCFCR, 0x18, 16)
323SCIF_FNS(SCFDR, 0x1c, 16)
324SCIF_FNS(SCxTDR, 0x20, 8)
325SCIF_FNS(SCxRDR, 0x24, 8)
326SCIF_FNS(SCLSR, 0x00, 0)
327#elif defined(CONFIG_ARCH_SH7377) || \
328 defined(CONFIG_ARCH_SH7372) || \
329 defined(CONFIG_ARCH_SH73A0)
330SCIF_FNS(SCSMR, 0x00, 16)
331SCIF_FNS(SCBRR, 0x04, 8)
332SCIF_FNS(SCSCR, 0x08, 16)
333SCIF_FNS(SCTDSR, 0x0c, 16)
334SCIF_FNS(SCFER, 0x10, 16)
335SCIF_FNS(SCxSR, 0x14, 16)
336SCIF_FNS(SCFCR, 0x18, 16)
337SCIF_FNS(SCFDR, 0x1c, 16)
338SCIF_FNS(SCTFDR, 0x38, 16)
339SCIF_FNS(SCRFDR, 0x3c, 16)
340SCIx_FNS(SCxTDR, 0x20, 8, 0x40, 8)
341SCIx_FNS(SCxRDR, 0x24, 8, 0x60, 8)
342SCIF_FNS(SCLSR, 0x00, 0)
343#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
344 defined(CONFIG_CPU_SUBTYPE_SH7724)
345SCIx_FNS(SCSMR, 0x00, 16, 0x00, 16)
346SCIx_FNS(SCBRR, 0x04, 8, 0x04, 8)
347SCIx_FNS(SCSCR, 0x08, 16, 0x08, 16)
348SCIx_FNS(SCxTDR, 0x20, 8, 0x0c, 8)
349SCIx_FNS(SCxSR, 0x14, 16, 0x10, 16)
350SCIx_FNS(SCxRDR, 0x24, 8, 0x14, 8)
351SCIx_FNS(SCSPTR, 0, 0, 0, 0)
352SCIF_FNS(SCFCR, 0x18, 16)
353SCIF_FNS(SCFDR, 0x1c, 16)
354SCIF_FNS(SCLSR, 0x24, 16)
355#else
356/* reg SCI/SH3 SCI/SH4 SCIF/SH3 SCIF/SH4 SCI/H8*/
357/* name off sz off sz off sz off sz off sz*/
358SCIx_FNS(SCSMR, 0x00, 8, 0x00, 8, 0x00, 8, 0x00, 16, 0x00, 8)
359SCIx_FNS(SCBRR, 0x02, 8, 0x04, 8, 0x02, 8, 0x04, 8, 0x01, 8)
360SCIx_FNS(SCSCR, 0x04, 8, 0x08, 8, 0x04, 8, 0x08, 16, 0x02, 8)
361SCIx_FNS(SCxTDR, 0x06, 8, 0x0c, 8, 0x06, 8, 0x0C, 8, 0x03, 8)
362SCIx_FNS(SCxSR, 0x08, 8, 0x10, 8, 0x08, 16, 0x10, 16, 0x04, 8)
363SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8, 0x05, 8)
364SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16)
365#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \
366 defined(CONFIG_CPU_SUBTYPE_SH7780) || \
367 defined(CONFIG_CPU_SUBTYPE_SH7785) || \
368 defined(CONFIG_CPU_SUBTYPE_SH7786)
369SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16)
370SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16)
371SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16)
372SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
373SCIF_FNS(SCLSR, 0, 0, 0x28, 16)
374#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
375SCIF_FNS(SCFDR, 0, 0, 0x1C, 16)
376SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16)
377SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16)
378SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
379SCIF_FNS(SCLSR, 0, 0, 0x28, 16)
380#else
381SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16)
382#if defined(CONFIG_CPU_SUBTYPE_SH7722)
383SCIF_FNS(SCSPTR, 0, 0, 0, 0)
384#else
385SCIF_FNS(SCSPTR, 0, 0, 0x20, 16)
386#endif
387SCIF_FNS(SCLSR, 0, 0, 0x24, 16)
388#endif
389#endif
390#define sci_in(port, reg) sci_##reg##_in(port)
391#define sci_out(port, reg, value) sci_##reg##_out(port, value)
392
393/* H8/300 series SCI pins assignment */
394#if defined(__H8300H__) || defined(__H8300S__)
395static const struct __attribute__((packed)) {
396 int port; /* GPIO port no */
397 unsigned short rx,tx; /* GPIO bit no */
398} h8300_sci_pins[] = {
399#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
400 { /* SCI0 */
401 .port = H8300_GPIO_P9,
402 .rx = H8300_GPIO_B2,
403 .tx = H8300_GPIO_B0,
404 },
405 { /* SCI1 */
406 .port = H8300_GPIO_P9,
407 .rx = H8300_GPIO_B3,
408 .tx = H8300_GPIO_B1,
409 },
410 { /* SCI2 */
411 .port = H8300_GPIO_PB,
412 .rx = H8300_GPIO_B7,
413 .tx = H8300_GPIO_B6,
414 }
415#elif defined(CONFIG_H8S2678)
416 { /* SCI0 */
417 .port = H8300_GPIO_P3,
418 .rx = H8300_GPIO_B2,
419 .tx = H8300_GPIO_B0,
420 },
421 { /* SCI1 */
422 .port = H8300_GPIO_P3,
423 .rx = H8300_GPIO_B3,
424 .tx = H8300_GPIO_B1,
425 },
426 { /* SCI2 */
427 .port = H8300_GPIO_P5,
428 .rx = H8300_GPIO_B1,
429 .tx = H8300_GPIO_B0,
430 }
431#endif
432};
433#endif
434
435#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
436 defined(CONFIG_CPU_SUBTYPE_SH7707) || \
437 defined(CONFIG_CPU_SUBTYPE_SH7708) || \
438 defined(CONFIG_CPU_SUBTYPE_SH7709)
439static inline int sci_rxd_in(struct uart_port *port)
440{
441 if (port->mapbase == 0xfffffe80)
442 return __raw_readb(SCPDR)&0x01 ? 1 : 0; /* SCI */
443 return 1;
444}
445#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
446 defined(CONFIG_CPU_SUBTYPE_SH7751) || \
447 defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
448 defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
449 defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
450 defined(CONFIG_CPU_SUBTYPE_SH7091)
451static inline int sci_rxd_in(struct uart_port *port)
452{
453 if (port->mapbase == 0xffe00000)
454 return __raw_readb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */
455 return 1;
456}
457#elif defined(__H8300H__) || defined(__H8300S__)
458static inline int sci_rxd_in(struct uart_port *port)
459{
460 int ch = (port->mapbase - SMR0) >> 3;
461 return (H8300_SCI_DR(ch) & h8300_sci_pins[ch].rx) ? 1 : 0;
462}
463#else /* default case for non-SCI processors */
464static inline int sci_rxd_in(struct uart_port *port)
465{
466 return 1;
467}
468#endif
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index c327218cad44..9af9f0879a24 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -235,7 +235,7 @@ static inline void *qe2cpu_addr(dma_addr_t addr, struct uart_qe_port *qe_port)
235 return qe_port->bd_virt + (addr - qe_port->bd_dma_addr); 235 return qe_port->bd_virt + (addr - qe_port->bd_dma_addr);
236 236
237 /* something nasty happened */ 237 /* something nasty happened */
238 printk(KERN_ERR "%s: addr=%x\n", __func__, addr); 238 printk(KERN_ERR "%s: addr=%llx\n", __func__, (u64)addr);
239 BUG(); 239 BUG();
240 return NULL; 240 return NULL;
241} 241}
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 150e4f747c7d..4f1fc81112e6 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1295,8 +1295,7 @@ static int tty_driver_install_tty(struct tty_driver *driver,
1295 * 1295 *
1296 * Locking: tty_mutex for now 1296 * Locking: tty_mutex for now
1297 */ 1297 */
1298static void tty_driver_remove_tty(struct tty_driver *driver, 1298void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)
1299 struct tty_struct *tty)
1300{ 1299{
1301 if (driver->ops->remove) 1300 if (driver->ops->remove)
1302 driver->ops->remove(driver, tty); 1301 driver->ops->remove(driver, tty);
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 385acb895ab3..3f94ac34dce3 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -268,7 +268,7 @@ usbtmc_abort_bulk_in_status:
268 dev_err(dev, "usb_bulk_msg returned %d\n", rv); 268 dev_err(dev, "usb_bulk_msg returned %d\n", rv);
269 goto exit; 269 goto exit;
270 } 270 }
271 } while ((actual = max_size) && 271 } while ((actual == max_size) &&
272 (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN)); 272 (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
273 273
274 if (actual == max_size) { 274 if (actual == max_size) {
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index c962608b4b9a..26678cadfb21 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -123,10 +123,11 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
123 } 123 }
124 124
125 if (usb_endpoint_xfer_isoc(&ep->desc)) 125 if (usb_endpoint_xfer_isoc(&ep->desc))
126 max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1) * 126 max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
127 (desc->bmAttributes + 1); 127 le16_to_cpu(ep->desc.wMaxPacketSize);
128 else if (usb_endpoint_xfer_int(&ep->desc)) 128 else if (usb_endpoint_xfer_int(&ep->desc))
129 max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1); 129 max_tx = le16_to_cpu(ep->desc.wMaxPacketSize) *
130 (desc->bMaxBurst + 1);
130 else 131 else
131 max_tx = 999999; 132 max_tx = 999999;
132 if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) { 133 if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) {
@@ -134,10 +135,10 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
134 "config %d interface %d altsetting %d ep %d: " 135 "config %d interface %d altsetting %d ep %d: "
135 "setting to %d\n", 136 "setting to %d\n",
136 usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int", 137 usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int",
137 desc->wBytesPerInterval, 138 le16_to_cpu(desc->wBytesPerInterval),
138 cfgno, inum, asnum, ep->desc.bEndpointAddress, 139 cfgno, inum, asnum, ep->desc.bEndpointAddress,
139 max_tx); 140 max_tx);
140 ep->ss_ep_comp.wBytesPerInterval = max_tx; 141 ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx);
141 } 142 }
142} 143}
143 144
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 8669ba3fe794..73cbbd85219f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1775,6 +1775,8 @@ int usb_hcd_alloc_bandwidth(struct usb_device *udev,
1775 struct usb_interface *iface = usb_ifnum_to_if(udev, 1775 struct usb_interface *iface = usb_ifnum_to_if(udev,
1776 cur_alt->desc.bInterfaceNumber); 1776 cur_alt->desc.bInterfaceNumber);
1777 1777
1778 if (!iface)
1779 return -EINVAL;
1778 if (iface->resetting_device) { 1780 if (iface->resetting_device) {
1779 /* 1781 /*
1780 * The USB core just reset the device, so the xHCI host 1782 * The USB core just reset the device, so the xHCI host
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 44b6b40aafb4..5a084b9cfa3c 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -310,7 +310,7 @@ config USB_PXA_U2O
310# musb builds in ../musb along with host support 310# musb builds in ../musb along with host support
311config USB_GADGET_MUSB_HDRC 311config USB_GADGET_MUSB_HDRC
312 tristate "Inventra HDRC USB Peripheral (TI, ADI, ...)" 312 tristate "Inventra HDRC USB Peripheral (TI, ADI, ...)"
313 depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG) 313 depends on USB_MUSB_HDRC
314 select USB_GADGET_DUALSPEED 314 select USB_GADGET_DUALSPEED
315 help 315 help
316 This OTG-capable silicon IP is used in dual designs including 316 This OTG-capable silicon IP is used in dual designs including
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 98cbc06c30fd..ddb118a76807 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -35,6 +35,7 @@
35#include <linux/list.h> 35#include <linux/list.h>
36#include <linux/interrupt.h> 36#include <linux/interrupt.h>
37#include <linux/proc_fs.h> 37#include <linux/proc_fs.h>
38#include <linux/prefetch.h>
38#include <linux/clk.h> 39#include <linux/clk.h>
39#include <linux/usb/ch9.h> 40#include <linux/usb/ch9.h>
40#include <linux/usb/gadget.h> 41#include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 5ef87794fd32..aef47414f5d5 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1079,10 +1079,12 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1079 cdev->desc.bMaxPacketSize0 = 1079 cdev->desc.bMaxPacketSize0 =
1080 cdev->gadget->ep0->maxpacket; 1080 cdev->gadget->ep0->maxpacket;
1081 if (gadget_is_superspeed(gadget)) { 1081 if (gadget_is_superspeed(gadget)) {
1082 if (gadget->speed >= USB_SPEED_SUPER) 1082 if (gadget->speed >= USB_SPEED_SUPER) {
1083 cdev->desc.bcdUSB = cpu_to_le16(0x0300); 1083 cdev->desc.bcdUSB = cpu_to_le16(0x0300);
1084 else 1084 cdev->desc.bMaxPacketSize0 = 9;
1085 } else {
1085 cdev->desc.bcdUSB = cpu_to_le16(0x0210); 1086 cdev->desc.bcdUSB = cpu_to_le16(0x0210);
1087 }
1086 } 1088 }
1087 1089
1088 value = min(w_length, (u16) sizeof cdev->desc); 1090 value = min(w_length, (u16) sizeof cdev->desc);
diff --git a/drivers/usb/gadget/f_hid.c b/drivers/usb/gadget/f_hid.c
index 403a48bcf560..83a266bdb40e 100644
--- a/drivers/usb/gadget/f_hid.c
+++ b/drivers/usb/gadget/f_hid.c
@@ -367,6 +367,13 @@ static int hidg_setup(struct usb_function *f,
367 case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8 367 case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8
368 | USB_REQ_GET_DESCRIPTOR): 368 | USB_REQ_GET_DESCRIPTOR):
369 switch (value >> 8) { 369 switch (value >> 8) {
370 case HID_DT_HID:
371 VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n");
372 length = min_t(unsigned short, length,
373 hidg_desc.bLength);
374 memcpy(req->buf, &hidg_desc, length);
375 goto respond;
376 break;
370 case HID_DT_REPORT: 377 case HID_DT_REPORT:
371 VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n"); 378 VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n");
372 length = min_t(unsigned short, length, 379 length = min_t(unsigned short, length,
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 8f8d3f6cd89e..8f3eab1af885 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -434,6 +434,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
434 config_ep_by_speed(gadget, f, fp->out_ep)) { 434 config_ep_by_speed(gadget, f, fp->out_ep)) {
435 fp->in_ep->desc = NULL; 435 fp->in_ep->desc = NULL;
436 fp->out_ep->desc = NULL; 436 fp->out_ep->desc = NULL;
437 spin_unlock(&port->lock);
437 return -EINVAL; 438 return -EINVAL;
438 } 439 }
439 usb_ep_enable(fp->out_ep); 440 usb_ep_enable(fp->out_ep);
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
index 24a924330c81..4ec888f90002 100644
--- a/drivers/usb/gadget/fusb300_udc.c
+++ b/drivers/usb/gadget/fusb300_udc.c
@@ -609,107 +609,6 @@ void fusb300_rdcxf(struct fusb300 *fusb300,
609 } 609 }
610} 610}
611 611
612#if 0
613static void fusb300_dbg_fifo(struct fusb300_ep *ep,
614 u8 entry, u16 length)
615{
616 u32 reg;
617 u32 i = 0;
618 u32 j = 0;
619
620 reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM);
621 reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) |
622 FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG);
623 reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) |
624 FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG);
625 iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_GTM);
626
627 for (i = 0; i < (length >> 2); i++) {
628 if (i * 4 == 1024)
629 break;
630 reg = ioread32(ep->fusb300->reg +
631 FUSB300_OFFSET_BUFDBG_START + i * 4);
632 printk(KERN_DEBUG" 0x%-8x", reg);
633 j++;
634 if ((j % 4) == 0)
635 printk(KERN_DEBUG "\n");
636 }
637
638 if (length % 4) {
639 reg = ioread32(ep->fusb300->reg +
640 FUSB300_OFFSET_BUFDBG_START + i * 4);
641 printk(KERN_DEBUG " 0x%x\n", reg);
642 }
643
644 if ((j % 4) != 0)
645 printk(KERN_DEBUG "\n");
646
647 fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM,
648 FUSB300_GTM_TST_FIFO_DEG);
649}
650
651static void fusb300_cmp_dbg_fifo(struct fusb300_ep *ep,
652 u8 entry, u16 length, u8 *golden)
653{
654 u32 reg;
655 u32 i = 0;
656 u32 golden_value;
657 u8 *tmp;
658
659 tmp = golden;
660
661 printk(KERN_DEBUG "fusb300_cmp_dbg_fifo (entry %d) : start\n", entry);
662
663 reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM);
664 reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) |
665 FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG);
666 reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) |
667 FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG);
668 iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_GTM);
669
670 for (i = 0; i < (length >> 2); i++) {
671 if (i * 4 == 1024)
672 break;
673 golden_value = *tmp | *(tmp + 1) << 8 |
674 *(tmp + 2) << 16 | *(tmp + 3) << 24;
675
676 reg = ioread32(ep->fusb300->reg +
677 FUSB300_OFFSET_BUFDBG_START + i*4);
678
679 if (reg != golden_value) {
680 printk(KERN_DEBUG "0x%x : ", (u32)(ep->fusb300->reg +
681 FUSB300_OFFSET_BUFDBG_START + i*4));
682 printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n",
683 golden_value, reg);
684 }
685 tmp += 4;
686 }
687
688 switch (length % 4) {
689 case 1:
690 golden_value = *tmp;
691 case 2:
692 golden_value = *tmp | *(tmp + 1) << 8;
693 case 3:
694 golden_value = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16;
695 default:
696 break;
697
698 reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i*4);
699 if (reg != golden_value) {
700 printk(KERN_DEBUG "0x%x:", (u32)(ep->fusb300->reg +
701 FUSB300_OFFSET_BUFDBG_START + i*4));
702 printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n",
703 golden_value, reg);
704 }
705 }
706
707 printk(KERN_DEBUG "fusb300_cmp_dbg_fifo : end\n");
708 fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM,
709 FUSB300_GTM_TST_FIFO_DEG);
710}
711#endif
712
713static void fusb300_rdfifo(struct fusb300_ep *ep, 612static void fusb300_rdfifo(struct fusb300_ep *ep,
714 struct fusb300_request *req, 613 struct fusb300_request *req,
715 u32 length) 614 u32 length)
diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c
index 7c7b0e120d88..ab98ea926a11 100644
--- a/drivers/usb/gadget/net2272.c
+++ b/drivers/usb/gadget/net2272.c
@@ -27,13 +27,13 @@
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/ioport.h> 29#include <linux/ioport.h>
30#include <linux/irq.h>
31#include <linux/kernel.h> 30#include <linux/kernel.h>
32#include <linux/list.h> 31#include <linux/list.h>
33#include <linux/module.h> 32#include <linux/module.h>
34#include <linux/moduleparam.h> 33#include <linux/moduleparam.h>
35#include <linux/pci.h> 34#include <linux/pci.h>
36#include <linux/platform_device.h> 35#include <linux/platform_device.h>
36#include <linux/prefetch.h>
37#include <linux/sched.h> 37#include <linux/sched.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/timer.h> 39#include <linux/timer.h>
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 85c1b0d66293..8d31848aab09 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -2060,6 +2060,7 @@ static int s3c2410_udc_resume(struct platform_device *pdev)
2060static const struct platform_device_id s3c_udc_ids[] = { 2060static const struct platform_device_id s3c_udc_ids[] = {
2061 { "s3c2410-usbgadget", }, 2061 { "s3c2410-usbgadget", },
2062 { "s3c2440-usbgadget", }, 2062 { "s3c2440-usbgadget", },
2063 { }
2063}; 2064};
2064MODULE_DEVICE_TABLE(platform, s3c_udc_ids); 2065MODULE_DEVICE_TABLE(platform, s3c_udc_ids);
2065 2066
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index bf2c8f65e1ae..4c32cb19b405 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -343,7 +343,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
343 u32 temp; 343 u32 temp;
344 u32 power_okay; 344 u32 power_okay;
345 int i; 345 int i;
346 u8 resume_needed = 0; 346 unsigned long resume_needed = 0;
347 347
348 if (time_before (jiffies, ehci->next_statechange)) 348 if (time_before (jiffies, ehci->next_statechange))
349 msleep(5); 349 msleep(5);
@@ -416,7 +416,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
416 if (test_bit(i, &ehci->bus_suspended) && 416 if (test_bit(i, &ehci->bus_suspended) &&
417 (temp & PORT_SUSPEND)) { 417 (temp & PORT_SUSPEND)) {
418 temp |= PORT_RESUME; 418 temp |= PORT_RESUME;
419 resume_needed = 1; 419 set_bit(i, &resume_needed);
420 } 420 }
421 ehci_writel(ehci, temp, &ehci->regs->port_status [i]); 421 ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
422 } 422 }
@@ -431,8 +431,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
431 i = HCS_N_PORTS (ehci->hcs_params); 431 i = HCS_N_PORTS (ehci->hcs_params);
432 while (i--) { 432 while (i--) {
433 temp = ehci_readl(ehci, &ehci->regs->port_status [i]); 433 temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
434 if (test_bit(i, &ehci->bus_suspended) && 434 if (test_bit(i, &resume_needed)) {
435 (temp & PORT_SUSPEND)) {
436 temp &= ~(PORT_RWC_BITS | PORT_RESUME); 435 temp &= ~(PORT_RWC_BITS | PORT_RESUME);
437 ehci_writel(ehci, temp, &ehci->regs->port_status [i]); 436 ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
438 ehci_vdbg (ehci, "resumed port %d\n", i + 1); 437 ehci_vdbg (ehci, "resumed port %d\n", i + 1);
@@ -1046,7 +1045,19 @@ static int ehci_hub_control (
1046 if (!selector || selector > 5) 1045 if (!selector || selector > 5)
1047 goto error; 1046 goto error;
1048 ehci_quiesce(ehci); 1047 ehci_quiesce(ehci);
1048
1049 /* Put all enabled ports into suspend */
1050 while (ports--) {
1051 u32 __iomem *sreg =
1052 &ehci->regs->port_status[ports];
1053
1054 temp = ehci_readl(ehci, sreg) & ~PORT_RWC_BITS;
1055 if (temp & PORT_PE)
1056 ehci_writel(ehci, temp | PORT_SUSPEND,
1057 sreg);
1058 }
1049 ehci_halt(ehci); 1059 ehci_halt(ehci);
1060 temp = ehci_readl(ehci, status_reg);
1050 temp |= selector << 16; 1061 temp |= selector << 16;
1051 ehci_writel(ehci, temp, status_reg); 1062 ehci_writel(ehci, temp, status_reg);
1052 break; 1063 break;
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index 0c058be35a38..555a73c864b5 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -24,6 +24,7 @@
24#include <linux/usb/ulpi.h> 24#include <linux/usb/ulpi.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26 26
27#include <mach/hardware.h>
27#include <mach/mxc_ehci.h> 28#include <mach/mxc_ehci.h>
28 29
29#include <asm/mach-types.h> 30#include <asm/mach-types.h>
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 55a57c23dd0f..45240321ca09 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -98,6 +98,18 @@ static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
98 } 98 }
99} 99}
100 100
101static void disable_put_regulator(
102 struct ehci_hcd_omap_platform_data *pdata)
103{
104 int i;
105
106 for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
107 if (pdata->regulator[i]) {
108 regulator_disable(pdata->regulator[i]);
109 regulator_put(pdata->regulator[i]);
110 }
111 }
112}
101 113
102/* configure so an HC device and id are always provided */ 114/* configure so an HC device and id are always provided */
103/* always called with process context; sleeping is OK */ 115/* always called with process context; sleeping is OK */
@@ -231,9 +243,11 @@ err_add_hcd:
231 omap_usbhs_disable(dev); 243 omap_usbhs_disable(dev);
232 244
233err_enable: 245err_enable:
246 disable_put_regulator(pdata);
234 usb_put_hcd(hcd); 247 usb_put_hcd(hcd);
235 248
236err_io: 249err_io:
250 iounmap(regs);
237 return ret; 251 return ret;
238} 252}
239 253
@@ -253,6 +267,8 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
253 267
254 usb_remove_hcd(hcd); 268 usb_remove_hcd(hcd);
255 omap_usbhs_disable(dev); 269 omap_usbhs_disable(dev);
270 disable_put_regulator(dev->platform_data);
271 iounmap(hcd->regs);
256 usb_put_hcd(hcd); 272 usb_put_hcd(hcd);
257 return 0; 273 return 0;
258} 274}
diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c
index b3958b3d3163..9e77f1c8bdbd 100644
--- a/drivers/usb/host/ehci-s5p.c
+++ b/drivers/usb/host/ehci-s5p.c
@@ -86,6 +86,7 @@ static int __devinit s5p_ehci_probe(struct platform_device *pdev)
86 goto fail_hcd; 86 goto fail_hcd;
87 } 87 }
88 88
89 s5p_ehci->hcd = hcd;
89 s5p_ehci->clk = clk_get(&pdev->dev, "usbhost"); 90 s5p_ehci->clk = clk_get(&pdev->dev, "usbhost");
90 91
91 if (IS_ERR(s5p_ehci->clk)) { 92 if (IS_ERR(s5p_ehci->clk)) {
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 55d3d5859ac5..840beda66dd9 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1583,6 +1583,9 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1583 int retval = 0; 1583 int retval = 0;
1584 1584
1585 spin_lock_irqsave(&priv->lock, spinflags); 1585 spin_lock_irqsave(&priv->lock, spinflags);
1586 retval = usb_hcd_check_unlink_urb(hcd, urb, status);
1587 if (retval)
1588 goto out;
1586 1589
1587 qh = urb->ep->hcpriv; 1590 qh = urb->ep->hcpriv;
1588 if (!qh) { 1591 if (!qh) {
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index a9d315906e3d..629a96813fd6 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -535,7 +535,7 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
535 iounmap(base); 535 iounmap(base);
536} 536}
537 537
538static const struct dmi_system_id __initconst ehci_dmi_nohandoff_table[] = { 538static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = {
539 { 539 {
540 /* Pegatron Lucid (ExoPC) */ 540 /* Pegatron Lucid (ExoPC) */
541 .matches = { 541 .matches = {
@@ -817,7 +817,7 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
817 817
818 /* If the BIOS owns the HC, signal that the OS wants it, and wait */ 818 /* If the BIOS owns the HC, signal that the OS wants it, and wait */
819 if (val & XHCI_HC_BIOS_OWNED) { 819 if (val & XHCI_HC_BIOS_OWNED) {
820 writel(val & XHCI_HC_OS_OWNED, base + ext_cap_offset); 820 writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
821 821
822 /* Wait for 5 seconds with 10 microsecond polling interval */ 822 /* Wait for 5 seconds with 10 microsecond polling interval */
823 timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, 823 timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0be788cc2fdb..1e96d1f1fe6b 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -463,11 +463,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
463 && (temp & PORT_POWER)) 463 && (temp & PORT_POWER))
464 status |= USB_PORT_STAT_SUSPEND; 464 status |= USB_PORT_STAT_SUSPEND;
465 } 465 }
466 if ((temp & PORT_PLS_MASK) == XDEV_RESUME) { 466 if ((temp & PORT_PLS_MASK) == XDEV_RESUME &&
467 !DEV_SUPERSPEED(temp)) {
467 if ((temp & PORT_RESET) || !(temp & PORT_PE)) 468 if ((temp & PORT_RESET) || !(temp & PORT_PE))
468 goto error; 469 goto error;
469 if (!DEV_SUPERSPEED(temp) && time_after_eq(jiffies, 470 if (time_after_eq(jiffies,
470 bus_state->resume_done[wIndex])) { 471 bus_state->resume_done[wIndex])) {
471 xhci_dbg(xhci, "Resume USB2 port %d\n", 472 xhci_dbg(xhci, "Resume USB2 port %d\n",
472 wIndex + 1); 473 wIndex + 1);
473 bus_state->resume_done[wIndex] = 0; 474 bus_state->resume_done[wIndex] = 0;
@@ -487,6 +488,14 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
487 xhci_ring_device(xhci, slot_id); 488 xhci_ring_device(xhci, slot_id);
488 bus_state->port_c_suspend |= 1 << wIndex; 489 bus_state->port_c_suspend |= 1 << wIndex;
489 bus_state->suspended_ports &= ~(1 << wIndex); 490 bus_state->suspended_ports &= ~(1 << wIndex);
491 } else {
492 /*
493 * The resume has been signaling for less than
494 * 20ms. Report the port status as SUSPEND,
495 * let the usbcore check port status again
496 * and clear resume signaling later.
497 */
498 status |= USB_PORT_STAT_SUSPEND;
490 } 499 }
491 } 500 }
492 if ((temp & PORT_PLS_MASK) == XDEV_U0 501 if ((temp & PORT_PLS_MASK) == XDEV_U0
@@ -664,7 +673,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
664 xhci_dbg(xhci, "PORTSC %04x\n", temp); 673 xhci_dbg(xhci, "PORTSC %04x\n", temp);
665 if (temp & PORT_RESET) 674 if (temp & PORT_RESET)
666 goto error; 675 goto error;
667 if (temp & XDEV_U3) { 676 if ((temp & PORT_PLS_MASK) == XDEV_U3) {
668 if ((temp & PORT_PE) == 0) 677 if ((temp & PORT_PE) == 0)
669 goto error; 678 goto error;
670 679
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7113d16e2d3a..54139a2f06ce 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -514,8 +514,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
514 (unsigned long long) addr); 514 (unsigned long long) addr);
515} 515}
516 516
517/* flip_cycle means flip the cycle bit of all but the first and last TRB.
518 * (The last TRB actually points to the ring enqueue pointer, which is not part
519 * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
520 */
517static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, 521static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
518 struct xhci_td *cur_td) 522 struct xhci_td *cur_td, bool flip_cycle)
519{ 523{
520 struct xhci_segment *cur_seg; 524 struct xhci_segment *cur_seg;
521 union xhci_trb *cur_trb; 525 union xhci_trb *cur_trb;
@@ -528,6 +532,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
528 * leave the pointers intact. 532 * leave the pointers intact.
529 */ 533 */
530 cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN); 534 cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
535 /* Flip the cycle bit (link TRBs can't be the first
536 * or last TRB).
537 */
538 if (flip_cycle)
539 cur_trb->generic.field[3] ^=
540 cpu_to_le32(TRB_CYCLE);
531 xhci_dbg(xhci, "Cancel (unchain) link TRB\n"); 541 xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
532 xhci_dbg(xhci, "Address = %p (0x%llx dma); " 542 xhci_dbg(xhci, "Address = %p (0x%llx dma); "
533 "in seg %p (0x%llx dma)\n", 543 "in seg %p (0x%llx dma)\n",
@@ -541,6 +551,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
541 cur_trb->generic.field[2] = 0; 551 cur_trb->generic.field[2] = 0;
542 /* Preserve only the cycle bit of this TRB */ 552 /* Preserve only the cycle bit of this TRB */
543 cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); 553 cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
554 /* Flip the cycle bit except on the first or last TRB */
555 if (flip_cycle && cur_trb != cur_td->first_trb &&
556 cur_trb != cur_td->last_trb)
557 cur_trb->generic.field[3] ^=
558 cpu_to_le32(TRB_CYCLE);
544 cur_trb->generic.field[3] |= cpu_to_le32( 559 cur_trb->generic.field[3] |= cpu_to_le32(
545 TRB_TYPE(TRB_TR_NOOP)); 560 TRB_TYPE(TRB_TR_NOOP));
546 xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) " 561 xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
@@ -719,14 +734,14 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
719 cur_td->urb->stream_id, 734 cur_td->urb->stream_id,
720 cur_td, &deq_state); 735 cur_td, &deq_state);
721 else 736 else
722 td_to_noop(xhci, ep_ring, cur_td); 737 td_to_noop(xhci, ep_ring, cur_td, false);
723remove_finished_td: 738remove_finished_td:
724 /* 739 /*
725 * The event handler won't see a completion for this TD anymore, 740 * The event handler won't see a completion for this TD anymore,
726 * so remove it from the endpoint ring's TD list. Keep it in 741 * so remove it from the endpoint ring's TD list. Keep it in
727 * the cancelled TD list for URB completion later. 742 * the cancelled TD list for URB completion later.
728 */ 743 */
729 list_del(&cur_td->td_list); 744 list_del_init(&cur_td->td_list);
730 } 745 }
731 last_unlinked_td = cur_td; 746 last_unlinked_td = cur_td;
732 xhci_stop_watchdog_timer_in_irq(xhci, ep); 747 xhci_stop_watchdog_timer_in_irq(xhci, ep);
@@ -754,7 +769,7 @@ remove_finished_td:
754 do { 769 do {
755 cur_td = list_entry(ep->cancelled_td_list.next, 770 cur_td = list_entry(ep->cancelled_td_list.next,
756 struct xhci_td, cancelled_td_list); 771 struct xhci_td, cancelled_td_list);
757 list_del(&cur_td->cancelled_td_list); 772 list_del_init(&cur_td->cancelled_td_list);
758 773
759 /* Clean up the cancelled URB */ 774 /* Clean up the cancelled URB */
760 /* Doesn't matter what we pass for status, since the core will 775 /* Doesn't matter what we pass for status, since the core will
@@ -862,9 +877,9 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
862 cur_td = list_first_entry(&ring->td_list, 877 cur_td = list_first_entry(&ring->td_list,
863 struct xhci_td, 878 struct xhci_td,
864 td_list); 879 td_list);
865 list_del(&cur_td->td_list); 880 list_del_init(&cur_td->td_list);
866 if (!list_empty(&cur_td->cancelled_td_list)) 881 if (!list_empty(&cur_td->cancelled_td_list))
867 list_del(&cur_td->cancelled_td_list); 882 list_del_init(&cur_td->cancelled_td_list);
868 xhci_giveback_urb_in_irq(xhci, cur_td, 883 xhci_giveback_urb_in_irq(xhci, cur_td,
869 -ESHUTDOWN, "killed"); 884 -ESHUTDOWN, "killed");
870 } 885 }
@@ -873,7 +888,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
873 &temp_ep->cancelled_td_list, 888 &temp_ep->cancelled_td_list,
874 struct xhci_td, 889 struct xhci_td,
875 cancelled_td_list); 890 cancelled_td_list);
876 list_del(&cur_td->cancelled_td_list); 891 list_del_init(&cur_td->cancelled_td_list);
877 xhci_giveback_urb_in_irq(xhci, cur_td, 892 xhci_giveback_urb_in_irq(xhci, cur_td,
878 -ESHUTDOWN, "killed"); 893 -ESHUTDOWN, "killed");
879 } 894 }
@@ -1565,10 +1580,10 @@ td_cleanup:
1565 else 1580 else
1566 *status = 0; 1581 *status = 0;
1567 } 1582 }
1568 list_del(&td->td_list); 1583 list_del_init(&td->td_list);
1569 /* Was this TD slated to be cancelled but completed anyway? */ 1584 /* Was this TD slated to be cancelled but completed anyway? */
1570 if (!list_empty(&td->cancelled_td_list)) 1585 if (!list_empty(&td->cancelled_td_list))
1571 list_del(&td->cancelled_td_list); 1586 list_del_init(&td->cancelled_td_list);
1572 1587
1573 urb_priv->td_cnt++; 1588 urb_priv->td_cnt++;
1574 /* Giveback the urb when all the tds are completed */ 1589 /* Giveback the urb when all the tds are completed */
@@ -2500,11 +2515,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
2500 2515
2501 if (td_index == 0) { 2516 if (td_index == 0) {
2502 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); 2517 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
2503 if (unlikely(ret)) { 2518 if (unlikely(ret))
2504 xhci_urb_free_priv(xhci, urb_priv);
2505 urb->hcpriv = NULL;
2506 return ret; 2519 return ret;
2507 }
2508 } 2520 }
2509 2521
2510 td->urb = urb; 2522 td->urb = urb;
@@ -2672,6 +2684,10 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
2672{ 2684{
2673 int packets_transferred; 2685 int packets_transferred;
2674 2686
2687 /* One TRB with a zero-length data packet. */
2688 if (running_total == 0 && trb_buff_len == 0)
2689 return 0;
2690
2675 /* All the TRB queueing functions don't count the current TRB in 2691 /* All the TRB queueing functions don't count the current TRB in
2676 * running_total. 2692 * running_total.
2677 */ 2693 */
@@ -3113,20 +3129,15 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
3113 struct urb *urb, int i) 3129 struct urb *urb, int i)
3114{ 3130{
3115 int num_trbs = 0; 3131 int num_trbs = 0;
3116 u64 addr, td_len, running_total; 3132 u64 addr, td_len;
3117 3133
3118 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); 3134 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3119 td_len = urb->iso_frame_desc[i].length; 3135 td_len = urb->iso_frame_desc[i].length;
3120 3136
3121 running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); 3137 num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3122 running_total &= TRB_MAX_BUFF_SIZE - 1; 3138 TRB_MAX_BUFF_SIZE);
3123 if (running_total != 0) 3139 if (num_trbs == 0)
3124 num_trbs++;
3125
3126 while (running_total < td_len) {
3127 num_trbs++; 3140 num_trbs++;
3128 running_total += TRB_MAX_BUFF_SIZE;
3129 }
3130 3141
3131 return num_trbs; 3142 return num_trbs;
3132} 3143}
@@ -3226,6 +3237,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3226 start_trb = &ep_ring->enqueue->generic; 3237 start_trb = &ep_ring->enqueue->generic;
3227 start_cycle = ep_ring->cycle_state; 3238 start_cycle = ep_ring->cycle_state;
3228 3239
3240 urb_priv = urb->hcpriv;
3229 /* Queue the first TRB, even if it's zero-length */ 3241 /* Queue the first TRB, even if it's zero-length */
3230 for (i = 0; i < num_tds; i++) { 3242 for (i = 0; i < num_tds; i++) {
3231 unsigned int total_packet_count; 3243 unsigned int total_packet_count;
@@ -3237,9 +3249,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3237 addr = start_addr + urb->iso_frame_desc[i].offset; 3249 addr = start_addr + urb->iso_frame_desc[i].offset;
3238 td_len = urb->iso_frame_desc[i].length; 3250 td_len = urb->iso_frame_desc[i].length;
3239 td_remain_len = td_len; 3251 td_remain_len = td_len;
3240 /* FIXME: Ignoring zero-length packets, can those happen? */
3241 total_packet_count = roundup(td_len, 3252 total_packet_count = roundup(td_len,
3242 le16_to_cpu(urb->ep->desc.wMaxPacketSize)); 3253 le16_to_cpu(urb->ep->desc.wMaxPacketSize));
3254 /* A zero-length transfer still involves at least one packet. */
3255 if (total_packet_count == 0)
3256 total_packet_count++;
3243 burst_count = xhci_get_burst_count(xhci, urb->dev, urb, 3257 burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
3244 total_packet_count); 3258 total_packet_count);
3245 residue = xhci_get_last_burst_packet_count(xhci, 3259 residue = xhci_get_last_burst_packet_count(xhci,
@@ -3249,12 +3263,13 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3249 3263
3250 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, 3264 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3251 urb->stream_id, trbs_per_td, urb, i, mem_flags); 3265 urb->stream_id, trbs_per_td, urb, i, mem_flags);
3252 if (ret < 0) 3266 if (ret < 0) {
3253 return ret; 3267 if (i == 0)
3268 return ret;
3269 goto cleanup;
3270 }
3254 3271
3255 urb_priv = urb->hcpriv;
3256 td = urb_priv->td[i]; 3272 td = urb_priv->td[i];
3257
3258 for (j = 0; j < trbs_per_td; j++) { 3273 for (j = 0; j < trbs_per_td; j++) {
3259 u32 remainder = 0; 3274 u32 remainder = 0;
3260 field = TRB_TBC(burst_count) | TRB_TLBPC(residue); 3275 field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
@@ -3344,6 +3359,27 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3344 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3359 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3345 start_cycle, start_trb); 3360 start_cycle, start_trb);
3346 return 0; 3361 return 0;
3362cleanup:
3363 /* Clean up a partially enqueued isoc transfer. */
3364
3365 for (i--; i >= 0; i--)
3366 list_del_init(&urb_priv->td[i]->td_list);
3367
3368 /* Use the first TD as a temporary variable to turn the TDs we've queued
3369 * into No-ops with a software-owned cycle bit. That way the hardware
3370 * won't accidentally start executing bogus TDs when we partially
3371 * overwrite them. td->first_trb and td->start_seg are already set.
3372 */
3373 urb_priv->td[0]->last_trb = ep_ring->enqueue;
3374 /* Every TRB except the first & last will have its cycle bit flipped. */
3375 td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
3376
3377 /* Reset the ring enqueue back to the first TRB and its cycle bit. */
3378 ep_ring->enqueue = urb_priv->td[0]->first_trb;
3379 ep_ring->enq_seg = urb_priv->td[0]->start_seg;
3380 ep_ring->cycle_state = start_cycle;
3381 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3382 return ret;
3347} 3383}
3348 3384
3349/* 3385/*
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 763f484bc092..3a0f695138f4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -345,7 +345,8 @@ static void xhci_event_ring_work(unsigned long arg)
345 spin_lock_irqsave(&xhci->lock, flags); 345 spin_lock_irqsave(&xhci->lock, flags);
346 temp = xhci_readl(xhci, &xhci->op_regs->status); 346 temp = xhci_readl(xhci, &xhci->op_regs->status);
347 xhci_dbg(xhci, "op reg status = 0x%x\n", temp); 347 xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
348 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { 348 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
349 (xhci->xhc_state & XHCI_STATE_HALTED)) {
349 xhci_dbg(xhci, "HW died, polling stopped.\n"); 350 xhci_dbg(xhci, "HW died, polling stopped.\n");
350 spin_unlock_irqrestore(&xhci->lock, flags); 351 spin_unlock_irqrestore(&xhci->lock, flags);
351 return; 352 return;
@@ -939,8 +940,11 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
939 return 0; 940 return 0;
940 } 941 }
941 942
943 xhci = hcd_to_xhci(hcd);
944 if (xhci->xhc_state & XHCI_STATE_HALTED)
945 return -ENODEV;
946
942 if (check_virt_dev) { 947 if (check_virt_dev) {
943 xhci = hcd_to_xhci(hcd);
944 if (!udev->slot_id || !xhci->devs 948 if (!udev->slot_id || !xhci->devs
945 || !xhci->devs[udev->slot_id]) { 949 || !xhci->devs[udev->slot_id]) {
946 printk(KERN_DEBUG "xHCI %s called with unaddressed " 950 printk(KERN_DEBUG "xHCI %s called with unaddressed "
@@ -1081,8 +1085,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1081 if (urb->dev->speed == USB_SPEED_FULL) { 1085 if (urb->dev->speed == USB_SPEED_FULL) {
1082 ret = xhci_check_maxpacket(xhci, slot_id, 1086 ret = xhci_check_maxpacket(xhci, slot_id,
1083 ep_index, urb); 1087 ep_index, urb);
1084 if (ret < 0) 1088 if (ret < 0) {
1089 xhci_urb_free_priv(xhci, urb_priv);
1090 urb->hcpriv = NULL;
1085 return ret; 1091 return ret;
1092 }
1086 } 1093 }
1087 1094
1088 /* We have a spinlock and interrupts disabled, so we must pass 1095 /* We have a spinlock and interrupts disabled, so we must pass
@@ -1093,6 +1100,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1093 goto dying; 1100 goto dying;
1094 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, 1101 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1095 slot_id, ep_index); 1102 slot_id, ep_index);
1103 if (ret)
1104 goto free_priv;
1096 spin_unlock_irqrestore(&xhci->lock, flags); 1105 spin_unlock_irqrestore(&xhci->lock, flags);
1097 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { 1106 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
1098 spin_lock_irqsave(&xhci->lock, flags); 1107 spin_lock_irqsave(&xhci->lock, flags);
@@ -1113,6 +1122,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1113 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, 1122 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1114 slot_id, ep_index); 1123 slot_id, ep_index);
1115 } 1124 }
1125 if (ret)
1126 goto free_priv;
1116 spin_unlock_irqrestore(&xhci->lock, flags); 1127 spin_unlock_irqrestore(&xhci->lock, flags);
1117 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { 1128 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
1118 spin_lock_irqsave(&xhci->lock, flags); 1129 spin_lock_irqsave(&xhci->lock, flags);
@@ -1120,6 +1131,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1120 goto dying; 1131 goto dying;
1121 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, 1132 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1122 slot_id, ep_index); 1133 slot_id, ep_index);
1134 if (ret)
1135 goto free_priv;
1123 spin_unlock_irqrestore(&xhci->lock, flags); 1136 spin_unlock_irqrestore(&xhci->lock, flags);
1124 } else { 1137 } else {
1125 spin_lock_irqsave(&xhci->lock, flags); 1138 spin_lock_irqsave(&xhci->lock, flags);
@@ -1127,18 +1140,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1127 goto dying; 1140 goto dying;
1128 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, 1141 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1129 slot_id, ep_index); 1142 slot_id, ep_index);
1143 if (ret)
1144 goto free_priv;
1130 spin_unlock_irqrestore(&xhci->lock, flags); 1145 spin_unlock_irqrestore(&xhci->lock, flags);
1131 } 1146 }
1132exit: 1147exit:
1133 return ret; 1148 return ret;
1134dying: 1149dying:
1135 xhci_urb_free_priv(xhci, urb_priv);
1136 urb->hcpriv = NULL;
1137 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " 1150 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
1138 "non-responsive xHCI host.\n", 1151 "non-responsive xHCI host.\n",
1139 urb->ep->desc.bEndpointAddress, urb); 1152 urb->ep->desc.bEndpointAddress, urb);
1153 ret = -ESHUTDOWN;
1154free_priv:
1155 xhci_urb_free_priv(xhci, urb_priv);
1156 urb->hcpriv = NULL;
1140 spin_unlock_irqrestore(&xhci->lock, flags); 1157 spin_unlock_irqrestore(&xhci->lock, flags);
1141 return -ESHUTDOWN; 1158 return ret;
1142} 1159}
1143 1160
1144/* Get the right ring for the given URB. 1161/* Get the right ring for the given URB.
@@ -1235,6 +1252,13 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1235 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { 1252 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
1236 xhci_dbg(xhci, "HW died, freeing TD.\n"); 1253 xhci_dbg(xhci, "HW died, freeing TD.\n");
1237 urb_priv = urb->hcpriv; 1254 urb_priv = urb->hcpriv;
1255 for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
1256 td = urb_priv->td[i];
1257 if (!list_empty(&td->td_list))
1258 list_del_init(&td->td_list);
1259 if (!list_empty(&td->cancelled_td_list))
1260 list_del_init(&td->cancelled_td_list);
1261 }
1238 1262
1239 usb_hcd_unlink_urb_from_ep(hcd, urb); 1263 usb_hcd_unlink_urb_from_ep(hcd, urb);
1240 spin_unlock_irqrestore(&xhci->lock, flags); 1264 spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1242,7 +1266,8 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1242 xhci_urb_free_priv(xhci, urb_priv); 1266 xhci_urb_free_priv(xhci, urb_priv);
1243 return ret; 1267 return ret;
1244 } 1268 }
1245 if (xhci->xhc_state & XHCI_STATE_DYING) { 1269 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
1270 (xhci->xhc_state & XHCI_STATE_HALTED)) {
1246 xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on " 1271 xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
1247 "non-responsive xHCI host.\n", 1272 "non-responsive xHCI host.\n",
1248 urb->ep->desc.bEndpointAddress, urb); 1273 urb->ep->desc.bEndpointAddress, urb);
@@ -2665,7 +2690,10 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
2665 int i, ret; 2690 int i, ret;
2666 2691
2667 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 2692 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2668 if (ret <= 0) 2693 /* If the host is halted due to driver unload, we still need to free the
2694 * device.
2695 */
2696 if (ret <= 0 && ret != -ENODEV)
2669 return; 2697 return;
2670 2698
2671 virt_dev = xhci->devs[udev->slot_id]; 2699 virt_dev = xhci->devs[udev->slot_id];
@@ -2679,7 +2707,8 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
2679 spin_lock_irqsave(&xhci->lock, flags); 2707 spin_lock_irqsave(&xhci->lock, flags);
2680 /* Don't disable the slot if the host controller is dead. */ 2708 /* Don't disable the slot if the host controller is dead. */
2681 state = xhci_readl(xhci, &xhci->op_regs->status); 2709 state = xhci_readl(xhci, &xhci->op_regs->status);
2682 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { 2710 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
2711 (xhci->xhc_state & XHCI_STATE_HALTED)) {
2683 xhci_free_virt_device(xhci, udev->slot_id); 2712 xhci_free_virt_device(xhci, udev->slot_id);
2684 spin_unlock_irqrestore(&xhci->lock, flags); 2713 spin_unlock_irqrestore(&xhci->lock, flags);
2685 return; 2714 return;
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 6192b45959f4..fc34b8b11910 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -3,9 +3,6 @@
3# for silicon based on Mentor Graphics INVENTRA designs 3# for silicon based on Mentor Graphics INVENTRA designs
4# 4#
5 5
6comment "Enable Host or Gadget support to see Inventra options"
7 depends on !USB && USB_GADGET=n
8
9# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller 6# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
10config USB_MUSB_HDRC 7config USB_MUSB_HDRC
11 depends on USB && USB_GADGET 8 depends on USB && USB_GADGET
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index ae8c39617743..5e7cfba5b079 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -17,6 +17,7 @@
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
20#include <linux/prefetch.h>
20 21
21#include <asm/cacheflush.h> 22#include <asm/cacheflush.h>
22 23
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 149f3f310a0a..318fb4e8a885 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -226,8 +226,10 @@ static int cppi_controller_stop(struct dma_controller *c)
226 struct cppi *controller; 226 struct cppi *controller;
227 void __iomem *tibase; 227 void __iomem *tibase;
228 int i; 228 int i;
229 struct musb *musb;
229 230
230 controller = container_of(c, struct cppi, controller); 231 controller = container_of(c, struct cppi, controller);
232 musb = controller->musb;
231 233
232 tibase = controller->tibase; 234 tibase = controller->tibase;
233 /* DISABLE INDIVIDUAL CHANNEL Interrupts */ 235 /* DISABLE INDIVIDUAL CHANNEL Interrupts */
@@ -289,9 +291,11 @@ cppi_channel_allocate(struct dma_controller *c,
289 u8 index; 291 u8 index;
290 struct cppi_channel *cppi_ch; 292 struct cppi_channel *cppi_ch;
291 void __iomem *tibase; 293 void __iomem *tibase;
294 struct musb *musb;
292 295
293 controller = container_of(c, struct cppi, controller); 296 controller = container_of(c, struct cppi, controller);
294 tibase = controller->tibase; 297 tibase = controller->tibase;
298 musb = controller->musb;
295 299
296 /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */ 300 /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
297 index = ep->epnum - 1; 301 index = ep->epnum - 1;
@@ -339,7 +343,8 @@ static void cppi_channel_release(struct dma_channel *channel)
339 c = container_of(channel, struct cppi_channel, channel); 343 c = container_of(channel, struct cppi_channel, channel);
340 tibase = c->controller->tibase; 344 tibase = c->controller->tibase;
341 if (!c->hw_ep) 345 if (!c->hw_ep)
342 dev_dbg(musb->controller, "releasing idle DMA channel %p\n", c); 346 dev_dbg(c->controller->musb->controller,
347 "releasing idle DMA channel %p\n", c);
343 else if (!c->transmit) 348 else if (!c->transmit)
344 core_rxirq_enable(tibase, c->index + 1); 349 core_rxirq_enable(tibase, c->index + 1);
345 350
@@ -357,10 +362,11 @@ cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
357 362
358 musb_ep_select(base, c->index + 1); 363 musb_ep_select(base, c->index + 1);
359 364
360 DBG(level, "RX DMA%d%s: %d left, csr %04x, " 365 dev_dbg(c->controller->musb->controller,
361 "%08x H%08x S%08x C%08x, " 366 "RX DMA%d%s: %d left, csr %04x, "
362 "B%08x L%08x %08x .. %08x" 367 "%08x H%08x S%08x C%08x, "
363 "\n", 368 "B%08x L%08x %08x .. %08x"
369 "\n",
364 c->index, tag, 370 c->index, tag,
365 musb_readl(c->controller->tibase, 371 musb_readl(c->controller->tibase,
366 DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index), 372 DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
@@ -387,10 +393,11 @@ cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
387 393
388 musb_ep_select(base, c->index + 1); 394 musb_ep_select(base, c->index + 1);
389 395
390 DBG(level, "TX DMA%d%s: csr %04x, " 396 dev_dbg(c->controller->musb->controller,
391 "H%08x S%08x C%08x %08x, " 397 "TX DMA%d%s: csr %04x, "
392 "F%08x L%08x .. %08x" 398 "H%08x S%08x C%08x %08x, "
393 "\n", 399 "F%08x L%08x .. %08x"
400 "\n",
394 c->index, tag, 401 c->index, tag,
395 musb_readw(c->hw_ep->regs, MUSB_TXCSR), 402 musb_readw(c->hw_ep->regs, MUSB_TXCSR),
396 403
@@ -1022,6 +1029,7 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
1022 int i; 1029 int i;
1023 dma_addr_t safe2ack; 1030 dma_addr_t safe2ack;
1024 void __iomem *regs = rx->hw_ep->regs; 1031 void __iomem *regs = rx->hw_ep->regs;
1032 struct musb *musb = cppi->musb;
1025 1033
1026 cppi_dump_rx(6, rx, "/K"); 1034 cppi_dump_rx(6, rx, "/K");
1027 1035
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 668eeef601ae..b3c065ab9dbc 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -172,7 +172,8 @@ enum musb_g_ep0_state {
172#endif 172#endif
173 173
174/* TUSB mapping: "flat" plus ep0 special cases */ 174/* TUSB mapping: "flat" plus ep0 special cases */
175#if defined(CONFIG_USB_MUSB_TUSB6010) 175#if defined(CONFIG_USB_MUSB_TUSB6010) || \
176 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
176#define musb_ep_select(_mbase, _epnum) \ 177#define musb_ep_select(_mbase, _epnum) \
177 musb_writeb((_mbase), MUSB_INDEX, (_epnum)) 178 musb_writeb((_mbase), MUSB_INDEX, (_epnum))
178#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET 179#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET
@@ -241,7 +242,8 @@ struct musb_hw_ep {
241 void __iomem *fifo; 242 void __iomem *fifo;
242 void __iomem *regs; 243 void __iomem *regs;
243 244
244#ifdef CONFIG_USB_MUSB_TUSB6010 245#if defined(CONFIG_USB_MUSB_TUSB6010) || \
246 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
245 void __iomem *conf; 247 void __iomem *conf;
246#endif 248#endif
247 249
@@ -258,7 +260,8 @@ struct musb_hw_ep {
258 struct dma_channel *tx_channel; 260 struct dma_channel *tx_channel;
259 struct dma_channel *rx_channel; 261 struct dma_channel *rx_channel;
260 262
261#ifdef CONFIG_USB_MUSB_TUSB6010 263#if defined(CONFIG_USB_MUSB_TUSB6010) || \
264 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
262 /* TUSB has "asynchronous" and "synchronous" dma modes */ 265 /* TUSB has "asynchronous" and "synchronous" dma modes */
263 dma_addr_t fifo_async; 266 dma_addr_t fifo_async;
264 dma_addr_t fifo_sync; 267 dma_addr_t fifo_sync;
@@ -356,7 +359,8 @@ struct musb {
356 void __iomem *ctrl_base; 359 void __iomem *ctrl_base;
357 void __iomem *mregs; 360 void __iomem *mregs;
358 361
359#ifdef CONFIG_USB_MUSB_TUSB6010 362#if defined(CONFIG_USB_MUSB_TUSB6010) || \
363 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
360 dma_addr_t async; 364 dma_addr_t async;
361 dma_addr_t sync; 365 dma_addr_t sync;
362 void __iomem *sync_va; 366 void __iomem *sync_va;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index b67a062f556b..e81820370d6f 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1698,6 +1698,8 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1698 1698
1699 is_on = !!is_on; 1699 is_on = !!is_on;
1700 1700
1701 pm_runtime_get_sync(musb->controller);
1702
1701 /* NOTE: this assumes we are sensing vbus; we'd rather 1703 /* NOTE: this assumes we are sensing vbus; we'd rather
1702 * not pullup unless the B-session is active. 1704 * not pullup unless the B-session is active.
1703 */ 1705 */
@@ -1707,6 +1709,9 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1707 musb_pullup(musb, is_on); 1709 musb_pullup(musb, is_on);
1708 } 1710 }
1709 spin_unlock_irqrestore(&musb->lock, flags); 1711 spin_unlock_irqrestore(&musb->lock, flags);
1712
1713 pm_runtime_put(musb->controller);
1714
1710 return 0; 1715 return 0;
1711} 1716}
1712 1717
@@ -1851,6 +1856,7 @@ int __init musb_gadget_setup(struct musb *musb)
1851 1856
1852 return 0; 1857 return 0;
1853err: 1858err:
1859 musb->g.dev.parent = NULL;
1854 device_unregister(&musb->g.dev); 1860 device_unregister(&musb->g.dev);
1855 return status; 1861 return status;
1856} 1862}
@@ -1858,7 +1864,8 @@ err:
1858void musb_gadget_cleanup(struct musb *musb) 1864void musb_gadget_cleanup(struct musb *musb)
1859{ 1865{
1860 usb_del_gadget_udc(&musb->g); 1866 usb_del_gadget_udc(&musb->g);
1861 device_unregister(&musb->g.dev); 1867 if (musb->g.dev.parent)
1868 device_unregister(&musb->g.dev);
1862} 1869}
1863 1870
1864/* 1871/*
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 82410703dcd3..03f2655af290 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -234,7 +234,8 @@
234#define MUSB_TESTMODE 0x0F /* 8 bit */ 234#define MUSB_TESTMODE 0x0F /* 8 bit */
235 235
236/* Get offset for a given FIFO from musb->mregs */ 236/* Get offset for a given FIFO from musb->mregs */
237#ifdef CONFIG_USB_MUSB_TUSB6010 237#if defined(CONFIG_USB_MUSB_TUSB6010) || \
238 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
238#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20)) 239#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20))
239#else 240#else
240#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4)) 241#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4))
@@ -295,7 +296,8 @@
295#define MUSB_FLAT_OFFSET(_epnum, _offset) \ 296#define MUSB_FLAT_OFFSET(_epnum, _offset) \
296 (0x100 + (0x10*(_epnum)) + (_offset)) 297 (0x100 + (0x10*(_epnum)) + (_offset))
297 298
298#ifdef CONFIG_USB_MUSB_TUSB6010 299#if defined(CONFIG_USB_MUSB_TUSB6010) || \
300 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
299/* TUSB6010 EP0 configuration register is special */ 301/* TUSB6010 EP0 configuration register is special */
300#define MUSB_TUSB_OFFSET(_epnum, _offset) \ 302#define MUSB_TUSB_OFFSET(_epnum, _offset) \
301 (0x10 + _offset) 303 (0x10 + _offset)
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 9eec41fbf3a4..ec1480191f78 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -18,6 +18,7 @@
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/prefetch.h>
21#include <linux/usb.h> 22#include <linux/usb.h>
22#include <linux/irq.h> 23#include <linux/irq.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index c784e6c03aac..b67b4bc596c1 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -20,6 +20,7 @@
20#include <plat/mux.h> 20#include <plat/mux.h>
21 21
22#include "musb_core.h" 22#include "musb_core.h"
23#include "tusb6010.h"
23 24
24#define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data) 25#define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data)
25 26
@@ -89,7 +90,7 @@ static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
89 u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); 90 u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
90 91
91 if (reg != 0) { 92 if (reg != 0) {
92 dev_dbg(musb->controller, "ep%i dmareq0 is busy for ep%i\n", 93 dev_dbg(chdat->musb->controller, "ep%i dmareq0 is busy for ep%i\n",
93 chdat->epnum, reg & 0xf); 94 chdat->epnum, reg & 0xf);
94 return -EAGAIN; 95 return -EAGAIN;
95 } 96 }
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index cecace411832..ef4333f4bbe0 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -65,7 +65,8 @@ static void ux500_tx_work(struct work_struct *data)
65 struct musb *musb = hw_ep->musb; 65 struct musb *musb = hw_ep->musb;
66 unsigned long flags; 66 unsigned long flags;
67 67
68 DBG(4, "DMA tx transfer done on hw_ep=%d\n", hw_ep->epnum); 68 dev_dbg(musb->controller, "DMA tx transfer done on hw_ep=%d\n",
69 hw_ep->epnum);
69 70
70 spin_lock_irqsave(&musb->lock, flags); 71 spin_lock_irqsave(&musb->lock, flags);
71 ux500_channel->channel.actual_len = ux500_channel->cur_len; 72 ux500_channel->channel.actual_len = ux500_channel->cur_len;
@@ -84,7 +85,8 @@ static void ux500_rx_work(struct work_struct *data)
84 struct musb *musb = hw_ep->musb; 85 struct musb *musb = hw_ep->musb;
85 unsigned long flags; 86 unsigned long flags;
86 87
87 DBG(4, "DMA rx transfer done on hw_ep=%d\n", hw_ep->epnum); 88 dev_dbg(musb->controller, "DMA rx transfer done on hw_ep=%d\n",
89 hw_ep->epnum);
88 90
89 spin_lock_irqsave(&musb->lock, flags); 91 spin_lock_irqsave(&musb->lock, flags);
90 ux500_channel->channel.actual_len = ux500_channel->cur_len; 92 ux500_channel->channel.actual_len = ux500_channel->cur_len;
@@ -116,9 +118,11 @@ static bool ux500_configure_channel(struct dma_channel *channel,
116 enum dma_slave_buswidth addr_width; 118 enum dma_slave_buswidth addr_width;
117 dma_addr_t usb_fifo_addr = (MUSB_FIFO_OFFSET(hw_ep->epnum) + 119 dma_addr_t usb_fifo_addr = (MUSB_FIFO_OFFSET(hw_ep->epnum) +
118 ux500_channel->controller->phy_base); 120 ux500_channel->controller->phy_base);
121 struct musb *musb = ux500_channel->controller->private_data;
119 122
120 DBG(4, "packet_sz=%d, mode=%d, dma_addr=0x%x, len=%d is_tx=%d\n", 123 dev_dbg(musb->controller,
121 packet_sz, mode, dma_addr, len, ux500_channel->is_tx); 124 "packet_sz=%d, mode=%d, dma_addr=0x%x, len=%d is_tx=%d\n",
125 packet_sz, mode, dma_addr, len, ux500_channel->is_tx);
122 126
123 ux500_channel->cur_len = len; 127 ux500_channel->cur_len = len;
124 128
@@ -133,15 +137,13 @@ static bool ux500_configure_channel(struct dma_channel *channel,
133 DMA_SLAVE_BUSWIDTH_4_BYTES; 137 DMA_SLAVE_BUSWIDTH_4_BYTES;
134 138
135 slave_conf.direction = direction; 139 slave_conf.direction = direction;
136 if (direction == DMA_FROM_DEVICE) { 140 slave_conf.src_addr = usb_fifo_addr;
137 slave_conf.src_addr = usb_fifo_addr; 141 slave_conf.src_addr_width = addr_width;
138 slave_conf.src_addr_width = addr_width; 142 slave_conf.src_maxburst = 16;
139 slave_conf.src_maxburst = 16; 143 slave_conf.dst_addr = usb_fifo_addr;
140 } else { 144 slave_conf.dst_addr_width = addr_width;
141 slave_conf.dst_addr = usb_fifo_addr; 145 slave_conf.dst_maxburst = 16;
142 slave_conf.dst_addr_width = addr_width; 146
143 slave_conf.dst_maxburst = 16;
144 }
145 dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG, 147 dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG,
146 (unsigned long) &slave_conf); 148 (unsigned long) &slave_conf);
147 149
@@ -166,6 +168,7 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
166 struct ux500_dma_controller *controller = container_of(c, 168 struct ux500_dma_controller *controller = container_of(c,
167 struct ux500_dma_controller, controller); 169 struct ux500_dma_controller, controller);
168 struct ux500_dma_channel *ux500_channel = NULL; 170 struct ux500_dma_channel *ux500_channel = NULL;
171 struct musb *musb = controller->private_data;
169 u8 ch_num = hw_ep->epnum - 1; 172 u8 ch_num = hw_ep->epnum - 1;
170 u32 max_ch; 173 u32 max_ch;
171 174
@@ -192,7 +195,7 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
192 ux500_channel->hw_ep = hw_ep; 195 ux500_channel->hw_ep = hw_ep;
193 ux500_channel->is_allocated = 1; 196 ux500_channel->is_allocated = 1;
194 197
195 DBG(7, "hw_ep=%d, is_tx=0x%x, channel=%d\n", 198 dev_dbg(musb->controller, "hw_ep=%d, is_tx=0x%x, channel=%d\n",
196 hw_ep->epnum, is_tx, ch_num); 199 hw_ep->epnum, is_tx, ch_num);
197 200
198 return &(ux500_channel->channel); 201 return &(ux500_channel->channel);
@@ -201,8 +204,9 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
201static void ux500_dma_channel_release(struct dma_channel *channel) 204static void ux500_dma_channel_release(struct dma_channel *channel)
202{ 205{
203 struct ux500_dma_channel *ux500_channel = channel->private_data; 206 struct ux500_dma_channel *ux500_channel = channel->private_data;
207 struct musb *musb = ux500_channel->controller->private_data;
204 208
205 DBG(7, "channel=%d\n", ux500_channel->ch_num); 209 dev_dbg(musb->controller, "channel=%d\n", ux500_channel->ch_num);
206 210
207 if (ux500_channel->is_allocated) { 211 if (ux500_channel->is_allocated) {
208 ux500_channel->is_allocated = 0; 212 ux500_channel->is_allocated = 0;
@@ -252,8 +256,8 @@ static int ux500_dma_channel_abort(struct dma_channel *channel)
252 void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs; 256 void __iomem *epio = musb->endpoints[ux500_channel->hw_ep->epnum].regs;
253 u16 csr; 257 u16 csr;
254 258
255 DBG(4, "channel=%d, is_tx=%d\n", ux500_channel->ch_num, 259 dev_dbg(musb->controller, "channel=%d, is_tx=%d\n",
256 ux500_channel->is_tx); 260 ux500_channel->ch_num, ux500_channel->is_tx);
257 261
258 if (channel->status == MUSB_DMA_STATUS_BUSY) { 262 if (channel->status == MUSB_DMA_STATUS_BUSY) {
259 if (ux500_channel->is_tx) { 263 if (ux500_channel->is_tx) {
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index ba79dbf5adbc..cb2d451d511e 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -14,6 +14,7 @@
14 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 14 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
15 * 15 *
16 */ 16 */
17#include <linux/dma-mapping.h>
17#include <linux/io.h> 18#include <linux/io.h>
18#include <linux/module.h> 19#include <linux/module.h>
19#include <linux/platform_device.h> 20#include <linux/platform_device.h>
@@ -76,7 +77,7 @@ struct usbhsg_recip_handle {
76 struct usbhsg_gpriv, mod) 77 struct usbhsg_gpriv, mod)
77 78
78#define __usbhsg_for_each_uep(start, pos, g, i) \ 79#define __usbhsg_for_each_uep(start, pos, g, i) \
79 for (i = start, pos = (g)->uep; \ 80 for (i = start, pos = (g)->uep + i; \
80 i < (g)->uep_size; \ 81 i < (g)->uep_size; \
81 i++, pos = (g)->uep + i) 82 i++, pos = (g)->uep + i)
82 83
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 2e06b90aa1f8..5fc13e717911 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -101,6 +101,7 @@ static int ftdi_jtag_probe(struct usb_serial *serial);
101static int ftdi_mtxorb_hack_setup(struct usb_serial *serial); 101static int ftdi_mtxorb_hack_setup(struct usb_serial *serial);
102static int ftdi_NDI_device_setup(struct usb_serial *serial); 102static int ftdi_NDI_device_setup(struct usb_serial *serial);
103static int ftdi_stmclite_probe(struct usb_serial *serial); 103static int ftdi_stmclite_probe(struct usb_serial *serial);
104static int ftdi_8u2232c_probe(struct usb_serial *serial);
104static void ftdi_USB_UIRT_setup(struct ftdi_private *priv); 105static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
105static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv); 106static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
106 107
@@ -128,6 +129,10 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
128 .probe = ftdi_stmclite_probe, 129 .probe = ftdi_stmclite_probe,
129}; 130};
130 131
132static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
133 .probe = ftdi_8u2232c_probe,
134};
135
131/* 136/*
132 * The 8U232AM has the same API as the sio except for: 137 * The 8U232AM has the same API as the sio except for:
133 * - it can support MUCH higher baudrates; up to: 138 * - it can support MUCH higher baudrates; up to:
@@ -151,6 +156,7 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
151 * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! 156 * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
152 */ 157 */
153static struct usb_device_id id_table_combined [] = { 158static struct usb_device_id id_table_combined [] = {
159 { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) },
154 { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) }, 160 { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
155 { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) }, 161 { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
156 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, 162 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
@@ -177,7 +183,8 @@ static struct usb_device_id id_table_combined [] = {
177 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) }, 183 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) },
178 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) }, 184 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) },
179 { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) }, 185 { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) },
180 { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) }, 186 { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) ,
187 .driver_info = (kernel_ulong_t)&ftdi_8u2232c_quirk },
181 { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) }, 188 { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
182 { USB_DEVICE(FTDI_VID, FTDI_232H_PID) }, 189 { USB_DEVICE(FTDI_VID, FTDI_232H_PID) },
183 { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) }, 190 { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
@@ -1171,7 +1178,7 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
1171 case FT2232H: /* FT2232H chip */ 1178 case FT2232H: /* FT2232H chip */
1172 case FT4232H: /* FT4232H chip */ 1179 case FT4232H: /* FT4232H chip */
1173 case FT232H: /* FT232H chip */ 1180 case FT232H: /* FT232H chip */
1174 if ((baud <= 12000000) & (baud >= 1200)) { 1181 if ((baud <= 12000000) && (baud >= 1200)) {
1175 div_value = ftdi_2232h_baud_to_divisor(baud); 1182 div_value = ftdi_2232h_baud_to_divisor(baud);
1176 } else if (baud < 1200) { 1183 } else if (baud < 1200) {
1177 div_value = ftdi_232bm_baud_to_divisor(baud); 1184 div_value = ftdi_232bm_baud_to_divisor(baud);
@@ -1205,7 +1212,10 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
1205 urb_index_value = get_ftdi_divisor(tty, port); 1212 urb_index_value = get_ftdi_divisor(tty, port);
1206 urb_value = (__u16)urb_index_value; 1213 urb_value = (__u16)urb_index_value;
1207 urb_index = (__u16)(urb_index_value >> 16); 1214 urb_index = (__u16)(urb_index_value >> 16);
1208 if (priv->interface) { /* FT2232C */ 1215 if ((priv->chip_type == FT2232C) || (priv->chip_type == FT2232H) ||
1216 (priv->chip_type == FT4232H) || (priv->chip_type == FT232H)) {
1217 /* Probably the BM type needs the MSB of the encoded fractional
1218 * divider also moved like for the chips above. Any infos? */
1209 urb_index = (__u16)((urb_index << 8) | priv->interface); 1219 urb_index = (__u16)((urb_index << 8) | priv->interface);
1210 } 1220 }
1211 1221
@@ -1733,6 +1743,18 @@ static int ftdi_jtag_probe(struct usb_serial *serial)
1733 return 0; 1743 return 0;
1734} 1744}
1735 1745
1746static int ftdi_8u2232c_probe(struct usb_serial *serial)
1747{
1748 struct usb_device *udev = serial->dev;
1749
1750 dbg("%s", __func__);
1751
1752 if (strcmp(udev->manufacturer, "CALAO Systems") == 0)
1753 return ftdi_jtag_probe(serial);
1754
1755 return 0;
1756}
1757
1736/* 1758/*
1737 * First and second port on STMCLiteadaptors is reserved for JTAG interface 1759 * First and second port on STMCLiteadaptors is reserved for JTAG interface
1738 * and the forth port for pio 1760 * and the forth port for pio
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 19156d1049fe..bf5227ad3ef7 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1159,4 +1159,8 @@
1159/* USB-Nano-485*/ 1159/* USB-Nano-485*/
1160#define FTDI_CTI_NANO_PID 0xF60B 1160#define FTDI_CTI_NANO_PID 0xF60B
1161 1161
1162 1162/*
1163 * ZeitControl cardsystems GmbH rfid-readers http://zeitconrol.de
1164 */
1165/* TagTracer MIFARE*/
1166#define FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID 0xF7C0
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 60b25d8ea0e2..fe22e90bc879 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -148,6 +148,12 @@ static void option_instat_callback(struct urb *urb);
148#define HUAWEI_PRODUCT_K4505 0x1464 148#define HUAWEI_PRODUCT_K4505 0x1464
149#define HUAWEI_PRODUCT_K3765 0x1465 149#define HUAWEI_PRODUCT_K3765 0x1465
150#define HUAWEI_PRODUCT_E14AC 0x14AC 150#define HUAWEI_PRODUCT_E14AC 0x14AC
151#define HUAWEI_PRODUCT_K3806 0x14AE
152#define HUAWEI_PRODUCT_K4605 0x14C6
153#define HUAWEI_PRODUCT_K3770 0x14C9
154#define HUAWEI_PRODUCT_K3771 0x14CA
155#define HUAWEI_PRODUCT_K4510 0x14CB
156#define HUAWEI_PRODUCT_K4511 0x14CC
151#define HUAWEI_PRODUCT_ETS1220 0x1803 157#define HUAWEI_PRODUCT_ETS1220 0x1803
152#define HUAWEI_PRODUCT_E353 0x1506 158#define HUAWEI_PRODUCT_E353 0x1506
153 159
@@ -412,6 +418,56 @@ static void option_instat_callback(struct urb *urb);
412#define SAMSUNG_VENDOR_ID 0x04e8 418#define SAMSUNG_VENDOR_ID 0x04e8
413#define SAMSUNG_PRODUCT_GT_B3730 0x6889 419#define SAMSUNG_PRODUCT_GT_B3730 0x6889
414 420
421/* YUGA products www.yuga-info.com*/
422#define YUGA_VENDOR_ID 0x257A
423#define YUGA_PRODUCT_CEM600 0x1601
424#define YUGA_PRODUCT_CEM610 0x1602
425#define YUGA_PRODUCT_CEM500 0x1603
426#define YUGA_PRODUCT_CEM510 0x1604
427#define YUGA_PRODUCT_CEM800 0x1605
428#define YUGA_PRODUCT_CEM900 0x1606
429
430#define YUGA_PRODUCT_CEU818 0x1607
431#define YUGA_PRODUCT_CEU816 0x1608
432#define YUGA_PRODUCT_CEU828 0x1609
433#define YUGA_PRODUCT_CEU826 0x160A
434#define YUGA_PRODUCT_CEU518 0x160B
435#define YUGA_PRODUCT_CEU516 0x160C
436#define YUGA_PRODUCT_CEU528 0x160D
437#define YUGA_PRODUCT_CEU526 0x160F
438
439#define YUGA_PRODUCT_CWM600 0x2601
440#define YUGA_PRODUCT_CWM610 0x2602
441#define YUGA_PRODUCT_CWM500 0x2603
442#define YUGA_PRODUCT_CWM510 0x2604
443#define YUGA_PRODUCT_CWM800 0x2605
444#define YUGA_PRODUCT_CWM900 0x2606
445
446#define YUGA_PRODUCT_CWU718 0x2607
447#define YUGA_PRODUCT_CWU716 0x2608
448#define YUGA_PRODUCT_CWU728 0x2609
449#define YUGA_PRODUCT_CWU726 0x260A
450#define YUGA_PRODUCT_CWU518 0x260B
451#define YUGA_PRODUCT_CWU516 0x260C
452#define YUGA_PRODUCT_CWU528 0x260D
453#define YUGA_PRODUCT_CWU526 0x260F
454
455#define YUGA_PRODUCT_CLM600 0x2601
456#define YUGA_PRODUCT_CLM610 0x2602
457#define YUGA_PRODUCT_CLM500 0x2603
458#define YUGA_PRODUCT_CLM510 0x2604
459#define YUGA_PRODUCT_CLM800 0x2605
460#define YUGA_PRODUCT_CLM900 0x2606
461
462#define YUGA_PRODUCT_CLU718 0x2607
463#define YUGA_PRODUCT_CLU716 0x2608
464#define YUGA_PRODUCT_CLU728 0x2609
465#define YUGA_PRODUCT_CLU726 0x260A
466#define YUGA_PRODUCT_CLU518 0x260B
467#define YUGA_PRODUCT_CLU516 0x260C
468#define YUGA_PRODUCT_CLU528 0x260D
469#define YUGA_PRODUCT_CLU526 0x260F
470
415/* some devices interfaces need special handling due to a number of reasons */ 471/* some devices interfaces need special handling due to a number of reasons */
416enum option_blacklist_reason { 472enum option_blacklist_reason {
417 OPTION_BLACKLIST_NONE = 0, 473 OPTION_BLACKLIST_NONE = 0,
@@ -547,6 +603,16 @@ static const struct usb_device_id option_ids[] = {
547 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, 603 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
548 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, 604 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
549 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, 605 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
606 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
607 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff) },
608 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
609 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
610 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
611 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) },
612 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) },
613 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) },
614 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
615 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
550 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, 616 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
551 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, 617 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
552 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, 618 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
@@ -993,6 +1059,48 @@ static const struct usb_device_id option_ids[] = {
993 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ 1059 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
994 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ 1060 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
995 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ 1061 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
1062 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
1063 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
1064 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM500) },
1065 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM510) },
1066 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM800) },
1067 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM900) },
1068 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU818) },
1069 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU816) },
1070 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU828) },
1071 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU826) },
1072 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU518) },
1073 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU516) },
1074 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU528) },
1075 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU526) },
1076 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM600) },
1077 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM610) },
1078 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM500) },
1079 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM510) },
1080 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM800) },
1081 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM900) },
1082 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU718) },
1083 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU716) },
1084 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU728) },
1085 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU726) },
1086 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU518) },
1087 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU516) },
1088 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU528) },
1089 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU526) },
1090 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM600) },
1091 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM610) },
1092 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM500) },
1093 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM510) },
1094 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM800) },
1095 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM900) },
1096 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU718) },
1097 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU716) },
1098 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU728) },
1099 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU726) },
1100 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU518) },
1101 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
1102 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
1103 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
996 { } /* Terminating entry */ 1104 { } /* Terminating entry */
997}; 1105};
998MODULE_DEVICE_TABLE(usb, option_ids); 1106MODULE_DEVICE_TABLE(usb, option_ids);
@@ -1122,11 +1230,13 @@ static int option_probe(struct usb_serial *serial,
1122 serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff) 1230 serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
1123 return -ENODEV; 1231 return -ENODEV;
1124 1232
1125 /* Don't bind network interfaces on Huawei K3765 & K4505 */ 1233 /* Don't bind network interfaces on Huawei K3765, K4505 & K4605 */
1126 if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID && 1234 if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID &&
1127 (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 || 1235 (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 ||
1128 serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505) && 1236 serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505 ||
1129 serial->interface->cur_altsetting->desc.bInterfaceNumber == 1) 1237 serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4605) &&
1238 (serial->interface->cur_altsetting->desc.bInterfaceNumber == 1 ||
1239 serial->interface->cur_altsetting->desc.bInterfaceNumber == 2))
1130 return -ENODEV; 1240 return -ENODEV;
1131 1241
1132 /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */ 1242 /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 54a9dab1f33b..aeccc7f0a93c 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -45,6 +45,7 @@ static const struct usb_device_id id_table[] = {
45 {USB_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */ 45 {USB_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
46 {USB_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ 46 {USB_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
47 {USB_DEVICE(0x05c6, 0x9008)}, /* Generic Gobi QDL device */ 47 {USB_DEVICE(0x05c6, 0x9008)}, /* Generic Gobi QDL device */
48 {USB_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
48 {USB_DEVICE(0x05c6, 0x9201)}, /* Generic Gobi QDL device */ 49 {USB_DEVICE(0x05c6, 0x9201)}, /* Generic Gobi QDL device */
49 {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ 50 {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
50 {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ 51 {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
@@ -78,6 +79,7 @@ static const struct usb_device_id id_table[] = {
78 {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ 79 {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
79 {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ 80 {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
80 {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ 81 {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
82 {USB_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */
81 {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */ 83 {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
82 {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ 84 {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
83 {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */ 85 {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ccff3483eebc..3041a974faf3 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1988,6 +1988,16 @@ UNUSUAL_DEV( 0x4146, 0xba01, 0x0100, 0x0100,
1988 "Micro Mini 1GB", 1988 "Micro Mini 1GB",
1989 USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ), 1989 USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
1990 1990
1991/*
1992 * Nick Bowler <nbowler@elliptictech.com>
1993 * SCSI stack spams (otherwise harmless) error messages.
1994 */
1995UNUSUAL_DEV( 0xc251, 0x4003, 0x0100, 0x0100,
1996 "Keil Software, Inc.",
1997 "V2M MotherBoard",
1998 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1999 US_FL_NOT_LOCKABLE),
2000
1991/* Reported by Andrew Simmons <andrew.simmons@gmail.com> */ 2001/* Reported by Andrew Simmons <andrew.simmons@gmail.com> */
1992UNUSUAL_DEV( 0xed06, 0x4500, 0x0001, 0x0001, 2002UNUSUAL_DEV( 0xed06, 0x4500, 0x0001, 0x0001,
1993 "DataStor", 2003 "DataStor",
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 1e54b8b7f698..278aeaa92505 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -335,6 +335,13 @@ config BACKLIGHT_PCF50633
335 If you have a backlight driven by a NXP PCF50633 MFD, say Y here to 335 If you have a backlight driven by a NXP PCF50633 MFD, say Y here to
336 enable its driver. 336 enable its driver.
337 337
338config BACKLIGHT_AAT2870
339 tristate "AnalogicTech AAT2870 Backlight"
340 depends on BACKLIGHT_CLASS_DEVICE && MFD_AAT2870_CORE
341 help
342 If you have a AnalogicTech AAT2870 say Y to enable the
343 backlight driver.
344
338endif # BACKLIGHT_CLASS_DEVICE 345endif # BACKLIGHT_CLASS_DEVICE
339 346
340endif # BACKLIGHT_LCD_SUPPORT 347endif # BACKLIGHT_LCD_SUPPORT
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index bf1dd92b7527..fdd1fc4b2770 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -38,4 +38,5 @@ obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
38obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o 38obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
39obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o 39obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
40obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o 40obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
41obj-$(CONFIG_BACKLIGHT_AAT2870) += aat2870_bl.o
41 42
diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c
new file mode 100644
index 000000000000..331f1ef1dad5
--- /dev/null
+++ b/drivers/video/backlight/aat2870_bl.c
@@ -0,0 +1,246 @@
1/*
2 * linux/drivers/video/backlight/aat2870_bl.c
3 *
4 * Copyright (c) 2011, NVIDIA Corporation.
5 * Author: Jin Park <jinyoungp@nvidia.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 */
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/platform_device.h>
26#include <linux/mutex.h>
27#include <linux/delay.h>
28#include <linux/fb.h>
29#include <linux/backlight.h>
30#include <linux/mfd/aat2870.h>
31
32struct aat2870_bl_driver_data {
33 struct platform_device *pdev;
34 struct backlight_device *bd;
35
36 int channels;
37 int max_current;
38 int brightness; /* current brightness */
39};
40
41static inline int aat2870_brightness(struct aat2870_bl_driver_data *aat2870_bl,
42 int brightness)
43{
44 struct backlight_device *bd = aat2870_bl->bd;
45 int val;
46
47 val = brightness * (aat2870_bl->max_current - 1);
48 val /= bd->props.max_brightness;
49
50 return val;
51}
52
53static inline int aat2870_bl_enable(struct aat2870_bl_driver_data *aat2870_bl)
54{
55 struct aat2870_data *aat2870
56 = dev_get_drvdata(aat2870_bl->pdev->dev.parent);
57
58 return aat2870->write(aat2870, AAT2870_BL_CH_EN,
59 (u8)aat2870_bl->channels);
60}
61
62static inline int aat2870_bl_disable(struct aat2870_bl_driver_data *aat2870_bl)
63{
64 struct aat2870_data *aat2870
65 = dev_get_drvdata(aat2870_bl->pdev->dev.parent);
66
67 return aat2870->write(aat2870, AAT2870_BL_CH_EN, 0x0);
68}
69
70static int aat2870_bl_get_brightness(struct backlight_device *bd)
71{
72 return bd->props.brightness;
73}
74
75static int aat2870_bl_update_status(struct backlight_device *bd)
76{
77 struct aat2870_bl_driver_data *aat2870_bl = dev_get_drvdata(&bd->dev);
78 struct aat2870_data *aat2870 =
79 dev_get_drvdata(aat2870_bl->pdev->dev.parent);
80 int brightness = bd->props.brightness;
81 int ret;
82
83 if ((brightness < 0) || (bd->props.max_brightness < brightness)) {
84 dev_err(&bd->dev, "invalid brightness, %d\n", brightness);
85 return -EINVAL;
86 }
87
88 dev_dbg(&bd->dev, "brightness=%d, power=%d, state=%d\n",
89 bd->props.brightness, bd->props.power, bd->props.state);
90
91 if ((bd->props.power != FB_BLANK_UNBLANK) ||
92 (bd->props.state & BL_CORE_FBBLANK) ||
93 (bd->props.state & BL_CORE_SUSPENDED))
94 brightness = 0;
95
96 ret = aat2870->write(aat2870, AAT2870_BLM,
97 (u8)aat2870_brightness(aat2870_bl, brightness));
98 if (ret < 0)
99 return ret;
100
101 if (brightness == 0) {
102 ret = aat2870_bl_disable(aat2870_bl);
103 if (ret < 0)
104 return ret;
105 } else if (aat2870_bl->brightness == 0) {
106 ret = aat2870_bl_enable(aat2870_bl);
107 if (ret < 0)
108 return ret;
109 }
110
111 aat2870_bl->brightness = brightness;
112
113 return 0;
114}
115
116static int aat2870_bl_check_fb(struct backlight_device *bd, struct fb_info *fi)
117{
118 return 1;
119}
120
121static const struct backlight_ops aat2870_bl_ops = {
122 .options = BL_CORE_SUSPENDRESUME,
123 .get_brightness = aat2870_bl_get_brightness,
124 .update_status = aat2870_bl_update_status,
125 .check_fb = aat2870_bl_check_fb,
126};
127
128static int aat2870_bl_probe(struct platform_device *pdev)
129{
130 struct aat2870_bl_platform_data *pdata = pdev->dev.platform_data;
131 struct aat2870_bl_driver_data *aat2870_bl;
132 struct backlight_device *bd;
133 struct backlight_properties props;
134 int ret = 0;
135
136 if (!pdata) {
137 dev_err(&pdev->dev, "No platform data\n");
138 ret = -ENXIO;
139 goto out;
140 }
141
142 if (pdev->id != AAT2870_ID_BL) {
143 dev_err(&pdev->dev, "Invalid device ID, %d\n", pdev->id);
144 ret = -EINVAL;
145 goto out;
146 }
147
148 aat2870_bl = kzalloc(sizeof(struct aat2870_bl_driver_data), GFP_KERNEL);
149 if (!aat2870_bl) {
150 dev_err(&pdev->dev,
151 "Failed to allocate memory for aat2870 backlight\n");
152 ret = -ENOMEM;
153 goto out;
154 }
155
156 memset(&props, 0, sizeof(struct backlight_properties));
157
158 props.type = BACKLIGHT_RAW;
159 bd = backlight_device_register("aat2870-backlight", &pdev->dev,
160 aat2870_bl, &aat2870_bl_ops, &props);
161 if (IS_ERR(bd)) {
162 dev_err(&pdev->dev,
163 "Failed allocate memory for backlight device\n");
164 ret = PTR_ERR(bd);
165 goto out_kfree;
166 }
167
168 aat2870_bl->pdev = pdev;
169 platform_set_drvdata(pdev, aat2870_bl);
170
171 aat2870_bl->bd = bd;
172
173 if (pdata->channels > 0)
174 aat2870_bl->channels = pdata->channels;
175 else
176 aat2870_bl->channels = AAT2870_BL_CH_ALL;
177
178 if (pdata->max_current > 0)
179 aat2870_bl->max_current = pdata->max_current;
180 else
181 aat2870_bl->max_current = AAT2870_CURRENT_27_9;
182
183 if (pdata->max_brightness > 0)
184 bd->props.max_brightness = pdata->max_brightness;
185 else
186 bd->props.max_brightness = 255;
187
188 aat2870_bl->brightness = 0;
189 bd->props.power = FB_BLANK_UNBLANK;
190 bd->props.brightness = bd->props.max_brightness;
191
192 ret = aat2870_bl_update_status(bd);
193 if (ret < 0) {
194 dev_err(&pdev->dev, "Failed to initialize\n");
195 goto out_bl_dev_unregister;
196 }
197
198 return 0;
199
200out_bl_dev_unregister:
201 backlight_device_unregister(bd);
202out_kfree:
203 kfree(aat2870_bl);
204out:
205 return ret;
206}
207
208static int aat2870_bl_remove(struct platform_device *pdev)
209{
210 struct aat2870_bl_driver_data *aat2870_bl = platform_get_drvdata(pdev);
211 struct backlight_device *bd = aat2870_bl->bd;
212
213 bd->props.power = FB_BLANK_POWERDOWN;
214 bd->props.brightness = 0;
215 backlight_update_status(bd);
216
217 backlight_device_unregister(bd);
218 kfree(aat2870_bl);
219
220 return 0;
221}
222
223static struct platform_driver aat2870_bl_driver = {
224 .driver = {
225 .name = "aat2870-backlight",
226 .owner = THIS_MODULE,
227 },
228 .probe = aat2870_bl_probe,
229 .remove = aat2870_bl_remove,
230};
231
232static int __init aat2870_bl_init(void)
233{
234 return platform_driver_register(&aat2870_bl_driver);
235}
236subsys_initcall(aat2870_bl_init);
237
238static void __exit aat2870_bl_exit(void)
239{
240 platform_driver_unregister(&aat2870_bl_driver);
241}
242module_exit(aat2870_bl_exit);
243
244MODULE_DESCRIPTION("AnalogicTech AAT2870 Backlight");
245MODULE_LICENSE("GPL");
246MODULE_AUTHOR("Jin Park <jinyoungp@nvidia.com>");
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 05a8832bb3eb..d06886a2bfb5 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -1009,4 +1009,4 @@ module_exit(adp8870_exit);
1009MODULE_LICENSE("GPL v2"); 1009MODULE_LICENSE("GPL v2");
1010MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); 1010MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
1011MODULE_DESCRIPTION("ADP8870 Backlight driver"); 1011MODULE_DESCRIPTION("ADP8870 Backlight driver");
1012MODULE_ALIAS("platform:adp8870-backlight"); 1012MODULE_ALIAS("i2c:adp8870-backlight");
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 80d292fb92d8..7363c1b169e8 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -19,7 +19,7 @@
19#include <asm/backlight.h> 19#include <asm/backlight.h>
20#endif 20#endif
21 21
22static const char const *backlight_types[] = { 22static const char *const backlight_types[] = {
23 [BACKLIGHT_RAW] = "raw", 23 [BACKLIGHT_RAW] = "raw",
24 [BACKLIGHT_PLATFORM] = "platform", 24 [BACKLIGHT_PLATFORM] = "platform",
25 [BACKLIGHT_FIRMWARE] = "firmware", 25 [BACKLIGHT_FIRMWARE] = "firmware",
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index 9f1e389d51d2..b0582917f0c8 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -11,7 +11,7 @@
11 * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors. 11 * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors.
12 */ 12 */
13 13
14 14#include <linux/module.h>
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/fb.h> 17#include <linux/fb.h>
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index b8f38ec6eb18..8b5b2a4124c7 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -28,6 +28,8 @@ struct pwm_bl_data {
28 unsigned int lth_brightness; 28 unsigned int lth_brightness;
29 int (*notify)(struct device *, 29 int (*notify)(struct device *,
30 int brightness); 30 int brightness);
31 void (*notify_after)(struct device *,
32 int brightness);
31 int (*check_fb)(struct device *, struct fb_info *); 33 int (*check_fb)(struct device *, struct fb_info *);
32}; 34};
33 35
@@ -55,6 +57,10 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
55 pwm_config(pb->pwm, brightness, pb->period); 57 pwm_config(pb->pwm, brightness, pb->period);
56 pwm_enable(pb->pwm); 58 pwm_enable(pb->pwm);
57 } 59 }
60
61 if (pb->notify_after)
62 pb->notify_after(pb->dev, brightness);
63
58 return 0; 64 return 0;
59} 65}
60 66
@@ -105,6 +111,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
105 111
106 pb->period = data->pwm_period_ns; 112 pb->period = data->pwm_period_ns;
107 pb->notify = data->notify; 113 pb->notify = data->notify;
114 pb->notify_after = data->notify_after;
108 pb->check_fb = data->check_fb; 115 pb->check_fb = data->check_fb;
109 pb->lth_brightness = data->lth_brightness * 116 pb->lth_brightness = data->lth_brightness *
110 (data->pwm_period_ns / data->max_brightness); 117 (data->pwm_period_ns / data->max_brightness);
@@ -172,6 +179,8 @@ static int pwm_backlight_suspend(struct platform_device *pdev,
172 pb->notify(pb->dev, 0); 179 pb->notify(pb->dev, 0);
173 pwm_config(pb->pwm, 0, pb->period); 180 pwm_config(pb->pwm, 0, pb->period);
174 pwm_disable(pb->pwm); 181 pwm_disable(pb->pwm);
182 if (pb->notify_after)
183 pb->notify_after(pb->dev, 0);
175 return 0; 184 return 0;
176} 185}
177 186
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index fdd5d4ae437d..4e888ac09b3f 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -504,14 +504,18 @@ static int taal_exit_ulps(struct omap_dss_device *dssdev)
504 return 0; 504 return 0;
505 505
506 r = omapdss_dsi_display_enable(dssdev); 506 r = omapdss_dsi_display_enable(dssdev);
507 if (r) 507 if (r) {
508 goto err; 508 dev_err(&dssdev->dev, "failed to enable DSI\n");
509 goto err1;
510 }
509 511
510 omapdss_dsi_vc_enable_hs(dssdev, td->channel, true); 512 omapdss_dsi_vc_enable_hs(dssdev, td->channel, true);
511 513
512 r = _taal_enable_te(dssdev, true); 514 r = _taal_enable_te(dssdev, true);
513 if (r) 515 if (r) {
514 goto err; 516 dev_err(&dssdev->dev, "failed to re-enable TE");
517 goto err2;
518 }
515 519
516 enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); 520 enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
517 521
@@ -521,13 +525,15 @@ static int taal_exit_ulps(struct omap_dss_device *dssdev)
521 525
522 return 0; 526 return 0;
523 527
524err: 528err2:
525 dev_err(&dssdev->dev, "exit ULPS failed"); 529 dev_err(&dssdev->dev, "failed to exit ULPS");
526 r = taal_panel_reset(dssdev);
527
528 enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
529 td->ulps_enabled = false;
530 530
531 r = taal_panel_reset(dssdev);
532 if (!r) {
533 enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
534 td->ulps_enabled = false;
535 }
536err1:
531 taal_queue_ulps_work(dssdev); 537 taal_queue_ulps_work(dssdev);
532 538
533 return r; 539 return r;
@@ -1241,11 +1247,8 @@ static void taal_power_off(struct omap_dss_device *dssdev)
1241 int r; 1247 int r;
1242 1248
1243 r = taal_dcs_write_0(td, DCS_DISPLAY_OFF); 1249 r = taal_dcs_write_0(td, DCS_DISPLAY_OFF);
1244 if (!r) { 1250 if (!r)
1245 r = taal_sleep_in(td); 1251 r = taal_sleep_in(td);
1246 /* HACK: wait a bit so that the message goes through */
1247 msleep(10);
1248 }
1249 1252
1250 if (r) { 1253 if (r) {
1251 dev_err(&dssdev->dev, 1254 dev_err(&dssdev->dev,
@@ -1317,8 +1320,11 @@ static void taal_disable(struct omap_dss_device *dssdev)
1317 dsi_bus_lock(dssdev); 1320 dsi_bus_lock(dssdev);
1318 1321
1319 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { 1322 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
1320 taal_wake_up(dssdev); 1323 int r;
1321 taal_power_off(dssdev); 1324
1325 r = taal_wake_up(dssdev);
1326 if (!r)
1327 taal_power_off(dssdev);
1322 } 1328 }
1323 1329
1324 dsi_bus_unlock(dssdev); 1330 dsi_bus_unlock(dssdev);
@@ -1897,20 +1903,6 @@ err:
1897 mutex_unlock(&td->lock); 1903 mutex_unlock(&td->lock);
1898} 1904}
1899 1905
1900static int taal_set_update_mode(struct omap_dss_device *dssdev,
1901 enum omap_dss_update_mode mode)
1902{
1903 if (mode != OMAP_DSS_UPDATE_MANUAL)
1904 return -EINVAL;
1905 return 0;
1906}
1907
1908static enum omap_dss_update_mode taal_get_update_mode(
1909 struct omap_dss_device *dssdev)
1910{
1911 return OMAP_DSS_UPDATE_MANUAL;
1912}
1913
1914static struct omap_dss_driver taal_driver = { 1906static struct omap_dss_driver taal_driver = {
1915 .probe = taal_probe, 1907 .probe = taal_probe,
1916 .remove = __exit_p(taal_remove), 1908 .remove = __exit_p(taal_remove),
@@ -1920,9 +1912,6 @@ static struct omap_dss_driver taal_driver = {
1920 .suspend = taal_suspend, 1912 .suspend = taal_suspend,
1921 .resume = taal_resume, 1913 .resume = taal_resume,
1922 1914
1923 .set_update_mode = taal_set_update_mode,
1924 .get_update_mode = taal_get_update_mode,
1925
1926 .update = taal_update, 1915 .update = taal_update,
1927 .sync = taal_sync, 1916 .sync = taal_sync,
1928 1917
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 6b3e2da11419..0d12524db14b 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -117,18 +117,6 @@ config OMAP2_DSS_MIN_FCK_PER_PCK
117 Max FCK is 173MHz, so this doesn't work if your PCK 117 Max FCK is 173MHz, so this doesn't work if your PCK
118 is very high. 118 is very high.
119 119
120config OMAP2_DSS_SLEEP_BEFORE_RESET
121 bool "Sleep 50ms before DSS reset"
122 default y
123 help
124 For some unknown reason we may get SYNC_LOST errors from the display
125 subsystem at initialization time if we don't sleep before resetting
126 the DSS. See the source (dss.c) for more comments.
127
128 However, 50ms is quite long time to sleep, and with some
129 configurations the SYNC_LOST may never happen, so the sleep can
130 be disabled here.
131
132config OMAP2_DSS_SLEEP_AFTER_VENC_RESET 120config OMAP2_DSS_SLEEP_AFTER_VENC_RESET
133 bool "Sleep 20ms after VENC reset" 121 bool "Sleep 20ms after VENC reset"
134 default y 122 default y
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 3da426719dd6..76821fefce9a 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -183,8 +183,11 @@ static int omap_dss_probe(struct platform_device *pdev)
183 goto err_dss; 183 goto err_dss;
184 } 184 }
185 185
186 /* keep clocks enabled to prevent context saves/restores during init */ 186 r = dispc_init_platform_driver();
187 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); 187 if (r) {
188 DSSERR("Failed to initialize dispc platform driver\n");
189 goto err_dispc;
190 }
188 191
189 r = rfbi_init_platform_driver(); 192 r = rfbi_init_platform_driver();
190 if (r) { 193 if (r) {
@@ -192,12 +195,6 @@ static int omap_dss_probe(struct platform_device *pdev)
192 goto err_rfbi; 195 goto err_rfbi;
193 } 196 }
194 197
195 r = dispc_init_platform_driver();
196 if (r) {
197 DSSERR("Failed to initialize dispc platform driver\n");
198 goto err_dispc;
199 }
200
201 r = venc_init_platform_driver(); 198 r = venc_init_platform_driver();
202 if (r) { 199 if (r) {
203 DSSERR("Failed to initialize venc platform driver\n"); 200 DSSERR("Failed to initialize venc platform driver\n");
@@ -238,8 +235,6 @@ static int omap_dss_probe(struct platform_device *pdev)
238 pdata->default_device = dssdev; 235 pdata->default_device = dssdev;
239 } 236 }
240 237
241 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
242
243 return 0; 238 return 0;
244 239
245err_register: 240err_register:
@@ -268,11 +263,11 @@ static int omap_dss_remove(struct platform_device *pdev)
268 263
269 dss_uninitialize_debugfs(); 264 dss_uninitialize_debugfs();
270 265
266 hdmi_uninit_platform_driver();
267 dsi_uninit_platform_driver();
271 venc_uninit_platform_driver(); 268 venc_uninit_platform_driver();
272 dispc_uninit_platform_driver();
273 rfbi_uninit_platform_driver(); 269 rfbi_uninit_platform_driver();
274 dsi_uninit_platform_driver(); 270 dispc_uninit_platform_driver();
275 hdmi_uninit_platform_driver();
276 dss_uninit_platform_driver(); 271 dss_uninit_platform_driver();
277 272
278 dss_uninit_overlays(pdev); 273 dss_uninit_overlays(pdev);
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 7a9a2e7d9685..0f3961a1ce26 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -33,6 +33,8 @@
33#include <linux/workqueue.h> 33#include <linux/workqueue.h>
34#include <linux/hardirq.h> 34#include <linux/hardirq.h>
35#include <linux/interrupt.h> 35#include <linux/interrupt.h>
36#include <linux/platform_device.h>
37#include <linux/pm_runtime.h>
36 38
37#include <plat/sram.h> 39#include <plat/sram.h>
38#include <plat/clock.h> 40#include <plat/clock.h>
@@ -77,6 +79,12 @@ struct dispc_v_coef {
77 s8 vc00; 79 s8 vc00;
78}; 80};
79 81
82enum omap_burst_size {
83 BURST_SIZE_X2 = 0,
84 BURST_SIZE_X4 = 1,
85 BURST_SIZE_X8 = 2,
86};
87
80#define REG_GET(idx, start, end) \ 88#define REG_GET(idx, start, end) \
81 FLD_GET(dispc_read_reg(idx), start, end) 89 FLD_GET(dispc_read_reg(idx), start, end)
82 90
@@ -92,7 +100,11 @@ struct dispc_irq_stats {
92static struct { 100static struct {
93 struct platform_device *pdev; 101 struct platform_device *pdev;
94 void __iomem *base; 102 void __iomem *base;
103
104 int ctx_loss_cnt;
105
95 int irq; 106 int irq;
107 struct clk *dss_clk;
96 108
97 u32 fifo_size[3]; 109 u32 fifo_size[3];
98 110
@@ -102,6 +114,7 @@ static struct {
102 u32 error_irqs; 114 u32 error_irqs;
103 struct work_struct error_work; 115 struct work_struct error_work;
104 116
117 bool ctx_valid;
105 u32 ctx[DISPC_SZ_REGS / sizeof(u32)]; 118 u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
106 119
107#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 120#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -134,18 +147,34 @@ static inline u32 dispc_read_reg(const u16 idx)
134 return __raw_readl(dispc.base + idx); 147 return __raw_readl(dispc.base + idx);
135} 148}
136 149
150static int dispc_get_ctx_loss_count(void)
151{
152 struct device *dev = &dispc.pdev->dev;
153 struct omap_display_platform_data *pdata = dev->platform_data;
154 struct omap_dss_board_info *board_data = pdata->board_data;
155 int cnt;
156
157 if (!board_data->get_context_loss_count)
158 return -ENOENT;
159
160 cnt = board_data->get_context_loss_count(dev);
161
162 WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
163
164 return cnt;
165}
166
137#define SR(reg) \ 167#define SR(reg) \
138 dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg) 168 dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
139#define RR(reg) \ 169#define RR(reg) \
140 dispc_write_reg(DISPC_##reg, dispc.ctx[DISPC_##reg / sizeof(u32)]) 170 dispc_write_reg(DISPC_##reg, dispc.ctx[DISPC_##reg / sizeof(u32)])
141 171
142void dispc_save_context(void) 172static void dispc_save_context(void)
143{ 173{
144 int i; 174 int i;
145 if (cpu_is_omap24xx())
146 return;
147 175
148 SR(SYSCONFIG); 176 DSSDBG("dispc_save_context\n");
177
149 SR(IRQENABLE); 178 SR(IRQENABLE);
150 SR(CONTROL); 179 SR(CONTROL);
151 SR(CONFIG); 180 SR(CONFIG);
@@ -158,7 +187,8 @@ void dispc_save_context(void)
158 SR(TIMING_V(OMAP_DSS_CHANNEL_LCD)); 187 SR(TIMING_V(OMAP_DSS_CHANNEL_LCD));
159 SR(POL_FREQ(OMAP_DSS_CHANNEL_LCD)); 188 SR(POL_FREQ(OMAP_DSS_CHANNEL_LCD));
160 SR(DIVISORo(OMAP_DSS_CHANNEL_LCD)); 189 SR(DIVISORo(OMAP_DSS_CHANNEL_LCD));
161 SR(GLOBAL_ALPHA); 190 if (dss_has_feature(FEAT_GLOBAL_ALPHA))
191 SR(GLOBAL_ALPHA);
162 SR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT)); 192 SR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
163 SR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD)); 193 SR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
164 if (dss_has_feature(FEAT_MGR_LCD2)) { 194 if (dss_has_feature(FEAT_MGR_LCD2)) {
@@ -188,20 +218,25 @@ void dispc_save_context(void)
188 SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD)); 218 SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
189 SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD)); 219 SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
190 220
191 SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); 221 if (dss_has_feature(FEAT_CPR)) {
192 SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); 222 SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
193 SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); 223 SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
224 SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
225 }
194 if (dss_has_feature(FEAT_MGR_LCD2)) { 226 if (dss_has_feature(FEAT_MGR_LCD2)) {
195 SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); 227 if (dss_has_feature(FEAT_CPR)) {
196 SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); 228 SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
197 SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); 229 SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
230 SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
231 }
198 232
199 SR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2)); 233 SR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
200 SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2)); 234 SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
201 SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2)); 235 SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
202 } 236 }
203 237
204 SR(OVL_PRELOAD(OMAP_DSS_GFX)); 238 if (dss_has_feature(FEAT_PRELOAD))
239 SR(OVL_PRELOAD(OMAP_DSS_GFX));
205 240
206 /* VID1 */ 241 /* VID1 */
207 SR(OVL_BA0(OMAP_DSS_VIDEO1)); 242 SR(OVL_BA0(OMAP_DSS_VIDEO1));
@@ -226,8 +261,10 @@ void dispc_save_context(void)
226 for (i = 0; i < 5; i++) 261 for (i = 0; i < 5; i++)
227 SR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i)); 262 SR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i));
228 263
229 for (i = 0; i < 8; i++) 264 if (dss_has_feature(FEAT_FIR_COEF_V)) {
230 SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i)); 265 for (i = 0; i < 8; i++)
266 SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
267 }
231 268
232 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { 269 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
233 SR(OVL_BA0_UV(OMAP_DSS_VIDEO1)); 270 SR(OVL_BA0_UV(OMAP_DSS_VIDEO1));
@@ -248,7 +285,8 @@ void dispc_save_context(void)
248 if (dss_has_feature(FEAT_ATTR2)) 285 if (dss_has_feature(FEAT_ATTR2))
249 SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1)); 286 SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
250 287
251 SR(OVL_PRELOAD(OMAP_DSS_VIDEO1)); 288 if (dss_has_feature(FEAT_PRELOAD))
289 SR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
252 290
253 /* VID2 */ 291 /* VID2 */
254 SR(OVL_BA0(OMAP_DSS_VIDEO2)); 292 SR(OVL_BA0(OMAP_DSS_VIDEO2));
@@ -273,8 +311,10 @@ void dispc_save_context(void)
273 for (i = 0; i < 5; i++) 311 for (i = 0; i < 5; i++)
274 SR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i)); 312 SR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i));
275 313
276 for (i = 0; i < 8; i++) 314 if (dss_has_feature(FEAT_FIR_COEF_V)) {
277 SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i)); 315 for (i = 0; i < 8; i++)
316 SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
317 }
278 318
279 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { 319 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
280 SR(OVL_BA0_UV(OMAP_DSS_VIDEO2)); 320 SR(OVL_BA0_UV(OMAP_DSS_VIDEO2));
@@ -295,16 +335,35 @@ void dispc_save_context(void)
295 if (dss_has_feature(FEAT_ATTR2)) 335 if (dss_has_feature(FEAT_ATTR2))
296 SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2)); 336 SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
297 337
298 SR(OVL_PRELOAD(OMAP_DSS_VIDEO2)); 338 if (dss_has_feature(FEAT_PRELOAD))
339 SR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
299 340
300 if (dss_has_feature(FEAT_CORE_CLK_DIV)) 341 if (dss_has_feature(FEAT_CORE_CLK_DIV))
301 SR(DIVISOR); 342 SR(DIVISOR);
343
344 dispc.ctx_loss_cnt = dispc_get_ctx_loss_count();
345 dispc.ctx_valid = true;
346
347 DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt);
302} 348}
303 349
304void dispc_restore_context(void) 350static void dispc_restore_context(void)
305{ 351{
306 int i; 352 int i, ctx;
307 RR(SYSCONFIG); 353
354 DSSDBG("dispc_restore_context\n");
355
356 if (!dispc.ctx_valid)
357 return;
358
359 ctx = dispc_get_ctx_loss_count();
360
361 if (ctx >= 0 && ctx == dispc.ctx_loss_cnt)
362 return;
363
364 DSSDBG("ctx_loss_count: saved %d, current %d\n",
365 dispc.ctx_loss_cnt, ctx);
366
308 /*RR(IRQENABLE);*/ 367 /*RR(IRQENABLE);*/
309 /*RR(CONTROL);*/ 368 /*RR(CONTROL);*/
310 RR(CONFIG); 369 RR(CONFIG);
@@ -317,7 +376,8 @@ void dispc_restore_context(void)
317 RR(TIMING_V(OMAP_DSS_CHANNEL_LCD)); 376 RR(TIMING_V(OMAP_DSS_CHANNEL_LCD));
318 RR(POL_FREQ(OMAP_DSS_CHANNEL_LCD)); 377 RR(POL_FREQ(OMAP_DSS_CHANNEL_LCD));
319 RR(DIVISORo(OMAP_DSS_CHANNEL_LCD)); 378 RR(DIVISORo(OMAP_DSS_CHANNEL_LCD));
320 RR(GLOBAL_ALPHA); 379 if (dss_has_feature(FEAT_GLOBAL_ALPHA))
380 RR(GLOBAL_ALPHA);
321 RR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT)); 381 RR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
322 RR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD)); 382 RR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
323 if (dss_has_feature(FEAT_MGR_LCD2)) { 383 if (dss_has_feature(FEAT_MGR_LCD2)) {
@@ -347,20 +407,25 @@ void dispc_restore_context(void)
347 RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD)); 407 RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
348 RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD)); 408 RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
349 409
350 RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); 410 if (dss_has_feature(FEAT_CPR)) {
351 RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); 411 RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
352 RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); 412 RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
413 RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
414 }
353 if (dss_has_feature(FEAT_MGR_LCD2)) { 415 if (dss_has_feature(FEAT_MGR_LCD2)) {
354 RR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2)); 416 RR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
355 RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2)); 417 RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
356 RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2)); 418 RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
357 419
358 RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); 420 if (dss_has_feature(FEAT_CPR)) {
359 RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); 421 RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
360 RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); 422 RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
423 RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
424 }
361 } 425 }
362 426
363 RR(OVL_PRELOAD(OMAP_DSS_GFX)); 427 if (dss_has_feature(FEAT_PRELOAD))
428 RR(OVL_PRELOAD(OMAP_DSS_GFX));
364 429
365 /* VID1 */ 430 /* VID1 */
366 RR(OVL_BA0(OMAP_DSS_VIDEO1)); 431 RR(OVL_BA0(OMAP_DSS_VIDEO1));
@@ -385,8 +450,10 @@ void dispc_restore_context(void)
385 for (i = 0; i < 5; i++) 450 for (i = 0; i < 5; i++)
386 RR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i)); 451 RR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i));
387 452
388 for (i = 0; i < 8; i++) 453 if (dss_has_feature(FEAT_FIR_COEF_V)) {
389 RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i)); 454 for (i = 0; i < 8; i++)
455 RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
456 }
390 457
391 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { 458 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
392 RR(OVL_BA0_UV(OMAP_DSS_VIDEO1)); 459 RR(OVL_BA0_UV(OMAP_DSS_VIDEO1));
@@ -407,7 +474,8 @@ void dispc_restore_context(void)
407 if (dss_has_feature(FEAT_ATTR2)) 474 if (dss_has_feature(FEAT_ATTR2))
408 RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1)); 475 RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
409 476
410 RR(OVL_PRELOAD(OMAP_DSS_VIDEO1)); 477 if (dss_has_feature(FEAT_PRELOAD))
478 RR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
411 479
412 /* VID2 */ 480 /* VID2 */
413 RR(OVL_BA0(OMAP_DSS_VIDEO2)); 481 RR(OVL_BA0(OMAP_DSS_VIDEO2));
@@ -432,8 +500,10 @@ void dispc_restore_context(void)
432 for (i = 0; i < 5; i++) 500 for (i = 0; i < 5; i++)
433 RR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i)); 501 RR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i));
434 502
435 for (i = 0; i < 8; i++) 503 if (dss_has_feature(FEAT_FIR_COEF_V)) {
436 RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i)); 504 for (i = 0; i < 8; i++)
505 RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
506 }
437 507
438 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { 508 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
439 RR(OVL_BA0_UV(OMAP_DSS_VIDEO2)); 509 RR(OVL_BA0_UV(OMAP_DSS_VIDEO2));
@@ -454,7 +524,8 @@ void dispc_restore_context(void)
454 if (dss_has_feature(FEAT_ATTR2)) 524 if (dss_has_feature(FEAT_ATTR2))
455 RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2)); 525 RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
456 526
457 RR(OVL_PRELOAD(OMAP_DSS_VIDEO2)); 527 if (dss_has_feature(FEAT_PRELOAD))
528 RR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
458 529
459 if (dss_has_feature(FEAT_CORE_CLK_DIV)) 530 if (dss_has_feature(FEAT_CORE_CLK_DIV))
460 RR(DIVISOR); 531 RR(DIVISOR);
@@ -471,19 +542,35 @@ void dispc_restore_context(void)
471 * the context is fully restored 542 * the context is fully restored
472 */ 543 */
473 RR(IRQENABLE); 544 RR(IRQENABLE);
545
546 DSSDBG("context restored\n");
474} 547}
475 548
476#undef SR 549#undef SR
477#undef RR 550#undef RR
478 551
479static inline void enable_clocks(bool enable) 552int dispc_runtime_get(void)
480{ 553{
481 if (enable) 554 int r;
482 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); 555
483 else 556 DSSDBG("dispc_runtime_get\n");
484 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 557
558 r = pm_runtime_get_sync(&dispc.pdev->dev);
559 WARN_ON(r < 0);
560 return r < 0 ? r : 0;
485} 561}
486 562
563void dispc_runtime_put(void)
564{
565 int r;
566
567 DSSDBG("dispc_runtime_put\n");
568
569 r = pm_runtime_put(&dispc.pdev->dev);
570 WARN_ON(r < 0);
571}
572
573
487bool dispc_go_busy(enum omap_channel channel) 574bool dispc_go_busy(enum omap_channel channel)
488{ 575{
489 int bit; 576 int bit;
@@ -505,8 +592,6 @@ void dispc_go(enum omap_channel channel)
505 int bit; 592 int bit;
506 bool enable_bit, go_bit; 593 bool enable_bit, go_bit;
507 594
508 enable_clocks(1);
509
510 if (channel == OMAP_DSS_CHANNEL_LCD || 595 if (channel == OMAP_DSS_CHANNEL_LCD ||
511 channel == OMAP_DSS_CHANNEL_LCD2) 596 channel == OMAP_DSS_CHANNEL_LCD2)
512 bit = 0; /* LCDENABLE */ 597 bit = 0; /* LCDENABLE */
@@ -520,7 +605,7 @@ void dispc_go(enum omap_channel channel)
520 enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1; 605 enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1;
521 606
522 if (!enable_bit) 607 if (!enable_bit)
523 goto end; 608 return;
524 609
525 if (channel == OMAP_DSS_CHANNEL_LCD || 610 if (channel == OMAP_DSS_CHANNEL_LCD ||
526 channel == OMAP_DSS_CHANNEL_LCD2) 611 channel == OMAP_DSS_CHANNEL_LCD2)
@@ -535,7 +620,7 @@ void dispc_go(enum omap_channel channel)
535 620
536 if (go_bit) { 621 if (go_bit) {
537 DSSERR("GO bit not down for channel %d\n", channel); 622 DSSERR("GO bit not down for channel %d\n", channel);
538 goto end; 623 return;
539 } 624 }
540 625
541 DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" : 626 DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" :
@@ -545,8 +630,6 @@ void dispc_go(enum omap_channel channel)
545 REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit); 630 REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit);
546 else 631 else
547 REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit); 632 REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
548end:
549 enable_clocks(0);
550} 633}
551 634
552static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value) 635static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value)
@@ -920,7 +1003,7 @@ static void _dispc_set_color_mode(enum omap_plane plane,
920 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1); 1003 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
921} 1004}
922 1005
923static void _dispc_set_channel_out(enum omap_plane plane, 1006void dispc_set_channel_out(enum omap_plane plane,
924 enum omap_channel channel) 1007 enum omap_channel channel)
925{ 1008{
926 int shift; 1009 int shift;
@@ -967,13 +1050,10 @@ static void _dispc_set_channel_out(enum omap_plane plane,
967 dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val); 1050 dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
968} 1051}
969 1052
970void dispc_set_burst_size(enum omap_plane plane, 1053static void dispc_set_burst_size(enum omap_plane plane,
971 enum omap_burst_size burst_size) 1054 enum omap_burst_size burst_size)
972{ 1055{
973 int shift; 1056 int shift;
974 u32 val;
975
976 enable_clocks(1);
977 1057
978 switch (plane) { 1058 switch (plane) {
979 case OMAP_DSS_GFX: 1059 case OMAP_DSS_GFX:
@@ -988,11 +1068,24 @@ void dispc_set_burst_size(enum omap_plane plane,
988 return; 1068 return;
989 } 1069 }
990 1070
991 val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); 1071 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), burst_size, shift + 1, shift);
992 val = FLD_MOD(val, burst_size, shift+1, shift); 1072}
993 dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
994 1073
995 enable_clocks(0); 1074static void dispc_configure_burst_sizes(void)
1075{
1076 int i;
1077 const int burst_size = BURST_SIZE_X8;
1078
1079 /* Configure burst size always to maximum size */
1080 for (i = 0; i < omap_dss_get_num_overlays(); ++i)
1081 dispc_set_burst_size(i, burst_size);
1082}
1083
1084u32 dispc_get_burst_size(enum omap_plane plane)
1085{
1086 unsigned unit = dss_feat_get_burst_size_unit();
1087 /* burst multiplier is always x8 (see dispc_configure_burst_sizes()) */
1088 return unit * 8;
996} 1089}
997 1090
998void dispc_enable_gamma_table(bool enable) 1091void dispc_enable_gamma_table(bool enable)
@@ -1009,6 +1102,40 @@ void dispc_enable_gamma_table(bool enable)
1009 REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9); 1102 REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
1010} 1103}
1011 1104
1105void dispc_enable_cpr(enum omap_channel channel, bool enable)
1106{
1107 u16 reg;
1108
1109 if (channel == OMAP_DSS_CHANNEL_LCD)
1110 reg = DISPC_CONFIG;
1111 else if (channel == OMAP_DSS_CHANNEL_LCD2)
1112 reg = DISPC_CONFIG2;
1113 else
1114 return;
1115
1116 REG_FLD_MOD(reg, enable, 15, 15);
1117}
1118
1119void dispc_set_cpr_coef(enum omap_channel channel,
1120 struct omap_dss_cpr_coefs *coefs)
1121{
1122 u32 coef_r, coef_g, coef_b;
1123
1124 if (channel != OMAP_DSS_CHANNEL_LCD && channel != OMAP_DSS_CHANNEL_LCD2)
1125 return;
1126
1127 coef_r = FLD_VAL(coefs->rr, 31, 22) | FLD_VAL(coefs->rg, 20, 11) |
1128 FLD_VAL(coefs->rb, 9, 0);
1129 coef_g = FLD_VAL(coefs->gr, 31, 22) | FLD_VAL(coefs->gg, 20, 11) |
1130 FLD_VAL(coefs->gb, 9, 0);
1131 coef_b = FLD_VAL(coefs->br, 31, 22) | FLD_VAL(coefs->bg, 20, 11) |
1132 FLD_VAL(coefs->bb, 9, 0);
1133
1134 dispc_write_reg(DISPC_CPR_COEF_R(channel), coef_r);
1135 dispc_write_reg(DISPC_CPR_COEF_G(channel), coef_g);
1136 dispc_write_reg(DISPC_CPR_COEF_B(channel), coef_b);
1137}
1138
1012static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable) 1139static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable)
1013{ 1140{
1014 u32 val; 1141 u32 val;
@@ -1029,9 +1156,7 @@ void dispc_enable_replication(enum omap_plane plane, bool enable)
1029 else 1156 else
1030 bit = 10; 1157 bit = 10;
1031 1158
1032 enable_clocks(1);
1033 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit); 1159 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit);
1034 enable_clocks(0);
1035} 1160}
1036 1161
1037void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height) 1162void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
@@ -1039,9 +1164,7 @@ void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
1039 u32 val; 1164 u32 val;
1040 BUG_ON((width > (1 << 11)) || (height > (1 << 11))); 1165 BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
1041 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); 1166 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
1042 enable_clocks(1);
1043 dispc_write_reg(DISPC_SIZE_MGR(channel), val); 1167 dispc_write_reg(DISPC_SIZE_MGR(channel), val);
1044 enable_clocks(0);
1045} 1168}
1046 1169
1047void dispc_set_digit_size(u16 width, u16 height) 1170void dispc_set_digit_size(u16 width, u16 height)
@@ -1049,9 +1172,7 @@ void dispc_set_digit_size(u16 width, u16 height)
1049 u32 val; 1172 u32 val;
1050 BUG_ON((width > (1 << 11)) || (height > (1 << 11))); 1173 BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
1051 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); 1174 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
1052 enable_clocks(1);
1053 dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val); 1175 dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val);
1054 enable_clocks(0);
1055} 1176}
1056 1177
1057static void dispc_read_plane_fifo_sizes(void) 1178static void dispc_read_plane_fifo_sizes(void)
@@ -1059,18 +1180,17 @@ static void dispc_read_plane_fifo_sizes(void)
1059 u32 size; 1180 u32 size;
1060 int plane; 1181 int plane;
1061 u8 start, end; 1182 u8 start, end;
1183 u32 unit;
1062 1184
1063 enable_clocks(1); 1185 unit = dss_feat_get_buffer_size_unit();
1064 1186
1065 dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end); 1187 dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end);
1066 1188
1067 for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) { 1189 for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) {
1068 size = FLD_GET(dispc_read_reg(DISPC_OVL_FIFO_SIZE_STATUS(plane)), 1190 size = REG_GET(DISPC_OVL_FIFO_SIZE_STATUS(plane), start, end);
1069 start, end); 1191 size *= unit;
1070 dispc.fifo_size[plane] = size; 1192 dispc.fifo_size[plane] = size;
1071 } 1193 }
1072
1073 enable_clocks(0);
1074} 1194}
1075 1195
1076u32 dispc_get_plane_fifo_size(enum omap_plane plane) 1196u32 dispc_get_plane_fifo_size(enum omap_plane plane)
@@ -1078,15 +1198,22 @@ u32 dispc_get_plane_fifo_size(enum omap_plane plane)
1078 return dispc.fifo_size[plane]; 1198 return dispc.fifo_size[plane];
1079} 1199}
1080 1200
1081void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high) 1201void dispc_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high)
1082{ 1202{
1083 u8 hi_start, hi_end, lo_start, lo_end; 1203 u8 hi_start, hi_end, lo_start, lo_end;
1204 u32 unit;
1205
1206 unit = dss_feat_get_buffer_size_unit();
1207
1208 WARN_ON(low % unit != 0);
1209 WARN_ON(high % unit != 0);
1210
1211 low /= unit;
1212 high /= unit;
1084 1213
1085 dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end); 1214 dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end);
1086 dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end); 1215 dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end);
1087 1216
1088 enable_clocks(1);
1089
1090 DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n", 1217 DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n",
1091 plane, 1218 plane,
1092 REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane), 1219 REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane),
@@ -1098,18 +1225,12 @@ void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high)
1098 dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane), 1225 dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane),
1099 FLD_VAL(high, hi_start, hi_end) | 1226 FLD_VAL(high, hi_start, hi_end) |
1100 FLD_VAL(low, lo_start, lo_end)); 1227 FLD_VAL(low, lo_start, lo_end));
1101
1102 enable_clocks(0);
1103} 1228}
1104 1229
1105void dispc_enable_fifomerge(bool enable) 1230void dispc_enable_fifomerge(bool enable)
1106{ 1231{
1107 enable_clocks(1);
1108
1109 DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled"); 1232 DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled");
1110 REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14); 1233 REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14);
1111
1112 enable_clocks(0);
1113} 1234}
1114 1235
1115static void _dispc_set_fir(enum omap_plane plane, 1236static void _dispc_set_fir(enum omap_plane plane,
@@ -1729,14 +1850,7 @@ static unsigned long calc_fclk(enum omap_channel channel, u16 width,
1729 return dispc_pclk_rate(channel) * vf * hf; 1850 return dispc_pclk_rate(channel) * vf * hf;
1730} 1851}
1731 1852
1732void dispc_set_channel_out(enum omap_plane plane, enum omap_channel channel_out) 1853int dispc_setup_plane(enum omap_plane plane,
1733{
1734 enable_clocks(1);
1735 _dispc_set_channel_out(plane, channel_out);
1736 enable_clocks(0);
1737}
1738
1739static int _dispc_setup_plane(enum omap_plane plane,
1740 u32 paddr, u16 screen_width, 1854 u32 paddr, u16 screen_width,
1741 u16 pos_x, u16 pos_y, 1855 u16 pos_x, u16 pos_y,
1742 u16 width, u16 height, 1856 u16 width, u16 height,
@@ -1744,7 +1858,7 @@ static int _dispc_setup_plane(enum omap_plane plane,
1744 enum omap_color_mode color_mode, 1858 enum omap_color_mode color_mode,
1745 bool ilace, 1859 bool ilace,
1746 enum omap_dss_rotation_type rotation_type, 1860 enum omap_dss_rotation_type rotation_type,
1747 u8 rotation, int mirror, 1861 u8 rotation, bool mirror,
1748 u8 global_alpha, u8 pre_mult_alpha, 1862 u8 global_alpha, u8 pre_mult_alpha,
1749 enum omap_channel channel, u32 puv_addr) 1863 enum omap_channel channel, u32 puv_addr)
1750{ 1864{
@@ -1758,6 +1872,14 @@ static int _dispc_setup_plane(enum omap_plane plane,
1758 u16 frame_height = height; 1872 u16 frame_height = height;
1759 unsigned int field_offset = 0; 1873 unsigned int field_offset = 0;
1760 1874
1875 DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %dx%d -> "
1876 "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n",
1877 plane, paddr, screen_width, pos_x, pos_y,
1878 width, height,
1879 out_width, out_height,
1880 ilace, color_mode,
1881 rotation, mirror, channel);
1882
1761 if (paddr == 0) 1883 if (paddr == 0)
1762 return -EINVAL; 1884 return -EINVAL;
1763 1885
@@ -1903,9 +2025,13 @@ static int _dispc_setup_plane(enum omap_plane plane,
1903 return 0; 2025 return 0;
1904} 2026}
1905 2027
1906static void _dispc_enable_plane(enum omap_plane plane, bool enable) 2028int dispc_enable_plane(enum omap_plane plane, bool enable)
1907{ 2029{
2030 DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
2031
1908 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0); 2032 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0);
2033
2034 return 0;
1909} 2035}
1910 2036
1911static void dispc_disable_isr(void *data, u32 mask) 2037static void dispc_disable_isr(void *data, u32 mask)
@@ -1929,8 +2055,6 @@ static void dispc_enable_lcd_out(enum omap_channel channel, bool enable)
1929 int r; 2055 int r;
1930 u32 irq; 2056 u32 irq;
1931 2057
1932 enable_clocks(1);
1933
1934 /* When we disable LCD output, we need to wait until frame is done. 2058 /* When we disable LCD output, we need to wait until frame is done.
1935 * Otherwise the DSS is still working, and turning off the clocks 2059 * Otherwise the DSS is still working, and turning off the clocks
1936 * prevents DSS from going to OFF mode */ 2060 * prevents DSS from going to OFF mode */
@@ -1964,8 +2088,6 @@ static void dispc_enable_lcd_out(enum omap_channel channel, bool enable)
1964 if (r) 2088 if (r)
1965 DSSERR("failed to unregister FRAMEDONE isr\n"); 2089 DSSERR("failed to unregister FRAMEDONE isr\n");
1966 } 2090 }
1967
1968 enable_clocks(0);
1969} 2091}
1970 2092
1971static void _enable_digit_out(bool enable) 2093static void _enable_digit_out(bool enable)
@@ -1978,12 +2100,8 @@ static void dispc_enable_digit_out(bool enable)
1978 struct completion frame_done_completion; 2100 struct completion frame_done_completion;
1979 int r; 2101 int r;
1980 2102
1981 enable_clocks(1); 2103 if (REG_GET(DISPC_CONTROL, 1, 1) == enable)
1982
1983 if (REG_GET(DISPC_CONTROL, 1, 1) == enable) {
1984 enable_clocks(0);
1985 return; 2104 return;
1986 }
1987 2105
1988 if (enable) { 2106 if (enable) {
1989 unsigned long flags; 2107 unsigned long flags;
@@ -2035,8 +2153,6 @@ static void dispc_enable_digit_out(bool enable)
2035 _omap_dispc_set_irqs(); 2153 _omap_dispc_set_irqs();
2036 spin_unlock_irqrestore(&dispc.irq_lock, flags); 2154 spin_unlock_irqrestore(&dispc.irq_lock, flags);
2037 } 2155 }
2038
2039 enable_clocks(0);
2040} 2156}
2041 2157
2042bool dispc_is_channel_enabled(enum omap_channel channel) 2158bool dispc_is_channel_enabled(enum omap_channel channel)
@@ -2067,9 +2183,7 @@ void dispc_lcd_enable_signal_polarity(bool act_high)
2067 if (!dss_has_feature(FEAT_LCDENABLEPOL)) 2183 if (!dss_has_feature(FEAT_LCDENABLEPOL))
2068 return; 2184 return;
2069 2185
2070 enable_clocks(1);
2071 REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29); 2186 REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
2072 enable_clocks(0);
2073} 2187}
2074 2188
2075void dispc_lcd_enable_signal(bool enable) 2189void dispc_lcd_enable_signal(bool enable)
@@ -2077,9 +2191,7 @@ void dispc_lcd_enable_signal(bool enable)
2077 if (!dss_has_feature(FEAT_LCDENABLESIGNAL)) 2191 if (!dss_has_feature(FEAT_LCDENABLESIGNAL))
2078 return; 2192 return;
2079 2193
2080 enable_clocks(1);
2081 REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28); 2194 REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28);
2082 enable_clocks(0);
2083} 2195}
2084 2196
2085void dispc_pck_free_enable(bool enable) 2197void dispc_pck_free_enable(bool enable)
@@ -2087,19 +2199,15 @@ void dispc_pck_free_enable(bool enable)
2087 if (!dss_has_feature(FEAT_PCKFREEENABLE)) 2199 if (!dss_has_feature(FEAT_PCKFREEENABLE))
2088 return; 2200 return;
2089 2201
2090 enable_clocks(1);
2091 REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27); 2202 REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
2092 enable_clocks(0);
2093} 2203}
2094 2204
2095void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable) 2205void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable)
2096{ 2206{
2097 enable_clocks(1);
2098 if (channel == OMAP_DSS_CHANNEL_LCD2) 2207 if (channel == OMAP_DSS_CHANNEL_LCD2)
2099 REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16); 2208 REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16);
2100 else 2209 else
2101 REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16); 2210 REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16);
2102 enable_clocks(0);
2103} 2211}
2104 2212
2105 2213
@@ -2122,27 +2230,21 @@ void dispc_set_lcd_display_type(enum omap_channel channel,
2122 return; 2230 return;
2123 } 2231 }
2124 2232
2125 enable_clocks(1);
2126 if (channel == OMAP_DSS_CHANNEL_LCD2) 2233 if (channel == OMAP_DSS_CHANNEL_LCD2)
2127 REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3); 2234 REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3);
2128 else 2235 else
2129 REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3); 2236 REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
2130 enable_clocks(0);
2131} 2237}
2132 2238
2133void dispc_set_loadmode(enum omap_dss_load_mode mode) 2239void dispc_set_loadmode(enum omap_dss_load_mode mode)
2134{ 2240{
2135 enable_clocks(1);
2136 REG_FLD_MOD(DISPC_CONFIG, mode, 2, 1); 2241 REG_FLD_MOD(DISPC_CONFIG, mode, 2, 1);
2137 enable_clocks(0);
2138} 2242}
2139 2243
2140 2244
2141void dispc_set_default_color(enum omap_channel channel, u32 color) 2245void dispc_set_default_color(enum omap_channel channel, u32 color)
2142{ 2246{
2143 enable_clocks(1);
2144 dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color); 2247 dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color);
2145 enable_clocks(0);
2146} 2248}
2147 2249
2148u32 dispc_get_default_color(enum omap_channel channel) 2250u32 dispc_get_default_color(enum omap_channel channel)
@@ -2153,9 +2255,7 @@ u32 dispc_get_default_color(enum omap_channel channel)
2153 channel != OMAP_DSS_CHANNEL_LCD && 2255 channel != OMAP_DSS_CHANNEL_LCD &&
2154 channel != OMAP_DSS_CHANNEL_LCD2); 2256 channel != OMAP_DSS_CHANNEL_LCD2);
2155 2257
2156 enable_clocks(1);
2157 l = dispc_read_reg(DISPC_DEFAULT_COLOR(channel)); 2258 l = dispc_read_reg(DISPC_DEFAULT_COLOR(channel));
2158 enable_clocks(0);
2159 2259
2160 return l; 2260 return l;
2161} 2261}
@@ -2164,7 +2264,6 @@ void dispc_set_trans_key(enum omap_channel ch,
2164 enum omap_dss_trans_key_type type, 2264 enum omap_dss_trans_key_type type,
2165 u32 trans_key) 2265 u32 trans_key)
2166{ 2266{
2167 enable_clocks(1);
2168 if (ch == OMAP_DSS_CHANNEL_LCD) 2267 if (ch == OMAP_DSS_CHANNEL_LCD)
2169 REG_FLD_MOD(DISPC_CONFIG, type, 11, 11); 2268 REG_FLD_MOD(DISPC_CONFIG, type, 11, 11);
2170 else if (ch == OMAP_DSS_CHANNEL_DIGIT) 2269 else if (ch == OMAP_DSS_CHANNEL_DIGIT)
@@ -2173,14 +2272,12 @@ void dispc_set_trans_key(enum omap_channel ch,
2173 REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11); 2272 REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11);
2174 2273
2175 dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key); 2274 dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key);
2176 enable_clocks(0);
2177} 2275}
2178 2276
2179void dispc_get_trans_key(enum omap_channel ch, 2277void dispc_get_trans_key(enum omap_channel ch,
2180 enum omap_dss_trans_key_type *type, 2278 enum omap_dss_trans_key_type *type,
2181 u32 *trans_key) 2279 u32 *trans_key)
2182{ 2280{
2183 enable_clocks(1);
2184 if (type) { 2281 if (type) {
2185 if (ch == OMAP_DSS_CHANNEL_LCD) 2282 if (ch == OMAP_DSS_CHANNEL_LCD)
2186 *type = REG_GET(DISPC_CONFIG, 11, 11); 2283 *type = REG_GET(DISPC_CONFIG, 11, 11);
@@ -2194,33 +2291,28 @@ void dispc_get_trans_key(enum omap_channel ch,
2194 2291
2195 if (trans_key) 2292 if (trans_key)
2196 *trans_key = dispc_read_reg(DISPC_TRANS_COLOR(ch)); 2293 *trans_key = dispc_read_reg(DISPC_TRANS_COLOR(ch));
2197 enable_clocks(0);
2198} 2294}
2199 2295
2200void dispc_enable_trans_key(enum omap_channel ch, bool enable) 2296void dispc_enable_trans_key(enum omap_channel ch, bool enable)
2201{ 2297{
2202 enable_clocks(1);
2203 if (ch == OMAP_DSS_CHANNEL_LCD) 2298 if (ch == OMAP_DSS_CHANNEL_LCD)
2204 REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10); 2299 REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10);
2205 else if (ch == OMAP_DSS_CHANNEL_DIGIT) 2300 else if (ch == OMAP_DSS_CHANNEL_DIGIT)
2206 REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12); 2301 REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12);
2207 else /* OMAP_DSS_CHANNEL_LCD2 */ 2302 else /* OMAP_DSS_CHANNEL_LCD2 */
2208 REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10); 2303 REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10);
2209 enable_clocks(0);
2210} 2304}
2211void dispc_enable_alpha_blending(enum omap_channel ch, bool enable) 2305void dispc_enable_alpha_blending(enum omap_channel ch, bool enable)
2212{ 2306{
2213 if (!dss_has_feature(FEAT_GLOBAL_ALPHA)) 2307 if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
2214 return; 2308 return;
2215 2309
2216 enable_clocks(1);
2217 if (ch == OMAP_DSS_CHANNEL_LCD) 2310 if (ch == OMAP_DSS_CHANNEL_LCD)
2218 REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18); 2311 REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18);
2219 else if (ch == OMAP_DSS_CHANNEL_DIGIT) 2312 else if (ch == OMAP_DSS_CHANNEL_DIGIT)
2220 REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19); 2313 REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19);
2221 else /* OMAP_DSS_CHANNEL_LCD2 */ 2314 else /* OMAP_DSS_CHANNEL_LCD2 */
2222 REG_FLD_MOD(DISPC_CONFIG2, enable, 18, 18); 2315 REG_FLD_MOD(DISPC_CONFIG2, enable, 18, 18);
2223 enable_clocks(0);
2224} 2316}
2225bool dispc_alpha_blending_enabled(enum omap_channel ch) 2317bool dispc_alpha_blending_enabled(enum omap_channel ch)
2226{ 2318{
@@ -2229,7 +2321,6 @@ bool dispc_alpha_blending_enabled(enum omap_channel ch)
2229 if (!dss_has_feature(FEAT_GLOBAL_ALPHA)) 2321 if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
2230 return false; 2322 return false;
2231 2323
2232 enable_clocks(1);
2233 if (ch == OMAP_DSS_CHANNEL_LCD) 2324 if (ch == OMAP_DSS_CHANNEL_LCD)
2234 enabled = REG_GET(DISPC_CONFIG, 18, 18); 2325 enabled = REG_GET(DISPC_CONFIG, 18, 18);
2235 else if (ch == OMAP_DSS_CHANNEL_DIGIT) 2326 else if (ch == OMAP_DSS_CHANNEL_DIGIT)
@@ -2238,7 +2329,6 @@ bool dispc_alpha_blending_enabled(enum omap_channel ch)
2238 enabled = REG_GET(DISPC_CONFIG2, 18, 18); 2329 enabled = REG_GET(DISPC_CONFIG2, 18, 18);
2239 else 2330 else
2240 BUG(); 2331 BUG();
2241 enable_clocks(0);
2242 2332
2243 return enabled; 2333 return enabled;
2244} 2334}
@@ -2248,7 +2338,6 @@ bool dispc_trans_key_enabled(enum omap_channel ch)
2248{ 2338{
2249 bool enabled; 2339 bool enabled;
2250 2340
2251 enable_clocks(1);
2252 if (ch == OMAP_DSS_CHANNEL_LCD) 2341 if (ch == OMAP_DSS_CHANNEL_LCD)
2253 enabled = REG_GET(DISPC_CONFIG, 10, 10); 2342 enabled = REG_GET(DISPC_CONFIG, 10, 10);
2254 else if (ch == OMAP_DSS_CHANNEL_DIGIT) 2343 else if (ch == OMAP_DSS_CHANNEL_DIGIT)
@@ -2257,7 +2346,6 @@ bool dispc_trans_key_enabled(enum omap_channel ch)
2257 enabled = REG_GET(DISPC_CONFIG2, 10, 10); 2346 enabled = REG_GET(DISPC_CONFIG2, 10, 10);
2258 else 2347 else
2259 BUG(); 2348 BUG();
2260 enable_clocks(0);
2261 2349
2262 return enabled; 2350 return enabled;
2263} 2351}
@@ -2285,12 +2373,10 @@ void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
2285 return; 2373 return;
2286 } 2374 }
2287 2375
2288 enable_clocks(1);
2289 if (channel == OMAP_DSS_CHANNEL_LCD2) 2376 if (channel == OMAP_DSS_CHANNEL_LCD2)
2290 REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8); 2377 REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8);
2291 else 2378 else
2292 REG_FLD_MOD(DISPC_CONTROL, code, 9, 8); 2379 REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
2293 enable_clocks(0);
2294} 2380}
2295 2381
2296void dispc_set_parallel_interface_mode(enum omap_channel channel, 2382void dispc_set_parallel_interface_mode(enum omap_channel channel,
@@ -2322,8 +2408,6 @@ void dispc_set_parallel_interface_mode(enum omap_channel channel,
2322 return; 2408 return;
2323 } 2409 }
2324 2410
2325 enable_clocks(1);
2326
2327 if (channel == OMAP_DSS_CHANNEL_LCD2) { 2411 if (channel == OMAP_DSS_CHANNEL_LCD2) {
2328 l = dispc_read_reg(DISPC_CONTROL2); 2412 l = dispc_read_reg(DISPC_CONTROL2);
2329 l = FLD_MOD(l, stallmode, 11, 11); 2413 l = FLD_MOD(l, stallmode, 11, 11);
@@ -2335,8 +2419,6 @@ void dispc_set_parallel_interface_mode(enum omap_channel channel,
2335 l = FLD_MOD(l, gpout1, 16, 16); 2419 l = FLD_MOD(l, gpout1, 16, 16);
2336 dispc_write_reg(DISPC_CONTROL, l); 2420 dispc_write_reg(DISPC_CONTROL, l);
2337 } 2421 }
2338
2339 enable_clocks(0);
2340} 2422}
2341 2423
2342static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp, 2424static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
@@ -2389,10 +2471,8 @@ static void _dispc_set_lcd_timings(enum omap_channel channel, int hsw,
2389 FLD_VAL(vbp, 31, 20); 2471 FLD_VAL(vbp, 31, 20);
2390 } 2472 }
2391 2473
2392 enable_clocks(1);
2393 dispc_write_reg(DISPC_TIMING_H(channel), timing_h); 2474 dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
2394 dispc_write_reg(DISPC_TIMING_V(channel), timing_v); 2475 dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
2395 enable_clocks(0);
2396} 2476}
2397 2477
2398/* change name to mode? */ 2478/* change name to mode? */
@@ -2435,10 +2515,8 @@ static void dispc_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
2435 BUG_ON(lck_div < 1); 2515 BUG_ON(lck_div < 1);
2436 BUG_ON(pck_div < 2); 2516 BUG_ON(pck_div < 2);
2437 2517
2438 enable_clocks(1);
2439 dispc_write_reg(DISPC_DIVISORo(channel), 2518 dispc_write_reg(DISPC_DIVISORo(channel),
2440 FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0)); 2519 FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
2441 enable_clocks(0);
2442} 2520}
2443 2521
2444static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div, 2522static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div,
@@ -2457,7 +2535,7 @@ unsigned long dispc_fclk_rate(void)
2457 2535
2458 switch (dss_get_dispc_clk_source()) { 2536 switch (dss_get_dispc_clk_source()) {
2459 case OMAP_DSS_CLK_SRC_FCK: 2537 case OMAP_DSS_CLK_SRC_FCK:
2460 r = dss_clk_get_rate(DSS_CLK_FCK); 2538 r = clk_get_rate(dispc.dss_clk);
2461 break; 2539 break;
2462 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 2540 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
2463 dsidev = dsi_get_dsidev_from_id(0); 2541 dsidev = dsi_get_dsidev_from_id(0);
@@ -2487,7 +2565,7 @@ unsigned long dispc_lclk_rate(enum omap_channel channel)
2487 2565
2488 switch (dss_get_lcd_clk_source(channel)) { 2566 switch (dss_get_lcd_clk_source(channel)) {
2489 case OMAP_DSS_CLK_SRC_FCK: 2567 case OMAP_DSS_CLK_SRC_FCK:
2490 r = dss_clk_get_rate(DSS_CLK_FCK); 2568 r = clk_get_rate(dispc.dss_clk);
2491 break; 2569 break;
2492 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 2570 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
2493 dsidev = dsi_get_dsidev_from_id(0); 2571 dsidev = dsi_get_dsidev_from_id(0);
@@ -2526,7 +2604,8 @@ void dispc_dump_clocks(struct seq_file *s)
2526 enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); 2604 enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
2527 enum omap_dss_clk_source lcd_clk_src; 2605 enum omap_dss_clk_source lcd_clk_src;
2528 2606
2529 enable_clocks(1); 2607 if (dispc_runtime_get())
2608 return;
2530 2609
2531 seq_printf(s, "- DISPC -\n"); 2610 seq_printf(s, "- DISPC -\n");
2532 2611
@@ -2574,7 +2653,8 @@ void dispc_dump_clocks(struct seq_file *s)
2574 seq_printf(s, "pck\t\t%-16lupck div\t%u\n", 2653 seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
2575 dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd); 2654 dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd);
2576 } 2655 }
2577 enable_clocks(0); 2656
2657 dispc_runtime_put();
2578} 2658}
2579 2659
2580#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 2660#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -2629,7 +2709,8 @@ void dispc_dump_regs(struct seq_file *s)
2629{ 2709{
2630#define DUMPREG(r) seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(r)) 2710#define DUMPREG(r) seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(r))
2631 2711
2632 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); 2712 if (dispc_runtime_get())
2713 return;
2633 2714
2634 DUMPREG(DISPC_REVISION); 2715 DUMPREG(DISPC_REVISION);
2635 DUMPREG(DISPC_SYSCONFIG); 2716 DUMPREG(DISPC_SYSCONFIG);
@@ -2649,7 +2730,8 @@ void dispc_dump_regs(struct seq_file *s)
2649 DUMPREG(DISPC_TIMING_V(OMAP_DSS_CHANNEL_LCD)); 2730 DUMPREG(DISPC_TIMING_V(OMAP_DSS_CHANNEL_LCD));
2650 DUMPREG(DISPC_POL_FREQ(OMAP_DSS_CHANNEL_LCD)); 2731 DUMPREG(DISPC_POL_FREQ(OMAP_DSS_CHANNEL_LCD));
2651 DUMPREG(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD)); 2732 DUMPREG(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD));
2652 DUMPREG(DISPC_GLOBAL_ALPHA); 2733 if (dss_has_feature(FEAT_GLOBAL_ALPHA))
2734 DUMPREG(DISPC_GLOBAL_ALPHA);
2653 DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT)); 2735 DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
2654 DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_LCD)); 2736 DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
2655 if (dss_has_feature(FEAT_MGR_LCD2)) { 2737 if (dss_has_feature(FEAT_MGR_LCD2)) {
@@ -2680,20 +2762,25 @@ void dispc_dump_regs(struct seq_file *s)
2680 DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD)); 2762 DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
2681 DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD)); 2763 DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
2682 2764
2683 DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); 2765 if (dss_has_feature(FEAT_CPR)) {
2684 DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); 2766 DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
2685 DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); 2767 DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
2768 DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
2769 }
2686 if (dss_has_feature(FEAT_MGR_LCD2)) { 2770 if (dss_has_feature(FEAT_MGR_LCD2)) {
2687 DUMPREG(DISPC_DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2)); 2771 DUMPREG(DISPC_DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
2688 DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2)); 2772 DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
2689 DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2)); 2773 DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
2690 2774
2691 DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); 2775 if (dss_has_feature(FEAT_CPR)) {
2692 DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); 2776 DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
2693 DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); 2777 DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
2778 DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
2779 }
2694 } 2780 }
2695 2781
2696 DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX)); 2782 if (dss_has_feature(FEAT_PRELOAD))
2783 DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX));
2697 2784
2698 DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO1)); 2785 DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO1));
2699 DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO1)); 2786 DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO1));
@@ -2744,14 +2831,16 @@ void dispc_dump_regs(struct seq_file *s)
2744 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2)); 2831 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2));
2745 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3)); 2832 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3));
2746 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4)); 2833 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4));
2747 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 0)); 2834 if (dss_has_feature(FEAT_FIR_COEF_V)) {
2748 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 1)); 2835 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 0));
2749 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 2)); 2836 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 1));
2750 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 3)); 2837 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 2));
2751 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 4)); 2838 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 3));
2752 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 5)); 2839 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 4));
2753 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 6)); 2840 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 5));
2754 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 7)); 2841 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 6));
2842 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 7));
2843 }
2755 2844
2756 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { 2845 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
2757 DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO1)); 2846 DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO1));
@@ -2812,14 +2901,17 @@ void dispc_dump_regs(struct seq_file *s)
2812 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2)); 2901 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2));
2813 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3)); 2902 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3));
2814 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4)); 2903 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4));
2815 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 0)); 2904
2816 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 1)); 2905 if (dss_has_feature(FEAT_FIR_COEF_V)) {
2817 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 2)); 2906 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 0));
2818 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 3)); 2907 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 1));
2819 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 4)); 2908 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 2));
2820 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 5)); 2909 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 3));
2821 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 6)); 2910 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 4));
2822 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 7)); 2911 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 5));
2912 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 6));
2913 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 7));
2914 }
2823 2915
2824 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { 2916 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
2825 DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO2)); 2917 DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO2));
@@ -2858,10 +2950,12 @@ void dispc_dump_regs(struct seq_file *s)
2858 if (dss_has_feature(FEAT_ATTR2)) 2950 if (dss_has_feature(FEAT_ATTR2))
2859 DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2)); 2951 DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
2860 2952
2861 DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO1)); 2953 if (dss_has_feature(FEAT_PRELOAD)) {
2862 DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO2)); 2954 DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO1));
2955 DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO2));
2956 }
2863 2957
2864 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 2958 dispc_runtime_put();
2865#undef DUMPREG 2959#undef DUMPREG
2866} 2960}
2867 2961
@@ -2882,9 +2976,7 @@ static void _dispc_set_pol_freq(enum omap_channel channel, bool onoff, bool rf,
2882 l |= FLD_VAL(acbi, 11, 8); 2976 l |= FLD_VAL(acbi, 11, 8);
2883 l |= FLD_VAL(acb, 7, 0); 2977 l |= FLD_VAL(acb, 7, 0);
2884 2978
2885 enable_clocks(1);
2886 dispc_write_reg(DISPC_POL_FREQ(channel), l); 2979 dispc_write_reg(DISPC_POL_FREQ(channel), l);
2887 enable_clocks(0);
2888} 2980}
2889 2981
2890void dispc_set_pol_freq(enum omap_channel channel, 2982void dispc_set_pol_freq(enum omap_channel channel,
@@ -3005,15 +3097,11 @@ static void _omap_dispc_set_irqs(void)
3005 mask |= isr_data->mask; 3097 mask |= isr_data->mask;
3006 } 3098 }
3007 3099
3008 enable_clocks(1);
3009
3010 old_mask = dispc_read_reg(DISPC_IRQENABLE); 3100 old_mask = dispc_read_reg(DISPC_IRQENABLE);
3011 /* clear the irqstatus for newly enabled irqs */ 3101 /* clear the irqstatus for newly enabled irqs */
3012 dispc_write_reg(DISPC_IRQSTATUS, (mask ^ old_mask) & mask); 3102 dispc_write_reg(DISPC_IRQSTATUS, (mask ^ old_mask) & mask);
3013 3103
3014 dispc_write_reg(DISPC_IRQENABLE, mask); 3104 dispc_write_reg(DISPC_IRQENABLE, mask);
3015
3016 enable_clocks(0);
3017} 3105}
3018 3106
3019int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask) 3107int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
@@ -3522,13 +3610,6 @@ static void _omap_dispc_initial_config(void)
3522{ 3610{
3523 u32 l; 3611 u32 l;
3524 3612
3525 l = dispc_read_reg(DISPC_SYSCONFIG);
3526 l = FLD_MOD(l, 2, 13, 12); /* MIDLEMODE: smart standby */
3527 l = FLD_MOD(l, 2, 4, 3); /* SIDLEMODE: smart idle */
3528 l = FLD_MOD(l, 1, 2, 2); /* ENWAKEUP */
3529 l = FLD_MOD(l, 1, 0, 0); /* AUTOIDLE */
3530 dispc_write_reg(DISPC_SYSCONFIG, l);
3531
3532 /* Exclusively enable DISPC_CORE_CLK and set divider to 1 */ 3613 /* Exclusively enable DISPC_CORE_CLK and set divider to 1 */
3533 if (dss_has_feature(FEAT_CORE_CLK_DIV)) { 3614 if (dss_has_feature(FEAT_CORE_CLK_DIV)) {
3534 l = dispc_read_reg(DISPC_DIVISOR); 3615 l = dispc_read_reg(DISPC_DIVISOR);
@@ -3552,58 +3633,8 @@ static void _omap_dispc_initial_config(void)
3552 dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY); 3633 dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY);
3553 3634
3554 dispc_read_plane_fifo_sizes(); 3635 dispc_read_plane_fifo_sizes();
3555}
3556 3636
3557int dispc_enable_plane(enum omap_plane plane, bool enable) 3637 dispc_configure_burst_sizes();
3558{
3559 DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
3560
3561 enable_clocks(1);
3562 _dispc_enable_plane(plane, enable);
3563 enable_clocks(0);
3564
3565 return 0;
3566}
3567
3568int dispc_setup_plane(enum omap_plane plane,
3569 u32 paddr, u16 screen_width,
3570 u16 pos_x, u16 pos_y,
3571 u16 width, u16 height,
3572 u16 out_width, u16 out_height,
3573 enum omap_color_mode color_mode,
3574 bool ilace,
3575 enum omap_dss_rotation_type rotation_type,
3576 u8 rotation, bool mirror, u8 global_alpha,
3577 u8 pre_mult_alpha, enum omap_channel channel,
3578 u32 puv_addr)
3579{
3580 int r = 0;
3581
3582 DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d, %d, %dx%d -> "
3583 "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n",
3584 plane, paddr, screen_width, pos_x, pos_y,
3585 width, height,
3586 out_width, out_height,
3587 ilace, color_mode,
3588 rotation, mirror, channel);
3589
3590 enable_clocks(1);
3591
3592 r = _dispc_setup_plane(plane,
3593 paddr, screen_width,
3594 pos_x, pos_y,
3595 width, height,
3596 out_width, out_height,
3597 color_mode, ilace,
3598 rotation_type,
3599 rotation, mirror,
3600 global_alpha,
3601 pre_mult_alpha,
3602 channel, puv_addr);
3603
3604 enable_clocks(0);
3605
3606 return r;
3607} 3638}
3608 3639
3609/* DISPC HW IP initialisation */ 3640/* DISPC HW IP initialisation */
@@ -3612,9 +3643,19 @@ static int omap_dispchw_probe(struct platform_device *pdev)
3612 u32 rev; 3643 u32 rev;
3613 int r = 0; 3644 int r = 0;
3614 struct resource *dispc_mem; 3645 struct resource *dispc_mem;
3646 struct clk *clk;
3615 3647
3616 dispc.pdev = pdev; 3648 dispc.pdev = pdev;
3617 3649
3650 clk = clk_get(&pdev->dev, "fck");
3651 if (IS_ERR(clk)) {
3652 DSSERR("can't get fck\n");
3653 r = PTR_ERR(clk);
3654 goto err_get_clk;
3655 }
3656
3657 dispc.dss_clk = clk;
3658
3618 spin_lock_init(&dispc.irq_lock); 3659 spin_lock_init(&dispc.irq_lock);
3619 3660
3620#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 3661#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -3628,62 +3669,103 @@ static int omap_dispchw_probe(struct platform_device *pdev)
3628 if (!dispc_mem) { 3669 if (!dispc_mem) {
3629 DSSERR("can't get IORESOURCE_MEM DISPC\n"); 3670 DSSERR("can't get IORESOURCE_MEM DISPC\n");
3630 r = -EINVAL; 3671 r = -EINVAL;
3631 goto fail0; 3672 goto err_ioremap;
3632 } 3673 }
3633 dispc.base = ioremap(dispc_mem->start, resource_size(dispc_mem)); 3674 dispc.base = ioremap(dispc_mem->start, resource_size(dispc_mem));
3634 if (!dispc.base) { 3675 if (!dispc.base) {
3635 DSSERR("can't ioremap DISPC\n"); 3676 DSSERR("can't ioremap DISPC\n");
3636 r = -ENOMEM; 3677 r = -ENOMEM;
3637 goto fail0; 3678 goto err_ioremap;
3638 } 3679 }
3639 dispc.irq = platform_get_irq(dispc.pdev, 0); 3680 dispc.irq = platform_get_irq(dispc.pdev, 0);
3640 if (dispc.irq < 0) { 3681 if (dispc.irq < 0) {
3641 DSSERR("platform_get_irq failed\n"); 3682 DSSERR("platform_get_irq failed\n");
3642 r = -ENODEV; 3683 r = -ENODEV;
3643 goto fail1; 3684 goto err_irq;
3644 } 3685 }
3645 3686
3646 r = request_irq(dispc.irq, omap_dispc_irq_handler, IRQF_SHARED, 3687 r = request_irq(dispc.irq, omap_dispc_irq_handler, IRQF_SHARED,
3647 "OMAP DISPC", dispc.pdev); 3688 "OMAP DISPC", dispc.pdev);
3648 if (r < 0) { 3689 if (r < 0) {
3649 DSSERR("request_irq failed\n"); 3690 DSSERR("request_irq failed\n");
3650 goto fail1; 3691 goto err_irq;
3651 } 3692 }
3652 3693
3653 enable_clocks(1); 3694 pm_runtime_enable(&pdev->dev);
3695
3696 r = dispc_runtime_get();
3697 if (r)
3698 goto err_runtime_get;
3654 3699
3655 _omap_dispc_initial_config(); 3700 _omap_dispc_initial_config();
3656 3701
3657 _omap_dispc_initialize_irq(); 3702 _omap_dispc_initialize_irq();
3658 3703
3659 dispc_save_context();
3660
3661 rev = dispc_read_reg(DISPC_REVISION); 3704 rev = dispc_read_reg(DISPC_REVISION);
3662 dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n", 3705 dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n",
3663 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); 3706 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
3664 3707
3665 enable_clocks(0); 3708 dispc_runtime_put();
3666 3709
3667 return 0; 3710 return 0;
3668fail1: 3711
3712err_runtime_get:
3713 pm_runtime_disable(&pdev->dev);
3714 free_irq(dispc.irq, dispc.pdev);
3715err_irq:
3669 iounmap(dispc.base); 3716 iounmap(dispc.base);
3670fail0: 3717err_ioremap:
3718 clk_put(dispc.dss_clk);
3719err_get_clk:
3671 return r; 3720 return r;
3672} 3721}
3673 3722
3674static int omap_dispchw_remove(struct platform_device *pdev) 3723static int omap_dispchw_remove(struct platform_device *pdev)
3675{ 3724{
3725 pm_runtime_disable(&pdev->dev);
3726
3727 clk_put(dispc.dss_clk);
3728
3676 free_irq(dispc.irq, dispc.pdev); 3729 free_irq(dispc.irq, dispc.pdev);
3677 iounmap(dispc.base); 3730 iounmap(dispc.base);
3678 return 0; 3731 return 0;
3679} 3732}
3680 3733
3734static int dispc_runtime_suspend(struct device *dev)
3735{
3736 dispc_save_context();
3737 clk_disable(dispc.dss_clk);
3738 dss_runtime_put();
3739
3740 return 0;
3741}
3742
3743static int dispc_runtime_resume(struct device *dev)
3744{
3745 int r;
3746
3747 r = dss_runtime_get();
3748 if (r < 0)
3749 return r;
3750
3751 clk_enable(dispc.dss_clk);
3752 dispc_restore_context();
3753
3754 return 0;
3755}
3756
3757static const struct dev_pm_ops dispc_pm_ops = {
3758 .runtime_suspend = dispc_runtime_suspend,
3759 .runtime_resume = dispc_runtime_resume,
3760};
3761
3681static struct platform_driver omap_dispchw_driver = { 3762static struct platform_driver omap_dispchw_driver = {
3682 .probe = omap_dispchw_probe, 3763 .probe = omap_dispchw_probe,
3683 .remove = omap_dispchw_remove, 3764 .remove = omap_dispchw_remove,
3684 .driver = { 3765 .driver = {
3685 .name = "omapdss_dispc", 3766 .name = "omapdss_dispc",
3686 .owner = THIS_MODULE, 3767 .owner = THIS_MODULE,
3768 .pm = &dispc_pm_ops,
3687 }, 3769 },
3688}; 3770};
3689 3771
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index c2dfc8c50057..94495e45ec5a 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -29,6 +29,7 @@
29 29
30#include <video/omapdss.h> 30#include <video/omapdss.h>
31#include "dss.h" 31#include "dss.h"
32#include "dss_features.h"
32 33
33static ssize_t display_enabled_show(struct device *dev, 34static ssize_t display_enabled_show(struct device *dev,
34 struct device_attribute *attr, char *buf) 35 struct device_attribute *attr, char *buf)
@@ -65,48 +66,6 @@ static ssize_t display_enabled_store(struct device *dev,
65 return size; 66 return size;
66} 67}
67 68
68static ssize_t display_upd_mode_show(struct device *dev,
69 struct device_attribute *attr, char *buf)
70{
71 struct omap_dss_device *dssdev = to_dss_device(dev);
72 enum omap_dss_update_mode mode = OMAP_DSS_UPDATE_AUTO;
73 if (dssdev->driver->get_update_mode)
74 mode = dssdev->driver->get_update_mode(dssdev);
75 return snprintf(buf, PAGE_SIZE, "%d\n", mode);
76}
77
78static ssize_t display_upd_mode_store(struct device *dev,
79 struct device_attribute *attr,
80 const char *buf, size_t size)
81{
82 struct omap_dss_device *dssdev = to_dss_device(dev);
83 int val, r;
84 enum omap_dss_update_mode mode;
85
86 if (!dssdev->driver->set_update_mode)
87 return -EINVAL;
88
89 r = kstrtoint(buf, 0, &val);
90 if (r)
91 return r;
92
93 switch (val) {
94 case OMAP_DSS_UPDATE_DISABLED:
95 case OMAP_DSS_UPDATE_AUTO:
96 case OMAP_DSS_UPDATE_MANUAL:
97 mode = (enum omap_dss_update_mode)val;
98 break;
99 default:
100 return -EINVAL;
101 }
102
103 r = dssdev->driver->set_update_mode(dssdev, mode);
104 if (r)
105 return r;
106
107 return size;
108}
109
110static ssize_t display_tear_show(struct device *dev, 69static ssize_t display_tear_show(struct device *dev,
111 struct device_attribute *attr, char *buf) 70 struct device_attribute *attr, char *buf)
112{ 71{
@@ -294,8 +253,6 @@ static ssize_t display_wss_store(struct device *dev,
294 253
295static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR, 254static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
296 display_enabled_show, display_enabled_store); 255 display_enabled_show, display_enabled_store);
297static DEVICE_ATTR(update_mode, S_IRUGO|S_IWUSR,
298 display_upd_mode_show, display_upd_mode_store);
299static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR, 256static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
300 display_tear_show, display_tear_store); 257 display_tear_show, display_tear_store);
301static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR, 258static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR,
@@ -309,7 +266,6 @@ static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
309 266
310static struct device_attribute *display_sysfs_attrs[] = { 267static struct device_attribute *display_sysfs_attrs[] = {
311 &dev_attr_enabled, 268 &dev_attr_enabled,
312 &dev_attr_update_mode,
313 &dev_attr_tear_elim, 269 &dev_attr_tear_elim,
314 &dev_attr_timings, 270 &dev_attr_timings,
315 &dev_attr_rotate, 271 &dev_attr_rotate,
@@ -327,16 +283,13 @@ void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
327EXPORT_SYMBOL(omapdss_default_get_resolution); 283EXPORT_SYMBOL(omapdss_default_get_resolution);
328 284
329void default_get_overlay_fifo_thresholds(enum omap_plane plane, 285void default_get_overlay_fifo_thresholds(enum omap_plane plane,
330 u32 fifo_size, enum omap_burst_size *burst_size, 286 u32 fifo_size, u32 burst_size,
331 u32 *fifo_low, u32 *fifo_high) 287 u32 *fifo_low, u32 *fifo_high)
332{ 288{
333 unsigned burst_size_bytes; 289 unsigned buf_unit = dss_feat_get_buffer_size_unit();
334
335 *burst_size = OMAP_DSS_BURST_16x32;
336 burst_size_bytes = 16 * 32 / 8;
337 290
338 *fifo_high = fifo_size - 1; 291 *fifo_high = fifo_size - buf_unit;
339 *fifo_low = fifo_size - burst_size_bytes; 292 *fifo_low = fifo_size - burst_size;
340} 293}
341 294
342int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev) 295int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index ff6bd30132df..f053b180ecd7 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -23,7 +23,6 @@
23#define DSS_SUBSYS_NAME "DPI" 23#define DSS_SUBSYS_NAME "DPI"
24 24
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/clk.h>
27#include <linux/delay.h> 26#include <linux/delay.h>
28#include <linux/err.h> 27#include <linux/err.h>
29#include <linux/errno.h> 28#include <linux/errno.h>
@@ -130,8 +129,6 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
130 bool is_tft; 129 bool is_tft;
131 int r = 0; 130 int r = 0;
132 131
133 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
134
135 dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config, 132 dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
136 dssdev->panel.acbi, dssdev->panel.acb); 133 dssdev->panel.acbi, dssdev->panel.acb);
137 134
@@ -144,7 +141,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
144 r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000, 141 r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000,
145 &fck, &lck_div, &pck_div); 142 &fck, &lck_div, &pck_div);
146 if (r) 143 if (r)
147 goto err0; 144 return r;
148 145
149 pck = fck / lck_div / pck_div / 1000; 146 pck = fck / lck_div / pck_div / 1000;
150 147
@@ -158,12 +155,10 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
158 155
159 dispc_set_lcd_timings(dssdev->manager->id, t); 156 dispc_set_lcd_timings(dssdev->manager->id, t);
160 157
161err0: 158 return 0;
162 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
163 return r;
164} 159}
165 160
166static int dpi_basic_init(struct omap_dss_device *dssdev) 161static void dpi_basic_init(struct omap_dss_device *dssdev)
167{ 162{
168 bool is_tft; 163 bool is_tft;
169 164
@@ -175,8 +170,6 @@ static int dpi_basic_init(struct omap_dss_device *dssdev)
175 OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN); 170 OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN);
176 dispc_set_tft_data_lines(dssdev->manager->id, 171 dispc_set_tft_data_lines(dssdev->manager->id,
177 dssdev->phy.dpi.data_lines); 172 dssdev->phy.dpi.data_lines);
178
179 return 0;
180} 173}
181 174
182int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) 175int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
@@ -186,31 +179,38 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
186 r = omap_dss_start_device(dssdev); 179 r = omap_dss_start_device(dssdev);
187 if (r) { 180 if (r) {
188 DSSERR("failed to start device\n"); 181 DSSERR("failed to start device\n");
189 goto err0; 182 goto err_start_dev;
190 } 183 }
191 184
192 if (cpu_is_omap34xx()) { 185 if (cpu_is_omap34xx()) {
193 r = regulator_enable(dpi.vdds_dsi_reg); 186 r = regulator_enable(dpi.vdds_dsi_reg);
194 if (r) 187 if (r)
195 goto err1; 188 goto err_reg_enable;
196 } 189 }
197 190
198 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); 191 r = dss_runtime_get();
192 if (r)
193 goto err_get_dss;
199 194
200 r = dpi_basic_init(dssdev); 195 r = dispc_runtime_get();
201 if (r) 196 if (r)
202 goto err2; 197 goto err_get_dispc;
198
199 dpi_basic_init(dssdev);
203 200
204 if (dpi_use_dsi_pll(dssdev)) { 201 if (dpi_use_dsi_pll(dssdev)) {
205 dss_clk_enable(DSS_CLK_SYSCK); 202 r = dsi_runtime_get(dpi.dsidev);
203 if (r)
204 goto err_get_dsi;
205
206 r = dsi_pll_init(dpi.dsidev, 0, 1); 206 r = dsi_pll_init(dpi.dsidev, 0, 1);
207 if (r) 207 if (r)
208 goto err3; 208 goto err_dsi_pll_init;
209 } 209 }
210 210
211 r = dpi_set_mode(dssdev); 211 r = dpi_set_mode(dssdev);
212 if (r) 212 if (r)
213 goto err4; 213 goto err_set_mode;
214 214
215 mdelay(2); 215 mdelay(2);
216 216
@@ -218,19 +218,22 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
218 218
219 return 0; 219 return 0;
220 220
221err4: 221err_set_mode:
222 if (dpi_use_dsi_pll(dssdev)) 222 if (dpi_use_dsi_pll(dssdev))
223 dsi_pll_uninit(dpi.dsidev, true); 223 dsi_pll_uninit(dpi.dsidev, true);
224err3: 224err_dsi_pll_init:
225 if (dpi_use_dsi_pll(dssdev)) 225 if (dpi_use_dsi_pll(dssdev))
226 dss_clk_disable(DSS_CLK_SYSCK); 226 dsi_runtime_put(dpi.dsidev);
227err2: 227err_get_dsi:
228 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 228 dispc_runtime_put();
229err_get_dispc:
230 dss_runtime_put();
231err_get_dss:
229 if (cpu_is_omap34xx()) 232 if (cpu_is_omap34xx())
230 regulator_disable(dpi.vdds_dsi_reg); 233 regulator_disable(dpi.vdds_dsi_reg);
231err1: 234err_reg_enable:
232 omap_dss_stop_device(dssdev); 235 omap_dss_stop_device(dssdev);
233err0: 236err_start_dev:
234 return r; 237 return r;
235} 238}
236EXPORT_SYMBOL(omapdss_dpi_display_enable); 239EXPORT_SYMBOL(omapdss_dpi_display_enable);
@@ -242,10 +245,11 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
242 if (dpi_use_dsi_pll(dssdev)) { 245 if (dpi_use_dsi_pll(dssdev)) {
243 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); 246 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
244 dsi_pll_uninit(dpi.dsidev, true); 247 dsi_pll_uninit(dpi.dsidev, true);
245 dss_clk_disable(DSS_CLK_SYSCK); 248 dsi_runtime_put(dpi.dsidev);
246 } 249 }
247 250
248 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 251 dispc_runtime_put();
252 dss_runtime_put();
249 253
250 if (cpu_is_omap34xx()) 254 if (cpu_is_omap34xx())
251 regulator_disable(dpi.vdds_dsi_reg); 255 regulator_disable(dpi.vdds_dsi_reg);
@@ -257,11 +261,26 @@ EXPORT_SYMBOL(omapdss_dpi_display_disable);
257void dpi_set_timings(struct omap_dss_device *dssdev, 261void dpi_set_timings(struct omap_dss_device *dssdev,
258 struct omap_video_timings *timings) 262 struct omap_video_timings *timings)
259{ 263{
264 int r;
265
260 DSSDBG("dpi_set_timings\n"); 266 DSSDBG("dpi_set_timings\n");
261 dssdev->panel.timings = *timings; 267 dssdev->panel.timings = *timings;
262 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { 268 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
269 r = dss_runtime_get();
270 if (r)
271 return;
272
273 r = dispc_runtime_get();
274 if (r) {
275 dss_runtime_put();
276 return;
277 }
278
263 dpi_set_mode(dssdev); 279 dpi_set_mode(dssdev);
264 dispc_go(dssdev->manager->id); 280 dispc_go(dssdev->manager->id);
281
282 dispc_runtime_put();
283 dss_runtime_put();
265 } 284 }
266} 285}
267EXPORT_SYMBOL(dpi_set_timings); 286EXPORT_SYMBOL(dpi_set_timings);
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 345757cfcbee..7adbbeb84334 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -36,6 +36,7 @@
36#include <linux/sched.h> 36#include <linux/sched.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/debugfs.h> 38#include <linux/debugfs.h>
39#include <linux/pm_runtime.h>
39 40
40#include <video/omapdss.h> 41#include <video/omapdss.h>
41#include <plat/clock.h> 42#include <plat/clock.h>
@@ -267,8 +268,12 @@ struct dsi_isr_tables {
267struct dsi_data { 268struct dsi_data {
268 struct platform_device *pdev; 269 struct platform_device *pdev;
269 void __iomem *base; 270 void __iomem *base;
271
270 int irq; 272 int irq;
271 273
274 struct clk *dss_clk;
275 struct clk *sys_clk;
276
272 void (*dsi_mux_pads)(bool enable); 277 void (*dsi_mux_pads)(bool enable);
273 278
274 struct dsi_clock_info current_cinfo; 279 struct dsi_clock_info current_cinfo;
@@ -389,15 +394,6 @@ static inline u32 dsi_read_reg(struct platform_device *dsidev,
389 return __raw_readl(dsi->base + idx.idx); 394 return __raw_readl(dsi->base + idx.idx);
390} 395}
391 396
392
393void dsi_save_context(void)
394{
395}
396
397void dsi_restore_context(void)
398{
399}
400
401void dsi_bus_lock(struct omap_dss_device *dssdev) 397void dsi_bus_lock(struct omap_dss_device *dssdev)
402{ 398{
403 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 399 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -493,9 +489,18 @@ static void dsi_perf_show(struct platform_device *dsidev, const char *name)
493 total_bytes * 1000 / total_us); 489 total_bytes * 1000 / total_us);
494} 490}
495#else 491#else
496#define dsi_perf_mark_setup(x) 492static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
497#define dsi_perf_mark_start(x) 493{
498#define dsi_perf_show(x, y) 494}
495
496static inline void dsi_perf_mark_start(struct platform_device *dsidev)
497{
498}
499
500static inline void dsi_perf_show(struct platform_device *dsidev,
501 const char *name)
502{
503}
499#endif 504#endif
500 505
501static void print_irq_status(u32 status) 506static void print_irq_status(u32 status)
@@ -1039,13 +1044,27 @@ static u32 dsi_get_errors(struct platform_device *dsidev)
1039 return e; 1044 return e;
1040} 1045}
1041 1046
1042/* DSI func clock. this could also be dsi_pll_hsdiv_dsi_clk */ 1047int dsi_runtime_get(struct platform_device *dsidev)
1043static inline void enable_clocks(bool enable)
1044{ 1048{
1045 if (enable) 1049 int r;
1046 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); 1050 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1047 else 1051
1048 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 1052 DSSDBG("dsi_runtime_get\n");
1053
1054 r = pm_runtime_get_sync(&dsi->pdev->dev);
1055 WARN_ON(r < 0);
1056 return r < 0 ? r : 0;
1057}
1058
1059void dsi_runtime_put(struct platform_device *dsidev)
1060{
1061 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1062 int r;
1063
1064 DSSDBG("dsi_runtime_put\n");
1065
1066 r = pm_runtime_put(&dsi->pdev->dev);
1067 WARN_ON(r < 0);
1049} 1068}
1050 1069
1051/* source clock for DSI PLL. this could also be PCLKFREE */ 1070/* source clock for DSI PLL. this could also be PCLKFREE */
@@ -1055,9 +1074,9 @@ static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
1055 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1074 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1056 1075
1057 if (enable) 1076 if (enable)
1058 dss_clk_enable(DSS_CLK_SYSCK); 1077 clk_enable(dsi->sys_clk);
1059 else 1078 else
1060 dss_clk_disable(DSS_CLK_SYSCK); 1079 clk_disable(dsi->sys_clk);
1061 1080
1062 if (enable && dsi->pll_locked) { 1081 if (enable && dsi->pll_locked) {
1063 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) 1082 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
@@ -1150,10 +1169,11 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1150{ 1169{
1151 unsigned long r; 1170 unsigned long r;
1152 int dsi_module = dsi_get_dsidev_id(dsidev); 1171 int dsi_module = dsi_get_dsidev_id(dsidev);
1172 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1153 1173
1154 if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) { 1174 if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
1155 /* DSI FCLK source is DSS_CLK_FCK */ 1175 /* DSI FCLK source is DSS_CLK_FCK */
1156 r = dss_clk_get_rate(DSS_CLK_FCK); 1176 r = clk_get_rate(dsi->dss_clk);
1157 } else { 1177 } else {
1158 /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */ 1178 /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
1159 r = dsi_get_pll_hsdiv_dsi_rate(dsidev); 1179 r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
@@ -1262,7 +1282,7 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
1262 return -EINVAL; 1282 return -EINVAL;
1263 1283
1264 if (cinfo->use_sys_clk) { 1284 if (cinfo->use_sys_clk) {
1265 cinfo->clkin = dss_clk_get_rate(DSS_CLK_SYSCK); 1285 cinfo->clkin = clk_get_rate(dsi->sys_clk);
1266 /* XXX it is unclear if highfreq should be used 1286 /* XXX it is unclear if highfreq should be used
1267 * with DSS_SYS_CLK source also */ 1287 * with DSS_SYS_CLK source also */
1268 cinfo->highfreq = 0; 1288 cinfo->highfreq = 0;
@@ -1311,7 +1331,7 @@ int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
1311 int match = 0; 1331 int match = 0;
1312 unsigned long dss_sys_clk, max_dss_fck; 1332 unsigned long dss_sys_clk, max_dss_fck;
1313 1333
1314 dss_sys_clk = dss_clk_get_rate(DSS_CLK_SYSCK); 1334 dss_sys_clk = clk_get_rate(dsi->sys_clk);
1315 1335
1316 max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK); 1336 max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
1317 1337
@@ -1601,7 +1621,6 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
1601 dsi->vdds_dsi_reg = vdds_dsi; 1621 dsi->vdds_dsi_reg = vdds_dsi;
1602 } 1622 }
1603 1623
1604 enable_clocks(1);
1605 dsi_enable_pll_clock(dsidev, 1); 1624 dsi_enable_pll_clock(dsidev, 1);
1606 /* 1625 /*
1607 * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4. 1626 * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
@@ -1653,7 +1672,6 @@ err1:
1653 } 1672 }
1654err0: 1673err0:
1655 dsi_disable_scp_clk(dsidev); 1674 dsi_disable_scp_clk(dsidev);
1656 enable_clocks(0);
1657 dsi_enable_pll_clock(dsidev, 0); 1675 dsi_enable_pll_clock(dsidev, 0);
1658 return r; 1676 return r;
1659} 1677}
@@ -1671,7 +1689,6 @@ void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
1671 } 1689 }
1672 1690
1673 dsi_disable_scp_clk(dsidev); 1691 dsi_disable_scp_clk(dsidev);
1674 enable_clocks(0);
1675 dsi_enable_pll_clock(dsidev, 0); 1692 dsi_enable_pll_clock(dsidev, 0);
1676 1693
1677 DSSDBG("PLL uninit done\n"); 1694 DSSDBG("PLL uninit done\n");
@@ -1688,7 +1705,8 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1688 dispc_clk_src = dss_get_dispc_clk_source(); 1705 dispc_clk_src = dss_get_dispc_clk_source();
1689 dsi_clk_src = dss_get_dsi_clk_source(dsi_module); 1706 dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
1690 1707
1691 enable_clocks(1); 1708 if (dsi_runtime_get(dsidev))
1709 return;
1692 1710
1693 seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1); 1711 seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
1694 1712
@@ -1731,7 +1749,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1731 1749
1732 seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk); 1750 seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
1733 1751
1734 enable_clocks(0); 1752 dsi_runtime_put(dsidev);
1735} 1753}
1736 1754
1737void dsi_dump_clocks(struct seq_file *s) 1755void dsi_dump_clocks(struct seq_file *s)
@@ -1873,7 +1891,8 @@ static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
1873{ 1891{
1874#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r)) 1892#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))
1875 1893
1876 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); 1894 if (dsi_runtime_get(dsidev))
1895 return;
1877 dsi_enable_scp_clk(dsidev); 1896 dsi_enable_scp_clk(dsidev);
1878 1897
1879 DUMPREG(DSI_REVISION); 1898 DUMPREG(DSI_REVISION);
@@ -1947,7 +1966,7 @@ static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
1947 DUMPREG(DSI_PLL_CONFIGURATION2); 1966 DUMPREG(DSI_PLL_CONFIGURATION2);
1948 1967
1949 dsi_disable_scp_clk(dsidev); 1968 dsi_disable_scp_clk(dsidev);
1950 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 1969 dsi_runtime_put(dsidev);
1951#undef DUMPREG 1970#undef DUMPREG
1952} 1971}
1953 1972
@@ -2463,28 +2482,6 @@ static void dsi_cio_uninit(struct platform_device *dsidev)
2463 dsi->dsi_mux_pads(false); 2482 dsi->dsi_mux_pads(false);
2464} 2483}
2465 2484
2466static int _dsi_wait_reset(struct platform_device *dsidev)
2467{
2468 int t = 0;
2469
2470 while (REG_GET(dsidev, DSI_SYSSTATUS, 0, 0) == 0) {
2471 if (++t > 5) {
2472 DSSERR("soft reset failed\n");
2473 return -ENODEV;
2474 }
2475 udelay(1);
2476 }
2477
2478 return 0;
2479}
2480
2481static int _dsi_reset(struct platform_device *dsidev)
2482{
2483 /* Soft reset */
2484 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 1, 1);
2485 return _dsi_wait_reset(dsidev);
2486}
2487
2488static void dsi_config_tx_fifo(struct platform_device *dsidev, 2485static void dsi_config_tx_fifo(struct platform_device *dsidev,
2489 enum fifo_size size1, enum fifo_size size2, 2486 enum fifo_size size1, enum fifo_size size2,
2490 enum fifo_size size3, enum fifo_size size4) 2487 enum fifo_size size3, enum fifo_size size4)
@@ -3386,6 +3383,10 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
3386 dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion, 3383 dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3387 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); 3384 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3388 3385
3386 /* Reset LANEx_ULPS_SIG2 */
3387 REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (0 << 0) | (0 << 1) | (0 << 2),
3388 7, 5);
3389
3389 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS); 3390 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
3390 3391
3391 dsi_if_enable(dsidev, false); 3392 dsi_if_enable(dsidev, false);
@@ -4198,22 +4199,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
4198 dsi_pll_uninit(dsidev, disconnect_lanes); 4199 dsi_pll_uninit(dsidev, disconnect_lanes);
4199} 4200}
4200 4201
4201static int dsi_core_init(struct platform_device *dsidev)
4202{
4203 /* Autoidle */
4204 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 0, 0);
4205
4206 /* ENWAKEUP */
4207 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 2, 2);
4208
4209 /* SIDLEMODE smart-idle */
4210 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 2, 4, 3);
4211
4212 _dsi_initialize_irq(dsidev);
4213
4214 return 0;
4215}
4216
4217int omapdss_dsi_display_enable(struct omap_dss_device *dssdev) 4202int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
4218{ 4203{
4219 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4204 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -4229,37 +4214,37 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
4229 r = omap_dss_start_device(dssdev); 4214 r = omap_dss_start_device(dssdev);
4230 if (r) { 4215 if (r) {
4231 DSSERR("failed to start device\n"); 4216 DSSERR("failed to start device\n");
4232 goto err0; 4217 goto err_start_dev;
4233 } 4218 }
4234 4219
4235 enable_clocks(1); 4220 r = dsi_runtime_get(dsidev);
4236 dsi_enable_pll_clock(dsidev, 1);
4237
4238 r = _dsi_reset(dsidev);
4239 if (r) 4221 if (r)
4240 goto err1; 4222 goto err_get_dsi;
4241 4223
4242 dsi_core_init(dsidev); 4224 dsi_enable_pll_clock(dsidev, 1);
4225
4226 _dsi_initialize_irq(dsidev);
4243 4227
4244 r = dsi_display_init_dispc(dssdev); 4228 r = dsi_display_init_dispc(dssdev);
4245 if (r) 4229 if (r)
4246 goto err1; 4230 goto err_init_dispc;
4247 4231
4248 r = dsi_display_init_dsi(dssdev); 4232 r = dsi_display_init_dsi(dssdev);
4249 if (r) 4233 if (r)
4250 goto err2; 4234 goto err_init_dsi;
4251 4235
4252 mutex_unlock(&dsi->lock); 4236 mutex_unlock(&dsi->lock);
4253 4237
4254 return 0; 4238 return 0;
4255 4239
4256err2: 4240err_init_dsi:
4257 dsi_display_uninit_dispc(dssdev); 4241 dsi_display_uninit_dispc(dssdev);
4258err1: 4242err_init_dispc:
4259 enable_clocks(0);
4260 dsi_enable_pll_clock(dsidev, 0); 4243 dsi_enable_pll_clock(dsidev, 0);
4244 dsi_runtime_put(dsidev);
4245err_get_dsi:
4261 omap_dss_stop_device(dssdev); 4246 omap_dss_stop_device(dssdev);
4262err0: 4247err_start_dev:
4263 mutex_unlock(&dsi->lock); 4248 mutex_unlock(&dsi->lock);
4264 DSSDBG("dsi_display_enable FAILED\n"); 4249 DSSDBG("dsi_display_enable FAILED\n");
4265 return r; 4250 return r;
@@ -4278,11 +4263,16 @@ void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
4278 4263
4279 mutex_lock(&dsi->lock); 4264 mutex_lock(&dsi->lock);
4280 4265
4266 dsi_sync_vc(dsidev, 0);
4267 dsi_sync_vc(dsidev, 1);
4268 dsi_sync_vc(dsidev, 2);
4269 dsi_sync_vc(dsidev, 3);
4270
4281 dsi_display_uninit_dispc(dssdev); 4271 dsi_display_uninit_dispc(dssdev);
4282 4272
4283 dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps); 4273 dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps);
4284 4274
4285 enable_clocks(0); 4275 dsi_runtime_put(dsidev);
4286 dsi_enable_pll_clock(dsidev, 0); 4276 dsi_enable_pll_clock(dsidev, 0);
4287 4277
4288 omap_dss_stop_device(dssdev); 4278 omap_dss_stop_device(dssdev);
@@ -4302,16 +4292,11 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
4302EXPORT_SYMBOL(omapdss_dsi_enable_te); 4292EXPORT_SYMBOL(omapdss_dsi_enable_te);
4303 4293
4304void dsi_get_overlay_fifo_thresholds(enum omap_plane plane, 4294void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
4305 u32 fifo_size, enum omap_burst_size *burst_size, 4295 u32 fifo_size, u32 burst_size,
4306 u32 *fifo_low, u32 *fifo_high) 4296 u32 *fifo_low, u32 *fifo_high)
4307{ 4297{
4308 unsigned burst_size_bytes; 4298 *fifo_high = fifo_size - burst_size;
4309 4299 *fifo_low = fifo_size - burst_size * 2;
4310 *burst_size = OMAP_DSS_BURST_16x32;
4311 burst_size_bytes = 16 * 32 / 8;
4312
4313 *fifo_high = fifo_size - burst_size_bytes;
4314 *fifo_low = fifo_size - burst_size_bytes * 2;
4315} 4300}
4316 4301
4317int dsi_init_display(struct omap_dss_device *dssdev) 4302int dsi_init_display(struct omap_dss_device *dssdev)
@@ -4437,7 +4422,47 @@ static void dsi_calc_clock_param_ranges(struct platform_device *dsidev)
4437 dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV); 4422 dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
4438} 4423}
4439 4424
4440static int dsi_init(struct platform_device *dsidev) 4425static int dsi_get_clocks(struct platform_device *dsidev)
4426{
4427 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4428 struct clk *clk;
4429
4430 clk = clk_get(&dsidev->dev, "fck");
4431 if (IS_ERR(clk)) {
4432 DSSERR("can't get fck\n");
4433 return PTR_ERR(clk);
4434 }
4435
4436 dsi->dss_clk = clk;
4437
4438 if (cpu_is_omap34xx() || cpu_is_omap3630())
4439 clk = clk_get(&dsidev->dev, "dss2_alwon_fck");
4440 else
4441 clk = clk_get(&dsidev->dev, "sys_clk");
4442 if (IS_ERR(clk)) {
4443 DSSERR("can't get sys_clk\n");
4444 clk_put(dsi->dss_clk);
4445 dsi->dss_clk = NULL;
4446 return PTR_ERR(clk);
4447 }
4448
4449 dsi->sys_clk = clk;
4450
4451 return 0;
4452}
4453
4454static void dsi_put_clocks(struct platform_device *dsidev)
4455{
4456 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4457
4458 if (dsi->dss_clk)
4459 clk_put(dsi->dss_clk);
4460 if (dsi->sys_clk)
4461 clk_put(dsi->sys_clk);
4462}
4463
4464/* DSI1 HW IP initialisation */
4465static int omap_dsi1hw_probe(struct platform_device *dsidev)
4441{ 4466{
4442 struct omap_display_platform_data *dss_plat_data; 4467 struct omap_display_platform_data *dss_plat_data;
4443 struct omap_dss_board_info *board_info; 4468 struct omap_dss_board_info *board_info;
@@ -4449,7 +4474,7 @@ static int dsi_init(struct platform_device *dsidev)
4449 dsi = kzalloc(sizeof(*dsi), GFP_KERNEL); 4474 dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
4450 if (!dsi) { 4475 if (!dsi) {
4451 r = -ENOMEM; 4476 r = -ENOMEM;
4452 goto err0; 4477 goto err_alloc;
4453 } 4478 }
4454 4479
4455 dsi->pdev = dsidev; 4480 dsi->pdev = dsidev;
@@ -4472,6 +4497,12 @@ static int dsi_init(struct platform_device *dsidev)
4472 mutex_init(&dsi->lock); 4497 mutex_init(&dsi->lock);
4473 sema_init(&dsi->bus_lock, 1); 4498 sema_init(&dsi->bus_lock, 1);
4474 4499
4500 r = dsi_get_clocks(dsidev);
4501 if (r)
4502 goto err_get_clk;
4503
4504 pm_runtime_enable(&dsidev->dev);
4505
4475 INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work, 4506 INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work,
4476 dsi_framedone_timeout_work_callback); 4507 dsi_framedone_timeout_work_callback);
4477 4508
@@ -4484,26 +4515,26 @@ static int dsi_init(struct platform_device *dsidev)
4484 if (!dsi_mem) { 4515 if (!dsi_mem) {
4485 DSSERR("can't get IORESOURCE_MEM DSI\n"); 4516 DSSERR("can't get IORESOURCE_MEM DSI\n");
4486 r = -EINVAL; 4517 r = -EINVAL;
4487 goto err1; 4518 goto err_ioremap;
4488 } 4519 }
4489 dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem)); 4520 dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem));
4490 if (!dsi->base) { 4521 if (!dsi->base) {
4491 DSSERR("can't ioremap DSI\n"); 4522 DSSERR("can't ioremap DSI\n");
4492 r = -ENOMEM; 4523 r = -ENOMEM;
4493 goto err1; 4524 goto err_ioremap;
4494 } 4525 }
4495 dsi->irq = platform_get_irq(dsi->pdev, 0); 4526 dsi->irq = platform_get_irq(dsi->pdev, 0);
4496 if (dsi->irq < 0) { 4527 if (dsi->irq < 0) {
4497 DSSERR("platform_get_irq failed\n"); 4528 DSSERR("platform_get_irq failed\n");
4498 r = -ENODEV; 4529 r = -ENODEV;
4499 goto err2; 4530 goto err_get_irq;
4500 } 4531 }
4501 4532
4502 r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED, 4533 r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED,
4503 dev_name(&dsidev->dev), dsi->pdev); 4534 dev_name(&dsidev->dev), dsi->pdev);
4504 if (r < 0) { 4535 if (r < 0) {
4505 DSSERR("request_irq failed\n"); 4536 DSSERR("request_irq failed\n");
4506 goto err2; 4537 goto err_get_irq;
4507 } 4538 }
4508 4539
4509 /* DSI VCs initialization */ 4540 /* DSI VCs initialization */
@@ -4515,7 +4546,9 @@ static int dsi_init(struct platform_device *dsidev)
4515 4546
4516 dsi_calc_clock_param_ranges(dsidev); 4547 dsi_calc_clock_param_ranges(dsidev);
4517 4548
4518 enable_clocks(1); 4549 r = dsi_runtime_get(dsidev);
4550 if (r)
4551 goto err_get_dsi;
4519 4552
4520 rev = dsi_read_reg(dsidev, DSI_REVISION); 4553 rev = dsi_read_reg(dsidev, DSI_REVISION);
4521 dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n", 4554 dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
@@ -4523,21 +4556,32 @@ static int dsi_init(struct platform_device *dsidev)
4523 4556
4524 dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev); 4557 dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev);
4525 4558
4526 enable_clocks(0); 4559 dsi_runtime_put(dsidev);
4527 4560
4528 return 0; 4561 return 0;
4529err2: 4562
4563err_get_dsi:
4564 free_irq(dsi->irq, dsi->pdev);
4565err_get_irq:
4530 iounmap(dsi->base); 4566 iounmap(dsi->base);
4531err1: 4567err_ioremap:
4568 pm_runtime_disable(&dsidev->dev);
4569err_get_clk:
4532 kfree(dsi); 4570 kfree(dsi);
4533err0: 4571err_alloc:
4534 return r; 4572 return r;
4535} 4573}
4536 4574
4537static void dsi_exit(struct platform_device *dsidev) 4575static int omap_dsi1hw_remove(struct platform_device *dsidev)
4538{ 4576{
4539 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4577 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4540 4578
4579 WARN_ON(dsi->scp_clk_refcount > 0);
4580
4581 pm_runtime_disable(&dsidev->dev);
4582
4583 dsi_put_clocks(dsidev);
4584
4541 if (dsi->vdds_dsi_reg != NULL) { 4585 if (dsi->vdds_dsi_reg != NULL) {
4542 if (dsi->vdds_dsi_enabled) { 4586 if (dsi->vdds_dsi_enabled) {
4543 regulator_disable(dsi->vdds_dsi_reg); 4587 regulator_disable(dsi->vdds_dsi_reg);
@@ -4553,38 +4597,56 @@ static void dsi_exit(struct platform_device *dsidev)
4553 4597
4554 kfree(dsi); 4598 kfree(dsi);
4555 4599
4556 DSSDBG("omap_dsi_exit\n"); 4600 return 0;
4557} 4601}
4558 4602
4559/* DSI1 HW IP initialisation */ 4603static int dsi_runtime_suspend(struct device *dev)
4560static int omap_dsi1hw_probe(struct platform_device *dsidev)
4561{ 4604{
4562 int r; 4605 struct dsi_data *dsi = dsi_get_dsidrv_data(to_platform_device(dev));
4563 4606
4564 r = dsi_init(dsidev); 4607 clk_disable(dsi->dss_clk);
4565 if (r) { 4608
4566 DSSERR("Failed to initialize DSI\n"); 4609 dispc_runtime_put();
4567 goto err_dsi; 4610 dss_runtime_put();
4568 } 4611
4569err_dsi: 4612 return 0;
4570 return r;
4571} 4613}
4572 4614
4573static int omap_dsi1hw_remove(struct platform_device *dsidev) 4615static int dsi_runtime_resume(struct device *dev)
4574{ 4616{
4575 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4617 struct dsi_data *dsi = dsi_get_dsidrv_data(to_platform_device(dev));
4618 int r;
4619
4620 r = dss_runtime_get();
4621 if (r)
4622 goto err_get_dss;
4623
4624 r = dispc_runtime_get();
4625 if (r)
4626 goto err_get_dispc;
4627
4628 clk_enable(dsi->dss_clk);
4576 4629
4577 dsi_exit(dsidev);
4578 WARN_ON(dsi->scp_clk_refcount > 0);
4579 return 0; 4630 return 0;
4631
4632err_get_dispc:
4633 dss_runtime_put();
4634err_get_dss:
4635 return r;
4580} 4636}
4581 4637
4638static const struct dev_pm_ops dsi_pm_ops = {
4639 .runtime_suspend = dsi_runtime_suspend,
4640 .runtime_resume = dsi_runtime_resume,
4641};
4642
4582static struct platform_driver omap_dsi1hw_driver = { 4643static struct platform_driver omap_dsi1hw_driver = {
4583 .probe = omap_dsi1hw_probe, 4644 .probe = omap_dsi1hw_probe,
4584 .remove = omap_dsi1hw_remove, 4645 .remove = omap_dsi1hw_remove,
4585 .driver = { 4646 .driver = {
4586 .name = "omapdss_dsi1", 4647 .name = "omapdss_dsi1",
4587 .owner = THIS_MODULE, 4648 .owner = THIS_MODULE,
4649 .pm = &dsi_pm_ops,
4588 }, 4650 },
4589}; 4651};
4590 4652
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index d9489d5c4f08..0f9c3a6457a5 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -28,6 +28,8 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/seq_file.h> 29#include <linux/seq_file.h>
30#include <linux/clk.h> 30#include <linux/clk.h>
31#include <linux/platform_device.h>
32#include <linux/pm_runtime.h>
31 33
32#include <video/omapdss.h> 34#include <video/omapdss.h>
33#include <plat/clock.h> 35#include <plat/clock.h>
@@ -59,15 +61,9 @@ struct dss_reg {
59static struct { 61static struct {
60 struct platform_device *pdev; 62 struct platform_device *pdev;
61 void __iomem *base; 63 void __iomem *base;
62 int ctx_id;
63 64
64 struct clk *dpll4_m4_ck; 65 struct clk *dpll4_m4_ck;
65 struct clk *dss_ick; 66 struct clk *dss_clk;
66 struct clk *dss_fck;
67 struct clk *dss_sys_clk;
68 struct clk *dss_tv_fck;
69 struct clk *dss_video_fck;
70 unsigned num_clks_enabled;
71 67
72 unsigned long cache_req_pck; 68 unsigned long cache_req_pck;
73 unsigned long cache_prate; 69 unsigned long cache_prate;
@@ -78,6 +74,7 @@ static struct {
78 enum omap_dss_clk_source dispc_clk_source; 74 enum omap_dss_clk_source dispc_clk_source;
79 enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; 75 enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
80 76
77 bool ctx_valid;
81 u32 ctx[DSS_SZ_REGS / sizeof(u32)]; 78 u32 ctx[DSS_SZ_REGS / sizeof(u32)];
82} dss; 79} dss;
83 80
@@ -87,13 +84,6 @@ static const char * const dss_generic_clk_source_names[] = {
87 [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK", 84 [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK",
88}; 85};
89 86
90static void dss_clk_enable_all_no_ctx(void);
91static void dss_clk_disable_all_no_ctx(void);
92static void dss_clk_enable_no_ctx(enum dss_clock clks);
93static void dss_clk_disable_no_ctx(enum dss_clock clks);
94
95static int _omap_dss_wait_reset(void);
96
97static inline void dss_write_reg(const struct dss_reg idx, u32 val) 87static inline void dss_write_reg(const struct dss_reg idx, u32 val)
98{ 88{
99 __raw_writel(val, dss.base + idx.idx); 89 __raw_writel(val, dss.base + idx.idx);
@@ -109,12 +99,10 @@ static inline u32 dss_read_reg(const struct dss_reg idx)
109#define RR(reg) \ 99#define RR(reg) \
110 dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)]) 100 dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)])
111 101
112void dss_save_context(void) 102static void dss_save_context(void)
113{ 103{
114 if (cpu_is_omap24xx()) 104 DSSDBG("dss_save_context\n");
115 return;
116 105
117 SR(SYSCONFIG);
118 SR(CONTROL); 106 SR(CONTROL);
119 107
120 if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) & 108 if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
@@ -122,14 +110,19 @@ void dss_save_context(void)
122 SR(SDI_CONTROL); 110 SR(SDI_CONTROL);
123 SR(PLL_CONTROL); 111 SR(PLL_CONTROL);
124 } 112 }
113
114 dss.ctx_valid = true;
115
116 DSSDBG("context saved\n");
125} 117}
126 118
127void dss_restore_context(void) 119static void dss_restore_context(void)
128{ 120{
129 if (_omap_dss_wait_reset()) 121 DSSDBG("dss_restore_context\n");
130 DSSERR("DSS not coming out of reset after sleep\n"); 122
123 if (!dss.ctx_valid)
124 return;
131 125
132 RR(SYSCONFIG);
133 RR(CONTROL); 126 RR(CONTROL);
134 127
135 if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) & 128 if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
@@ -137,6 +130,8 @@ void dss_restore_context(void)
137 RR(SDI_CONTROL); 130 RR(SDI_CONTROL);
138 RR(PLL_CONTROL); 131 RR(PLL_CONTROL);
139 } 132 }
133
134 DSSDBG("context restored\n");
140} 135}
141 136
142#undef SR 137#undef SR
@@ -234,6 +229,7 @@ const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src)
234 return dss_generic_clk_source_names[clk_src]; 229 return dss_generic_clk_source_names[clk_src];
235} 230}
236 231
232
237void dss_dump_clocks(struct seq_file *s) 233void dss_dump_clocks(struct seq_file *s)
238{ 234{
239 unsigned long dpll4_ck_rate; 235 unsigned long dpll4_ck_rate;
@@ -241,13 +237,14 @@ void dss_dump_clocks(struct seq_file *s)
241 const char *fclk_name, *fclk_real_name; 237 const char *fclk_name, *fclk_real_name;
242 unsigned long fclk_rate; 238 unsigned long fclk_rate;
243 239
244 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); 240 if (dss_runtime_get())
241 return;
245 242
246 seq_printf(s, "- DSS -\n"); 243 seq_printf(s, "- DSS -\n");
247 244
248 fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK); 245 fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
249 fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK); 246 fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
250 fclk_rate = dss_clk_get_rate(DSS_CLK_FCK); 247 fclk_rate = clk_get_rate(dss.dss_clk);
251 248
252 if (dss.dpll4_m4_ck) { 249 if (dss.dpll4_m4_ck) {
253 dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck)); 250 dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
@@ -273,14 +270,15 @@ void dss_dump_clocks(struct seq_file *s)
273 fclk_rate); 270 fclk_rate);
274 } 271 }
275 272
276 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 273 dss_runtime_put();
277} 274}
278 275
279void dss_dump_regs(struct seq_file *s) 276void dss_dump_regs(struct seq_file *s)
280{ 277{
281#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r)) 278#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
282 279
283 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); 280 if (dss_runtime_get())
281 return;
284 282
285 DUMPREG(DSS_REVISION); 283 DUMPREG(DSS_REVISION);
286 DUMPREG(DSS_SYSCONFIG); 284 DUMPREG(DSS_SYSCONFIG);
@@ -294,7 +292,7 @@ void dss_dump_regs(struct seq_file *s)
294 DUMPREG(DSS_SDI_STATUS); 292 DUMPREG(DSS_SDI_STATUS);
295 } 293 }
296 294
297 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 295 dss_runtime_put();
298#undef DUMPREG 296#undef DUMPREG
299} 297}
300 298
@@ -437,7 +435,7 @@ int dss_calc_clock_rates(struct dss_clock_info *cinfo)
437 } else { 435 } else {
438 if (cinfo->fck_div != 0) 436 if (cinfo->fck_div != 0)
439 return -EINVAL; 437 return -EINVAL;
440 cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK); 438 cinfo->fck = clk_get_rate(dss.dss_clk);
441 } 439 }
442 440
443 return 0; 441 return 0;
@@ -467,7 +465,7 @@ int dss_set_clock_div(struct dss_clock_info *cinfo)
467 465
468int dss_get_clock_div(struct dss_clock_info *cinfo) 466int dss_get_clock_div(struct dss_clock_info *cinfo)
469{ 467{
470 cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK); 468 cinfo->fck = clk_get_rate(dss.dss_clk);
471 469
472 if (dss.dpll4_m4_ck) { 470 if (dss.dpll4_m4_ck) {
473 unsigned long prate; 471 unsigned long prate;
@@ -512,7 +510,7 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
512 510
513 max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK); 511 max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
514 512
515 fck = dss_clk_get_rate(DSS_CLK_FCK); 513 fck = clk_get_rate(dss.dss_clk);
516 if (req_pck == dss.cache_req_pck && 514 if (req_pck == dss.cache_req_pck &&
517 ((cpu_is_omap34xx() && prate == dss.cache_prate) || 515 ((cpu_is_omap34xx() && prate == dss.cache_prate) ||
518 dss.cache_dss_cinfo.fck == fck)) { 516 dss.cache_dss_cinfo.fck == fck)) {
@@ -539,7 +537,7 @@ retry:
539 if (dss.dpll4_m4_ck == NULL) { 537 if (dss.dpll4_m4_ck == NULL) {
540 struct dispc_clock_info cur_dispc; 538 struct dispc_clock_info cur_dispc;
541 /* XXX can we change the clock on omap2? */ 539 /* XXX can we change the clock on omap2? */
542 fck = dss_clk_get_rate(DSS_CLK_FCK); 540 fck = clk_get_rate(dss.dss_clk);
543 fck_div = 1; 541 fck_div = 1;
544 542
545 dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc); 543 dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
@@ -616,28 +614,6 @@ found:
616 return 0; 614 return 0;
617} 615}
618 616
619static int _omap_dss_wait_reset(void)
620{
621 int t = 0;
622
623 while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) {
624 if (++t > 1000) {
625 DSSERR("soft reset failed\n");
626 return -ENODEV;
627 }
628 udelay(1);
629 }
630
631 return 0;
632}
633
634static int _omap_dss_reset(void)
635{
636 /* Soft reset */
637 REG_FLD_MOD(DSS_SYSCONFIG, 1, 1, 1);
638 return _omap_dss_wait_reset();
639}
640
641void dss_set_venc_output(enum omap_dss_venc_type type) 617void dss_set_venc_output(enum omap_dss_venc_type type)
642{ 618{
643 int l = 0; 619 int l = 0;
@@ -663,424 +639,88 @@ void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select hdmi)
663 REG_FLD_MOD(DSS_CONTROL, hdmi, 15, 15); /* VENC_HDMI_SWITCH */ 639 REG_FLD_MOD(DSS_CONTROL, hdmi, 15, 15); /* VENC_HDMI_SWITCH */
664} 640}
665 641
666static int dss_init(void) 642static int dss_get_clocks(void)
667{ 643{
644 struct clk *clk;
668 int r; 645 int r;
669 u32 rev;
670 struct resource *dss_mem;
671 struct clk *dpll4_m4_ck;
672 646
673 dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0); 647 clk = clk_get(&dss.pdev->dev, "fck");
674 if (!dss_mem) { 648 if (IS_ERR(clk)) {
675 DSSERR("can't get IORESOURCE_MEM DSS\n"); 649 DSSERR("can't get clock fck\n");
676 r = -EINVAL; 650 r = PTR_ERR(clk);
677 goto fail0; 651 goto err;
678 }
679 dss.base = ioremap(dss_mem->start, resource_size(dss_mem));
680 if (!dss.base) {
681 DSSERR("can't ioremap DSS\n");
682 r = -ENOMEM;
683 goto fail0;
684 } 652 }
685 653
686 /* disable LCD and DIGIT output. This seems to fix the synclost 654 dss.dss_clk = clk;
687 * problem that we get, if the bootloader starts the DSS and
688 * the kernel resets it */
689 omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);
690
691#ifdef CONFIG_OMAP2_DSS_SLEEP_BEFORE_RESET
692 /* We need to wait here a bit, otherwise we sometimes start to
693 * get synclost errors, and after that only power cycle will
694 * restore DSS functionality. I have no idea why this happens.
695 * And we have to wait _before_ resetting the DSS, but after
696 * enabling clocks.
697 *
698 * This bug was at least present on OMAP3430. It's unknown
699 * if it happens on OMAP2 or OMAP3630.
700 */
701 msleep(50);
702#endif
703
704 _omap_dss_reset();
705 655
706 /* autoidle */
707 REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0);
708
709 /* Select DPLL */
710 REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
711
712#ifdef CONFIG_OMAP2_DSS_VENC
713 REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
714 REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
715 REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
716#endif
717 if (cpu_is_omap34xx()) { 656 if (cpu_is_omap34xx()) {
718 dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck"); 657 clk = clk_get(NULL, "dpll4_m4_ck");
719 if (IS_ERR(dpll4_m4_ck)) { 658 if (IS_ERR(clk)) {
720 DSSERR("Failed to get dpll4_m4_ck\n"); 659 DSSERR("Failed to get dpll4_m4_ck\n");
721 r = PTR_ERR(dpll4_m4_ck); 660 r = PTR_ERR(clk);
722 goto fail1; 661 goto err;
723 } 662 }
724 } else if (cpu_is_omap44xx()) { 663 } else if (cpu_is_omap44xx()) {
725 dpll4_m4_ck = clk_get(NULL, "dpll_per_m5x2_ck"); 664 clk = clk_get(NULL, "dpll_per_m5x2_ck");
726 if (IS_ERR(dpll4_m4_ck)) { 665 if (IS_ERR(clk)) {
727 DSSERR("Failed to get dpll4_m4_ck\n"); 666 DSSERR("Failed to get dpll_per_m5x2_ck\n");
728 r = PTR_ERR(dpll4_m4_ck); 667 r = PTR_ERR(clk);
729 goto fail1; 668 goto err;
730 } 669 }
731 } else { /* omap24xx */ 670 } else { /* omap24xx */
732 dpll4_m4_ck = NULL; 671 clk = NULL;
733 } 672 }
734 673
735 dss.dpll4_m4_ck = dpll4_m4_ck; 674 dss.dpll4_m4_ck = clk;
736
737 dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
738 dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
739 dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK;
740 dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
741 dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
742
743 dss_save_context();
744
745 rev = dss_read_reg(DSS_REVISION);
746 printk(KERN_INFO "OMAP DSS rev %d.%d\n",
747 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
748 675
749 return 0; 676 return 0;
750 677
751fail1: 678err:
752 iounmap(dss.base); 679 if (dss.dss_clk)
753fail0: 680 clk_put(dss.dss_clk);
754 return r;
755}
756
757static void dss_exit(void)
758{
759 if (dss.dpll4_m4_ck) 681 if (dss.dpll4_m4_ck)
760 clk_put(dss.dpll4_m4_ck); 682 clk_put(dss.dpll4_m4_ck);
761 683
762 iounmap(dss.base);
763}
764
765/* CONTEXT */
766static int dss_get_ctx_id(void)
767{
768 struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data;
769 int r;
770
771 if (!pdata->board_data->get_last_off_on_transaction_id)
772 return 0;
773 r = pdata->board_data->get_last_off_on_transaction_id(&dss.pdev->dev);
774 if (r < 0) {
775 dev_err(&dss.pdev->dev, "getting transaction ID failed, "
776 "will force context restore\n");
777 r = -1;
778 }
779 return r;
780}
781
782int dss_need_ctx_restore(void)
783{
784 int id = dss_get_ctx_id();
785
786 if (id < 0 || id != dss.ctx_id) {
787 DSSDBG("ctx id %d -> id %d\n",
788 dss.ctx_id, id);
789 dss.ctx_id = id;
790 return 1;
791 } else {
792 return 0;
793 }
794}
795
796static void save_all_ctx(void)
797{
798 DSSDBG("save context\n");
799
800 dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK);
801
802 dss_save_context();
803 dispc_save_context();
804#ifdef CONFIG_OMAP2_DSS_DSI
805 dsi_save_context();
806#endif
807
808 dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK);
809}
810
811static void restore_all_ctx(void)
812{
813 DSSDBG("restore context\n");
814
815 dss_clk_enable_all_no_ctx();
816
817 dss_restore_context();
818 dispc_restore_context();
819#ifdef CONFIG_OMAP2_DSS_DSI
820 dsi_restore_context();
821#endif
822
823 dss_clk_disable_all_no_ctx();
824}
825
826static int dss_get_clock(struct clk **clock, const char *clk_name)
827{
828 struct clk *clk;
829
830 clk = clk_get(&dss.pdev->dev, clk_name);
831
832 if (IS_ERR(clk)) {
833 DSSERR("can't get clock %s", clk_name);
834 return PTR_ERR(clk);
835 }
836
837 *clock = clk;
838
839 DSSDBG("clk %s, rate %ld\n", clk_name, clk_get_rate(clk));
840
841 return 0;
842}
843
844static int dss_get_clocks(void)
845{
846 int r;
847 struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data;
848
849 dss.dss_ick = NULL;
850 dss.dss_fck = NULL;
851 dss.dss_sys_clk = NULL;
852 dss.dss_tv_fck = NULL;
853 dss.dss_video_fck = NULL;
854
855 r = dss_get_clock(&dss.dss_ick, "ick");
856 if (r)
857 goto err;
858
859 r = dss_get_clock(&dss.dss_fck, "fck");
860 if (r)
861 goto err;
862
863 if (!pdata->opt_clock_available) {
864 r = -ENODEV;
865 goto err;
866 }
867
868 if (pdata->opt_clock_available("sys_clk")) {
869 r = dss_get_clock(&dss.dss_sys_clk, "sys_clk");
870 if (r)
871 goto err;
872 }
873
874 if (pdata->opt_clock_available("tv_clk")) {
875 r = dss_get_clock(&dss.dss_tv_fck, "tv_clk");
876 if (r)
877 goto err;
878 }
879
880 if (pdata->opt_clock_available("video_clk")) {
881 r = dss_get_clock(&dss.dss_video_fck, "video_clk");
882 if (r)
883 goto err;
884 }
885
886 return 0;
887
888err:
889 if (dss.dss_ick)
890 clk_put(dss.dss_ick);
891 if (dss.dss_fck)
892 clk_put(dss.dss_fck);
893 if (dss.dss_sys_clk)
894 clk_put(dss.dss_sys_clk);
895 if (dss.dss_tv_fck)
896 clk_put(dss.dss_tv_fck);
897 if (dss.dss_video_fck)
898 clk_put(dss.dss_video_fck);
899
900 return r; 684 return r;
901} 685}
902 686
903static void dss_put_clocks(void) 687static void dss_put_clocks(void)
904{ 688{
905 if (dss.dss_video_fck) 689 if (dss.dpll4_m4_ck)
906 clk_put(dss.dss_video_fck); 690 clk_put(dss.dpll4_m4_ck);
907 if (dss.dss_tv_fck) 691 clk_put(dss.dss_clk);
908 clk_put(dss.dss_tv_fck);
909 if (dss.dss_sys_clk)
910 clk_put(dss.dss_sys_clk);
911 clk_put(dss.dss_fck);
912 clk_put(dss.dss_ick);
913}
914
915unsigned long dss_clk_get_rate(enum dss_clock clk)
916{
917 switch (clk) {
918 case DSS_CLK_ICK:
919 return clk_get_rate(dss.dss_ick);
920 case DSS_CLK_FCK:
921 return clk_get_rate(dss.dss_fck);
922 case DSS_CLK_SYSCK:
923 return clk_get_rate(dss.dss_sys_clk);
924 case DSS_CLK_TVFCK:
925 return clk_get_rate(dss.dss_tv_fck);
926 case DSS_CLK_VIDFCK:
927 return clk_get_rate(dss.dss_video_fck);
928 }
929
930 BUG();
931 return 0;
932}
933
934static unsigned count_clk_bits(enum dss_clock clks)
935{
936 unsigned num_clks = 0;
937
938 if (clks & DSS_CLK_ICK)
939 ++num_clks;
940 if (clks & DSS_CLK_FCK)
941 ++num_clks;
942 if (clks & DSS_CLK_SYSCK)
943 ++num_clks;
944 if (clks & DSS_CLK_TVFCK)
945 ++num_clks;
946 if (clks & DSS_CLK_VIDFCK)
947 ++num_clks;
948
949 return num_clks;
950}
951
952static void dss_clk_enable_no_ctx(enum dss_clock clks)
953{
954 unsigned num_clks = count_clk_bits(clks);
955
956 if (clks & DSS_CLK_ICK)
957 clk_enable(dss.dss_ick);
958 if (clks & DSS_CLK_FCK)
959 clk_enable(dss.dss_fck);
960 if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk)
961 clk_enable(dss.dss_sys_clk);
962 if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck)
963 clk_enable(dss.dss_tv_fck);
964 if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck)
965 clk_enable(dss.dss_video_fck);
966
967 dss.num_clks_enabled += num_clks;
968}
969
970void dss_clk_enable(enum dss_clock clks)
971{
972 bool check_ctx = dss.num_clks_enabled == 0;
973
974 dss_clk_enable_no_ctx(clks);
975
976 /*
977 * HACK: On omap4 the registers may not be accessible right after
978 * enabling the clocks. At some point this will be handled by
979 * pm_runtime, but for the time begin this should make things work.
980 */
981 if (cpu_is_omap44xx() && check_ctx)
982 udelay(10);
983
984 if (check_ctx && cpu_is_omap34xx() && dss_need_ctx_restore())
985 restore_all_ctx();
986} 692}
987 693
988static void dss_clk_disable_no_ctx(enum dss_clock clks) 694struct clk *dss_get_ick(void)
989{ 695{
990 unsigned num_clks = count_clk_bits(clks); 696 return clk_get(&dss.pdev->dev, "ick");
991
992 if (clks & DSS_CLK_ICK)
993 clk_disable(dss.dss_ick);
994 if (clks & DSS_CLK_FCK)
995 clk_disable(dss.dss_fck);
996 if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk)
997 clk_disable(dss.dss_sys_clk);
998 if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck)
999 clk_disable(dss.dss_tv_fck);
1000 if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck)
1001 clk_disable(dss.dss_video_fck);
1002
1003 dss.num_clks_enabled -= num_clks;
1004} 697}
1005 698
1006void dss_clk_disable(enum dss_clock clks) 699int dss_runtime_get(void)
1007{ 700{
1008 if (cpu_is_omap34xx()) { 701 int r;
1009 unsigned num_clks = count_clk_bits(clks);
1010
1011 BUG_ON(dss.num_clks_enabled < num_clks);
1012 702
1013 if (dss.num_clks_enabled == num_clks) 703 DSSDBG("dss_runtime_get\n");
1014 save_all_ctx();
1015 }
1016 704
1017 dss_clk_disable_no_ctx(clks); 705 r = pm_runtime_get_sync(&dss.pdev->dev);
706 WARN_ON(r < 0);
707 return r < 0 ? r : 0;
1018} 708}
1019 709
1020static void dss_clk_enable_all_no_ctx(void) 710void dss_runtime_put(void)
1021{ 711{
1022 enum dss_clock clks; 712 int r;
1023
1024 clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK;
1025 if (cpu_is_omap34xx())
1026 clks |= DSS_CLK_VIDFCK;
1027 dss_clk_enable_no_ctx(clks);
1028}
1029
1030static void dss_clk_disable_all_no_ctx(void)
1031{
1032 enum dss_clock clks;
1033 713
1034 clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK; 714 DSSDBG("dss_runtime_put\n");
1035 if (cpu_is_omap34xx())
1036 clks |= DSS_CLK_VIDFCK;
1037 dss_clk_disable_no_ctx(clks);
1038}
1039 715
1040#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) 716 r = pm_runtime_put(&dss.pdev->dev);
1041/* CLOCKS */ 717 WARN_ON(r < 0);
1042static void core_dump_clocks(struct seq_file *s)
1043{
1044 int i;
1045 struct clk *clocks[5] = {
1046 dss.dss_ick,
1047 dss.dss_fck,
1048 dss.dss_sys_clk,
1049 dss.dss_tv_fck,
1050 dss.dss_video_fck
1051 };
1052
1053 const char *names[5] = {
1054 "ick",
1055 "fck",
1056 "sys_clk",
1057 "tv_fck",
1058 "video_fck"
1059 };
1060
1061 seq_printf(s, "- CORE -\n");
1062
1063 seq_printf(s, "internal clk count\t\t%u\n", dss.num_clks_enabled);
1064
1065 for (i = 0; i < 5; i++) {
1066 if (!clocks[i])
1067 continue;
1068 seq_printf(s, "%s (%s)%*s\t%lu\t%d\n",
1069 names[i],
1070 clocks[i]->name,
1071 24 - strlen(names[i]) - strlen(clocks[i]->name),
1072 "",
1073 clk_get_rate(clocks[i]),
1074 clocks[i]->usecount);
1075 }
1076} 718}
1077#endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */
1078 719
1079/* DEBUGFS */ 720/* DEBUGFS */
1080#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) 721#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
1081void dss_debug_dump_clocks(struct seq_file *s) 722void dss_debug_dump_clocks(struct seq_file *s)
1082{ 723{
1083 core_dump_clocks(s);
1084 dss_dump_clocks(s); 724 dss_dump_clocks(s);
1085 dispc_dump_clocks(s); 725 dispc_dump_clocks(s);
1086#ifdef CONFIG_OMAP2_DSS_DSI 726#ifdef CONFIG_OMAP2_DSS_DSI
@@ -1089,28 +729,51 @@ void dss_debug_dump_clocks(struct seq_file *s)
1089} 729}
1090#endif 730#endif
1091 731
1092
1093/* DSS HW IP initialisation */ 732/* DSS HW IP initialisation */
1094static int omap_dsshw_probe(struct platform_device *pdev) 733static int omap_dsshw_probe(struct platform_device *pdev)
1095{ 734{
735 struct resource *dss_mem;
736 u32 rev;
1096 int r; 737 int r;
1097 738
1098 dss.pdev = pdev; 739 dss.pdev = pdev;
1099 740
741 dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
742 if (!dss_mem) {
743 DSSERR("can't get IORESOURCE_MEM DSS\n");
744 r = -EINVAL;
745 goto err_ioremap;
746 }
747 dss.base = ioremap(dss_mem->start, resource_size(dss_mem));
748 if (!dss.base) {
749 DSSERR("can't ioremap DSS\n");
750 r = -ENOMEM;
751 goto err_ioremap;
752 }
753
1100 r = dss_get_clocks(); 754 r = dss_get_clocks();
1101 if (r) 755 if (r)
1102 goto err_clocks; 756 goto err_clocks;
1103 757
1104 dss_clk_enable_all_no_ctx(); 758 pm_runtime_enable(&pdev->dev);
1105 759
1106 dss.ctx_id = dss_get_ctx_id(); 760 r = dss_runtime_get();
1107 DSSDBG("initial ctx id %u\n", dss.ctx_id); 761 if (r)
762 goto err_runtime_get;
1108 763
1109 r = dss_init(); 764 /* Select DPLL */
1110 if (r) { 765 REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
1111 DSSERR("Failed to initialize DSS\n"); 766
1112 goto err_dss; 767#ifdef CONFIG_OMAP2_DSS_VENC
1113 } 768 REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
769 REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
770 REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
771#endif
772 dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
773 dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
774 dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK;
775 dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
776 dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
1114 777
1115 r = dpi_init(); 778 r = dpi_init();
1116 if (r) { 779 if (r) {
@@ -1124,42 +787,66 @@ static int omap_dsshw_probe(struct platform_device *pdev)
1124 goto err_sdi; 787 goto err_sdi;
1125 } 788 }
1126 789
1127 dss_clk_disable_all_no_ctx(); 790 rev = dss_read_reg(DSS_REVISION);
791 printk(KERN_INFO "OMAP DSS rev %d.%d\n",
792 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
793
794 dss_runtime_put();
795
1128 return 0; 796 return 0;
1129err_sdi: 797err_sdi:
1130 dpi_exit(); 798 dpi_exit();
1131err_dpi: 799err_dpi:
1132 dss_exit(); 800 dss_runtime_put();
1133err_dss: 801err_runtime_get:
1134 dss_clk_disable_all_no_ctx(); 802 pm_runtime_disable(&pdev->dev);
1135 dss_put_clocks(); 803 dss_put_clocks();
1136err_clocks: 804err_clocks:
805 iounmap(dss.base);
806err_ioremap:
1137 return r; 807 return r;
1138} 808}
1139 809
1140static int omap_dsshw_remove(struct platform_device *pdev) 810static int omap_dsshw_remove(struct platform_device *pdev)
1141{ 811{
812 dpi_exit();
813 sdi_exit();
1142 814
1143 dss_exit(); 815 iounmap(dss.base);
1144 816
1145 /* 817 pm_runtime_disable(&pdev->dev);
1146 * As part of hwmod changes, DSS is not the only controller of dss
1147 * clocks; hwmod framework itself will also enable clocks during hwmod
1148 * init for dss, and autoidle is set in h/w for DSS. Hence, there's no
1149 * need to disable clocks if their usecounts > 1.
1150 */
1151 WARN_ON(dss.num_clks_enabled > 0);
1152 818
1153 dss_put_clocks(); 819 dss_put_clocks();
820
821 return 0;
822}
823
824static int dss_runtime_suspend(struct device *dev)
825{
826 dss_save_context();
827 clk_disable(dss.dss_clk);
1154 return 0; 828 return 0;
1155} 829}
1156 830
831static int dss_runtime_resume(struct device *dev)
832{
833 clk_enable(dss.dss_clk);
834 dss_restore_context();
835 return 0;
836}
837
838static const struct dev_pm_ops dss_pm_ops = {
839 .runtime_suspend = dss_runtime_suspend,
840 .runtime_resume = dss_runtime_resume,
841};
842
1157static struct platform_driver omap_dsshw_driver = { 843static struct platform_driver omap_dsshw_driver = {
1158 .probe = omap_dsshw_probe, 844 .probe = omap_dsshw_probe,
1159 .remove = omap_dsshw_remove, 845 .remove = omap_dsshw_remove,
1160 .driver = { 846 .driver = {
1161 .name = "omapdss_dss", 847 .name = "omapdss_dss",
1162 .owner = THIS_MODULE, 848 .owner = THIS_MODULE,
849 .pm = &dss_pm_ops,
1163 }, 850 },
1164}; 851};
1165 852
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 8ab6d43329bb..9c94b1152c20 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -97,26 +97,12 @@ extern unsigned int dss_debug;
97#define FLD_MOD(orig, val, start, end) \ 97#define FLD_MOD(orig, val, start, end) \
98 (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end)) 98 (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
99 99
100enum omap_burst_size {
101 OMAP_DSS_BURST_4x32 = 0,
102 OMAP_DSS_BURST_8x32 = 1,
103 OMAP_DSS_BURST_16x32 = 2,
104};
105
106enum omap_parallel_interface_mode { 100enum omap_parallel_interface_mode {
107 OMAP_DSS_PARALLELMODE_BYPASS, /* MIPI DPI */ 101 OMAP_DSS_PARALLELMODE_BYPASS, /* MIPI DPI */
108 OMAP_DSS_PARALLELMODE_RFBI, /* MIPI DBI */ 102 OMAP_DSS_PARALLELMODE_RFBI, /* MIPI DBI */
109 OMAP_DSS_PARALLELMODE_DSI, 103 OMAP_DSS_PARALLELMODE_DSI,
110}; 104};
111 105
112enum dss_clock {
113 DSS_CLK_ICK = 1 << 0, /* DSS_L3_ICLK and DSS_L4_ICLK */
114 DSS_CLK_FCK = 1 << 1, /* DSS1_ALWON_FCLK */
115 DSS_CLK_SYSCK = 1 << 2, /* DSS2_ALWON_FCLK */
116 DSS_CLK_TVFCK = 1 << 3, /* DSS_TV_FCLK */
117 DSS_CLK_VIDFCK = 1 << 4, /* DSS_96M_FCLK*/
118};
119
120enum dss_hdmi_venc_clk_source_select { 106enum dss_hdmi_venc_clk_source_select {
121 DSS_VENC_TV_CLK = 0, 107 DSS_VENC_TV_CLK = 0,
122 DSS_HDMI_M_PCLK = 1, 108 DSS_HDMI_M_PCLK = 1,
@@ -194,7 +180,7 @@ void dss_uninit_device(struct platform_device *pdev,
194bool dss_use_replication(struct omap_dss_device *dssdev, 180bool dss_use_replication(struct omap_dss_device *dssdev,
195 enum omap_color_mode mode); 181 enum omap_color_mode mode);
196void default_get_overlay_fifo_thresholds(enum omap_plane plane, 182void default_get_overlay_fifo_thresholds(enum omap_plane plane,
197 u32 fifo_size, enum omap_burst_size *burst_size, 183 u32 fifo_size, u32 burst_size,
198 u32 *fifo_low, u32 *fifo_high); 184 u32 *fifo_low, u32 *fifo_high);
199 185
200/* manager */ 186/* manager */
@@ -220,13 +206,12 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
220int dss_init_platform_driver(void); 206int dss_init_platform_driver(void);
221void dss_uninit_platform_driver(void); 207void dss_uninit_platform_driver(void);
222 208
209int dss_runtime_get(void);
210void dss_runtime_put(void);
211
212struct clk *dss_get_ick(void);
213
223void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select); 214void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
224void dss_save_context(void);
225void dss_restore_context(void);
226void dss_clk_enable(enum dss_clock clks);
227void dss_clk_disable(enum dss_clock clks);
228unsigned long dss_clk_get_rate(enum dss_clock clk);
229int dss_need_ctx_restore(void);
230const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src); 215const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
231void dss_dump_clocks(struct seq_file *s); 216void dss_dump_clocks(struct seq_file *s);
232 217
@@ -283,15 +268,15 @@ struct file_operations;
283int dsi_init_platform_driver(void); 268int dsi_init_platform_driver(void);
284void dsi_uninit_platform_driver(void); 269void dsi_uninit_platform_driver(void);
285 270
271int dsi_runtime_get(struct platform_device *dsidev);
272void dsi_runtime_put(struct platform_device *dsidev);
273
286void dsi_dump_clocks(struct seq_file *s); 274void dsi_dump_clocks(struct seq_file *s);
287void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir, 275void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
288 const struct file_operations *debug_fops); 276 const struct file_operations *debug_fops);
289void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir, 277void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
290 const struct file_operations *debug_fops); 278 const struct file_operations *debug_fops);
291 279
292void dsi_save_context(void);
293void dsi_restore_context(void);
294
295int dsi_init_display(struct omap_dss_device *display); 280int dsi_init_display(struct omap_dss_device *display);
296void dsi_irq_handler(void); 281void dsi_irq_handler(void);
297unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev); 282unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev);
@@ -304,7 +289,7 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
304 bool enable_hsdiv); 289 bool enable_hsdiv);
305void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes); 290void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes);
306void dsi_get_overlay_fifo_thresholds(enum omap_plane plane, 291void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
307 u32 fifo_size, enum omap_burst_size *burst_size, 292 u32 fifo_size, u32 burst_size,
308 u32 *fifo_low, u32 *fifo_high); 293 u32 *fifo_low, u32 *fifo_high);
309void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev); 294void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev);
310void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev); 295void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev);
@@ -317,6 +302,13 @@ static inline int dsi_init_platform_driver(void)
317static inline void dsi_uninit_platform_driver(void) 302static inline void dsi_uninit_platform_driver(void)
318{ 303{
319} 304}
305static inline int dsi_runtime_get(struct platform_device *dsidev)
306{
307 return 0;
308}
309static inline void dsi_runtime_put(struct platform_device *dsidev)
310{
311}
320static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev) 312static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
321{ 313{
322 WARN("%s: DSI not compiled in, returning rate as 0\n", __func__); 314 WARN("%s: DSI not compiled in, returning rate as 0\n", __func__);
@@ -384,8 +376,8 @@ void dispc_dump_regs(struct seq_file *s);
384void dispc_irq_handler(void); 376void dispc_irq_handler(void);
385void dispc_fake_vsync_irq(void); 377void dispc_fake_vsync_irq(void);
386 378
387void dispc_save_context(void); 379int dispc_runtime_get(void);
388void dispc_restore_context(void); 380void dispc_runtime_put(void);
389 381
390void dispc_enable_sidle(void); 382void dispc_enable_sidle(void);
391void dispc_disable_sidle(void); 383void dispc_disable_sidle(void);
@@ -398,10 +390,12 @@ void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable);
398void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height); 390void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
399void dispc_set_digit_size(u16 width, u16 height); 391void dispc_set_digit_size(u16 width, u16 height);
400u32 dispc_get_plane_fifo_size(enum omap_plane plane); 392u32 dispc_get_plane_fifo_size(enum omap_plane plane);
401void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high); 393void dispc_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high);
402void dispc_enable_fifomerge(bool enable); 394void dispc_enable_fifomerge(bool enable);
403void dispc_set_burst_size(enum omap_plane plane, 395u32 dispc_get_burst_size(enum omap_plane plane);
404 enum omap_burst_size burst_size); 396void dispc_enable_cpr(enum omap_channel channel, bool enable);
397void dispc_set_cpr_coef(enum omap_channel channel,
398 struct omap_dss_cpr_coefs *coefs);
405 399
406void dispc_set_plane_ba0(enum omap_plane plane, u32 paddr); 400void dispc_set_plane_ba0(enum omap_plane plane, u32 paddr);
407void dispc_set_plane_ba1(enum omap_plane plane, u32 paddr); 401void dispc_set_plane_ba1(enum omap_plane plane, u32 paddr);
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index 1c18888e5df3..b415c4ee621d 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -49,6 +49,9 @@ struct omap_dss_features {
49 const enum omap_color_mode *supported_color_modes; 49 const enum omap_color_mode *supported_color_modes;
50 const char * const *clksrc_names; 50 const char * const *clksrc_names;
51 const struct dss_param_range *dss_params; 51 const struct dss_param_range *dss_params;
52
53 const u32 buffer_size_unit;
54 const u32 burst_size_unit;
52}; 55};
53 56
54/* This struct is assigned to one of the below during initialization */ 57/* This struct is assigned to one of the below during initialization */
@@ -274,6 +277,8 @@ static const struct omap_dss_features omap2_dss_features = {
274 .supported_color_modes = omap2_dss_supported_color_modes, 277 .supported_color_modes = omap2_dss_supported_color_modes,
275 .clksrc_names = omap2_dss_clk_source_names, 278 .clksrc_names = omap2_dss_clk_source_names,
276 .dss_params = omap2_dss_param_range, 279 .dss_params = omap2_dss_param_range,
280 .buffer_size_unit = 1,
281 .burst_size_unit = 8,
277}; 282};
278 283
279/* OMAP3 DSS Features */ 284/* OMAP3 DSS Features */
@@ -286,7 +291,9 @@ static const struct omap_dss_features omap3430_dss_features = {
286 FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE | 291 FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
287 FEAT_FUNCGATED | FEAT_ROWREPEATENABLE | 292 FEAT_FUNCGATED | FEAT_ROWREPEATENABLE |
288 FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF | 293 FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF |
289 FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC, 294 FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC |
295 FEAT_VENC_REQUIRES_TV_DAC_CLK | FEAT_CPR | FEAT_PRELOAD |
296 FEAT_FIR_COEF_V,
290 297
291 .num_mgrs = 2, 298 .num_mgrs = 2,
292 .num_ovls = 3, 299 .num_ovls = 3,
@@ -294,6 +301,8 @@ static const struct omap_dss_features omap3430_dss_features = {
294 .supported_color_modes = omap3_dss_supported_color_modes, 301 .supported_color_modes = omap3_dss_supported_color_modes,
295 .clksrc_names = omap3_dss_clk_source_names, 302 .clksrc_names = omap3_dss_clk_source_names,
296 .dss_params = omap3_dss_param_range, 303 .dss_params = omap3_dss_param_range,
304 .buffer_size_unit = 1,
305 .burst_size_unit = 8,
297}; 306};
298 307
299static const struct omap_dss_features omap3630_dss_features = { 308static const struct omap_dss_features omap3630_dss_features = {
@@ -306,7 +315,8 @@ static const struct omap_dss_features omap3630_dss_features = {
306 FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED | 315 FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED |
307 FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT | 316 FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT |
308 FEAT_RESIZECONF | FEAT_DSI_PLL_PWR_BUG | 317 FEAT_RESIZECONF | FEAT_DSI_PLL_PWR_BUG |
309 FEAT_DSI_PLL_FREQSEL, 318 FEAT_DSI_PLL_FREQSEL | FEAT_CPR | FEAT_PRELOAD |
319 FEAT_FIR_COEF_V,
310 320
311 .num_mgrs = 2, 321 .num_mgrs = 2,
312 .num_ovls = 3, 322 .num_ovls = 3,
@@ -314,6 +324,8 @@ static const struct omap_dss_features omap3630_dss_features = {
314 .supported_color_modes = omap3_dss_supported_color_modes, 324 .supported_color_modes = omap3_dss_supported_color_modes,
315 .clksrc_names = omap3_dss_clk_source_names, 325 .clksrc_names = omap3_dss_clk_source_names,
316 .dss_params = omap3_dss_param_range, 326 .dss_params = omap3_dss_param_range,
327 .buffer_size_unit = 1,
328 .burst_size_unit = 8,
317}; 329};
318 330
319/* OMAP4 DSS Features */ 331/* OMAP4 DSS Features */
@@ -327,7 +339,8 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
327 FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 | 339 FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 |
328 FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC | 340 FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
329 FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH | 341 FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
330 FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2, 342 FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2 |
343 FEAT_CPR | FEAT_PRELOAD | FEAT_FIR_COEF_V,
331 344
332 .num_mgrs = 3, 345 .num_mgrs = 3,
333 .num_ovls = 3, 346 .num_ovls = 3,
@@ -335,6 +348,8 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
335 .supported_color_modes = omap4_dss_supported_color_modes, 348 .supported_color_modes = omap4_dss_supported_color_modes,
336 .clksrc_names = omap4_dss_clk_source_names, 349 .clksrc_names = omap4_dss_clk_source_names,
337 .dss_params = omap4_dss_param_range, 350 .dss_params = omap4_dss_param_range,
351 .buffer_size_unit = 16,
352 .burst_size_unit = 16,
338}; 353};
339 354
340/* For all the other OMAP4 versions */ 355/* For all the other OMAP4 versions */
@@ -348,7 +363,8 @@ static const struct omap_dss_features omap4_dss_features = {
348 FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC | 363 FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
349 FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH | 364 FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
350 FEAT_DSI_GNQ | FEAT_HDMI_CTS_SWMODE | 365 FEAT_DSI_GNQ | FEAT_HDMI_CTS_SWMODE |
351 FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2, 366 FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2 | FEAT_CPR |
367 FEAT_PRELOAD | FEAT_FIR_COEF_V,
352 368
353 .num_mgrs = 3, 369 .num_mgrs = 3,
354 .num_ovls = 3, 370 .num_ovls = 3,
@@ -356,6 +372,8 @@ static const struct omap_dss_features omap4_dss_features = {
356 .supported_color_modes = omap4_dss_supported_color_modes, 372 .supported_color_modes = omap4_dss_supported_color_modes,
357 .clksrc_names = omap4_dss_clk_source_names, 373 .clksrc_names = omap4_dss_clk_source_names,
358 .dss_params = omap4_dss_param_range, 374 .dss_params = omap4_dss_param_range,
375 .buffer_size_unit = 16,
376 .burst_size_unit = 16,
359}; 377};
360 378
361/* Functions returning values related to a DSS feature */ 379/* Functions returning values related to a DSS feature */
@@ -401,6 +419,16 @@ const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id)
401 return omap_current_dss_features->clksrc_names[id]; 419 return omap_current_dss_features->clksrc_names[id];
402} 420}
403 421
422u32 dss_feat_get_buffer_size_unit(void)
423{
424 return omap_current_dss_features->buffer_size_unit;
425}
426
427u32 dss_feat_get_burst_size_unit(void)
428{
429 return omap_current_dss_features->burst_size_unit;
430}
431
404/* DSS has_feature check */ 432/* DSS has_feature check */
405bool dss_has_feature(enum dss_feat_id id) 433bool dss_has_feature(enum dss_feat_id id)
406{ 434{
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index 07b346f7d916..b7398cbcda5f 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -51,6 +51,10 @@ enum dss_feat_id {
51 FEAT_HDMI_CTS_SWMODE = 1 << 19, 51 FEAT_HDMI_CTS_SWMODE = 1 << 19,
52 FEAT_HANDLE_UV_SEPARATE = 1 << 20, 52 FEAT_HANDLE_UV_SEPARATE = 1 << 20,
53 FEAT_ATTR2 = 1 << 21, 53 FEAT_ATTR2 = 1 << 21,
54 FEAT_VENC_REQUIRES_TV_DAC_CLK = 1 << 22,
55 FEAT_CPR = 1 << 23,
56 FEAT_PRELOAD = 1 << 24,
57 FEAT_FIR_COEF_V = 1 << 25,
54}; 58};
55 59
56/* DSS register field id */ 60/* DSS register field id */
@@ -90,6 +94,9 @@ bool dss_feat_color_mode_supported(enum omap_plane plane,
90 enum omap_color_mode color_mode); 94 enum omap_color_mode color_mode);
91const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id); 95const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
92 96
97u32 dss_feat_get_buffer_size_unit(void); /* in bytes */
98u32 dss_feat_get_burst_size_unit(void); /* in bytes */
99
93bool dss_has_feature(enum dss_feat_id id); 100bool dss_has_feature(enum dss_feat_id id);
94void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end); 101void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
95void dss_features_init(void); 102void dss_features_init(void);
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index b0555f4f0a78..256f27a9064a 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -29,6 +29,9 @@
29#include <linux/mutex.h> 29#include <linux/mutex.h>
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/platform_device.h>
33#include <linux/pm_runtime.h>
34#include <linux/clk.h>
32#include <video/omapdss.h> 35#include <video/omapdss.h>
33#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ 36#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
34 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) 37 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
@@ -51,6 +54,9 @@ static struct {
51 u8 edid_set; 54 u8 edid_set;
52 bool custom_set; 55 bool custom_set;
53 struct hdmi_config cfg; 56 struct hdmi_config cfg;
57
58 struct clk *sys_clk;
59 struct clk *hdmi_clk;
54} hdmi; 60} hdmi;
55 61
56/* 62/*
@@ -162,6 +168,27 @@ static inline int hdmi_wait_for_bit_change(const struct hdmi_reg idx,
162 return val; 168 return val;
163} 169}
164 170
171static int hdmi_runtime_get(void)
172{
173 int r;
174
175 DSSDBG("hdmi_runtime_get\n");
176
177 r = pm_runtime_get_sync(&hdmi.pdev->dev);
178 WARN_ON(r < 0);
179 return r < 0 ? r : 0;
180}
181
182static void hdmi_runtime_put(void)
183{
184 int r;
185
186 DSSDBG("hdmi_runtime_put\n");
187
188 r = pm_runtime_put(&hdmi.pdev->dev);
189 WARN_ON(r < 0);
190}
191
165int hdmi_init_display(struct omap_dss_device *dssdev) 192int hdmi_init_display(struct omap_dss_device *dssdev)
166{ 193{
167 DSSDBG("init_display\n"); 194 DSSDBG("init_display\n");
@@ -311,30 +338,11 @@ static int hdmi_phy_init(void)
311 return 0; 338 return 0;
312} 339}
313 340
314static int hdmi_wait_softreset(void)
315{
316 /* reset W1 */
317 REG_FLD_MOD(HDMI_WP_SYSCONFIG, 0x1, 0, 0);
318
319 /* wait till SOFTRESET == 0 */
320 if (hdmi_wait_for_bit_change(HDMI_WP_SYSCONFIG, 0, 0, 0) != 0) {
321 DSSERR("sysconfig reset failed\n");
322 return -ETIMEDOUT;
323 }
324
325 return 0;
326}
327
328static int hdmi_pll_program(struct hdmi_pll_info *fmt) 341static int hdmi_pll_program(struct hdmi_pll_info *fmt)
329{ 342{
330 u16 r = 0; 343 u16 r = 0;
331 enum hdmi_clk_refsel refsel; 344 enum hdmi_clk_refsel refsel;
332 345
333 /* wait for wrapper reset */
334 r = hdmi_wait_softreset();
335 if (r)
336 return r;
337
338 r = hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF); 346 r = hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF);
339 if (r) 347 if (r)
340 return r; 348 return r;
@@ -1064,7 +1072,7 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
1064 unsigned long clkin, refclk; 1072 unsigned long clkin, refclk;
1065 u32 mf; 1073 u32 mf;
1066 1074
1067 clkin = dss_clk_get_rate(DSS_CLK_SYSCK) / 10000; 1075 clkin = clk_get_rate(hdmi.sys_clk) / 10000;
1068 /* 1076 /*
1069 * Input clock is predivided by N + 1 1077 * Input clock is predivided by N + 1
1070 * out put of which is reference clk 1078 * out put of which is reference clk
@@ -1098,16 +1106,6 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
1098 DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd); 1106 DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
1099} 1107}
1100 1108
1101static void hdmi_enable_clocks(int enable)
1102{
1103 if (enable)
1104 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK |
1105 DSS_CLK_SYSCK | DSS_CLK_VIDFCK);
1106 else
1107 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK |
1108 DSS_CLK_SYSCK | DSS_CLK_VIDFCK);
1109}
1110
1111static int hdmi_power_on(struct omap_dss_device *dssdev) 1109static int hdmi_power_on(struct omap_dss_device *dssdev)
1112{ 1110{
1113 int r, code = 0; 1111 int r, code = 0;
@@ -1115,7 +1113,9 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
1115 struct omap_video_timings *p; 1113 struct omap_video_timings *p;
1116 unsigned long phy; 1114 unsigned long phy;
1117 1115
1118 hdmi_enable_clocks(1); 1116 r = hdmi_runtime_get();
1117 if (r)
1118 return r;
1119 1119
1120 dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 0); 1120 dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 0);
1121 1121
@@ -1180,7 +1180,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
1180 1180
1181 return 0; 1181 return 0;
1182err: 1182err:
1183 hdmi_enable_clocks(0); 1183 hdmi_runtime_put();
1184 return -EIO; 1184 return -EIO;
1185} 1185}
1186 1186
@@ -1191,7 +1191,7 @@ static void hdmi_power_off(struct omap_dss_device *dssdev)
1191 hdmi_wp_video_start(0); 1191 hdmi_wp_video_start(0);
1192 hdmi_phy_off(); 1192 hdmi_phy_off();
1193 hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF); 1193 hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF);
1194 hdmi_enable_clocks(0); 1194 hdmi_runtime_put();
1195 1195
1196 hdmi.edid_set = 0; 1196 hdmi.edid_set = 0;
1197} 1197}
@@ -1686,14 +1686,43 @@ static struct snd_soc_dai_driver hdmi_codec_dai_drv = {
1686}; 1686};
1687#endif 1687#endif
1688 1688
1689static int hdmi_get_clocks(struct platform_device *pdev)
1690{
1691 struct clk *clk;
1692
1693 clk = clk_get(&pdev->dev, "sys_clk");
1694 if (IS_ERR(clk)) {
1695 DSSERR("can't get sys_clk\n");
1696 return PTR_ERR(clk);
1697 }
1698
1699 hdmi.sys_clk = clk;
1700
1701 clk = clk_get(&pdev->dev, "dss_48mhz_clk");
1702 if (IS_ERR(clk)) {
1703 DSSERR("can't get hdmi_clk\n");
1704 clk_put(hdmi.sys_clk);
1705 return PTR_ERR(clk);
1706 }
1707
1708 hdmi.hdmi_clk = clk;
1709
1710 return 0;
1711}
1712
1713static void hdmi_put_clocks(void)
1714{
1715 if (hdmi.sys_clk)
1716 clk_put(hdmi.sys_clk);
1717 if (hdmi.hdmi_clk)
1718 clk_put(hdmi.hdmi_clk);
1719}
1720
1689/* HDMI HW IP initialisation */ 1721/* HDMI HW IP initialisation */
1690static int omapdss_hdmihw_probe(struct platform_device *pdev) 1722static int omapdss_hdmihw_probe(struct platform_device *pdev)
1691{ 1723{
1692 struct resource *hdmi_mem; 1724 struct resource *hdmi_mem;
1693#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ 1725 int r;
1694 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
1695 int ret;
1696#endif
1697 1726
1698 hdmi.pdata = pdev->dev.platform_data; 1727 hdmi.pdata = pdev->dev.platform_data;
1699 hdmi.pdev = pdev; 1728 hdmi.pdev = pdev;
@@ -1713,17 +1742,25 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
1713 return -ENOMEM; 1742 return -ENOMEM;
1714 } 1743 }
1715 1744
1745 r = hdmi_get_clocks(pdev);
1746 if (r) {
1747 iounmap(hdmi.base_wp);
1748 return r;
1749 }
1750
1751 pm_runtime_enable(&pdev->dev);
1752
1716 hdmi_panel_init(); 1753 hdmi_panel_init();
1717 1754
1718#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ 1755#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
1719 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) 1756 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
1720 1757
1721 /* Register ASoC codec DAI */ 1758 /* Register ASoC codec DAI */
1722 ret = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv, 1759 r = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
1723 &hdmi_codec_dai_drv, 1); 1760 &hdmi_codec_dai_drv, 1);
1724 if (ret) { 1761 if (r) {
1725 DSSERR("can't register ASoC HDMI audio codec\n"); 1762 DSSERR("can't register ASoC HDMI audio codec\n");
1726 return ret; 1763 return r;
1727 } 1764 }
1728#endif 1765#endif
1729 return 0; 1766 return 0;
@@ -1738,17 +1775,62 @@ static int omapdss_hdmihw_remove(struct platform_device *pdev)
1738 snd_soc_unregister_codec(&pdev->dev); 1775 snd_soc_unregister_codec(&pdev->dev);
1739#endif 1776#endif
1740 1777
1778 pm_runtime_disable(&pdev->dev);
1779
1780 hdmi_put_clocks();
1781
1741 iounmap(hdmi.base_wp); 1782 iounmap(hdmi.base_wp);
1742 1783
1743 return 0; 1784 return 0;
1744} 1785}
1745 1786
1787static int hdmi_runtime_suspend(struct device *dev)
1788{
1789 clk_disable(hdmi.hdmi_clk);
1790 clk_disable(hdmi.sys_clk);
1791
1792 dispc_runtime_put();
1793 dss_runtime_put();
1794
1795 return 0;
1796}
1797
1798static int hdmi_runtime_resume(struct device *dev)
1799{
1800 int r;
1801
1802 r = dss_runtime_get();
1803 if (r < 0)
1804 goto err_get_dss;
1805
1806 r = dispc_runtime_get();
1807 if (r < 0)
1808 goto err_get_dispc;
1809
1810
1811 clk_enable(hdmi.sys_clk);
1812 clk_enable(hdmi.hdmi_clk);
1813
1814 return 0;
1815
1816err_get_dispc:
1817 dss_runtime_put();
1818err_get_dss:
1819 return r;
1820}
1821
1822static const struct dev_pm_ops hdmi_pm_ops = {
1823 .runtime_suspend = hdmi_runtime_suspend,
1824 .runtime_resume = hdmi_runtime_resume,
1825};
1826
1746static struct platform_driver omapdss_hdmihw_driver = { 1827static struct platform_driver omapdss_hdmihw_driver = {
1747 .probe = omapdss_hdmihw_probe, 1828 .probe = omapdss_hdmihw_probe,
1748 .remove = omapdss_hdmihw_remove, 1829 .remove = omapdss_hdmihw_remove,
1749 .driver = { 1830 .driver = {
1750 .name = "omapdss_hdmi", 1831 .name = "omapdss_hdmi",
1751 .owner = THIS_MODULE, 1832 .owner = THIS_MODULE,
1833 .pm = &hdmi_pm_ops,
1752 }, 1834 },
1753}; 1835};
1754 1836
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 9aeea50e33ff..13d72d5c714b 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -275,6 +275,108 @@ static ssize_t manager_alpha_blending_enabled_store(
275 return size; 275 return size;
276} 276}
277 277
278static ssize_t manager_cpr_enable_show(struct omap_overlay_manager *mgr,
279 char *buf)
280{
281 return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.cpr_enable);
282}
283
284static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr,
285 const char *buf, size_t size)
286{
287 struct omap_overlay_manager_info info;
288 int v;
289 int r;
290 bool enable;
291
292 if (!dss_has_feature(FEAT_CPR))
293 return -ENODEV;
294
295 r = kstrtoint(buf, 0, &v);
296 if (r)
297 return r;
298
299 enable = !!v;
300
301 mgr->get_manager_info(mgr, &info);
302
303 if (info.cpr_enable == enable)
304 return size;
305
306 info.cpr_enable = enable;
307
308 r = mgr->set_manager_info(mgr, &info);
309 if (r)
310 return r;
311
312 r = mgr->apply(mgr);
313 if (r)
314 return r;
315
316 return size;
317}
318
319static ssize_t manager_cpr_coef_show(struct omap_overlay_manager *mgr,
320 char *buf)
321{
322 struct omap_overlay_manager_info info;
323
324 mgr->get_manager_info(mgr, &info);
325
326 return snprintf(buf, PAGE_SIZE,
327 "%d %d %d %d %d %d %d %d %d\n",
328 info.cpr_coefs.rr,
329 info.cpr_coefs.rg,
330 info.cpr_coefs.rb,
331 info.cpr_coefs.gr,
332 info.cpr_coefs.gg,
333 info.cpr_coefs.gb,
334 info.cpr_coefs.br,
335 info.cpr_coefs.bg,
336 info.cpr_coefs.bb);
337}
338
339static ssize_t manager_cpr_coef_store(struct omap_overlay_manager *mgr,
340 const char *buf, size_t size)
341{
342 struct omap_overlay_manager_info info;
343 struct omap_dss_cpr_coefs coefs;
344 int r, i;
345 s16 *arr;
346
347 if (!dss_has_feature(FEAT_CPR))
348 return -ENODEV;
349
350 if (sscanf(buf, "%hd %hd %hd %hd %hd %hd %hd %hd %hd",
351 &coefs.rr, &coefs.rg, &coefs.rb,
352 &coefs.gr, &coefs.gg, &coefs.gb,
353 &coefs.br, &coefs.bg, &coefs.bb) != 9)
354 return -EINVAL;
355
356 arr = (s16[]){ coefs.rr, coefs.rg, coefs.rb,
357 coefs.gr, coefs.gg, coefs.gb,
358 coefs.br, coefs.bg, coefs.bb };
359
360 for (i = 0; i < 9; ++i) {
361 if (arr[i] < -512 || arr[i] > 511)
362 return -EINVAL;
363 }
364
365 mgr->get_manager_info(mgr, &info);
366
367 info.cpr_coefs = coefs;
368
369 r = mgr->set_manager_info(mgr, &info);
370 if (r)
371 return r;
372
373 r = mgr->apply(mgr);
374 if (r)
375 return r;
376
377 return size;
378}
379
278struct manager_attribute { 380struct manager_attribute {
279 struct attribute attr; 381 struct attribute attr;
280 ssize_t (*show)(struct omap_overlay_manager *, char *); 382 ssize_t (*show)(struct omap_overlay_manager *, char *);
@@ -300,6 +402,12 @@ static MANAGER_ATTR(trans_key_enabled, S_IRUGO|S_IWUSR,
300static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR, 402static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR,
301 manager_alpha_blending_enabled_show, 403 manager_alpha_blending_enabled_show,
302 manager_alpha_blending_enabled_store); 404 manager_alpha_blending_enabled_store);
405static MANAGER_ATTR(cpr_enable, S_IRUGO|S_IWUSR,
406 manager_cpr_enable_show,
407 manager_cpr_enable_store);
408static MANAGER_ATTR(cpr_coef, S_IRUGO|S_IWUSR,
409 manager_cpr_coef_show,
410 manager_cpr_coef_store);
303 411
304 412
305static struct attribute *manager_sysfs_attrs[] = { 413static struct attribute *manager_sysfs_attrs[] = {
@@ -310,6 +418,8 @@ static struct attribute *manager_sysfs_attrs[] = {
310 &manager_attr_trans_key_value.attr, 418 &manager_attr_trans_key_value.attr,
311 &manager_attr_trans_key_enabled.attr, 419 &manager_attr_trans_key_enabled.attr,
312 &manager_attr_alpha_blending_enabled.attr, 420 &manager_attr_alpha_blending_enabled.attr,
421 &manager_attr_cpr_enable.attr,
422 &manager_attr_cpr_coef.attr,
313 NULL 423 NULL
314}; 424};
315 425
@@ -391,33 +501,14 @@ struct overlay_cache_data {
391 501
392 bool enabled; 502 bool enabled;
393 503
394 u32 paddr; 504 struct omap_overlay_info info;
395 void __iomem *vaddr;
396 u32 p_uv_addr; /* relevant for NV12 format only */
397 u16 screen_width;
398 u16 width;
399 u16 height;
400 enum omap_color_mode color_mode;
401 u8 rotation;
402 enum omap_dss_rotation_type rotation_type;
403 bool mirror;
404
405 u16 pos_x;
406 u16 pos_y;
407 u16 out_width; /* if 0, out_width == width */
408 u16 out_height; /* if 0, out_height == height */
409 u8 global_alpha;
410 u8 pre_mult_alpha;
411 505
412 enum omap_channel channel; 506 enum omap_channel channel;
413 bool replication; 507 bool replication;
414 bool ilace; 508 bool ilace;
415 509
416 enum omap_burst_size burst_size;
417 u32 fifo_low; 510 u32 fifo_low;
418 u32 fifo_high; 511 u32 fifo_high;
419
420 bool manual_update;
421}; 512};
422 513
423struct manager_cache_data { 514struct manager_cache_data {
@@ -429,15 +520,8 @@ struct manager_cache_data {
429 * VSYNC/EVSYNC */ 520 * VSYNC/EVSYNC */
430 bool shadow_dirty; 521 bool shadow_dirty;
431 522
432 u32 default_color; 523 struct omap_overlay_manager_info info;
433
434 enum omap_dss_trans_key_type trans_key_type;
435 u32 trans_key;
436 bool trans_enabled;
437
438 bool alpha_enabled;
439 524
440 bool manual_upd_display;
441 bool manual_update; 525 bool manual_update;
442 bool do_manual_update; 526 bool do_manual_update;
443 527
@@ -539,24 +623,15 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
539 if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) 623 if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
540 return 0; 624 return 0;
541 625
626 if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
627 return 0;
628
542 if (dssdev->type == OMAP_DISPLAY_TYPE_VENC 629 if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
543 || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) { 630 || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
544 irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN; 631 irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
545 } else { 632 } else {
546 if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { 633 irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
547 enum omap_dss_update_mode mode; 634 DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
548 mode = dssdev->driver->get_update_mode(dssdev);
549 if (mode != OMAP_DSS_UPDATE_AUTO)
550 return 0;
551
552 irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
553 DISPC_IRQ_FRAMEDONE
554 : DISPC_IRQ_FRAMEDONE2;
555 } else {
556 irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
557 DISPC_IRQ_VSYNC
558 : DISPC_IRQ_VSYNC2;
559 }
560 } 635 }
561 636
562 mc = &dss_cache.manager_cache[mgr->id]; 637 mc = &dss_cache.manager_cache[mgr->id];
@@ -617,24 +692,15 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
617 if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) 692 if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
618 return 0; 693 return 0;
619 694
695 if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
696 return 0;
697
620 if (dssdev->type == OMAP_DISPLAY_TYPE_VENC 698 if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
621 || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) { 699 || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
622 irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN; 700 irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
623 } else { 701 } else {
624 if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { 702 irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
625 enum omap_dss_update_mode mode; 703 DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
626 mode = dssdev->driver->get_update_mode(dssdev);
627 if (mode != OMAP_DSS_UPDATE_AUTO)
628 return 0;
629
630 irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
631 DISPC_IRQ_FRAMEDONE
632 : DISPC_IRQ_FRAMEDONE2;
633 } else {
634 irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
635 DISPC_IRQ_VSYNC
636 : DISPC_IRQ_VSYNC2;
637 }
638 } 704 }
639 705
640 oc = &dss_cache.overlay_cache[ovl->id]; 706 oc = &dss_cache.overlay_cache[ovl->id];
@@ -720,10 +786,12 @@ static bool rectangle_intersects(int x1, int y1, int w1, int h1,
720 786
721static bool dispc_is_overlay_scaled(struct overlay_cache_data *oc) 787static bool dispc_is_overlay_scaled(struct overlay_cache_data *oc)
722{ 788{
723 if (oc->out_width != 0 && oc->width != oc->out_width) 789 struct omap_overlay_info *oi = &oc->info;
790
791 if (oi->out_width != 0 && oi->width != oi->out_width)
724 return true; 792 return true;
725 793
726 if (oc->out_height != 0 && oc->height != oc->out_height) 794 if (oi->out_height != 0 && oi->height != oi->out_height)
727 return true; 795 return true;
728 796
729 return false; 797 return false;
@@ -733,6 +801,8 @@ static int configure_overlay(enum omap_plane plane)
733{ 801{
734 struct overlay_cache_data *c; 802 struct overlay_cache_data *c;
735 struct manager_cache_data *mc; 803 struct manager_cache_data *mc;
804 struct omap_overlay_info *oi;
805 struct omap_overlay_manager_info *mi;
736 u16 outw, outh; 806 u16 outw, outh;
737 u16 x, y, w, h; 807 u16 x, y, w, h;
738 u32 paddr; 808 u32 paddr;
@@ -742,6 +812,7 @@ static int configure_overlay(enum omap_plane plane)
742 DSSDBGF("%d", plane); 812 DSSDBGF("%d", plane);
743 813
744 c = &dss_cache.overlay_cache[plane]; 814 c = &dss_cache.overlay_cache[plane];
815 oi = &c->info;
745 816
746 if (!c->enabled) { 817 if (!c->enabled) {
747 dispc_enable_plane(plane, 0); 818 dispc_enable_plane(plane, 0);
@@ -749,21 +820,22 @@ static int configure_overlay(enum omap_plane plane)
749 } 820 }
750 821
751 mc = &dss_cache.manager_cache[c->channel]; 822 mc = &dss_cache.manager_cache[c->channel];
823 mi = &mc->info;
752 824
753 x = c->pos_x; 825 x = oi->pos_x;
754 y = c->pos_y; 826 y = oi->pos_y;
755 w = c->width; 827 w = oi->width;
756 h = c->height; 828 h = oi->height;
757 outw = c->out_width == 0 ? c->width : c->out_width; 829 outw = oi->out_width == 0 ? oi->width : oi->out_width;
758 outh = c->out_height == 0 ? c->height : c->out_height; 830 outh = oi->out_height == 0 ? oi->height : oi->out_height;
759 paddr = c->paddr; 831 paddr = oi->paddr;
760 832
761 orig_w = w; 833 orig_w = w;
762 orig_h = h; 834 orig_h = h;
763 orig_outw = outw; 835 orig_outw = outw;
764 orig_outh = outh; 836 orig_outh = outh;
765 837
766 if (c->manual_update && mc->do_manual_update) { 838 if (mc->manual_update && mc->do_manual_update) {
767 unsigned bpp; 839 unsigned bpp;
768 unsigned scale_x_m = w, scale_x_d = outw; 840 unsigned scale_x_m = w, scale_x_d = outw;
769 unsigned scale_y_m = h, scale_y_d = outh; 841 unsigned scale_y_m = h, scale_y_d = outh;
@@ -775,7 +847,7 @@ static int configure_overlay(enum omap_plane plane)
775 return 0; 847 return 0;
776 } 848 }
777 849
778 switch (c->color_mode) { 850 switch (oi->color_mode) {
779 case OMAP_DSS_COLOR_NV12: 851 case OMAP_DSS_COLOR_NV12:
780 bpp = 8; 852 bpp = 8;
781 break; 853 break;
@@ -805,23 +877,23 @@ static int configure_overlay(enum omap_plane plane)
805 BUG(); 877 BUG();
806 } 878 }
807 879
808 if (mc->x > c->pos_x) { 880 if (mc->x > oi->pos_x) {
809 x = 0; 881 x = 0;
810 outw -= (mc->x - c->pos_x); 882 outw -= (mc->x - oi->pos_x);
811 paddr += (mc->x - c->pos_x) * 883 paddr += (mc->x - oi->pos_x) *
812 scale_x_m / scale_x_d * bpp / 8; 884 scale_x_m / scale_x_d * bpp / 8;
813 } else { 885 } else {
814 x = c->pos_x - mc->x; 886 x = oi->pos_x - mc->x;
815 } 887 }
816 888
817 if (mc->y > c->pos_y) { 889 if (mc->y > oi->pos_y) {
818 y = 0; 890 y = 0;
819 outh -= (mc->y - c->pos_y); 891 outh -= (mc->y - oi->pos_y);
820 paddr += (mc->y - c->pos_y) * 892 paddr += (mc->y - oi->pos_y) *
821 scale_y_m / scale_y_d * 893 scale_y_m / scale_y_d *
822 c->screen_width * bpp / 8; 894 oi->screen_width * bpp / 8;
823 } else { 895 } else {
824 y = c->pos_y - mc->y; 896 y = oi->pos_y - mc->y;
825 } 897 }
826 898
827 if (mc->w < (x + outw)) 899 if (mc->w < (x + outw))
@@ -840,8 +912,8 @@ static int configure_overlay(enum omap_plane plane)
840 * the width if the original width was bigger. 912 * the width if the original width was bigger.
841 */ 913 */
842 if ((w & 1) && 914 if ((w & 1) &&
843 (c->color_mode == OMAP_DSS_COLOR_YUV2 || 915 (oi->color_mode == OMAP_DSS_COLOR_YUV2 ||
844 c->color_mode == OMAP_DSS_COLOR_UYVY)) { 916 oi->color_mode == OMAP_DSS_COLOR_UYVY)) {
845 if (orig_w > w) 917 if (orig_w > w)
846 w += 1; 918 w += 1;
847 else 919 else
@@ -851,19 +923,19 @@ static int configure_overlay(enum omap_plane plane)
851 923
852 r = dispc_setup_plane(plane, 924 r = dispc_setup_plane(plane,
853 paddr, 925 paddr,
854 c->screen_width, 926 oi->screen_width,
855 x, y, 927 x, y,
856 w, h, 928 w, h,
857 outw, outh, 929 outw, outh,
858 c->color_mode, 930 oi->color_mode,
859 c->ilace, 931 c->ilace,
860 c->rotation_type, 932 oi->rotation_type,
861 c->rotation, 933 oi->rotation,
862 c->mirror, 934 oi->mirror,
863 c->global_alpha, 935 oi->global_alpha,
864 c->pre_mult_alpha, 936 oi->pre_mult_alpha,
865 c->channel, 937 c->channel,
866 c->p_uv_addr); 938 oi->p_uv_addr);
867 939
868 if (r) { 940 if (r) {
869 /* this shouldn't happen */ 941 /* this shouldn't happen */
@@ -874,8 +946,7 @@ static int configure_overlay(enum omap_plane plane)
874 946
875 dispc_enable_replication(plane, c->replication); 947 dispc_enable_replication(plane, c->replication);
876 948
877 dispc_set_burst_size(plane, c->burst_size); 949 dispc_set_fifo_threshold(plane, c->fifo_low, c->fifo_high);
878 dispc_setup_plane_fifo(plane, c->fifo_low, c->fifo_high);
879 950
880 dispc_enable_plane(plane, 1); 951 dispc_enable_plane(plane, 1);
881 952
@@ -884,16 +955,21 @@ static int configure_overlay(enum omap_plane plane)
884 955
885static void configure_manager(enum omap_channel channel) 956static void configure_manager(enum omap_channel channel)
886{ 957{
887 struct manager_cache_data *c; 958 struct omap_overlay_manager_info *mi;
888 959
889 DSSDBGF("%d", channel); 960 DSSDBGF("%d", channel);
890 961
891 c = &dss_cache.manager_cache[channel]; 962 /* picking info from the cache */
963 mi = &dss_cache.manager_cache[channel].info;
892 964
893 dispc_set_default_color(channel, c->default_color); 965 dispc_set_default_color(channel, mi->default_color);
894 dispc_set_trans_key(channel, c->trans_key_type, c->trans_key); 966 dispc_set_trans_key(channel, mi->trans_key_type, mi->trans_key);
895 dispc_enable_trans_key(channel, c->trans_enabled); 967 dispc_enable_trans_key(channel, mi->trans_enabled);
896 dispc_enable_alpha_blending(channel, c->alpha_enabled); 968 dispc_enable_alpha_blending(channel, mi->alpha_enabled);
969 if (dss_has_feature(FEAT_CPR)) {
970 dispc_enable_cpr(channel, mi->cpr_enable);
971 dispc_set_cpr_coef(channel, &mi->cpr_coefs);
972 }
897} 973}
898 974
899/* configure_dispc() tries to write values from cache to shadow registers. 975/* configure_dispc() tries to write values from cache to shadow registers.
@@ -928,7 +1004,7 @@ static int configure_dispc(void)
928 if (!oc->dirty) 1004 if (!oc->dirty)
929 continue; 1005 continue;
930 1006
931 if (oc->manual_update && !mc->do_manual_update) 1007 if (mc->manual_update && !mc->do_manual_update)
932 continue; 1008 continue;
933 1009
934 if (mgr_busy[oc->channel]) { 1010 if (mgr_busy[oc->channel]) {
@@ -976,7 +1052,7 @@ static int configure_dispc(void)
976 /* We don't need GO with manual update display. LCD iface will 1052 /* We don't need GO with manual update display. LCD iface will
977 * always be turned off after frame, and new settings will be 1053 * always be turned off after frame, and new settings will be
978 * taken in to use at next update */ 1054 * taken in to use at next update */
979 if (!mc->manual_upd_display) 1055 if (!mc->manual_update)
980 dispc_go(i); 1056 dispc_go(i);
981 } 1057 }
982 1058
@@ -1011,6 +1087,7 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
1011{ 1087{
1012 struct overlay_cache_data *oc; 1088 struct overlay_cache_data *oc;
1013 struct manager_cache_data *mc; 1089 struct manager_cache_data *mc;
1090 struct omap_overlay_info *oi;
1014 const int num_ovls = dss_feat_get_num_ovls(); 1091 const int num_ovls = dss_feat_get_num_ovls();
1015 struct omap_overlay_manager *mgr; 1092 struct omap_overlay_manager *mgr;
1016 int i; 1093 int i;
@@ -1053,6 +1130,7 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
1053 unsigned outw, outh; 1130 unsigned outw, outh;
1054 1131
1055 oc = &dss_cache.overlay_cache[i]; 1132 oc = &dss_cache.overlay_cache[i];
1133 oi = &oc->info;
1056 1134
1057 if (oc->channel != mgr->id) 1135 if (oc->channel != mgr->id)
1058 continue; 1136 continue;
@@ -1068,39 +1146,39 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
1068 if (!dispc_is_overlay_scaled(oc)) 1146 if (!dispc_is_overlay_scaled(oc))
1069 continue; 1147 continue;
1070 1148
1071 outw = oc->out_width == 0 ? 1149 outw = oi->out_width == 0 ?
1072 oc->width : oc->out_width; 1150 oi->width : oi->out_width;
1073 outh = oc->out_height == 0 ? 1151 outh = oi->out_height == 0 ?
1074 oc->height : oc->out_height; 1152 oi->height : oi->out_height;
1075 1153
1076 /* is the overlay outside the update region? */ 1154 /* is the overlay outside the update region? */
1077 if (!rectangle_intersects(x, y, w, h, 1155 if (!rectangle_intersects(x, y, w, h,
1078 oc->pos_x, oc->pos_y, 1156 oi->pos_x, oi->pos_y,
1079 outw, outh)) 1157 outw, outh))
1080 continue; 1158 continue;
1081 1159
1082 /* if the overlay totally inside the update region? */ 1160 /* if the overlay totally inside the update region? */
1083 if (rectangle_subset(oc->pos_x, oc->pos_y, outw, outh, 1161 if (rectangle_subset(oi->pos_x, oi->pos_y, outw, outh,
1084 x, y, w, h)) 1162 x, y, w, h))
1085 continue; 1163 continue;
1086 1164
1087 if (x > oc->pos_x) 1165 if (x > oi->pos_x)
1088 x1 = oc->pos_x; 1166 x1 = oi->pos_x;
1089 else 1167 else
1090 x1 = x; 1168 x1 = x;
1091 1169
1092 if (y > oc->pos_y) 1170 if (y > oi->pos_y)
1093 y1 = oc->pos_y; 1171 y1 = oi->pos_y;
1094 else 1172 else
1095 y1 = y; 1173 y1 = y;
1096 1174
1097 if ((x + w) < (oc->pos_x + outw)) 1175 if ((x + w) < (oi->pos_x + outw))
1098 x2 = oc->pos_x + outw; 1176 x2 = oi->pos_x + outw;
1099 else 1177 else
1100 x2 = x + w; 1178 x2 = x + w;
1101 1179
1102 if ((y + h) < (oc->pos_y + outh)) 1180 if ((y + h) < (oi->pos_y + outh))
1103 y2 = oc->pos_y + outh; 1181 y2 = oi->pos_y + outh;
1104 else 1182 else
1105 y2 = y + h; 1183 y2 = y + h;
1106 1184
@@ -1236,6 +1314,10 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
1236 1314
1237 DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name); 1315 DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);
1238 1316
1317 r = dispc_runtime_get();
1318 if (r)
1319 return r;
1320
1239 spin_lock_irqsave(&dss_cache.lock, flags); 1321 spin_lock_irqsave(&dss_cache.lock, flags);
1240 1322
1241 /* Configure overlays */ 1323 /* Configure overlays */
@@ -1275,23 +1357,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
1275 1357
1276 ovl->info_dirty = false; 1358 ovl->info_dirty = false;
1277 oc->dirty = true; 1359 oc->dirty = true;
1278 1360 oc->info = ovl->info;
1279 oc->paddr = ovl->info.paddr;
1280 oc->vaddr = ovl->info.vaddr;
1281 oc->p_uv_addr = ovl->info.p_uv_addr;
1282 oc->screen_width = ovl->info.screen_width;
1283 oc->width = ovl->info.width;
1284 oc->height = ovl->info.height;
1285 oc->color_mode = ovl->info.color_mode;
1286 oc->rotation = ovl->info.rotation;
1287 oc->rotation_type = ovl->info.rotation_type;
1288 oc->mirror = ovl->info.mirror;
1289 oc->pos_x = ovl->info.pos_x;
1290 oc->pos_y = ovl->info.pos_y;
1291 oc->out_width = ovl->info.out_width;
1292 oc->out_height = ovl->info.out_height;
1293 oc->global_alpha = ovl->info.global_alpha;
1294 oc->pre_mult_alpha = ovl->info.pre_mult_alpha;
1295 1361
1296 oc->replication = 1362 oc->replication =
1297 dss_use_replication(dssdev, ovl->info.color_mode); 1363 dss_use_replication(dssdev, ovl->info.color_mode);
@@ -1302,11 +1368,6 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
1302 1368
1303 oc->enabled = true; 1369 oc->enabled = true;
1304 1370
1305 oc->manual_update =
1306 dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
1307 dssdev->driver->get_update_mode(dssdev) !=
1308 OMAP_DSS_UPDATE_AUTO;
1309
1310 ++num_planes_enabled; 1371 ++num_planes_enabled;
1311 } 1372 }
1312 1373
@@ -1334,20 +1395,10 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
1334 1395
1335 mgr->info_dirty = false; 1396 mgr->info_dirty = false;
1336 mc->dirty = true; 1397 mc->dirty = true;
1337 1398 mc->info = mgr->info;
1338 mc->default_color = mgr->info.default_color;
1339 mc->trans_key_type = mgr->info.trans_key_type;
1340 mc->trans_key = mgr->info.trans_key;
1341 mc->trans_enabled = mgr->info.trans_enabled;
1342 mc->alpha_enabled = mgr->info.alpha_enabled;
1343
1344 mc->manual_upd_display =
1345 dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
1346 1399
1347 mc->manual_update = 1400 mc->manual_update =
1348 dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE && 1401 dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
1349 dssdev->driver->get_update_mode(dssdev) !=
1350 OMAP_DSS_UPDATE_AUTO;
1351 } 1402 }
1352 1403
1353 /* XXX TODO: Try to get fifomerge working. The problem is that it 1404 /* XXX TODO: Try to get fifomerge working. The problem is that it
@@ -1368,7 +1419,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
1368 /* Configure overlay fifos */ 1419 /* Configure overlay fifos */
1369 for (i = 0; i < omap_dss_get_num_overlays(); ++i) { 1420 for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
1370 struct omap_dss_device *dssdev; 1421 struct omap_dss_device *dssdev;
1371 u32 size; 1422 u32 size, burst_size;
1372 1423
1373 ovl = omap_dss_get_overlay(i); 1424 ovl = omap_dss_get_overlay(i);
1374 1425
@@ -1386,6 +1437,8 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
1386 if (use_fifomerge) 1437 if (use_fifomerge)
1387 size *= 3; 1438 size *= 3;
1388 1439
1440 burst_size = dispc_get_burst_size(ovl->id);
1441
1389 switch (dssdev->type) { 1442 switch (dssdev->type) {
1390 case OMAP_DISPLAY_TYPE_DPI: 1443 case OMAP_DISPLAY_TYPE_DPI:
1391 case OMAP_DISPLAY_TYPE_DBI: 1444 case OMAP_DISPLAY_TYPE_DBI:
@@ -1393,13 +1446,13 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
1393 case OMAP_DISPLAY_TYPE_VENC: 1446 case OMAP_DISPLAY_TYPE_VENC:
1394 case OMAP_DISPLAY_TYPE_HDMI: 1447 case OMAP_DISPLAY_TYPE_HDMI:
1395 default_get_overlay_fifo_thresholds(ovl->id, size, 1448 default_get_overlay_fifo_thresholds(ovl->id, size,
1396 &oc->burst_size, &oc->fifo_low, 1449 burst_size, &oc->fifo_low,
1397 &oc->fifo_high); 1450 &oc->fifo_high);
1398 break; 1451 break;
1399#ifdef CONFIG_OMAP2_DSS_DSI 1452#ifdef CONFIG_OMAP2_DSS_DSI
1400 case OMAP_DISPLAY_TYPE_DSI: 1453 case OMAP_DISPLAY_TYPE_DSI:
1401 dsi_get_overlay_fifo_thresholds(ovl->id, size, 1454 dsi_get_overlay_fifo_thresholds(ovl->id, size,
1402 &oc->burst_size, &oc->fifo_low, 1455 burst_size, &oc->fifo_low,
1403 &oc->fifo_high); 1456 &oc->fifo_high);
1404 break; 1457 break;
1405#endif 1458#endif
@@ -1409,7 +1462,6 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
1409 } 1462 }
1410 1463
1411 r = 0; 1464 r = 0;
1412 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
1413 if (!dss_cache.irq_enabled) { 1465 if (!dss_cache.irq_enabled) {
1414 u32 mask; 1466 u32 mask;
1415 1467
@@ -1422,10 +1474,11 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
1422 dss_cache.irq_enabled = true; 1474 dss_cache.irq_enabled = true;
1423 } 1475 }
1424 configure_dispc(); 1476 configure_dispc();
1425 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
1426 1477
1427 spin_unlock_irqrestore(&dss_cache.lock, flags); 1478 spin_unlock_irqrestore(&dss_cache.lock, flags);
1428 1479
1480 dispc_runtime_put();
1481
1429 return r; 1482 return r;
1430} 1483}
1431 1484
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 0f08025b1f0e..c84380c53c39 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -84,32 +84,42 @@ static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf,
84 84
85 old_mgr = ovl->manager; 85 old_mgr = ovl->manager;
86 86
87 r = dispc_runtime_get();
88 if (r)
89 return r;
90
87 /* detach old manager */ 91 /* detach old manager */
88 if (old_mgr) { 92 if (old_mgr) {
89 r = ovl->unset_manager(ovl); 93 r = ovl->unset_manager(ovl);
90 if (r) { 94 if (r) {
91 DSSERR("detach failed\n"); 95 DSSERR("detach failed\n");
92 return r; 96 goto err;
93 } 97 }
94 98
95 r = old_mgr->apply(old_mgr); 99 r = old_mgr->apply(old_mgr);
96 if (r) 100 if (r)
97 return r; 101 goto err;
98 } 102 }
99 103
100 if (mgr) { 104 if (mgr) {
101 r = ovl->set_manager(ovl, mgr); 105 r = ovl->set_manager(ovl, mgr);
102 if (r) { 106 if (r) {
103 DSSERR("Failed to attach overlay\n"); 107 DSSERR("Failed to attach overlay\n");
104 return r; 108 goto err;
105 } 109 }
106 110
107 r = mgr->apply(mgr); 111 r = mgr->apply(mgr);
108 if (r) 112 if (r)
109 return r; 113 goto err;
110 } 114 }
111 115
116 dispc_runtime_put();
117
112 return size; 118 return size;
119
120err:
121 dispc_runtime_put();
122 return r;
113} 123}
114 124
115static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf) 125static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf)
@@ -238,6 +248,9 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
238 u8 alpha; 248 u8 alpha;
239 struct omap_overlay_info info; 249 struct omap_overlay_info info;
240 250
251 if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
252 return -ENODEV;
253
241 r = kstrtou8(buf, 0, &alpha); 254 r = kstrtou8(buf, 0, &alpha);
242 if (r) 255 if (r)
243 return r; 256 return r;
@@ -504,7 +517,6 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
504 517
505 ovl->manager = mgr; 518 ovl->manager = mgr;
506 519
507 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
508 /* XXX: When there is an overlay on a DSI manual update display, and 520 /* XXX: When there is an overlay on a DSI manual update display, and
509 * the overlay is first disabled, then moved to tv, and enabled, we 521 * the overlay is first disabled, then moved to tv, and enabled, we
510 * seem to get SYNC_LOST_DIGIT error. 522 * seem to get SYNC_LOST_DIGIT error.
@@ -518,7 +530,6 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
518 * the overlay, but before moving the overlay to TV. 530 * the overlay, but before moving the overlay to TV.
519 */ 531 */
520 dispc_set_channel_out(ovl->id, mgr->id); 532 dispc_set_channel_out(ovl->id, mgr->id);
521 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
522 533
523 return 0; 534 return 0;
524} 535}
@@ -719,6 +730,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
719 } 730 }
720 731
721 if (mgr) { 732 if (mgr) {
733 dispc_runtime_get();
734
722 for (i = 0; i < dss_feat_get_num_ovls(); i++) { 735 for (i = 0; i < dss_feat_get_num_ovls(); i++) {
723 struct omap_overlay *ovl; 736 struct omap_overlay *ovl;
724 ovl = omap_dss_get_overlay(i); 737 ovl = omap_dss_get_overlay(i);
@@ -728,6 +741,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
728 omap_dss_set_manager(ovl, mgr); 741 omap_dss_set_manager(ovl, mgr);
729 } 742 }
730 } 743 }
744
745 dispc_runtime_put();
731 } 746 }
732} 747}
733 748
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index c06fbe0bc678..39f4c597026a 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -33,6 +33,8 @@
33#include <linux/hrtimer.h> 33#include <linux/hrtimer.h>
34#include <linux/seq_file.h> 34#include <linux/seq_file.h>
35#include <linux/semaphore.h> 35#include <linux/semaphore.h>
36#include <linux/platform_device.h>
37#include <linux/pm_runtime.h>
36 38
37#include <video/omapdss.h> 39#include <video/omapdss.h>
38#include "dss.h" 40#include "dss.h"
@@ -120,12 +122,25 @@ static inline u32 rfbi_read_reg(const struct rfbi_reg idx)
120 return __raw_readl(rfbi.base + idx.idx); 122 return __raw_readl(rfbi.base + idx.idx);
121} 123}
122 124
123static void rfbi_enable_clocks(bool enable) 125static int rfbi_runtime_get(void)
124{ 126{
125 if (enable) 127 int r;
126 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); 128
127 else 129 DSSDBG("rfbi_runtime_get\n");
128 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 130
131 r = pm_runtime_get_sync(&rfbi.pdev->dev);
132 WARN_ON(r < 0);
133 return r < 0 ? r : 0;
134}
135
136static void rfbi_runtime_put(void)
137{
138 int r;
139
140 DSSDBG("rfbi_runtime_put\n");
141
142 r = pm_runtime_put(&rfbi.pdev->dev);
143 WARN_ON(r < 0);
129} 144}
130 145
131void rfbi_bus_lock(void) 146void rfbi_bus_lock(void)
@@ -805,7 +820,8 @@ void rfbi_dump_regs(struct seq_file *s)
805{ 820{
806#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r)) 821#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
807 822
808 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); 823 if (rfbi_runtime_get())
824 return;
809 825
810 DUMPREG(RFBI_REVISION); 826 DUMPREG(RFBI_REVISION);
811 DUMPREG(RFBI_SYSCONFIG); 827 DUMPREG(RFBI_SYSCONFIG);
@@ -836,7 +852,7 @@ void rfbi_dump_regs(struct seq_file *s)
836 DUMPREG(RFBI_VSYNC_WIDTH); 852 DUMPREG(RFBI_VSYNC_WIDTH);
837 DUMPREG(RFBI_HSYNC_WIDTH); 853 DUMPREG(RFBI_HSYNC_WIDTH);
838 854
839 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 855 rfbi_runtime_put();
840#undef DUMPREG 856#undef DUMPREG
841} 857}
842 858
@@ -844,7 +860,9 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
844{ 860{
845 int r; 861 int r;
846 862
847 rfbi_enable_clocks(1); 863 r = rfbi_runtime_get();
864 if (r)
865 return r;
848 866
849 r = omap_dss_start_device(dssdev); 867 r = omap_dss_start_device(dssdev);
850 if (r) { 868 if (r) {
@@ -879,6 +897,7 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
879err1: 897err1:
880 omap_dss_stop_device(dssdev); 898 omap_dss_stop_device(dssdev);
881err0: 899err0:
900 rfbi_runtime_put();
882 return r; 901 return r;
883} 902}
884EXPORT_SYMBOL(omapdss_rfbi_display_enable); 903EXPORT_SYMBOL(omapdss_rfbi_display_enable);
@@ -889,7 +908,7 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
889 DISPC_IRQ_FRAMEDONE); 908 DISPC_IRQ_FRAMEDONE);
890 omap_dss_stop_device(dssdev); 909 omap_dss_stop_device(dssdev);
891 910
892 rfbi_enable_clocks(0); 911 rfbi_runtime_put();
893} 912}
894EXPORT_SYMBOL(omapdss_rfbi_display_disable); 913EXPORT_SYMBOL(omapdss_rfbi_display_disable);
895 914
@@ -904,8 +923,9 @@ int rfbi_init_display(struct omap_dss_device *dssdev)
904static int omap_rfbihw_probe(struct platform_device *pdev) 923static int omap_rfbihw_probe(struct platform_device *pdev)
905{ 924{
906 u32 rev; 925 u32 rev;
907 u32 l;
908 struct resource *rfbi_mem; 926 struct resource *rfbi_mem;
927 struct clk *clk;
928 int r;
909 929
910 rfbi.pdev = pdev; 930 rfbi.pdev = pdev;
911 931
@@ -914,46 +934,102 @@ static int omap_rfbihw_probe(struct platform_device *pdev)
914 rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0); 934 rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0);
915 if (!rfbi_mem) { 935 if (!rfbi_mem) {
916 DSSERR("can't get IORESOURCE_MEM RFBI\n"); 936 DSSERR("can't get IORESOURCE_MEM RFBI\n");
917 return -EINVAL; 937 r = -EINVAL;
938 goto err_ioremap;
918 } 939 }
919 rfbi.base = ioremap(rfbi_mem->start, resource_size(rfbi_mem)); 940 rfbi.base = ioremap(rfbi_mem->start, resource_size(rfbi_mem));
920 if (!rfbi.base) { 941 if (!rfbi.base) {
921 DSSERR("can't ioremap RFBI\n"); 942 DSSERR("can't ioremap RFBI\n");
922 return -ENOMEM; 943 r = -ENOMEM;
944 goto err_ioremap;
923 } 945 }
924 946
925 rfbi_enable_clocks(1); 947 pm_runtime_enable(&pdev->dev);
948
949 r = rfbi_runtime_get();
950 if (r)
951 goto err_get_rfbi;
926 952
927 msleep(10); 953 msleep(10);
928 954
929 rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000; 955 if (cpu_is_omap24xx() || cpu_is_omap34xx() || cpu_is_omap3630())
956 clk = dss_get_ick();
957 else
958 clk = clk_get(&pdev->dev, "ick");
959 if (IS_ERR(clk)) {
960 DSSERR("can't get ick\n");
961 r = PTR_ERR(clk);
962 goto err_get_ick;
963 }
964
965 rfbi.l4_khz = clk_get_rate(clk) / 1000;
930 966
931 /* Enable autoidle and smart-idle */ 967 clk_put(clk);
932 l = rfbi_read_reg(RFBI_SYSCONFIG);
933 l |= (1 << 0) | (2 << 3);
934 rfbi_write_reg(RFBI_SYSCONFIG, l);
935 968
936 rev = rfbi_read_reg(RFBI_REVISION); 969 rev = rfbi_read_reg(RFBI_REVISION);
937 dev_dbg(&pdev->dev, "OMAP RFBI rev %d.%d\n", 970 dev_dbg(&pdev->dev, "OMAP RFBI rev %d.%d\n",
938 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); 971 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
939 972
940 rfbi_enable_clocks(0); 973 rfbi_runtime_put();
941 974
942 return 0; 975 return 0;
976
977err_get_ick:
978 rfbi_runtime_put();
979err_get_rfbi:
980 pm_runtime_disable(&pdev->dev);
981 iounmap(rfbi.base);
982err_ioremap:
983 return r;
943} 984}
944 985
945static int omap_rfbihw_remove(struct platform_device *pdev) 986static int omap_rfbihw_remove(struct platform_device *pdev)
946{ 987{
988 pm_runtime_disable(&pdev->dev);
947 iounmap(rfbi.base); 989 iounmap(rfbi.base);
948 return 0; 990 return 0;
949} 991}
950 992
993static int rfbi_runtime_suspend(struct device *dev)
994{
995 dispc_runtime_put();
996 dss_runtime_put();
997
998 return 0;
999}
1000
1001static int rfbi_runtime_resume(struct device *dev)
1002{
1003 int r;
1004
1005 r = dss_runtime_get();
1006 if (r < 0)
1007 goto err_get_dss;
1008
1009 r = dispc_runtime_get();
1010 if (r < 0)
1011 goto err_get_dispc;
1012
1013 return 0;
1014
1015err_get_dispc:
1016 dss_runtime_put();
1017err_get_dss:
1018 return r;
1019}
1020
1021static const struct dev_pm_ops rfbi_pm_ops = {
1022 .runtime_suspend = rfbi_runtime_suspend,
1023 .runtime_resume = rfbi_runtime_resume,
1024};
1025
951static struct platform_driver omap_rfbihw_driver = { 1026static struct platform_driver omap_rfbihw_driver = {
952 .probe = omap_rfbihw_probe, 1027 .probe = omap_rfbihw_probe,
953 .remove = omap_rfbihw_remove, 1028 .remove = omap_rfbihw_remove,
954 .driver = { 1029 .driver = {
955 .name = "omapdss_rfbi", 1030 .name = "omapdss_rfbi",
956 .owner = THIS_MODULE, 1031 .owner = THIS_MODULE,
1032 .pm = &rfbi_pm_ops,
957 }, 1033 },
958}; 1034};
959 1035
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 0bd4b0350f80..3a688c871a45 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -20,13 +20,11 @@
20#define DSS_SUBSYS_NAME "SDI" 20#define DSS_SUBSYS_NAME "SDI"
21 21
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/clk.h>
24#include <linux/delay.h> 23#include <linux/delay.h>
25#include <linux/err.h> 24#include <linux/err.h>
26#include <linux/regulator/consumer.h> 25#include <linux/regulator/consumer.h>
27 26
28#include <video/omapdss.h> 27#include <video/omapdss.h>
29#include <plat/cpu.h>
30#include "dss.h" 28#include "dss.h"
31 29
32static struct { 30static struct {
@@ -60,14 +58,20 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
60 r = omap_dss_start_device(dssdev); 58 r = omap_dss_start_device(dssdev);
61 if (r) { 59 if (r) {
62 DSSERR("failed to start device\n"); 60 DSSERR("failed to start device\n");
63 goto err0; 61 goto err_start_dev;
64 } 62 }
65 63
66 r = regulator_enable(sdi.vdds_sdi_reg); 64 r = regulator_enable(sdi.vdds_sdi_reg);
67 if (r) 65 if (r)
68 goto err1; 66 goto err_reg_enable;
69 67
70 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); 68 r = dss_runtime_get();
69 if (r)
70 goto err_get_dss;
71
72 r = dispc_runtime_get();
73 if (r)
74 goto err_get_dispc;
71 75
72 sdi_basic_init(dssdev); 76 sdi_basic_init(dssdev);
73 77
@@ -80,7 +84,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
80 r = dss_calc_clock_div(1, t->pixel_clock * 1000, 84 r = dss_calc_clock_div(1, t->pixel_clock * 1000,
81 &dss_cinfo, &dispc_cinfo); 85 &dss_cinfo, &dispc_cinfo);
82 if (r) 86 if (r)
83 goto err2; 87 goto err_calc_clock_div;
84 88
85 fck = dss_cinfo.fck; 89 fck = dss_cinfo.fck;
86 lck_div = dispc_cinfo.lck_div; 90 lck_div = dispc_cinfo.lck_div;
@@ -101,27 +105,34 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
101 105
102 r = dss_set_clock_div(&dss_cinfo); 106 r = dss_set_clock_div(&dss_cinfo);
103 if (r) 107 if (r)
104 goto err2; 108 goto err_set_dss_clock_div;
105 109
106 r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo); 110 r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
107 if (r) 111 if (r)
108 goto err2; 112 goto err_set_dispc_clock_div;
109 113
110 dss_sdi_init(dssdev->phy.sdi.datapairs); 114 dss_sdi_init(dssdev->phy.sdi.datapairs);
111 r = dss_sdi_enable(); 115 r = dss_sdi_enable();
112 if (r) 116 if (r)
113 goto err1; 117 goto err_sdi_enable;
114 mdelay(2); 118 mdelay(2);
115 119
116 dssdev->manager->enable(dssdev->manager); 120 dssdev->manager->enable(dssdev->manager);
117 121
118 return 0; 122 return 0;
119err2: 123
120 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 124err_sdi_enable:
125err_set_dispc_clock_div:
126err_set_dss_clock_div:
127err_calc_clock_div:
128 dispc_runtime_put();
129err_get_dispc:
130 dss_runtime_put();
131err_get_dss:
121 regulator_disable(sdi.vdds_sdi_reg); 132 regulator_disable(sdi.vdds_sdi_reg);
122err1: 133err_reg_enable:
123 omap_dss_stop_device(dssdev); 134 omap_dss_stop_device(dssdev);
124err0: 135err_start_dev:
125 return r; 136 return r;
126} 137}
127EXPORT_SYMBOL(omapdss_sdi_display_enable); 138EXPORT_SYMBOL(omapdss_sdi_display_enable);
@@ -132,7 +143,8 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
132 143
133 dss_sdi_disable(); 144 dss_sdi_disable();
134 145
135 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 146 dispc_runtime_put();
147 dss_runtime_put();
136 148
137 regulator_disable(sdi.vdds_sdi_reg); 149 regulator_disable(sdi.vdds_sdi_reg);
138 150
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 980f919ed987..173c66430dad 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -33,11 +33,13 @@
33#include <linux/seq_file.h> 33#include <linux/seq_file.h>
34#include <linux/platform_device.h> 34#include <linux/platform_device.h>
35#include <linux/regulator/consumer.h> 35#include <linux/regulator/consumer.h>
36#include <linux/pm_runtime.h>
36 37
37#include <video/omapdss.h> 38#include <video/omapdss.h>
38#include <plat/cpu.h> 39#include <plat/cpu.h>
39 40
40#include "dss.h" 41#include "dss.h"
42#include "dss_features.h"
41 43
42/* Venc registers */ 44/* Venc registers */
43#define VENC_REV_ID 0x00 45#define VENC_REV_ID 0x00
@@ -292,6 +294,9 @@ static struct {
292 struct mutex venc_lock; 294 struct mutex venc_lock;
293 u32 wss_data; 295 u32 wss_data;
294 struct regulator *vdda_dac_reg; 296 struct regulator *vdda_dac_reg;
297
298 struct clk *tv_clk;
299 struct clk *tv_dac_clk;
295} venc; 300} venc;
296 301
297static inline void venc_write_reg(int idx, u32 val) 302static inline void venc_write_reg(int idx, u32 val)
@@ -380,14 +385,25 @@ static void venc_reset(void)
380#endif 385#endif
381} 386}
382 387
383static void venc_enable_clocks(int enable) 388static int venc_runtime_get(void)
389{
390 int r;
391
392 DSSDBG("venc_runtime_get\n");
393
394 r = pm_runtime_get_sync(&venc.pdev->dev);
395 WARN_ON(r < 0);
396 return r < 0 ? r : 0;
397}
398
399static void venc_runtime_put(void)
384{ 400{
385 if (enable) 401 int r;
386 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK | 402
387 DSS_CLK_VIDFCK); 403 DSSDBG("venc_runtime_put\n");
388 else 404
389 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK | 405 r = pm_runtime_put(&venc.pdev->dev);
390 DSS_CLK_VIDFCK); 406 WARN_ON(r < 0);
391} 407}
392 408
393static const struct venc_config *venc_timings_to_config( 409static const struct venc_config *venc_timings_to_config(
@@ -406,8 +422,6 @@ static void venc_power_on(struct omap_dss_device *dssdev)
406{ 422{
407 u32 l; 423 u32 l;
408 424
409 venc_enable_clocks(1);
410
411 venc_reset(); 425 venc_reset();
412 venc_write_config(venc_timings_to_config(&dssdev->panel.timings)); 426 venc_write_config(venc_timings_to_config(&dssdev->panel.timings));
413 427
@@ -448,8 +462,6 @@ static void venc_power_off(struct omap_dss_device *dssdev)
448 dssdev->platform_disable(dssdev); 462 dssdev->platform_disable(dssdev);
449 463
450 regulator_disable(venc.vdda_dac_reg); 464 regulator_disable(venc.vdda_dac_reg);
451
452 venc_enable_clocks(0);
453} 465}
454 466
455 467
@@ -487,6 +499,10 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
487 goto err1; 499 goto err1;
488 } 500 }
489 501
502 r = venc_runtime_get();
503 if (r)
504 goto err1;
505
490 venc_power_on(dssdev); 506 venc_power_on(dssdev);
491 507
492 venc.wss_data = 0; 508 venc.wss_data = 0;
@@ -520,6 +536,8 @@ static void venc_panel_disable(struct omap_dss_device *dssdev)
520 536
521 venc_power_off(dssdev); 537 venc_power_off(dssdev);
522 538
539 venc_runtime_put();
540
523 dssdev->state = OMAP_DSS_DISPLAY_DISABLED; 541 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
524 542
525 omap_dss_stop_device(dssdev); 543 omap_dss_stop_device(dssdev);
@@ -538,20 +556,6 @@ static int venc_panel_resume(struct omap_dss_device *dssdev)
538 return venc_panel_enable(dssdev); 556 return venc_panel_enable(dssdev);
539} 557}
540 558
541static enum omap_dss_update_mode venc_get_update_mode(
542 struct omap_dss_device *dssdev)
543{
544 return OMAP_DSS_UPDATE_AUTO;
545}
546
547static int venc_set_update_mode(struct omap_dss_device *dssdev,
548 enum omap_dss_update_mode mode)
549{
550 if (mode != OMAP_DSS_UPDATE_AUTO)
551 return -EINVAL;
552 return 0;
553}
554
555static void venc_get_timings(struct omap_dss_device *dssdev, 559static void venc_get_timings(struct omap_dss_device *dssdev,
556 struct omap_video_timings *timings) 560 struct omap_video_timings *timings)
557{ 561{
@@ -598,6 +602,7 @@ static u32 venc_get_wss(struct omap_dss_device *dssdev)
598static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss) 602static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
599{ 603{
600 const struct venc_config *config; 604 const struct venc_config *config;
605 int r;
601 606
602 DSSDBG("venc_set_wss\n"); 607 DSSDBG("venc_set_wss\n");
603 608
@@ -608,16 +613,19 @@ static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
608 /* Invert due to VENC_L21_WC_CTL:INV=1 */ 613 /* Invert due to VENC_L21_WC_CTL:INV=1 */
609 venc.wss_data = (wss ^ 0xfffff) << 8; 614 venc.wss_data = (wss ^ 0xfffff) << 8;
610 615
611 venc_enable_clocks(1); 616 r = venc_runtime_get();
617 if (r)
618 goto err;
612 619
613 venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data | 620 venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
614 venc.wss_data); 621 venc.wss_data);
615 622
616 venc_enable_clocks(0); 623 venc_runtime_put();
617 624
625err:
618 mutex_unlock(&venc.venc_lock); 626 mutex_unlock(&venc.venc_lock);
619 627
620 return 0; 628 return r;
621} 629}
622 630
623static struct omap_dss_driver venc_driver = { 631static struct omap_dss_driver venc_driver = {
@@ -632,9 +640,6 @@ static struct omap_dss_driver venc_driver = {
632 .get_resolution = omapdss_default_get_resolution, 640 .get_resolution = omapdss_default_get_resolution,
633 .get_recommended_bpp = omapdss_default_get_recommended_bpp, 641 .get_recommended_bpp = omapdss_default_get_recommended_bpp,
634 642
635 .set_update_mode = venc_set_update_mode,
636 .get_update_mode = venc_get_update_mode,
637
638 .get_timings = venc_get_timings, 643 .get_timings = venc_get_timings,
639 .set_timings = venc_set_timings, 644 .set_timings = venc_set_timings,
640 .check_timings = venc_check_timings, 645 .check_timings = venc_check_timings,
@@ -673,7 +678,8 @@ void venc_dump_regs(struct seq_file *s)
673{ 678{
674#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r)) 679#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
675 680
676 venc_enable_clocks(1); 681 if (venc_runtime_get())
682 return;
677 683
678 DUMPREG(VENC_F_CONTROL); 684 DUMPREG(VENC_F_CONTROL);
679 DUMPREG(VENC_VIDOUT_CTRL); 685 DUMPREG(VENC_VIDOUT_CTRL);
@@ -717,16 +723,56 @@ void venc_dump_regs(struct seq_file *s)
717 DUMPREG(VENC_OUTPUT_CONTROL); 723 DUMPREG(VENC_OUTPUT_CONTROL);
718 DUMPREG(VENC_OUTPUT_TEST); 724 DUMPREG(VENC_OUTPUT_TEST);
719 725
720 venc_enable_clocks(0); 726 venc_runtime_put();
721 727
722#undef DUMPREG 728#undef DUMPREG
723} 729}
724 730
731static int venc_get_clocks(struct platform_device *pdev)
732{
733 struct clk *clk;
734
735 clk = clk_get(&pdev->dev, "fck");
736 if (IS_ERR(clk)) {
737 DSSERR("can't get fck\n");
738 return PTR_ERR(clk);
739 }
740
741 venc.tv_clk = clk;
742
743 if (dss_has_feature(FEAT_VENC_REQUIRES_TV_DAC_CLK)) {
744 if (cpu_is_omap34xx() || cpu_is_omap3630())
745 clk = clk_get(&pdev->dev, "dss_96m_fck");
746 else
747 clk = clk_get(&pdev->dev, "tv_dac_clk");
748 if (IS_ERR(clk)) {
749 DSSERR("can't get tv_dac_clk\n");
750 clk_put(venc.tv_clk);
751 return PTR_ERR(clk);
752 }
753 } else {
754 clk = NULL;
755 }
756
757 venc.tv_dac_clk = clk;
758
759 return 0;
760}
761
762static void venc_put_clocks(void)
763{
764 if (venc.tv_clk)
765 clk_put(venc.tv_clk);
766 if (venc.tv_dac_clk)
767 clk_put(venc.tv_dac_clk);
768}
769
725/* VENC HW IP initialisation */ 770/* VENC HW IP initialisation */
726static int omap_venchw_probe(struct platform_device *pdev) 771static int omap_venchw_probe(struct platform_device *pdev)
727{ 772{
728 u8 rev_id; 773 u8 rev_id;
729 struct resource *venc_mem; 774 struct resource *venc_mem;
775 int r;
730 776
731 venc.pdev = pdev; 777 venc.pdev = pdev;
732 778
@@ -737,22 +783,40 @@ static int omap_venchw_probe(struct platform_device *pdev)
737 venc_mem = platform_get_resource(venc.pdev, IORESOURCE_MEM, 0); 783 venc_mem = platform_get_resource(venc.pdev, IORESOURCE_MEM, 0);
738 if (!venc_mem) { 784 if (!venc_mem) {
739 DSSERR("can't get IORESOURCE_MEM VENC\n"); 785 DSSERR("can't get IORESOURCE_MEM VENC\n");
740 return -EINVAL; 786 r = -EINVAL;
787 goto err_ioremap;
741 } 788 }
742 venc.base = ioremap(venc_mem->start, resource_size(venc_mem)); 789 venc.base = ioremap(venc_mem->start, resource_size(venc_mem));
743 if (!venc.base) { 790 if (!venc.base) {
744 DSSERR("can't ioremap VENC\n"); 791 DSSERR("can't ioremap VENC\n");
745 return -ENOMEM; 792 r = -ENOMEM;
793 goto err_ioremap;
746 } 794 }
747 795
748 venc_enable_clocks(1); 796 r = venc_get_clocks(pdev);
797 if (r)
798 goto err_get_clk;
799
800 pm_runtime_enable(&pdev->dev);
801
802 r = venc_runtime_get();
803 if (r)
804 goto err_get_venc;
749 805
750 rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff); 806 rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
751 dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id); 807 dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id);
752 808
753 venc_enable_clocks(0); 809 venc_runtime_put();
754 810
755 return omap_dss_register_driver(&venc_driver); 811 return omap_dss_register_driver(&venc_driver);
812
813err_get_venc:
814 pm_runtime_disable(&pdev->dev);
815 venc_put_clocks();
816err_get_clk:
817 iounmap(venc.base);
818err_ioremap:
819 return r;
756} 820}
757 821
758static int omap_venchw_remove(struct platform_device *pdev) 822static int omap_venchw_remove(struct platform_device *pdev)
@@ -763,16 +827,61 @@ static int omap_venchw_remove(struct platform_device *pdev)
763 } 827 }
764 omap_dss_unregister_driver(&venc_driver); 828 omap_dss_unregister_driver(&venc_driver);
765 829
830 pm_runtime_disable(&pdev->dev);
831 venc_put_clocks();
832
766 iounmap(venc.base); 833 iounmap(venc.base);
767 return 0; 834 return 0;
768} 835}
769 836
837static int venc_runtime_suspend(struct device *dev)
838{
839 if (venc.tv_dac_clk)
840 clk_disable(venc.tv_dac_clk);
841 clk_disable(venc.tv_clk);
842
843 dispc_runtime_put();
844 dss_runtime_put();
845
846 return 0;
847}
848
849static int venc_runtime_resume(struct device *dev)
850{
851 int r;
852
853 r = dss_runtime_get();
854 if (r < 0)
855 goto err_get_dss;
856
857 r = dispc_runtime_get();
858 if (r < 0)
859 goto err_get_dispc;
860
861 clk_enable(venc.tv_clk);
862 if (venc.tv_dac_clk)
863 clk_enable(venc.tv_dac_clk);
864
865 return 0;
866
867err_get_dispc:
868 dss_runtime_put();
869err_get_dss:
870 return r;
871}
872
873static const struct dev_pm_ops venc_pm_ops = {
874 .runtime_suspend = venc_runtime_suspend,
875 .runtime_resume = venc_runtime_resume,
876};
877
770static struct platform_driver omap_venchw_driver = { 878static struct platform_driver omap_venchw_driver = {
771 .probe = omap_venchw_probe, 879 .probe = omap_venchw_probe,
772 .remove = omap_venchw_remove, 880 .remove = omap_venchw_remove,
773 .driver = { 881 .driver = {
774 .name = "omapdss_venc", 882 .name = "omapdss_venc",
775 .owner = THIS_MODULE, 883 .owner = THIS_MODULE,
884 .pm = &venc_pm_ops,
776 }, 885 },
777}; 886};
778 887
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index cff450392b79..6b1ac23dbbd3 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -316,67 +316,67 @@ int omapfb_update_window(struct fb_info *fbi,
316} 316}
317EXPORT_SYMBOL(omapfb_update_window); 317EXPORT_SYMBOL(omapfb_update_window);
318 318
319static int omapfb_set_update_mode(struct fb_info *fbi, 319int omapfb_set_update_mode(struct fb_info *fbi,
320 enum omapfb_update_mode mode) 320 enum omapfb_update_mode mode)
321{ 321{
322 struct omap_dss_device *display = fb2display(fbi); 322 struct omap_dss_device *display = fb2display(fbi);
323 enum omap_dss_update_mode um; 323 struct omapfb_info *ofbi = FB2OFB(fbi);
324 struct omapfb2_device *fbdev = ofbi->fbdev;
325 struct omapfb_display_data *d;
324 int r; 326 int r;
325 327
326 if (!display || !display->driver->set_update_mode) 328 if (!display)
327 return -EINVAL; 329 return -EINVAL;
328 330
329 switch (mode) { 331 if (mode != OMAPFB_AUTO_UPDATE && mode != OMAPFB_MANUAL_UPDATE)
330 case OMAPFB_UPDATE_DISABLED: 332 return -EINVAL;
331 um = OMAP_DSS_UPDATE_DISABLED;
332 break;
333 333
334 case OMAPFB_AUTO_UPDATE: 334 omapfb_lock(fbdev);
335 um = OMAP_DSS_UPDATE_AUTO;
336 break;
337 335
338 case OMAPFB_MANUAL_UPDATE: 336 d = get_display_data(fbdev, display);
339 um = OMAP_DSS_UPDATE_MANUAL;
340 break;
341 337
342 default: 338 if (d->update_mode == mode) {
343 return -EINVAL; 339 omapfb_unlock(fbdev);
340 return 0;
344 } 341 }
345 342
346 r = display->driver->set_update_mode(display, um); 343 r = 0;
344
345 if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
346 if (mode == OMAPFB_AUTO_UPDATE)
347 omapfb_start_auto_update(fbdev, display);
348 else /* MANUAL_UPDATE */
349 omapfb_stop_auto_update(fbdev, display);
350
351 d->update_mode = mode;
352 } else { /* AUTO_UPDATE */
353 if (mode == OMAPFB_MANUAL_UPDATE)
354 r = -EINVAL;
355 }
356
357 omapfb_unlock(fbdev);
347 358
348 return r; 359 return r;
349} 360}
350 361
351static int omapfb_get_update_mode(struct fb_info *fbi, 362int omapfb_get_update_mode(struct fb_info *fbi,
352 enum omapfb_update_mode *mode) 363 enum omapfb_update_mode *mode)
353{ 364{
354 struct omap_dss_device *display = fb2display(fbi); 365 struct omap_dss_device *display = fb2display(fbi);
355 enum omap_dss_update_mode m; 366 struct omapfb_info *ofbi = FB2OFB(fbi);
367 struct omapfb2_device *fbdev = ofbi->fbdev;
368 struct omapfb_display_data *d;
356 369
357 if (!display) 370 if (!display)
358 return -EINVAL; 371 return -EINVAL;
359 372
360 if (!display->driver->get_update_mode) { 373 omapfb_lock(fbdev);
361 *mode = OMAPFB_AUTO_UPDATE;
362 return 0;
363 }
364 374
365 m = display->driver->get_update_mode(display); 375 d = get_display_data(fbdev, display);
366 376
367 switch (m) { 377 *mode = d->update_mode;
368 case OMAP_DSS_UPDATE_DISABLED: 378
369 *mode = OMAPFB_UPDATE_DISABLED; 379 omapfb_unlock(fbdev);
370 break;
371 case OMAP_DSS_UPDATE_AUTO:
372 *mode = OMAPFB_AUTO_UPDATE;
373 break;
374 case OMAP_DSS_UPDATE_MANUAL:
375 *mode = OMAPFB_MANUAL_UPDATE;
376 break;
377 default:
378 BUG();
379 }
380 380
381 return 0; 381 return 0;
382} 382}
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 505bc12a3031..602b71a92d3c 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -46,6 +46,10 @@ static char *def_vram;
46static int def_vrfb; 46static int def_vrfb;
47static int def_rotate; 47static int def_rotate;
48static int def_mirror; 48static int def_mirror;
49static bool auto_update;
50static unsigned int auto_update_freq;
51module_param(auto_update, bool, 0);
52module_param(auto_update_freq, uint, 0644);
49 53
50#ifdef DEBUG 54#ifdef DEBUG
51unsigned int omapfb_debug; 55unsigned int omapfb_debug;
@@ -1242,6 +1246,7 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
1242 struct omapfb_info *ofbi = FB2OFB(fbi); 1246 struct omapfb_info *ofbi = FB2OFB(fbi);
1243 struct omapfb2_device *fbdev = ofbi->fbdev; 1247 struct omapfb2_device *fbdev = ofbi->fbdev;
1244 struct omap_dss_device *display = fb2display(fbi); 1248 struct omap_dss_device *display = fb2display(fbi);
1249 struct omapfb_display_data *d;
1245 int r = 0; 1250 int r = 0;
1246 1251
1247 if (!display) 1252 if (!display)
@@ -1249,6 +1254,8 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
1249 1254
1250 omapfb_lock(fbdev); 1255 omapfb_lock(fbdev);
1251 1256
1257 d = get_display_data(fbdev, display);
1258
1252 switch (blank) { 1259 switch (blank) {
1253 case FB_BLANK_UNBLANK: 1260 case FB_BLANK_UNBLANK:
1254 if (display->state != OMAP_DSS_DISPLAY_SUSPENDED) 1261 if (display->state != OMAP_DSS_DISPLAY_SUSPENDED)
@@ -1257,6 +1264,11 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
1257 if (display->driver->resume) 1264 if (display->driver->resume)
1258 r = display->driver->resume(display); 1265 r = display->driver->resume(display);
1259 1266
1267 if ((display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) &&
1268 d->update_mode == OMAPFB_AUTO_UPDATE &&
1269 !d->auto_update_work_enabled)
1270 omapfb_start_auto_update(fbdev, display);
1271
1260 break; 1272 break;
1261 1273
1262 case FB_BLANK_NORMAL: 1274 case FB_BLANK_NORMAL:
@@ -1268,6 +1280,9 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
1268 if (display->state != OMAP_DSS_DISPLAY_ACTIVE) 1280 if (display->state != OMAP_DSS_DISPLAY_ACTIVE)
1269 goto exit; 1281 goto exit;
1270 1282
1283 if (d->auto_update_work_enabled)
1284 omapfb_stop_auto_update(fbdev, display);
1285
1271 if (display->driver->suspend) 1286 if (display->driver->suspend)
1272 r = display->driver->suspend(display); 1287 r = display->driver->suspend(display);
1273 1288
@@ -1724,6 +1739,78 @@ err:
1724 return r; 1739 return r;
1725} 1740}
1726 1741
1742static void omapfb_auto_update_work(struct work_struct *work)
1743{
1744 struct omap_dss_device *dssdev;
1745 struct omap_dss_driver *dssdrv;
1746 struct omapfb_display_data *d;
1747 u16 w, h;
1748 unsigned int freq;
1749 struct omapfb2_device *fbdev;
1750
1751 d = container_of(work, struct omapfb_display_data,
1752 auto_update_work.work);
1753
1754 dssdev = d->dssdev;
1755 dssdrv = dssdev->driver;
1756 fbdev = d->fbdev;
1757
1758 if (!dssdrv || !dssdrv->update)
1759 return;
1760
1761 if (dssdrv->sync)
1762 dssdrv->sync(dssdev);
1763
1764 dssdrv->get_resolution(dssdev, &w, &h);
1765 dssdrv->update(dssdev, 0, 0, w, h);
1766
1767 freq = auto_update_freq;
1768 if (freq == 0)
1769 freq = 20;
1770 queue_delayed_work(fbdev->auto_update_wq,
1771 &d->auto_update_work, HZ / freq);
1772}
1773
1774void omapfb_start_auto_update(struct omapfb2_device *fbdev,
1775 struct omap_dss_device *display)
1776{
1777 struct omapfb_display_data *d;
1778
1779 if (fbdev->auto_update_wq == NULL) {
1780 struct workqueue_struct *wq;
1781
1782 wq = create_singlethread_workqueue("omapfb_auto_update");
1783
1784 if (wq == NULL) {
1785 dev_err(fbdev->dev, "Failed to create workqueue for "
1786 "auto-update\n");
1787 return;
1788 }
1789
1790 fbdev->auto_update_wq = wq;
1791 }
1792
1793 d = get_display_data(fbdev, display);
1794
1795 INIT_DELAYED_WORK(&d->auto_update_work, omapfb_auto_update_work);
1796
1797 d->auto_update_work_enabled = true;
1798
1799 omapfb_auto_update_work(&d->auto_update_work.work);
1800}
1801
1802void omapfb_stop_auto_update(struct omapfb2_device *fbdev,
1803 struct omap_dss_device *display)
1804{
1805 struct omapfb_display_data *d;
1806
1807 d = get_display_data(fbdev, display);
1808
1809 cancel_delayed_work_sync(&d->auto_update_work);
1810
1811 d->auto_update_work_enabled = false;
1812}
1813
1727/* initialize fb_info, var, fix to something sane based on the display */ 1814/* initialize fb_info, var, fix to something sane based on the display */
1728static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi) 1815static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi)
1729{ 1816{
@@ -1858,10 +1945,21 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev)
1858 } 1945 }
1859 1946
1860 for (i = 0; i < fbdev->num_displays; i++) { 1947 for (i = 0; i < fbdev->num_displays; i++) {
1861 if (fbdev->displays[i]->state != OMAP_DSS_DISPLAY_DISABLED) 1948 struct omap_dss_device *dssdev = fbdev->displays[i].dssdev;
1862 fbdev->displays[i]->driver->disable(fbdev->displays[i]); 1949
1950 if (fbdev->displays[i].auto_update_work_enabled)
1951 omapfb_stop_auto_update(fbdev, dssdev);
1952
1953 if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)
1954 dssdev->driver->disable(dssdev);
1955
1956 omap_dss_put_device(dssdev);
1957 }
1863 1958
1864 omap_dss_put_device(fbdev->displays[i]); 1959 if (fbdev->auto_update_wq != NULL) {
1960 flush_workqueue(fbdev->auto_update_wq);
1961 destroy_workqueue(fbdev->auto_update_wq);
1962 fbdev->auto_update_wq = NULL;
1865 } 1963 }
1866 1964
1867 dev_set_drvdata(fbdev->dev, NULL); 1965 dev_set_drvdata(fbdev->dev, NULL);
@@ -2084,14 +2182,14 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
2084 int r; 2182 int r;
2085 u8 bpp; 2183 u8 bpp;
2086 struct omap_video_timings timings, temp_timings; 2184 struct omap_video_timings timings, temp_timings;
2185 struct omapfb_display_data *d;
2087 2186
2088 r = omapfb_mode_to_timings(mode_str, &timings, &bpp); 2187 r = omapfb_mode_to_timings(mode_str, &timings, &bpp);
2089 if (r) 2188 if (r)
2090 return r; 2189 return r;
2091 2190
2092 fbdev->bpp_overrides[fbdev->num_bpp_overrides].dssdev = display; 2191 d = get_display_data(fbdev, display);
2093 fbdev->bpp_overrides[fbdev->num_bpp_overrides].bpp = bpp; 2192 d->bpp_override = bpp;
2094 ++fbdev->num_bpp_overrides;
2095 2193
2096 if (display->driver->check_timings) { 2194 if (display->driver->check_timings) {
2097 r = display->driver->check_timings(display, &timings); 2195 r = display->driver->check_timings(display, &timings);
@@ -2117,14 +2215,14 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
2117static int omapfb_get_recommended_bpp(struct omapfb2_device *fbdev, 2215static int omapfb_get_recommended_bpp(struct omapfb2_device *fbdev,
2118 struct omap_dss_device *dssdev) 2216 struct omap_dss_device *dssdev)
2119{ 2217{
2120 int i; 2218 struct omapfb_display_data *d;
2121 2219
2122 BUG_ON(dssdev->driver->get_recommended_bpp == NULL); 2220 BUG_ON(dssdev->driver->get_recommended_bpp == NULL);
2123 2221
2124 for (i = 0; i < fbdev->num_bpp_overrides; ++i) { 2222 d = get_display_data(fbdev, dssdev);
2125 if (dssdev == fbdev->bpp_overrides[i].dssdev) 2223
2126 return fbdev->bpp_overrides[i].bpp; 2224 if (d->bpp_override != 0)
2127 } 2225 return d->bpp_override;
2128 2226
2129 return dssdev->driver->get_recommended_bpp(dssdev); 2227 return dssdev->driver->get_recommended_bpp(dssdev);
2130} 2228}
@@ -2156,9 +2254,9 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev)
2156 2254
2157 display = NULL; 2255 display = NULL;
2158 for (i = 0; i < fbdev->num_displays; ++i) { 2256 for (i = 0; i < fbdev->num_displays; ++i) {
2159 if (strcmp(fbdev->displays[i]->name, 2257 if (strcmp(fbdev->displays[i].dssdev->name,
2160 display_str) == 0) { 2258 display_str) == 0) {
2161 display = fbdev->displays[i]; 2259 display = fbdev->displays[i].dssdev;
2162 break; 2260 break;
2163 } 2261 }
2164 } 2262 }
@@ -2182,6 +2280,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
2182 struct omap_dss_device *dssdev) 2280 struct omap_dss_device *dssdev)
2183{ 2281{
2184 struct omap_dss_driver *dssdrv = dssdev->driver; 2282 struct omap_dss_driver *dssdrv = dssdev->driver;
2283 struct omapfb_display_data *d;
2185 int r; 2284 int r;
2186 2285
2187 r = dssdrv->enable(dssdev); 2286 r = dssdrv->enable(dssdev);
@@ -2191,8 +2290,20 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
2191 return r; 2290 return r;
2192 } 2291 }
2193 2292
2293 d = get_display_data(fbdev, dssdev);
2294
2295 d->fbdev = fbdev;
2296
2194 if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { 2297 if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
2195 u16 w, h; 2298 u16 w, h;
2299
2300 if (auto_update) {
2301 omapfb_start_auto_update(fbdev, dssdev);
2302 d->update_mode = OMAPFB_AUTO_UPDATE;
2303 } else {
2304 d->update_mode = OMAPFB_MANUAL_UPDATE;
2305 }
2306
2196 if (dssdrv->enable_te) { 2307 if (dssdrv->enable_te) {
2197 r = dssdrv->enable_te(dssdev, 1); 2308 r = dssdrv->enable_te(dssdev, 1);
2198 if (r) { 2309 if (r) {
@@ -2201,16 +2312,6 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
2201 } 2312 }
2202 } 2313 }
2203 2314
2204 if (dssdrv->set_update_mode) {
2205 r = dssdrv->set_update_mode(dssdev,
2206 OMAP_DSS_UPDATE_MANUAL);
2207 if (r) {
2208 dev_err(fbdev->dev,
2209 "Failed to set update mode\n");
2210 return r;
2211 }
2212 }
2213
2214 dssdrv->get_resolution(dssdev, &w, &h); 2315 dssdrv->get_resolution(dssdev, &w, &h);
2215 r = dssdrv->update(dssdev, 0, 0, w, h); 2316 r = dssdrv->update(dssdev, 0, 0, w, h);
2216 if (r) { 2317 if (r) {
@@ -2219,15 +2320,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
2219 return r; 2320 return r;
2220 } 2321 }
2221 } else { 2322 } else {
2222 if (dssdrv->set_update_mode) { 2323 d->update_mode = OMAPFB_AUTO_UPDATE;
2223 r = dssdrv->set_update_mode(dssdev,
2224 OMAP_DSS_UPDATE_AUTO);
2225 if (r) {
2226 dev_err(fbdev->dev,
2227 "Failed to set update mode\n");
2228 return r;
2229 }
2230 }
2231 } 2324 }
2232 2325
2233 return 0; 2326 return 0;
@@ -2275,6 +2368,8 @@ static int omapfb_probe(struct platform_device *pdev)
2275 fbdev->num_displays = 0; 2368 fbdev->num_displays = 0;
2276 dssdev = NULL; 2369 dssdev = NULL;
2277 for_each_dss_dev(dssdev) { 2370 for_each_dss_dev(dssdev) {
2371 struct omapfb_display_data *d;
2372
2278 omap_dss_get_device(dssdev); 2373 omap_dss_get_device(dssdev);
2279 2374
2280 if (!dssdev->driver) { 2375 if (!dssdev->driver) {
@@ -2282,7 +2377,12 @@ static int omapfb_probe(struct platform_device *pdev)
2282 r = -ENODEV; 2377 r = -ENODEV;
2283 } 2378 }
2284 2379
2285 fbdev->displays[fbdev->num_displays++] = dssdev; 2380 d = &fbdev->displays[fbdev->num_displays++];
2381 d->dssdev = dssdev;
2382 if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
2383 d->update_mode = OMAPFB_MANUAL_UPDATE;
2384 else
2385 d->update_mode = OMAPFB_AUTO_UPDATE;
2286 } 2386 }
2287 2387
2288 if (r) 2388 if (r)
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
index 2f5e817b2a9a..153bf1aceebc 100644
--- a/drivers/video/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -518,6 +518,39 @@ static ssize_t show_virt(struct device *dev,
518 return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region->vaddr); 518 return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region->vaddr);
519} 519}
520 520
521static ssize_t show_upd_mode(struct device *dev,
522 struct device_attribute *attr, char *buf)
523{
524 struct fb_info *fbi = dev_get_drvdata(dev);
525 enum omapfb_update_mode mode;
526 int r;
527
528 r = omapfb_get_update_mode(fbi, &mode);
529
530 if (r)
531 return r;
532
533 return snprintf(buf, PAGE_SIZE, "%u\n", (unsigned)mode);
534}
535
536static ssize_t store_upd_mode(struct device *dev, struct device_attribute *attr,
537 const char *buf, size_t count)
538{
539 struct fb_info *fbi = dev_get_drvdata(dev);
540 unsigned mode;
541 int r;
542
543 r = kstrtouint(buf, 0, &mode);
544 if (r)
545 return r;
546
547 r = omapfb_set_update_mode(fbi, mode);
548 if (r)
549 return r;
550
551 return count;
552}
553
521static struct device_attribute omapfb_attrs[] = { 554static struct device_attribute omapfb_attrs[] = {
522 __ATTR(rotate_type, S_IRUGO | S_IWUSR, show_rotate_type, 555 __ATTR(rotate_type, S_IRUGO | S_IWUSR, show_rotate_type,
523 store_rotate_type), 556 store_rotate_type),
@@ -528,6 +561,7 @@ static struct device_attribute omapfb_attrs[] = {
528 store_overlays_rotate), 561 store_overlays_rotate),
529 __ATTR(phys_addr, S_IRUGO, show_phys, NULL), 562 __ATTR(phys_addr, S_IRUGO, show_phys, NULL),
530 __ATTR(virt_addr, S_IRUGO, show_virt, NULL), 563 __ATTR(virt_addr, S_IRUGO, show_virt, NULL),
564 __ATTR(update_mode, S_IRUGO | S_IWUSR, show_upd_mode, store_upd_mode),
531}; 565};
532 566
533int omapfb_create_sysfs(struct omapfb2_device *fbdev) 567int omapfb_create_sysfs(struct omapfb2_device *fbdev)
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index aa1b1d974276..fdf0edeccf4e 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -73,6 +73,15 @@ struct omapfb_info {
73 bool mirror; 73 bool mirror;
74}; 74};
75 75
76struct omapfb_display_data {
77 struct omapfb2_device *fbdev;
78 struct omap_dss_device *dssdev;
79 u8 bpp_override;
80 enum omapfb_update_mode update_mode;
81 bool auto_update_work_enabled;
82 struct delayed_work auto_update_work;
83};
84
76struct omapfb2_device { 85struct omapfb2_device {
77 struct device *dev; 86 struct device *dev;
78 struct mutex mtx; 87 struct mutex mtx;
@@ -86,17 +95,13 @@ struct omapfb2_device {
86 struct omapfb2_mem_region regions[10]; 95 struct omapfb2_mem_region regions[10];
87 96
88 unsigned num_displays; 97 unsigned num_displays;
89 struct omap_dss_device *displays[10]; 98 struct omapfb_display_data displays[10];
90 unsigned num_overlays; 99 unsigned num_overlays;
91 struct omap_overlay *overlays[10]; 100 struct omap_overlay *overlays[10];
92 unsigned num_managers; 101 unsigned num_managers;
93 struct omap_overlay_manager *managers[10]; 102 struct omap_overlay_manager *managers[10];
94 103
95 unsigned num_bpp_overrides; 104 struct workqueue_struct *auto_update_wq;
96 struct {
97 struct omap_dss_device *dssdev;
98 u8 bpp;
99 } bpp_overrides[10];
100}; 105};
101 106
102struct omapfb_colormode { 107struct omapfb_colormode {
@@ -128,6 +133,13 @@ int dss_mode_to_fb_mode(enum omap_color_mode dssmode,
128int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl, 133int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
129 u16 posx, u16 posy, u16 outw, u16 outh); 134 u16 posx, u16 posy, u16 outw, u16 outh);
130 135
136void omapfb_start_auto_update(struct omapfb2_device *fbdev,
137 struct omap_dss_device *display);
138void omapfb_stop_auto_update(struct omapfb2_device *fbdev,
139 struct omap_dss_device *display);
140int omapfb_get_update_mode(struct fb_info *fbi, enum omapfb_update_mode *mode);
141int omapfb_set_update_mode(struct fb_info *fbi, enum omapfb_update_mode mode);
142
131/* find the display connected to this fb, if any */ 143/* find the display connected to this fb, if any */
132static inline struct omap_dss_device *fb2display(struct fb_info *fbi) 144static inline struct omap_dss_device *fb2display(struct fb_info *fbi)
133{ 145{
@@ -143,6 +155,19 @@ static inline struct omap_dss_device *fb2display(struct fb_info *fbi)
143 return NULL; 155 return NULL;
144} 156}
145 157
158static inline struct omapfb_display_data *get_display_data(
159 struct omapfb2_device *fbdev, struct omap_dss_device *dssdev)
160{
161 int i;
162
163 for (i = 0; i < fbdev->num_displays; ++i)
164 if (fbdev->displays[i].dssdev == dssdev)
165 return &fbdev->displays[i];
166
167 /* This should never happen */
168 BUG();
169}
170
146static inline void omapfb_lock(struct omapfb2_device *fbdev) 171static inline void omapfb_lock(struct omapfb2_device *fbdev)
147{ 172{
148 mutex_lock(&fbdev->mtx); 173 mutex_lock(&fbdev->mtx);
diff --git a/drivers/video/savage/savagefb.h b/drivers/video/savage/savagefb.h
index 32549d177b19..dcaab9012ca2 100644
--- a/drivers/video/savage/savagefb.h
+++ b/drivers/video/savage/savagefb.h
@@ -55,7 +55,7 @@
55 55
56#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) 56#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
57 57
58#define S3_SAVAGE4_SERIES(chip) ((chip>=S3_SAVAGE4) || (chip<=S3_PROSAVAGEDDR)) 58#define S3_SAVAGE4_SERIES(chip) ((chip>=S3_SAVAGE4) && (chip<=S3_PROSAVAGEDDR))
59 59
60#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE)) 60#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
61 61
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 02bf7bf7160b..b5abaae38e97 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * dscore.c 2 * dscore.c
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -1024,5 +1024,5 @@ module_init(ds_init);
1024module_exit(ds_fini); 1024module_exit(ds_fini);
1025 1025
1026MODULE_LICENSE("GPL"); 1026MODULE_LICENSE("GPL");
1027MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); 1027MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
1028MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)"); 1028MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)");
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c
index 334d1ccf9c92..f667c26b2195 100644
--- a/drivers/w1/masters/matrox_w1.c
+++ b/drivers/w1/masters/matrox_w1.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * matrox_w1.c 2 * matrox_w1.c
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -39,7 +39,7 @@
39#include "../w1_log.h" 39#include "../w1_log.h"
40 40
41MODULE_LICENSE("GPL"); 41MODULE_LICENSE("GPL");
42MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); 42MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
43MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire prtocol) over VGA DDC(matrox gpio)."); 43MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire prtocol) over VGA DDC(matrox gpio).");
44 44
45static struct pci_device_id matrox_w1_tbl[] = { 45static struct pci_device_id matrox_w1_tbl[] = {
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
index c37781899d90..7c8cdb8aed26 100644
--- a/drivers/w1/slaves/w1_ds2408.c
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -373,7 +373,7 @@ static int w1_f29_add_slave(struct w1_slave *sl)
373static void w1_f29_remove_slave(struct w1_slave *sl) 373static void w1_f29_remove_slave(struct w1_slave *sl)
374{ 374{
375 int i; 375 int i;
376 for (i = NB_SYSFS_BIN_FILES; i <= 0; --i) 376 for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)
377 sysfs_remove_bin_file(&sl->dev.kobj, 377 sysfs_remove_bin_file(&sl->dev.kobj,
378 &(w1_f29_sysfs_bin_files[i])); 378 &(w1_f29_sysfs_bin_files[i]));
379} 379}
diff --git a/drivers/w1/slaves/w1_smem.c b/drivers/w1/slaves/w1_smem.c
index cc8c02e92593..84655625c870 100644
--- a/drivers/w1/slaves/w1_smem.c
+++ b/drivers/w1/slaves/w1_smem.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_smem.c 2 * w1_smem.c
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -32,7 +32,7 @@
32#include "../w1_family.h" 32#include "../w1_family.h"
33 33
34MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
35MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); 35MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
36MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family."); 36MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family.");
37 37
38static struct w1_family w1_smem_family_01 = { 38static struct w1_family w1_smem_family_01 = {
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 402928b135d1..a1ef9b5b38cf 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_therm.c 2 * w1_therm.c
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -34,7 +34,7 @@
34#include "../w1_family.h" 34#include "../w1_family.h"
35 35
36MODULE_LICENSE("GPL"); 36MODULE_LICENSE("GPL");
37MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); 37MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
38MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family."); 38MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family.");
39 39
40/* Allow the strong pullup to be disabled, but default to enabled. 40/* Allow the strong pullup to be disabled, but default to enabled.
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 6c136c19e982..c37497823851 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1.c 2 * w1.c
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -42,7 +42,7 @@
42#include "w1_netlink.h" 42#include "w1_netlink.h"
43 43
44MODULE_LICENSE("GPL"); 44MODULE_LICENSE("GPL");
45MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); 45MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
46MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol."); 46MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol.");
47 47
48static int w1_timeout = 10; 48static int w1_timeout = 10;
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index 1ce23fc6186c..4d012ca3f32c 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1.h 2 * w1.h
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c
index 4a099041f28a..63359797c8b1 100644
--- a/drivers/w1/w1_family.c
+++ b/drivers/w1/w1_family.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_family.c 2 * w1_family.c
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 98a1ac0f4693..490cda2281bc 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_family.h 2 * w1_family.h
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index b50be3f1073d..d220bce2cee4 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_int.c 2 * w1_int.c
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_int.h b/drivers/w1/w1_int.h
index 4274082d2262..2ad7d4414bed 100644
--- a/drivers/w1/w1_int.h
+++ b/drivers/w1/w1_int.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_int.h 2 * w1_int.h
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index 8e8b64cfafb6..765b37b62a4f 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_io.c 2 * w1_io.c
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_log.h b/drivers/w1/w1_log.h
index e6ab7cf08f88..9c7bd62e6bdc 100644
--- a/drivers/w1/w1_log.h
+++ b/drivers/w1/w1_log.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_log.h 2 * w1_log.h
3 * 3 *
4 * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 55aabd927c60..40788c925d1c 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_netlink.c 2 * w1_netlink.c
3 * 3 *
4 * Copyright (c) 2003 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h
index 27e950f935b1..b0922dc29658 100644
--- a/drivers/w1/w1_netlink.h
+++ b/drivers/w1/w1_netlink.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * w1_netlink.h 2 * w1_netlink.h
3 * 3 *
4 * Copyright (c) 2003 Evgeniy Polyakov <johnpol@2ka.mipt.ru> 4 * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index f441726ddf2b..86b0735e6aa0 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -36,9 +36,6 @@ config WATCHDOG_CORE
36 and gives them the /dev/watchdog interface (and later also the 36 and gives them the /dev/watchdog interface (and later also the
37 sysfs interface). 37 sysfs interface).
38 38
39 To compile this driver as a module, choose M here: the module will
40 be called watchdog.
41
42config WATCHDOG_NOWAYOUT 39config WATCHDOG_NOWAYOUT
43 bool "Disable watchdog shutdown on close" 40 bool "Disable watchdog shutdown on close"
44 help 41 help
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index afa78a54711e..809f41c30c44 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -458,7 +458,15 @@ static int __devexit nv_tco_remove(struct platform_device *dev)
458 458
459static void nv_tco_shutdown(struct platform_device *dev) 459static void nv_tco_shutdown(struct platform_device *dev)
460{ 460{
461 u32 val;
462
461 tco_timer_stop(); 463 tco_timer_stop();
464
465 /* Some BIOSes fail the POST (once) if the NO_REBOOT flag is not
466 * unset during shutdown. */
467 pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
468 val &= ~MCP51_SMBUS_SETUP_B_TCO_REBOOT;
469 pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
462} 470}
463 471
464static struct platform_driver nv_tco_driver = { 472static struct platform_driver nv_tco_driver = {
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index db84f2322d1a..a267dc078daf 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -64,7 +64,7 @@
64 * misses its deadline, the kernel timer will allow the WDT to overflow. 64 * misses its deadline, the kernel timer will allow the WDT to overflow.
65 */ 65 */
66static int clock_division_ratio = WTCSR_CKS_4096; 66static int clock_division_ratio = WTCSR_CKS_4096;
67#define next_ping_period(cks) msecs_to_jiffies(cks - 4) 67#define next_ping_period(cks) (jiffies + msecs_to_jiffies(cks - 4))
68 68
69static const struct watchdog_info sh_wdt_info; 69static const struct watchdog_info sh_wdt_info;
70static struct platform_device *sh_wdt_dev; 70static struct platform_device *sh_wdt_dev;
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index f815283667af..5f7ff8e2fc14 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -11,7 +11,7 @@ config XEN_BALLOON
11 11
12config XEN_SELFBALLOONING 12config XEN_SELFBALLOONING
13 bool "Dynamically self-balloon kernel memory to target" 13 bool "Dynamically self-balloon kernel memory to target"
14 depends on XEN && XEN_BALLOON && CLEANCACHE && SWAP 14 depends on XEN && XEN_BALLOON && CLEANCACHE && SWAP && XEN_TMEM
15 default n 15 default n
16 help 16 help
17 Self-ballooning dynamically balloons available kernel memory driven 17 Self-ballooning dynamically balloons available kernel memory driven
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 1b4afd81f872..6ea852e25162 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -70,6 +70,7 @@
70#include <linux/kernel.h> 70#include <linux/kernel.h>
71#include <linux/mm.h> 71#include <linux/mm.h>
72#include <linux/mman.h> 72#include <linux/mman.h>
73#include <linux/module.h>
73#include <linux/workqueue.h> 74#include <linux/workqueue.h>
74#include <xen/balloon.h> 75#include <xen/balloon.h>
75#include <xen/tmem.h> 76#include <xen/tmem.h>